diff --git a/research/embedding_eval_results/results.json b/research/embedding_eval_results/results.json
new file mode 100644
index 0000000..e398d34
--- /dev/null
+++ b/research/embedding_eval_results/results.json
@@ -0,0 +1,162 @@
+{
+  "bge-m3:latest": {
+    "scifact": {
+      "NDCG": {
+        "NDCG@1": 0.51,
+        "NDCG@5": 0.61904,
+        "NDCG@10": 0.64312,
+        "NDCG@100": 0.6705
+      },
+      "MAP": {
+        "MAP@1": 0.48178,
+        "MAP@5": 0.58023,
+        "MAP@10": 0.59181,
+        "MAP@100": 0.59849
+      },
+      "Recall": {
+        "Recall@1": 0.48178,
+        "Recall@5": 0.71489,
+        "Recall@10": 0.78344,
+        "Recall@100": 0.90367
+      },
+      "Precision": {
+        "P@1": 0.51,
+        "P@5": 0.15667,
+        "P@10": 0.088,
+        "P@100": 0.01027
+      }
+    },
+    "cosqa": {
+      "NDCG": {
+        "NDCG@1": 0.116,
+        "NDCG@5": 0.23831,
+        "NDCG@10": 0.28783,
+        "NDCG@100": 0.36311
+      },
+      "MAP": {
+        "MAP@1": 0.116,
+        "MAP@5": 0.19687,
+        "MAP@10": 0.21791,
+        "MAP@100": 0.23272
+      },
+      "Recall": {
+        "Recall@1": 0.116,
+        "Recall@5": 0.366,
+        "Recall@10": 0.516,
+        "Recall@100": 0.874
+      },
+      "Precision": {
+        "P@1": 0.116,
+        "P@5": 0.0732,
+        "P@10": 0.0516,
+        "P@100": 0.00874
+      }
+    },
+    "codexglue": {
+      "NDCG": {
+        "NDCG@1": 0.952,
+        "NDCG@5": 0.97379,
+        "NDCG@10": 0.97494,
+        "NDCG@100": 0.97629
+      },
+      "MAP": {
+        "MAP@1": 0.952,
+        "MAP@5": 0.96849,
+        "MAP@10": 0.96897,
+        "MAP@100": 0.96926
+      },
+      "Recall": {
+        "Recall@1": 0.952,
+        "Recall@5": 0.98922,
+        "Recall@10": 0.99276,
+        "Recall@100": 0.99885
+      },
+      "Precision": {
+        "P@1": 0.952,
+        "P@5": 0.19784,
+        "P@10": 0.09928,
+        "P@100": 0.00999
+      }
+    }
+  },
+  "qwen3-0.6B-emb:latest": {
+    "scifact": {
+      "NDCG": {
+        "NDCG@1": 0.55333,
+        "NDCG@5": 0.65926,
+        "NDCG@10": 0.67848,
+        "NDCG@100": 0.70557
+      },
+      "MAP": {
+        "MAP@1": 0.52428,
+        "MAP@5": 0.62128,
+        "MAP@10": 0.63094,
+        "MAP@100": 0.63723
+      },
+      "Recall": {
+        "Recall@1": 0.52428,
+        "Recall@5": 0.75867,
+        "Recall@10": 0.81444,
+        "Recall@100": 0.93667
+      },
+      "Precision": {
+        "P@1": 0.55333,
+        "P@5": 0.17067,
+        "P@10": 0.093,
+        "P@100": 0.01067
+      }
+    },
+    "cosqa": {
+      "NDCG": {
+        "NDCG@1": 0.174,
+        "NDCG@5": 0.33509,
+        "NDCG@10": 0.39086,
+        "NDCG@100": 0.45099
+      },
+      "MAP": {
+        "MAP@1": 0.174,
+        "MAP@5": 0.2808,
+        "MAP@10": 0.30466,
+        "MAP@100": 0.31702
+      },
+      "Recall": {
+        "Recall@1": 0.174,
+        "Recall@5": 0.502,
+        "Recall@10": 0.67,
+        "Recall@100": 0.952
+      },
+      "Precision": {
+        "P@1": 0.174,
+        "P@5": 0.1004,
+        "P@10": 0.067,
+        "P@100": 0.00952
+      }
+    },
+    "codexglue": {
+      "NDCG": {
+        "NDCG@1": 0.94971,
+        "NDCG@5": 0.97166,
+        "NDCG@10": 0.97342,
+        "NDCG@100": 0.97453
+      },
+      "MAP": {
+        "MAP@1": 0.94971,
+        "MAP@5": 0.9662,
+        "MAP@10": 0.96694,
+        "MAP@100": 0.96718
+      },
+      "Recall": {
+        "Recall@1": 0.94971,
+        "Recall@5": 0.98761,
+        "Recall@10": 0.99297,
+        "Recall@100": 0.99807
+      },
+      "Precision": {
+        "P@1": 0.94971,
+        "P@5": 0.19752,
+        "P@10": 0.0993,
+        "P@100": 0.00998
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/research/embeddings/datasets/codexglue/corpus.jsonl b/research/embeddings/datasets/codexglue/corpus.jsonl
new file mode 100644
index 0000000..6fed701
--- /dev/null
+++ b/research/embeddings/datasets/codexglue/corpus.jsonl
@@ -0,0 +1,19210 @@
+{"_id": "doc_0", "title": "", "text": "def Func(arg_0):\n \"\"\"str->list\n Convert XML to URL List.\n From Biligrab.\n \"\"\"\n arg_1 = []\n arg_2 = parseString(arg_0)\n for arg_3 in arg_2.getElementsByTagName('durl'):\n arg_4 = arg_3.getElementsByTagName('url')[0]\n arg_1.append(arg_4.childNodes[0].data)\n return arg_1"}
+{"_id": "doc_1", "title": "", "text": "def Func(arg_0, arg_1='.', arg_2=True, arg_3=False, **arg_4):\n \"\"\"Downloads Sina videos by URL.\n \"\"\"\n if 'news.sina.com.cn/zxt' in arg_0:\n sina_zxt(arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, **arg_4)\n return\n\n arg_5 = match1(arg_0, r'vid=(\\d+)')\n if arg_5 is None:\n arg_6 = get_content(arg_0)\n arg_5 = hd_vid = match1(arg_6, r'hd_vid\\s*:\\s*\\'([^\\']+)\\'')\n if hd_vid == '0':\n arg_7 = match1(arg_6, r'[^\\w]vid\\s*:\\s*\\'([^\\']+)\\'').split('|')\n arg_5 = arg_7[-1]\n\n if arg_5 is None:\n arg_5 = match1(arg_6, r'vid:\"?(\\d+)\"?')\n if arg_5:\n #title = match1(video_page, r'title\\s*:\\s*\\'([^\\']+)\\'')\n Func_by_vid(arg_5, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)\n else:\n arg_8 = match1(arg_6, r'vkey\\s*:\\s*\"([^\"]+)\"')\n if arg_8 is None:\n arg_5 = match1(arg_0, r'#(\\d+)')\n Func_by_vid(arg_5, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)\n return\n arg_9 = match1(arg_6, r'title\\s*:\\s*\"([^\"]+)\"')\n Func_by_vkey(arg_8, arg_9=arg_9, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)"}
+{"_id": "doc_2", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Format text with color or other effects into ANSI escaped string.\"\"\"\n return \"\\33[{}m{content}\\33[{}m\".format(\";\".join([str(arg_2) for arg_2 in arg_1]), RESET, content=arg_0) if IS_ANSI_TERMINAL and arg_1 else arg_0"}
+{"_id": "doc_3", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Print a log message to standard error.\"\"\"\n sys.stderr.write(sprint(\"{}: {}\".format(script_name, arg_0), *arg_1) + \"\\n\")"}
+{"_id": "doc_4", "title": "", "text": "dFuncf Func(arg_0, arg_1=NonFunc):\n \"\"\"Print an Funcrror log mFuncssagFunc.\"\"\"\n print_log(arg_0, YELLOW, BOLD)\n if arg_1 is not NonFunc:\n sys.Funcxit(arg_1)"}
+{"_id": "doc_5", "title": "", "text": "def Func():\n \"\"\"Detect operating system.\n \"\"\"\n\n # Inspired by:\n # https://github.com/scivision/pybashutils/blob/78b7f2b339cb03b1c37df94015098bbe462f8526/pybashutils/windows_linux_detect.py\n\n arg_0 = system().lower()\n arg_1 = 'unknown'\n\n if 'cygwin' in arg_0:\n arg_1 = 'cygwin'\n elif 'darwin' in arg_0:\n arg_1 = 'mac'\n elif 'linux' in arg_0:\n arg_1 = 'linux'\n # detect WSL https://github.com/Microsoft/BashOnWindows/issues/423\n try:\n with open('/proc/version', 'r') as f:\n if 'microsoft' in f.read().lower():\n arg_1 = 'wsl'\n except: pass\n elif 'windows' in arg_0:\n arg_1 = 'windows'\n elif 'bsd' in arg_0:\n arg_1 = 'bsd'\n\n return arg_1"}
+{"_id": "doc_6", "title": "", "text": "def Func(arg_0, arg_1='.', arg_2=False, arg_3=False, **arg_4):\n \"\"\"str->None\"\"\"\n # https://vimeo.com/channels/464686\n arg_5 = match1(arg_0, r'http://vimeo.com/channels/(\\w+)')\n Func_id(arg_5, arg_1, arg_2, arg_3, **arg_4)"}
+{"_id": "doc_7", "title": "", "text": "def Func(arg_0):\n \"\"\"str->dict\n Information for CKPlayer API content.\"\"\"\n arg_1 = ET.XML(arg_0)\n arg_2 = {'title': '',\n #'duration': 0,\n 'links': [],\n 'size': 0,\n 'flashvars': '',}\n arg_3 = dictify(arg_1)['ckplayer']\n if 'info' in arg_3:\n if '_text' in arg_3['info'][0]['title'][0]: #title\n arg_2['title'] = arg_3['info'][0]['title'][0]['_text'].strip()\n\n #if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip(): #duration\n #video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()\n\n if '_text' in arg_3['video'][0]['size'][0]: #size exists for 1 piece\n arg_2['size'] = sum([int(i['size'][0]['_text']) for i in arg_3['video']])\n\n if '_text' in arg_3['video'][0]['file'][0]: #link exist\n arg_2['links'] = [i['file'][0]['_text'].strip() for i in arg_3['video']]\n\n if '_text' in arg_3['flashvars'][0]:\n arg_2['flashvars'] =
arg_3['flashvars'][0]['_text'].strip()\n\n return arg_2"} +{"_id": "doc_8", "title": "", "text": "def Func(arg_0):\n \"\"\"str->list of str\n Give you the real URLs.\"\"\"\n arg_1 = loads(get_content(arg_0))\n arg_2 = arg_1['info']\n arg_3 = urlsplit(arg_2)\n \n arg_4 = \"{scheme}://{netloc}{path}/\".format(scheme = arg_3[0],\n netloc = arg_3[1],\n path = dirname(arg_3[2]))\n\n arg_1 = get_content(arg_1['info']) #get the REAL M3U url, maybe to be changed later?\n arg_5 = []\n arg_6 = 0\n for arg_7 in arg_1.split():\n if not arg_7.startswith('#'): #not the best way, better we use the m3u8 package\n arg_5.append(arg_4 + arg_7)\n # use ext-info for fast size calculate\n elif arg_7.startswith('#EXT-MGTV-File-SIZE:'):\n arg_6 += int(arg_7[arg_7.rfind(':')+1:])\n\n return arg_2, arg_6, arg_5"} +{"_id": "doc_9", "title": "", "text": "def Func(arg_0, arg_1=arg_2()):\n \"\"\"Converts a string to a valid filename.\n \"\"\"\n\n # POSIX systems\n arg_0 = arg_0.translate({\n 0: None,\n ord('/'): '-',\n ord('|'): '-',\n })\n\n # FIXME: do some filesystem detection\n if arg_1 == 'windows' or arg_1 == 'cygwin' or arg_1 == 'wsl':\n # Windows (non-POSIX namespace)\n arg_0 = arg_0.translate({\n # Reserved in Windows VFAT and NTFS\n ord(':'): '-',\n ord('*'): '-',\n ord('?'): '-',\n ord('\\\\'): '-',\n ord('\\\"'): '\\'',\n # Reserved in Windows VFAT\n ord('+'): '-',\n ord('<'): '-',\n ord('>'): '-',\n ord('['): '(',\n ord(']'): ')',\n ord('\\t'): ' ',\n })\n else:\n # *nix\n if arg_1 == 'mac':\n # Mac OS HFS+\n arg_0 = arg_0.translate({\n ord(':'): '-',\n })\n\n # Remove leading .\n if arg_0.startswith(\".\"):\n arg_0 = arg_0[1:]\n\n arg_0 = arg_0[:80] # Trim to 82 Unicode characters long\n return arg_0"} +{"_id": "doc_10", "title": "", "text": "def Func(arg_0, arg_1='.', arg_2=True, arg_3=False, **arg_4):\n \"\"\"Downloads CBS videos by URL.\n \"\"\"\n\n arg_5 = get_content(arg_0)\n arg_6 = match1(arg_5, r'video\\.settings\\.pid\\s*=\\s*\\'([^\\']+)\\'')\n arg_7 = match1(arg_5, r'video\\.settings\\.title\\s*=\\s*\\\"([^\\\"]+)\\\"')\n\n theplatform_download_by_pid(arg_6, arg_7, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_11", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Override the original one\n Ugly ugly dirty hack\"\"\"\n if 'json_output' in arg_1 and arg_1['json_output']:\n json_output.output(arg_0)\n elif 'info_only' in arg_1 and arg_1['info_only']:\n if 'stream_id' in arg_1 and arg_1['stream_id']:\n # Display the stream\n arg_2 = arg_1['stream_id']\n if 'index' not in arg_1:\n arg_0.p(arg_2)\n else:\n arg_0.p_i(arg_2)\n else:\n # Display all available streams\n if 'index' not in arg_1:\n arg_0.p([])\n else:\n arg_2 = arg_0.streams_sorted[0]['id'] if 'id' in arg_0.streams_sorted[0] else arg_0.streams_sorted[0]['itag']\n arg_0.p_i(arg_2)\n\n else:\n if 'stream_id' in arg_1 and arg_1['stream_id']:\n # Download the stream\n arg_2 = arg_1['stream_id']\n else:\n # Download stream with the best quality\n arg_2 = arg_0.streams_sorted[0]['id'] if 'id' in arg_0.streams_sorted[0] else arg_0.streams_sorted[0]['itag']\n\n if 'index' not in arg_1:\n arg_0.p(arg_2)\n else:\n arg_0.p_i(arg_2)\n\n if arg_2 in arg_0.streams:\n arg_3 = arg_0.streams[arg_2]['src']\n arg_4 = arg_0.streams[arg_2]['container']\n arg_5 = arg_0.streams[arg_2]['size']\n else:\n arg_3 = arg_0.dash_streams[arg_2]['src']\n arg_4 = arg_0.dash_streams[arg_2]['container']\n arg_5 = arg_0.dash_streams[arg_2]['size']\n\n if not arg_3:\n log.wtf('[Failed] Cannot extract video source.')\n # For legacy main()\n \n #Here's the 
change!!\n Func_url_ffmpeg(arg_3[0], arg_0.title, 'mp4', output_dir=arg_1['output_dir'], merge=arg_1['merge'], stream=False)\n\n if not arg_1['caption']:\n print('Skipping captions.')\n return\n for arg_6 in arg_0.caption_tracks:\n arg_7 = '%s.%s.srt' % (get_filename(arg_0.title), arg_6)\n print('Saving %s ... ' % arg_7, end=\"\", flush=True)\n arg_8 = arg_0.caption_tracks[arg_6]\n with open(os.path.join(arg_1['output_dir'], arg_7),\n 'w', encoding='utf-8') as x:\n x.write(arg_8)\n print('Done.')"} +{"_id": "doc_12", "title": "", "text": "def Func(arg_0, arg_1, arg_2='.', arg_3=True, arg_4=False, **arg_5):\n \"\"\"str, str, str, bool, bool ->None\n\n Download Acfun video by vid.\n\n Call Acfun API, decide which site to use, and pass the job to its\n extractor.\n \"\"\"\n\n #first call the main parasing API\n arg_6 = json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + arg_0))\n\n arg_7 = arg_6['sourceType']\n\n #decide sourceId to know which extractor to use\n if 'sourceId' in arg_6: arg_8 = arg_6['sourceId']\n # danmakuId = info['danmakuId']\n\n #call extractor decided by sourceId\n if arg_7 == 'sina':\n sina_download_by_vid(arg_8, arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n elif arg_7 == 'youku':\n youku_download_by_vid(arg_8, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, **arg_5)\n elif arg_7 == 'tudou':\n tudou_download_by_iid(arg_8, arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n elif arg_7 == 'qq':\n qq_download_by_vid(arg_8, arg_1, True, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n elif arg_7 == 'letv':\n letvcloud_download_by_vu(arg_8, '2d8c027396', arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n elif arg_7 == 'zhuzhan':\n #As in Jul.28.2016, Acfun is using embsig to anti hotlink so we need to pass this\n#In Mar. 2017 there is a dedicated ``acfun_proxy'' in youku cloud player\n#old code removed\n arg_9 = 'http://www.acfun.cn/v/ac' + arg_0\n arg_10 = youku_acfun_proxy(arg_6['sourceId'], arg_6['encode'], arg_9)\n arg_11 = ['mp4hd3', 'mp4hd2', 'mp4hd', 'flvhd']\n for arg_12 in arg_11:\n if arg_10.get(arg_12):\n arg_13 = arg_10[arg_12]\n break\n#total_size in the json could be incorrect(F.I. 0)\n arg_14 = 0\n for arg_9 in arg_13[0]:\n arg_15, arg_15, arg_16 = url_info(arg_9)\n arg_14 += arg_16\n#fallback to flvhd is not quite possible\n if re.search(r'fid=[0-9A-Z\\-]*.flv', arg_13[0][0]):\n arg_17 = 'flv'\n else:\n arg_17 = 'mp4'\n print_info(site_info, arg_1, arg_17, arg_14)\n if not arg_4:\n download_urls(arg_13[0], arg_1, arg_17, arg_14, arg_2=arg_2, arg_3=arg_3)\n else:\n raise NotImplementedError(arg_7)\n\n if not arg_4 and not dry_run:\n if not arg_5['caption']:\n print('Skipping danmaku.')\n return\n try:\n arg_1 = get_filename(arg_1)\n print('Downloading %s ...\\n' % (arg_1 + '.cmt.json'))\n arg_18 = get_srt_json(arg_0)\n with open(os.path.join(arg_2, arg_1 + '.cmt.json'), 'w', encoding='utf-8') as x:\n x.write(arg_18)\n except:\n pass"} +{"_id": "doc_13", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Scans through a string for substrings matched some patterns.\n\n Args:\n text: A string to be scanned.\n patterns: a list of regex pattern.\n\n Returns:\n a list if matched. 
empty if not.\n \"\"\"\n\n arg_2 = []\n for arg_3 in arg_1:\n arg_4 = re.findall(arg_3, arg_0)\n arg_2 += arg_4\n\n return arg_2"} +{"_id": "doc_14", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses the query string of a URL and returns the value of a parameter.\n\n Args:\n url: A URL.\n param: A string representing the name of the parameter.\n\n Returns:\n The value of the parameter.\n \"\"\"\n\n try:\n return parse.parse_qs(parse.urlparse(arg_0).query)[arg_1][0]\n except:\n return None"} +{"_id": "doc_15", "title": "", "text": "def Func(arg_0, arg_1={}, arg_2={}, arg_3=True, **arg_4):\n \"\"\"Post the content of a URL via sending a HTTP POST request.\n\n Args:\n url: A URL.\n headers: Request headers used by the client.\n decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\n Returns:\n The content as a string.\n \"\"\"\n if arg_4.get('post_data_raw'):\n logging.debug('Func: %s\\npost_data_raw: %s' % (arg_0, arg_4['post_data_raw']))\n else:\n logging.debug('Func: %s\\npost_data: %s' % (arg_0, arg_2))\n\n arg_5 = request.Request(arg_0, arg_1=arg_1)\n if cookies:\n cookies.add_cookie_header(arg_5)\n arg_5.headers.update(arg_5.unredirected_hdrs)\n if arg_4.get('post_data_raw'):\n arg_6 = bytes(arg_4['post_data_raw'], 'utf-8')\n else:\n arg_6 = bytes(parse.urlencode(arg_2), 'utf-8')\n arg_7 = urlopen_with_retry(arg_5, arg_8=arg_6)\n arg_8 = arg_7.read()\n\n # Handle HTTP compression for gzip and deflate (zlib)\n arg_9 = arg_7.getheader('Content-Encoding')\n if arg_9 == 'gzip':\n arg_8 = ungzip(arg_8)\n elif arg_9 == 'deflate':\n arg_8 = undeflate(arg_8)\n\n # Decode the response body\n if arg_3:\n arg_10 = match1(\n arg_7.getheader('Content-Type'), r'charset=([\\w-]+)'\n )\n if arg_10 is not None:\n arg_8 = arg_8.decode(arg_10)\n else:\n arg_8 = arg_8.decode('utf-8')\n\n return arg_8"} +{"_id": "doc_16", "title": "", "text": "def Func(arg_0):\n \"\"\"Parses host name and port number from a string.\n \"\"\"\n if re.match(r'^(\\d+)$', arg_0) is not None:\n return (\"0.0.0.0\", int(arg_0))\n if re.match(r'^(\\w+)://', arg_0) is None:\n arg_0 = \"//\" + arg_0\n arg_1 = parse.urlparse(arg_0)\n arg_2 = arg_1.hostname or \"0.0.0.0\"\n arg_3 = arg_1.port or 0\n return (arg_2, arg_3)"} +{"_id": "doc_17", "title": "", "text": "def Func(arg_0):\n \"\"\"str->str\"\"\"\n arg_1 = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'UTF-8,*;q=0.5',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'\n }\n arg_2 = 'https://www.showroom-live.com/' + arg_0\n arg_3 = get_content(arg_2, headers = arg_1)\n arg_4 = match1(arg_3, r'room\\?room_id\\=(\\d+)')\n assert arg_4\n return arg_4"} +{"_id": "doc_18", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"JSON, int, int, int->str\n \n Get a proper title with courseid+topicID+partID.\"\"\"\n\n return '_'.join([arg_0[0]['name'],\n arg_0[0]['Topics'][arg_1]['name'],\n arg_0[0]['Topics'][arg_1]['Parts'][arg_2]['name']])"} +{"_id": "doc_19", "title": "", "text": "def Func(arg_0, arg_1='.', arg_2=True, arg_3=False, **arg_4):\n \"\"\"int->None\n \n Download a WHOLE course.\n Reuse the API call to save time.\"\"\"\n\n for arg_5 in range(len(arg_0[0]['Topics'])):\n for arg_6 in range(len(arg_0[0]['Topics'][arg_5]['Parts'])):\n Func_topic_part(arg_0,\n arg_5,\n arg_6,\n 
arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n **arg_4)"} +{"_id": "doc_20", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='.', arg_4=True, arg_5=False, **arg_6):\n \"\"\"int, int, int->None\n \n Download ONE PART of the course.\"\"\"\n\n arg_7 = arg_0\n\n arg_8 = _wanmen_get_title_by_json_topic_part(arg_7, \n arg_1, \n arg_2)\n\n arg_9 = _wanmen_get_boke_id_by_json_topic_part(arg_7,\n arg_1, \n arg_2)\n\n bokecc_download_by_id(vid = arg_9, arg_8 = arg_8, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, **arg_6)"} +{"_id": "doc_21", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks if a task is either queued or running in this executor\n\n :param task_instance: TaskInstance\n :return: True if the task is known to this executor\n \"\"\"\n if arg_1.key in arg_0.queued_tasks or arg_1.key in arg_0.running:\n return True"} +{"_id": "doc_22", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns and flush the event buffer. In case dag_ids is specified\n it will only return and flush events for the given dag_ids. Otherwise\n it returns and flushes all\n\n :param dag_ids: to dag_ids to return events for, if None returns all\n :return: a dict of events\n \"\"\"\n arg_2 = dict()\n if arg_1 is None:\n arg_2 = arg_0.event_buffer\n arg_0.event_buffer = dict()\n else:\n for arg_4 in list(arg_0.event_buffer.keys()):\n arg_5, arg_6, arg_6, arg_6 = arg_4\n if arg_5 in arg_1:\n arg_2[arg_4] = arg_0.event_buffer.pop(arg_4)\n\n return arg_2"} +{"_id": "doc_23", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a snowflake.connection object\n \"\"\"\n arg_1 = arg_0._Func_params()\n arg_2 = snowflake.connector.connect(**arg_1)\n return arg_2"} +{"_id": "doc_24", "title": "", "text": "def Func(arg_0):\n \"\"\"\n returns aws_access_key_id, aws_secret_access_key\n from extra\n\n intended to be used by external import and export statements\n \"\"\"\n if arg_0.snowflake_conn_id:\n arg_1 = arg_0.get_connection(arg_0.snowflake_conn_id)\n if 'aws_secret_access_key' in arg_1.extra_dejson:\n arg_2 = arg_1.extra_dejson.get(\n 'aws_access_key_id')\n arg_3 = arg_1.extra_dejson.get(\n 'aws_secret_access_key')\n return arg_2, arg_3"} +{"_id": "doc_25", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=arg_3):\n \"\"\"\n Executes SQL using psycopg2 Func method.\n Necessary to execute COPY command without access to a superuser.\n\n Note: if this method is called with a \"COPY FROM\" statement and\n the specified input file does not exist, it creates an empty\n file and no data is loaded, but the operation succeeds.\n So if users want to be aware when the input file does not exist,\n they have to check its existence by themselves.\n \"\"\"\n if not os.path.isfile(arg_2):\n with arg_3(arg_2, 'w'):\n pass\n\n with arg_3(arg_2, 'r+') as f:\n with closing(arg_0.get_conn()) as conn:\n with closing(conn.cursor()) as cur:\n cur.Func(arg_1, f)\n f.truncate(f.tell())\n conn.commit()"} +{"_id": "doc_26", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Uploads the file to Google cloud storage\n \"\"\"\n arg_2 = GoogleCloudStorageHook(\n google_cloud_storage_conn_id=arg_0.google_cloud_storage_conn_id,\n delegate_to=arg_0.delegate_to)\n\n arg_2.upload(\n bucket_name=arg_0.bucket,\n object_name=arg_0.dst,\n mime_type=arg_0.mime_type,\n filename=arg_0.src,\n gzip=arg_0.gzip,\n )"} +{"_id": "doc_27", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Runs forever, monitoring the child processes of @gunicorn_master_proc and\n restarting workers occasionally.\n Each iteration of 
the loop traverses one edge of this state transition\n diagram, where each state (node) represents\n [ num_ready_workers_running / num_workers_running ]. We expect most time to\n be spent in [n / n]. `bs` is the setting webserver.worker_refresh_batch_size.\n The horizontal transition at ? happens after the new worker parses all the\n dags (so it could take a while!)\n V \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n [n / n] \u2500\u2500TTIN\u2500\u2500> [ [n, n+bs) / n + bs ] \u2500\u2500\u2500\u2500?\u2500\u2500\u2500> [n + bs / n + bs] \u2500\u2500TTOU\u2500\u2518\n ^ ^\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502\n \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500v\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500 [ [0, n) / n ] <\u2500\u2500\u2500 start\n We change the number of workers by sending TTIN and TTOU to the gunicorn\n master process, which increases and decreases the number of child workers\n respectively. Gunicorn guarantees that on TTOU workers are terminated\n gracefully and that the oldest worker is terminated.\n \"\"\"\n\n def wait_until_true(arg_3, arg_4=0):\n \"\"\"\n Sleeps until fn is true\n \"\"\"\n arg_5 = time.time()\n while not arg_3():\n if 0 < arg_4 <= time.time() - arg_5:\n raise AirflowWebServerTimeout(\n \"No response from gunicorn master within {0} seconds\"\n .format(arg_4))\n time.sleep(0.1)\n\n def start_refresh(arg_0):\n arg_6 = conf.getint('webserver', 'worker_refresh_batch_size')\n log.debug('%s doing a refresh of %s workers', arg_11, arg_6)\n sys.stdout.flush()\n sys.stderr.flush()\n\n arg_7 = 0\n for arg_8 in range(arg_6):\n arg_0.send_signal(signal.SIGTTIN)\n arg_7 += 1\n wait_until_true(lambda: arg_1 + arg_7 ==\n get_num_workers_running(arg_0),\n arg_2)\n\n try:\n wait_until_true(lambda: arg_1 ==\n get_num_workers_running(arg_0),\n arg_2)\n while True:\n arg_9 = get_num_workers_running(arg_0)\n arg_10 = \\\n get_num_ready_workers_running(arg_0)\n\n arg_11 = '[{0} / {1}]'.format(arg_10, arg_9)\n\n # Whenever some workers are not ready, wait until all workers are ready\n if arg_10 < arg_9:\n log.debug('%s some workers are starting up, waiting...', arg_11)\n sys.stdout.flush()\n time.sleep(1)\n\n # Kill a worker gracefully by asking gunicorn to reduce number of workers\n elif arg_9 > arg_1:\n arg_7 = arg_9 - arg_1\n log.debug('%s killing %s workers', arg_11, arg_7)\n\n for arg_8 in range(arg_7):\n arg_0.send_signal(signal.SIGTTOU)\n arg_7 -= 1\n wait_until_true(lambda: arg_1 + arg_7 ==\n get_num_workers_running(arg_0),\n arg_2)\n\n # Start a new worker by asking gunicorn to increase number of workers\n elif arg_9 == arg_1:\n arg_12 = conf.getint('webserver', 'worker_refresh_interval')\n log.debug(\n '%s sleeping for %ss starting doing a refresh...',\n arg_11, arg_12\n )\n time.sleep(arg_12)\n start_refresh(arg_0)\n\n else:\n # num_ready_workers_running == num_workers_running < num_workers_expected\n log.error((\n \"%s some workers seem to have died and gunicorn\"\n \"did not restart them as expected\"\n ), 
arg_11)\n time.sleep(10)\n if len(\n psutil.Process(arg_0.pid).children()\n ) < arg_1:\n start_refresh(arg_0)\n except (AirflowWebServerTimeout, OSError) as err:\n log.error(err)\n log.error(\"Shutting down webserver\")\n try:\n arg_0.terminate()\n arg_0.wait()\n finally:\n sys.exit(1)"} +{"_id": "doc_28", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None\n ):\n \"\"\"Translate a string or list of strings.\n\n See https://cloud.google.com/Func/docs/translating-text\n\n :type values: str or list\n :param values: String or list of strings to Func.\n\n :type target_language: str\n :param target_language: The language to Func results into. This\n is required by the API and defaults to\n the target language of the current instance.\n\n :type format_: str\n :param format_: (Optional) One of ``text`` or ``html``, to specify\n if the input text is plain text or HTML.\n\n :type source_language: str or None\n :param source_language: (Optional) The language of the text to\n be Funcd.\n\n :type model: str or None\n :param model: (Optional) The model used to Func the text, such\n as ``'base'`` or ``'nmt'``.\n\n :rtype: str or list\n :returns: A list of dictionaries for each queried value. Each\n dictionary typically contains three keys (though not\n all will be present in all cases)\n\n * ``detectedSourceLanguage``: The detected language (as an\n ISO 639-1 language code) of the text.\n * ``FuncdText``: The translation of the text into the\n target language.\n * ``input``: The corresponding input value.\n * ``model``: The model used to Func the text.\n\n If only a single value is passed, then only a single\n dictionary will be returned.\n :raises: :class:`~exceptions.ValueError` if the number of\n values and translations differ.\n \"\"\"\n arg_6 = arg_0.get_conn()\n\n return arg_6.Func(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n )"} +{"_id": "doc_29", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Deletes a Cloud SQL instance.\n\n :param project_id: Project ID of the project that contains the instance. If set\n to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :param instance: Cloud SQL instance ID. This does not include the project ID.\n :type instance: str\n :return: None\n \"\"\"\n arg_3 = arg_0.get_conn().instances().delete(\n project=arg_2,\n arg_1=arg_1,\n ).execute(num_retries=arg_0.num_retries)\n arg_4 = arg_3[\"name\"]\n arg_0._wait_for_operation_to_complete(arg_2=arg_2,\n arg_4=arg_4)"} +{"_id": "doc_30", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Retrieves a database resource from a Cloud SQL instance.\n\n :param instance: Database instance ID. This does not include the project ID.\n :type instance: str\n :param database: Name of the database in the instance.\n :type database: str\n :param project_id: Project ID of the project that contains the instance. 
If set\n to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: A Cloud SQL database resource, as described in\n https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource.\n :rtype: dict\n \"\"\"\n return arg_0.get_conn().databases().get(\n project=arg_3,\n arg_1=arg_1,\n arg_2=arg_2\n ).execute(num_retries=arg_0.num_retries)"} +{"_id": "doc_31", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Creates a new database inside a Cloud SQL instance.\n\n :param instance: Database instance ID. This does not include the project ID.\n :type instance: str\n :param body: The request body, as described in\n https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.\n :type body: dict\n :param project_id: Project ID of the project that contains the instance. If set\n to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_4 = arg_0.get_conn().databases().insert(\n project=arg_3,\n arg_1=arg_1,\n arg_2=arg_2\n ).execute(num_retries=arg_0.num_retries)\n arg_5 = arg_4[\"name\"]\n arg_0._wait_for_operation_to_complete(arg_3=arg_3,\n arg_5=arg_5)"} +{"_id": "doc_32", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Updates a database resource inside a Cloud SQL instance.\n\n This method supports patch semantics.\n See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch.\n\n :param instance: Database instance ID. This does not include the project ID.\n :type instance: str\n :param database: Name of the database to be updated in the instance.\n :type database: str\n :param body: The request body, as described in\n https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.\n :type body: dict\n :param project_id: Project ID of the project that contains the instance. If set\n to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_5 = arg_0.get_conn().databases().patch(\n project=arg_4,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3\n ).execute(num_retries=arg_0.num_retries)\n arg_6 = arg_5[\"name\"]\n arg_0._wait_for_operation_to_complete(arg_4=arg_4,\n arg_6=arg_6)"} +{"_id": "doc_33", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump\n or CSV file.\n\n :param instance: Database instance ID of the Cloud SQL instance. This does not include the\n project ID.\n :type instance: str\n :param body: The request body, as described in\n https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body\n :type body: dict\n :param project_id: Project ID of the project that contains the instance. 
If set\n to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n try:\n arg_4 = arg_0.get_conn().instances().export(\n project=arg_3,\n arg_1=arg_1,\n arg_2=arg_2\n ).execute(num_retries=arg_0.num_retries)\n arg_5 = arg_4[\"name\"]\n arg_0._wait_for_operation_to_complete(arg_3=arg_3,\n arg_5=arg_5)\n except HttpError as ex:\n raise AirflowException(\n 'Exporting instance {} failed: {}'.format(arg_1, ex.content)\n )"} +{"_id": "doc_34", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns version of the Cloud SQL Proxy.\n \"\"\"\n arg_0._download_sql_proxy_if_needed()\n arg_1 = [arg_0.sql_proxy_path]\n arg_1.extend(['--version'])\n arg_1.extend(arg_0._get_credential_parameters())\n arg_2 = subprocess.check_output(arg_1).decode('utf-8')\n arg_3 = re.compile(\"^.*[V|v]ersion ([^;]*);.*$\")\n arg_4 = arg_3.match(arg_2)\n if arg_4:\n return arg_4.group(1)\n else:\n return None"} +{"_id": "doc_35", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Create connection in the Connection table, according to whether it uses\n proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.\n\n :param session: Session of the SQL Alchemy ORM (automatically generated with\n decorator).\n \"\"\"\n arg_2 = Connection(conn_id=arg_0.db_conn_id)\n arg_3 = arg_0._generate_connection_uri()\n arg_0.log.info(\"Creating connection %s\", arg_0.db_conn_id)\n arg_2.parse_from_uri(arg_3)\n arg_1.add(arg_2)\n arg_1.commit()"} +{"_id": "doc_36", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Retrieves the dynamically created connection from the Connection table.\n\n :param session: Session of the SQL Alchemy ORM (automatically generated with\n decorator).\n \"\"\"\n arg_0.log.info(\"Retrieving connection %s\", arg_0.db_conn_id)\n arg_2 = arg_1.query(Connection).filter(\n Connection.conn_id == arg_0.db_conn_id)\n if arg_2.count():\n return arg_2[0]\n return None"} +{"_id": "doc_37", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Delete the dynamically created connection from the Connection table.\n\n :param session: Session of the SQL Alchemy ORM (automatically generated with\n decorator).\n \"\"\"\n arg_0.log.info(\"Deleting connection %s\", arg_0.db_conn_id)\n arg_2 = arg_1.query(Connection).filter(\n Connection.conn_id == arg_0.db_conn_id)\n if arg_2.count():\n arg_3 = arg_2[0]\n arg_1.delete(arg_3)\n arg_1.commit()\n else:\n arg_0.log.info(\"Connection was already deleted!\")"} +{"_id": "doc_38", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve Cloud SQL Proxy runner. 
It is used to manage the proxy\n lifecycle per task.\n\n :return: The Cloud SQL Proxy runner.\n :rtype: CloudSqlProxyRunner\n \"\"\"\n if not arg_0.use_proxy:\n raise AirflowException(\"Proxy runner can only be retrieved in case of use_proxy = True\")\n return CloudSqlProxyRunner(\n path_prefix=arg_0.sql_proxy_unique_path,\n instance_specification=arg_0._get_sqlproxy_instance_specification(),\n project_id=arg_0.project_id,\n sql_proxy_version=arg_0.sql_proxy_version,\n sql_proxy_binary_path=arg_0.sql_proxy_binary_path\n )"} +{"_id": "doc_39", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Reserve free TCP port to be used by Cloud SQL Proxy\n \"\"\"\n arg_0.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n arg_0.reserved_tcp_socket.bind(('127.0.0.1', 0))\n arg_0.sql_proxy_tcp_port = arg_0.reserved_tcp_socket.getsockname()[1]"} +{"_id": "doc_40", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Replaces invalid MLEngine job_id characters with '_'.\n\n This also adds a leading 'z' in case job_id starts with an invalid\n character.\n\n Args:\n job_id: A job_id str that may have invalid characters.\n\n Returns:\n A valid job_id representation.\n \"\"\"\n\n # Add a prefix when a job_id starts with a digit or a template\n arg_1 = re.search(r'\\d|\\{{2}', arg_0)\n if arg_1 and arg_1.start() == 0:\n arg_2 = 'z_{}'.format(arg_0)\n else:\n arg_2 = arg_0\n\n # Clean up 'bad' characters except templates\n arg_3 = 0\n arg_4 = ''\n for arg_5 in re.finditer(r'\\{{2}.+?\\}{2}', arg_2):\n arg_4 += re.sub(r'[^0-9a-zA-Z]+', '_',\n arg_2[arg_3:arg_5.start()])\n arg_4 += arg_2[arg_5.start():arg_5.end()]\n arg_3 = arg_5.end()\n\n # Clean up last substring or the full string if no templates\n arg_4 += re.sub(r'[^0-9a-zA-Z]+', '_', arg_2[arg_3:])\n\n return arg_4"} +{"_id": "doc_41", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Extract error code from ftp exception\"\"\"\n try:\n arg_2 = arg_0.error_code_pattern.match(str(arg_1))\n arg_3 = int(arg_2.group(0))\n return arg_3\n except ValueError:\n return arg_1"} +{"_id": "doc_42", "title": "", "text": "def Func():\n \"\"\"\n Remove any existing DAG runs for the perf test DAGs.\n \"\"\"\n arg_0 = settings.Session()\n arg_1 = arg_0.query(DagRun).filter(\n DagRun.dag_id.in_(DAG_IDS),\n ).all()\n for arg_2 in arg_1:\n logging.info('Deleting DagRun :: {}'.format(arg_2))\n arg_0.delete(arg_2)"} +{"_id": "doc_43", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Toggle the pause state of the DAGs in the test.\n \"\"\"\n arg_1 = settings.Session()\n arg_2 = arg_1.query(DagModel).filter(\n DagModel.dag_id.in_(DAG_IDS))\n for arg_3 in arg_2:\n logging.info('Setting DAG :: {} is_paused={}'.format(arg_3, arg_0))\n arg_3.is_paused = arg_0\n arg_1.commit()"} +{"_id": "doc_44", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Override the scheduler Func to determine when the test is complete\n \"\"\"\n super(SchedulerMetricsJob, arg_0).Func()\n arg_1 = settings.Session()\n # Get all the relevant task instances\n arg_2 = TaskInstance\n arg_3 = (\n arg_1\n .query(arg_2)\n .filter(arg_2.dag_id.in_(DAG_IDS))\n .filter(arg_2.state.in_([State.SUCCESS]))\n .all()\n )\n arg_1.commit()\n\n arg_4 = DagBag(SUBDIR)\n arg_5 = [arg_4.dags[dag_id] for dag_id in DAG_IDS]\n # the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval.\n arg_6 = sum([(timezone.utcnow() - task.start_date).days\n for dag in arg_5 for task in dag.tasks])\n\n if (len(arg_3) == arg_6 or\n (timezone.utcnow() - arg_0.start_date).total_seconds() >\n MAX_RUNTIME_SECS):\n if 
len(arg_3) == arg_6:\n arg_0.log.info(\"All tasks processed! Printing stats.\")\n else:\n arg_0.log.info(\"Test timeout reached. Printing available stats.\")\n arg_0.print_stats()\n set_dags_paused_state(True)\n sys.exit()"} +{"_id": "doc_45", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates the directory specified by path, creating intermediate directories\n as necessary. If directory already exists, this is a no-op.\n\n :param path: The directory to create\n :type path: str\n :param mode: The mode to give to the directory e.g. 0o755, ignores umask\n :type mode: int\n \"\"\"\n try:\n arg_2 = os.umask(0)\n os.makedirs(arg_0, arg_1)\n except OSError:\n if not os.path.isdir(arg_0):\n raise\n finally:\n os.umask(arg_2)"} +{"_id": "doc_46", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Make a naive datetime.datetime in a given time zone aware.\n\n :param value: datetime\n :param timezone: timezone\n :return: localized datetime in settings.TIMEZONE or timezone\n\n \"\"\"\n if arg_1 is None:\n arg_1 = TIMEZONE\n\n # Check that we won't overwrite the timezone of an aware datetime.\n if is_localized(arg_0):\n raise ValueError(\n \"Func expects a naive datetime, got %s\" % arg_0)\n if hasattr(arg_0, 'fold'):\n # In case of python 3.6 we want to do the same that pendulum does for python3.5\n # i.e in case we move clock back we want to schedule the run at the time of the second\n # instance of the same clock time rather than the first one.\n # Fold parameter has no impact in other cases so we can safely set it to 1 here\n arg_0 = arg_0.replace(fold=1)\n if hasattr(arg_1, 'localize'):\n # This method is available for pytz time zones.\n return arg_1.localize(arg_0)\n elif hasattr(arg_1, 'convert'):\n # For pendulum\n return arg_1.convert(arg_0)\n else:\n # This may be wrong around DST changes!\n return arg_0.replace(tzinfo=arg_1)"} +{"_id": "doc_47", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Make an aware datetime.datetime naive in a given time zone.\n\n :param value: datetime\n :param timezone: timezone\n :return: naive datetime\n \"\"\"\n if arg_1 is None:\n arg_1 = TIMEZONE\n\n # Emulate the behavior of astimezone() on Python < 3.6.\n if is_naive(arg_0):\n raise ValueError(\"Func() cannot be applied to a naive datetime\")\n\n arg_2 = arg_0.astimezone(arg_1)\n\n # cross library compatibility\n arg_3 = dt.datetime(arg_2.year,\n arg_2.month,\n arg_2.day,\n arg_2.hour,\n arg_2.minute,\n arg_2.second,\n arg_2.microsecond)\n\n return arg_3"} +{"_id": "doc_48", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"\n Wrapper around Func.Func that adds settings.TIMEZONE if tzinfo not specified\n\n :return: Func.Func\n \"\"\"\n if 'tzinfo' not in arg_1:\n arg_1['tzinfo'] = TIMEZONE\n\n return dt.Func(*arg_0, **arg_1)"} +{"_id": "doc_49", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Establish a connection to druid broker.\n \"\"\"\n arg_1 = arg_0.Funcection(arg_0.druid_broker_conn_id)\n arg_2 = connect(\n host=arg_1.host,\n port=arg_1.port,\n path=arg_1.extra_dejson.get('endpoint', '/druid/v2/sql'),\n scheme=arg_1.extra_dejson.get('schema', 'http')\n )\n arg_0.log.info('Get the connection to druid broker on %s', arg_1.host)\n return arg_2"} +{"_id": "doc_50", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns http session for use with requests\n\n :param headers: additional headers to be passed through as a dictionary\n :type headers: dict\n \"\"\"\n arg_2 = requests.Session()\n if arg_0.http_conn_id:\n arg_3 = 
arg_0.Funcection(arg_0.http_conn_id)\n\n if \"://\" in arg_3.host:\n arg_0.base_url = arg_3.host\n else:\n # schema defaults to HTTP\n arg_5 = arg_3.schema if arg_3.schema else \"http\"\n arg_0.base_url = arg_5 + \"://\" + arg_3.host\n\n if arg_3.port:\n arg_0.base_url = arg_0.base_url + \":\" + str(arg_3.port)\n if arg_3.login:\n arg_2.auth = (arg_3.login, arg_3.password)\n if arg_3.extra:\n try:\n arg_2.headers.update(arg_3.extra_dejson)\n except TypeError:\n arg_0.log.warn('Connection to %s has invalid extra field.', arg_3.host)\n if arg_1:\n arg_2.headers.update(arg_1)\n\n return arg_2"} +{"_id": "doc_51", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Performs the request\n\n :param endpoint: the endpoint to be called i.e. resource/v1/query?\n :type endpoint: str\n :param data: payload to be uploaded or request parameters\n :type data: dict\n :param headers: additional headers to be passed through as a dictionary\n :type headers: dict\n :param extra_options: additional options to be used when executing the request\n i.e. {'check_response': False} to avoid checking raising exceptions on non\n 2XX or 3XX status codes\n :type extra_options: dict\n \"\"\"\n arg_4 = arg_4 or {}\n\n arg_5 = arg_0.get_conn(arg_3)\n\n if arg_0.base_url and not arg_0.base_url.endswith('/') and \\\n arg_1 and not arg_1.startswith('/'):\n arg_6 = arg_0.base_url + '/' + arg_1\n else:\n arg_6 = (arg_0.base_url or '') + (arg_1 or '')\n\n arg_7 = None\n if arg_0.method == 'GET':\n # GET uses params\n arg_7 = requests.Request(arg_0.method,\n arg_6,\n params=arg_2,\n arg_3=arg_3)\n elif arg_0.method == 'HEAD':\n # HEAD doesn't use params\n arg_7 = requests.Request(arg_0.method,\n arg_6,\n arg_3=arg_3)\n else:\n # Others use data\n arg_7 = requests.Request(arg_0.method,\n arg_6,\n arg_2=arg_2,\n arg_3=arg_3)\n\n arg_8 = arg_5.prepare_request(arg_7)\n arg_0.log.info(\"Sending '%s' to url: %s\", arg_0.method, arg_6)\n return arg_0.Func_and_check(arg_5, arg_8, arg_4)"} +{"_id": "doc_52", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks the status code and raise an AirflowException exception on non 2XX or 3XX\n status codes\n\n :param response: A requests response object\n :type response: requests.response\n \"\"\"\n try:\n arg_1.raise_for_status()\n except requests.exceptions.HTTPError:\n arg_0.log.error(\"HTTP error: %s\", arg_1.reason)\n if arg_0.method not in ['GET', 'HEAD']:\n arg_0.log.error(arg_1.text)\n raise AirflowException(str(arg_1.status_code) + \":\" + arg_1.reason)"} +{"_id": "doc_53", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Grabs extra options like timeout and actually runs the request,\n checking for the result\n\n :param session: the session to be used to execute the request\n :type session: requests.Session\n :param prepped_request: the prepared request generated in run()\n :type prepped_request: session.prepare_request\n :param extra_options: additional options to be used when executing the request\n i.e. 
{'check_response': False} to avoid checking raising exceptions on non 2XX\n or 3XX status codes\n :type extra_options: dict\n \"\"\"\n arg_3 = arg_3 or {}\n\n try:\n arg_4 = arg_1.send(\n arg_2,\n stream=arg_3.get(\"stream\", False),\n verify=arg_3.get(\"verify\", True),\n proxies=arg_3.get(\"proxies\", {}),\n cert=arg_3.get(\"cert\"),\n timeout=arg_3.get(\"timeout\"),\n allow_redirects=arg_3.get(\"allow_redirects\", True))\n\n if arg_3.get('check_response', True):\n arg_0.check_response(arg_4)\n return arg_4\n\n except requests.exceptions.ConnectionError as ex:\n arg_0.log.warn(str(ex) + ' Tenacity will retry to execute the operation')\n raise ex"} +{"_id": "doc_54", "title": "", "text": "def Func():\n \"\"\"\n Contextmanager that will create and teardown a session.\n \"\"\"\n arg_0 = settings.Session()\n try:\n yield arg_0\n arg_0.commit()\n except Exception:\n arg_0.rollback()\n raise\n finally:\n arg_0.close()"} +{"_id": "doc_55", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parses some DatabaseError to provide a better error message\n \"\"\"\n if (hasattr(arg_0, 'message') and\n 'errorName' in arg_0.message and\n 'message' in arg_0.message):\n return ('{name}: {message}'.format(\n name=arg_0.message['errorName'],\n message=arg_0.message['message']))\n else:\n return str(arg_0)"} +{"_id": "doc_56", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get a pandas dataframe from a sql query.\n \"\"\"\n import pandas\n arg_3 = arg_0.get_cursor()\n try:\n arg_3.execute(arg_0._strip_sql(arg_1), arg_2)\n arg_4 = arg_3.fetchall()\n except DatabaseError as e:\n raise PrestoException(arg_0._get_pretty_exception_message(e))\n arg_5 = arg_3.description\n if arg_4:\n arg_6 = pandas.DataFrame(arg_4)\n arg_6.columns = [c[0] for c in arg_5]\n else:\n arg_6 = pandas.DataFrame()\n return arg_6"} +{"_id": "doc_57", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n A generic way to insert a set of tuples into a table.\n\n :param table: Name of the target table\n :type table: str\n :param rows: The rows to insert into the table\n :type rows: iterable of tuples\n :param target_fields: The names of the columns to fill in the table\n :type target_fields: iterable of strings\n \"\"\"\n super().Func(arg_1, arg_2, arg_3, 0)"} +{"_id": "doc_58", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a cosmos db client.\n \"\"\"\n if arg_0.cosmos_client is not None:\n return arg_0.cosmos_client\n\n # Initialize the Python Azure Cosmos DB client\n arg_0.cosmos_client = arg_1.CosmosClient(arg_0.endpoint_uri, {'masterKey': arg_0.master_key})\n\n return arg_0.cosmos_client"} +{"_id": "doc_59", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Checks if a collection exists in CosmosDB.\n \"\"\"\n if arg_1 is None:\n raise AirflowBadRequest(\"Collection name cannot be None.\")\n\n arg_3 = list(arg_0.get_conn().QueryContainers(\n get_database_link(arg_0.__get_database_name(arg_2)), {\n \"query\": \"SELECT * FROM r WHERE r.id=@id\",\n \"parameters\": [\n {\"name\": \"@id\", \"value\": arg_1}\n ]\n }))\n if len(arg_3) == 0:\n return False\n\n return True"} +{"_id": "doc_60", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Creates a new collection in the CosmosDB database.\n \"\"\"\n if arg_1 is None:\n raise AirflowBadRequest(\"Collection name cannot be None.\")\n\n # We need to check to see if this container already exists so we don't try\n # to create it twice\n arg_3 = list(arg_0.get_conn().QueryContainers(\n 
get_database_link(arg_0.__get_database_name(arg_2)), {\n \"query\": \"SELECT * FROM r WHERE r.id=@id\",\n \"parameters\": [\n {\"name\": \"@id\", \"value\": arg_1}\n ]\n }))\n\n # Only create if we did not find it already existing\n if len(arg_3) == 0:\n arg_0.get_conn().CreateContainer(\n get_database_link(arg_0.__get_database_name(arg_2)),\n {\"id\": arg_1})"} +{"_id": "doc_61", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates a new database in CosmosDB.\n \"\"\"\n if arg_1 is None:\n raise AirflowBadRequest(\"Database name cannot be None.\")\n\n # We need to check to see if this database already exists so we don't try\n # to create it twice\n arg_2 = list(arg_0.get_conn().QueryDatabases({\n \"query\": \"SELECT * FROM r WHERE r.id=@id\",\n \"parameters\": [\n {\"name\": \"@id\", \"value\": arg_1}\n ]\n }))\n\n # Only create if we did not find it already existing\n if len(arg_2) == 0:\n arg_0.get_conn().CreateDatabase({\"id\": arg_1})"} +{"_id": "doc_62", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Deletes an existing database in CosmosDB.\n \"\"\"\n if arg_1 is None:\n raise AirflowBadRequest(\"Database name cannot be None.\")\n\n arg_0.get_conn().DeleteDatabase(get_database_link(arg_1))"} +{"_id": "doc_63", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Insert a list of new documents into an existing collection in the CosmosDB database.\n \"\"\"\n if arg_1 is None:\n raise AirflowBadRequest(\"You cannot insert empty documents\")\n\n arg_4 = []\n for arg_5 in arg_1:\n arg_4.append(\n arg_0.get_conn().CreateItem(\n get_collection_link(\n arg_0.__get_database_name(arg_2),\n arg_0.__get_collection_name(arg_3)),\n arg_5))\n\n return arg_4"} +{"_id": "doc_64", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Get a list of documents from an existing collection in the CosmosDB database via SQL query.\n \"\"\"\n if arg_1 is None:\n raise AirflowBadRequest(\"SQL query string cannot be None\")\n\n # Query them in SQL\n arg_5 = {'query': arg_1}\n\n try:\n arg_6 = arg_0.get_conn().QueryItems(\n get_collection_link(\n arg_0.__get_database_name(arg_2),\n arg_0.__get_collection_name(arg_3)),\n arg_5,\n arg_4)\n\n return list(arg_6)\n except HTTPFailure:\n return None"} +{"_id": "doc_65", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the Cloud Function with the given name.\n\n :param name: Name of the function.\n :type name: str\n :return: A Cloud Functions object representing the function.\n :rtype: dict\n \"\"\"\n return arg_0.get_conn().projects().locations().functions().get(\n arg_1=arg_1).execute(num_retries=arg_0.num_retries)"} +{"_id": "doc_66", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Creates a new function in Cloud Function in the location specified in the body.\n\n :param location: The location of the function.\n :type location: str\n :param body: The body required by the Cloud Functions insert API.\n :type body: dict\n :param project_id: Optional, Google Cloud Project project_id where the function belongs.\n If set to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_4 = arg_0.get_conn().projects().locations().functions().create(\n arg_1=arg_0._full_location(arg_3, arg_1),\n arg_2=arg_2\n ).execute(num_retries=arg_0.num_retries)\n arg_5 = arg_4[\"name\"]\n arg_0._wait_for_operation_to_complete(arg_5=arg_5)"} +{"_id": "doc_67", "title": "", "text": "def 
Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Updates Cloud Functions according to the specified update mask.\n\n :param name: The name of the function.\n :type name: str\n :param body: The body required by the cloud function patch API.\n :type body: dict\n :param update_mask: The update mask - array of fields that should be patched.\n :type update_mask: [str]\n :return: None\n \"\"\"\n arg_4 = arg_0.get_conn().projects().locations().functions().patch(\n updateMask=\",\".join(arg_3),\n arg_1=arg_1,\n arg_2=arg_2\n ).execute(num_retries=arg_0.num_retries)\n arg_5 = arg_4[\"name\"]\n arg_0._wait_for_operation_to_complete(arg_5=arg_5)"} +{"_id": "doc_68", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Uploads zip file with sources.\n\n :param location: The location where the function is created.\n :type location: str\n :param zip_path: The path of the valid .zip file to upload.\n :type zip_path: str\n :param project_id: Optional, Google Cloud Project project_id where the function belongs.\n If set to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: The upload URL that was returned by generateUploadUrl method.\n \"\"\"\n arg_4 = arg_0.get_conn().projects().locations().functions().generateUploadUrl(\n parent=arg_0._full_location(arg_3, arg_1)\n ).execute(num_retries=arg_0.num_retries)\n arg_5 = arg_4.get('uploadUrl')\n with open(arg_2, 'rb') as fp:\n requests.put(\n url=arg_5,\n data=fp,\n # Those two headers needs to be specified according to:\n # https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions/generateUploadUrl\n # nopep8\n headers={\n 'Content-type': 'application/zip',\n 'x-goog-content-length-range': '0,104857600',\n }\n )\n return arg_5"} +{"_id": "doc_69", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Wrapper around the private _Func method that contains some global\n checks for all dependencies.\n\n :param ti: the task instance to get the dependency status for\n :type ti: airflow.models.TaskInstance\n :param session: database session\n :type session: sqlalchemy.orm.session.Session\n :param dep_context: the context for which this dependency should be evaluated for\n :type dep_context: DepContext\n \"\"\"\n # this avoids a circular dependency\n from airflow.ti_deps.dep_context import DepContext\n\n if arg_3 is None:\n arg_3 = DepContext()\n\n if arg_0.IGNOREABLE and arg_3.ignore_all_deps:\n yield arg_0._passing_status(\n reason=\"Context specified all dependencies should be ignored.\")\n return\n\n if arg_0.IS_TASK_DEP and arg_3.ignore_task_deps:\n yield arg_0._passing_status(\n reason=\"Context specified all task dependencies should be ignored.\")\n return\n\n for arg_4 in arg_0._Func(arg_1, arg_2, arg_3):\n yield arg_4"} +{"_id": "doc_70", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Returns whether or not this dependency is met for a given task instance. 
A\n dependency is considered met if all of the dependency statuses it reports are\n passing.\n\n :param ti: the task instance to see if this dependency is met for\n :type ti: airflow.models.TaskInstance\n :param session: database session\n :type session: sqlalchemy.orm.session.Session\n :param dep_context: The context this dependency is being checked under that stores\n state that can be used by this dependency.\n :type dep_context: BaseDepContext\n \"\"\"\n return all(arg_4.passed for arg_4 in\n arg_0.get_dep_statuses(arg_1, arg_2, arg_3))"} +{"_id": "doc_71", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Returns an iterable of strings that explain why this dependency wasn't met.\n\n :param ti: the task instance to see if this dependency is met for\n :type ti: airflow.models.TaskInstance\n :param session: database session\n :type session: sqlalchemy.orm.session.Session\n :param dep_context: The context this dependency is being checked under that stores\n state that can be used by this dependency.\n :type dep_context: BaseDepContext\n \"\"\"\n for arg_4 in arg_0.get_dep_statuses(arg_1, arg_2, arg_3):\n if not arg_4.passed:\n yield arg_4.reason"} +{"_id": "doc_72", "title": "", "text": "def Func(arg_0, arg_1='boto', arg_2=None):\n \"\"\"\n Parses a config file for s3 credentials. Can currently\n parse boto, s3cmd.conf and AWS SDK config formats\n\n :param config_file_name: path to the config file\n :type config_file_name: str\n :param config_format: config type. One of \"boto\", \"s3cmd\" or \"aws\".\n Defaults to \"boto\"\n :type config_format: str\n :param profile: profile name in AWS type config file\n :type profile: str\n \"\"\"\n arg_3 = configparser.ConfigParser()\n if arg_3.read(arg_0): # pragma: no cover\n arg_4 = arg_3.sections()\n else:\n raise AirflowException(\"Couldn't read {0}\".format(arg_0))\n # Setting option names depending on file format\n if arg_1 is None:\n arg_1 = 'boto'\n arg_5 = arg_1.lower()\n if arg_5 == 'boto': # pragma: no cover\n if arg_2 is not None and 'profile ' + arg_2 in arg_4:\n arg_6 = 'profile ' + arg_2\n else:\n arg_6 = 'Credentials'\n elif arg_5 == 'aws' and arg_2 is not None:\n arg_6 = arg_2\n else:\n arg_6 = 'default'\n # Option names\n if arg_5 in ('boto', 'aws'): # pragma: no cover\n arg_7 = 'aws_access_key_id'\n arg_8 = 'aws_secret_access_key'\n # security_token_option = 'aws_security_token'\n else:\n arg_7 = 'access_key'\n arg_8 = 'secret_key'\n # Actual Parsing\n if arg_6 not in arg_4:\n raise AirflowException(\"This config file format is not recognized\")\n else:\n try:\n arg_9 = arg_3.get(arg_6, arg_7)\n arg_10 = arg_3.get(arg_6, arg_8)\n except Exception:\n logging.warning(\"Option Error in parsing s3 config file\")\n raise\n return arg_9, arg_10"} +{"_id": "doc_73", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Ensure all logging output has been Funced\n \"\"\"\n if len(arg_0._buffer) > 0:\n arg_0.logger.log(arg_0.level, arg_0._buffer)\n arg_0._buffer = str()"} +{"_id": "doc_74", "title": "", "text": "def Func(arg_0):\n \"\"\"\n If the path contains a folder with a .zip suffix, then\n the folder is treated as a zip archive and path to zip is returned.\n \"\"\"\n\n arg_1, arg_2, arg_3 = re.search(\n r'((.*\\.zip){})?(.*)'.format(re.escape(os.sep)), arg_0).groups()\n if arg_2 and zipfile.is_zipfile(arg_2):\n return arg_2\n else:\n return arg_0"} +{"_id": "doc_75", "title": "", "text": "def Func(arg_0, arg_1=True,\n arg_2=None):\n \"\"\"\n Traverse a directory and look for Python files.\n\n :param directory: 
the directory to traverse\n :type directory: unicode\n :param safe_mode: whether to use a heuristic to determine whether a file\n contains Airflow DAG definitions\n :return: a list of paths to Python files in the specified directory\n :rtype: list[unicode]\n \"\"\"\n if arg_2 is None:\n arg_2 = conf.getboolean('core', 'LOAD_EXAMPLES')\n arg_3 = []\n if arg_0 is None:\n return []\n elif arg_12.path.isfile(arg_0):\n return [arg_0]\n elif arg_12.path.isdir(arg_0):\n arg_4 = {}\n for arg_5, arg_6, arg_7 in arg_12.walk(arg_0, followlinks=True):\n arg_8 = arg_4.get(arg_5, [])\n arg_9 = arg_12.path.join(arg_5, '.airflowignore')\n if arg_12.path.isfile(arg_9):\n with open(arg_9, 'r') as arg_15:\n # If we have new patterns create a copy so we don't change\n # the previous list (which would affect other subdirs)\n arg_8 += [re.compile(arg_10) for arg_10 in arg_15.read().split('\\n') if arg_10]\n\n # If we can ignore any subdirs entirely we should - fewer paths\n # to walk is better. We have to modify the ``dirs`` array in\n # place for this to affect os.walk\n arg_6[:] = [\n arg_11\n for arg_11 in arg_6\n if not any(arg_10.search(arg_12.path.join(arg_5, arg_11)) for arg_10 in arg_8)\n ]\n\n # We want patterns defined in a parent folder's .airflowignore to\n # apply to subdirs too\n for arg_11 in arg_6:\n arg_4[arg_12.path.join(arg_5, arg_11)] = arg_8\n\n for arg_15 in arg_7:\n try:\n arg_16 = arg_12.path.join(arg_5, arg_15)\n if not arg_12.path.isfile(arg_16):\n continue\n arg_17, arg_18 = arg_12.path.splitext(\n arg_12.path.split(arg_16)[-1])\n if arg_18 != '.py' and not zipfile.is_zipfile(arg_16):\n continue\n if any([re.findall(arg_10, arg_16) for arg_10 in arg_8]):\n continue\n\n # Heuristic that guesses whether a Python file contains an\n # Airflow DAG definition.\n arg_19 = True\n if arg_1 and not zipfile.is_zipfile(arg_16):\n with open(arg_16, 'rb') as fp:\n arg_20 = fp.read()\n arg_19 = all(\n [s in arg_20 for s in (b'DAG', b'airflow')])\n\n if not arg_19:\n continue\n\n arg_3.append(arg_16)\n except Exception:\n arg_21 = LoggingMixin().log\n arg_21.exception(\"Error while examining %s\", arg_15)\n if arg_2:\n import airflow.example_dags\n arg_22 = airflow.example_dags.__path__[0]\n arg_3.extend(Func(arg_22, arg_1, False))\n return arg_3"} +{"_id": "doc_76", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Launch DagFileProcessorManager processor and Func DAG parsing loop in manager.\n \"\"\"\n arg_0._process = arg_0._launch_process(arg_0._dag_directory,\n arg_0._file_paths,\n arg_0._max_runs,\n arg_0._processor_factory,\n arg_0._child_signal_conn,\n arg_0._stat_queue,\n arg_0._result_queue,\n arg_0._async_mode)\n arg_0.log.info(\"Launched DagFileProcessorManager with pid: %s\", arg_0._process.pid)"} +{"_id": "doc_77", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Send termination signal to DAG parsing processor manager\n and expect it to Func all DAG file processors.\n \"\"\"\n arg_0.log.info(\"Sending termination message to manager.\")\n arg_0._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)"} +{"_id": "doc_78", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Use multiple processes to parse and generate tasks for the\n DAGs in parallel. 
By processing them in separate processes,\n we can get parallelism and isolation from potentially harmful\n user code.\n \"\"\"\n\n arg_0.log.info(\"Processing files using up to %s processes at a time \", arg_0._parallelism)\n arg_0.log.info(\"Process each file at most once every %s seconds\", arg_0._file_process_interval)\n arg_0.log.info(\n \"Checking for new files in %s every %s seconds\", arg_0._dag_directory, arg_0.dag_dir_list_interval\n )\n\n if arg_0._async_mode:\n arg_0.log.debug(\"Starting DagFileProcessorManager in async mode\")\n arg_0.Func_in_async()\n else:\n arg_0.log.debug(\"Starting DagFileProcessorManager in sync mode\")\n arg_0.Func_in_sync()"} +{"_id": "doc_79", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse DAG files repeatedly in a standalone loop.\n \"\"\"\n while True:\n arg_1 = time.time()\n\n if arg_0._signal_conn.poll():\n arg_2 = arg_0._signal_conn.recv()\n if arg_2 == DagParsingSignal.TERMINATE_MANAGER:\n arg_0.terminate()\n break\n elif arg_2 == DagParsingSignal.END_MANAGER:\n arg_0.end()\n sys.exit(os.EX_OK)\n\n arg_0._refresh_dag_dir()\n\n arg_3 = arg_0.heartbeat()\n for arg_4 in arg_3:\n arg_0._result_queue.put(arg_4)\n\n arg_0._print_stat()\n\n arg_5 = all(arg_0.get_last_finish_time(x) is not None\n for x in arg_0.file_paths)\n arg_6 = arg_0.max_runs_reached()\n\n arg_7 = DagParsingStat(arg_0._file_paths,\n arg_0.get_all_pids(),\n arg_6,\n arg_5,\n len(arg_3))\n arg_0._stat_queue.put(arg_7)\n\n if arg_6:\n arg_0.log.info(\"Exiting dag parsing loop as all files \"\n \"have been processed %s times\", arg_0._max_runs)\n break\n\n arg_8 = time.time() - arg_1\n if arg_8 < 1:\n arg_9 = 1 - arg_8\n arg_0.log.debug(\"Sleeping for %.2f seconds to prevent excessive logging\", arg_9)\n time.sleep(arg_9)"} +{"_id": "doc_80", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Refresh file paths from dag dir if we haven't done it for too long.\n \"\"\"\n arg_1 = (timezone.utcnow() -\n arg_0.last_dag_dir_refresh_time).total_seconds()\n if arg_1 > arg_0.dag_dir_list_interval:\n # Build up a list of Python files that could contain DAGs\n arg_0.log.info(\"Searching for files in %s\", arg_0._dag_directory)\n arg_0._file_paths = list_py_file_paths(arg_0._dag_directory)\n arg_0.last_dag_dir_refresh_time = timezone.utcnow()\n arg_0.log.info(\"There are %s files in %s\", len(arg_0._file_paths), arg_0._dag_directory)\n arg_0.set_file_paths(arg_0._file_paths)\n\n try:\n arg_0.log.debug(\"Removing old import errors\")\n arg_0.clear_nonexistent_import_errors()\n except Exception:\n arg_0.log.exception(\"Error removing old import errors\")"} +{"_id": "doc_81", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Occasionally print out stats about how fast the files are getting processed\n \"\"\"\n if ((timezone.utcnow() - arg_0.last_stat_print_time).total_seconds() >\n arg_0.print_stats_interval):\n if len(arg_0._file_paths) > 0:\n arg_0._log_file_processing_stats(arg_0._file_paths)\n arg_0.last_stat_print_time = timezone.utcnow()"} +{"_id": "doc_82", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Sleeps until all the processors are done.\n \"\"\"\n for arg_1, arg_2 in arg_0._processors.items():\n while not arg_2.done:\n time.sleep(0.1)"} +{"_id": "doc_83", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This should be periodically called by the manager loop. 
This method will\n kick off new processes to process DAG definition files and read the\n results from the finished processors.\n\n :return: a list of SimpleDags that were produced by processors that\n have finished since the last time this was called\n :rtype: list[airflow.utils.dag_processing.SimpleDag]\n \"\"\"\n arg_1 = {}\n \"\"\":type : dict[unicode, AbstractDagFileProcessor]\"\"\"\n arg_2 = {}\n \"\"\":type : dict[unicode, AbstractDagFileProcessor]\"\"\"\n\n for arg_3, arg_4 in arg_0._processors.items():\n if arg_4.done:\n arg_0.log.debug(\"Processor for %s finished\", arg_3)\n arg_5 = timezone.utcnow()\n arg_1[arg_3] = arg_4\n arg_0._last_runtime[arg_3] = (arg_5 -\n arg_4.start_time).total_seconds()\n arg_0._last_finish_time[arg_3] = arg_5\n arg_0._run_count[arg_3] += 1\n else:\n arg_2[arg_3] = arg_4\n arg_0._processors = arg_2\n\n arg_0.log.debug(\"%s/%s DAG parsing processes running\",\n len(arg_0._processors), arg_0._parallelism)\n\n arg_0.log.debug(\"%s file paths queued for processing\",\n len(arg_0._file_path_queue))\n\n # Collect all the DAGs that were found in the processed files\n arg_9 = []\n for arg_3, arg_4 in arg_1.items():\n if arg_4.result is None:\n arg_0.log.warning(\n \"Processor for %s exited with return code %s.\",\n arg_4.file_path, arg_4.exit_code\n )\n else:\n for arg_10 in arg_4.result:\n arg_9.append(arg_10)\n\n # Generate more file paths to process if we processed all the files\n # already.\n if len(arg_0._file_path_queue) == 0:\n # If the file path is already being processed, or if a file was\n # processed recently, wait until the next batch\n arg_11 = arg_0._processors.keys()\n arg_5 = timezone.utcnow()\n arg_12 = []\n for arg_3 in arg_0._file_paths:\n arg_13 = arg_0.get_last_finish_time(arg_3)\n if (arg_13 is not None and\n (arg_5 - arg_13).total_seconds() <\n arg_0._file_process_interval):\n arg_12.append(arg_3)\n\n arg_14 = [arg_3\n for arg_3, num_runs in arg_0._run_count.items()\n if num_runs == arg_0._max_runs]\n\n arg_15 = list(set(arg_0._file_paths) -\n set(arg_11) -\n set(arg_12) -\n set(arg_14))\n\n for arg_3, arg_4 in arg_0._processors.items():\n arg_0.log.debug(\n \"File path %s is still being processed (started: %s)\",\n arg_4.file_path, arg_4.start_time.isoformat()\n )\n\n arg_0.log.debug(\n \"Queuing the following files for processing:\\n\\t%s\",\n \"\\n\\t\".join(arg_15)\n )\n\n arg_0._file_path_queue.extend(arg_15)\n\n arg_16 = arg_0._find_zombies()\n\n # Start more processors if we have enough slots and files to process\n while (arg_0._parallelism - len(arg_0._processors) > 0 and\n len(arg_0._file_path_queue) > 0):\n arg_3 = arg_0._file_path_queue.pop(0)\n arg_4 = arg_0._processor_factory(arg_3, arg_16)\n\n arg_4.start()\n arg_0.log.debug(\n \"Started a process (PID: %s) to generate tasks for %s\",\n arg_4.pid, arg_3\n )\n arg_0._processors[arg_3] = arg_4\n\n # Update Func count.\n arg_0._run_count[arg_0._heart_beat_key] += 1\n\n return arg_9"} +{"_id": "doc_84", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Opens a ssh connection to the remote host.\n\n :rtype: paramiko.client.SSHClient\n \"\"\"\n\n arg_0.log.debug('Creating SSH client for conn_id: %s', arg_0.ssh_conn_id)\n arg_1 = paramiko.SSHClient()\n if not arg_0.allow_host_key_change:\n arg_0.log.warning('Remote Identification Change is not verified. '\n 'This wont protect against Man-In-The-Middle attacks')\n arg_1.load_system_host_keys()\n if arg_0.no_host_key_check:\n arg_0.log.warning('No Host Key Verification. 
This wont protect '\n 'against Man-In-The-Middle attacks')\n # Default is RejectPolicy\n arg_1.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n if arg_0.password and arg_0.password.strip():\n arg_1.connect(hostname=arg_0.remote_host,\n username=arg_0.username,\n password=arg_0.password,\n key_filename=arg_0.key_file,\n timeout=arg_0.timeout,\n compress=arg_0.compress,\n port=arg_0.port,\n sock=arg_0.host_proxy)\n else:\n arg_1.connect(hostname=arg_0.remote_host,\n username=arg_0.username,\n key_filename=arg_0.key_file,\n timeout=arg_0.timeout,\n compress=arg_0.compress,\n port=arg_0.port,\n sock=arg_0.host_proxy)\n\n if arg_0.keepalive_interval:\n arg_1.get_transport().set_keepalive(arg_0.keepalive_interval)\n\n arg_0.client = arg_1\n return arg_1"} +{"_id": "doc_85", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Gets the latest state of a long-running operation in Google Storage\n Transfer Service.\n\n :param job_name: (Required) Name of the job to be fetched\n :type job_name: str\n :param project_id: (Optional) the ID of the project that owns the Transfer\n Job. If set to None or missing, the default project_id from the GCP\n connection is used.\n :type project_id: str\n :return: Transfer Job\n :rtype: dict\n \"\"\"\n return (\n arg_0.get_conn()\n .transferJobs()\n .get(jobName=arg_1, projectId=arg_2)\n .execute(num_retries=arg_0.num_retries)\n )"} +{"_id": "doc_86", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Lists long-running operations in Google Storage Transfer\n Service that match the specified filter.\n\n :param filter: (Required) A request filter, as described in\n https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter\n :type filter: dict\n :return: List of Transfer Jobs\n :rtype: list[dict]\n \"\"\"\n arg_2 = arg_0.get_conn()\n arg_1 = arg_0._inject_project_id(arg_1, FILTER, FILTER_PROJECT_ID)\n arg_3 = arg_2.transferJobs().list(arg_1=json.dumps(arg_1))\n arg_4 = []\n\n while arg_3 is not None:\n arg_5 = arg_3.execute(num_retries=arg_0.num_retries)\n arg_4.extend(arg_5[TRANSFER_JOBS])\n\n arg_3 = arg_2.transferJobs().list_next(previous_request=arg_3, previous_response=arg_5)\n\n return arg_4"} +{"_id": "doc_87", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Cancels an transfer operation in Google Storage Transfer Service.\n\n :param operation_name: Name of the transfer operation.\n :type operation_name: str\n :rtype: None\n \"\"\"\n arg_0.get_conn().transferOperations().cancel(name=arg_1).execute(num_retries=arg_0.num_retries)"} +{"_id": "doc_88", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pauses an transfer operation in Google Storage Transfer Service.\n\n :param operation_name: (Required) Name of the transfer operation.\n :type operation_name: str\n :rtype: None\n \"\"\"\n arg_0.get_conn().transferOperations().pause(name=arg_1).execute(num_retries=arg_0.num_retries)"} +{"_id": "doc_89", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(arg_3.SUCCESS,), arg_5=60):\n \"\"\"\n Waits until the job reaches the expected state.\n\n :param job: Transfer job\n See:\n https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob\n :type job: dict\n :param expected_statuses: State that is expected\n See:\n https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status\n :type expected_statuses: set[str]\n :param timeout:\n :type timeout: time in which the operation must end in seconds\n :rtype: None\n 
\"\"\"\n while arg_5 > 0:\n arg_6 = arg_0.list_transfer_operations(\n filter={FILTER_PROJECT_ID: arg_1[PROJECT_ID], FILTER_JOB_NAMES: [arg_1[NAME]]}\n )\n\n if GCPTransferServiceHook.operations_contain_expected_statuses(arg_6, arg_2):\n return\n time.sleep(TIME_TO_SLEEP_IN_SECONDS)\n arg_5 -= TIME_TO_SLEEP_IN_SECONDS\n raise AirflowException(\"Timeout. The operation could not be completed within the allotted time.\")"} +{"_id": "doc_90", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the number of slots open at the moment\n \"\"\"\n from airflow.models.taskinstance import \\\n TaskInstance as TI # Avoid circular import\n\n arg_2 = arg_1.query(func.count()).filter(TI.pool == arg_0.pool).filter(\n TI.state.in_([State.RUNNING, State.QUEUED])).scalar()\n return arg_0.slots - arg_2"} +{"_id": "doc_91", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Runs command and returns stdout\n \"\"\"\n arg_1 = subprocess.Popen(\n shlex.split(arg_0),\n stdout=subprocess.PIPE,\n arg_3=subprocess.PIPE,\n close_fds=True)\n arg_2, arg_3 = [stream.decode(sys.getdefaultencoding(), 'ignore')\n for stream in arg_1.communicate()]\n\n if arg_1.returncode != 0:\n raise AirflowConfigException(\n \"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}\"\n .format(arg_0, arg_1.returncode, arg_2, arg_3)\n )\n\n return arg_2"} +{"_id": "doc_92", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"\n Remove an option if it exists in config from a file or\n default config. If both of config have the same option, this removes\n the option in both configs unless remove_default=False.\n \"\"\"\n if super().has_option(arg_1, arg_2):\n super().Func(arg_1, arg_2)\n\n if arg_0.airflow_defaults.has_option(arg_1, arg_2) and arg_3:\n arg_0.airflow_defaults.Func(arg_1, arg_2)"} +{"_id": "doc_93", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Allocate IDs for incomplete keys.\n\n .. seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds\n\n :param partial_keys: a list of partial keys.\n :type partial_keys: list\n :return: a list of full keys.\n :rtype: list\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_3 = (arg_2\n .projects()\n .allocateIds(projectId=arg_0.project_id, body={'keys': arg_1})\n .execute(num_retries=arg_0.num_retries))\n\n return arg_3['keys']"} +{"_id": "doc_94", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Commit a transaction, optionally creating, deleting or modifying some entities.\n\n .. seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/Func\n\n :param body: the body of the Func request.\n :type body: dict\n :return: the response body of the Func request.\n :rtype: dict\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_3 = (arg_2\n .projects()\n .Func(projectId=arg_0.project_id, arg_1=arg_1)\n .execute(num_retries=arg_0.num_retries))\n\n return arg_3"} +{"_id": "doc_95", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Lookup some entities by key.\n\n .. seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/Func\n\n :param keys: the keys to Func.\n :type keys: list\n :param read_consistency: the read consistency to use. 
default, strong or eventual.\n Cannot be used with a transaction.\n :type read_consistency: str\n :param transaction: the transaction to use, if any.\n :type transaction: str\n :return: the response body of the Func request.\n :rtype: dict\n \"\"\"\n arg_4 = arg_0.get_conn()\n\n arg_5 = {'keys': arg_1}\n if arg_2:\n arg_5['readConsistency'] = arg_2\n if arg_3:\n arg_5['transaction'] = arg_3\n arg_6 = (arg_4\n .projects()\n .Func(projectId=arg_0.project_id, arg_5=arg_5)\n .execute(num_retries=arg_0.num_retries))\n\n return arg_6"} +{"_id": "doc_96", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Roll back a transaction.\n\n .. seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/Func\n\n :param transaction: the transaction to roll back.\n :type transaction: str\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_2.projects().Func(\n projectId=arg_0.project_id, body={'transaction': arg_1}\n ).execute(num_retries=arg_0.num_retries)"} +{"_id": "doc_97", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets the latest state of a long-running operation.\n\n .. seealso::\n https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get\n\n :param name: the name of the operation resource.\n :type name: str\n :return: a resource operation instance.\n :rtype: dict\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_3 = (arg_2\n .projects()\n .operations()\n .get(arg_1=arg_1)\n .execute(num_retries=arg_0.num_retries))\n\n return arg_3"} +{"_id": "doc_98", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Deletes the long-running operation.\n\n .. seealso::\n https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete\n\n :param name: the name of the operation resource.\n :type name: str\n :return: none if successful.\n :rtype: dict\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_3 = (arg_2\n .projects()\n .operations()\n .delete(arg_1=arg_1)\n .execute(num_retries=arg_0.num_retries))\n\n return arg_3"} +{"_id": "doc_99", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Poll backup operation state until it's completed.\n\n :param name: the name of the operation resource\n :type name: str\n :param polling_interval_in_seconds: The number of seconds to wait before calling another request.\n :type polling_interval_in_seconds: int\n :return: a resource operation instance.\n :rtype: dict\n \"\"\"\n while True:\n arg_3 = arg_0.get_operation(arg_1)\n\n arg_4 = arg_3['metadata']['common']['state']\n if arg_4 == 'PROCESSING':\n arg_0.log.info('Operation is processing. 
Re-polling state in {} seconds'\n .format(arg_2))\n time.sleep(arg_2)\n else:\n return arg_3"} +{"_id": "doc_100", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Publish a message to a topic or an endpoint.\n\n :param target_arn: either a TopicArn or an EndpointArn\n :type target_arn: str\n :param message: the default message you want to send\n :param message: str\n \"\"\"\n\n arg_3 = arg_0.get_conn()\n\n arg_4 = {\n 'default': arg_2\n }\n\n return arg_3.publish(\n TargetArn=arg_1,\n Message=json.dumps(arg_4),\n MessageStructure='json'\n )"} +{"_id": "doc_101", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves connection to Cloud Natural Language service.\n\n :return: Cloud Natural Language service object\n :rtype: google.cloud.language_v1.LanguageServiceClient\n \"\"\"\n if not arg_0._conn:\n arg_0._conn = LanguageServiceClient(credentials=arg_0._get_credentials())\n return arg_0._conn"} +{"_id": "doc_102", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=None):\n \"\"\"\n Finds named entities in the text along with entity types,\n salience, mentions for each entity, and other properties.\n\n :param document: Input document.\n If a dict is provided, it must be of the same form as the protobuf message Document\n :type document: dict or class google.cloud.language_v1.types.Document\n :param encoding_type: The encoding type used by the API to calculate offsets.\n :type encoding_type: google.cloud.language_v1.types.EncodingType\n :param retry: A retry object used to retry requests. If None is specified, requests will not be\n retried.\n :type retry: google.api_core.retry.Retry\n :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if\n retry is specified, the timeout applies to each individual attempt.\n :type timeout: float\n :param metadata: Additional metadata that is provided to the method.\n :type metadata: sequence[tuple[str, str]]]\n :rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse\n \"\"\"\n arg_6 = arg_0.get_conn()\n\n return arg_6.Func(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5\n )"} +{"_id": "doc_103", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Classifies a document into categories.\n\n :param document: Input document.\n If a dict is provided, it must be of the same form as the protobuf message Document\n :type document: dict or class google.cloud.language_v1.types.Document\n :param retry: A retry object used to retry requests. If None is specified, requests will not be\n retried.\n :type retry: google.api_core.retry.Retry\n :param timeout: The amount of time, in seconds, to wait for the request to complete. 
Note that if\n retry is specified, the timeout applies to each individual attempt.\n :type timeout: float\n :param metadata: Additional metadata that is provided to the method.\n :type metadata: sequence[tuple[str, str]]]\n :rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse\n \"\"\"\n arg_5 = arg_0.get_conn()\n\n return arg_5.Func(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_104", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets template fields for specific operator class.\n\n :param fullname: Full path to operator class.\n For example: ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator``\n :return: List of template field\n :rtype: list[str]\n \"\"\"\n arg_2, arg_3 = arg_1.rsplit(\".\", 1)\n\n try:\n with mock(arg_0.config.autodoc_mock_imports):\n arg_4 = import_module(arg_2)\n except ImportError:\n raise RoleException(\"Error loading %s module.\" % (arg_2, ))\n\n arg_5 = getattr(arg_4, arg_3)\n if not arg_5:\n raise RoleException(\"Error finding %s class in %s module.\" % (arg_3, arg_2))\n\n arg_6 = getattr(arg_5, \"template_fields\")\n\n if not arg_6:\n raise RoleException(\n \"Could not find the template fields for %s class in %s module.\" % (arg_3, arg_2)\n )\n\n return list(arg_6)"} +{"_id": "doc_105", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6={}, arg_7=[]):\n \"\"\"\n A role that allows you to include a list of template fields in the middle of the text. This is especially\n useful when writing guides describing how to use the operator.\n The result is a list of fields where each field is shorted in the literal block.\n\n Sample usage::\n\n :template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator`\n\n For further information look at:\n\n * [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted\n Text Roles)\n \"\"\"\n arg_3 = utils.unescape(arg_3)\n\n try:\n arg_8 = get_template_field(arg_0.env, arg_3)\n except RoleException as e:\n arg_9 = arg_5.reporter.error(\"invalid class name %s \\n%s\" % (arg_3, e, ), line=arg_4)\n arg_10 = arg_5.problematic(arg_2, arg_2, arg_9)\n return [arg_10], [arg_9]\n\n arg_11 = nodes.inline(arg_2=arg_2)\n for arg_12, arg_13 in enumerate(arg_8):\n if arg_12 != 0:\n arg_11 += nodes.Text(\", \")\n arg_11 += nodes.literal(arg_13, \"\", nodes.Text(arg_13))\n\n return [arg_11], []"} +{"_id": "doc_106", "title": "", "text": "def Func():\n \"\"\" Properly close pooled database connections \"\"\"\n log.debug(\"Disposing DB connection pool (PID %s)\", os.getpid())\n global arg_1\n global arg_0\n\n if arg_0:\n arg_0.remove()\n arg_0 = None\n if arg_1:\n arg_1.dispose()\n arg_1 = None"} +{"_id": "doc_107", "title": "", "text": "def Func():\n \"\"\"\n Ensures that certain subfolders of AIRFLOW_HOME are on the classpath\n \"\"\"\n\n if DAGS_FOLDER not in sys.path:\n sys.path.append(DAGS_FOLDER)\n\n # Add ./config/ for loading custom log parsers etc, or\n # airflow_local_settings etc.\n arg_0 = os.path.join(AIRFLOW_HOME, 'config')\n if arg_0 not in sys.path:\n sys.path.append(arg_0)\n\n if PLUGINS_FOLDER not in sys.path:\n sys.path.append(PLUGINS_FOLDER)"} +{"_id": "doc_108", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets the returned Celery result from the Airflow task\n ID provided to the sensor, and returns True if the\n celery result has been finished execution.\n\n :param context: Airflow's execution context\n :type context: dict\n 
:return: True if task has been executed, otherwise False\n :rtype: bool\n \"\"\"\n arg_2 = arg_1['ti']\n arg_3 = arg_2.xcom_pull(task_ids=arg_0.target_task_id)\n return arg_3.ready()"} +{"_id": "doc_109", "title": "", "text": "def Func():\n \"\"\"Return true if the ticket cache contains \"conf\" information as is found\n in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the\n Sun Java Krb5LoginModule in Java6, so we need to take an action to work\n around it.\n \"\"\"\n arg_0 = configuration.conf.get('kerberos', 'ccache')\n\n with open(arg_0, 'rb') as f:\n # Note: this file is binary, so we check against a bytearray.\n return b'X-CACHECONF:' in f.read()"} +{"_id": "doc_110", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Transforms a SQLAlchemy model instance into a dictionary\n \"\"\"\n if not arg_0:\n return None\n arg_1 = {}\n for arg_2 in arg_0.__table__.columns:\n arg_3 = getattr(arg_0, arg_2.name)\n if type(arg_3) == datetime:\n arg_3 = arg_3.isoformat()\n arg_1[arg_2.name] = arg_3\n return arg_1"} +{"_id": "doc_111", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0):\n \"\"\"\n Reduce the given list of items by splitting it into chunks\n of the given size and passing each chunk through the reducer\n \"\"\"\n if len(arg_1) == 0:\n return arg_2\n if arg_3 == 0:\n arg_3 = len(arg_1)\n return reduce(arg_0, chunks(arg_1, arg_3), arg_2)"} +{"_id": "doc_112", "title": "", "text": "def Func(*arg_0):\n \"\"\"\n Given a number of tasks, builds a dependency Func.\n\n Func(task_1, task_2, task_3, task_4)\n\n is equivalent to\n\n task_1.set_downstream(task_2)\n task_2.set_downstream(task_3)\n task_3.set_downstream(task_4)\n \"\"\"\n for arg_1, arg_2 in zip(arg_0[:-1], arg_0[1:]):\n arg_1.set_downstream(arg_2)"} +{"_id": "doc_113", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a pretty ascii table from tuples\n\n If namedtuple are used, the table will have headers\n \"\"\"\n if not arg_0:\n return\n if hasattr(arg_0[0], '_fields'): # if namedtuple\n arg_1 = arg_0[0]._fields\n else:\n arg_1 = [\"col{}\".format(arg_4) for arg_4 in range(len(arg_0[0]))]\n arg_2 = [len(arg_11) for arg_11 in arg_1]\n\n for arg_3 in arg_0:\n for arg_4 in range(len(arg_0[0])):\n arg_5 = len(\"{}\".format(arg_3[arg_4]))\n if arg_5 > arg_2[arg_4]:\n arg_2[arg_4] = arg_5\n arg_6 = []\n arg_7 = []\n for arg_4 in range(len(arg_0[0])):\n if isinstance(arg_0[0][arg_4], int):\n arg_6.append(\"%%%dd\" % arg_2[arg_4])\n else:\n arg_6.append(\"%%-%ds\" % arg_2[arg_4])\n arg_7.append(\"%%-%ds\" % arg_2[arg_4])\n arg_8 = \" | \".join(arg_6)\n arg_9 = \" | \".join(arg_7)\n arg_10 = \"-+-\".join(['-' * n for n in arg_2])\n arg_11 = \"\"\n arg_11 += arg_10 + '\\n'\n arg_11 += (arg_9 % tuple(arg_1)) + '\\n'\n arg_11 += arg_10 + '\\n'\n\n def f(arg_12):\n return \"{}\".format(arg_12) if isinstance(arg_12, basestring) else arg_12\n\n for arg_13 in arg_0:\n arg_11 += arg_8 % tuple(f(arg_12) for arg_12 in arg_13) + '\\n'\n arg_11 += arg_10 + '\\n'\n return arg_11"} +{"_id": "doc_114", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a Google Cloud Dataproc service object.\"\"\"\n arg_1 = arg_0._authorize()\n return build(\n 'dataproc', arg_0.api_version, http=arg_1,\n cache_discovery=False)"} +{"_id": "doc_115", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"AFuncs for Google Cloud Dataproc Operation to complete.\"\"\"\n arg_2 = _DataProcOperation(arg_0.get_conn(), arg_1,\n arg_0.num_retries)\n arg_2.Func_for_done()"} +{"_id": "doc_116", "title": "", "text": "def Func(arg_0, arg_1, 
arg_2, arg_3):\n \"\"\"\n Handles the Airflow + Databricks lifecycle logic for a Databricks operator\n\n :param operator: Databricks operator being handled\n :param context: Airflow context\n \"\"\"\n if arg_0.do_xcom_push:\n arg_3['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=arg_0.run_id)\n arg_2.info('Run submitted with run_id: %s', arg_0.run_id)\n arg_4 = arg_1.get_run_page_url(arg_0.run_id)\n if arg_0.do_xcom_push:\n arg_3['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=arg_4)\n\n arg_2.info('View run status, Spark UI, and logs at %s', arg_4)\n while True:\n arg_5 = arg_1.get_run_state(arg_0.run_id)\n if arg_5.is_terminal:\n if arg_5.is_successful:\n arg_2.info('%s completed successfully.', arg_0.task_id)\n arg_2.info('View run status, Spark UI, and logs at %s', arg_4)\n return\n else:\n arg_6 = '{t} failed with terminal state: {s}'.format(\n t=arg_0.task_id,\n s=arg_5)\n raise AirflowException(arg_6)\n else:\n arg_2.info('%s in run state: %s', arg_0.task_id, arg_5)\n arg_2.info('View run status, Spark UI, and logs at %s', arg_4)\n arg_2.info('Sleeping for %s seconds.', arg_0.polling_period_seconds)\n time.sleep(arg_0.polling_period_seconds)"} +{"_id": "doc_117", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Run an pig script using the pig cli\n\n >>> ph = PigCliHook()\n >>> result = ph.Func(\"ls /;\")\n >>> (\"hdfs://\" in result)\n True\n \"\"\"\n\n with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:\n with NamedTemporaryFile(dir=tmp_dir) as f:\n f.write(arg_1.encode('utf-8'))\n f.flush()\n arg_3 = f.name\n arg_4 = 'pig'\n arg_5 = []\n\n arg_6 = [arg_4, '-f', arg_3] + arg_5\n\n if arg_0.pig_properties:\n arg_7 = arg_0.pig_properties.split()\n arg_6.extend(arg_7)\n if arg_2:\n arg_0.log.info(\"%s\", \" \".join(arg_6))\n arg_8 = subprocess.Popen(\n arg_6,\n arg_9=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=tmp_dir,\n close_fds=True)\n arg_0.sp = arg_8\n arg_9 = ''\n for arg_10 in iter(arg_8.stdout.readline, b''):\n arg_9 += arg_10.decode('utf-8')\n if arg_2:\n arg_0.log.info(arg_10.strip())\n arg_8.wait()\n\n if arg_8.returncode:\n raise AirflowException(arg_9)\n\n return arg_9"} +{"_id": "doc_118", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Fetch and return the state of the given celery task. 
The scope of this function is\n global so that it can be called by subprocesses in the pool.\n\n :param celery_task: a tuple of the Celery task key and the async Celery object used\n to fetch the task's state\n :type celery_task: tuple(str, celery.result.AsyncResult)\n :return: a tuple of the Celery task key and the Celery state of the task\n :rtype: tuple[str, str]\n \"\"\"\n\n try:\n with timeout(seconds=2):\n # Accessing state property of celery task will make actual network request\n # to get the current state of the task.\n arg_1 = (arg_0[0], arg_0[1].state)\n except Exception as e:\n arg_2 = \"Celery Task ID: {}\\n{}\".format(arg_0[0],\n traceback.format_exc())\n arg_1 = ExceptionWithTraceback(e, arg_2)\n return arg_1"} +{"_id": "doc_119", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n How many Celery tasks should each worker process send.\n\n :return: Number of tasks that should be sent per process\n :rtype: int\n \"\"\"\n return max(1,\n int(math.ceil(1.0 * arg_1 / arg_0._sync_parallelism)))"} +{"_id": "doc_120", "title": "", "text": "def Func(arg_0):\n \"\"\"\n How many Celery tasks should be sent to each worker process.\n\n :return: Number of tasks that should be used per process\n :rtype: int\n \"\"\"\n return max(1,\n int(math.ceil(1.0 * len(arg_0.tasks) / arg_0._sync_parallelism)))"} +{"_id": "doc_121", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Like a Python builtin dict object, Func returns the current value\n for a key, and if it isn't there, stores the default value and returns it.\n\n :param key: Dict key for this Variable\n :type key: str\n :param default: Default value to set and return if the variable\n isn't already in the DB\n :type default: Mixed\n :param deserialize_json: Store this as a JSON encoded value in the DB\n and un-encode it when retrieving a value\n :return: Mixed\n \"\"\"\n arg_4 = Variable.get(arg_1, default_var=None,\n arg_3=arg_3)\n if arg_4 is None:\n if arg_2 is not None:\n Variable.set(arg_1, arg_2, serialize_json=arg_3)\n return arg_2\n else:\n raise ValueError('Default Value must be set')\n else:\n return arg_4"} +{"_id": "doc_122", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Launches a MLEngine job and wait for it to reach a terminal state.\n\n :param project_id: The Google Cloud project id within which MLEngine\n job will be launched.\n :type project_id: str\n\n :param job: MLEngine Job object that should be provided to the MLEngine\n API, such as: ::\n\n {\n 'jobId': 'my_job_id',\n 'trainingInput': {\n 'scaleTier': 'STANDARD_1',\n ...\n }\n }\n\n :type job: dict\n\n :param use_existing_job_fn: In case that a MLEngine job with the same\n job_id already exist, this method (if provided) will decide whether\n we should use this existing job, continue waiting for it to finish\n and returning the job object. It should accepts a MLEngine job\n object, and returns a boolean value indicating whether it is OK to\n reuse the existing job. 
If 'use_existing_job_fn' is not provided,\n we by default reuse the existing MLEngine job.\n :type use_existing_job_fn: function\n\n :return: The MLEngine job object if the job successfully reach a\n terminal state (which might be FAILED or CANCELLED state).\n :rtype: dict\n \"\"\"\n arg_4 = arg_0._mlengine.projects().jobs().create(\n parent='projects/{}'.format(arg_1),\n body=arg_2)\n arg_5 = arg_2['jobId']\n\n try:\n arg_4.execute()\n except HttpError as e:\n # 409 means there is an existing job with the same job ID.\n if e.resp.status == 409:\n if arg_3 is not None:\n arg_6 = arg_0._get_job(arg_1, arg_5)\n if not arg_3(arg_6):\n arg_0.log.error(\n 'Job with job_id %s already exist, but it does '\n 'not match our expectation: %s',\n arg_5, arg_6\n )\n raise\n arg_0.log.info(\n 'Job with job_id %s already exist. Will waiting for it to finish',\n arg_5\n )\n else:\n arg_0.log.error('Failed to create MLEngine job: {}'.format(e))\n raise\n\n return arg_0._wait_for_job_done(arg_1, arg_5)"} +{"_id": "doc_123", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Gets a MLEngine job based on the job name.\n\n :return: MLEngine job object if succeed.\n :rtype: dict\n\n Raises:\n googleapiclient.errors.HttpError: if HTTP error is returned from server\n \"\"\"\n arg_3 = 'projects/{}/jobs/{}'.format(arg_1, arg_2)\n arg_4 = arg_0._mlengine.projects().jobs().get(name=arg_3)\n while True:\n try:\n return arg_4.execute()\n except HttpError as e:\n if e.resp.status == 429:\n # polling after 30 seconds when quota failure occurs\n time.sleep(30)\n else:\n arg_0.log.error('Failed to get MLEngine job: {}'.format(e))\n raise"} +{"_id": "doc_124", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create a Model. Blocks until finished.\n \"\"\"\n if not arg_2['name']:\n raise ValueError(\"Model name must be provided and \"\n \"could not be an empty string\")\n arg_3 = 'projects/{}'.format(arg_1)\n\n arg_4 = arg_0._mlengine.projects().models().create(\n parent=arg_3, body=arg_2)\n return arg_4.execute()"} +{"_id": "doc_125", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write batch items to dynamodb table with provisioned throughout capacity.\n \"\"\"\n\n arg_2 = arg_0.get_conn()\n\n try:\n arg_3 = arg_2.Table(arg_0.table_name)\n\n with arg_3.batch_writer(overwrite_by_pkeys=arg_0.table_keys) as batch:\n for arg_4 in arg_1:\n batch.put_item(Item=arg_4)\n return True\n except Exception as general_error:\n raise AirflowException(\n 'Failed to insert items in dynamodb, error: {error}'.format(\n error=str(general_error)\n )\n )"} +{"_id": "doc_126", "title": "", "text": "def Func():\n \"\"\"Integrate plugins to the context.\"\"\"\n from airflow.plugins_manager import executors_modules\n for arg_0 in executors_modules:\n arg_1.modules[arg_0.__name__] = arg_0\n arg_4()[arg_0._name] = arg_0"} +{"_id": "doc_127", "title": "", "text": "def Func():\n \"\"\"Creates a new instance of the configured executor if none exists and returns it\"\"\"\n global arg_1\n\n if arg_1 is not None:\n return arg_1\n\n arg_0 = configuration.conf.get('core', 'EXECUTOR')\n\n arg_1 = _get_executor(arg_0)\n\n arg_2 = LoggingMixin().log\n arg_2.info(\"Using executor %s\", arg_0)\n\n return arg_1"} +{"_id": "doc_128", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Handles error callbacks when using Segment with segment_debug_mode set to True\n \"\"\"\n arg_0.log.error('Encountered Segment error: {segment_error} with '\n 'items: {with_items}'.format(segment_error=arg_1,\n with_items=arg_2))\n 
raise AirflowException('Segment error: {}'.format(arg_1))"} +{"_id": "doc_129", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a mssql connection object\n \"\"\"\n arg_1 = arg_0.Funcection(arg_0.mssql_conn_id)\n arg_1 = pymssql.connect(\n server=arg_1.host,\n user=arg_1.login,\n password=arg_1.password,\n database=arg_0.schema or arg_1.schema,\n port=arg_1.port)\n return arg_1"} +{"_id": "doc_130", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Delete all DB records related to the specified Dag.\n \"\"\"\n try:\n arg_1 = delete.Func(arg_0)\n except AirflowException as err:\n _log.error(err)\n arg_2 = jsonify(error=\"{}\".format(err))\n arg_2.status_code = err.status_code\n return arg_2\n return jsonify(message=\"Removed {} record(s)\".format(arg_1), arg_1=arg_1)"} +{"_id": "doc_131", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a JSON with a task's public instance variables. \"\"\"\n try:\n arg_2 = get_task(arg_0, arg_1)\n except AirflowException as err:\n _log.info(err)\n arg_3 = jsonify(error=\"{}\".format(err))\n arg_3.status_code = err.status_code\n return arg_3\n\n # JSONify and return.\n arg_5 = {k: str(v)\n for k, v in vars(arg_2).items()\n if not k.startswith('_')}\n return jsonify(arg_5)"} +{"_id": "doc_132", "title": "", "text": "def Func():\n \"\"\"Get all pools.\"\"\"\n try:\n arg_0 = pool_api.Func()\n except AirflowException as err:\n _log.error(err)\n arg_1 = jsonify(error=\"{}\".format(err))\n arg_1.status_code = err.status_code\n return arg_1\n else:\n return jsonify([arg_3.to_json() for arg_3 in arg_0])"} +{"_id": "doc_133", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete pool.\"\"\"\n try:\n arg_1 = pool_api.Func(arg_0=arg_0)\n except AirflowException as err:\n _log.error(err)\n arg_2 = jsonify(error=\"{}\".format(err))\n arg_2.status_code = err.status_code\n return arg_2\n else:\n return jsonify(arg_1.to_json())"} +{"_id": "doc_134", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Create a new container group\n\n :param resource_group: the name of the resource group\n :type resource_group: str\n :param name: the name of the container group\n :type name: str\n :param container_group: the properties of the container group\n :type container_group: azure.mgmt.containerinstance.models.ContainerGroup\n \"\"\"\n arg_0.connection.container_groups.Func(arg_1,\n arg_2,\n arg_3)"} +{"_id": "doc_135", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the state and exitcode of a container group\n\n :param resource_group: the name of the resource group\n :type resource_group: str\n :param name: the name of the container group\n :type name: str\n :return: A tuple with the state, exitcode, and details.\n If the exitcode is unknown 0 is returned.\n :rtype: tuple(state,exitcode,details)\n \"\"\"\n arg_3 = arg_0._get_instance_view(arg_1, arg_2).current_state\n return (arg_3.state,\n arg_3.exit_code,\n arg_3.detail_status)"} +{"_id": "doc_136", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Builds an ingest query for an HDFS TSV load.\n\n :param static_path: The path on hdfs where the data is\n :type static_path: str\n :param columns: List of all the columns that are available\n :type columns: list\n \"\"\"\n\n # backward compatibility for num_shards,\n # but target_partition_size is the default setting\n # and overwrites the num_shards\n arg_3 = arg_0.num_shards\n arg_4 = arg_0.target_partition_size\n if arg_0.target_partition_size == -1:\n if arg_0.num_shards == -1:\n arg_4 = 
DEFAULT_TARGET_PARTITION_SIZE\n else:\n arg_3 = -1\n\n arg_5 = [m['fieldName'] for m in arg_0.metric_spec if m['type'] != 'count']\n\n # Take all the columns, which are not the time dimension\n # or a metric, as the dimension columns\n arg_6 = [c for c in arg_2 if c not in arg_5 and c != arg_0.ts_dim]\n\n arg_7 = {\n \"type\": \"index_hadoop\",\n \"spec\": {\n \"dataSchema\": {\n \"metricsSpec\": arg_0.metric_spec,\n \"granularitySpec\": {\n \"queryGranularity\": arg_0.query_granularity,\n \"intervals\": arg_0.intervals,\n \"type\": \"uniform\",\n \"segmentGranularity\": arg_0.segment_granularity,\n },\n \"parser\": {\n \"type\": \"string\",\n \"parseSpec\": {\n \"columns\": arg_2,\n \"dimensionsSpec\": {\n \"dimensionExclusions\": [],\n \"dimensions\": arg_6, # list of names\n \"spatialDimensions\": []\n },\n \"timestampSpec\": {\n \"column\": arg_0.ts_dim,\n \"format\": \"auto\"\n },\n \"format\": \"tsv\"\n }\n },\n \"dataSource\": arg_0.druid_datasource\n },\n \"tuningConfig\": {\n \"type\": \"hadoop\",\n \"jobProperties\": {\n \"mapreduce.job.user.classpath.first\": \"false\",\n \"mapreduce.map.output.compress\": \"false\",\n \"mapreduce.output.fileoutputformat.compress\": \"false\",\n },\n \"partitionsSpec\": {\n \"type\": \"hashed\",\n \"targetPartitionSize\": arg_4,\n \"numShards\": arg_3,\n },\n },\n \"ioConfig\": {\n \"inputSpec\": {\n \"paths\": arg_1,\n \"type\": \"static\"\n },\n \"type\": \"hadoop\"\n }\n }\n }\n\n if arg_0.job_properties:\n arg_7['spec']['tuningConfig']['jobProperties'] \\\n .update(arg_0.job_properties)\n\n if arg_0.hadoop_dependency_coordinates:\n arg_7['hadoopDependencyCoordinates'] \\\n = arg_0.hadoop_dependency_coordinates\n\n return arg_7"} +{"_id": "doc_137", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check for message on subscribed channels and write to xcom the message with key ``message``\n\n An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}``\n\n :param context: the context object\n :type context: dict\n :return: ``True`` if message (with type 'message') is available or ``False`` if not\n \"\"\"\n arg_0.log.info('RedisPubSubSensor checking for message on channels: %s', arg_0.channels)\n\n arg_2 = arg_0.pubsub.get_message()\n arg_0.log.info('Message %s from channel %s', arg_2, arg_0.channels)\n\n # Process only message types\n if arg_2 and arg_2['type'] == 'message':\n\n arg_1['ti'].xcom_push(key='message', value=arg_2)\n arg_0.pubsub.unsubscribe(arg_0.channels)\n\n return True\n\n return False"} +{"_id": "doc_138", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None,\n arg_3=None, arg_4=None, arg_5=False,\n arg_6=None):\n \"\"\"\n Returns a set of dag runs for the given search criteria.\n\n :param dag_id: the dag_id to Func dag runs for\n :type dag_id: int, list\n :param run_id: defines the the run id for this dag run\n :type run_id: str\n :param execution_date: the execution date\n :type execution_date: datetime.datetime\n :param state: the state of the dag run\n :type state: airflow.utils.state.State\n :param external_trigger: whether this dag run is externally triggered\n :type external_trigger: bool\n :param no_backfills: return no backfills (True), return all (False).\n Defaults to False\n :type no_backfills: bool\n :param session: database session\n :type session: sqlalchemy.orm.session.Session\n \"\"\"\n arg_7 = DagRun\n\n arg_8 = arg_6.query(arg_7)\n if arg_0:\n arg_8 = arg_8.filter(arg_7.dag_id == arg_0)\n if arg_1:\n arg_8 = arg_8.filter(arg_7.run_id == arg_1)\n 
if arg_2:\n if isinstance(arg_2, list):\n arg_8 = arg_8.filter(arg_7.execution_date.in_(arg_2))\n else:\n arg_8 = arg_8.filter(arg_7.execution_date == arg_2)\n if arg_3:\n arg_8 = arg_8.filter(arg_7.state == arg_3)\n if arg_4 is not None:\n arg_8 = arg_8.filter(arg_7.external_trigger == arg_4)\n if arg_5:\n # in order to prevent a circular dependency\n from airflow.jobs import BackfillJob\n arg_8 = arg_8.filter(arg_7.run_id.notlike(BackfillJob.ID_PREFIX + '%'))\n\n arg_9 = arg_8.order_by(arg_7.execution_date).all()\n\n return arg_9"} +{"_id": "doc_139", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Returns the task instances for this dag run\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n arg_3 = arg_2.query(TaskInstance).filter(\n TaskInstance.dag_id == arg_0.dag_id,\n TaskInstance.execution_date == arg_0.execution_date,\n )\n if arg_1:\n if isinstance(arg_1, six.string_types):\n arg_3 = arg_3.filter(TaskInstance.state == arg_1)\n else:\n # this is required to deal with NULL values\n if None in arg_1:\n arg_3 = arg_3.filter(\n or_(TaskInstance.state.in_(arg_1),\n TaskInstance.state.is_(None))\n )\n else:\n arg_3 = arg_3.filter(TaskInstance.state.in_(arg_1))\n\n if arg_0.dag and arg_0.dag.partial:\n arg_3 = arg_3.filter(TaskInstance.task_id.in_(arg_0.dag.task_ids))\n\n return arg_3.all()"} +{"_id": "doc_140", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Returns the task instance specified by task_id for this dag run\n\n :param task_id: the task id\n \"\"\"\n\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n arg_3 = TaskInstance\n arg_4 = arg_2.query(arg_3).filter(\n arg_3.dag_id == arg_0.dag_id,\n arg_3.execution_date == arg_0.execution_date,\n arg_3.task_id == arg_1\n ).first()\n\n return arg_4"} +{"_id": "doc_141", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"The previous DagRun, if there is one\"\"\"\n\n return arg_1.query(DagRun).filter(\n DagRun.dag_id == arg_0.dag_id,\n DagRun.execution_date < arg_0.execution_date\n ).order_by(\n DagRun.execution_date.desc()\n ).first()"} +{"_id": "doc_142", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"The previous, SCHEDULED DagRun, if there is one\"\"\"\n arg_2 = arg_0.get_dag()\n\n return arg_1.query(DagRun).filter(\n DagRun.dag_id == arg_0.dag_id,\n DagRun.execution_date == arg_2.previous_schedule(arg_0.execution_date)\n ).first()"} +{"_id": "doc_143", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Determines the overall state of the DagRun based on the state\n of its TaskInstances.\n\n :return: State\n \"\"\"\n\n arg_2 = arg_0.get_dag()\n\n arg_3 = arg_0.get_task_instances(arg_1=arg_1)\n arg_0.log.debug(\"Updating state for %s considering %s task(s)\", arg_0, len(arg_3))\n\n for arg_4 in list(arg_3):\n # skip in db?\n if arg_4.state == State.REMOVED:\n arg_3.remove(arg_4)\n else:\n arg_4.task = arg_2.get_task(arg_4.task_id)\n\n # pre-calculate\n # db is faster\n arg_6 = timezone.utcnow()\n arg_7 = arg_0.get_task_instances(\n state=State.unfinished(),\n arg_1=arg_1\n )\n arg_8 = all(not t.task.depends_on_past for t in arg_7)\n arg_9 = all(t.task.task_concurrency is None\n for t in arg_7)\n # small speed up\n if arg_7 and arg_8 and arg_9:\n # todo: this can actually get pretty slow: one task costs between 0.01-015s\n arg_10 = True\n for arg_11 in arg_7:\n # We need to flag upstream and check for changes because upstream\n # failures/re-schedules can result in deadlock false 
positives\n arg_12 = arg_11.state\n arg_13 = arg_11.are_dependencies_met(\n dep_context=DepContext(\n flag_upstream_failed=True,\n ignore_in_retry_period=True,\n ignore_in_reschedule_period=True),\n arg_1=arg_1)\n if arg_13 or arg_12 != arg_11.current_state(arg_1=arg_1):\n arg_10 = False\n break\n\n arg_14 = (timezone.utcnow() - arg_6).total_seconds() * 1000\n Stats.timing(\"dagrun.dependency-check.{}\".format(arg_0.dag_id), arg_14)\n\n arg_15 = [t.task_id for t in arg_2.roots]\n arg_16 = [t for t in arg_3 if t.task_id in arg_15]\n\n # if all roots finished and at least one failed, the run failed\n if (not arg_7 and\n any(arg_17.state in (State.FAILED, State.UPSTREAM_FAILED) for arg_17 in arg_16)):\n arg_0.log.info('Marking run %s failed', arg_0)\n arg_0.set_state(State.FAILED)\n arg_2.handle_callback(arg_0, success=False, reason='task_failure',\n arg_1=arg_1)\n\n # if all roots succeeded and no unfinished tasks, the run succeeded\n elif not arg_7 and all(arg_17.state in (State.SUCCESS, State.SKIPPED)\n for arg_17 in arg_16):\n arg_0.log.info('Marking run %s successful', arg_0)\n arg_0.set_state(State.SUCCESS)\n arg_2.handle_callback(arg_0, success=True, reason='success', arg_1=arg_1)\n\n # if *all tasks* are deadlocked, the run failed\n elif (arg_7 and arg_8 and\n arg_9 and arg_10):\n arg_0.log.info('Deadlock; marking run %s failed', arg_0)\n arg_0.set_state(State.FAILED)\n arg_2.handle_callback(arg_0, success=False, reason='all_tasks_deadlocked',\n arg_1=arg_1)\n\n # finally, if the roots aren't done, the dag is still running\n else:\n arg_0.set_state(State.RUNNING)\n\n arg_0._emit_duration_stats_for_finished_state()\n\n # todo: determine we want to use with_for_update to make sure to lock the run\n arg_1.merge(arg_0)\n arg_1.commit()\n\n return arg_0.state"} +{"_id": "doc_144", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Verifies the DagRun by checking for removed tasks or tasks that are not in the\n database yet. It will set state to removed or add the task if required.\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n arg_2 = arg_0.get_dag()\n arg_3 = arg_0.get_task_instances(arg_1=arg_1)\n\n # check for removed or restored tasks\n arg_4 = []\n for arg_5 in arg_3:\n arg_4.append(arg_5.task_id)\n arg_6 = None\n try:\n arg_6 = arg_2.get_task(arg_5.task_id)\n except AirflowException:\n if arg_5.state == State.REMOVED:\n pass # ti has already been removed, just ignore it\n elif arg_0.state is not State.RUNNING and not arg_2.partial:\n arg_0.log.warning(\"Failed to get task '{}' for dag '{}'. 
\"\n \"Marking it as removed.\".format(arg_5, arg_2))\n Stats.incr(\n \"task_removed_from_dag.{}\".format(arg_2.dag_id), 1, 1)\n arg_5.state = State.REMOVED\n\n arg_8 = arg_6 is not None\n arg_9 = arg_8 and arg_5.state == State.REMOVED\n if arg_9:\n arg_0.log.info(\"Restoring task '{}' which was previously \"\n \"removed from DAG '{}'\".format(arg_5, arg_2))\n Stats.incr(\"task_restored_to_dag.{}\".format(arg_2.dag_id), 1, 1)\n arg_5.state = State.NONE\n\n # check for missing tasks\n for arg_6 in six.itervalues(arg_2.task_dict):\n if arg_6.start_date > arg_0.execution_date and not arg_0.is_backfill:\n continue\n\n if arg_6.task_id not in arg_4:\n Stats.incr(\n \"task_instance_created-{}\".format(arg_6.__class__.__name__),\n 1, 1)\n arg_5 = TaskInstance(arg_6, arg_0.execution_date)\n arg_1.add(arg_5)\n\n arg_1.commit()"} +{"_id": "doc_145", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n We need to get the headers in addition to the body answer\n to get the location from them\n This function uses jenkins_request method from python-jenkins library\n with just the return call changed\n\n :param jenkins_server: The server to query\n :param req: The request to execute\n :return: Dict containing the response body (key body)\n and the headers coming along (headers)\n \"\"\"\n try:\n arg_2 = arg_0.jenkins_request(arg_1)\n arg_3 = arg_2.content\n arg_4 = arg_2.headers\n if arg_3 is None:\n raise jenkins.EmptyResponseException(\n \"Error communicating with server[%s]: \"\n \"empty response\" % arg_0.server)\n return {'body': arg_3.decode('utf-8'), 'headers': arg_4}\n except HTTPError as e:\n # Jenkins's funky authentication means its nigh impossible to\n # distinguish errors.\n if e.code in [401, 403, 500]:\n # six.moves.urllib.error.HTTPError provides a 'reason'\n # attribute for all python version except for ver 2.6\n # Falling back to HTTPError.msg since it contains the\n # same info as reason\n raise JenkinsException(\n 'Error in request. 
' +\n 'Possibly authentication failed [%s]: %s' % (\n e.code, e.msg)\n )\n elif e.code == 404:\n raise jenkins.NotFoundException('Requested item could not be found')\n else:\n raise\n except socket.timeout as e:\n raise jenkins.TimeoutException('Error in request: %s' % e)\n except URLError as e:\n # python 2.6 compatibility to ensure same exception raised\n # since URLError wraps a socket timeout on python 2.6.\n if str(e.reason) == \"timed out\":\n raise jenkins.TimeoutException('Error in request: %s' % e.reason)\n raise JenkinsException('Error in request: %s' % e.reason)"} +{"_id": "doc_146", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Given a context, this function provides a dictionary of values that can be used to\n externally reconstruct relations between dags, dag_runs, tasks and task_instances.\n Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if\n in_env_var_format is set to True.\n\n :param context: The context for the task_instance of interest.\n :type context: dict\n :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format.\n :type in_env_var_format: bool\n :return: task_instance context as dict.\n \"\"\"\n arg_2 = dict()\n if arg_1:\n arg_3 = 'env_var_format'\n else:\n arg_3 = 'default'\n arg_4 = arg_0.get('task_instance')\n if arg_4 and arg_4.dag_id:\n arg_2[arg_5['AIRFLOW_CONTEXT_DAG_ID'][\n arg_3]] = arg_4.dag_id\n if arg_4 and arg_4.task_id:\n arg_2[arg_5['AIRFLOW_CONTEXT_TASK_ID'][\n arg_3]] = arg_4.task_id\n if arg_4 and arg_4.execution_date:\n arg_2[\n arg_5['AIRFLOW_CONTEXT_EXECUTION_DATE'][\n arg_3]] = arg_4.execution_date.isoformat()\n arg_6 = arg_0.get('dag_run')\n if arg_6 and arg_6.run_id:\n arg_2[arg_5['AIRFLOW_CONTEXT_DAG_RUN_ID'][\n arg_3]] = arg_6.run_id\n return arg_2"} +{"_id": "doc_147", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"\n Queries datadog for a specific metric, potentially with some\n function applied to it and returns the results.\n\n :param query: The datadog query to execute (see datadog docs)\n :type query: str\n :param from_seconds_ago: How many seconds ago to start querying for.\n :type from_seconds_ago: int\n :param to_seconds_ago: Up to how many seconds ago to query for.\n :type to_seconds_ago: int\n \"\"\"\n arg_4 = int(time.time())\n\n arg_5 = api.Metric.query(\n start=arg_4 - arg_2,\n end=arg_4 - arg_3,\n arg_1=arg_1)\n\n arg_0.validate_response(arg_5)\n return arg_5"} +{"_id": "doc_148", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Fail given zombie tasks, which are tasks that haven't\n had a heartbeat for too long, in the current DagBag.\n\n :param zombies: zombie task instances to kill.\n :type zombies: airflow.utils.dag_processing.SimpleTaskInstance\n :param session: DB session.\n :type session: sqlalchemy.orm.session.Session\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n for arg_3 in arg_1:\n if arg_3.dag_id in arg_0.dags:\n arg_4 = arg_0.dags[arg_3.dag_id]\n if arg_3.task_id in arg_4.task_ids:\n arg_5 = arg_4.get_task(arg_3.task_id)\n arg_6 = TaskInstance(arg_5, arg_3.execution_date)\n # Get properties needed for failure handling from SimpleTaskInstance.\n arg_6.start_date = arg_3.start_date\n arg_6.end_date = arg_3.end_date\n arg_6.try_number = arg_3.try_number\n arg_6.state = arg_3.state\n arg_6.test_mode = configuration.getboolean('core', 'unit_test_mode')\n arg_6.handle_failure(\"{} detected as zombie\".format(arg_6),\n arg_6.test_mode, arg_6.get_template_context())\n 
arg_0.log.info(\n 'Marked zombie job %s as %s', arg_6, arg_6.state)\n Stats.incr('zombies_killed')\n arg_2.commit()"} +{"_id": "doc_149", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Adds the DAG into the bag, recurses into sub dags.\n Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags\n \"\"\"\n\n arg_1.test_cycle() # throws if a task cycle is found\n\n arg_1.resolve_template_files()\n arg_1.last_loaded = timezone.utcnow()\n\n for arg_5 in arg_1.tasks:\n settings.policy(arg_5)\n\n arg_6 = arg_1.subdags\n\n try:\n for arg_7 in arg_6:\n arg_7.full_filepath = arg_1.full_filepath\n arg_7.parent_dag = arg_1\n arg_7.is_subdag = True\n arg_0.Func(arg_7, arg_2=arg_1, arg_3=arg_3)\n\n arg_0.dags[arg_1.dag_id] = arg_1\n arg_0.log.debug('Loaded DAG %s', arg_1)\n except AirflowDagCycleException as cycle_exception:\n # There was an error in bagging the dag. Remove it from the list of dags\n arg_0.log.exception('Exception bagging dag: %s', arg_1.dag_id)\n # Only necessary at the root level since DAG.subdags automatically\n # performs DFS to search through all subdags\n if arg_1 == arg_3:\n for arg_7 in arg_6:\n if arg_7.dag_id in arg_0.dags:\n del arg_0.dags[arg_7.dag_id]\n raise cycle_exception"} +{"_id": "doc_150", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=True,\n arg_3=arg_4.conf.getboolean('core', 'LOAD_EXAMPLES'),\n arg_7=arg_4.conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')):\n \"\"\"\n Given a file path or a folder, this method looks for python modules,\n imports them and adds them to the dagbag collection.\n\n Note that if a ``.airflowignore`` file is found while processing\n the directory, it will behave much like a ``.gitignore``,\n ignoring files that match any of the regex patterns specified\n in the file.\n\n **Note**: The patterns in .airflowignore are treated as\n un-anchored regexes, not shell-like glob patterns.\n \"\"\"\n arg_8 = timezone.utcnow()\n arg_1 = arg_1 or arg_0.dag_folder\n\n # Used to store stats around DagBag processing\n arg_9 = []\n arg_10 = namedtuple(\n 'FileLoadStat', \"file duration dag_num task_num dags\")\n\n arg_1 = correct_maybe_zipped(arg_1)\n\n for arg_11 in list_py_file_paths(arg_1, arg_7=arg_7,\n arg_3=arg_3):\n try:\n arg_12 = timezone.utcnow()\n arg_13 = arg_0.process_file(\n arg_11, arg_2=arg_2,\n arg_7=arg_7)\n\n arg_14 = timezone.utcnow() - arg_12\n arg_14 = arg_14.total_seconds() + (\n float(arg_14.microseconds) / 1000000)\n arg_9.append(arg_10(\n arg_11.replace(arg_1, ''),\n arg_14,\n len(arg_13),\n sum([len(arg_15.tasks) for arg_15 in arg_13]),\n str([arg_15.dag_id for arg_15 in arg_13]),\n ))\n except Exception as e:\n arg_0.log.exception(e)\n Stats.gauge(\n 'Func', (timezone.utcnow() - arg_8).total_seconds(), 1)\n Stats.gauge(\n 'dagbag_size', len(arg_0.dags), 1)\n Stats.gauge(\n 'dagbag_import_errors', len(arg_0.import_errors), 1)\n arg_0.dagbag_stats = sorted(\n arg_9, key=lambda x: x.duration, reverse=True)"} +{"_id": "doc_151", "title": "", "text": "def Func(arg_0):\n \"\"\"Prints a report around DagBag loading stats\"\"\"\n arg_1 = textwrap.dedent(\"\"\"\\n\n -------------------------------------------------------------------\n DagBag loading stats for {dag_folder}\n -------------------------------------------------------------------\n Number of DAGs: {dag_num}\n Total task number: {task_num}\n DagBag parsing time: {duration}\n {table}\n \"\"\")\n arg_2 = arg_0.dagbag_stats\n return arg_1.format(\n dag_folder=arg_0.dag_folder,\n duration=sum([arg_3.duration for 
arg_3 in arg_2]),\n dag_num=sum([arg_3.dag_num for arg_3 in arg_2]),\n task_num=sum([arg_3.task_num for arg_3 in arg_2]),\n table=pprinttable(arg_2),\n )"} +{"_id": "doc_152", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add or subtract days from a YYYY-MM-DD\n\n :param ds: anchor date in ``YYYY-MM-DD`` format to add to\n :type ds: str\n :param days: number of days to add to the ds, you can use negative values\n :type days: int\n\n >>> Func('2015-01-01', 5)\n '2015-01-06'\n >>> Func('2015-01-06', -5)\n '2015-01-01'\n \"\"\"\n\n arg_0 = datetime.strptime(arg_0, '%Y-%m-%d')\n if arg_1:\n arg_0 = arg_0 + timedelta(arg_1)\n return arg_0.isoformat()[:10]"} +{"_id": "doc_153", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Takes an input string and outputs another string\n as specified in the output format\n\n :param ds: input string which contains a date\n :type ds: str\n :param input_format: input string format. E.g. %Y-%m-%d\n :type input_format: str\n :param output_format: output string format E.g. %Y-%m-%d\n :type output_format: str\n\n >>> Func('2015-01-01', \"%Y-%m-%d\", \"%m-%d-%y\")\n '01-01-15'\n >>> Func('1/5/2015', \"%m/%d/%Y\", \"%Y-%m-%d\")\n '2015-01-05'\n \"\"\"\n return datetime.strptime(arg_0, arg_1).strftime(arg_2)"} +{"_id": "doc_154", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Func matching files in a directory with self.regex\n\n :return: Bool depending on the search criteria\n \"\"\"\n arg_2 = arg_0.hook(arg_0.hdfs_conn_id).get_conn()\n arg_0.log.info(\n 'Poking for %s to be a directory with files matching %s', arg_0.filepath, arg_0.regex.pattern\n )\n arg_3 = [f for f in arg_2.ls([arg_0.filepath], include_toplevel=False) if\n f['file_type'] == 'f' and\n arg_0.regex.match(f['path'].replace('%s/' % arg_0.filepath, ''))]\n arg_3 = arg_0.filter_for_ignored_ext(arg_3, arg_0.ignored_ext,\n arg_0.ignore_copying)\n arg_3 = arg_0.filter_for_filesize(arg_3, arg_0.file_size)\n return bool(arg_3)"} +{"_id": "doc_155", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=True,\n arg_3=None,\n ):\n \"\"\"\n Clears a set of task instances, but makes sure the running ones\n get killed.\n\n :param tis: a list of task instances\n :param session: current session\n :param activate_dag_runs: flag to check for active dag run\n :param dag: DAG object\n \"\"\"\n arg_4 = []\n for arg_5 in arg_0:\n if arg_5.state == State.RUNNING:\n if arg_5.job_id:\n arg_5.state = State.SHUTDOWN\n arg_4.append(arg_5.job_id)\n else:\n arg_7 = arg_5.task_id\n if arg_3 and arg_3.has_task(arg_7):\n arg_8 = arg_3.get_task(arg_7)\n arg_9 = arg_8.retries\n arg_5.max_tries = arg_5.try_number + arg_9 - 1\n else:\n # Ignore errors when updating max_tries if dag is None or\n # task not found in dag since database records could be\n # outdated. 
We make max_tries the maximum value of its\n # original max_tries or the current task try number.\n arg_5.max_tries = max(arg_5.max_tries, arg_5.try_number - 1)\n arg_5.state = State.NONE\n arg_1.merge(arg_5)\n\n if arg_4:\n from airflow.jobs import BaseJob as BJ\n for arg_11 in arg_1.query(BJ).filter(BJ.id.in_(arg_4)).all():\n arg_11.state = State.SHUTDOWN\n\n if arg_2 and arg_0:\n from airflow.models.dagrun import DagRun # Avoid circular import\n arg_12 = arg_1.query(DagRun).filter(\n DagRun.dag_id.in_({arg_5.dag_id for arg_5 in arg_0}),\n DagRun.execution_date.in_({arg_5.execution_date for arg_5 in arg_0}),\n ).all()\n for arg_13 in arg_12:\n arg_13.state = State.RUNNING\n arg_13.start_date = timezone.utcnow()"} +{"_id": "doc_156", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the try number that this task number will be when it is actually\n run.\n\n If the TI is currently running, this will match the column in the\n databse, in all othercases this will be incremenetd\n \"\"\"\n # This is designed so that task logs end up in the right file.\n if arg_0.state == State.RUNNING:\n return arg_0._Func\n return arg_0._Func + 1"} +{"_id": "doc_157", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=False,\n arg_4=False,\n arg_5=False,\n arg_6=False,\n arg_7=False,\n arg_8=False,\n arg_9=None,\n arg_10=None,\n arg_11=False,\n arg_12=None,\n arg_13=None,\n arg_14=None\n ):\n \"\"\"\n Generates the shell command required to execute this task instance.\n\n :param dag_id: DAG ID\n :type dag_id: unicode\n :param task_id: Task ID\n :type task_id: unicode\n :param execution_date: Execution date for the task\n :type execution_date: datetime\n :param mark_success: Whether to mark the task as successful\n :type mark_success: bool\n :param ignore_all_deps: Ignore all ignorable dependencies.\n Overrides the other ignore_* parameters.\n :type ignore_all_deps: bool\n :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs\n (e.g. 
for Backfills)\n :type ignore_depends_on_past: bool\n :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past\n and trigger rule\n :type ignore_task_deps: bool\n :param ignore_ti_state: Ignore the task instance's previous failure/success\n :type ignore_ti_state: bool\n :param local: Whether to run the task locally\n :type local: bool\n :param pickle_id: If the DAG was serialized to the DB, the ID\n associated with the pickled DAG\n :type pickle_id: unicode\n :param file_path: path to the file containing the DAG definition\n :param raw: raw mode (needs more details)\n :param job_id: job ID (needs more details)\n :param pool: the Airflow pool that the task should run in\n :type pool: unicode\n :param cfg_path: the Path to the configuration file\n :type cfg_path: basestring\n :return: shell command that can be used to run the task instance\n \"\"\"\n arg_15 = arg_2.isoformat()\n arg_16 = [\"airflow\", \"run\", str(arg_0), str(arg_1), str(arg_15)]\n arg_16.extend([\"--mark_success\"]) if arg_3 else None\n arg_16.extend([\"--pickle\", str(arg_9)]) if arg_9 else None\n arg_16.extend([\"--job_id\", str(arg_12)]) if arg_12 else None\n arg_16.extend([\"-A\"]) if arg_4 else None\n arg_16.extend([\"-i\"]) if arg_6 else None\n arg_16.extend([\"-I\"]) if arg_5 else None\n arg_16.extend([\"--force\"]) if arg_7 else None\n arg_16.extend([\"--local\"]) if arg_8 else None\n arg_16.extend([\"--pool\", arg_13]) if arg_13 else None\n arg_16.extend([\"--raw\"]) if arg_11 else None\n arg_16.extend([\"-sd\", arg_10]) if arg_10 else None\n arg_16.extend([\"--cfg_path\", arg_14]) if arg_14 else None\n return arg_16"} +{"_id": "doc_158", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Get the very latest state from the database, if a session is passed,\n we use and looking up the state becomes part of the session, otherwise\n a new session is used.\n \"\"\"\n arg_2 = TaskInstance\n arg_3 = arg_1.query(arg_2).filter(\n arg_2.dag_id == arg_0.dag_id,\n arg_2.task_id == arg_0.task_id,\n arg_2.execution_date == arg_0.execution_date,\n ).all()\n if arg_3:\n arg_4 = arg_3[0].state\n else:\n arg_4 = None\n return arg_4"} +{"_id": "doc_159", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Forces the task instance's state to FAILED in the database.\n \"\"\"\n arg_0.log.Func(\"Recording the task instance as FAILED\")\n arg_0.state = State.FAILED\n arg_1.merge(arg_0)\n arg_1.commit()"} +{"_id": "doc_160", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Clears all XCom data from the database for the task instance\n \"\"\"\n arg_1.query(XCom).filter(\n XCom.dag_id == arg_0.dag_id,\n XCom.task_id == arg_0.task_id,\n XCom.execution_date == arg_0.execution_date\n ).delete()\n arg_1.commit()"} +{"_id": "doc_161", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a tuple that identifies the task instance uniquely\n \"\"\"\n return arg_0.dag_id, arg_0.task_id, arg_0.execution_date, arg_0.try_number"} +{"_id": "doc_162", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=arg_4,\n arg_5=False):\n \"\"\"\n Pull XComs that optionally meet certain criteria.\n\n The default value for `key` limits the search to XComs\n that were returned by other tasks (as opposed to those that were pushed\n manually). To remove this filter, pass key=None (or any desired value).\n\n If a single task_id string is provided, the result is the value of the\n most recent matching XCom from that task_id. 
If multiple task_ids are\n provided, a tuple of matching values is returned. None is returned\n whenever no matches are found.\n\n :param key: A key for the XCom. If provided, only XComs with matching\n keys will be returned. The default key is 'return_value', also\n available as a constant XCOM_RETURN_KEY. This key is automatically\n given to XComs returned by tasks (as opposed to being pushed\n manually). To remove the filter, pass key=None.\n :type key: str\n :param task_ids: Only XComs from tasks with matching ids will be\n pulled. Can pass None to remove the filter.\n :type task_ids: str or iterable of strings (representing task_ids)\n :param dag_id: If provided, only pulls XComs from this DAG.\n If None (default), the DAG of the calling task is used.\n :type dag_id: str\n :param include_prior_dates: If False, only XComs from the current\n execution_date are returned. If True, XComs from previous dates\n are returned as well.\n :type include_prior_dates: bool\n \"\"\"\n\n if arg_2 is None:\n arg_2 = arg_0.dag_id\n\n arg_6 = functools.partial(\n XCom.get_one,\n execution_date=arg_0.execution_date,\n arg_3=arg_3,\n arg_2=arg_2,\n arg_5=arg_5)\n\n if is_container(arg_1):\n return tuple(arg_6(task_id=arg_7) for arg_7 in arg_1)\n else:\n return arg_6(task_id=arg_1)"} +{"_id": "doc_163", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Sets the log context.\n \"\"\"\n arg_0.raw = arg_1\n arg_0._set_context(arg_0)"} +{"_id": "doc_164", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves connection to Google Compute Engine.\n\n :return: Google Compute Engine services object\n :rtype: dict\n \"\"\"\n if not arg_0._conn:\n arg_1 = arg_0._authorize()\n arg_0._conn = build('compute', arg_0.api_version,\n http=arg_1, cache_discovery=False)\n return arg_0._conn"} +{"_id": "doc_165", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Sets machine type of an instance defined by project_id, zone and resource_id.\n Must be called with keyword arguments rather than positional.\n\n :param zone: Google Cloud Platform zone where the instance exists.\n :type zone: str\n :param resource_id: Name of the Compute Engine instance resource\n :type resource_id: str\n :param body: Body required by the Compute Engine setMachineType API,\n as described in\n https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType\n :type body: dict\n :param project_id: Optional, Google Cloud Platform project ID where the\n Compute Engine Instance exists. If set to None or missing,\n the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_5 = arg_0._execute_Func(arg_1, arg_2, arg_3, arg_4)\n try:\n arg_6 = arg_5[\"name\"]\n except KeyError:\n raise AirflowException(\n \"Wrong response '{}' returned - it should contain \"\n \"'name' field\".format(arg_5))\n arg_0._wait_for_operation_to_complete(arg_4=arg_4,\n arg_6=arg_6,\n arg_1=arg_1)"} +{"_id": "doc_166", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Retrieves instance template by project_id and resource_id.\n Must be called with keyword arguments rather than positional.\n\n :param resource_id: Name of the instance template\n :type resource_id: str\n :param project_id: Optional, Google Cloud Platform project ID where the\n Compute Engine Instance exists. 
If set to None or missing,\n the default project_id from the GCP connection is used.\n :type project_id: str\n :return: Instance template representation as object according to\n https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates\n :rtype: dict\n \"\"\"\n arg_3 = arg_0.get_conn().instanceTemplates().get(\n project=arg_2,\n instanceTemplate=arg_1\n ).execute(num_retries=arg_0.num_retries)\n return arg_3"} +{"_id": "doc_167", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Inserts instance template using body specified\n Must be called with keyword arguments rather than positional.\n\n :param body: Instance template representation as object according to\n https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates\n :type body: dict\n :param request_id: Optional, unique request_id that you might add to achieve\n full idempotence (for example when client call times out repeating the request\n with the same request id will not create a new instance template again)\n It should be in UUID format as defined in RFC 4122\n :type request_id: str\n :param project_id: Optional, Google Cloud Platform project ID where the\n Compute Engine Instance exists. If set to None or missing,\n the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_4 = arg_0.get_conn().instanceTemplates().insert(\n project=arg_3,\n arg_1=arg_1,\n requestId=arg_2\n ).execute(num_retries=arg_0.num_retries)\n try:\n arg_5 = arg_4[\"name\"]\n except KeyError:\n raise AirflowException(\n \"Wrong response '{}' returned - it should contain \"\n \"'name' field\".format(arg_4))\n arg_0._wait_for_operation_to_complete(arg_3=arg_3,\n arg_5=arg_5)"} +{"_id": "doc_168", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Retrieves Instance Group Manager by project_id, zone and resource_id.\n Must be called with keyword arguments rather than positional.\n\n :param zone: Google Cloud Platform zone where the Instance Group Manager exists\n :type zone: str\n :param resource_id: Name of the Instance Group Manager\n :type resource_id: str\n :param project_id: Optional, Google Cloud Platform project ID where the\n Compute Engine Instance exists. 
If set to None or missing,\n the default project_id from the GCP connection is used.\n :type project_id: str\n :return: Instance group manager representation as object according to\n https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers\n :rtype: dict\n \"\"\"\n arg_4 = arg_0.get_conn().instanceGroupManagers().get(\n project=arg_3,\n arg_1=arg_1,\n instanceGroupManager=arg_2\n ).execute(num_retries=arg_0.num_retries)\n return arg_4"} +{"_id": "doc_169", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4=None, arg_5=None):\n \"\"\"\n Patches Instance Group Manager with the specified body.\n Must be called with keyword arguments rather than positional.\n\n :param zone: Google Cloud Platform zone where the Instance Group Manager exists\n :type zone: str\n :param resource_id: Name of the Instance Group Manager\n :type resource_id: str\n :param body: Instance Group Manager representation as json-merge-patch object\n according to\n https://cloud.google.com/compute/docs/reference/rest/beta/instanceTemplates/patch\n :type body: dict\n :param request_id: Optional, unique request_id that you might add to achieve\n full idempotence (for example when client call times out repeating the request\n with the same request id will not create a new instance template again).\n It should be in UUID format as defined in RFC 4122\n :type request_id: str\n :param project_id: Optional, Google Cloud Platform project ID where the\n Compute Engine Instance exists. If set to None or missing,\n the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_6 = arg_0.get_conn().instanceGroupManagers().patch(\n project=arg_5,\n arg_1=arg_1,\n instanceGroupManager=arg_2,\n arg_3=arg_3,\n requestId=arg_4\n ).execute(num_retries=arg_0.num_retries)\n try:\n arg_7 = arg_6[\"name\"]\n except KeyError:\n raise AirflowException(\n \"Wrong response '{}' returned - it should contain \"\n \"'name' field\".format(arg_6))\n arg_0._wait_for_operation_to_complete(arg_5=arg_5,\n arg_7=arg_7,\n arg_1=arg_1)"} +{"_id": "doc_170", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Waits for the named operation to complete - checks status of the async call.\n\n :param operation_name: name of the operation\n :type operation_name: str\n :param zone: optional region of the request (might be None for global operations)\n :type zone: str\n :return: None\n \"\"\"\n arg_4 = arg_0.get_conn()\n while True:\n if arg_3 is None:\n # noinspection PyTypeChecker\n arg_5 = arg_0._check_global_operation_status(\n arg_4, arg_2, arg_1)\n else:\n # noinspection PyTypeChecker\n arg_5 = arg_0._check_zone_operation_status(\n arg_4, arg_2, arg_1, arg_3, arg_0.num_retries)\n if arg_5.get(\"status\") == GceOperationStatus.DONE:\n arg_6 = arg_5.get(\"error\")\n if arg_6:\n arg_7 = arg_5.get(\"httpErrorStatusCode\")\n arg_8 = arg_5.get(\"httpErrorMessage\")\n # Extracting the errors list as string and trimming square braces\n arg_9 = str(arg_6.get(\"errors\"))[1:-1]\n raise AirflowException(\"{} {}: \".format(arg_7, arg_8) + arg_9)\n # No meaningful info to return from the response in case of success\n return\n time.sleep(TIME_TO_SLEEP_IN_SECONDS)"} +{"_id": "doc_171", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if bucket_name exists.\n\n :param bucket_name: the name of the bucket\n :type bucket_name: str\n \"\"\"\n try:\n arg_0.get_conn().head_bucket(Bucket=arg_1)\n return True\n except ClientError as e:\n 
arg_0.log.info(e.response[\"Error\"][\"Message\"])\n return False"} +{"_id": "doc_172", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Checks that a prefix exists in a bucket\n\n :param bucket_name: the name of the bucket\n :type bucket_name: str\n :param prefix: a key prefix\n :type prefix: str\n :param delimiter: the delimiter marks key hierarchy.\n :type delimiter: str\n \"\"\"\n arg_2 = arg_2 + arg_3 if arg_2[-1] != arg_3 else arg_2\n arg_4 = re.split(r'(\\w+[{d}])$'.format(d=arg_3), arg_2, 1)\n arg_5 = arg_4[0]\n arg_6 = arg_0.list_prefixes(arg_1, arg_5, arg_3)\n return False if arg_6 is None else arg_2 in arg_6"} +{"_id": "doc_173", "title": "", "text": "def Func(arg_0, arg_1, arg_2='', arg_3='',\n arg_4=None, arg_5=None):\n \"\"\"\n Lists prefixes in a bucket under prefix\n\n :param bucket_name: the name of the bucket\n :type bucket_name: str\n :param prefix: a key prefix\n :type prefix: str\n :param delimiter: the delimiter marks key hierarchy.\n :type delimiter: str\n :param page_size: pagination size\n :type page_size: int\n :param max_items: maximum items to return\n :type max_items: int\n \"\"\"\n arg_6 = {\n 'PageSize': arg_4,\n 'MaxItems': arg_5,\n }\n\n arg_7 = arg_0.get_conn().get_paginator('list_objects_v2')\n arg_8 = arg_7.paginate(Bucket=arg_1,\n Prefix=arg_2,\n Delimiter=arg_3,\n PaginationConfig=arg_6)\n\n arg_9 = False\n arg_10 = []\n for arg_11 in arg_8:\n if 'CommonPrefixes' in arg_11:\n arg_9 = True\n for arg_12 in arg_11['CommonPrefixes']:\n arg_10.append(arg_12['Prefix'])\n\n if arg_9:\n return arg_10"} +{"_id": "doc_174", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Returns a boto3.s3.Object\n\n :param key: the path to the key\n :type key: str\n :param bucket_name: the name of the bucket\n :type bucket_name: str\n \"\"\"\n if not arg_2:\n (arg_2, arg_1) = arg_0.parse_s3_url(arg_1)\n\n arg_3 = arg_0.get_resource_type('s3').Object(arg_2, arg_1)\n arg_3.load()\n return arg_3"} +{"_id": "doc_175", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Reads a key from S3\n\n :param key: S3 key that will point to the file\n :type key: str\n :param bucket_name: Name of the bucket in which the file is stored\n :type bucket_name: str\n \"\"\"\n\n arg_3 = arg_0.get_key(arg_1, arg_2)\n return arg_3.get()['Body'].read().decode('utf-8')"} +{"_id": "doc_176", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=False,\n arg_5=False):\n \"\"\"\n Loads a local file to S3\n\n :param filename: name of the file to load.\n :type filename: str\n :param key: S3 key that will point to the file\n :type key: str\n :param bucket_name: Name of the bucket in which to store the file\n :type bucket_name: str\n :param replace: A flag to decide whether or not to overwrite the key\n if it already exists. 
If replace is False and the key exists, an\n error will be raised.\n :type replace: bool\n :param encrypt: If True, the file will be encrypted on the server-side\n by S3 and will be stored in an encrypted form while at rest in S3.\n :type encrypt: bool\n \"\"\"\n if not arg_3:\n (arg_3, arg_2) = arg_0.parse_s3_url(arg_2)\n\n if not arg_4 and arg_0.check_for_key(arg_2, arg_3):\n raise ValueError(\"The key {key} already exists.\".format(arg_2=arg_2))\n\n arg_6 = {}\n if arg_5:\n arg_6['ServerSideEncryption'] = \"AES256\"\n\n arg_7 = arg_0.get_conn()\n arg_7.upFunc(arg_1, arg_3, arg_2, ExtraArgs=arg_6)"} +{"_id": "doc_177", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=False,\n arg_5=False,\n arg_6='utf-8'):\n \"\"\"\n Loads a string to S3\n\n This is provided as a convenience to drop a string in S3. It uses the\n boto infrastructure to ship a file to s3.\n\n :param string_data: str to set as content for the key.\n :type string_data: str\n :param key: S3 key that will point to the file\n :type key: str\n :param bucket_name: Name of the bucket in which to store the file\n :type bucket_name: str\n :param replace: A flag to decide whether or not to overwrite the key\n if it already exists\n :type replace: bool\n :param encrypt: If True, the file will be encrypted on the server-side\n by S3 and will be stored in an encrypted form while at rest in S3.\n :type encrypt: bool\n \"\"\"\n arg_0.load_bytes(arg_1.encode(arg_6),\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5)"} +{"_id": "doc_178", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=False,\n arg_5=False):\n \"\"\"\n Loads bytes to S3\n\n This is provided as a convenience to drop a string in S3. It uses the\n boto infrastructure to ship a file to s3.\n\n :param bytes_data: bytes to set as content for the key.\n :type bytes_data: bytes\n :param key: S3 key that will point to the file\n :type key: str\n :param bucket_name: Name of the bucket in which to store the file\n :type bucket_name: str\n :param replace: A flag to decide whether or not to overwrite the key\n if it already exists\n :type replace: bool\n :param encrypt: If True, the file will be encrypted on the server-side\n by S3 and will be stored in an encrypted form while at rest in S3.\n :type encrypt: bool\n \"\"\"\n if not arg_3:\n (arg_3, arg_2) = arg_0.parse_s3_url(arg_2)\n\n if not arg_4 and arg_0.check_for_key(arg_2, arg_3):\n raise ValueError(\"The key {key} already exists.\".format(arg_2=arg_2))\n\n arg_6 = {}\n if arg_5:\n arg_6['ServerSideEncryption'] = \"AES256\"\n\n arg_7 = BytesIO(arg_1)\n\n arg_8 = arg_0.get_conn()\n arg_8.upload_fileobj(arg_7, arg_3, arg_2, ExtraArgs=arg_6)"} +{"_id": "doc_179", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=False,\n arg_5=False):\n \"\"\"\n Loads a file object to S3\n\n :param file_obj: The file-like object to set as the content for the S3 key.\n :type file_obj: file-like object\n :param key: S3 key that will point to the file\n :type key: str\n :param bucket_name: Name of the bucket in which to store the file\n :type bucket_name: str\n :param replace: A flag that indicates whether to overwrite the key\n if it already exists.\n :type replace: bool\n :param encrypt: If True, S3 encrypts the file on the server,\n and the file is stored in encrypted form at rest in S3.\n :type encrypt: bool\n \"\"\"\n if not arg_3:\n (arg_3, arg_2) = arg_0.parse_s3_url(arg_2)\n\n if not arg_4 and arg_0.check_for_key(arg_2, arg_3):\n raise 
ValueError(\"The key {key} already exists.\".format(arg_2=arg_2))\n\n arg_6 = {}\n if arg_5:\n arg_6['ServerSideEncryption'] = \"AES256\"\n\n arg_7 = arg_0.get_conn()\n arg_7.upload_fileobj(arg_1, arg_3, arg_2, ExtraArgs=arg_6)"} +{"_id": "doc_180", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None):\n \"\"\"\n Creates a copy of an object that is already stored in S3.\n\n Note: the S3 connection used here needs to have access to both\n source and destination bucket/key.\n\n :param source_bucket_key: The key of the source object.\n\n It can be either full s3:// style url or relative path from root level.\n\n When it's specified as a full s3:// url, please omit source_bucket_name.\n :type source_bucket_key: str\n :param dest_bucket_key: The key of the object to copy to.\n\n The convention to specify `dest_bucket_key` is the same\n as `source_bucket_key`.\n :type dest_bucket_key: str\n :param source_bucket_name: Name of the S3 bucket where the source object is in.\n\n It should be omitted when `source_bucket_key` is provided as a full s3:// url.\n :type source_bucket_name: str\n :param dest_bucket_name: Name of the S3 bucket to where the object is copied.\n\n It should be omitted when `dest_bucket_key` is provided as a full s3:// url.\n :type dest_bucket_name: str\n :param source_version_id: Version ID of the source object (OPTIONAL)\n :type source_version_id: str\n \"\"\"\n\n if arg_4 is None:\n arg_4, arg_2 = arg_0.parse_s3_url(arg_2)\n else:\n arg_6 = urlparse(arg_2)\n if arg_6.scheme != '' or arg_6.netloc != '':\n raise AirflowException('If dest_bucket_name is provided, ' +\n 'dest_bucket_key should be relative path ' +\n 'from root level, rather than a full s3:// url')\n\n if arg_3 is None:\n arg_3, arg_1 = arg_0.parse_s3_url(arg_1)\n else:\n arg_6 = urlparse(arg_1)\n if arg_6.scheme != '' or arg_6.netloc != '':\n raise AirflowException('If source_bucket_name is provided, ' +\n 'source_bucket_key should be relative path ' +\n 'from root level, rather than a full s3:// url')\n\n arg_7 = {'Bucket': arg_3,\n 'Key': arg_1,\n 'VersionId': arg_5}\n arg_8 = arg_0.get_conn().Func(Bucket=arg_4,\n Key=arg_2,\n arg_7=arg_7)\n return arg_8"} +{"_id": "doc_181", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Queries cassandra and returns a cursor to the results.\n \"\"\"\n arg_0.hook = CassandraHook(cassandra_conn_id=arg_0.cassandra_conn_id)\n arg_2 = arg_0.hook.get_conn()\n arg_3 = arg_2.execute(arg_0.cql)\n return arg_3"} +{"_id": "doc_182", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=False, arg_5=None,\n arg_6=None, arg_7='mixed', arg_8=False, **arg_9):\n \"\"\"\n Send an email with html content using sendgrid.\n\n To use this plugin:\n 0. include sendgrid subpackage as part of your Airflow installation, e.g.,\n pip install 'apache-airflow[sendgrid]'\n 1. update [email] backend in airflow.cfg, i.e.,\n [email]\n email_backend = airflow.contrib.utils.sendgrid.Func\n 2. 
configure Sendgrid specific environment variables at all Airflow instances:\n SENDGRID_MAIL_FROM={your-mail-from}\n SENDGRID_API_KEY={your-sendgrid-api-key}.\n \"\"\"\n if arg_3 is None:\n arg_3 = []\n\n arg_10 = Mail()\n arg_11 = arg_9.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')\n arg_12 = arg_9.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')\n arg_10.from_email = Email(arg_11, arg_12)\n arg_10.subject = arg_1\n arg_10.mail_settings = MailSettings()\n\n if arg_8:\n arg_10.mail_settings.sandbox_mode = SandBoxMode(enable=True)\n\n # Add the recipient list of to emails.\n arg_14 = Personalization()\n arg_0 = get_email_address_list(arg_0)\n for arg_15 in arg_0:\n arg_14.add_to(Email(arg_15))\n if arg_5:\n arg_5 = get_email_address_list(arg_5)\n for arg_16 in arg_5:\n arg_14.add_cc(Email(arg_16))\n if arg_6:\n arg_6 = get_email_address_list(arg_6)\n for arg_17 in arg_6:\n arg_14.add_bcc(Email(arg_17))\n\n # Add custom_args to personalization if present\n arg_18 = arg_9.get('personalization_custom_args', None)\n if isinstance(arg_18, dict):\n for arg_19 in arg_18.keys():\n arg_14.add_custom_arg(CustomArg(arg_19, arg_18[arg_19]))\n\n arg_10.add_personalization(arg_14)\n arg_10.add_content(Content('text/html', arg_2))\n\n arg_20 = arg_9.get('categories', [])\n for arg_21 in arg_20:\n arg_10.add_category(Category(arg_21))\n\n # Add email attachment.\n for arg_22 in arg_3:\n arg_23 = os.path.basename(arg_22)\n\n arg_24 = Attachment()\n arg_24.type = mimetypes.guess_type(arg_23)[0]\n arg_24.filename = arg_23\n arg_24.disposition = \"attachment\"\n arg_24.content_id = '<{0}>'.format(arg_23)\n\n with open(arg_22, \"rb\") as f:\n arg_24.content = base64.b64encode(f.read()).decode('utf-8')\n\n arg_10.add_attachment(arg_24)\n _post_sendgrid_mail(arg_10.get())"} +{"_id": "doc_183", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n \"\"\"\n Recognizes audio input\n\n :param config: information to the recognizer that specifies how to process the request.\n https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig\n :type config: dict or google.cloud.speech_v1.types.RecognitionConfig\n :param audio: audio data to be recognized\n https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio\n :type audio: dict or google.cloud.speech_v1.types.RecognitionAudio\n :param retry: (Optional) A retry object used to retry requests. 
If None is specified,\n requests will not be retried.\n :type retry: google.api_core.retry.Retry\n :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.\n Note that if retry is specified, the timeout applies to each individual attempt.\n :type timeout: float\n \"\"\"\n arg_5 = arg_0.get_conn()\n arg_6 = arg_5.recognize(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n arg_0.log.info(\"Recognised speech: %s\" % arg_6)\n return arg_6"} +{"_id": "doc_184", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check whether a potential object is a subclass of\n the AirflowPlugin class.\n\n :param plugin_obj: potential subclass of AirflowPlugin\n :param existing_plugins: Existing list of AirflowPlugin subclasses\n :return: Whether or not the obj is a valid subclass of\n AirflowPlugin\n \"\"\"\n if (\n inspect.isclass(arg_0) and\n issubclass(arg_0, AirflowPlugin) and\n (arg_0 is not AirflowPlugin)\n ):\n arg_0.validate()\n return arg_0 not in arg_1\n return False"} +{"_id": "doc_185", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Sets tasks instances to Funcped from the same dag run.\n\n :param dag_run: the DagRun for which to set the tasks to Funcped\n :param execution_date: execution_date\n :param tasks: tasks to Func (not task_ids)\n :param session: db session to use\n \"\"\"\n if not arg_3:\n return\n\n arg_5 = [d.task_id for d in arg_3]\n arg_6 = timezone.utcnow()\n\n if arg_1:\n arg_4.query(TaskInstance).filter(\n TaskInstance.dag_id == arg_1.dag_id,\n TaskInstance.execution_date == arg_1.execution_date,\n TaskInstance.task_id.in_(arg_5)\n ).update({TaskInstance.state: State.SKIPPED,\n TaskInstance.start_date: arg_6,\n TaskInstance.end_date: arg_6},\n synchronize_session=False)\n arg_4.commit()\n else:\n assert arg_2 is not None, \"Execution date is None and no dag run\"\n\n arg_0.log.warning(\"No DAG RUN present this should not happen\")\n # this is defensive against dag runs that are not complete\n for arg_7 in arg_3:\n arg_8 = TaskInstance(arg_7, arg_2=arg_2)\n arg_8.state = State.SKIPPED\n arg_8.start_date = arg_6\n arg_8.end_date = arg_6\n arg_4.merge(arg_8)\n\n arg_4.commit()"} +{"_id": "doc_186", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=64, arg_4=True,\n arg_5=4194304, arg_6=4194304):\n \"\"\"\n Upload a file to Azure Data Lake.\n\n :param local_path: local path. Can be single file, directory (in which case,\n upload recursively) or glob pattern. Recursive glob patterns using `**`\n are not supported.\n :type local_path: str\n :param remote_path: Remote path to upload to; if multiple files, this is the\n directory root to write within.\n :type remote_path: str\n :param nthreads: Number of threads to use. If None, uses the number of cores.\n :type nthreads: int\n :param overwrite: Whether to forcibly overwrite existing files/directories.\n If False and remote path is a directory, will quit regardless if any files\n would be overwritten or not. If True, only matching filenames are actually\n overwritten.\n :type overwrite: bool\n :param buffersize: int [2**22]\n Number of bytes for internal buffer. This block cannot be bigger than\n a chunk and cannot be smaller than a block.\n :type buffersize: int\n :param blocksize: int [2**22]\n Number of bytes for a block. Within each chunk, we write a smaller\n block for each API call. 
This block cannot be bigger than a chunk.\n :type blocksize: int\n \"\"\"\n multithread.ADLUploader(arg_0.connection,\n lpath=arg_1,\n rpath=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6)"} +{"_id": "doc_187", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n List files in Azure Data Lake Storage\n\n :param path: full path/globstring to use to Func files in ADLS\n :type path: str\n \"\"\"\n if \"*\" in arg_1:\n return arg_0.connection.glob(arg_1)\n else:\n return arg_0.connection.walk(arg_1)"} +{"_id": "doc_188", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Uncompress gz and bz2 files\n \"\"\"\n if arg_1.lower() not in ('.gz', '.bz2'):\n raise NotImplementedError(\"Received {} format. Only gz and bz2 \"\n \"files can currently be uncompressed.\"\n .format(arg_1))\n if arg_1.lower() == '.gz':\n arg_3 = gzip.GzipFile\n elif arg_1.lower() == '.bz2':\n arg_3 = bz2.BZ2File\n with arg_3(arg_0, mode='rb') as f_compressed,\\\n NamedTemporaryFile(dir=arg_2,\n mode='wb',\n delete=False) as f_uncompressed:\n shutil.copyfileobj(f_compressed, f_uncompressed)\n return f_uncompressed.name"} +{"_id": "doc_189", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorates function to execute function at the same time submitting Func\n but in CLI context. It will call action logger callbacks twice,\n one for pre-execution and the other one for post-execution.\n\n Action logger will be called with below keyword parameters:\n sub_command : name of sub-command\n start_datetime : start datetime instance by utc\n end_datetime : end datetime instance by utc\n full_command : full command line arguments\n user : current user\n log : airflow.models.log.Log ORM instance\n dag_id : dag id (optional)\n task_id : task_id (optional)\n execution_date : execution date (optional)\n error : exception instance if there's an exception\n\n :param f: function instance\n :return: wrapped function\n \"\"\"\n @functools.wraps(arg_0)\n def wrapper(*arg_1, **arg_2):\n \"\"\"\n An wrapper for cli functions. It assumes to have Namespace instance\n at 1st positional argument\n :param args: Positional argument. 
It assumes to have Namespace instance\n at 1st positional argument\n :param kwargs: A passthrough keyword argument\n \"\"\"\n assert arg_1\n assert isinstance(arg_1[0], Namespace), \\\n \"1st positional argument should be argparse.Namespace instance, \" \\\n \"but {}\".format(arg_1[0])\n arg_3 = _build_metrics(arg_0.__name__, arg_1[0])\n cli_action_loggers.on_pre_execution(**arg_3)\n try:\n return arg_0(*arg_1, **arg_2)\n except Exception as e:\n arg_3['error'] = e\n raise\n finally:\n arg_3['end_datetime'] = datetime.utcnow()\n cli_action_loggers.on_post_execution(**arg_3)\n\n return wrapper"} +{"_id": "doc_190", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Builds metrics dict from function args\n It assumes that function arguments is from airflow.bin.cli module's function\n and has Namespace instance where it optionally contains \"dag_id\", \"task_id\",\n and \"execution_date\".\n\n :param func_name: name of function\n :param namespace: Namespace instance from argparse\n :return: dict with metrics\n \"\"\"\n\n arg_2 = {'sub_command': arg_0, 'start_datetime': datetime.utcnow(),\n 'full_command': '{}'.format(list(sys.argv)), 'user': getpass.getuser()}\n\n assert isinstance(arg_1, Namespace)\n arg_3 = vars(arg_1)\n arg_2['dag_id'] = arg_3.get('dag_id')\n arg_2['task_id'] = arg_3.get('task_id')\n arg_2['execution_date'] = arg_3.get('execution_date')\n arg_2['host_name'] = socket.gethostname()\n\n arg_4 = json.dumps(dict((k, arg_2[k]) for k in ('host_name', 'full_command')))\n arg_5 = Log(\n event='cli_{}'.format(arg_0),\n task_instance=None,\n owner=arg_2['user'],\n arg_4=arg_4,\n task_id=arg_2.get('task_id'),\n dag_id=arg_2.get('dag_id'),\n execution_date=arg_2.get('execution_date'))\n arg_2['log'] = arg_5\n return arg_2"} +{"_id": "doc_191", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create the specified cgroup.\n\n :param path: The path of the cgroup to create.\n E.g. 
cpu/mygroup/mysubgroup\n :return: the Node associated with the created cgroup.\n :rtype: cgroupspy.nodes.Node\n \"\"\"\n arg_2 = trees.Tree().root\n arg_3 = arg_1.split(os.sep)\n for arg_4 in arg_3:\n arg_5 = {x.name: x for x in arg_2.children}\n if arg_4 not in arg_5:\n arg_0.log.debug(\"Creating cgroup %s in %s\", arg_4, arg_2.path)\n arg_2 = arg_2.create_cgroup(arg_4)\n else:\n arg_0.log.debug(\n \"Not creating cgroup %s in %s since it already exists\",\n arg_4, arg_2.path\n )\n arg_2 = arg_5[arg_4]\n return arg_2"} +{"_id": "doc_192", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The purpose of this function is to be robust to improper connections\n settings provided by users, specifically in the host field.\n\n For example -- when users supply ``https://xx.cloud.databricks.com`` as the\n host, we must strip out the protocol to get the host.::\n\n h = DatabricksHook()\n assert h.Func('https://xx.cloud.databricks.com') == \\\n 'xx.cloud.databricks.com'\n\n In the case where users supply the correct ``xx.cloud.databricks.com`` as the\n host, this function is a no-op.::\n\n assert h.Func('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'\n\n \"\"\"\n arg_1 = urlparse.urlparse(arg_0).hostname\n if arg_1:\n # In this case, host = https://xx.cloud.databricks.com\n return arg_1\n else:\n # In this case, host = xx.cloud.databricks.com\n return arg_0"} +{"_id": "doc_193", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Utility function to perform an API call with retries\n\n :param endpoint_info: Tuple of method and endpoint\n :type endpoint_info: tuple[string, string]\n :param json: Parameters for this API call.\n :type json: dict\n :return: If the api call returns a OK status code,\n this function returns the response in JSON. Otherwise,\n we throw an AirflowException.\n :rtype: dict\n \"\"\"\n arg_3, arg_4 = arg_1\n arg_5 = 'https://{host}/{endpoint}'.format(\n host=arg_0._parse_host(arg_0.databricks_conn.host),\n arg_4=arg_4)\n if 'token' in arg_0.databricks_conn.extra_dejson:\n arg_0.log.info('Using token auth.')\n arg_6 = _TokenAuth(arg_0.databricks_conn.extra_dejson['token'])\n else:\n arg_0.log.info('Using basic auth.')\n arg_6 = (arg_0.databricks_conn.login, arg_0.databricks_conn.password)\n if arg_3 == 'GET':\n arg_7 = requests.get\n elif arg_3 == 'POST':\n arg_7 = requests.post\n else:\n raise AirflowException('Unexpected HTTP Method: ' + arg_3)\n\n arg_8 = 1\n while True:\n try:\n arg_9 = arg_7(\n arg_5,\n arg_2=arg_2,\n arg_6=arg_6,\n headers=USER_AGENT_HEADER,\n timeout=arg_0.timeout_seconds)\n arg_9.raise_for_status()\n return arg_9.json()\n except requests_exceptions.RequestException as e:\n if not _retryable_error(e):\n # In this case, the user probably made a mistake.\n # Don't retry.\n raise AirflowException('Response: {0}, Status Code: {1}'.format(\n e.response.content, e.response.status_code))\n\n arg_0._log_request_error(arg_8, e)\n\n if arg_8 == arg_0.retry_limit:\n raise AirflowException(('API requests to Databricks failed {} times. 
' +\n 'Giving up.').format(arg_0.retry_limit))\n\n arg_8 += 1\n sleep(arg_0.retry_delay)"} +{"_id": "doc_194", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Sign into Salesforce, only if we are not already signed in.\n \"\"\"\n if not arg_0.conn:\n arg_1 = arg_0.Funcection(arg_0.conn_id)\n arg_2 = arg_1.extra_dejson\n arg_0.conn = Salesforce(\n username=arg_1.login,\n password=arg_1.password,\n security_token=arg_2['security_token'],\n instance_url=arg_1.host,\n sandbox=arg_2.get('sandbox', False)\n )\n return arg_0.conn"} +{"_id": "doc_195", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Make a query to Salesforce.\n\n :param query: The query to make to Salesforce.\n :type query: str\n :return: The query result.\n :rtype: dict\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_0.log.info(\"Querying for all objects\")\n arg_3 = arg_2.query_all(arg_1)\n\n arg_0.log.info(\"Received results: Total size: %s; Done: %s\",\n arg_3['totalSize'], arg_3['done'])\n\n return arg_3"} +{"_id": "doc_196", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the description of an object from Salesforce.\n This description is the object's schema and\n some extra metadata that Salesforce stores for each object.\n\n :param obj: The name of the Salesforce object that we are getting a description of.\n :type obj: str\n :return: the description of the Salesforce object.\n :rtype: dict\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n return arg_2.__getattr__(arg_1).describe()"} +{"_id": "doc_197", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get all instances of the `object` from Salesforce.\n For each model, only get the fields specified in fields.\n\n All we really do underneath the hood is run:\n SELECT FROM ;\n\n :param obj: The object name to get from Salesforce.\n :type obj: str\n :param fields: The fields to get from the object.\n :type fields: iterable\n :return: all instances of the object from Salesforce.\n :rtype: dict\n \"\"\"\n arg_3 = \"SELECT {} FROM {}\".format(\",\".join(arg_2), arg_1)\n\n arg_0.log.info(\"Making query to Salesforce: %s\",\n arg_3 if len(arg_3) < 30 else \" ... 
\".join([arg_3[:15], arg_3[-15:]]))\n\n return arg_0.make_query(arg_3)"} +{"_id": "doc_198", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert a column of a dataframe to UNIX timestamps if applicable\n\n :param column: A Series object representing a column of a dataframe.\n :type column: pd.Series\n :return: a new series that maintains the same index as the original\n :rtype: pd.Series\n \"\"\"\n # try and convert the column to datetimes\n # the column MUST have a four digit year somewhere in the string\n # there should be a better way to do this,\n # but just letting pandas try and convert every column without a format\n # caused it to convert floats as well\n # For example, a column of integers\n # between 0 and 10 are turned into timestamps\n # if the column cannot be converted,\n # just return the original column untouched\n try:\n arg_1 = pd.to_datetime(arg_1)\n except ValueError:\n arg_2 = LoggingMixin().log\n arg_2.warning(\"Could not convert field to timestamps: %s\", arg_1.name)\n return arg_1\n\n # now convert the newly created datetimes into timestamps\n # we have to be careful here\n # because NaT cannot be converted to a timestamp\n # so we have to return NaN\n arg_3 = []\n for arg_4 in arg_1:\n try:\n arg_3.append(arg_4.timestamp())\n except (ValueError, AttributeError):\n arg_3.append(pd.np.NaN)\n\n return pd.Series(arg_3, index=arg_1.index)"} +{"_id": "doc_199", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=\"csv\",\n arg_4=False,\n arg_5=False):\n \"\"\"\n Write query results to file.\n\n Acceptable formats are:\n - csv:\n comma-separated-values file. This is the default format.\n - json:\n JSON array. Each element in the array is a different row.\n - ndjson:\n JSON array but each element is new-line delimited instead of comma delimited like in `json`\n\n This requires a significant amount of cleanup.\n Pandas doesn't handle output to CSV and json in a uniform way.\n This is especially painful for datetime types.\n Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.\n\n By default, this function will try and leave all values as they are represented in Salesforce.\n You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).\n This is can be greatly beneficial as it will make all of your datetime fields look the same,\n and makes it easier to work with in other database environments\n\n :param query_results: the results from a SQL query\n :type query_results: list of dict\n :param filename: the name of the file where the data should be dumped to\n :type filename: str\n :param fmt: the format you want the output in. Default: 'csv'\n :type fmt: str\n :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.\n False if you want them to be left in the same format as they were in Salesforce.\n Leaving the value as False will result in datetimes being strings. Default: False\n :type coerce_to_timestamp: bool\n :param record_time_added: True if you want to add a Unix timestamp field\n to the resulting data that marks when the data was fetched from Salesforce. 
Default: False\n :type record_time_added: bool\n :return: the dataframe that gets written to the file.\n :rtype: pd.Dataframe\n \"\"\"\n arg_3 = arg_3.lower()\n if arg_3 not in ['csv', 'json', 'ndjson']:\n raise ValueError(\"Format value is not recognized: {}\".format(arg_3))\n\n # this line right here will convert all integers to floats\n # if there are any None/np.nan values in the column\n # that's because None/np.nan cannot exist in an integer column\n # we should write all of our timestamps as FLOATS in our final schema\n arg_6 = pd.DataFrame.from_records(arg_1, exclude=[\"attributes\"])\n\n arg_6.columns = [column.lower() for column in arg_6.columns]\n\n # convert columns with datetime strings to datetimes\n # not all strings will be datetimes, so we ignore any errors that occur\n # we get the object's definition at this point and only consider\n # features that are DATE or DATETIME\n if arg_4 and arg_6.shape[0] > 0:\n # get the object name out of the query results\n # it's stored in the \"attributes\" dictionary\n # for each returned record\n arg_8 = arg_1[0]['attributes']['type']\n\n arg_0.log.info(\"Coercing timestamps for: %s\", arg_8)\n\n arg_9 = arg_0.describe_object(arg_8)\n\n # possible columns that can be converted to timestamps\n # are the ones that are either date or datetime types\n # strings are too general and we risk unintentional conversion\n arg_10 = [\n field['name'].lower()\n for field in arg_9['fields']\n if field['type'] in [\"date\", \"datetime\"] and field['name'].lower() in arg_6.columns\n ]\n arg_6[arg_10] = arg_6[arg_10].apply(arg_0._to_timestamp)\n\n if arg_5:\n arg_11 = time.time()\n arg_6[\"time_fetched_from_salesforce\"] = arg_11\n\n # write the CSV or JSON file depending on the option\n # NOTE:\n # datetimes here are an issue.\n # There is no good way to manage the difference\n # for to_json, the options are an epoch or a ISO string\n # but for to_csv, it will be a string output by datetime\n # For JSON we decided to output the epoch timestamp in seconds\n # (as is fairly standard for JavaScript)\n # And for csv, we do a string\n if arg_3 == \"csv\":\n # there are also a ton of newline objects that mess up our ability to write to csv\n # we remove these newlines so that the output is a valid CSV format\n arg_0.log.info(\"Cleaning data and writing to CSV\")\n arg_12 = arg_6.columns[arg_6.dtypes == \"object\"]\n arg_6[arg_12] = arg_6[arg_12].apply(\n lambda x: x.str.replace(\"\\r\\n\", \"\").str.replace(\"\\n\", \"\")\n )\n # write the dataframe\n arg_6.to_csv(arg_2, index=False)\n elif arg_3 == \"json\":\n arg_6.to_json(arg_2, \"records\", date_unit=\"s\")\n elif arg_3 == \"ndjson\":\n arg_6.to_json(arg_2, \"records\", lines=True, date_unit=\"s\")\n\n return arg_6"} +{"_id": "doc_200", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Fetches a mongo collection object for querying.\n\n Uses connection schema as DB unless specified.\n \"\"\"\n arg_2 = arg_2 if arg_2 is not None else arg_0.connection.schema\n arg_3 = arg_0.get_conn()\n\n return arg_3.get_database(arg_2).Func(arg_1)"} +{"_id": "doc_201", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2='INBOX',\n arg_3=False,\n arg_4=False,\n arg_5='raise'):\n \"\"\"\n Retrieves mail's attachments in the mail folder by its name.\n\n :param name: The name of the attachment that will be downloaded.\n :type name: str\n :param mail_folder: The mail folder where to look at.\n :type mail_folder: str\n :param check_regex: Checks the name for a regular expression.\n :type check_regex: 
bool\n :param latest_only: If set to True it will only retrieve\n the first matched attachment.\n :type latest_only: bool\n :param not_found_mode: Specify what should happen if no attachment has been found.\n Supported values are 'raise', 'warn' and 'ignore'.\n If it is set to 'raise' it will raise an exception,\n if set to 'warn' it will only print a warning and\n if set to 'ignore' it won't notify you at all.\n :type not_found_mode: str\n :returns: a list of tuple each containing the attachment filename and its payload.\n :rtype: a list of tuple\n \"\"\"\n arg_6 = arg_0._retrieve_mails_attachments_by_name(arg_1,\n arg_2,\n arg_3,\n arg_4)\n if not arg_6:\n arg_0._handle_not_found_mode(arg_5)\n\n return arg_6"} +{"_id": "doc_202", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3='INBOX',\n arg_4=False,\n arg_5=False,\n arg_6='raise'):\n \"\"\"\n Downloads mail's attachments in the mail folder by its name to the local directory.\n\n :param name: The name of the attachment that will be downloaded.\n :type name: str\n :param local_output_directory: The output directory on the local machine\n where the files will be downloaded to.\n :type local_output_directory: str\n :param mail_folder: The mail folder where to look at.\n :type mail_folder: str\n :param check_regex: Checks the name for a regular expression.\n :type check_regex: bool\n :param latest_only: If set to True it will only download\n the first matched attachment.\n :type latest_only: bool\n :param not_found_mode: Specify what should happen if no attachment has been found.\n Supported values are 'raise', 'warn' and 'ignore'.\n If it is set to 'raise' it will raise an exception,\n if set to 'warn' it will only print a warning and\n if set to 'ignore' it won't notify you at all.\n :type not_found_mode: str\n \"\"\"\n arg_7 = arg_0._retrieve_mails_attachments_by_name(arg_1,\n arg_3,\n arg_4,\n arg_5)\n\n if not arg_7:\n arg_0._handle_not_found_mode(arg_6)\n\n arg_0._create_files(arg_7, arg_2)"} +{"_id": "doc_203", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Gets all attachments by name for the mail.\n\n :param name: The name of the attachment to look for.\n :type name: str\n :param check_regex: Checks the name for a regular expression.\n :type check_regex: bool\n :param find_first: If set to True it will only find the first match and then quit.\n :type find_first: bool\n :returns: a list of tuples each containing name and payload\n where the attachments name matches the given name.\n :rtype: list of tuple\n \"\"\"\n arg_4 = []\n\n for arg_5 in arg_0.mail.walk():\n arg_6 = MailPart(arg_5)\n if arg_6.is_attachment():\n arg_7 = arg_6.has_matching_name(arg_1) if arg_2 \\\n else arg_6.has_equal_name(arg_1)\n if arg_7:\n arg_8, arg_9 = arg_6.get_file()\n arg_0.log.info('Found attachment: {}'.format(arg_8))\n arg_4.append((arg_8, arg_9))\n if arg_3:\n break\n\n return arg_4"} +{"_id": "doc_204", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write batch records to Kinesis Firehose\n \"\"\"\n\n arg_2 = arg_0.get_conn()\n\n arg_3 = arg_2.put_record_batch(\n DeliveryStreamName=arg_0.delivery_stream,\n Records=arg_1\n )\n\n return arg_3"} +{"_id": "doc_205", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Determines whether a task is ready to be rescheduled. Only tasks in\n NONE state with at least one row in task_reschedule table are\n handled by this dependency class, otherwise this dependency is\n considered as passed. 
This dependency fails if the latest reschedule\n request's reschedule date is still in future.\n \"\"\"\n if arg_3.ignore_in_reschedule_period:\n yield arg_0._passing_status(\n reason=\"The context specified that being in a reschedule period was \"\n \"permitted.\")\n return\n\n if arg_1.state not in arg_0.RESCHEDULEABLE_STATES:\n yield arg_0._passing_status(\n reason=\"The task instance is not in State_UP_FOR_RESCHEDULE or NONE state.\")\n return\n\n arg_4 = TaskReschedule.find_for_task_instance(task_instance=arg_1)\n if not arg_4:\n yield arg_0._passing_status(\n reason=\"There is no reschedule request for this task instance.\")\n return\n\n arg_5 = timezone.utcnow()\n arg_6 = arg_4[-1].reschedule_date\n if arg_5 >= arg_6:\n yield arg_0._passing_status(\n reason=\"Task instance id ready for reschedule.\")\n return\n\n yield arg_0._failing_status(\n reason=\"Task is not ready for reschedule yet but will be rescheduled \"\n \"automatically. Current date is {0} and task will be rescheduled \"\n \"at {1}.\".format(arg_5.isoformat(), arg_6.isoformat()))"} +{"_id": "doc_206", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4=False, arg_5=None, arg_6=None,\n arg_7='mixed', arg_8='utf-8',\n **arg_9):\n \"\"\"\n Send an email with html content\n\n >>> send_email('test@example.com', 'foo', 'Foo bar', ['/dev/null'], dryrun=True)\n \"\"\"\n arg_10 = configuration.conf.get('smtp', 'SMTP_MAIL_FROM')\n\n arg_0 = get_email_address_list(arg_0)\n\n arg_11 = MIMEMultipart(arg_7)\n arg_11['Subject'] = arg_1\n arg_11['From'] = arg_10\n arg_11['To'] = \", \".join(arg_0)\n arg_12 = arg_0\n if arg_5:\n arg_5 = get_email_address_list(arg_5)\n arg_11['CC'] = \", \".join(arg_5)\n arg_12 = arg_12 + arg_5\n\n if arg_6:\n # don't add bcc in header\n arg_6 = get_email_address_list(arg_6)\n arg_12 = arg_12 + arg_6\n\n arg_11['Date'] = formatdate(localtime=True)\n arg_13 = MIMEText(arg_2, 'html', arg_8)\n arg_11.attach(arg_13)\n\n for arg_14 in arg_3 or []:\n arg_15 = os.path.basename(arg_14)\n with open(arg_14, \"rb\") as f:\n arg_16 = MIMEApplication(\n f.read(),\n Name=arg_15\n )\n arg_16['Content-Disposition'] = 'attachment; filename=\"%s\"' % arg_15\n arg_16['Content-ID'] = '<%s>' % arg_15\n arg_11.attach(arg_16)\n\n send_MIME_email(arg_10, arg_12, arg_11, arg_4)"} +{"_id": "doc_207", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Check if a blob exists on Azure Blob Storage.\n\n :param container_name: Name of the container.\n :type container_name: str\n :param blob_name: Name of the blob.\n :type blob_name: str\n :param kwargs: Optional keyword arguments that\n `BlockBlobService.exists()` takes.\n :type kwargs: object\n :return: True if the blob exists, False otherwise.\n :rtype: bool\n \"\"\"\n return arg_0.connection.exists(arg_1, arg_2, **arg_3)"} +{"_id": "doc_208", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Check if a prefix exists on Azure Blob storage.\n\n :param container_name: Name of the container.\n :type container_name: str\n :param prefix: Prefix of the blob.\n :type prefix: str\n :param kwargs: Optional keyword arguments that\n `BlockBlobService.list_blobs()` takes.\n :type kwargs: object\n :return: True if blobs matching the prefix exist, False otherwise.\n :rtype: bool\n \"\"\"\n arg_4 = arg_0.connection.list_blobs(arg_1, arg_2,\n num_results=1, **arg_3)\n return len(list(arg_4)) > 0"} +{"_id": "doc_209", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False,\n arg_4=False, **arg_5):\n \"\"\"\n Delete a file 
from Azure Blob Storage.\n\n :param container_name: Name of the container.\n :type container_name: str\n :param blob_name: Name of the blob.\n :type blob_name: str\n :param is_prefix: If blob_name is a prefix, delete all matching files\n :type is_prefix: bool\n :param ignore_if_missing: if True, then return success even if the\n blob does not exist.\n :type ignore_if_missing: bool\n :param kwargs: Optional keyword arguments that\n `BlockBlobService.create_blob_from_path()` takes.\n :type kwargs: object\n \"\"\"\n\n if arg_3:\n arg_6 = [\n blob.name for blob in arg_0.connection.list_blobs(\n arg_1, prefix=arg_2, **arg_5\n )\n ]\n elif arg_0.check_for_blob(arg_1, arg_2):\n arg_6 = [arg_2]\n else:\n arg_6 = []\n\n if not arg_4 and len(arg_6) == 0:\n raise AirflowException('Blob(s) not found: {}'.format(arg_2))\n\n for arg_7 in arg_6:\n arg_0.log.info(\"Deleting blob: \" + arg_7)\n arg_0.connection.delete_blob(arg_1,\n arg_7,\n delete_snapshots='include',\n **arg_5)"} +{"_id": "doc_210", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=None):\n \"\"\"\n Transfers the remote file to a local location.\n\n If local_full_path_or_buffer is a string path, the file will be put\n at that location; if it is a file-like buffer, the file will\n be written to the buffer but not closed.\n\n :param remote_full_path: full path to the remote file\n :type remote_full_path: str\n :param local_full_path_or_buffer: full path to the local file or a\n file-like buffer\n :type local_full_path_or_buffer: str or file-like buffer\n :param callback: callback which is called each time a block of data\n is read. if you do not use a callback, these blocks will be written\n to the file or buffer passed in. if you do pass in a callback, note\n that writing to a file or buffer will need to be handled inside the\n callback.\n [default: output_handle.write()]\n :type callback: callable\n\n :Example::\n\n hook = FTPHook(ftp_conn_id='my_conn')\n\n remote_path = '/path/to/remote/file'\n local_path = '/path/to/local/file'\n\n # with a custom callback (in this case displaying progress on each read)\n def print_progress(percent_progress):\n self.log.info('Percent Downloaded: %s%%' % percent_progress)\n\n total_downloaded = 0\n total_file_size = hook.get_size(remote_path)\n output_handle = open(local_path, 'wb')\n def write_to_file_with_progress(data):\n total_downloaded += len(data)\n output_handle.write(data)\n percent_progress = (total_downloaded / total_file_size) * 100\n print_progress(percent_progress)\n hook.Func(remote_path, None, callback=write_to_file_with_progress)\n\n # without a custom callback data is written to the local_path\n hook.Func(remote_path, local_path)\n \"\"\"\n arg_4 = arg_0.get_conn()\n\n arg_5 = isinstance(arg_2, basestring)\n\n # without a callback, default to writing to a user-provided file or\n # file-like buffer\n if not arg_3:\n if arg_5:\n arg_6 = open(arg_2, 'wb')\n else:\n arg_6 = arg_2\n arg_3 = arg_6.write\n else:\n arg_6 = None\n\n arg_7, arg_8 = os.path.split(arg_1)\n arg_4.cwd(arg_7)\n arg_0.log.info('Retrieving file from FTP: %s', arg_1)\n arg_4.retrbinary('RETR %s' % arg_8, arg_3)\n arg_0.log.info('Finished retrieving file from FTP: %s', arg_1)\n\n if arg_5 and arg_6:\n arg_6.close()"} +{"_id": "doc_211", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Transfers a local file to the remote location.\n\n If local_full_path_or_buffer is a string path, the file will be read\n from that location; if it is a file-like buffer, the file will\n be read from the 
buffer but not closed.\n\n :param remote_full_path: full path to the remote file\n :type remote_full_path: str\n :param local_full_path_or_buffer: full path to the local file or a\n file-like buffer\n :type local_full_path_or_buffer: str or file-like buffer\n \"\"\"\n arg_3 = arg_0.get_conn()\n\n arg_4 = isinstance(arg_2, basestring)\n\n if arg_4:\n arg_5 = open(arg_2, 'rb')\n else:\n arg_5 = arg_2\n arg_6, arg_7 = os.path.split(arg_1)\n arg_3.cwd(arg_6)\n arg_3.storbinary('STOR %s' % arg_7, arg_5)\n\n if arg_4:\n arg_5.close()"} +{"_id": "doc_212", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a datetime object representing the last time the file was modified\n\n :param path: remote file path\n :type path: string\n \"\"\"\n arg_2 = arg_0.get_conn()\n arg_3 = arg_2.sendcmd('MDTM ' + arg_1)\n arg_4 = arg_3[4:]\n # time_val optionally has microseconds\n try:\n return datetime.datetime.strptime(arg_4, \"%Y%m%d%H%M%S.%f\")\n except ValueError:\n return datetime.datetime.strptime(arg_4, '%Y%m%d%H%M%S')"} +{"_id": "doc_213", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Call the DiscordWebhookHook to post message\n \"\"\"\n arg_0.hook = DiscordWebhookHook(\n arg_0.http_conn_id,\n arg_0.webhook_endpoint,\n arg_0.message,\n arg_0.username,\n arg_0.avatar_url,\n arg_0.tts,\n arg_0.proxy\n )\n arg_0.hook.Func()"} +{"_id": "doc_214", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the FileService object.\"\"\"\n arg_1 = arg_0.Funcection(arg_0.conn_id)\n arg_2 = arg_1.extra_dejson\n return FileService(account_name=arg_1.login,\n account_key=arg_1.password, **arg_2)"} +{"_id": "doc_215", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Check if a directory exists on Azure File Share.\n\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: str\n :param kwargs: Optional keyword arguments that\n `FileService.exists()` takes.\n :type kwargs: object\n :return: True if the file exists, False otherwise.\n :rtype: bool\n \"\"\"\n return arg_0.connection.exists(arg_1, arg_2,\n **arg_3)"} +{"_id": "doc_216", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, **arg_4):\n \"\"\"\n Check if a file exists on Azure File Share.\n\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: str\n :param file_name: Name of the file.\n :type file_name: str\n :param kwargs: Optional keyword arguments that\n `FileService.exists()` takes.\n :type kwargs: object\n :return: True if the file exists, False otherwise.\n :rtype: bool\n \"\"\"\n return arg_0.connection.exists(arg_1, arg_2,\n arg_3, **arg_4)"} +{"_id": "doc_217", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Return the list of directories and files stored on a Azure File Share.\n\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: str\n :param kwargs: Optional keyword arguments that\n `FileService.Func()` takes.\n :type kwargs: object\n :return: A list of files and directories\n :rtype: list\n \"\"\"\n return arg_0.connection.Func(arg_1,\n arg_2,\n **arg_3)"} +{"_id": "doc_218", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Create a new directory on a Azure File Share.\n\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: 
str\n :param kwargs: Optional keyword arguments that\n `FileService.Func()` takes.\n :type kwargs: object\n :return: A list of files and directories\n :rtype: list\n \"\"\"\n return arg_0.connection.Func(arg_1, arg_2, **arg_3)"} +{"_id": "doc_219", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, **arg_5):\n \"\"\"\n Upload a file to Azure File Share.\n\n :param file_path: Path to the file to load.\n :type file_path: str\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: str\n :param file_name: Name of the file.\n :type file_name: str\n :param kwargs: Optional keyword arguments that\n `FileService.create_file_from_path()` takes.\n :type kwargs: object\n \"\"\"\n arg_0.connection.create_file_from_path(arg_2, arg_3,\n arg_4, arg_1, **arg_5)"} +{"_id": "doc_220", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, **arg_5):\n \"\"\"\n Upload a string to Azure File Share.\n\n :param string_data: String to load.\n :type string_data: str\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: str\n :param file_name: Name of the file.\n :type file_name: str\n :param kwargs: Optional keyword arguments that\n `FileService.create_file_from_text()` takes.\n :type kwargs: object\n \"\"\"\n arg_0.connection.create_file_from_text(arg_2, arg_3,\n arg_4, arg_1, **arg_5)"} +{"_id": "doc_221", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, **arg_6):\n \"\"\"\n Upload a stream to Azure File Share.\n\n :param stream: Opened file/stream to upload as the file content.\n :type stream: file-like\n :param share_name: Name of the share.\n :type share_name: str\n :param directory_name: Name of the directory.\n :type directory_name: str\n :param file_name: Name of the file.\n :type file_name: str\n :param count: Size of the stream in bytes\n :type count: int\n :param kwargs: Optional keyword arguments that\n `FileService.create_file_from_stream()` takes.\n :type kwargs: object\n \"\"\"\n arg_0.connection.create_file_from_stream(arg_2, arg_3,\n arg_4, arg_1, arg_5, **arg_6)"} +{"_id": "doc_222", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a Google Cloud Storage service object.\n \"\"\"\n if not arg_0._conn:\n arg_0._conn = storage.Client(credentials=arg_0._get_credentials())\n\n return arg_0._conn"} +{"_id": "doc_223", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4=None):\n \"\"\"\n Copies an object from a bucket to another, with renaming if requested.\n\n destination_bucket or destination_object can be omitted, in which case\n source bucket/object is used, but not both.\n\n :param source_bucket: The bucket of the object to Func from.\n :type source_bucket: str\n :param source_object: The object to Func.\n :type source_object: str\n :param destination_bucket: The destination of the object to copied to.\n Can be omitted; then the same bucket is used.\n :type destination_bucket: str\n :param destination_object: The (renamed) path of the object if given.\n Can be omitted; then the same name is used.\n :type destination_object: str\n \"\"\"\n arg_3 = arg_3 or arg_1\n arg_4 = arg_4 or arg_2\n if arg_1 == arg_3 and \\\n arg_2 == arg_4:\n\n raise ValueError(\n 'Either source/destination bucket or source/destination object '\n 'must be different, not both the same: bucket=%s, object=%s' %\n (arg_1, arg_2))\n if not arg_1 or not arg_2:\n raise ValueError('source_bucket 
and source_object cannot be empty.')\n\n arg_5 = arg_0.get_conn()\n arg_1 = arg_5.get_bucket(arg_1)\n arg_2 = arg_1.blob(arg_2)\n arg_3 = arg_5.get_bucket(arg_3)\n arg_4 = arg_1.Func_blob(\n blob=arg_2,\n arg_3=arg_3,\n new_name=arg_4)\n\n arg_0.log.info('Object %s in bucket %s copied to object %s in bucket %s',\n arg_2.name, arg_1.name,\n arg_4.name, arg_3.name)"} +{"_id": "doc_224", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Get a file from Google Cloud Storage.\n\n :param bucket_name: The bucket to fetch from.\n :type bucket_name: str\n :param object_name: The object to fetch.\n :type object_name: str\n :param filename: If set, a local file path where the file should be written to.\n :type filename: str\n \"\"\"\n arg_4 = arg_0.get_conn()\n arg_5 = arg_4.get_bucket(arg_1)\n arg_6 = arg_5.blob(blob_name=arg_2)\n\n if arg_3:\n arg_6.Func_to_filename(arg_3)\n arg_0.log.info('File Funced to %s', arg_3)\n\n return arg_6.Func_as_string()"} +{"_id": "doc_225", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4='application/octet-stream', arg_5=False):\n \"\"\"\n Uploads a local file to Google Cloud Storage.\n\n :param bucket_name: The bucket to Func to.\n :type bucket_name: str\n :param object_name: The object name to set when Funcing the local file.\n :type object_name: str\n :param filename: The local file path to the file to be Funced.\n :type filename: str\n :param mime_type: The MIME type to set when Funcing the file.\n :type mime_type: str\n :param gzip: Option to compress file for Func\n :type gzip: bool\n \"\"\"\n\n if arg_5:\n arg_6 = arg_3 + '.gz'\n\n with open(arg_3, 'rb') as f_in:\n with gz.open(arg_6, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n arg_3 = arg_6\n\n arg_7 = arg_0.get_conn()\n arg_8 = arg_7.get_bucket(arg_1=arg_1)\n arg_9 = arg_8.blob(blob_name=arg_2)\n arg_9.Func_from_filename(arg_3=arg_3,\n content_type=arg_4)\n\n if arg_5:\n os.remove(arg_3)\n arg_0.log.info('File %s Funced to %s in %s bucket', arg_3, arg_2, arg_1)"} +{"_id": "doc_226", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Deletes an object from the bucket.\n\n :param bucket_name: name of the bucket, where the object resides\n :type bucket_name: str\n :param object_name: name of the object to Func\n :type object_name: str\n \"\"\"\n arg_3 = arg_0.get_conn()\n arg_4 = arg_3.get_bucket(arg_1=arg_1)\n arg_5 = arg_4.blob(blob_name=arg_2)\n arg_5.Func()\n\n arg_0.log.info('Blob %s Funcd.', arg_2)"} +{"_id": "doc_227", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=None):\n \"\"\"\n List all objects from the bucket with the give string prefix in name\n\n :param bucket_name: bucket name\n :type bucket_name: str\n :param versions: if true, Func all versions of the objects\n :type versions: bool\n :param max_results: max count of items to return in a single page of responses\n :type max_results: int\n :param prefix: prefix string which filters objects whose name begin with\n this prefix\n :type prefix: str\n :param delimiter: filters objects based on the delimiter (for e.g '.csv')\n :type delimiter: str\n :return: a stream of object names matching the filtering criteria\n \"\"\"\n arg_6 = arg_0.get_conn()\n arg_7 = arg_6.get_bucket(arg_1=arg_1)\n\n arg_8 = []\n arg_9 = None\n while True:\n arg_10 = arg_7.Func_blobs(\n arg_3=arg_3,\n page_token=arg_9,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_2=arg_2\n )\n\n arg_11 = []\n for arg_12 in arg_10:\n arg_11.append(arg_12.name)\n\n arg_13 = arg_10.prefixes\n if 
arg_13:\n arg_8 += Func(arg_13)\n else:\n arg_8 += arg_11\n\n arg_9 = arg_10.next_page_token\n if arg_9 is None:\n # empty next page token\n break\n return arg_8"} +{"_id": "doc_228", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Gets the size of a file in Google Cloud Storage.\n\n :param bucket_name: The Google cloud storage bucket where the blob_name is.\n :type bucket_name: str\n :param object_name: The name of the object to check in the Google\n cloud storage bucket_name.\n :type object_name: str\n\n \"\"\"\n arg_0.log.info('Checking the file size of object: %s in bucket_name: %s',\n arg_2,\n arg_1)\n arg_3 = arg_0.get_conn()\n arg_4 = arg_3.get_bucket(arg_1=arg_1)\n arg_5 = arg_4.get_blob(blob_name=arg_2)\n arg_5.reload()\n arg_6 = arg_5.size\n arg_0.log.info('The file size of %s is %s bytes.', arg_2, arg_6)\n return arg_6"} +{"_id": "doc_229", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Gets the MD5 hash of an object in Google Cloud Storage.\n\n :param bucket_name: The Google cloud storage bucket where the blob_name is.\n :type bucket_name: str\n :param object_name: The name of the object to check in the Google cloud\n storage bucket_name.\n :type object_name: str\n \"\"\"\n arg_0.log.info('Retrieving the MD5 hash of '\n 'object: %s in bucket: %s', arg_2, arg_1)\n arg_3 = arg_0.get_conn()\n arg_4 = arg_3.get_bucket(arg_1=arg_1)\n arg_5 = arg_4.get_blob(blob_name=arg_2)\n arg_5.reload()\n arg_6 = arg_5.md5_hash\n arg_0.log.info('The md5Hash of %s is %s', arg_2, arg_6)\n return arg_6"} +{"_id": "doc_230", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3='MULTI_REGIONAL',\n arg_4='US',\n arg_5=None,\n arg_6=None\n ):\n \"\"\"\n Creates a new bucket. Google Cloud Storage uses a flat namespace, so\n you can't create a bucket with a name that is already in use.\n\n .. seealso::\n For more information, see Bucket Naming Guidelines:\n https://cloud.google.com/storage/docs/bucketnaming.html#requirements\n\n :param bucket_name: The name of the bucket.\n :type bucket_name: str\n :param resource: An optional dict with parameters for creating the bucket.\n For information on available parameters, see Cloud Storage API doc:\n https://cloud.google.com/storage/docs/json_api/v1/buckets/insert\n :type resource: dict\n :param storage_class: This defines how objects in the bucket are stored\n and determines the SLA and the cost of storage. Values include\n\n - ``MULTI_REGIONAL``\n - ``REGIONAL``\n - ``STANDARD``\n - ``NEARLINE``\n - ``COLDLINE``.\n\n If this value is not specified when the bucket is\n created, it will default to STANDARD.\n :type storage_class: str\n :param location: The location of the bucket.\n Object data for objects in the bucket resides in physical storage\n within this region. Defaults to US.\n\n .. 
seealso::\n https://developers.google.com/storage/docs/bucket-locations\n\n :type location: str\n :param project_id: The ID of the GCP Project.\n :type project_id: str\n :param labels: User-provided labels, in key/value pairs.\n :type labels: dict\n :return: If successful, it returns the ``id`` of the bucket.\n \"\"\"\n\n arg_0.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s',\n arg_1, arg_4, arg_3)\n\n arg_7 = arg_0.get_conn()\n arg_8 = arg_7.bucket(arg_1=arg_1)\n arg_9 = arg_2 or {}\n\n for arg_10 in arg_9:\n if arg_10 != \"name\":\n arg_8._patch_property(name=arg_10, value=arg_2[arg_10])\n\n arg_8.storage_class = arg_3\n arg_8.labels = arg_6 or {}\n arg_8.create(project=arg_5, arg_4=arg_4)\n return arg_8.id"} +{"_id": "doc_231", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns true if training job's secondary status message has changed.\n\n :param current_job_description: Current job description, returned from DescribeTrainingJob call.\n :type current_job_description: dict\n :param prev_job_description: Previous job description, returned from DescribeTrainingJob call.\n :type prev_job_description: dict\n\n :return: Whether the secondary status message of a training job changed or not.\n \"\"\"\n arg_2 = arg_0.get('SecondaryStatusTransitions')\n if arg_2 is None or len(arg_2) == 0:\n return False\n\n arg_3 = arg_1.get('SecondaryStatusTransitions') \\\n if arg_1 is not None else None\n\n arg_4 = arg_3[-1]['StatusMessage'] \\\n if arg_3 is not None \\\n and len(arg_3) > 0 else ''\n\n arg_5 = arg_0['SecondaryStatusTransitions'][-1]['StatusMessage']\n\n return arg_5 != arg_4"} +{"_id": "doc_232", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Tar the local file or directory and upload to s3\n\n :param path: local file or directory\n :type path: str\n :param key: s3 key\n :type key: str\n :param bucket: s3 bucket\n :type bucket: str\n :return: None\n \"\"\"\n with tempfile.TemporaryFile() as temp_file:\n if os.path.isdir(arg_1):\n arg_4 = [os.path.join(arg_1, name) for name in os.listdir(arg_1)]\n else:\n arg_4 = [arg_1]\n with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:\n for arg_5 in arg_4:\n tar_file.add(arg_5, arcname=os.path.basename(arg_5))\n temp_file.seek(0)\n arg_0.s3_hook.load_file_obj(temp_file, arg_2, arg_3, replace=True)"} +{"_id": "doc_233", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Extract the S3 operations from the configuration and execute them.\n\n :param config: config of SageMaker operation\n :type config: dict\n :rtype: dict\n \"\"\"\n arg_2 = arg_1.pop('S3Operations', None)\n\n if arg_2 is not None:\n arg_3 = arg_2.get('S3CreateBucket', [])\n arg_4 = arg_2.get('S3Upload', [])\n for arg_5 in arg_3:\n arg_0.s3_hook.create_bucket(bucket_name=arg_5['Bucket'])\n for arg_5 in arg_4:\n if arg_5['Tar']:\n arg_0.tar_and_s3_upload(arg_5['Path'], arg_5['Key'],\n arg_5['Bucket'])\n else:\n arg_0.s3_hook.load_file(arg_5['Path'], arg_5['Key'],\n arg_5['Bucket'])"} +{"_id": "doc_234", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if an S3 URL exists\n\n :param s3url: S3 url\n :type s3url: str\n :rtype: bool\n \"\"\"\n arg_2, arg_3 = S3Hook.parse_s3_url(arg_1)\n if not arg_0.s3_hook.check_for_bucket(bucket_name=arg_2):\n raise AirflowException(\n \"The input S3 Bucket {} does not exist \".format(arg_2))\n if arg_3 and not arg_0.s3_hook.check_for_key(arg_3=arg_3, bucket_name=arg_2)\\\n and not arg_0.s3_hook.check_for_prefix(\n prefix=arg_3, bucket_name=arg_2, delimiter='/'):\n # check if 
s3 key exists in the case user provides a single file\n # or if s3 prefix exists in the case user provides multiple files in\n # a prefix\n raise AirflowException(\"The input S3 Key \"\n \"or Prefix {} does not exist in the Bucket {}\"\n .format(arg_1, arg_2))\n return True"} +{"_id": "doc_235", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Establish an AWS connection for retrieving logs during training\n\n :rtype: CloudWatchLogs.Client\n \"\"\"\n arg_1 = botocore.config.Config(retries={'max_attempts': 15})\n return arg_0.get_client_type('logs', arg_1=arg_1)"} +{"_id": "doc_236", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6,\n arg_7):\n \"\"\"\n Return the training job info associated with job_name and print CloudWatch logs\n \"\"\"\n arg_8 = '/aws/sagemaker/TrainingJobs'\n\n if len(arg_3) < arg_4:\n # Log streams are created whenever a container starts writing to stdout/err, so this list\n # may be dynamic until we have a stream for every instance.\n arg_9 = arg_0.get_log_conn()\n try:\n arg_10 = arg_9.describe_log_streams(\n logGroupName=arg_8,\n logStreamNamePrefix=arg_1 + '/',\n orderBy='LogStreamName',\n limit=arg_4\n )\n arg_3 = [arg_11['logStreamName'] for arg_11 in arg_10['logStreams']]\n arg_2.update([(arg_11, Position(timestamp=0, skip=0))\n for arg_11 in arg_3 if arg_11 not in arg_2])\n except arg_9.exceptions.ResourceNotFoundException:\n # On the very first training job run on an account, there's no log group until\n # the container starts logging, so ignore any errors thrown about that\n pass\n\n if len(arg_3) > 0:\n for arg_12, arg_13 in arg_0.multi_stream_iter(arg_8, arg_3, arg_2):\n arg_0.log.info(arg_13['message'])\n arg_14, arg_15 = arg_2[arg_3[arg_12]]\n if arg_13['timestamp'] == arg_14:\n arg_2[arg_3[arg_12]] = Position(timestamp=arg_14, skip=arg_15 + 1)\n else:\n arg_2[arg_3[arg_12]] = Position(timestamp=arg_13['timestamp'], skip=1)\n\n if arg_5 == LogState.COMPLETE:\n return arg_5, arg_6, arg_7\n\n if arg_5 == LogState.JOB_COMPLETE:\n arg_5 = LogState.COMPLETE\n elif time.time() - arg_7 >= 30:\n arg_16 = arg_0.describe_training_job(arg_1)\n arg_7 = time.time()\n\n if secondary_training_status_changed(arg_16, arg_6):\n arg_0.log.info(secondary_training_status_message(arg_16, arg_6))\n arg_6 = arg_16\n\n arg_17 = arg_16['TrainingJobStatus']\n\n if arg_17 not in arg_0.non_terminal_states:\n arg_5 = LogState.JOB_COMPLETE\n return arg_5, arg_6, arg_7"} +{"_id": "doc_237", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4,\n arg_5,\n arg_6=None):\n \"\"\"\n Check status of a SageMaker job\n\n :param job_name: name of the job to check status\n :type job_name: str\n :param key: the key of the response dict\n that points to the state\n :type key: str\n :param describe_function: the function used to retrieve the status\n :type describe_function: python callable\n :param args: the arguments for the function\n :param check_interval: the time interval in seconds which the operator\n will check the status of any SageMaker job\n :type check_interval: int\n :param max_ingestion_time: the maximum ingestion time in seconds. Any\n SageMaker jobs that run longer than this will fail. 
Setting this to\n None implies no timeout for any SageMaker job.\n :type max_ingestion_time: int\n :param non_terminal_states: the set of nonterminal states\n :type non_terminal_states: set\n :return: response of describe call after job is done\n \"\"\"\n if not arg_6:\n arg_6 = arg_0.non_terminal_states\n\n arg_7 = 0\n arg_8 = True\n\n while arg_8:\n time.sleep(arg_4)\n arg_7 = arg_7 + arg_4\n\n try:\n arg_9 = arg_3(arg_1)\n arg_10 = arg_9[arg_2]\n arg_0.log.info('Job still running for %s seconds... '\n 'current status is %s' % (arg_7, arg_10))\n except KeyError:\n raise AirflowException('Could not get status of the SageMaker job')\n except ClientError:\n raise AirflowException('AWS request failed, check logs for more info')\n\n if arg_10 in arg_6:\n arg_8 = True\n elif arg_10 in arg_0.failed_states:\n raise AirflowException('SageMaker job failed because %s' % arg_9['FailureReason'])\n else:\n arg_8 = False\n\n if arg_5 and arg_7 > arg_5:\n # ensure that the job gets killed if the max ingestion time is exceeded\n raise AirflowException('SageMaker job took more than %s seconds', arg_5)\n\n arg_0.log.info('SageMaker Job Compeleted')\n arg_9 = arg_3(arg_1)\n return arg_9"} +{"_id": "doc_238", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute the python dataflow job.\"\"\"\n arg_2 = GoogleCloudBucketHelper(\n arg_0.gcp_conn_id, arg_0.delegate_to)\n arg_0.py_file = arg_2.google_cloud_to_local(arg_0.py_file)\n arg_4 = DataFlowHook(gcp_conn_id=arg_0.gcp_conn_id,\n delegate_to=arg_0.delegate_to,\n poll_sleep=arg_0.poll_sleep)\n arg_5 = arg_0.dataflow_default_options.copy()\n arg_5.update(arg_0.options)\n # Convert argument names from lowerCamelCase to snake case.\n arg_6 = lambda name: re.sub(\n r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)\n arg_7 = {arg_6(key): arg_5[key]\n for key in arg_5}\n arg_4.start_python_dataflow(\n arg_0.job_name, arg_7,\n arg_0.py_file, arg_0.py_options)"} +{"_id": "doc_239", "title": "", "text": "def Func():\n \"\"\"Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n \"\"\"\n context.configure(\n url=settings.SQL_ALCHEMY_CONN, target_metadata=target_metadata,\n literal_binds=True, compare_type=COMPARE_TYPE)\n\n with context.begin_transaction():\n context.run_migrations()"} +{"_id": "doc_240", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Deletes the specified Cloud Bigtable instance.\n Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does\n not exist.\n\n :param project_id: Optional, Google Cloud Platform project ID where the\n BigTable exists. If set to None or missing,\n the default project_id from the GCP connection is used.\n :type project_id: str\n :param instance_id: The ID of the Cloud Bigtable instance.\n :type instance_id: str\n \"\"\"\n arg_3 = arg_0.get_instance(arg_1=arg_1, arg_2=arg_2)\n if arg_3:\n arg_3.delete()\n else:\n arg_0.log.info(\"The instance '%s' does not exist in project '%s'. 
Exiting\", arg_1,\n arg_2)"} +{"_id": "doc_241", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Updates number of nodes in the specified Cloud Bigtable cluster.\n Raises google.api_core.exceptions.NotFound if the cluster does not exist.\n\n :type instance: Instance\n :param instance: The Cloud Bigtable instance that owns the cluster.\n :type cluster_id: str\n :param cluster_id: The ID of the cluster.\n :type nodes: int\n :param nodes: The desired number of nodes.\n \"\"\"\n arg_3 = Cluster(arg_1, arg_0)\n arg_3.serve_nodes = arg_2\n arg_3.update()"} +{"_id": "doc_242", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=',',\n arg_5='utf8',\n arg_6=None, **arg_7):\n \"\"\"\n Loads a pandas DataFrame into hive.\n\n Hive data types will be inferred if not passed but column names will\n not be sanitized.\n\n :param df: DataFrame to load into a Hive table\n :type df: pandas.DataFrame\n :param table: target Hive table, use dot notation to target a\n specific database\n :type table: str\n :param field_dict: mapping from column name to hive data type.\n Note that it must be OrderedDict so as to keep columns' order.\n :type field_dict: collections.OrderedDict\n :param delimiter: field delimiter in the file\n :type delimiter: str\n :param encoding: str encoding to use when writing DataFrame to file\n :type encoding: str\n :param pandas_kwargs: passed to DataFrame.to_csv\n :type pandas_kwargs: dict\n :param kwargs: passed to self.load_file\n \"\"\"\n\n def _infer_field_types_from_df(arg_1):\n arg_8 = {\n 'b': 'BOOLEAN', # boolean\n 'i': 'BIGINT', # signed integer\n 'u': 'BIGINT', # unsigned integer\n 'f': 'DOUBLE', # floating-point\n 'c': 'STRING', # complex floating-point\n 'M': 'TIMESTAMP', # datetime\n 'O': 'STRING', # object\n 'S': 'STRING', # (byte-)string\n 'U': 'STRING', # Unicode\n 'V': 'STRING' # void\n }\n\n arg_9 = OrderedDict()\n for arg_10, arg_11 in arg_1.dtypes.iteritems():\n arg_9[arg_10] = arg_8[arg_11.kind]\n return arg_9\n\n if arg_6 is None:\n arg_6 = {}\n\n with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:\n with NamedTemporaryFile(dir=tmp_dir, mode=\"w\") as f:\n\n if arg_3 is None:\n arg_3 = _infer_field_types_from_df(arg_1)\n\n arg_1.to_csv(path_or_buf=f,\n sep=arg_4,\n header=False,\n index=False,\n arg_5=arg_5,\n date_format=\"%Y-%m-%d %H:%M:%S\",\n **arg_6)\n f.flush()\n\n return arg_0.load_file(filepath=f.name,\n arg_2=arg_2,\n arg_4=arg_4,\n arg_3=arg_3,\n **arg_7)"} +{"_id": "doc_243", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=\",\",\n arg_4=None,\n arg_5=True,\n arg_6=True,\n arg_7=None,\n arg_8=False,\n arg_9=None):\n \"\"\"\n Loads a local file into Hive\n\n Note that the table generated in Hive uses ``STORED AS textfile``\n which isn't the most efficient serialization format. 
If a\n large amount of data is loaded and/or if the tables gets\n queried considerably, you may want to use this operator only to\n stage the data into a temporary table before loading it into its\n final destination using a ``HiveOperator``.\n\n :param filepath: local filepath of the file to load\n :type filepath: str\n :param table: target Hive table, use dot notation to target a\n specific database\n :type table: str\n :param delimiter: field delimiter in the file\n :type delimiter: str\n :param field_dict: A dictionary of the fields name in the file\n as keys and their Hive types as values.\n Note that it must be OrderedDict so as to keep columns' order.\n :type field_dict: collections.OrderedDict\n :param create: whether to create the table if it doesn't exist\n :type create: bool\n :param overwrite: whether to overwrite the data in table or partition\n :type overwrite: bool\n :param partition: target partition as a dict of partition columns\n and values\n :type partition: dict\n :param recreate: whether to drop and recreate the table at every\n execution\n :type recreate: bool\n :param tblproperties: TBLPROPERTIES of the hive table being created\n :type tblproperties: dict\n \"\"\"\n arg_10 = ''\n if arg_8:\n arg_10 += \"DROP TABLE IF EXISTS {table};\\n\".format(arg_2=arg_2)\n if arg_5 or arg_8:\n if arg_4 is None:\n raise ValueError(\"Must provide a field dict when creating a table\")\n arg_11 = \",\\n \".join(\n [k + ' ' + v for k, v in arg_4.items()])\n arg_10 += \"CREATE TABLE IF NOT EXISTS {table} (\\n{fields})\\n\".format(\n arg_2=arg_2, arg_11=arg_11)\n if arg_7:\n arg_12 = \",\\n \".join(\n [p + \" STRING\" for p in arg_7])\n arg_10 += \"PARTITIONED BY ({pfields})\\n\".format(arg_12=arg_12)\n arg_10 += \"ROW FORMAT DELIMITED\\n\"\n arg_10 += \"FIELDS TERMINATED BY '{delimiter}'\\n\".format(arg_3=arg_3)\n arg_10 += \"STORED AS textfile\\n\"\n if arg_9 is not None:\n arg_13 = \", \".join(\n [\"'{0}'='{1}'\".format(k, v) for k, v in arg_9.items()])\n arg_10 += \"TBLPROPERTIES({tprops})\\n\".format(arg_13=arg_13)\n arg_10 += \";\"\n arg_0.log.info(arg_10)\n arg_0.run_cli(arg_10)\n arg_10 = \"LOAD DATA LOCAL INPATH '{filepath}' \".format(arg_1=arg_1)\n if arg_6:\n arg_10 += \"OVERWRITE \"\n arg_10 += \"INTO TABLE {table} \".format(arg_2=arg_2)\n if arg_7:\n arg_14 = \", \".join(\n [\"{0}='{1}'\".format(k, v) for k, v in arg_7.items()])\n arg_10 += \"PARTITION ({pvals})\".format(arg_14=arg_14)\n\n # As a workaround for HIVE-10541, add a newline character\n # at the end of hql (AIRFLOW-2412).\n arg_10 += ';\\n'\n\n arg_0.log.info(arg_10)\n arg_0.run_cli(arg_10)"} +{"_id": "doc_244", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a Hive thrift client.\n \"\"\"\n import hmsclient\n from thrift.transport import TSocket, TTransport\n from thrift.protocol import TBinaryProtocol\n arg_1 = arg_0.metastore_conn\n arg_2 = arg_1.extra_dejson.get('authMechanism', 'NOSASL')\n if configuration.conf.get('core', 'security') == 'kerberos':\n arg_2 = arg_1.extra_dejson.get('authMechanism', 'GSSAPI')\n arg_3 = arg_1.extra_dejson.get('kerberos_service_name', 'hive')\n\n arg_4 = TSocket.TSocket(arg_1.host, arg_1.port)\n if configuration.conf.get('core', 'security') == 'kerberos' \\\n and arg_2 == 'GSSAPI':\n try:\n import saslwrapper as sasl\n except ImportError:\n import sasl\n\n def sasl_factory():\n arg_5 = sasl.Client()\n arg_5.setAttr(\"host\", arg_1.host)\n arg_5.setAttr(\"service\", arg_3)\n arg_5.init()\n return arg_5\n\n from thrift_sasl import TSaslClientTransport\n arg_6 = 
TSaslClientTransport(sasl_factory, \"GSSAPI\", arg_4)\n else:\n arg_6 = TTransport.TBufferedTransport(arg_4)\n\n arg_7 = TBinaryProtocol.TBinaryProtocol(arg_6)\n\n return hmsclient.HMSClient(iprot=arg_7)"} +{"_id": "doc_245", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Checks whether a partition with a given name exists\n\n :param schema: Name of hive schema (database) @table belongs to\n :type schema: str\n :param table: Name of hive table @partition belongs to\n :type schema: str\n :partition: Name of the partitions to check for (eg `a=b/c=d`)\n :type schema: str\n :rtype: bool\n\n >>> hh = HiveMetastoreHook()\n >>> t = 'static_babynames_partitioned'\n >>> hh.Func('airflow', t, \"ds=2015-01-01\")\n True\n >>> hh.Func('airflow', t, \"ds=xxx\")\n False\n \"\"\"\n with arg_0.metastore as client:\n return client.Func(arg_1, arg_2, arg_3)"} +{"_id": "doc_246", "title": "", "text": "def Func(arg_0, arg_1, arg_2='default'):\n \"\"\"\n Check if table exists\n\n >>> hh = HiveMetastoreHook()\n >>> hh.Func(db='airflow', table_name='static_babynames')\n True\n >>> hh.Func(db='airflow', table_name='does_not_exist')\n False\n \"\"\"\n try:\n arg_0.get_table(arg_1, arg_2)\n return True\n except Exception:\n return False"} +{"_id": "doc_247", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns a Hive connection object.\n \"\"\"\n arg_2 = arg_0.Funcection(arg_0.hiveserver2_conn_id)\n arg_3 = arg_2.extra_dejson.get('authMechanism', 'NONE')\n if arg_3 == 'NONE' and arg_2.login is None:\n # we need to give a username\n arg_4 = 'airflow'\n arg_5 = None\n if configuration.conf.get('core', 'security') == 'kerberos':\n arg_3 = arg_2.extra_dejson.get('authMechanism', 'KERBEROS')\n arg_5 = arg_2.extra_dejson.get('kerberos_service_name', 'hive')\n\n # pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier\n if arg_3 == 'GSSAPI':\n arg_0.log.warning(\n \"Detected deprecated 'GSSAPI' for authMechanism \"\n \"for %s. 
Please use 'KERBEROS' instead\",\n arg_0.hiveserver2_conn_id\n )\n arg_3 = 'KERBEROS'\n\n from pyhive.hive import connect\n return connect(\n host=arg_2.host,\n port=arg_2.port,\n auth=arg_3,\n arg_5=arg_5,\n arg_4=arg_2.login or arg_4,\n password=arg_2.password,\n database=arg_1 or arg_2.schema or 'default')"} +{"_id": "doc_248", "title": "", "text": "def Func(arg_0, arg_1, arg_2='default', arg_3=None):\n \"\"\"\n Get a set of records from a Hive query.\n\n :param hql: hql to be executed.\n :type hql: str or list\n :param schema: target schema, default to 'default'.\n :type schema: str\n :param hive_conf: hive_conf to execute alone with the hql.\n :type hive_conf: dict\n :return: result of hive execution\n :rtype: list\n\n >>> hh = HiveServer2Hook()\n >>> sql = \"SELECT * FROM airflow.static_babynames LIMIT 100\"\n >>> len(hh.Func(sql))\n 100\n \"\"\"\n return arg_0.get_results(arg_1, arg_2=arg_2, arg_3=arg_3)['data']"} +{"_id": "doc_249", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves connection to Cloud Vision.\n\n :return: Google Cloud Vision client object.\n :rtype: google.cloud.vision_v1.ProductSearchClient\n \"\"\"\n if not arg_0._client:\n arg_0._client = ProductSearchClient(credentials=arg_0._get_credentials())\n return arg_0._client"} +{"_id": "doc_250", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Send Dingding message\n \"\"\"\n arg_1 = ['text', 'link', 'markdown', 'actionCard', 'feedCard']\n if arg_0.message_type not in arg_1:\n raise ValueError('DingdingWebhookHook only support {} '\n 'so far, but receive {}'.format(arg_1, arg_0.message_type))\n\n arg_2 = arg_0._build_message()\n arg_0.log.info('Sending Dingding type %s message %s', arg_0.message_type, arg_2)\n arg_3 = arg_0.run(endpoint=arg_0._get_endpoint(),\n arg_2=arg_2,\n headers={'Content-Type': 'application/json'})\n\n # Dingding success Func message will with errcode equal to 0\n if int(arg_3.json().get('errcode')) != 0:\n raise AirflowException('Send Dingding message failed, receive error '\n 'message %s', arg_3.text)\n arg_0.log.info('Success Send Dingding message')"} +{"_id": "doc_251", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Helper method that binds parameters to a SQL query. \"\"\"\n # inspired by MySQL Python Connector (conversion.py)\n arg_2 = {}\n for (arg_3, arg_4) in iteritems(arg_1):\n if arg_4 is None:\n arg_2[arg_3] = 'NULL'\n elif isinstance(arg_4, basestring):\n arg_2[arg_3] = \"'\" + _escape(arg_4) + \"'\"\n else:\n arg_2[arg_3] = str(arg_4)\n return arg_0 % arg_2"} +{"_id": "doc_252", "title": "", "text": "def Func(arg_0):\n \"\"\" Helper method that escapes parameters to a SQL query. 
\"\"\"\n arg_1 = arg_0\n arg_1 = arg_1.replace('\\\\', '\\\\\\\\')\n arg_1 = arg_1.replace('\\n', '\\\\n')\n arg_1 = arg_1.replace('\\r', '\\\\r')\n arg_1 = arg_1.replace(\"'\", \"\\\\'\")\n arg_1 = arg_1.replace('\"', '\\\\\"')\n return arg_1"} +{"_id": "doc_253", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Helper method that casts a BigQuery row to the appropriate data types.\n This is useful because BigQuery returns all fields as strings.\n \"\"\"\n if arg_0 is None:\n return None\n elif arg_1 == 'INTEGER':\n return int(arg_0)\n elif arg_1 == 'FLOAT' or arg_1 == 'TIMESTAMP':\n return float(arg_0)\n elif arg_1 == 'BOOLEAN':\n if arg_0 not in ['true', 'false']:\n raise ValueError(\"{} must have value 'true' or 'false'\".format(\n arg_0))\n return arg_0 == 'true'\n else:\n return arg_0"} +{"_id": "doc_254", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" function to check expected type and raise\n error if type is not correct \"\"\"\n if not isinstance(arg_1, arg_2):\n raise TypeError(\"{} argument must have a type {} not {}\".format(\n arg_0, arg_2, type(arg_1)))"} +{"_id": "doc_255", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a BigQuery PEP 249 connection object.\n \"\"\"\n arg_1 = arg_0.get_service()\n arg_2 = arg_0._get_field('project')\n return BigQueryConnection(\n arg_1=arg_1,\n project_id=arg_2,\n use_legacy_sql=arg_0.use_legacy_sql,\n location=arg_0.location,\n num_retries=arg_0.num_retries\n )"} +{"_id": "doc_256", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a BigQuery service object.\n \"\"\"\n arg_1 = arg_0._authorize()\n return build(\n 'bigquery', 'v2', http=arg_1, cache_discovery=False)"} +{"_id": "doc_257", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Checks for the existence of a table in Google BigQuery.\n\n :param project_id: The Google cloud project in which to look for the\n table. 
The connection supplied to the hook must provide access to\n the specified project.\n :type project_id: str\n :param dataset_id: The name of the dataset in which to look for the\n table.\n :type dataset_id: str\n :param table_id: The name of the table to check the existence of.\n :type table_id: str\n \"\"\"\n arg_4 = arg_0.get_service()\n try:\n arg_4.tables().get(\n projectId=arg_1, datasetId=arg_2,\n tableId=arg_3).execute(num_retries=arg_0.num_retries)\n return True\n except HttpError as e:\n if e.resp['status'] == '404':\n return False\n raise"} +{"_id": "doc_258", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None):\n \"\"\"\n Creates a new, empty table in the dataset.\n To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg\n\n :param project_id: The project to create the table into.\n :type project_id: str\n :param dataset_id: The dataset to create the table into.\n :type dataset_id: str\n :param table_id: The Name of the table to be created.\n :type table_id: str\n :param schema_fields: If set, the schema field list as defined here:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema\n :type schema_fields: list\n :param labels: a dictionary containing labels for the table, passed to BigQuery\n :type labels: dict\n\n **Example**: ::\n\n schema_fields=[{\"name\": \"emp_name\", \"type\": \"STRING\", \"mode\": \"REQUIRED\"},\n {\"name\": \"salary\", \"type\": \"INTEGER\", \"mode\": \"NULLABLE\"}]\n\n :param time_partitioning: configure optional time partitioning fields i.e.\n partition by field, type and expiration as per API specifications.\n\n .. seealso::\n https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning\n :type time_partitioning: dict\n :param cluster_fields: [Optional] The fields used for clustering.\n Must be specified with time_partitioning, data in the table will be first\n partitioned and subsequently clustered.\n https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields\n :type cluster_fields: list\n :param view: [Optional] A dictionary containing definition for the view.\n If set, it will create a view instead of a table:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view\n :type view: dict\n\n **Example**: ::\n\n view = {\n \"query\": \"SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000\",\n \"useLegacySql\": False\n }\n\n :return: None\n \"\"\"\n\n arg_1 = arg_1 if arg_1 is not None else arg_0.project_id\n\n arg_10 = {\n 'tableReference': {\n 'tableId': arg_3\n }\n }\n\n if arg_4:\n arg_10['schema'] = {'fields': arg_4}\n\n if arg_5:\n arg_10['timePartitioning'] = arg_5\n\n if arg_6:\n arg_10['clustering'] = {\n 'fields': arg_6\n }\n\n if arg_7:\n arg_10['labels'] = arg_7\n\n if arg_8:\n arg_10['view'] = arg_8\n\n arg_9 = arg_9 if arg_9 else arg_0.num_retries\n\n arg_0.log.info('Creating Table %s:%s.%s',\n arg_1, arg_2, arg_3)\n\n try:\n arg_0.service.tables().insert(\n projectId=arg_1,\n datasetId=arg_2,\n body=arg_10).execute(arg_9=arg_9)\n\n arg_0.log.info('Table created successfully: %s:%s.%s',\n arg_1, arg_2, arg_3)\n\n except HttpError as err:\n raise AirflowException(\n 'BigQuery job failed. 
Error was: {}'.format(err.content)\n )"} +{"_id": "doc_259", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=None):\n \"\"\"\n Grant authorized view access of a dataset to a view table.\n If this view has already been granted access to the dataset, do nothing.\n This method is not atomic. Running it may clobber a simultaneous update.\n\n :param source_dataset: the source dataset\n :type source_dataset: str\n :param view_dataset: the dataset that the view is in\n :type view_dataset: str\n :param view_table: the table of the view\n :type view_table: str\n :param source_project: the project of the source dataset. If None,\n self.project_id will be used.\n :type source_project: str\n :param view_project: the project that the view is in. If None,\n self.project_id will be used.\n :type view_project: str\n :return: the datasets resource of the source dataset.\n \"\"\"\n\n # Apply default values to projects\n arg_4 = arg_4 if arg_4 else arg_0.project_id\n arg_5 = arg_5 if arg_5 else arg_0.project_id\n\n # we don't want to clobber any existing accesses, so we have to get\n # info on the dataset before we can add view access\n arg_6 = arg_0.service.datasets().get(\n projectId=arg_4, datasetId=arg_1).execute(num_retries=arg_0.num_retries)\n arg_7 = arg_6[\n 'access'] if 'access' in arg_6 else []\n arg_8 = {\n 'view': {\n 'projectId': arg_5,\n 'datasetId': arg_2,\n 'tableId': arg_3\n }\n }\n # check to see if the view we want to add already exists.\n if arg_8 not in arg_7:\n arg_0.log.info(\n 'Granting table %s:%s.%s authorized view access to %s:%s dataset.',\n arg_5, arg_2, arg_3, arg_4,\n arg_1)\n arg_7.append(arg_8)\n return arg_0.service.datasets().patch(\n projectId=arg_4,\n datasetId=arg_1,\n body={\n 'access': arg_7\n }).execute(num_retries=arg_0.num_retries)\n else:\n # if view is already in access, do nothing.\n arg_0.log.info(\n 'Table %s:%s.%s already has authorized view access to %s:%s dataset.',\n arg_5, arg_2, arg_3, arg_4, arg_1)\n return arg_6"} +{"_id": "doc_260", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Method returns dataset_resource if dataset exist\n and raised 404 error if dataset does not exist\n\n :param dataset_id: The BigQuery Dataset ID\n :type dataset_id: str\n :param project_id: The GCP Project ID\n :type project_id: str\n :return: dataset_resource\n\n .. seealso::\n For more information, see Dataset Resource content:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource\n \"\"\"\n\n if not arg_1 or not isinstance(arg_1, str):\n raise ValueError(\"dataset_id argument must be provided and has \"\n \"a type 'str'. You provided: {}\".format(arg_1))\n\n arg_3 = arg_2 if arg_2 else arg_0.project_id\n\n try:\n arg_4 = arg_0.service.datasets().get(\n datasetId=arg_1, projectId=arg_3).execute(num_retries=arg_0.num_retries)\n arg_0.log.info(\"Dataset Resource: %s\", arg_4)\n except HttpError as err:\n raise AirflowException(\n 'BigQuery job failed. 
Error was: {}'.format(err.content))\n\n return arg_4"} +{"_id": "doc_261", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Executes a BigQuery query, and returns the job ID.\n\n :param operation: The query to Func.\n :type operation: str\n :param parameters: Parameters to substitute into the query.\n :type parameters: dict\n \"\"\"\n arg_3 = _bind_parameters(arg_1,\n arg_2) if arg_2 else arg_1\n arg_0.job_id = arg_0.run_query(arg_3)"} +{"_id": "doc_262", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Queries Postgres and returns a cursor to the results.\n \"\"\"\n arg_1 = PostgresHook(postgres_conn_id=arg_0.postgres_conn_id)\n arg_2 = arg_1.get_conn()\n arg_3 = arg_2.cursor()\n arg_3.execute(arg_0.sql, arg_0.parameters)\n return arg_3"} +{"_id": "doc_263", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create all the intermediate directories in a remote host\n\n :param sftp_client: A Paramiko SFTP client.\n :param remote_directory: Absolute Path of the directory containing the file\n :return:\n \"\"\"\n if arg_1 == '/':\n arg_0.chdir('/')\n return\n if arg_1 == '':\n return\n try:\n arg_0.chdir(arg_1)\n except IOError:\n arg_2, arg_3 = os.path.split(arg_1.rstrip('/'))\n Func(arg_0, arg_2)\n arg_0.mkdir(arg_3)\n arg_0.chdir(arg_3)\n return"} +{"_id": "doc_264", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0, arg_4=None):\n \"\"\"\n Send message to the queue\n\n :param queue_url: queue url\n :type queue_url: str\n :param message_body: the contents of the message\n :type message_body: str\n :param delay_seconds: seconds to delay the message\n :type delay_seconds: int\n :param message_attributes: additional attributes for the message (default: None)\n For details of the attributes parameter see :py:meth:`botocore.client.SQS.Func`\n :type message_attributes: dict\n\n :return: dict with the information about the message sent\n For details of the returned value see :py:meth:`botocore.client.SQS.Func`\n :rtype: dict\n \"\"\"\n return arg_0.get_conn().Func(QueueUrl=arg_1,\n MessageBody=arg_2,\n DelaySeconds=arg_3,\n MessageAttributes=arg_4 or {})"} +{"_id": "doc_265", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"\n Run the task command.\n\n :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``\n :type run_with: list\n :param join_args: whether to concatenate the list of command tokens e.g. 
``['airflow', 'run']`` vs\n ``['airflow run']``\n :param join_args: bool\n :return: the process that was run\n :rtype: subprocess.Popen\n \"\"\"\n arg_1 = arg_1 or []\n arg_3 = [\" \".join(arg_0._command)] if arg_2 else arg_0._command\n arg_4 = arg_1 + arg_3\n\n arg_0.log.info('Running: %s', arg_4)\n arg_5 = subprocess.Popen(\n arg_4,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n close_fds=True,\n env=os.environ.copy(),\n preexec_fn=os.setsid\n )\n\n # Start daemon thread to read subprocess logging output\n arg_6 = threading.Thread(\n target=arg_0._read_task_logs,\n args=(arg_5.stdout,),\n )\n arg_6.daemon = True\n arg_6.start()\n return arg_5"} +{"_id": "doc_266", "title": "", "text": "def Func():\n \"\"\"\n Parse options and process commands\n \"\"\"\n # Parse arguments\n arg_0 = \"usage: nvd3.py [options]\"\n arg_1 = OptionParser(arg_0=arg_0,\n version=(\"python-nvd3 - Charts generator with \"\n \"nvd3.js and d3.js\"))\n arg_1.add_option(\"-q\", \"--quiet\",\n action=\"store_false\", dest=\"verbose\", default=True,\n help=\"don't print messages to stdout\")\n\n (arg_2, arg_3) = arg_1.parse_args()"} +{"_id": "doc_267", "title": "", "text": "def Func(arg_0):\n \"\"\"generate HTML header content\"\"\"\n arg_0.htmlheader = ''\n # If the JavaScript assets have already been injected, don't bother re-sourcing them.\n global _js_initialized\n if '_js_initialized' not in globals() or not _js_initialized:\n for arg_2 in arg_0.header_css:\n arg_0.htmlheader += arg_2\n for arg_3 in arg_0.header_js:\n arg_0.htmlheader += arg_3"} +{"_id": "doc_268", "title": "", "text": "def Func(arg_0):\n \"\"\"generate HTML div\"\"\"\n if arg_0.container:\n return\n\n # Create SVG div with style\n if arg_0.width:\n if arg_0.width[-1] != '%':\n arg_0.style += 'width:%spx;' % arg_0.width\n else:\n arg_0.style += 'width:%s;' % arg_0.width\n if arg_0.height:\n if arg_0.height[-1] != '%':\n arg_0.style += 'height:%spx;' % arg_0.height\n else:\n arg_0.style += 'height:%s;' % arg_0.height\n if arg_0.style:\n arg_0.style = 'style=\"%s\"' % arg_0.style\n\n arg_0.container = arg_0.containerheader + \\\n '
<div id=\"%s\"><svg %s></svg></div>\n' % (arg_0.name, arg_0.style)"}
{"_id": "doc_269", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=False, arg_5=False):\n \"\"\"Create X-axis\"\"\"\n arg_6 = {}\n if arg_5 and arg_3:\n arg_6['tickFormat'] = arg_3\n elif arg_3:\n if arg_3 == 'AM_PM':\n arg_6['tickFormat'] = \"function(d) { return get_am_pm(parseInt(d)); }\"\n else:\n arg_6['tickFormat'] = \"d3.format(',%s')\" % arg_3\n\n if arg_2:\n arg_6['axisLabel'] = \"'\" + arg_2 + \"'\"\n\n # date format : see https://github.com/mbostock/d3/wiki/Time-Formatting\n if arg_4:\n arg_0.dateformat = arg_3\n arg_6['tickFormat'] = (\"function(d) { return d3.time.format('%s')\"\n \"(new Date(parseInt(d))) }\\n\"\n \"\" % arg_0.dateformat)\n # flag is the x Axis is a date\n if arg_1[0] == 'x':\n arg_0.x_axis_date = True\n\n # Add new axis to list of axis\n arg_0.axislist[arg_1] = arg_6\n\n # Create x2Axis if focus_enable\n if arg_1 == \"xAxis\" and arg_0.focus_enable:\n arg_0.axislist['x2Axis'] = arg_6"}
{"_id": "doc_270", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator to make a view compressed\n \"\"\"\n @functools.wraps(arg_0)\n def view_func(*arg_1, **arg_2):\n @after_this_request\n def zipper(arg_3):\n arg_4 = request.headers.get('Accept-Encoding', '')\n\n if 'gzip' not in arg_4.lower():\n return arg_3\n\n arg_3.direct_passthrough = False\n\n if (arg_3.status_code < 200 or arg_3.status_code >= 300 or\n 'Content-Encoding' in arg_3.headers):\n return arg_3\n arg_6 = IO()\n arg_7 = gzip.GzipFile(mode='wb',\n fileobj=arg_6)\n arg_7.write(arg_3.data)\n arg_7.close()\n\n arg_3.data = arg_6.getvalue()\n arg_3.headers['Content-Encoding'] = 'gzip'\n arg_3.headers['Vary'] = 'Accept-Encoding'\n arg_3.headers['Content-Length'] = len(arg_3.data)\n\n return arg_3\n\n return arg_0(*arg_1, **arg_2)\n\n return view_func"}
{"_id": "doc_271", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=False,\n arg_6=None,\n arg_7=None):\n \"\"\"\n Creates a dag run from this dag including the tasks associated with this dag.\n Returns the dag run.\n\n :param run_id: defines the the run id for this dag run\n :type run_id: str\n :param execution_date: the execution date of this dag run\n :type execution_date: datetime.datetime\n :param state: the state of the dag run\n :type state: airflow.utils.state.State\n :param start_date: the date this dag run should be evaluated\n :type start_date: datetime.datetime\n :param external_trigger: whether this dag run is externally triggered\n :type external_trigger: bool\n :param session: database session\n :type session: sqlalchemy.orm.session.Session\n \"\"\"\n\n return arg_0.get_dag().Func(arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7)"}
{"_id": "doc_272", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Publish the message to SQS queue\n\n :param context: the context object\n :type context: dict\n :return: dict with information about the message sent\n For details of the returned dict see :py:meth:`botocore.client.SQS.send_message`\n :rtype: dict\n \"\"\"\n\n arg_2 = SQSHook(aws_conn_id=arg_0.aws_conn_id)\n\n arg_3 = arg_2.send_message(queue_url=arg_0.sqs_queue,\n message_body=arg_0.message_content,\n delay_seconds=arg_0.delay_seconds,\n message_attributes=arg_0.message_attributes)\n\n arg_0.log.info('result is send_message is %s', arg_3)\n\n return arg_3"}
{"_id": "doc_273", "title": "", "text": "def Func(arg_0):\n \"\"\"\n returns a json response from a json serializable python 
object\n \"\"\"\n return Response(\n response=json.dumps(\n arg_0, indent=4, cls=AirflowJsonEncoder),\n status=200,\n mimetype=\"application/json\")"} +{"_id": "doc_274", "title": "", "text": "def Func(arg_0, arg_1='r'):\n \"\"\"\n Opens the given file. If the path contains a folder with a .zip suffix, then\n the folder is treated as a zip archive, opening the file inside the archive.\n\n :return: a file object, as in `open`, or as in `ZipFile.open`.\n \"\"\"\n\n arg_2, arg_3, arg_4 = ZIP_REGEX.search(arg_0).groups()\n if arg_3 and zipfile.is_zipfile(arg_3):\n return zipfile.ZipFile(arg_3, arg_1=arg_1).open(arg_4)\n else:\n return io.open(arg_0, arg_1=arg_1)"} +{"_id": "doc_275", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get Opsgenie api_key for creating alert\n \"\"\"\n arg_1 = arg_0.get_connection(arg_0.http_conn_id)\n arg_2 = arg_1.password\n if not arg_2:\n raise AirflowException('Opsgenie API Key is required for this hook, '\n 'please check your conn_id configuration.')\n return arg_2"} +{"_id": "doc_276", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Overwrite HttpHook Func because this hook just needs base_url\n and headers, and does not need generic params\n\n :param headers: additional headers to be passed through as a dictionary\n :type headers: dict\n \"\"\"\n arg_2 = arg_0.Funcection(arg_0.http_conn_id)\n arg_0.base_url = arg_2.host if arg_2.host else 'https://api.opsgenie.com'\n arg_4 = requests.Session()\n if arg_1:\n arg_4.headers.update(arg_1)\n return arg_4"} +{"_id": "doc_277", "title": "", "text": "def Func(arg_0, arg_1={}):\n \"\"\"\n Execute the Opsgenie Alert call\n\n :param payload: Opsgenie API Create Alert payload values\n See https://docs.opsgenie.com/docs/alert-api#section-create-alert\n :type payload: dict\n \"\"\"\n arg_2 = arg_0._get_api_key()\n return arg_0.run(endpoint='v2/alerts',\n data=json.dumps(arg_1),\n headers={'Content-Type': 'application/json',\n 'Authorization': 'GenieKey %s' % arg_2})"} +{"_id": "doc_278", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Construct the Opsgenie JSON payload. All relevant parameters are combined here\n to a valid Opsgenie JSON payload.\n\n :return: Opsgenie payload (dict) to send\n \"\"\"\n arg_1 = {}\n\n for arg_2 in [\n \"message\", \"alias\", \"description\", \"responders\",\n \"visibleTo\", \"actions\", \"tags\", \"details\", \"entity\",\n \"source\", \"priority\", \"user\", \"note\"\n ]:\n arg_3 = getattr(arg_0, arg_2)\n if arg_3:\n arg_1[arg_2] = arg_3\n return arg_1"} +{"_id": "doc_279", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Call the OpsgenieAlertHook to post message\n \"\"\"\n arg_0.hook = OpsgenieAlertHook(arg_0.opsgenie_conn_id)\n arg_0.hook.Func(arg_0._build_opsgenie_payload())"} +{"_id": "doc_280", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Fetch the status of submitted athena query. 
Returns None or one of valid query states.\n\n :param query_execution_id: Id of submitted athena query\n :type query_execution_id: str\n :return: str\n \"\"\"\n arg_2 = arg_0.conn.get_query_execution(QueryExecutionId=arg_1)\n arg_3 = None\n try:\n arg_3 = arg_2['QueryExecution']['Status']['State']\n except Exception as ex:\n arg_0.log.error('Exception while getting query state', ex)\n finally:\n return arg_3"} +{"_id": "doc_281", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True, arg_4=False):\n \"\"\"\n Call Zendesk API and return results\n\n :param path: The Zendesk API to Func\n :param query: Query parameters\n :param get_all_pages: Accumulate results over all pages before\n returning. Due to strict rate limiting, this can often timeout.\n Waits for recommended period between tries after a timeout.\n :param side_loading: Retrieve related records as part of a single\n request. In order to enable side-loading, add an 'include'\n query parameter containing a comma-separated list of resources\n to load. For more information on side-loading see\n https://developer.zendesk.com/rest_api/docs/core/side_loading\n \"\"\"\n arg_5 = arg_0.get_conn()\n arg_6 = False\n\n while not arg_6:\n try:\n arg_7 = arg_5.Func(arg_1, arg_2)\n arg_6 = True\n except RateLimitError as rle:\n arg_0.__handle_rate_limit_exception(rle)\n\n # Find the key with the results\n arg_8 = [arg_1.split(\"/\")[-1].split(\".json\")[0]]\n arg_9 = arg_7['next_page']\n if arg_4:\n arg_8 += arg_2['include'].split(',')\n arg_7 = {arg_12: arg_7[arg_12] for arg_12 in arg_8}\n\n if arg_3:\n while arg_9 is not None:\n try:\n # Need to split because the next page URL has\n # `github.zendesk...`\n # in it, but the Func function needs it removed.\n arg_10 = arg_9.split(arg_0.__url)[1]\n arg_0.log.info(\"Calling %s\", arg_10)\n arg_11 = arg_5.Func(arg_10)\n for arg_12 in arg_7:\n arg_7[arg_12].extend(arg_11[arg_12])\n if arg_9 == arg_11['next_page']:\n # Unfortunately zdesk doesn't always throw ZendeskError\n # when we are done getting all the data. Sometimes the\n # next just refers to the current set of results.\n # Hence, need to deal with this special case\n break\n else:\n arg_9 = arg_11['next_page']\n except RateLimitError as rle:\n arg_0.__handle_rate_limit_exception(rle)\n except ZendeskError as ze:\n if b\"Use a start_time older than 5 minutes\" in ze.msg:\n # We have pretty up to date data\n break\n else:\n raise ze\n\n return arg_7"} +{"_id": "doc_282", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3='',\n arg_4=None,\n arg_5=None):\n \"\"\"\n Retrieves the partition values for a table.\n\n :param database_name: The name of the catalog database where the partitions reside.\n :type database_name: str\n :param table_name: The name of the partitions' table.\n :type table_name: str\n :param expression: An expression filtering the partitions to be returned.\n Please see official AWS documentation for further information.\n https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions\n :type expression: str\n :param page_size: pagination size\n :type page_size: int\n :param max_items: maximum items to return\n :type max_items: int\n :return: set of partition values where each value is a tuple since\n a partition may be composed of multiple columns. 
For example:\n ``{('2018-01-01','1'), ('2018-01-01','2')}``\n \"\"\"\n arg_6 = {\n 'PageSize': arg_4,\n 'MaxItems': arg_5,\n }\n\n arg_7 = arg_0.get_conn().get_paginator('Func')\n arg_8 = arg_7.paginate(\n DatabaseName=arg_1,\n TableName=arg_2,\n Expression=arg_3,\n PaginationConfig=arg_6\n )\n\n arg_9 = set()\n for arg_10 in arg_8:\n for arg_11 in arg_10['Partitions']:\n arg_9.add(tuple(arg_11['Values']))\n\n return arg_9"} +{"_id": "doc_283", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the information of the table\n\n :param database_name: Name of hive database (schema) @table belongs to\n :type database_name: str\n :param table_name: Name of hive table\n :type table_name: str\n :rtype: dict\n\n >>> hook = AwsGlueCatalogHook()\n >>> r = hook.Func('db', 'table_foo')\n >>> r['Name'] = 'table_foo'\n \"\"\"\n\n arg_3 = arg_0.get_conn().Func(DatabaseName=arg_1, Name=arg_2)\n\n return arg_3['Table']"} +{"_id": "doc_284", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the physical location of the table\n\n :param database_name: Name of hive database (schema) @table belongs to\n :type database_name: str\n :param table_name: Name of hive table\n :type table_name: str\n :return: str\n \"\"\"\n\n arg_3 = arg_0.get_table(arg_1, arg_2)\n\n return arg_3['StorageDescriptor']['Location']"} +{"_id": "doc_285", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return status of a cluster\n\n :param cluster_identifier: unique identifier of a cluster\n :type cluster_identifier: str\n \"\"\"\n arg_2 = arg_0.get_conn()\n try:\n arg_3 = arg_2.describe_clusters(\n ClusterIdentifier=arg_1)['Clusters']\n return arg_3[0]['ClusterStatus'] if arg_3 else None\n except arg_2.exceptions.ClusterNotFoundFault:\n return 'cluster_not_found'"} +{"_id": "doc_286", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=True,\n arg_3=''):\n \"\"\"\n Delete a cluster and optionally create a snapshot\n\n :param cluster_identifier: unique identifier of a cluster\n :type cluster_identifier: str\n :param skip_final_cluster_snapshot: determines cluster snapshot creation\n :type skip_final_cluster_snapshot: bool\n :param final_cluster_snapshot_identifier: name of final cluster snapshot\n :type final_cluster_snapshot_identifier: str\n \"\"\"\n arg_4 = arg_0.get_conn().Func(\n ClusterIdentifier=arg_1,\n SkipFinalClusterSnapshot=arg_2,\n FinalClusterSnapshotIdentifier=arg_3\n )\n return arg_4['Cluster'] if arg_4['Cluster'] else None"} +{"_id": "doc_287", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Restores a cluster from its snapshot\n\n :param cluster_identifier: unique identifier of a cluster\n :type cluster_identifier: str\n :param snapshot_identifier: unique identifier for a snapshot of a cluster\n :type snapshot_identifier: str\n \"\"\"\n arg_3 = arg_0.get_conn().Func(\n ClusterIdentifier=arg_1,\n SnapshotIdentifier=arg_2\n )\n return arg_3['Cluster'] if arg_3['Cluster'] else None"} +{"_id": "doc_288", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n SlackAPIOperator calls will not fail even if the call is not unsuccessful.\n It should not prevent a DAG from completing in success\n \"\"\"\n if not arg_0.api_params:\n arg_0.construct_api_call_params()\n arg_2 = SlackHook(token=arg_0.token, slack_conn_id=arg_0.slack_conn_id)\n arg_2.call(arg_0.method, arg_0.api_params)"} +{"_id": "doc_289", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Will test the filepath result and test if its size is at least self.filesize\n\n :param result: a 
list of dicts returned by Snakebite ls\n :param size: the file size in MB a file should be at least to trigger True\n :return: (bool) depending on the matching criteria\n \"\"\"\n if arg_1:\n arg_2 = LoggingMixin().log\n arg_2.debug(\n 'Filtering for file size >= %s in files: %s',\n arg_1, map(lambda x: x['path'], arg_0)\n )\n arg_1 *= settings.MEGABYTE\n arg_0 = [x for x in arg_0 if x['length'] >= arg_1]\n arg_2.debug('HdfsSensor.poke: after size filter result is %s', arg_0)\n return arg_0"} +{"_id": "doc_290", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Will filter if instructed to do so the result to remove matching criteria\n\n :param result: list of dicts returned by Snakebite ls\n :type result: list[dict]\n :param ignored_ext: list of ignored extensions\n :type ignored_ext: list\n :param ignore_copying: shall we ignore ?\n :type ignore_copying: bool\n :return: list of dicts which were not removed\n :rtype: list[dict]\n \"\"\"\n if arg_2:\n arg_3 = LoggingMixin().log\n arg_4 = r\"^.*\\.(%s$)$\" % '$|'.join(arg_1)\n arg_5 = re.compile(arg_4)\n arg_3.debug(\n 'Filtering result for ignored extensions: %s in files %s',\n arg_5.pattern, map(lambda x: x['path'], arg_0)\n )\n arg_0 = [x for x in arg_0 if not arg_5.match(x['path'])]\n arg_3.debug('HdfsSensor.poke: after ext filter result is %s', arg_0)\n return arg_0"} +{"_id": "doc_291", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Create a pool with a given parameters.\"\"\"\n if not (arg_0 and arg_0.strip()):\n raise AirflowBadRequest(\"Pool name shouldn't be empty\")\n\n try:\n arg_1 = int(arg_1)\n except ValueError:\n raise AirflowBadRequest(\"Bad value for `slots`: %s\" % arg_1)\n\n arg_3.expire_on_commit = False\n arg_5 = arg_3.query(Pool).filter_by(arg_5=arg_0).first()\n if arg_5 is None:\n arg_5 = Pool(arg_5=arg_0, arg_1=arg_1, arg_2=arg_2)\n arg_3.add(arg_5)\n else:\n arg_5.slots = arg_1\n arg_5.description = arg_2\n\n arg_3.commit()\n\n return arg_5"} +{"_id": "doc_292", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Delete pool by a given name.\"\"\"\n if not (arg_0 and arg_0.strip()):\n raise AirflowBadRequest(\"Pool name shouldn't be empty\")\n\n arg_2 = arg_1.query(Pool).filter_by(arg_2=arg_0).first()\n if arg_2 is None:\n raise PoolNotFound(\"Pool '%s' doesn't exist\" % arg_0)\n\n arg_1.delete(arg_2)\n arg_1.commit()\n\n return arg_2"} +{"_id": "doc_293", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Given an operation, continuously fetches the status from Google Cloud until either\n completion or an error occurring\n\n :param operation: The Operation to wait for\n :type operation: google.cloud.container_V1.gapic.enums.Operation\n :param project_id: Google Cloud Platform project ID\n :type project_id: str\n :return: A new, updated operation fetched from Google Cloud\n \"\"\"\n arg_0.log.info(\"Waiting for OPERATION_NAME %s\", arg_1.name)\n time.sleep(OPERATIONAL_POLL_INTERVAL)\n while arg_1.status != Operation.Status.DONE:\n if arg_1.status == Operation.Status.RUNNING or arg_1.status == \\\n Operation.Status.PENDING:\n time.sleep(OPERATIONAL_POLL_INTERVAL)\n else:\n raise exceptions.GoogleCloudError(\n \"Operation has failed with status: %s\" % arg_1.status)\n # To update status of operation\n arg_1 = arg_0.get_operation(arg_1.name, arg_2=arg_2 or arg_0.project_id)\n return arg_1"} +{"_id": "doc_294", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=arg_4, arg_5=arg_4):\n \"\"\"\n Creates a cluster, consisting of the specified 
number and type of Google Compute\n Engine instances.\n\n :param cluster: A Cluster protobuf or dict. If dict is provided, it must\n be of the same form as the protobuf message\n :class:`google.cloud.container_v1.types.Cluster`\n :type cluster: dict or google.cloud.container_v1.types.Cluster\n :param project_id: Google Cloud Platform project ID\n :type project_id: str\n :param retry: A retry object (``google.api_core.retry.Retry``) used to\n retry requests.\n If None is specified, requests will not be retried.\n :type retry: google.api_core.retry.Retry\n :param timeout: The amount of time, in seconds, to wait for the request to\n complete. Note that if retry is specified, the timeout applies to each\n individual attempt.\n :type timeout: float\n :return: The full url to the new, or existing, cluster\n :raises:\n ParseError: On JSON parsing problems when trying to convert dict\n AirflowException: cluster is not dict type nor Cluster proto type\n \"\"\"\n\n if isinstance(arg_1, dict):\n arg_6 = Cluster()\n arg_1 = arg_0._dict_to_proto(py_dict=arg_1, proto=arg_6)\n elif not isinstance(arg_1, Cluster):\n raise AirflowException(\n \"cluster is not instance of Cluster proto or python dict\")\n\n arg_0._append_label(arg_1, 'airflow-version', 'v' + version.version)\n\n arg_0.log.info(\n \"Creating (project_id=%s, zone=%s, cluster_name=%s)\",\n arg_0.project_id, arg_0.location, arg_1.name\n )\n try:\n arg_7 = arg_0.get_client().Func(arg_2=arg_2 or arg_0.project_id,\n zone=arg_0.location,\n arg_1=arg_1,\n arg_3=arg_3,\n arg_5=arg_5)\n arg_7 = arg_0.wait_for_operation(arg_7)\n\n return arg_7.target_link\n except AlreadyExists as error:\n arg_0.log.info('Assuming Success: %s', error.message)\n return arg_0.get_cluster(name=arg_1.name).self_link"} +{"_id": "doc_295", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=arg_4, arg_5=arg_4):\n \"\"\"\n Gets details of specified cluster\n\n :param name: The name of the cluster to retrieve\n :type name: str\n :param project_id: Google Cloud Platform project ID\n :type project_id: str\n :param retry: A retry object used to retry requests. If None is specified,\n requests will not be retried.\n :type retry: google.api_core.retry.Retry\n :param timeout: The amount of time, in seconds, to wait for the request to\n complete. Note that if retry is specified, the timeout applies to each\n individual attempt.\n :type timeout: float\n :return: google.cloud.container_v1.types.Cluster\n \"\"\"\n arg_0.log.info(\n \"Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)\",\n arg_2 or arg_0.project_id, arg_0.location, arg_1\n )\n\n return arg_0.get_client().Func(arg_2=arg_2 or arg_0.project_id,\n zone=arg_0.location,\n cluster_id=arg_1,\n arg_3=arg_3,\n arg_5=arg_5).self_link"} +{"_id": "doc_296", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Construct the Discord JSON payload. 
All relevant parameters are combined here\n to a valid Discord JSON payload.\n\n :return: Discord payload (str) to send\n \"\"\"\n arg_1 = {}\n\n if arg_0.username:\n arg_1['username'] = arg_0.username\n if arg_0.avatar_url:\n arg_1['avatar_url'] = arg_0.avatar_url\n\n arg_1['tts'] = arg_0.tts\n\n if len(arg_0.message) <= 2000:\n arg_1['content'] = arg_0.message\n else:\n raise AirflowException('Discord message length must be 2000 or fewer '\n 'characters.')\n\n return json.dumps(arg_1)"} +{"_id": "doc_297", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Encrypts a plaintext message using Google Cloud KMS.\n\n :param key_name: The Resource Name for the key (or key version)\n to be used for encyption. Of the form\n ``projects/*/locations/*/keyRings/*/cryptoKeys/**``\n :type key_name: str\n :param plaintext: The message to be Funced.\n :type plaintext: bytes\n :param authenticated_data: Optional additional authenticated data that\n must also be provided to decrypt the message.\n :type authenticated_data: bytes\n :return: The base 64 encoded ciphertext of the original message.\n :rtype: str\n \"\"\"\n arg_4 = arg_0.get_conn().projects().locations().keyRings().cryptoKeys()\n arg_5 = {'plaintext': _b64encode(arg_2)}\n if arg_3:\n arg_5['additionalAuthenticatedData'] = _b64encode(arg_3)\n\n arg_6 = arg_4.Func(name=arg_1, arg_5=arg_5)\n arg_7 = arg_6.execute(num_retries=arg_0.num_retries)\n\n arg_8 = arg_7['ciphertext']\n return arg_8"} +{"_id": "doc_298", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False, arg_4=\"text\",\n arg_5=None, arg_6=None, arg_7=None, arg_8=False,\n arg_9=None, arg_10=None):\n \"\"\"\n Imports table from remote location to target dir. Arguments are\n copies of direct sqoop command line arguments\n\n :param table: Table to read\n :param target_dir: HDFS destination dir\n :param append: Append data to an existing dataset in HDFS\n :param file_type: \"avro\", \"sequence\", \"text\" or \"parquet\".\n Imports data to into the specified format. Defaults to text.\n :param columns: Columns to import from table\n :param split_by: Column of the table used to split work units\n :param where: WHERE clause to use during import\n :param direct: Use direct connector if exists for the database\n :param driver: Manually specify JDBC driver class to use\n :param extra_import_options: Extra import options to pass as dict.\n If a key doesn't have a value, just pass an empty string to it.\n Don't include prefix of -- for sqoop options.\n \"\"\"\n arg_11 = arg_0._import_cmd(arg_2, arg_3, arg_4, arg_6, arg_8,\n arg_9, arg_10)\n\n arg_11 += [\"--table\", arg_1]\n\n if arg_5:\n arg_11 += [\"--columns\", arg_5]\n if arg_7:\n arg_11 += [\"--where\", arg_7]\n\n arg_0.Popen(arg_11)"} +{"_id": "doc_299", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves connection to Cloud Text to Speech.\n\n :return: Google Cloud Text to Speech client object.\n :rtype: google.cloud.texttospeech_v1.TextToSpeechClient\n \"\"\"\n if not arg_0._client:\n arg_0._client = TextToSpeechClient(credentials=arg_0._get_credentials())\n return arg_0._client"} +{"_id": "doc_300", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Close and upload local log file to remote storage S3.\n \"\"\"\n # When application exit, system shuts down all handlers by\n # calling Func method. 
Here we check if logger is already\n        # Funcd to prevent uploading the log to remote storage multiple\n        # times when `logging.shutdown` is called.\n        if arg_0.Funcd:\n            return\n\n        super().Func()\n\n        if not arg_0.upload_on_Func:\n            return\n\n        arg_1 = os.path.join(arg_0.local_base, arg_0.log_relative_path)\n        arg_2 = os.path.join(arg_0.remote_base, arg_0.log_relative_path)\n        if os.path.exists(arg_1):\n            # read log and remove old logs to get just the latest additions\n            with open(arg_1, 'r') as logfile:\n                arg_3 = logfile.read()\n            arg_0.s3_write(arg_3, arg_2)\n\n        # Mark Funcd so we don't double write if Func is called twice\n        arg_0.Funcd = True"}
+{"_id": "doc_301", "title": "", "text": "def Func(arg_0):\n        \"\"\"When using git to retrieve the DAGs, use the GitSync Init Container\"\"\"\n        # If we're using volume claims to mount the dags, no init container is needed\n        if arg_0.kube_config.dags_volume_claim or \\\n                arg_0.kube_config.dags_volume_host or arg_0.kube_config.dags_in_image:\n            return []\n\n        # Otherwise, define a git-sync init container\n        arg_1 = [{\n            'name': 'GIT_SYNC_REPO',\n            'value': arg_0.kube_config.git_repo\n        }, {\n            'name': 'GIT_SYNC_BRANCH',\n            'value': arg_0.kube_config.git_branch\n        }, {\n            'name': 'GIT_SYNC_ROOT',\n            'value': arg_0.kube_config.git_sync_root\n        }, {\n            'name': 'GIT_SYNC_DEST',\n            'value': arg_0.kube_config.git_sync_dest\n        }, {\n            'name': 'GIT_SYNC_DEPTH',\n            'value': '1'\n        }, {\n            'name': 'GIT_SYNC_ONE_TIME',\n            'value': 'true'\n        }]\n        if arg_0.kube_config.git_user:\n            arg_1.append({\n                'name': 'GIT_SYNC_USERNAME',\n                'value': arg_0.kube_config.git_user\n            })\n        if arg_0.kube_config.git_password:\n            arg_1.append({\n                'name': 'GIT_SYNC_PASSWORD',\n                'value': arg_0.kube_config.git_password\n            })\n\n        arg_2 = [{\n            'mountPath': arg_0.kube_config.git_sync_root,\n            'name': arg_0.dags_volume_name,\n            'readOnly': False\n        }]\n        if arg_0.kube_config.git_ssh_key_secret_name:\n            arg_2.append({\n                'name': arg_0.git_sync_ssh_secret_volume_name,\n                'mountPath': '/etc/git-secret/ssh',\n                'subPath': 'ssh'\n            })\n            arg_1.extend([\n                {\n                    'name': 'GIT_SSH_KEY_FILE',\n                    'value': '/etc/git-secret/ssh'\n                },\n                {\n                    'name': 'GIT_SYNC_SSH',\n                    'value': 'true'\n                }])\n        if arg_0.kube_config.git_ssh_known_hosts_configmap_name:\n            arg_2.append({\n                'name': arg_0.git_sync_ssh_known_hosts_volume_name,\n                'mountPath': '/etc/git-secret/known_hosts',\n                'subPath': 'known_hosts'\n            })\n            arg_1.extend([\n                {\n                    'name': 'GIT_KNOWN_HOSTS',\n                    'value': 'true'\n                },\n                {\n                    'name': 'GIT_SSH_KNOWN_HOSTS_FILE',\n                    'value': '/etc/git-secret/known_hosts'\n                }\n            ])\n        else:\n            arg_1.append({\n                'name': 'GIT_KNOWN_HOSTS',\n                'value': 'false'\n            })\n\n        return [{\n            'name': arg_0.kube_config.git_sync_init_container_name,\n            'image': arg_0.kube_config.git_sync_container,\n            'securityContext': {'runAsUser': 65533},  # git-sync user\n            'env': arg_1,\n            'volumeMounts': arg_2\n        }]"}
+{"_id": "doc_302", "title": "", "text": "def Func(arg_0):\n        \"\"\"Defines any necessary environment variables for the pod executor\"\"\"\n        arg_1 = {}\n\n        for arg_2, arg_3 in six.iteritems(arg_0.kube_config.kube_env_vars):\n            arg_1[arg_2] = arg_3\n\n        arg_1[\"AIRFLOW__CORE__EXECUTOR\"] = \"LocalExecutor\"\n\n        if arg_0.kube_config.airflow_configmap:\n            arg_1['AIRFLOW_HOME'] = arg_0.worker_airflow_home\n            arg_1['AIRFLOW__CORE__DAGS_FOLDER'] = arg_0.worker_airflow_dags\n        if (not arg_0.kube_config.airflow_configmap and\n                'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in arg_0.kube_config.kube_secrets):\n            arg_1['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get(\"core\", \"SQL_ALCHEMY_CONN\")\n        if arg_0.kube_config.git_dags_folder_mount_point:\n            # /root/airflow/dags/repo/dags\n            arg_4 = os.path.join(\n                arg_0.kube_config.git_dags_folder_mount_point,\n                arg_0.kube_config.git_sync_dest,  # repo\n                arg_0.kube_config.git_subpath  # dags\n            )\n            arg_1['AIRFLOW__CORE__DAGS_FOLDER'] = arg_4\n        return arg_1"}
+{"_id": "doc_303", "title": "", "text": "def Func(arg_0):\n        \"\"\"Defines any necessary secrets for the pod executor\"\"\"\n        arg_1 = []\n\n        for arg_2, arg_3 in six.iteritems(arg_0.kube_config.kube_secrets):\n            arg_4, arg_5 = arg_3.split('=')\n            arg_1.append(\n                Secret('env', arg_2, arg_4, arg_5)\n            )\n\n        if arg_0.kube_config.env_from_secret_ref:\n            for arg_6 in arg_0.kube_config.env_from_secret_ref.split(','):\n                arg_1.append(\n                    Secret('env', None, arg_6)\n                )\n\n        return arg_1"}
+{"_id": "doc_304", "title": "", "text": "def Func(arg_0):\n        \"\"\"Defines the security context\"\"\"\n        arg_1 = {}\n\n        if arg_0.kube_config.worker_run_as_user:\n            arg_1['runAsUser'] = arg_0.kube_config.worker_run_as_user\n\n        if arg_0.kube_config.worker_fs_group:\n            arg_1['fsGroup'] = arg_0.kube_config.worker_fs_group\n\n        # set fs_group to 65533 if not explicitly specified and using git ssh keypair auth\n        if arg_0.kube_config.git_ssh_key_secret_name and arg_1.get('fsGroup') is None:\n            arg_1['fsGroup'] = 65533\n\n        return arg_1"}
+{"_id": "doc_305", "title": "", "text": "def Func(arg_0):\n        \"\"\"\n        Heartbeats update the job's entry in the database with a timestamp\n        for the latest_Func and allows for the job to be killed\n        externally. This allows at the system level to monitor what is\n        actually active.\n\n        For instance, an old Func for SchedulerJob would mean something\n        is wrong.\n\n        This also allows for any job to be killed externally, regardless\n        of who is running it or on which machine it is running.\n\n        Note that if your Func is set to 60 seconds and you call this\n        method after 10 seconds of processing since the last Func, it\n        will sleep 50 seconds to complete the 60 seconds and keep a steady\n        heart rate. 
If you go over 60 seconds before calling it, it won't\n sleep at all.\n \"\"\"\n try:\n with create_session() as session:\n arg_1 = session.query(BaseJob).filter_by(id=arg_0.id).one()\n make_transient(arg_1)\n session.commit()\n\n if arg_1.state == State.SHUTDOWN:\n arg_0.kill()\n\n arg_2 = conf.getboolean('core', 'unit_test_mode')\n if not arg_2:\n # Figure out how long to sleep for\n arg_3 = 0\n if arg_1.latest_Func:\n arg_4 = arg_0.heartrate - \\\n (timezone.utcnow() - arg_1.latest_Func)\\\n .total_seconds()\n arg_3 = max(0, arg_4)\n\n sleep(arg_3)\n\n # Update last Func time\n with create_session() as session:\n arg_1 = session.query(BaseJob).filter(BaseJob.id == arg_0.id).first()\n arg_1.latest_Func = timezone.utcnow()\n session.merge(arg_1)\n session.commit()\n\n arg_0.Func_callback(session=session)\n arg_0.log.debug('[Func]')\n except OperationalError as e:\n arg_0.log.error(\"Scheduler Func got an exception: %s\", str(e))"} +{"_id": "doc_306", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Launch the process and Func processing the DAG.\n \"\"\"\n arg_0._process = DagFileProcessor._launch_process(\n arg_0._result_queue,\n arg_0.file_path,\n arg_0._pickle_dags,\n arg_0._dag_id_white_list,\n \"DagFileProcessor{}\".format(arg_0._instance_id),\n arg_0._zombies)\n arg_0._Func_time = timezone.utcnow()"} +{"_id": "doc_307", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the process launched to process this file is Func.\n\n :return: whether the process is finished running\n :rtype: bool\n \"\"\"\n if arg_0._process is None:\n raise AirflowException(\"Tried to see if it's Func before starting!\")\n\n if arg_0._Func:\n return True\n\n # In case result queue is corrupted.\n if arg_0._result_queue and not arg_0._result_queue.empty():\n arg_0._result = arg_0._result_queue.get_nowait()\n arg_0._Func = True\n arg_0.log.debug(\"Waiting for %s\", arg_0._process)\n arg_0._process.join()\n return True\n\n # Potential error case when process dies\n if arg_0._result_queue and not arg_0._process.is_alive():\n arg_0._Func = True\n # Get the object from the queue or else join() can hang.\n if not arg_0._result_queue.empty():\n arg_0._result = arg_0._result_queue.get_nowait()\n arg_0.log.debug(\"Waiting for %s\", arg_0._process)\n arg_0._process.join()\n return True\n\n return False"} +{"_id": "doc_308", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Helper method to clean up processor_agent to avoid leaving orphan processes.\n \"\"\"\n arg_0.log.info(\"Exiting gracefully upon receiving signal %s\", arg_1)\n if arg_0.processor_agent:\n arg_0.processor_agent.end()\n sys.exit(os.EX_OK)"} +{"_id": "doc_309", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n For the DAGs in the given DagBag, record any associated import errors and clears\n errors for files that no longer have them. 
These are usually displayed through the\n Airflow UI so that users know that there are issues parsing DAGs.\n\n :param session: session for ORM operations\n :type session: sqlalchemy.orm.session.Session\n :param dagbag: DagBag containing DAGs with import errors\n :type dagbag: airflow.models.DagBag\n \"\"\"\n # Clear the errors of the processed files\n for arg_2 in arg_1.file_last_changed:\n arg_0.query(errors.ImportError).filter(\n errors.ImportError.filename == arg_2\n ).delete()\n\n # Add the errors of the processed files\n for arg_3, arg_4 in six.iteritems(arg_1.import_errors):\n arg_0.add(errors.ImportError(\n arg_3=arg_3,\n arg_4=arg_4))\n arg_0.commit()"} +{"_id": "doc_310", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get the concurrency maps.\n\n :param states: List of states to query for\n :type states: list[airflow.utils.state.State]\n :return: A map from (dag_id, task_id) to # of task instances and\n a map from (dag_id, task_id) to # of task instances in the given state list\n :rtype: dict[tuple[str, str], int]\n\n \"\"\"\n arg_3 = models.TaskInstance\n arg_4 = (\n arg_2\n .query(arg_3.task_id, arg_3.dag_id, func.count('*'))\n .filter(arg_3.state.in_(arg_1))\n .group_by(arg_3.task_id, arg_3.dag_id)\n ).all()\n arg_5 = defaultdict(int)\n arg_6 = defaultdict(int)\n for arg_7 in arg_4:\n arg_8, arg_9, arg_10 = arg_7\n arg_5[arg_9] += arg_10\n arg_6[(arg_9, arg_8)] = arg_10\n return arg_5, arg_6"} +{"_id": "doc_311", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3=None):\n \"\"\"\n Changes the state of task instances in the list with one of the given states\n to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.\n\n :param task_instances: TaskInstances to change the state of\n :type task_instances: list[airflow.models.TaskInstance]\n :param acceptable_states: Filters the TaskInstances updated to be in these states\n :type acceptable_states: Iterable[State]\n :rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]\n \"\"\"\n if len(arg_1) == 0:\n arg_3.commit()\n return []\n\n arg_4 = models.TaskInstance\n arg_5 = (\n [and_(\n arg_4.dag_id == ti.dag_id,\n arg_4.task_id == ti.task_id,\n arg_4.execution_date == ti.execution_date)\n for ti in arg_1])\n arg_6 = (\n arg_3\n .query(arg_4)\n .filter(or_(*arg_5)))\n\n if None in arg_2:\n arg_6 = arg_6.filter(\n or_(arg_4.state == None, arg_4.state.in_(arg_2)) # noqa: E711\n )\n else:\n arg_6 = arg_6.filter(arg_4.state.in_(arg_2))\n\n arg_7 = (\n arg_6\n .with_for_update()\n .all())\n if len(arg_7) == 0:\n arg_0.log.info(\"No tasks were able to have their state changed to queued.\")\n arg_3.commit()\n return []\n\n # set TIs to queued state\n for arg_8 in arg_7:\n arg_8.state = State.QUEUED\n arg_8.queued_dttm = (timezone.utcnow()\n if not arg_8.queued_dttm\n else arg_8.queued_dttm)\n arg_3.merge(arg_8)\n\n # Generate a list of SimpleTaskInstance for the use of queuing\n # them in the executor.\n arg_11 = [SimpleTaskInstance(ti) for ti in\n arg_7]\n\n arg_12 = \"\\n\\t\".join(\n [repr(x) for x in arg_7])\n\n arg_3.commit()\n arg_0.log.info(\"Setting the following %s tasks to queued state:\\n\\t%s\",\n len(arg_7), arg_12)\n return arg_11"} +{"_id": "doc_312", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2):\n \"\"\"\n Takes task_instances, which should have been set to queued, and enqueues them\n with the executor.\n\n :param simple_task_instances: TaskInstances to enqueue\n :type simple_task_instances: list[SimpleTaskInstance]\n :param simple_dag_bag: Should contains 
all of the task_instances' dags\n :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag\n \"\"\"\n arg_3 = models.TaskInstance\n # actually enqueue them\n for arg_4 in arg_2:\n arg_5 = arg_1.get_dag(arg_4.dag_id)\n arg_6 = arg_3.generate_command(\n arg_4.dag_id,\n arg_4.task_id,\n arg_4.execution_date,\n local=True,\n mark_success=False,\n ignore_all_deps=False,\n ignore_depends_on_past=False,\n ignore_task_deps=False,\n ignore_ti_state=False,\n pool=arg_4.pool,\n file_path=arg_5.full_filepath,\n pickle_id=arg_5.pickle_id)\n\n arg_7 = arg_4.priority_weight\n arg_8 = arg_4.queue\n arg_0.log.info(\n \"Sending %s to executor with priority %s and queue %s\",\n arg_4.key, arg_7, arg_8\n )\n\n arg_0.executor.queue_command(\n arg_4,\n arg_6,\n arg_7=arg_7,\n arg_8=arg_8)"} +{"_id": "doc_313", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None):\n \"\"\"\n Attempts to execute TaskInstances that should be executed by the scheduler.\n\n There are three steps:\n 1. Pick TIs by priority with the constraint that they are in the expected states\n and that we do exceed max_active_runs or pool limits.\n 2. Change the state for the TIs above atomically.\n 3. Enqueue the TIs in the executor.\n\n :param simple_dag_bag: TaskInstances associated with DAGs in the\n simple_dag_bag will be fetched from the DB and executed\n :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag\n :param states: Execute TaskInstances in these states\n :type states: tuple[airflow.utils.state.State]\n :return: Number of task instance with state changed.\n \"\"\"\n arg_4 = arg_0._find_executable_task_instances(arg_1, arg_2,\n arg_3=arg_3)\n\n def query(arg_5, arg_6):\n arg_7 = \\\n arg_0._change_state_for_executable_task_instances(arg_6,\n arg_2,\n arg_3=arg_3)\n arg_0._enqueue_task_instances_with_queued_state(\n arg_1,\n arg_7)\n arg_3.commit()\n return arg_5 + len(arg_7)\n\n return helpers.reduce_in_chunks(query, arg_4, 0, arg_0.max_tis_per_query)"} +{"_id": "doc_314", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n If there are tasks left over in the executor,\n we set them back to SCHEDULED to avoid creating hanging tasks.\n\n :param session: session for ORM operations\n \"\"\"\n if arg_0.executor.queued_tasks:\n arg_2 = models.TaskInstance\n arg_3 = (\n [and_(\n arg_2.dag_id == dag_id,\n arg_2.task_id == task_id,\n arg_2.execution_date == execution_date,\n # The TI.try_number will return raw try_number+1 since the\n # ti is not running. And we need to -1 to match the DB record.\n arg_2._try_number == try_number - 1,\n arg_2.state == State.QUEUED)\n for dag_id, task_id, execution_date, try_number\n in arg_0.executor.queued_tasks.keys()])\n arg_4 = (arg_1.query(arg_2)\n .filter(or_(*arg_3)))\n arg_5 = (arg_4\n .with_for_update()\n .all())\n if len(arg_5) == 0:\n arg_1.commit()\n return\n\n # set TIs to queued state\n for arg_6 in arg_5:\n arg_6.state = State.SCHEDULED\n\n arg_8 = \"\\n\\t\".join(\n [repr(x) for x in arg_5])\n\n arg_1.commit()\n arg_0.log.info(\"Set the following tasks to scheduled state:\\n\\t%s\", arg_8)"} +{"_id": "doc_315", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=None):\n \"\"\"\n Process a Python file containing Airflow DAGs.\n\n This includes:\n\n 1. Execute the file and look for DAG objects in the namespace.\n 2. Pickle the DAG and save it to the DB (if necessary).\n 3. For each DAG, see what tasks should run and create appropriate task\n instances in the DB.\n 4. Record any errors importing the file into ORM\n 5. 
Kill (in ORM) any task instances belonging to the DAGs that haven't\n issued a heartbeat in a while.\n\n Returns a list of SimpleDag objects that represent the DAGs found in\n the file\n\n :param file_path: the path to the Python file that should be executed\n :type file_path: unicode\n :param zombies: zombie task instances to kill.\n :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]\n :param pickle_dags: whether serialize the DAGs found in the file and\n save them to the db\n :type pickle_dags: bool\n :return: a list of SimpleDags made from the Dags found in the file\n :rtype: list[airflow.utils.dag_processing.SimpleDagBag]\n \"\"\"\n arg_0.log.info(\"Processing file %s for tasks to queue\", arg_1)\n # As DAGs are parsed from this file, they will be converted into SimpleDags\n arg_5 = []\n\n try:\n arg_6 = models.DagBag(arg_1, include_examples=False)\n except Exception:\n arg_0.log.exception(\"Failed at reloading the DAG file %s\", arg_1)\n Stats.incr('dag_file_refresh_error', 1, 1)\n return []\n\n if len(arg_6.dags) > 0:\n arg_0.log.info(\"DAG(s) %s retrieved from %s\", arg_6.dags.keys(), arg_1)\n else:\n arg_0.log.warning(\"No viable dags retrieved from %s\", arg_1)\n arg_0.update_import_errors(arg_4, arg_6)\n return []\n\n # Save individual DAGs in the ORM and update DagModel.last_scheduled_time\n for arg_7 in arg_6.dags.values():\n arg_7.sync_to_db()\n\n arg_8 = [arg_7.dag_id for arg_7 in arg_6.dags.values()\n if arg_7.is_paused]\n\n # Pickle the DAGs (if necessary) and put them into a SimpleDag\n for arg_9 in arg_6.dags:\n # Only return DAGs that are not paused\n if arg_9 not in arg_8:\n arg_7 = arg_6.get_dag(arg_9)\n arg_10 = None\n if arg_3:\n arg_10 = arg_7.pickle(arg_4).id\n arg_5.append(SimpleDag(arg_7, arg_10=arg_10))\n\n if len(arg_0.dag_ids) > 0:\n arg_11 = [arg_7 for arg_7 in arg_6.dags.values()\n if arg_7.dag_id in arg_0.dag_ids and\n arg_7.dag_id not in arg_8]\n else:\n arg_11 = [arg_7 for arg_7 in arg_6.dags.values()\n if not arg_7.parent_dag and\n arg_7.dag_id not in arg_8]\n\n # Not using multiprocessing.Queue() since it's no longer a separate\n # process and due to some unusual behavior. (empty() incorrectly\n # returns true?)\n arg_12 = []\n\n arg_0._process_dags(arg_6, arg_11, arg_12)\n\n for arg_13 in arg_12:\n arg_7 = arg_6.dags[arg_13[0]]\n arg_14 = arg_7.get_task(arg_13[1])\n arg_15 = models.TaskInstance(arg_14, arg_13[2])\n\n arg_15.refresh_from_db(arg_4=arg_4, lock_for_update=True)\n # We can defer checking the task dependency checks to the worker themselves\n # since they can be expensive to run in the scheduler.\n arg_16 = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)\n\n # Only schedule tasks that have their dependencies met, e.g. to avoid\n # a task that recently got its state changed to RUNNING from somewhere\n # other than the scheduler from getting its state overwritten.\n # TODO(aoen): It's not great that we have to check all the task instance\n # dependencies twice; once to get the task scheduled, and again to actually\n # run the task. We should try to come up with a way to only check them once.\n if arg_15.are_dependencies_met(\n arg_16=arg_16,\n arg_4=arg_4,\n verbose=True):\n # Task starts out in the scheduled state. 
All tasks in the\n # scheduled state will be sent to the executor\n arg_15.state = State.SCHEDULED\n\n # Also save this task instance to the DB.\n arg_0.log.info(\"Creating / updating %s in ORM\", arg_15)\n arg_4.merge(arg_15)\n # commit batch\n arg_4.commit()\n\n # Record import errors into the ORM\n try:\n arg_0.update_import_errors(arg_4, arg_6)\n except Exception:\n arg_0.log.exception(\"Error logging import errors!\")\n try:\n arg_6.kill_zombies(arg_2)\n except Exception:\n arg_0.log.exception(\"Error killing zombies!\")\n\n return arg_5"} +{"_id": "doc_316", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Updates the counters per state of the tasks that were running. Can re-add\n to tasks to run in case required.\n\n :param ti_status: the internal status of the backfill job tasks\n :type ti_status: BackfillJob._DagRunTaskStatus\n \"\"\"\n for arg_2, arg_3 in list(arg_1.running.items()):\n arg_3.refresh_from_db()\n if arg_3.state == State.SUCCESS:\n arg_1.succeeded.add(arg_2)\n arg_0.log.debug(\"Task instance %s succeeded. Don't rerun.\", arg_3)\n arg_1.running.pop(arg_2)\n continue\n elif arg_3.state == State.SKIPPED:\n arg_1.skipped.add(arg_2)\n arg_0.log.debug(\"Task instance %s skipped. Don't rerun.\", arg_3)\n arg_1.running.pop(arg_2)\n continue\n elif arg_3.state == State.FAILED:\n arg_0.log.error(\"Task instance %s failed\", arg_3)\n arg_1.failed.add(arg_2)\n arg_1.running.pop(arg_2)\n continue\n # special case: if the task needs to run again put it back\n elif arg_3.state == State.UP_FOR_RETRY:\n arg_0.log.warning(\"Task instance %s is up for retry\", arg_3)\n arg_1.running.pop(arg_2)\n arg_1.to_run[arg_2] = arg_3\n # special case: if the task needs to be rescheduled put it back\n elif arg_3.state == State.UP_FOR_RESCHEDULE:\n arg_0.log.warning(\"Task instance %s is up for reschedule\", arg_3)\n arg_1.running.pop(arg_2)\n arg_1.to_run[arg_2] = arg_3\n # special case: The state of the task can be set to NONE by the task itself\n # when it reaches concurrency limits. It could also happen when the state\n # is changed externally, e.g. by clearing tasks from the ui. We need to cover\n # for that as otherwise those tasks would fall outside of the scope of\n # the backfill suddenly.\n elif arg_3.state == State.NONE:\n arg_0.log.warning(\n \"FIXME: task instance %s state was set to none externally or \"\n \"reaching concurrency limits. Re-adding task to queue.\",\n arg_3\n )\n arg_3.set_state(State.SCHEDULED)\n arg_1.running.pop(arg_2)\n arg_1.to_run[arg_2] = arg_3"} +{"_id": "doc_317", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Returns a dag run for the given run date, which will be matched to an existing\n dag run if available or create a new dag run otherwise. 
If the max_active_runs\n        limit is reached, this function will return None.\n\n        :param run_date: the execution date for the dag run\n        :type run_date: datetime.datetime\n        :param session: the database session object\n        :type session: sqlalchemy.orm.session.Session\n        :return: a DagRun in state RUNNING or None\n        \"\"\"\n        arg_3 = BackfillJob.ID_FORMAT_PREFIX.format(arg_1.isoformat())\n\n        # consider max_active_runs but ignore when running subdags\n        arg_4 = (True\n                 if (arg_0.dag.schedule_interval and\n                     not arg_0.dag.is_subdag)\n                 else False)\n\n        arg_5 = arg_0.dag.get_num_active_runs(external_trigger=False)\n\n        # check if we are scheduling on top of a already existing dag_run\n        # we could find a \"scheduled\" run instead of a \"backfill\"\n        arg_6 = DagRun.find(dag_id=arg_0.dag.dag_id,\n                            execution_date=arg_1,\n                            arg_2=arg_2)\n\n        if arg_6 is not None and len(arg_6) > 0:\n            arg_6 = arg_6[0]\n            if arg_6.state == State.RUNNING:\n                arg_4 = False\n        else:\n            arg_6 = None\n\n        # enforce max_active_runs limit for dag, special cases already\n        # handled by respect_dag_max_active_limit\n        if (arg_4 and\n                arg_5 >= arg_0.dag.max_active_runs):\n            return None\n\n        arg_6 = arg_6 or arg_0.dag.create_dagrun(\n            arg_3=arg_3,\n            execution_date=arg_1,\n            start_date=timezone.utcnow(),\n            arg_8=State.RUNNING,\n            external_trigger=False,\n            arg_2=arg_2,\n            conf=arg_0.conf,\n        )\n\n        # set required transient field\n        arg_6.dag = arg_0.dag\n\n        # explicitly mark as backfill and running\n        arg_6.state = State.RUNNING\n        arg_6.run_id = arg_3\n        arg_6.verify_integrity(arg_2=arg_2)\n        return arg_6"}
+{"_id": "doc_318", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n        \"\"\"\n        Returns a map of task instance key to task instance object for the tasks to\n        run in the given dag run.\n\n        :param dag_run: the dag run to get the tasks from\n        :type dag_run: airflow.models.DagRun\n        :param session: the database session object\n        :type session: sqlalchemy.orm.session.Session\n        \"\"\"\n        arg_3 = {}\n\n        if arg_1 is None:\n            return arg_3\n\n        # check if we have orphaned tasks\n        arg_0.reset_state_for_orphaned_tasks(filter_by_dag_run=arg_1, arg_2=arg_2)\n\n        # for some reason if we don't refresh the reference to run is lost\n        arg_1.refresh_from_db()\n        make_transient(arg_1)\n\n        # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf\n        for arg_4 in arg_1.get_task_instances():\n            # all tasks part of the backfill are scheduled to run\n            if arg_4.state == State.NONE:\n                arg_4.set_state(State.SCHEDULED, arg_2=arg_2)\n            if arg_4.state != State.REMOVED:\n                arg_3[arg_4.key] = arg_4\n\n        return arg_3"}
+{"_id": "doc_319", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n             arg_5, arg_6=None):\n        \"\"\"\n        Computes the dag runs and their respective task instances for\n        the given run dates and executes the task instances.\n        Returns a list of execution dates of the dag runs that were executed.\n\n        :param run_dates: Execution dates for dag runs\n        :type run_dates: list\n        :param ti_status: internal BackfillJob status structure to tis track progress\n        :type ti_status: BackfillJob._DagRunTaskStatus\n        :param executor: the executor to use, it must be previously started\n        :type executor: BaseExecutor\n        :param pickle_id: numeric id of the pickled dag, None if not pickled\n        :type pickle_id: int\n        :param start_date: backfill start date\n        :type start_date: datetime.datetime\n        :param session: the current session object\n        :type session: sqlalchemy.orm.session.Session\n        \"\"\"\n        for arg_7 in arg_1:\n            arg_8 = arg_0._get_dag_run(arg_7, arg_6=arg_6)\n            arg_9 = arg_0._task_instances_for_dag_run(arg_8,\n                                                      arg_6=arg_6)\n            if arg_8 is None:\n                continue\n\n            arg_2.active_runs.append(arg_8)\n            arg_2.to_run.update(arg_9 or {})\n\n        arg_10 = arg_0._process_backfill_task_instances(\n            arg_2=arg_2,\n            arg_3=arg_3,\n            arg_4=arg_4,\n            arg_5=arg_5,\n            arg_6=arg_6)\n\n        arg_2.executed_dag_run_dates.update(arg_10)"}
+{"_id": "doc_320", "title": "", "text": "def Func(arg_0, arg_1=None):\n        \"\"\"Self destruct task if state has been moved away from running externally\"\"\"\n\n        if arg_0.terminating:\n            # ensure termination if processes are created later\n            arg_0.task_runner.terminate()\n            return\n\n        arg_0.task_instance.refresh_from_db()\n        arg_2 = arg_0.task_instance\n\n        arg_3 = get_hostname()\n        arg_4 = arg_3 == arg_2.hostname\n        arg_5 = arg_2.pid == os.getpid()\n\n        if arg_2.state == State.RUNNING:\n            if not arg_4:\n                arg_0.log.warning(\"The recorded hostname %s \"\n                                  \"does not match this instance's hostname \"\n                                  \"%s\", arg_2.hostname, arg_3)\n                raise AirflowException(\"Hostname of job runner does not match\")\n            elif not arg_5:\n                arg_6 = os.getpid()\n                arg_0.log.warning(\"Recorded pid %s does not match \"\n                                  \"the current pid %s\", arg_2.pid, arg_6)\n                raise AirflowException(\"PID of job runner does not match\")\n        elif (\n                arg_0.task_runner.return_code() is None and\n                hasattr(arg_0.task_runner, 'process')\n        ):\n            arg_0.log.warning(\n                \"State of this instance has been externally set to %s. \"\n                \"Taking the poison pill.\",\n                arg_2.state\n            )\n            arg_0.task_runner.terminate()\n            arg_0.terminating = True"}
+{"_id": "doc_321", "title": "", "text": "def Func(arg_0, arg_1):\n        \"\"\"\n        Provides a client for interacting with the Cloud Spanner API.\n\n        :param project_id: The ID of the GCP project.\n        :type project_id: str\n        :return: google.cloud.spanner_v1.client.Client\n        :rtype: object\n        \"\"\"\n        if not arg_0._client:\n            arg_0._client = Client(project=arg_1, credentials=arg_0._get_credentials())\n        return arg_0._client"}
+{"_id": "doc_322", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n        \"\"\"\n        Gets information about a particular instance.\n\n        :param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner\n            database. If set to None or missing, the default project_id from the GCP connection is used.\n        :type project_id: str\n        :param instance_id: The ID of the Cloud Spanner instance.\n        :type instance_id: str\n        :return: google.cloud.spanner_v1.instance.Instance\n        :rtype: object\n        \"\"\"\n        arg_3 = arg_0._get_client(arg_2=arg_2).instance(arg_1=arg_1)\n        if not arg_3.exists():\n            return None\n        return arg_3"}
+{"_id": "doc_323", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n                           arg_5, arg_6):\n        \"\"\"\n        Invokes a method on a given instance by applying a specified Callable.\n\n        :param project_id: The ID of the GCP project that owns the Cloud Spanner\n            database.\n        :type project_id: str\n        :param instance_id: The ID of the instance.\n        :type instance_id: str\n        :param configuration_name: Name of the instance configuration defining how the\n            instance will be created. Required for instances which do not yet exist.\n        :type configuration_name: str\n        :param node_count: (Optional) Number of nodes allocated to the instance.\n        :type node_count: int\n        :param display_name: (Optional) The display name for the instance in the Cloud\n            Console UI. (Must be between 4 and 30 characters.) 
If this value is not set\n in the constructor, will fall back to the instance ID.\n :type display_name: str\n :param func: Method of the instance to be called.\n :type func: Callable\n \"\"\"\n # noinspection PyUnresolvedReferences\n arg_7 = arg_0._get_client(arg_1=arg_1).instance(\n arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4, arg_5=arg_5)\n try:\n arg_8 = arg_6(arg_7) # type: Operation\n except GoogleAPICallError as e:\n arg_0.log.error('An error occurred: %s. Exiting.', e.message)\n raise e\n\n if arg_8:\n arg_9 = arg_8.result()\n arg_0.log.info(arg_9)"} +{"_id": "doc_324", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5=None):\n \"\"\"\n Creates a new Cloud Spanner instance.\n\n :param instance_id: The ID of the Cloud Spanner instance.\n :type instance_id: str\n :param configuration_name: The name of the instance configuration defining how the\n instance will be created. Possible configuration values can be retrieved via\n https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list\n :type configuration_name: str\n :param node_count: (Optional) The number of nodes allocated to the Cloud Spanner\n instance.\n :type node_count: int\n :param display_name: (Optional) The display name for the instance in the GCP\n Console. Must be between 4 and 30 characters. If this value is not set in\n the constructor, the name falls back to the instance ID.\n :type display_name: str\n :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. If set to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n arg_0._apply_to_instance(arg_5, arg_1, arg_2,\n arg_3, arg_4, lambda x: x.create())"} +{"_id": "doc_325", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5=None):\n \"\"\"\n Updates an existing Cloud Spanner instance.\n\n :param instance_id: The ID of the Cloud Spanner instance.\n :type instance_id: str\n :param configuration_name: The name of the instance configuration defining how the\n instance will be created. Possible configuration values can be retrieved via\n https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list\n :type configuration_name: str\n :param node_count: (Optional) The number of nodes allocated to the Cloud Spanner\n instance.\n :type node_count: int\n :param display_name: (Optional) The display name for the instance in the GCP\n Console. Must be between 4 and 30 characters. If this value is not set in\n the constructor, the name falls back to the instance ID.\n :type display_name: str\n :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. If set to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n return arg_0._apply_to_instance(arg_5, arg_1, arg_2,\n arg_3, arg_4, lambda x: x.update())"} +{"_id": "doc_326", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Deletes an existing Cloud Spanner instance.\n\n :param instance_id: The ID of the Cloud Spanner instance.\n :type instance_id: str\n :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. 
If set to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: None\n \"\"\"\n\n arg_3 = arg_0._get_client(arg_2=arg_2).instance(arg_1)\n try:\n arg_3.delete()\n return\n except GoogleAPICallError as e:\n arg_0.log.error('An error occurred: %s. Exiting.', e.message)\n raise e"} +{"_id": "doc_327", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Retrieves a database in Cloud Spanner. If the database does not exist\n in the specified instance, it returns None.\n\n :param instance_id: The ID of the Cloud Spanner instance.\n :type instance_id: str\n :param database_id: The ID of the database in Cloud Spanner.\n :type database_id: str\n :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. If set to None or missing, the default project_id from the GCP connection is used.\n :type project_id: str\n :return: Database object or None if database does not exist\n :rtype: google.cloud.spanner_v1.database.Database or None\n \"\"\"\n\n arg_4 = arg_0._get_client(arg_3=arg_3).instance(\n arg_1=arg_1)\n if not arg_4.exists():\n raise AirflowException(\"The instance {} does not exist in project {} !\".\n format(arg_1, arg_3))\n arg_5 = arg_4.database(arg_2=arg_2)\n if not arg_5.exists():\n return None\n else:\n return arg_5"} +{"_id": "doc_328", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Creates a new database in Cloud Spanner.\n\n :type project_id: str\n :param instance_id: The ID of the Cloud Spanner instance.\n :type instance_id: str\n :param database_id: The ID of the database to create in Cloud Spanner.\n :type database_id: str\n :param ddl_statements: The string list containing DDL for the new database.\n :type ddl_statements: list[str]\n :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. If set to None or missing, the default project_id from the GCP connection is used.\n :return: None\n \"\"\"\n\n arg_5 = arg_0._get_client(arg_4=arg_4).instance(\n arg_1=arg_1)\n if not arg_5.exists():\n raise AirflowException(\"The instance {} does not exist in project {} !\".\n format(arg_1, arg_4))\n arg_6 = arg_5.database(arg_2=arg_2,\n arg_3=arg_3)\n try:\n arg_7 = arg_6.create() # type: Operation\n except GoogleAPICallError as e:\n arg_0.log.error('An error occurred: %s. Exiting.', e.message)\n raise e\n\n if arg_7:\n arg_8 = arg_7.result()\n arg_0.log.info(arg_8)\n return"} +{"_id": "doc_329", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=None,\n arg_5=None):\n \"\"\"\n Updates DDL of a database in Cloud Spanner.\n\n :type project_id: str\n :param instance_id: The ID of the Cloud Spanner instance.\n :type instance_id: str\n :param database_id: The ID of the database in Cloud Spanner.\n :type database_id: str\n :param ddl_statements: The string list containing DDL for the new database.\n :type ddl_statements: list[str]\n :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. 
If set to None or missing, the default project_id from the GCP connection is used.\n        :param operation_id: (Optional) The unique per database operation ID that can be\n            specified to implement idempotency check.\n        :type operation_id: str\n        :return: None\n        \"\"\"\n\n        arg_6 = arg_0._get_client(arg_4=arg_4).instance(\n            arg_1=arg_1)\n        if not arg_6.exists():\n            raise AirflowException(\"The instance {} does not exist in project {} !\".\n                                   format(arg_1, arg_4))\n        arg_7 = arg_6.database(arg_2=arg_2)\n        try:\n            arg_8 = arg_7.update_ddl(\n                arg_3=arg_3, arg_5=arg_5)\n            if arg_8:\n                arg_9 = arg_8.result()\n                arg_0.log.info(arg_9)\n            return\n        except AlreadyExists as e:\n            if e.code == 409 and arg_5 in e.message:\n                arg_0.log.info(\"Replayed update_ddl message - the operation id %s \"\n                               \"was already done before.\", arg_5)\n                return\n        except GoogleAPICallError as e:\n            arg_0.log.error('An error occurred: %s. Exiting.', e.message)\n            raise e"}
+{"_id": "doc_330", "title": "", "text": "def Func(arg_0):\n        \"\"\"\n        Returns a cassandra Session object\n        \"\"\"\n        if arg_0.session and not arg_0.session.is_shutdown:\n            return arg_0.session\n        arg_0.session = arg_0.cluster.connect(arg_0.keyspace)\n        return arg_0.session"}
+{"_id": "doc_331", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n        \"\"\"\n        Checks if a record exists in Cassandra\n\n        :param table: Target Cassandra table.\n            Use dot notation to target a specific keyspace.\n        :type table: str\n        :param keys: The keys and their values to check the existence.\n        :type keys: dict\n        \"\"\"\n        arg_3 = arg_0.keyspace\n        if '.' in arg_1:\n            arg_3, arg_1 = arg_1.split('.', 1)\n        arg_4 = \" AND \".join(\"{}=%({})s\".format(key, key) for key in arg_2.keys())\n        arg_5 = \"SELECT * FROM {keyspace}.{table} WHERE {keys}\".format(\n            arg_3=arg_3, arg_1=arg_1, arg_2=arg_4)\n\n        try:\n            arg_6 = arg_0.get_conn().execute(arg_5, arg_2)\n            return arg_6.one() is not None\n        except Exception:\n            return False"}
+{"_id": "doc_332", "title": "", "text": "def Func(arg_0):\n        \"\"\"\n        Construct the command to poll the driver status.\n\n        :return: full command to be executed\n        \"\"\"\n        arg_1 = arg_0._get_spark_binary_path()\n\n        # The url ot the spark master\n        arg_1 += [\"--master\", arg_0._connection['master']]\n\n        # The driver id so we can poll for its status\n        if arg_0._driver_id:\n            arg_1 += [\"--status\", arg_0._driver_id]\n        else:\n            raise AirflowException(\n                \"Invalid status: attempted to poll driver \" +\n                \"status but no driver id is known. Giving up.\")\n\n        arg_0.log.debug(\"Poll driver status cmd: %s\", arg_1)\n\n        return arg_1"}
+{"_id": "doc_333", "title": "", "text": "def Func(arg_0, arg_1=\"\", **arg_2):\n        \"\"\"\n        Remote Popen to execute the spark-Func job\n\n        :param application: Submitted application, jar or py file\n        :type application: str\n        :param kwargs: extra arguments to Popen (see subprocess.Popen)\n        \"\"\"\n        arg_3 = arg_0._build_spark_Func_command(arg_1)\n\n        if hasattr(arg_0, '_env'):\n            arg_4 = os.environ.copy()\n            arg_4.update(arg_0._env)\n            arg_2[\"env\"] = arg_4\n\n        arg_0._Func_sp = subprocess.Popen(arg_3,\n                                          stdout=subprocess.PIPE,\n                                          stderr=subprocess.STDOUT,\n                                          bufsize=-1,\n                                          universal_newlines=True,\n                                          **arg_2)\n\n        arg_0._process_spark_Func_log(iter(arg_0._Func_sp.stdout.readline, ''))\n        arg_6 = arg_0._Func_sp.wait()\n\n        # Check spark-Func return code. In Kubernetes mode, also check the value\n        # of exit code in the log, as it may differ.\n        if arg_6 or (arg_0._is_kubernetes and arg_0._spark_exit_code != 0):\n            raise AirflowException(\n                \"Cannot execute: {}. Error code is: {}.\".format(\n                    arg_3, arg_6\n                )\n            )\n\n        arg_0.log.debug(\"Should track driver: {}\".format(arg_0._should_track_driver_status))\n\n        # We want the Airflow job to wait until the Spark driver is finished\n        if arg_0._should_track_driver_status:\n            if arg_0._driver_id is None:\n                raise AirflowException(\n                    \"No driver id is known: something went wrong when executing \" +\n                    \"the spark Func command\"\n                )\n\n            # We start with the SUBMITTED status as initial status\n            arg_0._driver_status = \"SUBMITTED\"\n\n            # Start tracking the driver status (blocking function)\n            arg_0._start_driver_status_tracking()\n\n            if arg_0._driver_status != \"FINISHED\":\n                raise AirflowException(\n                    \"ERROR : Driver {} badly exited with status {}\"\n                    .format(arg_0._driver_id, arg_0._driver_status)\n                )"}
+{"_id": "doc_334", "title": "", "text": "def Func(arg_0, arg_1):\n        \"\"\"\n        Processes the log files and extracts useful information out of it.\n\n        If the deploy-mode is 'client', log the output of the submit command as those\n        are the output logs of the Spark worker directly.\n\n        Remark: If the driver needs to be tracked for its status, the log-level of the\n        spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)\n\n        :param itr: An iterator which iterates over the input of the subprocess\n        \"\"\"\n        # Consume the iterator\n        for arg_2 in arg_1:\n            arg_2 = arg_2.strip()\n            # If we run yarn cluster mode, we want to extract the application id from\n            # the logs so we can kill the application when we stop it unexpectedly\n            if arg_0._is_yarn and arg_0._connection['deploy_mode'] == 'cluster':\n                arg_3 = re.search('(application[0-9_]+)', arg_2)\n                if arg_3:\n                    arg_0._yarn_application_id = arg_3.groups()[0]\n                    arg_0.log.info(\"Identified spark driver id: %s\",\n                                   arg_0._yarn_application_id)\n\n            # If we run Kubernetes cluster mode, we want to extract the driver pod id\n            # from the logs so we can kill the application when we stop it unexpectedly\n            elif arg_0._is_kubernetes:\n                arg_3 = re.search(r'\\s*pod name: ((.+?)-([a-z0-9]+)-driver)', arg_2)\n                if arg_3:\n                    arg_0._kubernetes_driver_pod = arg_3.groups()[0]\n                    arg_0.log.info(\"Identified spark driver pod: %s\",\n                                   arg_0._kubernetes_driver_pod)\n\n                # Store the Spark Exit code\n                arg_6 = re.search(r'\\s*Exit code: (\\d+)', arg_2)\n                if arg_6:\n                    arg_0._spark_exit_code = int(arg_6.groups()[0])\n\n            # if we run in standalone cluster mode and we want to track the driver status\n            # we need to extract the driver id from the logs. This allows us to poll for\n            # the status using the driver id. 
Also, we can kill the driver when needed.\n elif arg_0._should_track_driver_status and not arg_0._driver_id:\n arg_8 = re.search(r'(driver-[0-9\\-]+)', arg_2)\n if arg_8:\n arg_0._driver_id = arg_8.groups()[0]\n arg_0.log.info(\"identified spark driver id: {}\"\n .format(arg_0._driver_id))\n\n else:\n arg_0.log.info(arg_2)\n\n arg_0.log.debug(\"spark submit log: {}\".format(arg_2))"} +{"_id": "doc_335", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n parses the logs of the spark driver status query process\n\n :param itr: An iterator which iterates over the input of the subprocess\n \"\"\"\n # Consume the iterator\n for arg_2 in arg_1:\n arg_2 = arg_2.strip()\n\n # Check if the log line is about the driver status and extract the status.\n if \"driverState\" in arg_2:\n arg_0._driver_status = arg_2.split(' : ')[1] \\\n .replace(',', '').replace('\\\"', '').strip()\n\n arg_0.log.debug(\"spark driver status log: {}\".format(arg_2))"} +{"_id": "doc_336", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the task runner that can be used to run the given job.\n\n :param local_task_job: The LocalTaskJob associated with the TaskInstance\n that needs to be executed.\n :type local_task_job: airflow.jobs.LocalTaskJob\n :return: The task runner to use to run the task.\n :rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner\n \"\"\"\n if _TASK_RUNNER == \"StandardTaskRunner\":\n return StandardTaskRunner(arg_0)\n elif _TASK_RUNNER == \"CgroupTaskRunner\":\n from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner\n return CgroupTaskRunner(arg_0)\n else:\n raise AirflowException(\"Unknown task runner type {}\".format(_TASK_RUNNER))"} +{"_id": "doc_337", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Try to use a waiter from the below pull request\n\n * https://github.com/boto/botocore/pull/1307\n\n If the waiter is not available apply a exponential backoff\n\n * docs.aws.amazon.com/general/latest/gr/api-retries.html\n \"\"\"\n try:\n arg_1 = arg_0.client.get_waiter('job_execution_complete')\n arg_1.config.max_attempts = sys.maxsize # timeout is managed by airflow\n arg_1.wait(jobs=[arg_0.jobId])\n except ValueError:\n # If waiter not available use expo\n arg_4 = True\n arg_5 = 0\n\n while arg_5 < arg_0.max_retries and arg_4:\n arg_0.log.info('AWS Batch retry in the next %s seconds', arg_5)\n arg_6 = arg_0.client.describe_jobs(\n jobs=[arg_0.jobId]\n )\n if arg_6['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']:\n arg_4 = False\n\n sleep(1 + pow(arg_5 * 0.1, 2))\n arg_5 += 1"} +{"_id": "doc_338", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Queries mysql and returns a cursor to the results.\n \"\"\"\n arg_1 = MySqlHook(mysql_conn_id=arg_0.mysql_conn_id)\n arg_2 = arg_1.get_conn()\n arg_3 = arg_2.cursor()\n arg_3.execute(arg_0.sql)\n return arg_3"} +{"_id": "doc_339", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Configure a csv writer with the file_handle and write schema\n as headers for the new file.\n \"\"\"\n arg_3 = csv.writer(arg_1, encoding='utf-8',\n delimiter=arg_0.field_delimiter)\n arg_3.writerow(arg_2)\n return arg_3"} +{"_id": "doc_340", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a dict of column name and column type based on self.schema if not None.\n \"\"\"\n arg_1 = []\n if isinstance(arg_0.schema, string_types):\n arg_1 = json.loads(arg_0.schema)\n elif isinstance(arg_0.schema, list):\n arg_1 = arg_0.schema\n elif arg_0.schema is not None:\n arg_0.log.warn('Using default schema due to unexpected type.'\n 
'Should be a string or list.')\n\n arg_2 = {}\n try:\n arg_2 = {col['name']: col['type'] for col in arg_1}\n except KeyError:\n arg_0.log.warn('Using default schema due to missing name or type. Please '\n 'refer to: https://cloud.google.com/bigquery/docs/schemas'\n '#specifying_a_json_schema_file')\n return arg_2"} +{"_id": "doc_341", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Execute sqoop job\n \"\"\"\n arg_0.hook = SqoopHook(\n conn_id=arg_0.conn_id,\n verbose=arg_0.verbose,\n num_mappers=arg_0.num_mappers,\n hcatalog_database=arg_0.hcatalog_database,\n hcatalog_table=arg_0.hcatalog_table,\n properties=arg_0.properties\n )\n\n if arg_0.cmd_type == 'export':\n arg_0.hook.export_table(\n table=arg_0.table,\n export_dir=arg_0.export_dir,\n input_null_string=arg_0.input_null_string,\n input_null_non_string=arg_0.input_null_non_string,\n staging_table=arg_0.staging_table,\n clear_staging_table=arg_0.clear_staging_table,\n enclosed_by=arg_0.enclosed_by,\n escaped_by=arg_0.escaped_by,\n input_fields_terminated_by=arg_0.input_fields_terminated_by,\n input_lines_terminated_by=arg_0.input_lines_terminated_by,\n input_optionally_enclosed_by=arg_0.input_optionally_enclosed_by,\n batch=arg_0.batch,\n relaxed_isolation=arg_0.relaxed_isolation,\n extra_export_options=arg_0.extra_export_options)\n elif arg_0.cmd_type == 'import':\n # add create hcatalog table to extra import options if option passed\n # if new params are added to constructor can pass them in here\n # so don't modify sqoop_hook for each param\n if arg_0.create_hcatalog_table:\n arg_0.extra_import_options['create-hcatalog-table'] = ''\n\n if arg_0.table and arg_0.query:\n raise AirflowException(\n 'Cannot specify query and table together. Need to specify either or.'\n )\n\n if arg_0.table:\n arg_0.hook.import_table(\n table=arg_0.table,\n target_dir=arg_0.target_dir,\n append=arg_0.append,\n file_type=arg_0.file_type,\n columns=arg_0.columns,\n split_by=arg_0.split_by,\n where=arg_0.where,\n direct=arg_0.direct,\n driver=arg_0.driver,\n arg_3=arg_0.extra_import_options)\n elif arg_0.query:\n arg_0.hook.import_query(\n query=arg_0.query,\n target_dir=arg_0.target_dir,\n append=arg_0.append,\n file_type=arg_0.file_type,\n split_by=arg_0.split_by,\n direct=arg_0.direct,\n driver=arg_0.driver,\n arg_3=arg_0.extra_import_options)\n else:\n raise AirflowException(\n \"Provide query or table parameter to import using Sqoop\"\n )\n else:\n raise AirflowException(\"cmd_type should be 'import' or 'export'\")"} +{"_id": "doc_342", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the extra property by deserializing json.\"\"\"\n arg_1 = {}\n if arg_0.extra:\n try:\n arg_1 = json.loads(arg_0.extra)\n except Exception as e:\n arg_0.log.exception(e)\n arg_0.log.error(\"Failed parsing the json for conn_id %s\", arg_0.conn_id)\n\n return arg_1"} +{"_id": "doc_343", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Get a set of dates as a list based on a start, end and delta, delta\n can be something that can be added to `datetime.datetime`\n or a cron expression as a `str`\n\n :Example::\n\n Func(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))\n [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),\n datetime.datetime(2016, 1, 3, 0, 0)]\n Func(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')\n [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),\n datetime.datetime(2016, 1, 3, 0, 0)]\n Func(datetime(2016, 1, 1), datetime(2016, 
3, 3), delta=\"0 0 0 * *\")\n [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),\n datetime.datetime(2016, 3, 1, 0, 0)]\n\n :param start_date: anchor date to start the series from\n :type start_date: datetime.datetime\n :param end_date: right boundary for the date range\n :type end_date: datetime.datetime\n :param num: alternatively to end_date, you can specify the number of\n number of entries you want in the range. This number can be negative,\n output will always be sorted regardless\n :type num: int\n \"\"\"\n if not arg_3:\n return []\n if arg_1 and arg_0 > arg_1:\n raise Exception(\"Wait. start_date needs to be before end_date\")\n if arg_1 and arg_2:\n raise Exception(\"Wait. Either specify end_date OR num\")\n if not arg_1 and not arg_2:\n arg_1 = timezone.utcnow()\n\n arg_4 = False\n arg_5 = arg_0.tzinfo\n if isinstance(arg_3, six.string_types):\n arg_4 = True\n arg_0 = timezone.make_naive(arg_0, arg_5)\n arg_6 = croniter(arg_3, arg_0)\n elif isinstance(arg_3, timedelta):\n arg_3 = abs(arg_3)\n arg_7 = []\n if arg_1:\n if timezone.is_naive(arg_0):\n arg_1 = timezone.make_naive(arg_1, arg_5)\n while arg_0 <= arg_1:\n if timezone.is_naive(arg_0):\n arg_7.append(timezone.make_aware(arg_0, arg_5))\n else:\n arg_7.append(arg_0)\n\n if arg_4:\n arg_0 = arg_6.get_next(datetime)\n else:\n arg_0 += arg_3\n else:\n for arg_8 in range(abs(arg_2)):\n if timezone.is_naive(arg_0):\n arg_7.append(timezone.make_aware(arg_0, arg_5))\n else:\n arg_7.append(arg_0)\n\n if arg_4:\n if arg_2 > 0:\n arg_0 = arg_6.get_next(datetime)\n else:\n arg_0 = arg_6.get_prev(datetime)\n else:\n if arg_2 > 0:\n arg_0 += arg_3\n else:\n arg_0 -= arg_3\n return sorted(arg_7)"} +{"_id": "doc_344", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=0, arg_3=0, arg_4=0):\n \"\"\"\n Get a datetime object representing `n` days ago. 
By default the time is\n set to midnight.\n \"\"\"\n arg_5 = timezone.utcnow().replace(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4)\n return arg_5 - timedelta(days=arg_0)"} +{"_id": "doc_345", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Initialize the role with the permissions and related view-menus.\n\n :param role_name:\n :param role_vms:\n :param role_perms:\n :return:\n \"\"\"\n arg_4 = arg_0.get_session.query(sqla_models.PermissionView).all()\n arg_4 = [p for p in arg_4 if p.permission and p.view_menu]\n\n arg_5 = arg_0.find_role(arg_1)\n if not arg_5:\n arg_5 = arg_0.add_role(arg_1)\n\n if len(arg_5.permissions) == 0:\n arg_0.log.info('Initializing permissions for role:%s in the database.', arg_1)\n arg_6 = set()\n for arg_7 in arg_4:\n if arg_7.view_menu.name in arg_2 and arg_7.permission.name in arg_3:\n arg_6.add(arg_7)\n arg_5.permissions = list(arg_6)\n arg_0.get_session.merge(arg_5)\n arg_0.get_session.commit()\n else:\n arg_0.log.debug('Existing permissions for the role:%s '\n 'within the database will persist.', arg_1)"} +{"_id": "doc_346", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delete the given Role\n\n :param role_name: the name of a role in the ab_role table\n \"\"\"\n arg_2 = arg_0.get_session\n arg_3 = arg_2.query(sqla_models.Role)\\\n .filter(sqla_models.Role.name == arg_1)\\\n .first()\n if arg_3:\n arg_0.log.info(\"Deleting role '%s'\", arg_1)\n arg_2.delete(arg_3)\n arg_2.commit()\n else:\n raise AirflowException(\"Role named '{}' does not exist\".format(\n arg_1))"} +{"_id": "doc_347", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if arg_1 is None:\n arg_1 = g.user\n if arg_1.is_anonymous:\n arg_2 = appbuilder.config.get('AUTH_ROLE_PUBLIC')\n return [appbuilder.security_manager.find_role(arg_2)] \\\n if arg_2 else []\n return arg_1.roles"} +{"_id": "doc_348", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a set of tuples with the perm name and view menu name\n \"\"\"\n arg_1 = set()\n for arg_2 in arg_0.get_user_roles():\n arg_1.update({(arg_3.permission.name, arg_3.view_menu.name)\n for arg_3 in arg_2.permissions})\n return arg_1"} +{"_id": "doc_349", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Whether the user has this role name\n \"\"\"\n if not isinstance(arg_1, list):\n arg_1 = [arg_1]\n return any(\n [arg_2.name in arg_1 for arg_2 in arg_0.get_user_roles()])"} +{"_id": "doc_350", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Whether the user has this perm\n \"\"\"\n if hasattr(arg_0, 'perms'):\n if (arg_1, arg_2) in arg_0.perms:\n return True\n # rebuild the permissions set\n arg_0._get_and_cache_perms()\n return (arg_1, arg_2) in arg_0.perms"} +{"_id": "doc_351", "title": "", "text": "def Func(arg_0):\n \"\"\"\n FAB leaves faulty permissions that need to be cleaned up\n \"\"\"\n arg_0.log.debug('Cleaning faulty perms')\n arg_1 = arg_0.get_session\n arg_2 = (\n arg_1.query(sqla_models.PermissionView)\n .filter(or_(\n sqla_models.PermissionView.permission == None, # NOQA\n sqla_models.PermissionView.view_menu == None, # NOQA\n ))\n )\n arg_3 = arg_2.delete()\n arg_1.commit()\n if arg_3:\n arg_0.log.info('Deleted %s faulty permissions', arg_3)"} +{"_id": "doc_352", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add the new permission , view_menu to ab_permission_view_role if not 
exists.\n It will add the related entry to ab_permission\n and ab_view_menu two meta tables as well.\n\n :param permission_name: Name of the permission.\n :type permission_name: str\n :param view_menu_name: Name of the view-menu\n :type view_menu_name: str\n :return:\n \"\"\"\n arg_3 = arg_0.find_permission(arg_1)\n arg_4 = arg_0.find_view_menu(arg_2)\n arg_5 = None\n if arg_3 and arg_4:\n arg_5 = arg_0.get_session.query(arg_0.permissionview_model).filter_by(\n arg_3=arg_3, arg_4=arg_4).first()\n if not arg_5 and arg_1 and arg_2:\n arg_0.add_permission_view_menu(arg_1, arg_2)"} +{"_id": "doc_353", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create perm-vm if not exist and insert into FAB security model for all-dags.\n \"\"\"\n # create perm for global logical dag\n for arg_1 in arg_0.DAG_VMS:\n for arg_2 in arg_0.DAG_PERMS:\n arg_0._merge_perm(permission_name=arg_2,\n view_menu_name=arg_1)"} +{"_id": "doc_354", "title": "", "text": "def Func():\n \"\"\"\n Deferred load of Fernet key.\n\n This function could fail either because Cryptography is not installed\n or because the Fernet key is invalid.\n\n :return: Fernet object\n :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet\n \"\"\"\n global arg_2\n arg_0 = LoggingMixin().log\n\n if arg_2:\n return arg_2\n try:\n from cryptography.fernet import Fernet, MultiFernet, InvalidToken\n global arg_1\n arg_1 = InvalidToken\n\n except BuiltinImportError:\n arg_0.warning(\n \"cryptography not found - values will not be stored encrypted.\"\n )\n arg_2 = NullFernet()\n return arg_2\n\n try:\n arg_3 = configuration.conf.get('core', 'FERNET_KEY')\n if not arg_3:\n arg_0.warning(\n \"empty cryptography key - values will not be stored encrypted.\"\n )\n arg_2 = NullFernet()\n else:\n arg_2 = MultiFernet([\n Fernet(fernet_part.encode('utf-8'))\n for fernet_part in arg_3.split(',')\n ])\n arg_2.is_encrypted = True\n except (ValueError, TypeError) as ve:\n raise AirflowException(\"Could not create Fernet object: {}\".format(ve))\n\n return arg_2"} +{"_id": "doc_355", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks for existence of the partition in the AWS Glue Catalog table\n \"\"\"\n if '.' in arg_0.table_name:\n arg_0.database_name, arg_0.table_name = arg_0.table_name.split('.')\n arg_0.log.info(\n 'Poking for table %s. 
%s, expression %s', arg_0.database_name, arg_0.table_name, arg_0.expression\n )\n\n return arg_0.get_hook().check_for_partition(\n arg_0.database_name, arg_0.table_name, arg_0.expression)"} +{"_id": "doc_356", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check for message on subscribed queue and write to xcom the message with key ``messages``\n\n :param context: the context object\n :type context: dict\n :return: ``True`` if message is available or ``False``\n \"\"\"\n\n arg_2 = SQSHook(aws_conn_id=arg_0.aws_conn_id)\n arg_3 = arg_2.get_conn()\n\n arg_0.log.info('SQSSensor checking for message on queue: %s', arg_0.sqs_queue)\n\n arg_4 = arg_3.receive_message(QueueUrl=arg_0.sqs_queue,\n MaxNumberOfMessages=arg_0.max_messages,\n WaitTimeSeconds=arg_0.wait_time_seconds)\n\n arg_0.log.info(\"reveived message %s\", str(arg_4))\n\n if 'Messages' in arg_4 and len(arg_4['Messages']) > 0:\n\n arg_5 = [{'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']}\n for message in arg_4['Messages']]\n\n arg_6 = arg_3.delete_message_batch(QueueUrl=arg_0.sqs_queue,\n Entries=arg_5)\n\n if 'Successful' in arg_6:\n arg_1['ti'].xcom_push(key='messages', value=arg_4)\n return True\n else:\n raise AirflowException(\n 'Delete SQS Messages failed ' + str(arg_6) + ' for messages ' + str(arg_4))\n\n return False"} +{"_id": "doc_357", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a snakebite HDFSClient object.\n \"\"\"\n # When using HAClient, proxy_user must be the same, so is ok to always\n # take the first.\n arg_1 = arg_0.proxy_user\n arg_2 = arg_0.autoconfig\n arg_3 = configuration.conf.get('core', 'security') == 'kerberos'\n\n try:\n arg_4 = arg_0.Funcections(arg_0.hdfs_conn_id)\n\n if not arg_1:\n arg_1 = arg_4[0].login\n if not arg_2:\n arg_2 = arg_4[0].extra_dejson.get('autoconfig',\n False)\n arg_5 = arg_4[0].extra_dejson.get(\n 'hdfs_namenode_principal')\n except AirflowException:\n if not arg_2:\n raise\n\n if arg_2:\n # will read config info from $HADOOP_HOME conf files\n arg_6 = AutoConfigClient(arg_1=arg_1,\n arg_3=arg_3)\n elif len(arg_4) == 1:\n arg_6 = Client(arg_4[0].host, arg_4[0].port,\n arg_1=arg_1, arg_3=arg_3,\n arg_5=arg_5)\n elif len(arg_4) > 1:\n arg_7 = [Namenode(conn.host, conn.port) for conn in arg_4]\n arg_6 = HAClient(arg_7, arg_1=arg_1,\n arg_3=arg_3,\n arg_5=arg_5)\n else:\n raise HDFSHookException(\"conn_id doesn't exist in the repository \"\n \"and autoconfig is not specified\")\n\n return arg_6"} +{"_id": "doc_358", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Establishes a connection depending on the security mode set via config or environment variable.\n\n :return: a hdfscli InsecureClient or KerberosClient object.\n :rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient\n \"\"\"\n arg_1 = arg_0.Funcections(arg_0.webhdfs_conn_id)\n\n for arg_2 in arg_1:\n try:\n arg_0.log.debug('Trying namenode %s', arg_2.host)\n arg_3 = arg_0._get_client(arg_2)\n arg_3.status('/')\n arg_0.log.debug('Using namenode %s for hook', arg_2.host)\n return arg_3\n except HdfsError as hdfs_error:\n arg_0.log.debug('Read operation on namenode %s failed with error: %s',\n arg_2.host, hdfs_error)\n\n arg_4 = [arg_2.host for arg_2 in arg_1]\n arg_5 = 'Read operations failed on the namenodes below:\\n{hosts}'.format(\n arg_4='\\n'.join(arg_4))\n raise AirflowWebHDFSHookException(arg_5)"} +{"_id": "doc_359", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check for the existence of a path in HDFS by querying FileStatus.\n\n :param hdfs_path: The 
path to check.\n :type hdfs_path: str\n :return: True if the path exists and False if not.\n :rtype: bool\n \"\"\"\n arg_2 = arg_0.get_conn()\n\n arg_3 = arg_2.status(arg_1, strict=False)\n return bool(arg_3)"} +{"_id": "doc_360", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True, arg_4=1, **arg_5):\n r\"\"\"\n Uploads a file to HDFS.\n\n :param source: Local path to file or folder.\n If it's a folder, all the files inside of it will be uploaded.\n .. note:: This implies that folders empty of files will not be created remotely.\n\n :type source: str\n :param destination: PTarget HDFS path.\n If it already exists and is a directory, files will be uploaded inside.\n :type destination: str\n :param overwrite: Overwrite any existing file or directory.\n :type overwrite: bool\n :param parallelism: Number of threads to use for parallelization.\n A value of `0` (or negative) uses as many threads as there are files.\n :type parallelism: int\n :param \\**kwargs: Keyword arguments forwarded to :meth:`hdfs.client.Client.upload`.\n \"\"\"\n arg_6 = arg_0.get_conn()\n\n arg_6.upload(hdfs_path=arg_2,\n local_path=arg_1,\n arg_3=arg_3,\n n_threads=arg_4,\n **arg_5)\n arg_0.log.debug(\"Uploaded file %s to %s\", arg_1, arg_2)"} +{"_id": "doc_361", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Establish a connection to pinot broker through pinot dbqpi.\n \"\"\"\n arg_1 = arg_0.Funcection(arg_0.pinot_broker_conn_id)\n arg_2 = connect(\n host=arg_1.host,\n port=arg_1.port,\n path=arg_1.extra_dejson.get('endpoint', '/pql'),\n scheme=arg_1.extra_dejson.get('schema', 'http')\n )\n arg_0.log.info('Get the connection to pinot '\n 'broker on {host}'.format(host=arg_1.host))\n return arg_2"} +{"_id": "doc_362", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the connection uri for pinot broker.\n\n e.g: http://localhost:9000/pql\n \"\"\"\n arg_1 = arg_0.get_connection(getattr(arg_0, arg_0.conn_name_attr))\n arg_2 = arg_1.host\n if arg_1.port is not None:\n arg_2 += ':{port}'.format(port=arg_1.port)\n arg_3 = 'http' if not arg_1.conn_type else arg_1.conn_type\n arg_4 = arg_1.extra_dejson.get('endpoint', 'pql')\n return '{conn_type}://{host}/{endpoint}'.format(\n arg_3=arg_3, arg_2=arg_2, arg_4=arg_4)"} +{"_id": "doc_363", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert native python ``datetime.time`` object to a format supported by the API\n \"\"\"\n return {HOURS: arg_0.hour, MINUTES: arg_0.minute, SECONDS: arg_0.second}"} +{"_id": "doc_364", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Executes the sql and returns a pandas dataframe\n\n :param sql: the sql statement to be executed (str) or a list of\n sql statements to execute\n :type sql: str or list\n :param parameters: The parameters to render the SQL query with.\n :type parameters: mapping or iterable\n \"\"\"\n import pandas.io.sql as psql\n\n with closing(arg_0.get_conn()) as conn:\n return psql.read_sql(arg_1, con=conn, params=arg_2)"} +{"_id": "doc_365", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=1000,\n arg_5=False):\n \"\"\"\n A generic way to insert a set of tuples into a table,\n a new transaction is created every commit_every rows\n\n :param table: Name of the target table\n :type table: str\n :param rows: The rows to insert into the table\n :type rows: iterable of tuples\n :param target_fields: The names of the columns to fill in the table\n :type target_fields: iterable of strings\n :param commit_every: The maximum number of rows to insert in one\n transaction. 
Set to 0 to insert all rows in one transaction.\n :type commit_every: int\n :param replace: Whether to replace instead of insert\n :type replace: bool\n \"\"\"\n if arg_3:\n arg_3 = \", \".join(arg_3)\n arg_3 = \"({})\".format(arg_3)\n else:\n arg_3 = ''\n arg_6 = 0\n with closing(arg_0.get_conn()) as conn:\n if arg_0.supports_autocommit:\n arg_0.set_autocommit(conn, False)\n\n conn.commit()\n\n with closing(conn.cursor()) as cur:\n for arg_6, arg_7 in enumerate(arg_2, 1):\n arg_8 = []\n for arg_9 in arg_7:\n arg_8.append(arg_0._serialize_cell(arg_9, conn))\n arg_10 = tuple(arg_8)\n arg_11 = [\"%s\", ] * len(arg_10)\n if not arg_5:\n arg_12 = \"INSERT INTO \"\n else:\n arg_12 = \"REPLACE INTO \"\n arg_12 += \"{0} {1} VALUES ({2})\".format(\n arg_1,\n arg_3,\n \",\".join(arg_11))\n cur.execute(arg_12, arg_10)\n if arg_4 and arg_6 % arg_4 == 0:\n conn.commit()\n arg_0.log.info(\n \"Loaded %s into %s rows so far\", arg_6, arg_1\n )\n\n conn.commit()\n arg_0.log.info(\"Done loading. Loaded a total of %s rows\", arg_6)"} +{"_id": "doc_366", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n An endpoint helping check the Func status of the Airflow instance,\n including metadatabase and scheduler.\n \"\"\"\n\n arg_2 = jobs.BaseJob\n arg_3 = {}\n arg_4 = timedelta(seconds=conf.getint('scheduler',\n 'scheduler_Func_check_threshold'\n ))\n\n arg_5 = None\n arg_3['metadatabase'] = {'status': 'Funcy'}\n try:\n arg_5 = arg_1.query(func.max(arg_2.latest_heartbeat)).\\\n filter(arg_2.state == 'running', arg_2.job_type == 'SchedulerJob').\\\n scalar()\n except Exception:\n arg_3['metadatabase']['status'] = 'unFuncy'\n\n if not arg_5:\n arg_6 = 'unFuncy'\n else:\n if timezone.utcnow() - arg_5 <= arg_4:\n arg_6 = 'Funcy'\n else:\n arg_6 = 'unFuncy'\n\n arg_3['scheduler'] = {'status': arg_6,\n 'latest_scheduler_heartbeat': str(arg_5)}\n\n return wwwutils.json_response(arg_3)"} +{"_id": "doc_367", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A restful endpoint that returns external links for a given Operator\n\n It queries the operator that sent the request for the links it wishes\n to provide for a given external link name.\n\n API: GET\n Args: dag_id: The id of the dag containing the task in question\n task_id: The id of the task in question\n execution_date: The date of execution of the task\n link_name: The name of the link reference to find the actual URL for\n\n Returns:\n 200: {url: , error: None} - returned when there was no problem\n finding the URL\n 404: {url: None, error: } - returned when the operator does\n not return a URL\n \"\"\"\n arg_1 = request.args.get('dag_id')\n arg_2 = request.args.get('task_id')\n arg_3 = request.args.get('execution_date')\n arg_4 = request.args.get('link_name')\n arg_5 = airflow.utils.timezone.parse(arg_3)\n arg_6 = dagbag.get_dag(arg_1)\n\n if not arg_6 or arg_2 not in arg_6.task_ids:\n arg_7 = jsonify(\n {'url': None,\n 'error': \"can't find dag {dag} or task_id {task_id}\".format(\n arg_6=arg_6,\n arg_2=arg_2\n )}\n )\n arg_7.status_code = 404\n return arg_7\n\n arg_9 = arg_6.get_task(arg_2)\n\n try:\n arg_10 = arg_9.get_Func(arg_5, arg_4)\n except ValueError as err:\n arg_7 = jsonify({'url': None, 'error': str(err)})\n arg_7.status_code = 404\n return arg_7\n if arg_10:\n arg_7 = jsonify({'error': None, 'url': arg_10})\n arg_7.status_code = 200\n return arg_7\n else:\n arg_7 = jsonify(\n {'url': None, 'error': 'No URL found for {dest}'.format(dest=arg_4)})\n arg_7.status_code = 404\n return arg_7"} +{"_id": "doc_368", "title": "", "text": "def 
Func(arg_0):\n \"\"\"\n Opens a connection to the cloudant service and closes it automatically if used as context manager.\n\n .. note::\n In the connection form:\n - 'host' equals the 'Account' (optional)\n - 'login' equals the 'Username (or API Key)' (required)\n - 'password' equals the 'Password' (required)\n\n :return: an authorized cloudant session context manager object.\n :rtype: cloudant\n \"\"\"\n arg_1 = arg_0.Funcection(arg_0.cloudant_conn_id)\n\n arg_0._validate_connection(arg_1)\n\n arg_2 = cloudant(user=arg_1.login, passwd=arg_1.password, account=arg_1.host)\n\n return arg_2"} +{"_id": "doc_369", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Call the SlackWebhookHook to post the provided Slack message\n \"\"\"\n arg_0.hook = SlackWebhookHook(\n arg_0.http_conn_id,\n arg_0.webhook_token,\n arg_0.message,\n arg_0.attachments,\n arg_0.channel,\n arg_0.username,\n arg_0.icon_emoji,\n arg_0.link_names,\n arg_0.proxy\n )\n arg_0.hook.Func()"} +{"_id": "doc_370", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A list of states indicating that a task either has not completed\n a run or has not even started.\n \"\"\"\n return [\n arg_0.NONE,\n arg_0.SCHEDULED,\n arg_0.QUEUED,\n arg_0.RUNNING,\n arg_0.SHUTDOWN,\n arg_0.UP_FOR_RETRY,\n arg_0.UP_FOR_RESCHEDULE\n ]"} +{"_id": "doc_371", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Save model to a pickle located at `path`\"\"\"\n if arg_1 is None:\n arg_1 = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_variables(os.path.join(td, \"model\"))\n arg_2 = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arg_2, 'w') as zipf:\n for arg_3, arg_4, arg_5 in os.walk(td):\n for arg_6 in arg_5:\n arg_7 = os.path.join(arg_3, arg_6)\n if arg_7 != arg_2:\n zipf.write(arg_7, os.path.relpath(arg_7, td))\n with open(arg_2, \"rb\") as f:\n arg_8 = f.read()\n with open(arg_1, \"wb\") as f:\n cloudpickle.dump((arg_8, arg_0._act_params), f)"} +{"_id": "doc_372", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n CNN from Nature paper.\n \"\"\"\n arg_2 = tf.cast(arg_0, tf.float32) / 255.\n arg_3 = tf.nn.relu\n arg_4 = arg_3(conv(arg_2, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),\n **arg_1))\n arg_5 = arg_3(conv(arg_4, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **arg_1))\n arg_6 = arg_3(conv(arg_5, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **arg_1))\n arg_6 = conv_to_fc(arg_6)\n return arg_3(fc(arg_6, 'fc1', nh=512, init_scale=np.sqrt(2)))"} +{"_id": "doc_373", "title": "", "text": "def Func(arg_0=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **arg_1):\n '''\n convolutions-only net\n\n Parameters:\n ----------\n\n conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.\n\n Returns:\n\n function that takes tensorflow tensor as input and returns the output of the last convolutional layer\n\n '''\n\n def network_fn(arg_2):\n arg_3 = tf.cast(arg_2, tf.float32) / 255.\n with tf.variable_scope(\"convnet\"):\n for arg_4, arg_5, arg_6 in arg_0:\n arg_3 = layers.convolution2d(arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n activation_fn=tf.nn.relu,\n **arg_1)\n\n return arg_3\n return network_fn"} +{"_id": "doc_374", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=None,\n arg_5=0,\n arg_6=1.0,\n arg_7=True,\n arg_8=None):\n \"\"\"\n Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.\n \"\"\"\n arg_4 = arg_4 or {}\n arg_9 = MPI.COMM_WORLD.Get_rank() if MPI else 0\n arg_3 = arg_3 + 
10000 * arg_9 if arg_3 is not None else None\n arg_10 = logger.get_dir()\n def make_thunk(arg_11):\n return lambda: make_env(\n arg_0=arg_0,\n arg_1=arg_1,\n arg_9=arg_9,\n subrank=arg_11,\n arg_3=arg_3,\n arg_6=arg_6,\n arg_8=arg_8,\n arg_7=arg_7,\n arg_4=arg_4,\n arg_10=arg_10\n )\n\n set_global_seeds(arg_3)\n if arg_2 > 1:\n return SubprocVecEnv([make_thunk(arg_12 + arg_5) for arg_12 in range(arg_2)])\n else:\n return DummyVecEnv([make_thunk(arg_5)])"} +{"_id": "doc_375", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='Ob'):\n '''\n Create placeholder to feed observations into of the size appropriate to the observation space\n\n Parameters:\n ----------\n\n ob_space: gym.Space observation space\n\n batch_size: int size of the batch to be fed into input. Can be left None in most cases.\n\n name: str name of the placeholder\n\n Returns:\n -------\n\n tensorflow placeholder tensor\n '''\n\n assert isinstance(arg_0, Discrete) or isinstance(arg_0, Box) or isinstance(arg_0, MultiDiscrete), \\\n 'Can only deal with Discrete and Box observation spaces for now'\n\n arg_3 = arg_0.dtype\n if arg_3 == np.int8:\n arg_3 = np.uint8\n\n return tf.placeholder(shape=(arg_1,) + arg_0.shape, arg_3=arg_3, arg_2=arg_2)"} +{"_id": "doc_376", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='Ob'):\n '''\n Create placeholder to feed observations into of the size appropriate to the observation space, and add input\n encoder of the appropriate type.\n '''\n\n arg_3 = observation_placeholder(arg_0, arg_1, arg_2)\n return arg_3, encode_observation(arg_0, arg_3)"} +{"_id": "doc_377", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Deep-copy an observation dict.\n \"\"\"\n return {arg_1: np.copy(arg_2) for arg_1, arg_2 in arg_0.items()}"} +{"_id": "doc_378", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7):\n \"\"\"\n Calculates Func targets\n\n :param R: Rewards\n :param D: Dones\n :param q_i: Q values for actions taken\n :param v: V values\n :param rho_i: Importance weight for each action\n :return: Q_retrace values\n \"\"\"\n arg_8 = batch_to_seq(tf.minimum(1.0, arg_4), arg_5, arg_6, True) # list of len steps, shape [nenvs]\n arg_9 = batch_to_seq(arg_0, arg_5, arg_6, True) # list of len steps, shape [nenvs]\n arg_10 = batch_to_seq(arg_1, arg_5, arg_6, True) # list of len steps, shape [nenvs]\n arg_11 = batch_to_seq(arg_2, arg_5, arg_6, True)\n arg_12 = batch_to_seq(arg_3, arg_5, arg_6 + 1, True)\n arg_13 = arg_12[-1]\n arg_14 = arg_13\n arg_15 = []\n for arg_16 in range(arg_6 - 1, -1, -1):\n check_shape([arg_14, arg_10[arg_16], arg_9[arg_16], arg_8[arg_16], arg_11[arg_16], arg_12[arg_16]], [[arg_5]] * 6)\n arg_14 = arg_9[arg_16] + arg_7 * arg_14 * (1.0 - arg_10[arg_16])\n arg_15.append(arg_14)\n arg_14 = (arg_8[arg_16] * (arg_14 - arg_11[arg_16])) + arg_12[arg_16]\n arg_15 = arg_15[::-1]\n arg_14 = seq_to_batch(arg_15, flat=True)\n return arg_14"} +{"_id": "doc_379", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"See Schedule.Func\"\"\"\n for (arg_2, arg_3), (arg_4, arg_5) in zip(arg_0._endpoints[:-1], arg_0._endpoints[1:]):\n if arg_2 <= arg_1 and arg_1 < arg_4:\n arg_6 = float(arg_1 - arg_2) / (arg_4 - arg_2)\n return arg_0._interpolation(arg_3, arg_5, arg_6)\n\n # t does not belong to any of the pieces, so doom.\n assert arg_0._outside_Func is not None\n return arg_0._outside_Func"} +{"_id": "doc_380", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Control a single environment instance using IPC and\n 
shared memory.\n \"\"\"\n def _write_obs(arg_7):\n arg_8 = obs_to_dict(arg_7)\n for arg_9 in arg_6:\n arg_10 = arg_3[arg_9].get_obj()\n arg_11 = np.frombuffer(arg_10, dtype=arg_5[arg_9]).reshape(arg_4[arg_9]) # pylint: disable=W0212\n np.copyto(arg_11, arg_8[arg_9])\n\n arg_12 = arg_2.x()\n arg_1.close()\n try:\n while True:\n arg_13, arg_14 = arg_0.recv()\n if arg_13 == 'reset':\n arg_0.send(_write_obs(arg_12.reset()))\n elif arg_13 == 'step':\n arg_15, arg_16, arg_17, arg_18 = arg_12.step(arg_14)\n if arg_17:\n arg_15 = arg_12.reset()\n arg_0.send((_write_obs(arg_15), arg_16, arg_17, arg_18))\n elif arg_13 == 'render':\n arg_0.send(arg_12.render(mode='rgb_array'))\n elif arg_13 == 'close':\n arg_0.send(None)\n break\n else:\n raise RuntimeError('Got unrecognized cmd %s' % arg_13)\n except KeyboardInterrupt:\n print('ShmemVecEnv worker: got KeyboardInterrupt')\n finally:\n arg_12.close()"} +{"_id": "doc_381", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=5,\n arg_4=arg_5(80e6),\n arg_6=0.5,\n arg_7=0.01,\n arg_8=0.5,\n arg_9=7e-4,\n arg_10='linear',\n arg_11=1e-5,\n arg_12=0.99,\n arg_13=0.99,\n arg_14=100,\n arg_15=None,\n **arg_16):\n\n '''\n Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.\n\n Parameters:\n -----------\n\n network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n See baselines.common/policies.py/lstm for more details on using recurrent nets in policies\n\n\n env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)\n\n\n seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible)\n\n nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where\n nenv is number of environment copies simulated in parallel)\n\n total_timesteps: int, total number of timesteps to train on (default: 80M)\n\n vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)\n\n ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01)\n\n max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)\n\n lr: float, Funcing rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)\n\n lrschedule: schedule of Funcing rate. 
Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and\n returns fraction of the Funcing rate (specified as lr) as output\n\n epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)\n\n alpha: float, RMSProp decay parameter (default: 0.99)\n\n gamma: float, reward discounting parameter (default: 0.99)\n\n log_interval: int, specifies how frequently the logs are printed out (default: 100)\n\n **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n For instance, 'mlp' network architecture has arguments num_hidden and num_layers.\n\n '''\n\n\n\n set_global_seeds(arg_2)\n\n # Get the nb of env\n arg_17 = arg_1.num_envs\n arg_18 = build_policy(arg_1, arg_0, **arg_16)\n\n # Instantiate the model object (that creates step_model and train_model)\n arg_19 = Model(arg_18=arg_18, arg_1=arg_1, arg_3=arg_3, arg_7=arg_7, arg_6=arg_6,\n arg_8=arg_8, arg_9=arg_9, arg_12=arg_12, arg_11=arg_11, arg_4=arg_4, arg_10=arg_10)\n if arg_15 is not None:\n arg_19.load(arg_15)\n\n # Instantiate the runner object\n arg_20 = Runner(arg_1, arg_19, arg_3=arg_3, arg_13=arg_13)\n arg_21 = deque(maxlen=100)\n\n # Calculate the batch_size\n arg_22 = arg_17*arg_3\n\n # Start total timer\n arg_23 = time.time()\n\n for arg_24 in range(1, arg_4//arg_22+1):\n # Get mini batch of experiences\n arg_25, arg_26, arg_27, arg_28, arg_29, arg_30, arg_31 = arg_20.run()\n arg_21.extend(arg_31)\n\n arg_32, arg_33, arg_34 = arg_19.train(arg_25, arg_26, arg_27, arg_28, arg_29, arg_30)\n arg_35 = time.time()-arg_23\n\n # Calculate the fps (frame per second)\n arg_36 = arg_5((arg_24*arg_22)/arg_35)\n if arg_24 % arg_14 == 0 or arg_24 == 1:\n # Calculates if value function is a good predicator of the returns (ev > 1)\n # or if it's just worse than predicting nothing (ev =< 0)\n arg_37 = explained_variance(arg_30, arg_27)\n logger.record_tabular(\"nupdates\", arg_24)\n logger.record_tabular(\"total_timesteps\", arg_24*arg_22)\n logger.record_tabular(\"fps\", arg_36)\n logger.record_tabular(\"policy_entropy\", float(arg_34))\n logger.record_tabular(\"value_loss\", float(arg_33))\n logger.record_tabular(\"explained_variance\", float(arg_37))\n logger.record_tabular(\"eprewmean\", safemean([arg_38['r'] for arg_38 in arg_21]))\n logger.record_tabular(\"eplenmean\", safemean([arg_38['l'] for arg_38 in arg_21]))\n logger.dump_tabular()\n return arg_19"} +{"_id": "doc_382", "title": "", "text": "def Func(arg_0):\n \"\"\"\n swap and then flatten axes 0 and 1\n \"\"\"\n arg_1 = arg_0.shape\n return arg_0.swapaxes(0, 1).reshape(arg_1[0] * arg_1[1], *arg_1[2:])"} +{"_id": "doc_383", "title": "", "text": "def Func(arg_0):\n \"\"\"Print the number of seconds in human readable format.\n\n Examples:\n 2 days\n 2 hours and 37 minutes\n less than a minute\n\n Paramters\n ---------\n seconds_left: int\n Number of seconds to be converted to the ETA\n Returns\n -------\n eta: str\n String representing the pretty ETA.\n \"\"\"\n arg_1 = arg_0 // 60\n arg_0 %= 60\n arg_2 = arg_1 // 60\n arg_1 %= 60\n arg_3 = arg_2 // 24\n arg_2 %= 24\n\n def helper(arg_4, arg_5):\n return \"{} {}{}\".format(str(arg_4), arg_5, ('s' if arg_4 > 1 else ''))\n\n if arg_3 > 0:\n arg_6 = helper(arg_3, 'day')\n if arg_2 > 0:\n arg_6 += ' and ' + helper(arg_2, 'hour')\n return arg_6\n if arg_2 > 0:\n arg_6 = helper(arg_2, 'hour')\n if arg_1 > 0:\n arg_6 += ' and ' 
+ helper(arg_1, 'minute')\n return arg_6\n if arg_1 > 0:\n return helper(arg_1, 'minute')\n return 'less than a minute'"} +{"_id": "doc_384", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Add a boolean flag to argparse parser.\n\n Parameters\n ----------\n parser: argparse.Parser\n parser to add the flag to\n name: str\n -- will enable the flag, while --no- will disable it\n default: bool or None\n default value of the flag\n help: str\n help string for the flag\n \"\"\"\n arg_4 = arg_1.replace('-', '_')\n arg_0.add_argument(\"--\" + arg_1, action=\"store_true\", arg_2=arg_2, arg_4=arg_4, arg_3=arg_3)\n arg_0.add_argument(\"--no-\" + arg_1, action=\"store_false\", arg_4=arg_4)"} +{"_id": "doc_385", "title": "", "text": "def Func(arg_0):\n \"\"\"Stores provided method args as instance attributes.\n \"\"\"\n arg_1 = inspect.getfullargspec(arg_0)\n arg_2 = {}\n if arg_1.defaults is not None:\n arg_2 = dict(\n zip(arg_1.args[-len(arg_1.defaults):], arg_1.defaults))\n if arg_1.kwonlydefaults is not None:\n arg_2.update(arg_1.kwonlydefaults)\n arg_3 = arg_1.args[1:]\n\n @functools.wraps(arg_0)\n def wrapper(*arg_4, **arg_5):\n arg_6 = arg_4[0]\n # Get default arg values\n arg_7 = arg_2.copy()\n # Add provided arg values\n for arg_8, arg_9 in zip(arg_3, arg_4[1:]):\n arg_7[arg_8] = arg_9\n arg_7.update(arg_5)\n arg_6.__dict__.update(arg_7)\n return arg_0(*arg_4, **arg_5)\n\n return wrapper"} +{"_id": "doc_386", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Flattens a variables and their gradients.\n \"\"\"\n return tf.concat([tf.reshape(arg_3, [U.numel(arg_2)])\n for (arg_2, arg_3) in zip(arg_0, arg_1)], 0)"} +{"_id": "doc_387", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False, arg_4=\"\"):\n \"\"\"Creates a simple neural network\n \"\"\"\n for arg_5, arg_6 in enumerate(arg_1):\n arg_7 = tf.Func.relu if arg_5 < len(arg_1) - 1 else None\n arg_0 = tf.layers.dense(inputs=arg_0,\n units=arg_6,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n arg_2=arg_2,\n arg_4=arg_4 + '_' + str(arg_5))\n if arg_7:\n arg_0 = arg_7(arg_0)\n if arg_3:\n assert arg_1[-1] == 1\n arg_0 = tf.reshape(arg_0, [-1])\n return arg_0"} +{"_id": "doc_388", "title": "", "text": "def Func(arg_0, arg_1=[]):\n \"\"\"Re-launches the current script with workers\n Returns \"parent\" for original parent, \"child\" for MPI children\n \"\"\"\n if arg_0 <= 1:\n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n arg_2 = os.environ.copy()\n arg_2.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n # \"-bind-to core\" is crucial for good performance\n arg_3 = [\"mpirun\", \"-np\", str(arg_0)] + \\\n arg_1 + \\\n [sys.executable]\n\n arg_3 += sys.argv\n subprocess.check_call(arg_3, arg_2=arg_2)\n return \"parent\"\n else:\n install_mpi_excepthook()\n return \"child\""} +{"_id": "doc_389", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Get default session or create one with a given config\"\"\"\n arg_1 = tf.get_default_session()\n if arg_1 is None:\n arg_1 = make_session(arg_0=arg_0, make_default=True)\n return arg_1"} +{"_id": "doc_390", "title": "", "text": "def Func():\n \"\"\"Initialize all the unFuncd variables in the global scope.\"\"\"\n arg_0 = set(tf.global_variables()) - ALREADY_INITIALIZED\n get_session().run(tf.variables_Funcr(arg_0))\n ALREADY_INITIALIZED.update(arg_0)"} +{"_id": "doc_391", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n adjust shape of the data to the shape of the placeholder if possible.\n If shape 
is incompatible, AssertionError is thrown\n\n Parameters:\n placeholder tensorflow input placeholder\n\n data input data to be (potentially) reshaped to be fed into placeholder\n\n Returns:\n reshaped data\n '''\n\n if not isinstance(arg_1, np.ndarray) and not isinstance(arg_1, list):\n return arg_1\n if isinstance(arg_1, list):\n arg_1 = np.array(arg_1)\n\n arg_2 = [x or -1 for x in arg_0.shape.as_list()]\n\n assert _check_shape(arg_2, arg_1.shape), \\\n 'Shape of data {} is not compatible with shape of the placeholder {}'.format(arg_1.shape, arg_2)\n\n return np.reshape(arg_1, arg_2)"} +{"_id": "doc_392", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True, arg_3=False, arg_4=False):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if arg_1:\n arg_0 = EpisodicLifeEnv(arg_0)\n if 'FIRE' in arg_0.unwrapped.get_action_meanings():\n arg_0 = FireResetEnv(arg_0)\n arg_0 = WarpFrame(arg_0)\n if arg_4:\n arg_0 = ScaledFloatFrame(arg_0)\n if arg_2:\n arg_0 = ClipRewardEnv(arg_0)\n if arg_3:\n arg_0 = FrameStack(arg_0, 4)\n return arg_0"} +{"_id": "doc_393", "title": "", "text": "def Func():\n \"\"\"\n Count the GPUs on this machine.\n \"\"\"\n if shutil.which('nvidia-smi') is None:\n return 0\n arg_0 = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'])\n return max(0, len(arg_0.split(b'\\n')) - 2)"} +{"_id": "doc_394", "title": "", "text": "def Func():\n \"\"\"\n Set CUDA_VISIBLE_DEVICES to MPI rank if not already set\n \"\"\"\n if 'CUDA_VISIBLE_DEVICES' not in arg_3.environ:\n if sys.platform == 'darwin': # This Assumes if you're on OSX you're just\n arg_0 = [] # doing a smoke test and don't want GPUs\n else:\n arg_1, arg_2 = get_local_rank_size(MPI.COMM_WORLD)\n arg_0 = [arg_1]\n arg_3.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(map(str, arg_0))"} +{"_id": "doc_395", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Copies the file from rank 0 to all other ranks\n Puts it in the same place on all machines\n \"\"\"\n arg_2, arg_3 = get_local_rank_size(arg_0)\n if arg_0.Get_rank() == 0:\n with open(arg_1, 'rb') as fh:\n arg_4 = fh.read()\n arg_0.bcast(arg_4)\n else:\n arg_4 = arg_0.bcast(None)\n if arg_2 == 0:\n os.makedirs(os.path.dirname(arg_1), exist_ok=True)\n with open(arg_1, 'wb') as fh:\n fh.write(arg_4)\n arg_0.Barrier()"} +{"_id": "doc_396", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n computes Funced sums along 0th dimension of x.\n\n inputs\n ------\n x: ndarray\n gamma: float\n\n outputs\n -------\n y: ndarray with same shape as x, satisfying\n\n y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... 
+ gamma^k x[t+k],\n where k = len(x) - t - 1\n\n \"\"\"\n assert arg_0.ndim >= 1\n return scipy.signal.lfilter([1],[1,-arg_1],arg_0[::-1], axis=0)[::-1]"} +{"_id": "doc_397", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"See ReplayBuffer.store_effect\"\"\"\n arg_3 = arg_0._next_idx\n super().Func(*arg_1, **arg_2)\n arg_0._it_sum[arg_3] = arg_0._max_priority ** arg_0._alpha\n arg_0._it_min[arg_3] = arg_0._max_priority ** arg_0._alpha"} +{"_id": "doc_398", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Update priorities of sampled transitions.\n\n sets priority of transition at index idxes[i] in buffer\n to priorities[i].\n\n Parameters\n ----------\n idxes: [int]\n List of idxes of sampled transitions\n priorities: [float]\n List of updated priorities corresponding to\n transitions at the sampled idxes denoted by\n variable `idxes`.\n \"\"\"\n assert len(arg_1) == len(arg_2)\n for arg_3, arg_4 in zip(arg_1, arg_2):\n assert arg_4 > 0\n assert 0 <= arg_3 < len(arg_0._storage)\n arg_0._it_sum[arg_3] = arg_4 ** arg_0._alpha\n arg_0._it_min[arg_3] = arg_4 ** arg_0._alpha\n\n arg_0._max_priority = max(arg_0._max_priority, arg_4)"} +{"_id": "doc_399", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=4):\n \"\"\"\n Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind\n \"\"\"\n arg_0 = WarpFrame(arg_0)\n arg_0 = ClipRewardEnv(arg_0)\n if arg_2 > 1:\n arg_0 = FrameStack(arg_0, arg_2)\n if arg_1:\n arg_0 = ScaledFloatFrame(arg_0)\n return arg_0"} +{"_id": "doc_400", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Creates a sample function that can be used for HER experience replay.\n\n Args:\n replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',\n regular DDPG experience replay is used\n replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times\n as many HER replays as regular replays are used)\n reward_fun (function): function to re-compute the reward with substituted goals\n \"\"\"\n if arg_0 == 'future':\n arg_3 = 1 - (1. / (1 + arg_1))\n else: # 'replay_strategy' == 'none'\n arg_3 = 0\n\n def _sample_her_transitions(arg_4, arg_5):\n \"\"\"episode_batch is {key: array(buffer_size x T x dim_key)}\n \"\"\"\n arg_6 = arg_4['u'].shape[1]\n arg_7 = arg_4['u'].shape[0]\n arg_8 = arg_5\n\n # Select which episodes and time steps to use.\n arg_9 = np.random.randint(0, arg_7, arg_8)\n arg_10 = np.random.randint(arg_6, size=arg_8)\n arg_11 = {arg_17: arg_4[arg_17][arg_9, arg_10].copy()\n for arg_17 in arg_4.keys()}\n\n # Select future time indexes proportional with probability future_p. These\n # will be used for HER replay by substituting in future goals.\n arg_12 = np.where(np.random.uniform(size=arg_8) < arg_3)\n arg_13 = np.random.uniform(size=arg_8) * (arg_6 - arg_10)\n arg_13 = arg_13.astype(int)\n arg_14 = (arg_10 + 1 + arg_13)[arg_12]\n\n # Replace goal with achieved goal but only for the previously-selected\n # HER transitions (as defined by her_indexes). 
For the other transitions,\n # keep the original goal.\n arg_15 = arg_4['ag'][arg_9[arg_12], arg_14]\n arg_11['g'][arg_12] = arg_15\n\n # Reconstruct info dictionary for reward computation.\n arg_16 = {}\n for arg_17, arg_18 in arg_11.items():\n if arg_17.startswith('info_'):\n arg_16[arg_17.replace('info_', '')] = arg_18\n\n # Re-compute reward since we may have substituted the goal.\n arg_20 = {k: arg_11[k] for k in ['ag_2', 'g']}\n arg_20['info'] = arg_16\n arg_11['r'] = arg_2(**arg_20)\n\n arg_11 = {k: arg_11[k].reshape(arg_8, *arg_11[k].shape[1:])\n for k in arg_11.keys()}\n\n assert(arg_11['u'].shape[0] == arg_5)\n\n return arg_11\n\n return _sample_her_transitions"} +{"_id": "doc_401", "title": "", "text": "def Func(arg_0, arg_1=1e-5):\n \"\"\"\n Estimate the geometric median of points in 2D.\n\n Code from https://stackoverflow.com/a/30305181\n\n Parameters\n ----------\n X : (N,2) ndarray\n Points in 2D. Second axis must be given in xy-form.\n\n eps : float, optional\n Distance threshold when to return the median.\n\n Returns\n -------\n (2,) ndarray\n Geometric median as xy-coordinate.\n\n \"\"\"\n arg_2 = np.mean(arg_0, 0)\n\n while True:\n arg_3 = scipy.spatial.distance.cdist(arg_0, [arg_2])\n arg_4 = (arg_3 != 0)[:, 0]\n\n arg_5 = 1 / arg_3[arg_4]\n arg_6 = np.sum(arg_5)\n arg_7 = arg_5 / arg_6\n arg_8 = np.sum(arg_7 * arg_0[arg_4], 0)\n\n arg_9 = len(arg_0) - np.sum(arg_4)\n if arg_9 == 0:\n arg_10 = arg_8\n elif arg_9 == len(arg_0):\n return arg_2\n else:\n arg_11 = (arg_8 - arg_2) * arg_6\n arg_12 = np.linalg.norm(arg_11)\n arg_13 = 0 if arg_12 == 0 else arg_9/arg_12\n arg_10 = max(0, 1-arg_13)*arg_8 + min(1, arg_13)*arg_2\n\n if scipy.spatial.distance.euclidean(arg_2, arg_10) < arg_1:\n return arg_10\n\n arg_2 = arg_10"} +{"_id": "doc_402", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Project the keypoint onto a new position on a new image.\n\n E.g. if the keypoint is on its original image at x=(10 of 100 pixels)\n and y=(20 of 100 pixels) and is Funced onto a new image with\n size (width=200, height=200), its new position will be (20, 40).\n\n This is intended for cases where the original image is resized.\n It cannot be used for more complex changes (e.g. padding, cropping).\n\n Parameters\n ----------\n from_shape : tuple of int\n Shape of the original image. (Before resize.)\n\n to_shape : tuple of int\n Shape of the new image. (After resize.)\n\n Returns\n -------\n imgaug.Keypoint\n Keypoint object with new coordinates.\n\n \"\"\"\n arg_3 = Func_coords([(arg_0.x, arg_0.y)], arg_1, arg_2)\n return arg_0.deepcopy(x=arg_3[0][0], y=arg_3[0][1])"} +{"_id": "doc_403", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=0):\n \"\"\"\n Move the keypoint around on an image.\n\n Parameters\n ----------\n x : number, optional\n Move by this value on the x axis.\n\n y : number, optional\n Move by this value on the y axis.\n\n Returns\n -------\n imgaug.Keypoint\n Keypoint object with new coordinates.\n\n \"\"\"\n return arg_0.deepcopy(arg_0.x + arg_1, arg_0.y + arg_2)"} +{"_id": "doc_404", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(0, 255, 0), arg_3=1.0, arg_4=3,\n arg_5=True, arg_6=False):\n \"\"\"\n Draw the keypoint onto a given image.\n\n The keypoint is drawn as a square.\n\n Parameters\n ----------\n image : (H,W,3) ndarray\n The image onto which to draw the keypoint.\n\n color : int or list of int or tuple of int or (3,) ndarray, optional\n The RGB color of the keypoint. 
If a single int ``C``, then that is\n equivalent to ``(C,C,C)``.\n\n alpha : float, optional\n The opacity of the drawn keypoint, where ``1.0`` denotes a fully\n visible keypoint and ``0.0`` an invisible one.\n\n size : int, optional\n The size of the keypoint. If set to ``S``, each square will have\n size ``S x S``.\n\n copy : bool, optional\n Whether to copy the image before drawing the keypoint.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an exception if the keypoint is outside of the\n image.\n\n Returns\n -------\n image : (H,W,3) ndarray\n Image with drawn keypoint.\n\n \"\"\"\n if arg_5:\n arg_1 = np.copy(arg_1)\n\n if arg_1.ndim == 2:\n assert ia.is_single_number(arg_2), (\n \"Got a 2D image. Expected then 'color' to be a single number, \"\n \"but got %s.\" % (str(arg_2),))\n elif arg_1.ndim == 3 and ia.is_single_number(arg_2):\n arg_2 = [arg_2] * arg_1.shape[-1]\n\n arg_7 = arg_1.dtype\n arg_8 = arg_2\n if arg_3 < 0.01:\n # keypoint invisible, nothing to do\n return arg_1\n elif arg_3 > 0.99:\n arg_3 = 1\n else:\n arg_1 = arg_1.astype(np.float32, arg_5=False)\n arg_8 = arg_3 * np.array(arg_2)\n\n arg_9, arg_10 = arg_1.shape[0:2]\n\n arg_11, arg_12 = arg_0.y_int, arg_0.x_int\n\n arg_13 = max(arg_12 - arg_4//2, 0)\n arg_14 = min(arg_12 + 1 + arg_4//2, arg_10)\n arg_15 = max(arg_11 - arg_4//2, 0)\n arg_16 = min(arg_11 + 1 + arg_4//2, arg_9)\n\n arg_17, arg_18 = np.clip([arg_13, arg_14], 0, arg_10)\n arg_19, arg_20 = np.clip([arg_15, arg_16], 0, arg_9)\n\n arg_21 = (arg_17 < 0 or arg_17 >= arg_10)\n arg_22 = (arg_18 < 0 or arg_18 >= arg_10+1)\n arg_23 = (arg_19 < 0 or arg_19 >= arg_9)\n arg_24 = (arg_20 < 0 or arg_20 >= arg_9+1)\n arg_25 = (arg_21 and arg_22)\n arg_26 = (arg_23 and arg_24)\n arg_27 = (arg_18 - arg_17) < 1 # min size is 1px\n arg_28 = (arg_20 - arg_19) < 1\n if not arg_25 and not arg_26 and not arg_27 and not arg_28:\n if arg_3 == 1:\n arg_1[arg_19:arg_20, arg_17:arg_18] = arg_2\n else:\n arg_1[arg_19:arg_20, arg_17:arg_18] = (\n (1 - arg_3)\n * arg_1[arg_19:arg_20, arg_17:arg_18]\n + arg_8\n )\n else:\n if arg_6:\n raise Exception(\n \"Cannot draw keypoint x=%.8f, y=%.8f on image with \"\n \"shape %s.\" % (arg_11, arg_12, arg_1.shape))\n\n if arg_1.dtype.name != arg_7.name:\n if arg_7.name == \"uint8\":\n arg_1 = np.clip(arg_1, 0, 255, out=arg_1)\n arg_1 = arg_1.astype(arg_7, arg_5=False)\n return arg_1"} +{"_id": "doc_405", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a shallow Func of the Keypoint object.\n\n Parameters\n ----------\n x : None or number, optional\n Coordinate of the keypoint on the x axis.\n If ``None``, the instance's value will be copied.\n\n y : None or number, optional\n Coordinate of the keypoint on the y axis.\n If ``None``, the instance's value will be copied.\n\n Returns\n -------\n imgaug.Keypoint\n Shallow Func.\n\n \"\"\"\n return arg_0.deepFunc(arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_406", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a deep copy of the Keypoint object.\n\n Parameters\n ----------\n x : None or number, optional\n Coordinate of the keypoint on the x axis.\n If ``None``, the instance's value will be copied.\n\n y : None or number, optional\n Coordinate of the keypoint on the y axis.\n If ``None``, the instance's value will be copied.\n\n Returns\n -------\n imgaug.Keypoint\n Deep copy.\n\n \"\"\"\n arg_1 = arg_0.x if arg_1 is None else arg_1\n arg_2 = arg_0.y if arg_2 is None else arg_2\n return Keypoint(arg_1=arg_1, arg_2=arg_2)"} 
+{"_id": "doc_407", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Project keypoints from Funce image to a new Funce.\n\n Parameters\n ----------\n image : ndarray or tuple of int\n New image Functo which the keypoints are to be projected.\n May also simply be that new image's shape tuple.\n\n Returns\n -------\n keypoints : imgaug.KeypointsOnImage\n Object cFunctaining all projected keypoints.\n\n \"\"\"\n arg_2 = normalize_shape(arg_1)\n if arg_2[0:2] == arg_0.shape[0:2]:\n return arg_0.deepcopy()\n else:\n arg_3 = [kp.project(arg_0.shape, arg_2) for kp in arg_0.keypoints]\n return arg_0.deepcopy(arg_3, arg_2)"} +{"_id": "doc_408", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=0):\n \"\"\"\n Move the keypoints around on an image.\n\n Parameters\n ----------\n x : number, optional\n Move each keypoint by this value on the x axis.\n\n y : number, optional\n Move each keypoint by this value on the y axis.\n\n Returns\n -------\n out : KeypointsOnImage\n Keypoints after moving them.\n\n \"\"\"\n arg_3 = [keypoint.Func(arg_1=arg_1, arg_2=arg_2) for keypoint in arg_0.keypoints]\n return arg_0.deepcopy(arg_3)"} +{"_id": "doc_409", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a shallow Func of the KeypointsOnImage object.\n\n Parameters\n ----------\n keypoints : None or list of imgaug.Keypoint, optional\n List of keypoints on the image. If ``None``, the instance's\n keypoints will be copied.\n\n shape : tuple of int, optional\n The shape of the image on which the keypoints are placed.\n If ``None``, the instance's shape will be copied.\n\n Returns\n -------\n imgaug.KeypointsOnImage\n Shallow Func.\n\n \"\"\"\n arg_3 = Func.Func(arg_0)\n if arg_1 is not None:\n arg_3.keypoints = arg_1\n if arg_2 is not None:\n arg_3.shape = arg_2\n return arg_3"} +{"_id": "doc_410", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Compute the Func bounding box of this bounding box and another one.\n\n Note that in extreme cases, the Func can be a single point, meaning that the Func bounding box\n will exist, but then also has a height and width of zero.\n\n Parameters\n ----------\n other : imgaug.BoundingBox\n Other bounding box with which to generate the Func.\n\n default : any, optional\n Default value to return if there is no Func.\n\n Returns\n -------\n imgaug.BoundingBox or any\n Intersection bounding box of the two bounding boxes if there is an Func.\n If there is no Func, the default value will be returned, which can by anything.\n\n \"\"\"\n arg_3 = max(arg_0.x1, arg_1.x1)\n arg_4 = max(arg_0.y1, arg_1.y1)\n arg_5 = min(arg_0.x2, arg_1.x2)\n arg_6 = min(arg_0.y2, arg_1.y2)\n if arg_3 > arg_5 or arg_4 > arg_6:\n return arg_2\n else:\n return BoundingBox(x1=arg_3, y1=arg_4, x2=arg_5, y2=arg_6)"} +{"_id": "doc_411", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compute the Func bounding box of this bounding box and another one.\n\n This is equivalent to drawing a bounding box around all corners points of both\n bounding boxes.\n\n Parameters\n ----------\n other : imgaug.BoundingBox\n Other bounding box with which to generate the Func.\n\n Returns\n -------\n imgaug.BoundingBox\n Union bounding box of the two bounding boxes.\n\n \"\"\"\n return BoundingBox(\n x1=min(arg_0.x1, arg_1.x1),\n y1=min(arg_0.y1, arg_1.y1),\n x2=max(arg_0.x2, arg_1.x2),\n y2=max(arg_0.y2, arg_1.y2),\n )"} +{"_id": "doc_412", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Estimate whether the bounding box is at least partially inside the 
image area.\n\n Parameters\n ----------\n image : (H,W,...) ndarray or tuple of int\n Image dimensions to use.\n If an ndarray, its shape will be used.\n If a tuple, it is assumed to represent the image shape\n and must contain at least two integers.\n\n Returns\n -------\n bool\n True if the bounding box is at least partially inside the image area. False otherwise.\n\n \"\"\"\n arg_2 = normalize_shape(arg_1)\n arg_3, arg_4 = arg_2[0:2]\n arg_5 = np.finfo(np.float32).eps\n arg_6 = BoundingBox(x1=0, x2=arg_4-arg_5, y1=0, y2=arg_3-arg_5)\n return arg_0.intersection(arg_6) is not None"} +{"_id": "doc_413", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=False):\n \"\"\"\n Estimate whether the bounding box is partially or fully outside of the image area.\n\n Parameters\n ----------\n image : (H,W,...) ndarray or tuple of int\n Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is\n assumed to represent the image shape and must contain at least two integers.\n\n fully : bool, optional\n Whether to return True if the bounding box is fully outside fo the image area.\n\n partly : bool, optional\n Whether to return True if the bounding box is at least partially outside fo the\n image area.\n\n Returns\n -------\n bool\n True if the bounding box is partially/fully outside of the image area, depending\n on defined parameters. False otherwise.\n\n \"\"\"\n if arg_0.is_fully_within_image(arg_1):\n return False\n elif arg_0.is_partly_within_image(arg_1):\n return arg_3\n else:\n return arg_2"} +{"_id": "doc_414", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Clip off all parts of the bounding box that are outside of the image.\n\n Parameters\n ----------\n image : (H,W,...) ndarray or tuple of int\n Image dimensions to use for the clipping of the bounding box.\n If an ndarray, its shape will be used.\n If a tuple, it is assumed to represent the image shape and must contain at least two integers.\n\n Returns\n -------\n result : imgaug.BoundingBox\n Bounding box, clipped to fall within the image dimensions.\n\n \"\"\"\n arg_2 = normalize_shape(arg_1)\n\n arg_3, arg_4 = arg_2[0:2]\n ia.do_assert(arg_3 > 0)\n ia.do_assert(arg_4 > 0)\n\n arg_5 = np.finfo(np.float32).eps\n arg_6 = np.clip(arg_0.x1, 0, arg_4 - arg_5)\n arg_7 = np.clip(arg_0.x2, 0, arg_4 - arg_5)\n arg_8 = np.clip(arg_0.y1, 0, arg_3 - arg_5)\n arg_9 = np.clip(arg_0.y2, 0, arg_3 - arg_5)\n\n return arg_0.copy(\n arg_6=arg_6,\n arg_8=arg_8,\n arg_7=arg_7,\n arg_9=arg_9,\n label=arg_0.label\n )"} +{"_id": "doc_415", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(0, 255, 0), arg_3=1.0, arg_4=1,\n arg_5=True, arg_6=False, arg_7=None):\n \"\"\"\n Draw the bounding box on an image.\n\n Parameters\n ----------\n image : (H,W,C) ndarray(uint8)\n The image onto which to draw the bounding box.\n\n color : iterable of int, optional\n The color to use, corresponding to the channel layout of the image. Usually RGB.\n\n alpha : float, optional\n The transparency of the drawn bounding box, where 1.0 denotes no transparency and\n 0.0 is invisible.\n\n size : int, optional\n The thickness of the bounding box in pixels. If the value is larger than 1, then\n additional pixels will be added around the bounding box (i.e. extension towards the\n outside).\n\n copy : bool, optional\n Whether to copy the input image or change it in-place.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the bounding box is fully outside of the\n image. 
If set to False, no error will be raised and only the parts inside the image\n will be drawn.\n\n thickness : None or int, optional\n Deprecated.\n\n Returns\n -------\n result : (H,W,C) ndarray(uint8)\n Image with bounding box drawn on it.\n\n \"\"\"\n if arg_7 is not None:\n ia.warn_deprecated(\n \"Usage of argument 'thickness' in BoundingBox.Func() \"\n \"is deprecated. The argument was renamed to 'size'.\"\n )\n arg_4 = arg_7\n\n if arg_6 and arg_0.is_out_of_image(arg_1):\n raise Exception(\"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s.\" % (\n arg_0.x1, arg_0.y1, arg_0.x2, arg_0.y2, arg_1.shape))\n\n arg_8 = np.copy(arg_1) if arg_5 else arg_1\n\n if isinstance(arg_2, (tuple, list)):\n arg_2 = np.uint8(arg_2)\n\n for arg_9 in range(arg_4):\n arg_10, arg_11, arg_12, arg_13 = arg_0.y1_int, arg_0.y2_int, arg_0.x1_int, arg_0.x2_int\n\n # When y values get into the range (H-0.5, H), the *_int functions round them to H.\n # That is technically sensible, but in the case of drawing means that the border lies\n # just barely outside of the image, making the border disappear, even though the BB\n # is fully inside the image. Here we correct for that because of beauty reasons.\n # Same is the case for x coordinates.\n if arg_0.is_fully_within_image(arg_1):\n arg_10 = np.clip(arg_10, 0, arg_1.shape[0]-1)\n arg_11 = np.clip(arg_11, 0, arg_1.shape[0]-1)\n arg_12 = np.clip(arg_12, 0, arg_1.shape[1]-1)\n arg_13 = np.clip(arg_13, 0, arg_1.shape[1]-1)\n\n arg_14 = [arg_10-arg_9, arg_10-arg_9, arg_11+arg_9, arg_11+arg_9]\n arg_15 = [arg_12-arg_9, arg_13+arg_9, arg_13+arg_9, arg_12-arg_9]\n arg_16, arg_17 = skimage.draw.polygon_perimeter(arg_14, arg_15, shape=arg_8.shape)\n if arg_3 >= 0.99:\n arg_8[arg_16, arg_17, :] = arg_2\n else:\n if ia.is_float_array(arg_8):\n arg_8[arg_16, arg_17, :] = (1 - arg_3) * arg_8[arg_16, arg_17, :] + arg_3 * arg_2\n arg_8 = np.clip(arg_8, 0, 255)\n else:\n arg_18 = arg_8.dtype\n arg_8 = arg_8.astype(np.float32)\n arg_8[arg_16, arg_17, :] = (1 - arg_3) * arg_8[arg_16, arg_17, :] + arg_3 * arg_2\n arg_8 = np.clip(arg_8, 0, 255).astype(arg_18)\n\n return arg_8"} +{"_id": "doc_416", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=None,\n arg_4=True):\n \"\"\"\n Extract the image pixels within the bounding box.\n\n This function will zero-pad the image if the bounding box is partially/fully outside of\n the image.\n\n Parameters\n ----------\n image : (H,W) ndarray or (H,W,C) ndarray\n The image from which to extract the pixels within the bounding box.\n\n pad : bool, optional\n Whether to zero-pad the image if the object is partially/fully\n outside of it.\n\n pad_max : None or int, optional\n The maximum number of pixels that may be zero-paded on any side,\n i.e. if this has value ``N`` the total maximum of added pixels\n is ``4*N``.\n This option exists to prevent extremely large images as a result of\n single points being moved very far away during augmentation.\n\n prevent_zero_size : bool, optional\n Whether to prevent height or width of the extracted image from becoming zero.\n If this is set to True and height or width of the bounding box is below 1, the height/width will\n be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.\n If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or\n ``W`` potentially being 0.\n\n Returns\n -------\n image : (H',W') ndarray or (H',W',C) ndarray\n Pixels within the bounding box. 
Zero-padded if the bounding box is partially/fully\n outside of the image. If prevent_zero_size is activated, it is guarantueed that ``H'>0``\n and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.\n\n \"\"\"\n arg_5 = 0\n arg_6 = 0\n arg_7 = 0\n arg_8 = 0\n\n arg_9, arg_10 = arg_1.shape[0], arg_1.shape[1]\n arg_11, arg_12, arg_13, arg_14 = arg_0.x1_int, arg_0.x2_int, arg_0.y1_int, arg_0.y2_int\n\n # When y values get into the range (H-0.5, H), the *_int functions round them to H.\n # That is technically sensible, but in the case of extraction leads to a black border,\n # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for\n # that because of beauty reasons.\n # Same is the case for x coordinates.\n arg_15 = arg_0.is_fully_within_image(arg_1)\n if arg_15:\n arg_13, arg_14 = np.clip([arg_13, arg_14], 0, arg_9-1)\n arg_11, arg_12 = np.clip([arg_11, arg_12], 0, arg_10-1)\n\n # TODO add test\n if arg_4:\n if abs(arg_12 - arg_11) < 1:\n arg_12 = arg_11 + 1\n if abs(arg_14 - arg_13) < 1:\n arg_14 = arg_13 + 1\n\n if arg_2:\n # if the bb is outside of the image area, the following pads the image\n # first with black pixels until the bb is inside the image\n # and only then extracts the image area\n # TODO probably more efficient to initialize an array of zeros\n # and copy only the portions of the bb into that array that are\n # natively inside the image area\n if arg_11 < 0:\n arg_8 = abs(arg_11)\n arg_12 = arg_12 + arg_8\n arg_10 = arg_10 + arg_8\n arg_11 = 0\n if arg_13 < 0:\n arg_5 = abs(arg_13)\n arg_14 = arg_14 + arg_5\n arg_9 = arg_9 + arg_5\n arg_13 = 0\n if arg_12 >= arg_10:\n arg_6 = arg_12 - arg_10\n if arg_14 >= arg_9:\n arg_7 = arg_14 - arg_9\n\n arg_16 = [arg_5, arg_6, arg_7, arg_8]\n arg_17 = any([val > 0 for val in arg_16])\n if arg_17:\n if arg_3 is None:\n arg_3 = max(arg_16)\n\n arg_1 = ia.pad(\n arg_1,\n top=min(arg_5, arg_3),\n right=min(arg_6, arg_3),\n bottom=min(arg_7, arg_3),\n left=min(arg_8, arg_3)\n )\n return arg_1[arg_13:arg_14, arg_11:arg_12]\n else:\n arg_18 = (\n (0, 0, 0, 0)\n <= (arg_11, arg_13, arg_12, arg_14)\n < (arg_10, arg_9, arg_10, arg_9)\n )\n arg_19, arg_20 = (arg_14 - arg_13), (arg_12 - arg_11)\n arg_21 = (arg_19 > 0)\n arg_22 = (arg_20 > 0)\n if arg_18 and arg_21 and arg_22:\n return arg_1[arg_13:arg_14, arg_11:arg_12]\n if arg_4:\n arg_19 = 1\n arg_20 = 1\n else:\n arg_19 = 0\n arg_20 = 0\n if arg_1.ndim == 2:\n return np.zeros((arg_19, arg_20), dtype=arg_1.dtype)\n return np.zeros((arg_19, arg_20, arg_1.shape[-1]),\n dtype=arg_1.dtype)"} +{"_id": "doc_417", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False):\n \"\"\"\n Remove all bounding boxes that are fully or partially outside of the image.\n\n Parameters\n ----------\n fully : bool, optional\n Whether to remove bounding boxes that are fully outside of the image.\n\n partly : bool, optional\n Whether to remove bounding boxes that are partially outside of the image.\n\n Returns\n -------\n imgaug.BoundingBoxesOnImage\n Reduced set of bounding boxes, with those that were fully/partially outside of\n the image removed.\n\n \"\"\"\n arg_3 = [bb for bb in arg_0.bounding_boxes\n if not bb.is_out_of_image(arg_0.shape, arg_1=arg_1, arg_2=arg_2)]\n return BoundingBoxesOnImage(arg_3, shape=arg_0.shape)"} +{"_id": "doc_418", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clip off all parts from all bounding boxes that are outside of the image.\n\n Returns\n -------\n imgaug.BoundingBoxesOnImage\n Bounding boxes, clipped to fall within the image 
dimensions.\n\n \"\"\"\n arg_1 = [bb.Func(arg_0.shape)\n for bb in arg_0.bounding_boxes if bb.is_partly_within_image(arg_0.shape)]\n return BoundingBoxesOnImage(arg_1, shape=arg_0.shape)"} +{"_id": "doc_419", "title": "", "text": "def Func(arg_0=0, arg_1=1, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Augmenter that embosses images and overlays the result with the original\n image.\n\n The embossed version pronounces highlights and shadows,\n letting the image look as if it was recreated on a metal plate (\"embossed\").\n\n dtype support::\n\n See ``imgaug.augmenters.convolutional.Convolve``.\n\n Parameters\n ----------\n alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Visibility of the sharpened image. At 0, only the original image is\n visible, at 1.0 only its sharpened version is visible.\n\n * If an int or float, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Parameter that controls the strength of the embossing.\n Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard\n embossing effect. Default value is 1.\n\n * If an int or float, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = Func(alpha=(0.0, 1.0), strength=(0.5, 1.5))\n\n embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``\n and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``\n over the old image.\n\n \"\"\"\n arg_5 = iap.handle_continuous_param(arg_0, \"alpha\", value_range=(0, 1.0), tuple_to_uniform=True,\n list_to_choice=True)\n arg_6 = iap.handle_continuous_param(arg_1, \"strength\", value_range=(0, None), tuple_to_uniform=True,\n list_to_choice=True)\n\n def create_matrices(arg_7, arg_8, arg_9):\n arg_10 = arg_5.draw_sample(arg_4=arg_9)\n ia.do_assert(0 <= arg_10 <= 1.0)\n arg_11 = arg_6.draw_sample(arg_4=arg_9)\n arg_12 = np.array([\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0]\n ], dtype=np.float32)\n arg_13 = np.array([\n [-1-arg_11, 0-arg_11, 0],\n [0-arg_11, 1, 0+arg_11],\n [0, 0+arg_11, 1+arg_11]\n ], dtype=np.float32)\n arg_14 = (1-arg_10) * arg_12 + arg_10 * arg_13\n return [arg_14] * arg_8\n\n if arg_2 is None:\n arg_2 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return Convolve(create_matrices, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_420", "title": "", "text": "def Func(arg_0=0, arg_1=(0.0, 1.0), arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Augmenter that detects edges that have certain directions and marks them\n in a black and white image and then overlays the result with the original\n image.\n\n dtype support::\n\n See 
``imgaug.augmenters.convolutional.Convolve``.\n\n Parameters\n ----------\n alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Visibility of the sharpened image. At 0, only the original image is\n visible, at 1.0 only its sharpened version is visible.\n\n * If an int or float, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Angle of edges to pronounce, where 0 represents 0 degrees and 1.0\n represents 360 degrees (both clockwise, starting at the top).\n Default value is ``(0.0, 1.0)``, i.e. pick a random angle per image.\n\n * If an int or float, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = Func(alpha=1.0, direction=0)\n\n turns input images into edge images in which edges are detected from\n top side of the image (i.e. the top sides of horizontal edges are\n added to the output).\n\n >>> aug = Func(alpha=1.0, direction=90/360)\n\n same as before, but detecting edges from the right (right side of each\n vertical edge).\n\n >>> aug = Func(alpha=1.0, direction=(0.0, 1.0))\n\n same as before, but detecting edges from a variable direction (anything\n between 0 and 1.0, i.e. 0 degrees and 360 degrees, starting from the\n top and moving clockwise).\n\n >>> aug = Func(alpha=(0.0, 0.3), direction=0)\n\n generates edge images (edges detected from the top) and overlays them\n with the input images by a variable amount between 0 and 30 percent\n (e.g. 
for 0.3 then ``0.7*old_image + 0.3*edge_image``).\n\n \"\"\"\n arg_5 = iap.handle_continuous_param(arg_0, \"alpha\", value_range=(0, 1.0), tuple_to_uniform=True,\n list_to_choice=True)\n arg_6 = iap.handle_continuous_param(arg_1, \"direction\", value_range=None, tuple_to_uniform=True,\n list_to_choice=True)\n\n def create_matrices(arg_7, arg_8, arg_9):\n arg_10 = arg_5.draw_sample(arg_4=arg_9)\n ia.do_assert(0 <= arg_10 <= 1.0)\n arg_11 = arg_6.draw_sample(arg_4=arg_9)\n\n arg_12 = int(arg_11 * 360) % 360\n arg_13 = np.deg2rad(arg_12)\n arg_14 = np.cos(arg_13 - 0.5*np.pi)\n arg_15 = np.sin(arg_13 - 0.5*np.pi)\n arg_16 = np.array([arg_14, arg_15])\n\n arg_17 = np.array([\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]\n ], dtype=np.float32)\n for arg_14 in [-1, 0, 1]:\n for arg_15 in [-1, 0, 1]:\n if (arg_14, arg_15) != (0, 0):\n arg_18 = np.array([arg_14, arg_15])\n arg_19 = np.rad2deg(ia.angle_between_vectors(arg_18, arg_16))\n arg_20 = arg_19 / 180\n arg_21 = (1 - arg_20)**4\n arg_17[arg_15+1, arg_14+1] = arg_21\n arg_17 = arg_17 / np.sum(arg_17)\n arg_17 = arg_17 * (-1)\n arg_17[1, 1] = 1\n\n arg_22 = np.array([\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0]\n ], dtype=np.float32)\n\n arg_23 = (1-arg_10) * arg_22 + arg_10 * arg_17\n\n return [arg_23] * arg_8\n\n if arg_2 is None:\n arg_2 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return Convolve(create_matrices, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_421", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Normalize a shape tuple or array to a shape tuple.\n\n Parameters\n ----------\n shape : tuple of int or ndarray\n The input to normalize. May optionally be an array.\n\n Returns\n -------\n tuple of int\n Shape tuple.\n\n \"\"\"\n if isinstance(arg_0, tuple):\n return arg_0\n assert ia.is_np_array(arg_0), (\n \"Expected tuple of ints or array, got %s.\" % (type(arg_0),))\n return arg_0.shape"} +{"_id": "doc_422", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Project coordinates from one image shape to another.\n\n This performs a relative projection, e.g. a point at 60% of the old\n image width will be at 60% of the new image width after projection.\n\n Parameters\n ----------\n coords : ndarray or tuple of number\n Coordinates to project. Either a ``(N,2)`` numpy array or a tuple\n of `(x,y)` coordinates.\n\n from_shape : tuple of int or ndarray\n Old image shape.\n\n to_shape : tuple of int or ndarray\n New image shape.\n\n Returns\n -------\n ndarray\n Projected coordinates as ``(N,2)`` ``float32`` numpy array.\n\n \"\"\"\n arg_1 = normalize_shape(arg_1)\n arg_2 = normalize_shape(arg_2)\n if arg_1[0:2] == arg_2[0:2]:\n return arg_0\n\n arg_3, arg_4 = arg_1[0:2]\n arg_5, arg_6 = arg_2[0:2]\n assert all([arg_7 > 0 for arg_7 in [arg_3, arg_4, arg_5, arg_6]])\n\n # make sure to not just call np.float32(coords) here as the following lines\n # perform in-place changes and np.float32(.) only copies if the input\n # was *not* a float32 array\n arg_8 = np.array(arg_0).astype(np.float32)\n arg_8[:, 0] = (arg_8[:, 0] / arg_4) * arg_6\n arg_8[:, 1] = (arg_8[:, 1] / arg_3) * arg_5\n return arg_8"} +{"_id": "doc_423", "title": "", "text": "def Func(arg_0=0, arg_1=False, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Create an augmenter to add poisson noise to images.\n\n Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from\n a poisson distribution instead of a gaussian distribution. 
As poisson distributions produce only positive numbers,\n the sign of the sampled values are here randomly flipped.\n\n Values of around ``10.0`` for `lam` lead to visible noise (for uint8).\n Values of around ``20.0`` for `lam` lead to very visible noise (for uint8).\n It is recommended to usually set `per_channel` to True.\n\n dtype support::\n\n See ``imgaug.augmenters.arithmetic.AddElementwise``.\n\n Parameters\n ----------\n lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``.\n\n * If a number, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n per_channel : bool or float, optional\n Whether to use the same noise value per pixel for all channels (False)\n or to sample a new value for each channel (True).\n If this value is a float ``p``, then for ``p`` percent of all images\n `per_channel` will be treated as True, otherwise as False.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(lam=5.0)\n\n Adds poisson noise sampled from ``Poisson(5.0)`` to images.\n\n >>> aug = iaa.Func(lam=(0.0, 10.0))\n\n Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the\n interval ``[0.0, 10.0]``.\n\n >>> aug = iaa.Func(lam=5.0, per_channel=True)\n\n Adds poisson noise sampled from ``Poisson(5.0)`` to images,\n where the values are different per pixel *and* channel (e.g. a\n different one for red, green and blue channels for the same pixel).\n\n >>> aug = iaa.Func(lam=(0.0, 10.0), per_channel=True)\n\n Adds poisson noise sampled from ``Poisson(x)`` to images,\n with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel.\n This is the *recommended* configuration.\n\n >>> aug = iaa.Func(lam=2, per_channel=0.5)\n\n Adds poisson noise sampled from the distribution ``Poisson(2)`` to images,\n where the values are sometimes (50 percent of all cases) the same\n per pixel for all channels and sometimes different (other 50 percent).\n\n \"\"\"\n arg_5 = iap.handle_continuous_param(arg_0, \"lam\", value_range=(0, None), tuple_to_uniform=True,\n list_to_choice=True)\n\n if arg_2 is None:\n arg_2 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return AddElementwise(iap.RandomSign(iap.Poisson(arg_0=arg_5)), arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_424", "title": "", "text": "def Func(arg_0=0, arg_1=False, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Augmenter that sets a certain fraction of pixels in images to zero.\n\n dtype support::\n\n See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.\n\n Parameters\n ----------\n p : float or tuple of float or imgaug.parameters.StochasticParameter, optional\n The probability of any pixel being dropped (i.e. set to zero).\n\n * If a float, then that value will be used for all images. 
A value\n of 1.0 would mean that all pixels will be dropped and 0.0 that\n no pixels would be dropped. A value of 0.05 corresponds to 5\n percent of all pixels dropped.\n * If a tuple ``(a, b)``, then a value p will be sampled from the\n range ``a <= p <= b`` per image and be used as the pixel's dropout\n probability.\n * If a StochasticParameter, then this parameter will be used to\n determine per pixel whether it should be dropped (sampled value\n of 0) or shouldn't (sampled value of 1).\n If you instead want to provide the probability as a stochastic\n parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``\n to convert parameter `p` to a 0/1 representation.\n\n per_channel : bool or float, optional\n Whether to use the same value (is dropped / is not dropped)\n for all channels of a pixel (False) or to sample a new value for each\n channel (True).\n If this value is a float p, then for p percent of all images\n `per_channel` will be treated as True, otherwise as False.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(0.02)\n\n drops 2 percent of all pixels.\n\n >>> aug = iaa.Func((0.0, 0.05))\n\n drops in each image a random fraction of all pixels, where the fraction\n is in the range ``0.0 <= x <= 0.05``.\n\n >>> aug = iaa.Func(0.02, per_channel=True)\n\n drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely\n for any pixel to have all channels set to zero (black pixels).\n\n >>> aug = iaa.Func(0.02, per_channel=0.5)\n\n same as previous example, but the `per_channel` feature is only active\n for 50 percent of all images.\n\n \"\"\"\n if ia.is_single_number(arg_0):\n arg_5 = iap.Binomial(1 - arg_0)\n elif ia.is_iterable(arg_0):\n ia.do_assert(len(arg_0) == 2)\n ia.do_assert(arg_0[0] < arg_0[1])\n ia.do_assert(0 <= arg_0[0] <= 1.0)\n ia.do_assert(0 <= arg_0[1] <= 1.0)\n arg_5 = iap.Binomial(iap.Uniform(1 - arg_0[1], 1 - arg_0[0]))\n elif isinstance(arg_0, iap.StochasticParameter):\n arg_5 = arg_0\n else:\n raise Exception(\"Expected p to be float or int or StochasticParameter, got %s.\" % (type(arg_0),))\n\n if arg_2 is None:\n arg_2 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return MultiplyElementwise(arg_5, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_425", "title": "", "text": "def Func(arg_0=0, arg_1=None, arg_2=False, arg_3=None):\n \"\"\"\n Creates an augmenter to apply impulse noise to an image.\n\n This is identical to ``SaltAndPepper``, except that per_channel is always set to True.\n\n dtype support::\n\n See ``imgaug.augmenters.arithmetic.SaltAndPepper``.\n\n \"\"\"\n return SaltAndPepper(arg_0=arg_0, per_channel=True, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_426", "title": "", "text": "def Func(arg_0=0, arg_1=False, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Adds salt and pepper noise to an image, i.e. 
some white-ish and black-ish pixels.\n\n dtype support::\n\n See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\n Parameters\n ----------\n p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n Probability of changing a pixel to salt/pepper noise.\n\n * If a float, then that value will be used for all images as the\n probability.\n * If a tuple ``(a, b)``, then a probability will be sampled per image\n from the range ``a <= x <= b``.\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, then this parameter will be used as\n the *mask*, i.e. it is expected to contain values between\n 0.0 and 1.0, where 1.0 means that salt/pepper is to be added\n at that location.\n\n per_channel : bool or float, optional\n Whether to use the same value for all channels (False)\n or to sample a new value for each channel (True).\n If this value is a float ``p``, then for ``p`` percent of all images\n `per_channel` will be treated as True, otherwise as False.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(0.05)\n\n Replaces 5 percent of all pixels with salt/pepper.\n\n \"\"\"\n if arg_2 is None:\n arg_2 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return ReplaceElementwise(\n mask=arg_0,\n replacement=iap.Beta(0.5, 0.5) * 255,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4\n )"} +{"_id": "doc_427", "title": "", "text": "def Func(arg_0=0, arg_1=False, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Adds pepper noise to an image, i.e. black-ish pixels.\n\n This is similar to dropout, but slower and the black pixels are not uniformly black.\n\n dtype support::\n\n See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\n Parameters\n ----------\n p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n Probability of changing a pixel to pepper noise.\n\n * If a float, then that value will be used for all images as the\n probability.\n * If a tuple ``(a, b)``, then a probability will be sampled per image\n from the range ``a <= x <= b``.\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, then this parameter will be used as\n the *mask*, i.e. 
it is expected to contain values between\n 0.0 and 1.0, where 1.0 means that pepper is to be added\n at that location.\n\n per_channel : bool or float, optional\n Whether to use the same value for all channels (False)\n or to sample a new value for each channel (True).\n If this value is a float ``p``, then for ``p`` percent of all images\n `per_channel` will be treated as True, otherwise as False.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(0.05)\n\n Replaces 5 percent of all pixels with pepper.\n\n \"\"\"\n\n arg_5 = iap.ForceSign(\n iap.Beta(0.5, 0.5) - 0.5,\n positive=False,\n mode=\"invert\"\n ) + 0.5\n arg_6 = arg_5 * 255\n\n if arg_2 is None:\n arg_2 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return ReplaceElementwise(\n mask=arg_0,\n arg_6=arg_6,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4\n )"} +{"_id": "doc_428", "title": "", "text": "def Func(arg_0=0, arg_1=None, arg_2=None, arg_3=False, arg_4=4, arg_5=None, arg_6=False,\n arg_7=None):\n \"\"\"\n Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.\n\n dtype support::\n\n See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\n Parameters\n ----------\n p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n Probability of changing a pixel to pepper noise.\n\n * If a float, then that value will be used for all images as the\n probability.\n * If a tuple ``(a, b)``, then a probability will be sampled per image\n from the range ``a <= x <= b.``\n * If a list, then a random value will be sampled from that list\n per image.\n * If a StochasticParameter, then this parameter will be used as\n the *mask*, i.e. it is expected to contain values between\n 0.0 and 1.0, where 1.0 means that pepper is to be added\n at that location.\n\n size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional\n The size of the lower resolution image from which to sample the noise\n mask in absolute pixel dimensions.\n\n * If an integer, then that size will be used for both height and\n width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then\n upsampled to ``HxW``, where ``H`` is the image size and W the image width.\n * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the\n range ``[a..b]`` and the mask will be generated at size ``MxN``, then\n upsampled to ``HxW``.\n * If a StochasticParameter, then this parameter will be used to\n determine the sizes. It is expected to be discrete.\n\n size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional\n The size of the lower resolution image from which to sample the noise\n mask *in percent* of the input image.\n\n * If a float, then that value will be used as the percentage of the\n height and width (relative to the original size). E.g. for value\n p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled\n to ``HxW``.\n * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the\n interval ``(a, b)`` and used as the percentages, i.e the mask size\n will be ``(m*H)x(n*W)``.\n * If a StochasticParameter, then this parameter will be used to\n sample the percentage values. 
It is expected to be continuous.\n\n per_channel : bool or float, optional\n Whether to use the same value (is dropped / is not dropped)\n for all channels of a pixel (False) or to sample a new value for each\n channel (True).\n If this value is a float ``p``, then for ``p`` percent of all images\n `per_channel` will be treated as True, otherwise as False.\n\n min_size : int, optional\n Minimum size of the low resolution mask, both width and height. If\n `size_percent` or `size_px` leads to a lower value than this, `min_size`\n will be used instead. This should never have a value of less than 2,\n otherwise one may end up with a 1x1 low resolution mask, leading easily\n to the whole image being replaced.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(0.05, size_percent=(0.01, 0.1))\n\n Replaces 5 percent of all pixels with pepper in an image that has\n 1 to 10 percent of the input image size, then upscales the results\n to the input image size, leading to large rectangular areas being replaced.\n\n \"\"\"\n arg_8 = iap.handle_probability_param(arg_0, \"p\", tuple_to_uniform=True, list_to_choice=True)\n\n if arg_1 is not None:\n arg_9 = iap.FromLowerResolution(other_param=arg_8, arg_1=arg_1, arg_4=arg_4)\n elif arg_2 is not None:\n arg_9 = iap.FromLowerResolution(other_param=arg_8, arg_2=arg_2, arg_4=arg_4)\n else:\n raise Exception(\"Either size_px or size_percent must be set.\")\n\n arg_10 = iap.ForceSign(\n iap.Beta(0.5, 0.5) - 0.5,\n positive=False,\n mode=\"invert\"\n ) + 0.5\n arg_11 = arg_10 * 255\n\n if arg_5 is None:\n arg_5 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return ReplaceElementwise(\n arg_8=arg_9,\n arg_11=arg_11,\n arg_3=arg_3,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7\n )"} +{"_id": "doc_429", "title": "", "text": "def Func(arg_0=1.0, arg_1=False, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Augmenter that changes the contrast of images.\n\n dtype support:\n\n See ``imgaug.augmenters.contrast.LinearContrast``.\n\n Parameters\n ----------\n alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Strength of the contrast normalization. 
Higher values than 1.0\n lead to higher contrast, lower values decrease the contrast.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be sampled per image from\n the range ``a <= x <= b`` and be used as the alpha value.\n * If a list, then a random value will be sampled per image from\n that list.\n * If a StochasticParameter, then this parameter will be used to\n sample the alpha value per image.\n\n per_channel : bool or float, optional\n Whether to use the same value for all channels (False)\n or to sample a new value for each channel (True).\n If this value is a float ``p``, then for ``p`` percent of all images\n `per_channel` will be treated as True, otherwise as False.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> iaa.Func((0.5, 1.5))\n\n Decreases oder improves contrast per image by a random factor between\n 0.5 and 1.5. The factor 0.5 means that any difference from the center value\n (i.e. 128) will be halved, leading to less contrast.\n\n >>> iaa.Func((0.5, 1.5), per_channel=0.5)\n\n Same as before, but for 50 percent of all images the normalization is done\n independently per channel (i.e. factors can vary per channel for the same\n image). In the other 50 percent of all images, the factor is the same for\n all channels.\n\n \"\"\"\n # placed here to avoid cyclic dependency\n from . import contrast as contrast_lib\n return contrast_lib.LinearContrast(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_430", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Checks whether a variable is a numpy integer array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n True if the variable is a numpy integer array. Otherwise False.\n\n \"\"\"\n return is_np_array(arg_0) and issubclass(arg_0.dtype.type, np.integer)"} +{"_id": "doc_431", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Checks whether a variable is a numpy float array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n True if the variable is a numpy float array. Otherwise False.\n\n \"\"\"\n return is_np_array(arg_0) and issubclass(arg_0.dtype.type, np.floating)"} +{"_id": "doc_432", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Creates a copy of a random state.\n\n Parameters\n ----------\n random_state : numpy.random.RandomState\n The random state to copy.\n\n force_copy : bool, optional\n If True, this function will always create a copy of every random\n state. 
If False, it will not copy numpy's default random state,\n but all other random states.\n\n Returns\n -------\n rs_copy : numpy.random.RandomState\n The copied random state.\n\n \"\"\"\n if arg_0 == np.random and not arg_1:\n return arg_0\n else:\n arg_2 = dummy_random_state()\n arg_3 = arg_0.get_state()\n arg_2.set_state(arg_3)\n return arg_2"} +{"_id": "doc_433", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"\n Create N new random states based on an existing random state or seed.\n\n Parameters\n ----------\n random_state : numpy.random.RandomState\n Random state or seed from which to derive new random states.\n\n n : int, optional\n Number of random states to derive.\n\n Returns\n -------\n list of numpy.random.RandomState\n Derived random states.\n\n \"\"\"\n arg_2 = arg_0.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]\n return [new_random_state(arg_2+arg_3) for arg_3 in sm.xrange(arg_1)]"} +{"_id": "doc_434", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generate a normalized rectangle to be extract from the standard quokka image.\n\n Parameters\n ----------\n extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n Unnormalized representation of the image subarea to be extracted.\n\n * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``\n will be extracted from the image.\n * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``\n and ``y2``.\n * If a BoundingBox, then that bounding box's area will be extracted from the image.\n * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box\n and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the\n one bounding box will be used similar to BoundingBox.\n\n Returns\n -------\n bb : imgaug.BoundingBox\n Normalized representation of the area to extract from the standard quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n if arg_0 == \"square\":\n arg_1 = BoundingBox(x1=0, y1=0, x2=643, y2=643)\n elif isinstance(arg_0, tuple) and len(arg_0) == 4:\n arg_1 = BoundingBox(x1=arg_0[0], y1=arg_0[1], x2=arg_0[2], y2=arg_0[3])\n elif isinstance(arg_0, BoundingBox):\n arg_1 = arg_0\n elif isinstance(arg_0, BoundingBoxesOnImage):\n do_assert(len(arg_0.bounding_boxes) == 1)\n do_assert(arg_0.shape[0:2] == (643, 960))\n arg_1 = arg_0.bounding_boxes[0]\n else:\n raise Exception(\n \"Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage \"\n + \"for parameter 'extract', got %s.\" % (type(arg_0),)\n )\n return arg_1"} +{"_id": "doc_435", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Computes the intended new shape of an image-like array after resizing.\n\n Parameters\n ----------\n from_shape : tuple or ndarray\n Old shape of the array. 
Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or\n alternatively an array with two or three dimensions.\n\n to_shape : None or tuple of ints or tuple of floats or int or float or ndarray\n New shape of the array.\n\n * If None, then `from_shape` will be used as the new shape.\n * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it\n is part of `from_shape`.\n * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old\n height/width.\n * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height\n and width.\n * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will\n be used as the new height and width.\n * If a numpy array, then the array's shape will be used.\n\n Returns\n -------\n to_shape_computed : tuple of int\n New shape.\n\n \"\"\"\n if is_np_array(arg_0):\n arg_0 = arg_0.shape\n if is_np_array(arg_1):\n arg_1 = arg_1.shape\n\n arg_2 = list(arg_0)\n\n if arg_1 is None:\n pass\n elif isinstance(arg_1, tuple):\n do_assert(len(arg_0) in [2, 3])\n do_assert(len(arg_1) in [2, 3])\n\n if len(arg_0) == 3 and len(arg_1) == 3:\n do_assert(arg_0[2] == arg_1[2])\n elif len(arg_1) == 3:\n arg_2.append(arg_1[2])\n\n do_assert(all([arg_3 is None or is_single_number(arg_3) for arg_3 in arg_1[0:2]]),\n \"Expected the first two entries in to_shape to be None or numbers, \"\n + \"got types %s.\" % (str([type(arg_3) for arg_3 in arg_1[0:2]]),))\n\n for arg_4, arg_5 in enumerate(arg_0[0:2]):\n if arg_1[arg_4] is None:\n arg_2[arg_4] = arg_5\n elif is_single_integer(arg_1[arg_4]):\n arg_2[arg_4] = arg_1[arg_4]\n else: # float\n arg_2[arg_4] = int(np.round(arg_5 * arg_1[arg_4]))\n elif is_single_integer(arg_1) or is_single_float(arg_1):\n arg_2 = Func(arg_0, (arg_1, arg_1))\n else:\n raise Exception(\"Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int \"\n + \"or single float, got %s.\" % (type(arg_1),))\n\n return tuple(arg_2)"} +{"_id": "doc_436", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Returns an image of a Func as a numpy array.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.\n Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height\n and ``W`` is the width. If None, then the image will not be resized.\n\n extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n Subarea of the Func image to extract:\n\n * If None, then the whole image will be used.\n * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will\n be extracted from the image.\n * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``\n and ``y2``.\n * If a BoundingBox, then that bounding box's area will be extracted from the image.\n * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box\n and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). 
Then the\n one bounding box will be used similar to BoundingBox.\n\n Returns\n -------\n img : (H,W,3) ndarray\n The image array of dtype uint8.\n\n \"\"\"\n arg_2 = imageio.imread(QUOKKA_FP, pilmode=\"RGB\")\n if arg_1 is not None:\n arg_3 = _Func_normalize_extract(arg_1)\n arg_2 = arg_3.extract_from_image(arg_2)\n if arg_0 is not None:\n arg_4 = _compute_resized_shape(arg_2.shape, arg_0)\n arg_2 = imresize_single_image(arg_2, arg_4[0:2])\n return arg_2"} +{"_id": "doc_437", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Returns a segmentation map for the standard example quokka image.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n See :func:`imgaug.quokka`.\n\n extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n See :func:`imgaug.quokka`.\n\n Returns\n -------\n result : imgaug.SegmentationMapOnImage\n Segmentation map object.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.segmaps import SegmentationMapOnImage\n\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n arg_2 = json.load(f)\n\n arg_3 = []\n arg_4 = []\n for arg_5 in arg_2[\"polygons\"][0][\"keypoints\"]:\n arg_6 = arg_5[\"x\"]\n arg_7 = arg_5[\"y\"]\n arg_3.append(arg_6)\n arg_4.append(arg_7)\n\n arg_8 = np.zeros((643, 960, 1), dtype=np.float32)\n arg_9, arg_10 = skimage.draw.polygon(np.array(arg_4), np.array(arg_3), arg_14=arg_8.shape)\n arg_8[arg_9, arg_10] = 1.0\n\n if arg_1 is not None:\n arg_11 = _quokka_normalize_extract(arg_1)\n arg_8 = arg_11.extract_from_image(arg_8)\n\n arg_12 = SegmentationMapOnImage(arg_8, arg_14=arg_8.shape[0:2] + (3,))\n\n if arg_0 is not None:\n arg_13 = _compute_resized_shape(arg_8.shape, arg_0)\n arg_12 = arg_12.resize(arg_13[0:2])\n arg_12.shape = tuple(arg_13[0:2]) + (3,)\n\n return arg_12"} +{"_id": "doc_438", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Returns example keypoints on the standard example quokke image.\n\n The keypoints cover the eyes, ears, nose and paws.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the keypoints are placed. If None, then the keypoints\n are not projected to any new size (positions on the original image are used).\n Floats lead to relative size changes, ints to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n Subarea to extract from the image. 
See :func:`imgaug.quokka`.\n\n Returns\n -------\n kpsoi : imgaug.KeypointsOnImage\n Example keypoints on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n\n arg_2, arg_3 = 0, 0\n if arg_1 is not None:\n arg_4 = _quokka_normalize_extract(arg_1)\n arg_2 = arg_4.x1\n arg_3 = arg_4.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n arg_5 = json.load(f)\n arg_6 = []\n for arg_7 in arg_5[\"keypoints\"]:\n arg_6.append(Keypoint(x=arg_7[\"x\"] - arg_2, y=arg_7[\"y\"] - arg_3))\n if arg_1 is not None:\n arg_8 = (arg_4.height, arg_4.width, 3)\n else:\n arg_8 = (643, 960, 3)\n arg_9 = KeypointsOnImage(arg_6, arg_8=arg_8)\n if arg_0 is not None:\n arg_10 = _compute_resized_shape(arg_8, arg_0)\n arg_9 = arg_9.on(arg_10)\n return arg_9"} +{"_id": "doc_439", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Returns example bounding boxes on the standard example quokke image.\n\n Currently only a single bounding box is returned that covers the quokka.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the BBs are placed. If None, then the BBs\n are not projected to any new size (positions on the original image are used).\n Floats lead to relative size changes, ints to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n Subarea to extract from the image. See :func:`imgaug.quokka`.\n\n Returns\n -------\n bbsoi : imgaug.BoundingBoxesOnImage\n Example BBs on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n arg_2, arg_3 = 0, 0\n if arg_1 is not None:\n arg_4 = _quokka_normalize_extract(arg_1)\n arg_2 = arg_4.x1\n arg_3 = arg_4.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n arg_5 = json.load(f)\n arg_6 = []\n for arg_7 in arg_5[\"bounding_boxes\"]:\n arg_6.append(\n BoundingBox(\n x1=arg_7[\"x1\"] - arg_2,\n y1=arg_7[\"y1\"] - arg_3,\n x2=arg_7[\"x2\"] - arg_2,\n y2=arg_7[\"y2\"] - arg_3\n )\n )\n if arg_1 is not None:\n arg_8 = (arg_4.height, arg_4.width, 3)\n else:\n arg_8 = (643, 960, 3)\n arg_9 = BoundingBoxesOnImage(arg_6, arg_8=arg_8)\n if arg_0 is not None:\n arg_10 = _compute_resized_shape(arg_8, arg_0)\n arg_9 = arg_9.on(arg_10)\n return arg_9"} +{"_id": "doc_440", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Returns example polygons on the standard example quokke image.\n\n The result contains one polygon, covering the quokka's outline.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the polygons are placed. If None,\n then the polygons are not projected to any new size (positions on the\n original image are used). Floats lead to relative size changes, ints\n to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.BoundingBox or \\\n imgaug.BoundingBoxesOnImage\n Subarea to extract from the image. 
See :func:`imgaug.quokka`.\n\n Returns\n -------\n psoi : imgaug.PolygonsOnImage\n Example polygons on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.polys import Polygon, PolygonsOnImage\n\n arg_2, arg_3 = 0, 0\n if arg_1 is not None:\n arg_4 = _quokka_normalize_extract(arg_1)\n arg_2 = arg_4.x1\n arg_3 = arg_4.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n arg_5 = json.load(f)\n arg_6 = []\n for arg_7 in arg_5[\"polygons\"]:\n arg_6.append(\n Polygon([(arg_8[\"x\"] - arg_2, arg_8[\"y\"] - arg_3)\n for arg_8 in arg_7[\"keypoints\"]])\n )\n if arg_1 is not None:\n arg_9 = (arg_4.height, arg_4.width, 3)\n else:\n arg_9 = (643, 960, 3)\n arg_10 = PolygonsOnImage(arg_6, arg_9=arg_9)\n if arg_0 is not None:\n arg_11 = _compute_resized_shape(arg_9, arg_0)\n arg_10 = arg_10.on(arg_11)\n return arg_10"} +{"_id": "doc_441", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the angle in radians between vectors `v1` and `v2`.\n\n From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python\n\n Parameters\n ----------\n v1 : (N,) ndarray\n First vector.\n\n v2 : (N,) ndarray\n Second vector.\n\n Returns\n -------\n out : float\n Angle in radians.\n\n Examples\n --------\n >>> Func(np.float32([1, 0, 0]), np.float32([0, 1, 0]))\n 1.570796...\n\n >>> Func(np.float32([1, 0, 0]), np.float32([1, 0, 0]))\n 0.0\n\n >>> Func(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))\n 3.141592...\n\n \"\"\"\n arg_2 = np.linalg.norm(arg_0)\n arg_3 = np.linalg.norm(arg_1)\n arg_4 = (arg_0 / arg_2) if arg_2 > 0 else np.float32(arg_0) * 0\n arg_5 = (arg_1 / arg_3) if arg_3 > 0 else np.float32(arg_1) * 0\n return np.arccos(np.clip(np.dot(arg_4, arg_5), -1.0, 1.0))"} +{"_id": "doc_442", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7):\n \"\"\"\n Compute the intersection point of two lines.\n\n Taken from https://stackoverflow.com/a/20679579 .\n\n Parameters\n ----------\n x1 : number\n x coordinate of the first point on line 1. (The lines extends beyond this point.)\n\n y1 : number\n y coordinate of the first point on line 1. (The lines extends beyond this point.)\n\n x2 : number\n x coordinate of the second point on line 1. (The lines extends beyond this point.)\n\n y2 : number\n y coordinate of the second point on line 1. (The lines extends beyond this point.)\n\n x3 : number\n x coordinate of the first point on line 2. (The lines extends beyond this point.)\n\n y3 : number\n y coordinate of the first point on line 2. (The lines extends beyond this point.)\n\n x4 : number\n x coordinate of the second point on line 2. (The lines extends beyond this point.)\n\n y4 : number\n y coordinate of the second point on line 2. 
(The lines extends beyond this point.)\n\n Returns\n -------\n tuple of number or bool\n The coordinate of the intersection point as a tuple ``(x, y)``.\n If the lines are parallel (no intersection point or an infinite number of them), the result is False.\n\n \"\"\"\n def _make_line(arg_8, arg_9):\n arg_10 = (arg_8[1] - arg_9[1])\n arg_11 = (arg_9[0] - arg_8[0])\n arg_12 = (arg_8[0]*arg_9[1] - arg_9[0]*arg_8[1])\n return arg_10, arg_11, -arg_12\n\n arg_13 = _make_line((arg_0, arg_1), (arg_2, arg_3))\n arg_14 = _make_line((arg_4, arg_5), (arg_6, arg_7))\n\n arg_15 = arg_13[0] * arg_14[1] - arg_13[1] * arg_14[0]\n arg_16 = arg_13[2] * arg_14[1] - arg_13[1] * arg_14[2]\n arg_17 = arg_13[0] * arg_14[2] - arg_13[2] * arg_14[0]\n if arg_15 != 0:\n arg_18 = arg_16 / arg_15\n arg_19 = arg_17 / arg_15\n return arg_18, arg_19\n else:\n return False"} +{"_id": "doc_443", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Resizes a single image.\n\n\n dtype support::\n\n See :func:`imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n image : (H,W,C) ndarray or (H,W) ndarray\n Array of the image to resize.\n Usually recommended to be of dtype uint8.\n\n sizes : float or iterable of int or iterable of float\n See :func:`imgaug.imgaug.imresize_many_images`.\n\n interpolation : None or str or int, optional\n See :func:`imgaug.imgaug.imresize_many_images`.\n\n Returns\n -------\n out : (H',W',C) ndarray or (H',W') ndarray\n The resized image.\n\n \"\"\"\n arg_3 = False\n if arg_0.ndim == 2:\n arg_3 = True\n arg_0 = arg_0[:, :, np.newaxis]\n do_assert(len(arg_0.shape) == 3, arg_0.shape)\n arg_4 = imresize_many_images(arg_0[np.newaxis, :, :, :], arg_1, arg_2=arg_2)\n if arg_3:\n return np.squeeze(arg_4[0, :, :, 0])\n else:\n return arg_4[0, ...]"} +{"_id": "doc_444", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.\n\n The aspect ratio is given as width/height.\n Depending on which dimension is smaller (height or width), only the corresponding\n sides (left/right or top/bottom) will be padded. In each case, both of the sides will\n be padded equally.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array for which to compute pad amounts.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 
2.0 denotes the image having twice\n as much width as height.\n\n Returns\n -------\n result : tuple of int\n Required paddign amounts to reach the target aspect ratio, given as a tuple\n of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n do_assert(arg_0.ndim in [2, 3])\n do_assert(arg_1 > 0)\n arg_2, arg_3 = arg_0.shape[0:2]\n do_assert(arg_2 > 0)\n arg_4 = arg_3 / arg_2\n\n arg_5 = 0\n arg_6 = 0\n arg_7 = 0\n arg_8 = 0\n\n if arg_4 < arg_1:\n # vertical image, height > width\n arg_9 = (arg_1 * arg_2) - arg_3\n arg_6 = int(np.ceil(arg_9 / 2))\n arg_8 = int(np.floor(arg_9 / 2))\n elif arg_4 > arg_1:\n # horizontal image, width > height\n arg_9 = ((1/arg_1) * arg_3) - arg_2\n arg_5 = int(np.floor(arg_9 / 2))\n arg_7 = int(np.ceil(arg_9 / 2))\n\n return arg_5, arg_6, arg_7, arg_8"} +{"_id": "doc_445", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0, arg_4=True):\n \"\"\"\n Resize an array by Funcing values within blocks.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested (2)\n * ``uint64``: no (1)\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested (2)\n * ``int64``: no (1)\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested (2)\n * ``bool``: yes; tested\n\n - (1) results too inaccurate (at least when using np.average as func)\n - (2) Note that scikit-image documentation says that the wrapped Funcing function converts\n inputs to float64. Actual tests showed no indication of that happening (at least when\n using preserve_dtype=True).\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to Func. Ideally of datatype ``numpy.float64``.\n\n block_size : int or tuple of int\n Spatial size of each group of values to Func, aka kernel size.\n If a single integer, then a symmetric block of that size along height and width will be used.\n If a tuple of two values, it is assumed to be the block size along height and width of the image-like,\n with Funcing happening per channel.\n If a tuple of three values, it is assumed to be the block size along height, width and channels.\n\n func : callable\n Function to apply to a given block in order to convert it to a single number,\n e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.\n\n cval : number, optional\n Value to use in order to pad the array along its border if the array cannot be divided\n by `block_size` without remainder.\n\n preserve_dtype : bool, optional\n Whether to convert the array back to the input datatype if it is changed away from\n that in the Funcing process.\n\n Returns\n -------\n arr_reduced : (H',W') ndarray or (H',W',C') ndarray\n Array after Funcing.\n\n \"\"\"\n # TODO find better way to avoid circular import\n from . 
import dtypes as iadt\n iadt.gate_dtypes(arg_0,\n allowed=[\"bool\", \"uint8\", \"uint16\", \"uint32\", \"int8\", \"int16\", \"int32\",\n \"float16\", \"float32\", \"float64\", \"float128\"],\n disallowed=[\"uint64\", \"uint128\", \"uint256\", \"int64\", \"int128\", \"int256\",\n \"float256\"],\n augmenter=None)\n\n do_assert(arg_0.ndim in [2, 3])\n arg_5 = is_single_integer(arg_1) and arg_1 >= 1\n arg_6 = is_iterable(arg_1) and len(arg_1) in [2, 3] \\\n and [is_single_integer(val) and val >= 1 for val in arg_1]\n do_assert(arg_5 or arg_6)\n\n if is_single_integer(arg_1):\n arg_1 = [arg_1, arg_1]\n if len(arg_1) < arg_0.ndim:\n arg_1 = list(arg_1) + [1]\n\n arg_7 = arg_0.dtype\n arg_8 = skimage.measure.block_reduce(arg_0, tuple(arg_1), arg_2, arg_3=arg_3)\n if arg_4 and arg_8.dtype.type != arg_7:\n arg_8 = arg_8.astype(arg_7)\n return arg_8"} +{"_id": "doc_446", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=True):\n \"\"\"\n Resize an array using average pooling.\n\n dtype support::\n\n See :func:`imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool. See :func:`imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool. See :func:`imgaug.pool` for details.\n\n cval : number, optional\n Padding value. See :func:`imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.\n\n Returns\n -------\n arr_reduced : (H',W') ndarray or (H',W',C') ndarray\n Array after average pooling.\n\n \"\"\"\n return pool(arg_0, arg_1, np.average, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_447", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=True):\n \"\"\"\n Resize an array using max-pooling.\n\n dtype support::\n\n See :func:`imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool. See :func:`imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool. See `imgaug.pool` for details.\n\n cval : number, optional\n Padding value. See :func:`imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype. 
See :func:`imgaug.pool` for details.\n\n Returns\n -------\n arr_reduced : (H',W') ndarray or (H',W',C') ndarray\n Array after max-pooling.\n\n \"\"\"\n return pool(arg_0, arg_1, np.max, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_448", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Converts the input images to a grid image and shows it in a new window.\n\n dtype support::\n\n minimum of (\n :func:`imgaug.imgaug.draw_grid`,\n :func:`imgaug.imgaug.imshow`\n )\n\n Parameters\n ----------\n images : (N,H,W,3) ndarray or iterable of (H,W,3) array\n See :func:`imgaug.draw_grid`.\n\n rows : None or int, optional\n See :func:`imgaug.draw_grid`.\n\n cols : None or int, optional\n See :func:`imgaug.draw_grid`.\n\n \"\"\"\n arg_3 = draw_grid(arg_0, arg_1=arg_1, arg_2=arg_2)\n imshow(arg_3)"} +{"_id": "doc_449", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"\n Shows an image in a window.\n\n dtype support::\n\n * ``uint8``: yes; not tested\n * ``uint16``: ?\n * ``uint32``: ?\n * ``uint64``: ?\n * ``int8``: ?\n * ``int16``: ?\n * ``int32``: ?\n * ``int64``: ?\n * ``float16``: ?\n * ``float32``: ?\n * ``float64``: ?\n * ``float128``: ?\n * ``bool``: ?\n\n Parameters\n ----------\n image : (H,W,3) ndarray\n Image to show.\n\n backend : {'matplotlib', 'cv2'}, optional\n Library to use to show the image. May be either matplotlib or OpenCV ('cv2').\n OpenCV tends to be faster, but apparently causes more technical issues.\n\n \"\"\"\n do_assert(arg_1 in [\"matplotlib\", \"cv2\"], \"Expected backend 'matplotlib' or 'cv2', got %s.\" % (arg_1,))\n\n if arg_1 == \"cv2\":\n arg_3 = arg_0\n if arg_0.ndim == 3 and arg_0.shape[2] in [3, 4]:\n arg_3 = arg_0[..., 0:3][..., ::-1]\n\n arg_4 = \"imgaug-default-window\"\n cv2.namedWindow(arg_4, cv2.WINDOW_NORMAL)\n cv2.Func(arg_4, arg_3)\n cv2.waitKey(0)\n cv2.destroyWindow(arg_4)\n else:\n # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)\n import matplotlib.pyplot as plt\n\n arg_5 = 96\n arg_6, arg_7 = arg_0.shape[0] / arg_5, arg_0.shape[1] / arg_5\n arg_7 = max(arg_7, 6) # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)\n arg_8, arg_9 = plt.subplots(figsize=(arg_7, arg_6), arg_5=arg_5)\n arg_8.canvas.set_window_title(\"imgaug.Func(%s)\" % (arg_0.shape,))\n arg_9.Func(arg_0, cmap=\"gray\") # cmap is only activate for grayscale images\n plt.show()"} +{"_id": "doc_450", "title": "", "text": "def Func(arg_0, arg_1=2):\n \"\"\"Generate a non-silent deprecation warning with stacktrace.\n\n The used warning is ``imgaug.imgaug.DeprecationWarning``.\n\n Parameters\n ----------\n msg : str\n The message of the warning.\n\n stacklevel : int, optional\n How many steps above this function to \"jump\" in the stacktrace for\n the displayed file and line number of the error message.\n Usually 2.\n\n \"\"\"\n import warnings\n warnings.warn(arg_0,\n category=DeprecationWarning,\n arg_1=arg_1)"} +{"_id": "doc_451", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Returns whether an augmenter may be executed.\n\n Returns\n -------\n bool\n If True, the augmenter may be executed. 
If False, it may not be executed.\n\n \"\"\"\n if arg_0.activator is None:\n return arg_4\n else:\n return arg_0.activator(arg_1, arg_2, arg_3, arg_4)"}
{"_id": "doc_452", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n A function to be called after the augmentation of images was\n performed.\n\n Returns\n -------\n (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray\n The input images, optionally modified.\n\n \"\"\"\n if arg_0.Funcor is None:\n return arg_1\n else:\n return arg_0.Funcor(arg_1, arg_2, arg_3)"}
{"_id": "doc_453", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Augment batches asynchronously.\n\n Parameters\n ----------\n batches : list of imgaug.augmentables.batches.Batch\n The batches to augment.\n\n chunksize : None or int, optional\n Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve\n performance.\n\n callback : None or callable, optional\n Function to call upon finish. See `multiprocessing.Pool`.\n\n error_callback : None or callable, optional\n Function to call upon errors. See `multiprocessing.Pool`.\n\n Returns\n -------\n multiprocessing.MapResult\n Asynchronous result. See `multiprocessing.Pool`.\n\n \"\"\"\n assert isinstance(arg_1, list), (\"Expected to get a list as 'batches', got type %s. \"\n + \"Call imap_batches() if you use generators.\") % (type(arg_1),)\n return arg_0.pool.map_async(_Pool_starworker, arg_0._handle_batch_ids(arg_1),\n arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"}
{"_id": "doc_454", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"\n Augment batches from a generator.\n\n Parameters\n ----------\n batches : generator of imgaug.augmentables.batches.Batch\n The batches to augment, provided as a generator. Each call to the generator should yield exactly one\n batch.\n\n chunksize : None or int, optional\n Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve\n performance.\n\n Yields\n ------\n imgaug.augmentables.batches.Batch\n Augmented batch.\n\n \"\"\"\n assert ia.is_generator(arg_1), (\"Expected to get a generator as 'batches', got type %s. 
\"\n + \"Call map_batches() if you use lists.\") % (type(arg_1),)\n # TODO change this to 'yield from' once switched to 3.3+\n arg_3 = arg_0.pool.imap(_Pool_starworker, arg_0._handle_batch_ids_gen(arg_1), arg_2=arg_2)\n for arg_4 in arg_3:\n yield arg_4"}
{"_id": "doc_455", "title": "", "text": "def Func(arg_0):\n \"\"\"Terminate the pool immediately.\"\"\"\n if arg_0._pool is not None:\n arg_0._pool.Func()\n arg_0._pool.join()\n arg_0._pool = None"}
{"_id": "doc_456", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a batch from the queue of augmented batches.\n\n If workers are still running and there are no batches in the queue,\n it will automatically wait for the next batch.\n\n Returns\n -------\n out : None or imgaug.Batch\n One batch or None if all workers have finished.\n\n \"\"\"\n if arg_0.all_finished():\n return None\n\n arg_1 = arg_0.queue_result.get()\n arg_2 = pickle.loads(arg_1)\n if arg_2 is not None:\n return arg_2\n else:\n arg_0.nb_workers_finished += 1\n if arg_0.nb_workers_finished >= arg_0.nb_workers:\n try:\n arg_0.queue_source.get(timeout=0.001) # remove the None from the source queue\n except QueueEmpty:\n pass\n return None\n else:\n return arg_0.Func()"}
{"_id": "doc_457", "title": "", "text": "def Func(arg_0, arg_1=\"invert\", arg_2=2):\n \"\"\"\n Converts another parameter's results to negative values.\n\n Parameters\n ----------\n other_param : imgaug.parameters.StochasticParameter\n Other parameter whose sampled values are to be\n modified.\n\n mode : {'invert', 'reroll'}, optional\n How to change the signs. Valid values are ``invert`` and ``reroll``.\n ``invert`` means that wrong signs are simply flipped.\n ``reroll`` means that all samples with wrong signs are sampled again,\n optionally many times, until they randomly end up having the correct\n sign.\n\n reroll_count_max : int, optional\n If `mode` is set to ``reroll``, this determines how often values may\n be rerolled before giving up and simply flipping the sign (as in\n ``mode=\"invert\"``). This shouldn't be set too high, as rerolling is\n expensive.\n\n Examples\n --------\n >>> param = Func(Normal(0, 1), mode=\"reroll\")\n\n Generates a normal distribution that has only negative values.\n\n \"\"\"\n return ForceSign(\n arg_0=arg_0,\n positive=False,\n arg_1=arg_1,\n arg_2=arg_2\n )"}
{"_id": "doc_458", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Estimate the Func of the polygon.\n\n Returns\n -------\n number\n Area of the polygon.\n\n \"\"\"\n if len(arg_0.exterior) < 3:\n raise Exception(\"Cannot compute the polygon's Func because it contains less than three points.\")\n arg_1 = arg_0.to_shapely_polygon()\n return arg_1.Func"}
{"_id": "doc_459", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Project the polygon onto an image with different shape.\n\n The relative coordinates of all points remain the same.\n E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be\n Funced on a new image (width=200, height=100) to (x=40, y=10).\n\n This is intended for cases where the original image is resized.\n It cannot be used for more complex changes (e.g. padding, cropping).\n\n Parameters\n ----------\n from_shape : tuple of int\n Shape of the original image. (Before resize.)\n\n to_shape : tuple of int\n Shape of the new image. 
(After resize.)\n\n Returns\n -------\n imgaug.Polygon\n Polygon object with new coordinates.\n\n \"\"\"\n if arg_1[0:2] == arg_2[0:2]:\n return arg_0.copy()\n arg_3 = arg_0.to_line_string(closed=False).Func(\n arg_1, arg_2)\n return arg_0.copy(exterior=arg_3.coords)"}
{"_id": "doc_460", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Find the index of the point within the exterior that is closest to the given coordinates.\n\n \"Closeness\" is here defined based on euclidean distance.\n This method will raise an AssertionError if the exterior contains no points.\n\n Parameters\n ----------\n x : number\n X-coordinate around which to search for close points.\n\n y : number\n Y-coordinate around which to search for close points.\n\n return_distance : bool, optional\n Whether to also return the distance of the closest point.\n\n Returns\n -------\n int\n Index of the closest point.\n\n number\n Euclidean distance to the closest point.\n This value is only returned if `return_distance` was set to True.\n\n \"\"\"\n ia.do_assert(len(arg_0.exterior) > 0)\n arg_4 = []\n for arg_5, arg_6 in arg_0.exterior:\n arg_7 = (arg_5 - arg_1) ** 2 + (arg_6 - arg_2) ** 2\n arg_4.append(arg_7)\n arg_4 = np.sqrt(arg_4)\n arg_8 = np.argmin(arg_4)\n if arg_3:\n return arg_8, arg_4[arg_8]\n return arg_8"}
{"_id": "doc_461", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Estimate whether the polygon is fully inside the image area.\n\n Parameters\n ----------\n image : (H,W,...) ndarray or tuple of int\n Image dimensions to use.\n If an ndarray, its shape will be used.\n If a tuple, it is assumed to represent the image shape and must contain at least two integers.\n\n Returns\n -------\n bool\n True if the polygon is fully inside the image area.\n False otherwise.\n\n \"\"\"\n return not arg_0.is_out_of_image(arg_1, fully=True, partly=True)"}
{"_id": "doc_462", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Estimate whether the polygon is at least partially inside the image area.\n\n Parameters\n ----------\n image : (H,W,...) ndarray or tuple of int\n Image dimensions to use.\n If an ndarray, its shape will be used.\n If a tuple, it is assumed to represent the image shape and must contain at least two integers.\n\n Returns\n -------\n bool\n True if the polygon is at least partially inside the image area.\n False otherwise.\n\n \"\"\"\n return not arg_0.is_out_of_image(arg_1, fully=True, partly=False)"}
{"_id": "doc_463", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=False):\n \"\"\"\n Estimate whether the polygon is partially or fully outside of the image area.\n\n Parameters\n ----------\n image : (H,W,...) ndarray or tuple of int\n Image dimensions to use.\n If an ndarray, its shape will be used.\n If a tuple, it is assumed to represent the image shape and must contain at least two integers.\n\n fully : bool, optional\n Whether to return True if the polygon is fully outside of the image area.\n\n partly : bool, optional\n Whether to return True if the polygon is at least partially outside of the image area.\n\n Returns\n -------\n bool\n True if the polygon is partially/fully outside of the image area, depending\n on defined parameters. 
False otherwise.\n\n \"\"\"\n # TODO this is inconsistent with line strings, which return a default\n # value in these cases\n if len(arg_0.exterior) == 0:\n raise Exception(\"Cannot determine whether the polygon is inside the image, because it contains no points.\")\n arg_4 = arg_0.to_line_string()\n return arg_4.Func(arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_464", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Extract the image pixels within the polygon.\n\n This function will zero-pad the image if the polygon is partially/fully outside of\n the image.\n\n Parameters\n ----------\n image : (H,W) ndarray or (H,W,C) ndarray\n The image from which to extract the pixels within the polygon.\n\n Returns\n -------\n result : (H',W') ndarray or (H',W',C) ndarray\n Pixels within the polygon. Zero-padded if the polygon is partially/fully\n outside of the image.\n\n \"\"\"\n ia.do_assert(arg_1.ndim in [2, 3])\n if len(arg_0.exterior) <= 2:\n raise Exception(\"Polygon must be made up of at least 3 points to extract its area from an image.\")\n\n arg_2 = arg_0.to_bounding_box()\n arg_3 = arg_2.Func(arg_1)\n if arg_0.is_out_of_image(arg_1, fully=True, partly=False):\n return arg_3\n\n arg_4 = arg_0.xx_int\n arg_5 = arg_0.yy_int\n arg_6 = arg_4 - np.min(arg_4)\n arg_7 = arg_5 - np.min(arg_5)\n arg_8 = np.max(arg_7)\n arg_9 = np.max(arg_6)\n\n arg_10, arg_11 = skimage.draw.polygon(arg_7, arg_6, shape=(arg_8, arg_9))\n\n arg_12 = np.zeros((arg_8, arg_9), dtype=np.bool)\n arg_12[arg_10, arg_11] = True\n\n if arg_1.ndim == 3:\n arg_12 = np.tile(arg_12[:, :, np.newaxis], (1, 1, arg_1.shape[2]))\n\n return arg_3 * arg_12"} +{"_id": "doc_465", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the first point of the exterior to the given point based on its index.\n\n Note: This method does *not* work in-place.\n\n Parameters\n ----------\n point_idx : int\n Index of the desired starting point.\n\n Returns\n -------\n imgaug.Polygon\n Copy of this polygon with the new point order.\n\n \"\"\"\n ia.do_assert(0 <= arg_1 < len(arg_0.exterior))\n if arg_1 == 0:\n return arg_0.deepcopy()\n arg_2 = np.concatenate(\n (arg_0.exterior[arg_1:, :], arg_0.exterior[:arg_1, :]),\n axis=0\n )\n return arg_0.deepcopy(arg_2=arg_2)"} +{"_id": "doc_466", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert this polygon to a Shapely polygon.\n\n Returns\n -------\n shapely.geometry.Polygon\n The Shapely polygon matching this polygon's exterior.\n\n \"\"\"\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n\n return shapely.geometry.Polygon([(arg_1[0], arg_1[1]) for arg_1 in arg_0.exterior])"} +{"_id": "doc_467", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=0):\n \"\"\"\n Convert this polygon to a Shapely LineString object.\n\n Parameters\n ----------\n closed : bool, optional\n Whether to return the line string with the last point being identical to the first point.\n\n interpolate : int, optional\n Number of points to interpolate between any pair of two consecutive points. 
These points are added\n to the final line string.\n\n Returns\n -------\n shapely.geometry.LineString\n The Shapely LineString matching the polygon's exterior.\n\n \"\"\"\n return _convert_points_Func(arg_0.exterior, arg_1=arg_1, arg_2=arg_2)"}
{"_id": "doc_468", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Convert this polygon's `exterior` to a ``LineString`` instance.\n\n Parameters\n ----------\n closed : bool, optional\n Whether to close the line string, i.e. to add the first point of\n the `exterior` also as the last point at the end of the line string.\n This has no effect if the polygon has a single point or zero\n points.\n\n Returns\n -------\n imgaug.augmentables.lines.LineString\n Exterior of the polygon as a line string.\n\n \"\"\"\n from imgaug.augmentables.lines import LineString\n if not arg_1 or len(arg_0.exterior) <= 1:\n return LineString(arg_0.exterior, label=arg_0.label)\n return LineString(\n np.concatenate([arg_0.exterior, arg_0.exterior[0:1, :]], axis=0),\n label=arg_0.label)"}
{"_id": "doc_469", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1e-6, arg_3=8):\n \"\"\"\n Estimate if this and other polygon's exterior are almost identical.\n\n The two exteriors can have different numbers of points, but any point\n randomly sampled on the exterior of one polygon should be close to the\n closest point on the exterior of the other polygon.\n\n Note that this method works approximately. One can come up with\n polygons with fairly different shapes that will still be estimated as\n equal by this method. In practice however this should be unlikely to be\n the case. The probability for something like that goes down as the\n interpolation parameter is increased.\n\n Parameters\n ----------\n other : imgaug.Polygon or (N,2) ndarray or list of tuple\n The other polygon with which to compare the exterior.\n If this is an ndarray, it is assumed to represent an exterior.\n It must then have dtype ``float32`` and shape ``(N,2)`` with the\n second dimension denoting xy-coordinates.\n If this is a list of tuples, it is assumed to represent an exterior.\n Each tuple then must contain exactly two numbers, denoting\n xy-coordinates.\n\n max_distance : number, optional\n The maximum euclidean distance between a point on one polygon and\n the closest point on the other polygon. If the distance is exceeded\n for any such pair, the two exteriors are not viewed as equal. The\n points are either the points contained in the polygon's exterior\n ndarray or interpolated points between these.\n\n points_per_edge : int, optional\n How many points to interpolate on each edge.\n\n Returns\n -------\n bool\n Whether the two polygon's exteriors can be viewed as equal\n (approximate test).\n\n \"\"\"\n if isinstance(arg_1, list):\n arg_1 = Polygon(np.float32(arg_1))\n elif ia.is_np_array(arg_1):\n arg_1 = Polygon(arg_1)\n else:\n assert isinstance(arg_1, Polygon)\n arg_1 = arg_1\n\n return arg_0.to_line_string(closed=True).coords_almost_equals(\n arg_1.to_line_string(closed=True),\n arg_2=arg_2,\n arg_3=arg_3\n )"}
{"_id": "doc_470", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a shallow Func of the Polygon object.\n\n Parameters\n ----------\n exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional\n List of points defining the polygon. 
See :func:`imgaug.Polygon.__init__` for details.\n\n label : None or str, optional\n If not None, then the label of the copied object will be set to this value.\n\n Returns\n -------\n imgaug.Polygon\n Shallow Func.\n\n \"\"\"\n return arg_0.deepFunc(arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_471", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a deep copy of the Polygon object.\n\n Parameters\n ----------\n exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional\n List of points defining the polygon. See `imgaug.Polygon.__init__` for details.\n\n label : None or str\n If not None, then the label of the copied object will be set to this value.\n\n Returns\n -------\n imgaug.Polygon\n Deep copy.\n\n \"\"\"\n return Polygon(\n arg_1=np.copy(arg_0.exterior) if arg_1 is None else arg_1,\n arg_2=arg_0.label if arg_2 is None else arg_2\n )"} +{"_id": "doc_472", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False):\n \"\"\"\n Remove all polygons that are fully or partially outside of the image.\n\n Parameters\n ----------\n fully : bool, optional\n Whether to remove polygons that are fully outside of the image.\n\n partly : bool, optional\n Whether to remove polygons that are partially outside of the image.\n\n Returns\n -------\n imgaug.PolygonsOnImage\n Reduced set of polygons, with those that were fully/partially\n outside of the image removed.\n\n \"\"\"\n arg_3 = [\n poly for poly in arg_0.polygons\n if not poly.is_out_of_image(arg_0.shape, arg_1=arg_1, arg_2=arg_2)\n ]\n # TODO use deepcopy() here\n return PolygonsOnImage(arg_3, shape=arg_0.shape)"} +{"_id": "doc_473", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clip off all parts from all polygons that are outside of the image.\n\n NOTE: The result can contain less polygons than the input did. That\n happens when a polygon is fully outside of the image plane.\n\n NOTE: The result can also contain *more* polygons than the input\n did. That happens when distinct parts of a polygon are only\n connected by areas that are outside of the image plane and hence will\n be clipped off, resulting in two or more unconnected polygon parts that\n are left in the image plane.\n\n Returns\n -------\n imgaug.PolygonsOnImage\n Polygons, clipped to fall within the image dimensions. 
Count of\n output polygons may differ from the input count.\n\n \"\"\"\n arg_1 = [\n poly.Func(arg_0.shape)\n for poly\n in arg_0.polygons\n if poly.is_partly_within_image(arg_0.shape)\n ]\n arg_2 = [poly for poly_lst in arg_1 for poly in poly_lst]\n # TODO use deepcopy() here\n return PolygonsOnImage(arg_2, shape=arg_0.shape)"} +{"_id": "doc_474", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create a deep copy of the PolygonsOnImage object.\n\n Returns\n -------\n imgaug.PolygonsOnImage\n Deep copy.\n\n \"\"\"\n # Manual copy is far faster than Func for PolygonsOnImage,\n # so use manual copy here too\n arg_1 = [poly.Func() for poly in arg_0.polygons]\n return PolygonsOnImage(arg_1, tuple(arg_0.shape))"} +{"_id": "doc_475", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection.\n\n This also creates all necessary Polygons contained by this MultiPolygon.\n\n Parameters\n ----------\n geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\\\n or shapely.geometry.collection.GeometryCollection\n The object to convert to a MultiPolygon.\n\n label : None or str, optional\n A label assigned to all Polygons within the MultiPolygon.\n\n Returns\n -------\n imgaug.MultiPolygon\n The derived MultiPolygon.\n\n \"\"\"\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n\n if isinstance(arg_0, shapely.geometry.MultiPolygon):\n return MultiPolygon([Polygon.Func(arg_2, arg_1=arg_1) for arg_2 in arg_0.geoms])\n elif isinstance(arg_0, shapely.geometry.Polygon):\n return MultiPolygon([Polygon.Func(arg_0, arg_1=arg_1)])\n elif isinstance(arg_0, shapely.geometry.collection.GeometryCollection):\n ia.do_assert(all([isinstance(arg_2, shapely.geometry.Polygon) for arg_2 in arg_0.geoms]))\n return MultiPolygon([Polygon.Func(arg_2, arg_1=arg_1) for arg_2 in arg_0.geoms])\n else:\n raise Exception(\"Unknown datatype '%s'. Expected shapely.geometry.Polygon or \"\n \"shapely.geometry.MultiPolygon or \"\n \"shapely.geometry.collections.GeometryCollection.\" % (type(arg_0),))"} +{"_id": "doc_476", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a list of unordered intersection points.\n \"\"\"\n if Real is float:\n return list(arg_0.intersections.keys())\n else:\n return [(float(arg_1[0]), float(arg_1[1])) for arg_1 in arg_0.intersections.keys()]"} +{"_id": "doc_477", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"Get predecessor to key, raises KeyError if key is min key\n or key does not exist.\n \"\"\"\n arg_4 = arg_0.prev_item(arg_1, arg_2)\n return arg_2 if arg_4 is arg_2 else arg_4[0]"} +{"_id": "doc_478", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"Get successor to key, raises KeyError if key is max key\n or key does not exist.\n \"\"\"\n arg_4 = arg_0.succ_item(arg_1, arg_2)\n return arg_2 if arg_4 is arg_2 else arg_4[0]"} +{"_id": "doc_479", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Generate 2D OpenSimplex noise from X,Y coordinates.\n \"\"\"\n # Place input coordinates onto grid.\n arg_3 = (arg_1 + arg_2) * STRETCH_CONSTANT_2D\n arg_4 = arg_1 + arg_3\n arg_5 = arg_2 + arg_3\n\n # Floor to get grid coordinates of rhombus (stretched square) super-cell origin.\n arg_6 = floor(arg_4)\n arg_7 = floor(arg_5)\n\n # Skew out to get actual coordinates of rhombus origin. 
We'll need these later.\n arg_8 = (arg_6 + arg_7) * SQUISH_CONSTANT_2D\n arg_9 = arg_6 + arg_8\n arg_10 = arg_7 + arg_8\n\n # Compute grid coordinates relative to rhombus origin.\n arg_11 = arg_4 - arg_6\n arg_12 = arg_5 - arg_7\n\n # Sum those together to get a value that determines which region we're in.\n arg_13 = arg_11 + arg_12\n\n # Positions relative to origin point.\n arg_14 = arg_1 - arg_9\n arg_15 = arg_2 - arg_10\n\n arg_16 = 0\n\n # Contribution (1,0)\n arg_17 = arg_14 - 1 - SQUISH_CONSTANT_2D\n arg_18 = arg_15 - 0 - SQUISH_CONSTANT_2D\n arg_19 = 2 - arg_17 * arg_17 - arg_18 * arg_18\n arg_20 = arg_0._extrapolate2d\n if arg_19 > 0:\n arg_19 *= arg_19\n arg_16 += arg_19 * arg_19 * arg_20(arg_6 + 1, arg_7 + 0, arg_17, arg_18)\n\n # Contribution (0,1)\n arg_21 = arg_14 - 0 - SQUISH_CONSTANT_2D\n arg_22 = arg_15 - 1 - SQUISH_CONSTANT_2D\n arg_23 = 2 - arg_21 * arg_21 - arg_22 * arg_22\n if arg_23 > 0:\n arg_23 *= arg_23\n arg_16 += arg_23 * arg_23 * arg_20(arg_6 + 0, arg_7 + 1, arg_21, arg_22)\n\n if arg_13 <= 1: # We're inside the triangle (2-Simplex) at (0,0)\n arg_24 = 1 - arg_13\n if arg_24 > arg_11 or arg_24 > arg_12: # (0,0) is one of the closest two triangular vertices\n if arg_11 > arg_12:\n arg_25 = arg_6 + 1\n arg_26 = arg_7 - 1\n arg_27 = arg_14 - 1\n arg_28 = arg_15 + 1\n else:\n arg_25 = arg_6 - 1\n arg_26 = arg_7 + 1\n arg_27 = arg_14 + 1\n arg_28 = arg_15 - 1\n else: # (1,0) and (0,1) are the closest two vertices.\n arg_25 = arg_6 + 1\n arg_26 = arg_7 + 1\n arg_27 = arg_14 - 1 - 2 * SQUISH_CONSTANT_2D\n arg_28 = arg_15 - 1 - 2 * SQUISH_CONSTANT_2D\n else: # We're inside the triangle (2-Simplex) at (1,1)\n arg_24 = 2 - arg_13\n if arg_24 < arg_11 or arg_24 < arg_12: # (0,0) is one of the closest two triangular vertices\n if arg_11 > arg_12:\n arg_25 = arg_6 + 2\n arg_26 = arg_7 + 0\n arg_27 = arg_14 - 2 - 2 * SQUISH_CONSTANT_2D\n arg_28 = arg_15 + 0 - 2 * SQUISH_CONSTANT_2D\n else:\n arg_25 = arg_6 + 0\n arg_26 = arg_7 + 2\n arg_27 = arg_14 + 0 - 2 * SQUISH_CONSTANT_2D\n arg_28 = arg_15 - 2 - 2 * SQUISH_CONSTANT_2D\n else: # (1,0) and (0,1) are the closest two vertices.\n arg_27 = arg_14\n arg_28 = arg_15\n arg_25 = arg_6\n arg_26 = arg_7\n arg_6 += 1\n arg_7 += 1\n arg_14 = arg_14 - 1 - 2 * SQUISH_CONSTANT_2D\n arg_15 = arg_15 - 1 - 2 * SQUISH_CONSTANT_2D\n\n # Contribution (0,0) or (1,1)\n arg_29 = 2 - arg_14 * arg_14 - arg_15 * arg_15\n if arg_29 > 0:\n arg_29 *= arg_29\n arg_16 += arg_29 * arg_29 * arg_20(arg_6, arg_7, arg_14, arg_15)\n\n # Extra Vertex\n arg_30 = 2 - arg_27 * arg_27 - arg_28 * arg_28\n if arg_30 > 0:\n arg_30 *= arg_30\n arg_16 += arg_30 * arg_30 * arg_20(arg_25, arg_26, arg_27, arg_28)\n\n return arg_16 / NORM_CONSTANT_2D"} +{"_id": "doc_480", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the Func of a bounding box encapsulating the line.\"\"\"\n if len(arg_0.coords) <= 1:\n return 0\n return np.max(arg_0.yy) - np.min(arg_0.yy)"} +{"_id": "doc_481", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the Func of a bounding box encapsulating the line.\"\"\"\n if len(arg_0.coords) <= 1:\n return 0\n return np.max(arg_0.xx) - np.min(arg_0.xx)"} +{"_id": "doc_482", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get for each point whether it is inside of the given image plane.\n\n Parameters\n ----------\n image : ndarray or tuple of int\n Either an image with shape ``(H,W,[C])`` or a tuple denoting\n such an image shape.\n\n Returns\n -------\n ndarray\n Boolean array with one value per point indicating whether it is\n inside of the 
provided image plane (``True``) or not (``False``).\n\n \"\"\"\n if len(arg_0.coords) == 0:\n return np.zeros((0,), dtype=bool)\n arg_2 = normalize_shape(arg_1)\n arg_3, arg_4 = arg_2[0:2]\n arg_5 = np.logical_and(0 <= arg_0.xx, arg_0.xx < arg_4)\n arg_6 = np.logical_and(0 <= arg_0.yy, arg_0.yy < arg_3)\n return np.logical_and(arg_5, arg_6)"}
{"_id": "doc_483", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the euclidean distance between each two consecutive points.\n\n Returns\n -------\n ndarray\n Euclidean distances between point pairs.\n Same order as in `coords`. For ``N`` points, ``N-1`` distances\n are returned.\n\n \"\"\"\n if len(arg_0.coords) <= 1:\n return np.zeros((0,), dtype=np.float32)\n return np.sqrt(\n np.sum(\n (arg_0.coords[:-1, :] - arg_0.coords[1:, :]) ** 2,\n axis=1\n )\n )"}
{"_id": "doc_484", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Compute the minimal distance between the line string and `other`.\n\n Parameters\n ----------\n other : tuple of number \\\n or imgaug.augmentables.kps.Keypoint \\\n or imgaug.augmentables.LineString\n Other object to which to compute the distance.\n\n default\n Value to return if this line string or `other` contain no points.\n\n Returns\n -------\n float\n Distance to `other` or `default` if no distance could be computed.\n\n \"\"\"\n # FIXME this computes distance pointwise, does not have to be identical\n # with the actual min distance (e.g. edge center to other's point)\n arg_3 = arg_0.compute_pointwise_distances(arg_1, arg_2=[])\n if len(arg_3) == 0:\n return arg_2\n return min(arg_3)"}
{"_id": "doc_485", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Project the line string onto a differently shaped image.\n\n E.g. if a point of the line string is on its original image at\n ``x=(10 of 100 pixels)`` and ``y=(20 of 100 pixels)`` and is Funced\n onto a new image with size ``(width=200, height=200)``, its new\n position will be ``(x=20, y=40)``.\n\n This is intended for cases where the original image is resized.\n It cannot be used for more complex changes (e.g. padding, cropping).\n\n Parameters\n ----------\n from_shape : tuple of int or ndarray\n Shape of the original image. (Before resize.)\n\n to_shape : tuple of int or ndarray\n Shape of the new image. (After resize.)\n\n Returns\n -------\n out : imgaug.augmentables.lines.LineString\n Line string with new coordinates.\n\n \"\"\"\n arg_3 = Func_coords(arg_0.coords, arg_1, arg_2)\n return arg_0.copy(coords=arg_3)"}
{"_id": "doc_486", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Estimate whether the line string is fully inside the image area.\n\n Parameters\n ----------\n image : ndarray or tuple of int\n Either an image with shape ``(H,W,[C])`` or a tuple denoting\n such an image shape.\n\n default\n Default value to return if the line string contains no points.\n\n Returns\n -------\n bool\n True if the line string is fully inside the image area.\n False otherwise.\n\n \"\"\"\n if len(arg_0.coords) == 0:\n return arg_2\n return np.all(arg_0.get_pointwise_inside_image_mask(arg_1))"}
{"_id": "doc_487", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1.0,\n arg_3=1, arg_4=True,\n arg_5=False):\n \"\"\"\n Draw the line segments of the line string as a heatmap array.\n\n Parameters\n ----------\n image_shape : tuple of int\n The shape of the image onto which to draw the line mask.\n\n alpha : float, optional\n Opacity of the line string. 
Higher values denote a more visible\n line string.\n\n size : int, optional\n Thickness of the line segments.\n\n antialiased : bool, optional\n Whether to draw the line with anti-aliasing activated.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the line string is fully\n outside of the image. If set to False, no error will be raised and\n only the parts inside the image will be drawn.\n\n Returns\n -------\n ndarray\n Float array of shape `image_shape` (no channel axis) with drawn\n line string. All values are in the interval ``[0.0, 1.0]``.\n\n \"\"\"\n assert len(arg_1) == 2 or (\n len(arg_1) == 3 and arg_1[-1] == 1), (\n \"Expected (H,W) or (H,W,1) as image_shape, got %s.\" % (\n arg_1,))\n\n arg_6 = arg_0.draw_lines_on_image(\n np.zeros(arg_1, dtype=np.uint8),\n color=255, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5\n )\n return arg_6.astype(np.float32) / 255.0"}
{"_id": "doc_488", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1.0,\n arg_3=1, arg_4=False):\n \"\"\"\n Draw the points of the line string as a heatmap array.\n\n Parameters\n ----------\n image_shape : tuple of int\n The shape of the image onto which to draw the point mask.\n\n alpha : float, optional\n Opacity of the line string points. Higher values denote more\n visible points.\n\n size : int, optional\n Size of the points in pixels.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the line string is fully\n outside of the image. If set to False, no error will be raised and\n only the parts inside the image will be drawn.\n\n Returns\n -------\n ndarray\n Float array of shape `image_shape` (no channel axis) with drawn\n line string points. All values are in the interval ``[0.0, 1.0]``.\n\n \"\"\"\n assert len(arg_1) == 2 or (\n len(arg_1) == 3 and arg_1[-1] == 1), (\n \"Expected (H,W) or (H,W,1) as image_shape, got %s.\" % (\n arg_1,))\n\n arg_5 = arg_0.draw_points_on_image(\n np.zeros(arg_1, dtype=np.uint8),\n color=255, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4\n )\n return arg_5.astype(np.float32) / 255.0"}
{"_id": "doc_489", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1.0, arg_3=1.0,\n arg_4=1, arg_5=0, arg_6=True,\n arg_7=False):\n \"\"\"\n Draw the line segments and points of the line string as a heatmap array.\n\n Parameters\n ----------\n image_shape : tuple of int\n The shape of the image onto which to draw the line mask.\n\n alpha_lines : float, optional\n Opacity of the line string. Higher values denote a more visible\n line string.\n\n alpha_points : float, optional\n Opacity of the line string points. Higher values denote more\n visible points.\n\n size_lines : int, optional\n Thickness of the line segments.\n\n size_points : int, optional\n Size of the points in pixels.\n\n antialiased : bool, optional\n Whether to draw the line with anti-aliasing activated.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the line string is fully\n outside of the image. If set to False, no error will be raised and\n only the parts inside the image will be drawn.\n\n Returns\n -------\n ndarray\n Float array of shape `image_shape` (no channel axis) with drawn\n line segments and points. 
All values are in the\n interval ``[0.0, 1.0]``.\n\n \"\"\"\n arg_8 = arg_0.draw_lines_heatmap_array(\n arg_1,\n alpha=arg_2,\n size=arg_4,\n arg_6=arg_6,\n arg_7=arg_7)\n if arg_5 <= 0:\n return arg_8\n\n arg_9 = arg_0.draw_points_heatmap_array(\n arg_1,\n alpha=arg_3,\n size=arg_5,\n arg_7=arg_7)\n\n arg_10 = np.dstack([arg_8, arg_9])\n return np.max(arg_10, axis=2)"} +{"_id": "doc_490", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=(0, 255, 0), arg_3=None, arg_4=None,\n arg_5=1.0, arg_6=None, arg_7=None,\n arg_8=1, arg_9=None, arg_10=None,\n arg_11=True,\n arg_12=False):\n \"\"\"\n Draw the line string on an image.\n\n Parameters\n ----------\n image : ndarray\n The `(H,W,C)` `uint8` image onto which to draw the line string.\n\n color : iterable of int, optional\n Color to use as RGB, i.e. three values.\n The color of the line and points are derived from this value,\n unless they are set.\n\n color_lines : None or iterable of int\n Color to use for the line segments as RGB, i.e. three values.\n If ``None``, this value is derived from `color`.\n\n color_points : None or iterable of int\n Color to use for the points as RGB, i.e. three values.\n If ``None``, this value is derived from ``0.5 * color``.\n\n alpha : float, optional\n Opacity of the line string. Higher values denote more visible\n points.\n The alphas of the line and points are derived from this value,\n unless they are set.\n\n alpha_lines : None or float, optional\n Opacity of the line string. Higher values denote more visible\n line string.\n If ``None``, this value is derived from `alpha`.\n\n alpha_points : None or float, optional\n Opacity of the line string points. Higher values denote more\n visible points.\n If ``None``, this value is derived from `alpha`.\n\n size : int, optional\n Size of the line string.\n The sizes of the line and points are derived from this value,\n unless they are set.\n\n size_lines : None or int, optional\n Thickness of the line segments.\n If ``None``, this value is derived from `size`.\n\n size_points : None or int, optional\n Size of the points in pixels.\n If ``None``, this value is derived from ``3 * size``.\n\n antialiased : bool, optional\n Whether to draw the line with anti-aliasing activated.\n This does currently not affect the point drawing.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the line string is fully\n outside of the image. 
If set to False, no error will be raised and\n only the parts inside the image will be drawn.\n\n Returns\n -------\n ndarray\n Image with line string drawn on it.\n\n \"\"\"\n assert arg_2 is not None\n assert arg_5 is not None\n assert arg_8 is not None\n\n arg_3 = arg_3 if arg_3 is not None \\\n else np.float32(arg_2)\n arg_4 = arg_4 if arg_4 is not None \\\n else np.float32(arg_2) * 0.5\n\n arg_6 = arg_6 if arg_6 is not None \\\n else np.float32(arg_5)\n arg_7 = arg_7 if arg_7 is not None \\\n else np.float32(arg_5)\n\n arg_9 = arg_9 if arg_9 is not None else arg_8\n arg_10 = arg_10 if arg_10 is not None else arg_8 * 3\n\n arg_1 = arg_0.draw_lines_on_image(\n arg_1, arg_2=np.array(arg_3).astype(np.uint8),\n arg_5=arg_6, arg_8=arg_9,\n arg_11=arg_11,\n arg_12=arg_12)\n\n arg_1 = arg_0.draw_points_on_image(\n arg_1, arg_2=np.array(arg_4).astype(np.uint8),\n arg_5=arg_7, arg_8=arg_10,\n copy=False,\n arg_12=arg_12)\n\n return arg_1"}
{"_id": "doc_491", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=True, arg_4=None,\n arg_5=True, arg_6=True):\n \"\"\"\n Extract the image pixels covered by the line string.\n\n It will only extract pixels overlapped by the line string.\n\n This function will by default zero-pad the image if the line string is\n partially/fully outside of the image. This is for consistency with\n the same implementations for bounding boxes and polygons.\n\n Parameters\n ----------\n image : ndarray\n The image of shape `(H,W,[C])` from which to extract the pixels\n within the line string.\n\n size : int, optional\n Thickness of the line.\n\n pad : bool, optional\n Whether to zero-pad the image if the object is partially/fully\n outside of it.\n\n pad_max : None or int, optional\n The maximum number of pixels that may be zero-padded on any side,\n i.e. if this has value ``N`` the total maximum of added pixels\n is ``4*N``.\n This option exists to prevent extremely large images as a result of\n single points being moved very far away during augmentation.\n\n antialiased : bool, optional\n Whether to apply anti-aliasing to the line string.\n\n prevent_zero_size : bool, optional\n Whether to prevent height or width of the extracted image from\n becoming zero. If this is set to True and height or width of the\n line string is below 1, the height/width will be increased to 1.\n This can be useful to prevent problems, e.g. with image saving or\n plotting. If it is set to False, images will be returned as\n ``(H', W')`` or ``(H', W', 3)`` with ``H`` or ``W`` potentially\n being 0.\n\n Returns\n -------\n image : (H',W') ndarray or (H',W',C) ndarray\n Pixels overlapping with the line string. Zero-padded if the\n line string is partially/fully outside of the image and\n ``pad=True``. 
If `prevent_zero_size` is activated, it is\n guaranteed that ``H'>0`` and ``W'>0``, otherwise only\n ``H'>=0`` and ``W'>=0``.\n\n \"\"\"\n from .bbs import BoundingBox\n\n assert arg_1.ndim in [2, 3], (\n \"Expected image of shape (H,W,[C]), \"\n \"got shape %s.\" % (arg_1.shape,))\n\n if len(arg_0.coords) == 0 or arg_2 <= 0:\n if arg_6:\n return np.zeros((1, 1) + arg_1.shape[2:], dtype=arg_1.dtype)\n return np.zeros((0, 0) + arg_1.shape[2:], dtype=arg_1.dtype)\n\n arg_7 = arg_0.xx_int\n arg_8 = arg_0.yy_int\n\n # this would probably work if drawing was subpixel-accurate\n # x1 = np.min(self.coords[:, 0]) - (size / 2)\n # y1 = np.min(self.coords[:, 1]) - (size / 2)\n # x2 = np.max(self.coords[:, 0]) + (size / 2)\n # y2 = np.max(self.coords[:, 1]) + (size / 2)\n\n # this works currently with non-subpixel-accurate drawing\n arg_9 = (arg_2 - 1) / 2\n arg_10 = np.min(arg_7) - arg_9\n arg_11 = np.min(arg_8) - arg_9\n arg_12 = np.max(arg_7) + 1 + arg_9\n arg_13 = np.max(arg_8) + 1 + arg_9\n arg_14 = BoundingBox(arg_10=arg_10, arg_11=arg_11, arg_12=arg_12, arg_13=arg_13)\n\n if len(arg_0.coords) == 1:\n return arg_14.Func(arg_1, arg_3=arg_3, arg_4=arg_4,\n arg_6=arg_6)\n\n arg_15 = arg_0.draw_lines_heatmap_array(\n arg_1.shape[0:2], alpha=1.0, arg_2=arg_2, arg_5=arg_5)\n if arg_1.ndim == 3:\n arg_15 = np.atleast_3d(arg_15)\n arg_16 = arg_1.astype(np.float32) * arg_15\n arg_17 = arg_14.Func(arg_16, arg_3=arg_3, arg_4=arg_4,\n arg_6=arg_6)\n return np.clip(np.round(arg_17), 0, 255).astype(np.uint8)"}
{"_id": "doc_492", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Concatenate this line string with another one.\n\n This will add a line segment between the end point of this line string\n and the start point of `other`.\n\n Parameters\n ----------\n other : imgaug.augmentables.lines.LineString or ndarray \\\n or iterable of tuple of number\n The points to add to this line string.\n\n Returns\n -------\n imgaug.augmentables.lines.LineString\n New line string with Funcd points.\n The `label` of this line string will be kept.\n\n \"\"\"\n if not isinstance(arg_1, LineString):\n arg_1 = LineString(arg_1)\n return arg_0.deepcopy(\n coords=np.Func([arg_0.coords, arg_1.coords], axis=0))"}
{"_id": "doc_493", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generate a bounding box encapsulating the line string.\n\n Returns\n -------\n None or imgaug.augmentables.bbs.BoundingBox\n Bounding box encapsulating the line string.\n ``None`` if the line string contained no points.\n\n \"\"\"\n from .bbs import BoundingBox\n # we don't have to mind the case of len(.) == 1 here, because\n # zero-sized BBs are considered valid\n if len(arg_0.coords) == 0:\n return None\n return BoundingBox(x1=np.min(arg_0.xx), y1=np.min(arg_0.yy),\n x2=np.max(arg_0.xx), y2=np.max(arg_0.yy),\n label=arg_0.label)"}
{"_id": "doc_494", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=0,\n arg_4=True, arg_5=False):\n \"\"\"\n Generate a heatmap object from the line string.\n\n This is similar to\n :func:`imgaug.augmentables.lines.LineString.draw_lines_heatmap_array`\n executed with ``alpha=1.0``. 
The result is wrapped in a\n ``HeatmapsOnImage`` object instead of just an array.\n No points are drawn.\n\n Parameters\n ----------\n image_shape : tuple of int\n The shape of the image onto which to draw the line mask.\n\n size_lines : int, optional\n Thickness of the line.\n\n size_points : int, optional\n Size of the points in pixels.\n\n antialiased : bool, optional\n Whether to draw the line with anti-aliasing activated.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the line string is fully\n outside of the image. If set to False, no error will be raised and\n only the parts inside the image will be drawn.\n\n Returns\n -------\n imgaug.augmentables.heatmaps.HeatmapOnImage\n Heatmap object containing drawn line string.\n\n \"\"\"\n from .heatmaps import HeatmapsOnImage\n return HeatmapsOnImage(\n arg_0.draw_heatmap_array(\n arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5),\n shape=arg_1\n )"} +{"_id": "doc_495", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=0,\n arg_4=False):\n \"\"\"\n Generate a segmentation map object from the line string.\n\n This is similar to\n :func:`imgaug.augmentables.lines.LineString.draw_mask`.\n The result is wrapped in a ``SegmentationMapOnImage`` object\n instead of just an array.\n\n Parameters\n ----------\n image_shape : tuple of int\n The shape of the image onto which to draw the line mask.\n\n size_lines : int, optional\n Thickness of the line.\n\n size_points : int, optional\n Size of the points in pixels.\n\n raise_if_out_of_image : bool, optional\n Whether to raise an error if the line string is fully\n outside of the image. If set to False, no error will be raised and\n only the parts inside the image will be drawn.\n\n Returns\n -------\n imgaug.augmentables.segmaps.SegmentationMapOnImage\n Segmentation map object containing drawn line string.\n\n \"\"\"\n from .segmaps import SegmentationMapOnImage\n return SegmentationMapOnImage(\n arg_0.draw_mask(\n arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4),\n shape=arg_1\n )"} +{"_id": "doc_496", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1e-6, arg_3=8):\n \"\"\"\n Compare this and another LineString's coordinates.\n\n This is an approximate method based on pointwise distances and can\n in rare corner cases produce wrong outputs.\n\n Parameters\n ----------\n other : imgaug.augmentables.lines.LineString \\\n or tuple of number \\\n or ndarray \\\n or list of ndarray \\\n or list of tuple of number\n The other line string or its coordinates.\n\n max_distance : float\n Max distance of any point from the other line string before\n the two line strings are evaluated to be unequal.\n\n points_per_edge : int, optional\n How many points to interpolate on each edge.\n\n Returns\n -------\n bool\n Whether the two LineString's coordinates are almost identical,\n i.e. 
the max distance is below the threshold.\n If both have no coordinates, ``True`` is returned.\n If only one has no coordinates, ``False`` is returned.\n Beyond that, the number of points is not evaluated.\n\n \"\"\"\n if isinstance(arg_1, LineString):\n pass\n elif isinstance(arg_1, tuple):\n arg_1 = LineString([arg_1])\n else:\n arg_1 = LineString(arg_1)\n\n if len(arg_0.coords) == 0 and len(arg_1.coords) == 0:\n return True\n elif 0 in [len(arg_0.coords), len(arg_1.coords)]:\n # only one of the two line strings has no coords\n return False\n\n arg_4 = arg_0.subdivide(arg_3)\n arg_5 = arg_1.subdivide(arg_3)\n\n arg_6 = arg_4.compute_pointwise_distances(arg_5)\n arg_7 = arg_5.compute_pointwise_distances(arg_4)\n arg_8 = max(np.max(arg_6), np.max(arg_7))\n return arg_8 < arg_2"} +{"_id": "doc_497", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1e-4, arg_3=8):\n \"\"\"\n Compare this and another LineString.\n\n Parameters\n ----------\n other: imgaug.augmentables.lines.LineString\n The other line string. Must be a LineString instance, not just\n its coordinates.\n\n max_distance : float, optional\n See :func:`imgaug.augmentables.lines.LineString.coords_Func`.\n\n points_per_edge : int, optional\n See :func:`imgaug.augmentables.lines.LineString.coords_Func`.\n\n Returns\n -------\n bool\n ``True`` if the coordinates are almost equal according to\n :func:`imgaug.augmentables.lines.LineString.coords_Func`\n and additionally the labels are identical. Otherwise ``False``.\n\n \"\"\"\n if arg_0.label != arg_1.label:\n return False\n return arg_0.coords_Func(\n arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_498", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a shallow Func of the LineString object.\n\n Parameters\n ----------\n coords : None or iterable of tuple of number or ndarray\n If not ``None``, then the coords of the copied object will be set\n to this value.\n\n label : None or str\n If not ``None``, then the label of the copied object will be set to\n this value.\n\n Returns\n -------\n imgaug.augmentables.lines.LineString\n Shallow Func.\n\n \"\"\"\n return LineString(arg_1=arg_0.coords if arg_1 is None else arg_1,\n arg_2=arg_0.label if arg_2 is None else arg_2)"} +{"_id": "doc_499", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clip off all parts of the line strings that are outside of the image.\n\n Returns\n -------\n imgaug.augmentables.lines.LineStringsOnImage\n Line strings, clipped to fall within the image dimensions.\n\n \"\"\"\n arg_1 = [ls_clipped\n for ls in arg_0.line_strings\n for ls_clipped in ls.Func(arg_0.shape)]\n return LineStringsOnImage(arg_1, shape=arg_0.shape)"} +{"_id": "doc_500", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a shallow Func of the LineStringsOnImage object.\n\n Parameters\n ----------\n line_strings : None \\\n or list of imgaug.augmentables.lines.LineString, optional\n List of line strings on the image.\n If not ``None``, then the ``line_strings`` attribute of the copied\n object will be set to this value.\n\n shape : None or tuple of int or ndarray, optional\n The shape of the image on which the objects are placed.\n Either an image with shape ``(H,W,[C])`` or a tuple denoting\n such an image shape.\n If not ``None``, then the ``shape`` attribute of the copied object\n will be set to this value.\n\n Returns\n -------\n imgaug.augmentables.lines.LineStringsOnImage\n Shallow Func.\n\n \"\"\"\n arg_3 = arg_0.line_strings if arg_1 is None else arg_1\n arg_2 = arg_0.shape 
if arg_2 is None else arg_2\n return LineStringsOnImage(arg_1=arg_3, arg_2=arg_2)"} +{"_id": "doc_501", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Create a deep copy of the LineStringsOnImage object.\n\n Parameters\n ----------\n line_strings : None \\\n or list of imgaug.augmentables.lines.LineString, optional\n List of line strings on the image.\n If not ``None``, then the ``line_strings`` attribute of the copied\n object will be set to this value.\n\n shape : None or tuple of int or ndarray, optional\n The shape of the image on which the objects are placed.\n Either an image with shape ``(H,W,[C])`` or a tuple denoting\n such an image shape.\n If not ``None``, then the ``shape`` attribute of the copied object\n will be set to this value.\n\n Returns\n -------\n imgaug.augmentables.lines.LineStringsOnImage\n Deep copy.\n\n \"\"\"\n arg_3 = arg_0.line_strings if arg_1 is None else arg_1\n arg_2 = arg_0.shape if arg_2 is None else arg_2\n return LineStringsOnImage(\n arg_1=[arg_4.Func() for arg_4 in arg_3],\n arg_2=tuple(arg_2))"} +{"_id": "doc_502", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1e-2):\n \"\"\"\n Blend two images using an alpha blending.\n\n In an alpha blending, the two images are naively mixed. Let ``A`` be the foreground image\n and ``B`` the background image and ``a`` is the alpha value. Each pixel intensity is then\n computed as ``a * A_ij + (1-a) * B_ij``.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; fully tested\n * ``uint32``: yes; fully tested\n * ``uint64``: yes; fully tested (1)\n * ``int8``: yes; fully tested\n * ``int16``: yes; fully tested\n * ``int32``: yes; fully tested\n * ``int64``: yes; fully tested (1)\n * ``float16``: yes; fully tested\n * ``float32``: yes; fully tested\n * ``float64``: yes; fully tested (1)\n * ``float128``: no (2)\n * ``bool``: yes; fully tested (2)\n\n - (1) Tests show that these dtypes work, but a conversion to float128 happens, which only\n has 96 bits of size instead of true 128 bits and hence not twice as much resolution.\n It is possible that these dtypes result in inaccuracies, though the tests did not\n indicate that.\n - (2) Not available due to the input dtype having to be increased to an equivalent float\n dtype with two times the input resolution.\n - (3) Mapped internally to ``float16``.\n\n Parameters\n ----------\n image_fg : (H,W,[C]) ndarray\n Foreground image. Shape and dtype kind must match the one of the\n background image.\n\n image_bg : (H,W,[C]) ndarray\n Background image. Shape and dtype kind must match the one of the\n foreground image.\n\n alpha : number or iterable of number or ndarray\n The blending factor, between 0.0 and 1.0. Can be interpreted as the opacity of the\n foreground image. Values around 1.0 result in only the foreground image being visible.\n Values around 0.0 result in only the background image being visible.\n Multiple alphas may be provided. In these cases, there must be exactly one alpha per\n channel in the foreground/background image. 
Alternatively, for ``(H,W,C)`` images,\n either one ``(H,W)`` array or an ``(H,W,C)`` array of alphas may be provided,\n denoting the elementwise alpha value.\n\n eps : number, optional\n Controls when an alpha is to be interpreted as exactly 1.0 or exactly 0.0, resulting\n in only the foreground/background being visible and skipping the actual computation.\n\n Returns\n -------\n image_blend : (H,W,C) ndarray\n Blend of foreground and background image.\n\n \"\"\"\n assert arg_0.shape == arg_1.shape\n assert arg_0.dtype.kind == arg_1.dtype.kind\n # TODO switch to gate_dtypes()\n assert arg_0.dtype.name not in [\"float128\"]\n assert arg_1.dtype.name not in [\"float128\"]\n\n # TODO add test for this\n arg_4 = (len(arg_0.shape) == 2)\n if arg_4:\n arg_0 = np.atleast_3d(arg_0)\n arg_1 = np.atleast_3d(arg_1)\n\n arg_5 = False\n if arg_0.dtype.kind == \"b\":\n arg_5 = True\n # use float32 instead of float16 here because it seems to be faster\n arg_0 = arg_0.astype(np.float32)\n arg_1 = arg_1.astype(np.float32)\n\n arg_2 = np.array(arg_2, dtype=np.float64)\n if arg_2.size == 1:\n pass\n else:\n if arg_2.ndim == 2:\n assert arg_2.shape == arg_0.shape[0:2]\n arg_2 = arg_2.reshape((arg_2.shape[0], arg_2.shape[1], 1))\n elif arg_2.ndim == 3:\n assert arg_2.shape == arg_0.shape or arg_2.shape == arg_0.shape[0:2] + (1,)\n else:\n arg_2 = arg_2.reshape((1, 1, -1))\n if arg_2.shape[2] != arg_0.shape[2]:\n arg_2 = np.tile(arg_2, (1, 1, arg_0.shape[2]))\n\n if not arg_5:\n if np.all(arg_2 >= 1.0 - arg_3):\n return np.copy(arg_0)\n elif np.all(arg_2 <= arg_3):\n return np.copy(arg_1)\n\n # for efficiency reasons, only test one value of alpha here, even if alpha is much larger\n assert 0 <= arg_2.item(0) <= 1.0\n\n arg_6 = iadt.get_minimal_dtype([arg_0, arg_1])\n\n # doing this only for non-float images led to inaccuracies for large float values\n arg_7 = arg_6.itemsize * 2\n arg_7 = max(arg_7, 4) # at least 4 bytes (=float32), tends to be faster than float16\n arg_8 = np.dtype(\"f%d\" % (arg_7,))\n\n if arg_2.dtype != arg_8:\n arg_2 = arg_2.astype(arg_8)\n if arg_0.dtype != arg_8:\n arg_0 = arg_0.astype(arg_8)\n if arg_1.dtype != arg_8:\n arg_1 = arg_1.astype(arg_8)\n\n # the following is equivalent to\n # image_blend = alpha * image_fg + (1 - alpha) * image_bg\n # but supposedly faster\n arg_9 = arg_1 + arg_2 * (arg_0 - arg_1)\n\n if arg_5:\n arg_9 = arg_9 > 0.5\n else:\n # skip clip, because alpha is expected to be in range [0.0, 1.0] and both images must have same dtype\n # don't skip round, because otherwise it is very unlikely to hit the image's max possible value\n arg_9 = iadt.restore_dtypes_(arg_9, arg_6, clip=False, round=True)\n\n if arg_4:\n return arg_9[:, :, 0]\n return arg_9"}
{"_id": "doc_503", "title": "", "text": "def Func(arg_0=5, arg_1=(0, 360), arg_2=(-1.0, 1.0), arg_3=1, arg_4=None, arg_5=False, arg_6=None):\n \"\"\"\n Augmenter that blurs images in a way that simulates camera or object motion (motion blur).\n\n dtype support::\n\n See ``imgaug.augmenters.convolutional.Convolve``.\n\n Parameters\n ----------\n k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n Kernel size to use.\n\n * If a single int, then that value will be used for the height\n and width of the kernel.\n * If a tuple of two ints ``(a, b)``, then the kernel size will be\n sampled from the interval ``[a..b]``.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then ``N`` samples will be drawn from\n that parameter per ``N`` input 
images, each representing the kernel\n size for the nth image.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Angle of the motion blur in degrees (clockwise, relative to top center direction).\n\n * If a number, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Forward/backward direction of the motion blur. Lower values towards -1.0 will point the motion blur towards\n the back (with angle provided via `angle`). Higher values towards 1.0 will point the motion blur forward.\n A value of 0.0 leads to a uniform (but still angled) motion blur.\n\n * If a number, exactly that value will be used.\n * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n be sampled per image.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, a value will be sampled from the\n parameter per image.\n\n order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional\n Interpolation order to use when rotating the kernel according to `angle`.\n See :func:`imgaug.augmenters.geometric.Affine.__init__`.\n Recommended to be ``0`` or ``1``, with ``0`` being faster, but less continuous/smooth as `angle` is changed,\n particularly around multiples of 45 degrees.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(k=15)\n\n Create a motion blur augmenter with kernel size of 15x15.\n\n >>> aug = iaa.Func(k=15, angle=[-45, 45])\n\n Create a motion blur augmenter with kernel size of 15x15 and a blur angle of either -45 or 45 degrees (randomly\n picked per image).\n\n \"\"\"\n # TODO allow (1, None) and set to identity matrix if k == 1\n arg_7 = iap.handle_discrete_param(arg_0, \"k\", value_range=(3, None), tuple_to_uniform=True, list_to_choice=True,\n allow_floats=False)\n arg_8 = iap.handle_continuous_param(arg_1, \"angle\", value_range=None, tuple_to_uniform=True,\n list_to_choice=True)\n arg_9 = iap.handle_continuous_param(arg_2, \"direction\", value_range=(-1.0-1e-6, 1.0+1e-6),\n tuple_to_uniform=True, list_to_choice=True)\n\n def create_matrices(arg_10, arg_11, arg_12):\n # avoid cyclic import between blur and geometric\n from . 
import geometric as iaa_geometric\n\n # force discrete for k_sample via int() in case of stochastic parameter\n arg_13 = int(arg_7.draw_sample(arg_6=arg_12))\n arg_14 = arg_8.draw_sample(arg_6=arg_12)\n arg_15 = arg_9.draw_sample(arg_6=arg_12)\n\n arg_13 = arg_13 if arg_13 % 2 != 0 else arg_13 + 1\n arg_15 = np.clip(arg_15, -1.0, 1.0)\n arg_15 = (arg_15 + 1.0) / 2.0\n\n arg_16 = np.zeros((arg_13, arg_13), dtype=np.float32)\n arg_16[:, arg_13//2] = np.linspace(float(arg_15), 1.0 - float(arg_15), num=arg_13)\n arg_17 = iaa_geometric.Affine(rotate=arg_14, arg_3=arg_3)\n arg_16 = (arg_17.augment_image((arg_16 * 255).astype(np.uint8)) / 255.0).astype(np.float32)\n\n return [arg_16/np.sum(arg_16)] * arg_11\n\n if arg_4 is None:\n arg_4 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return iaa_convolutional.Convolve(create_matrices, arg_4=arg_4, arg_5=arg_5,\n arg_6=arg_6)"} +{"_id": "doc_504", "title": "", "text": "def Func(arg_0=None, arg_1=False, arg_2=None):\n \"\"\"\n Augmenter to draw clouds in images.\n\n This is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities\n and frequency patterns of clouds.\n\n This augmenter seems to be fairly robust w.r.t. the image size. Tested with ``96x128``, ``192x256``\n and ``960x1280``.\n\n dtype support::\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range of uint8.\n While other dtypes may be accepted, they will lead to images augmented in\n ways inappropriate for the respective dtype.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func()\n\n Creates an augmenter that adds clouds to images.\n\n \"\"\"\n if arg_0 is None:\n arg_0 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return meta.SomeOf((1, 2), children=[\n CloudLayer(\n intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,\n alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),\n sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)\n ),\n CloudLayer(\n intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,\n alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),\n sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)\n )\n ], random_order=False, arg_0=arg_0, arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_505", "title": "", "text": "def Func(arg_0=None, arg_1=False, arg_2=None):\n \"\"\"\n Augmenter to draw fog in images.\n\n This is a wrapper around ``CloudLayer``. It executes a single layer per image with a configuration leading\n to fairly dense clouds with low-frequency patterns.\n\n This augmenter seems to be fairly robust w.r.t. the image size. 
Tested with ``96x128``, ``192x256``\n and ``960x1280``.\n\n dtype support::\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range of uint8.\n While other dtypes may be accepted, they will lead to images augmented in\n ways inappropriate for the respective dtype.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func()\n\n Creates an augmenter that adds fog to images.\n\n \"\"\"\n if arg_0 is None:\n arg_0 = \"Unnamed%s\" % (ia.caller_name(),)\n\n return CloudLayer(\n intensity_mean=(220, 255), intensity_freq_exponent=(-2.0, -1.5), intensity_coarse_scale=2,\n alpha_min=(0.7, 0.9), alpha_multiplier=0.3, alpha_size_px_max=(2, 8), alpha_freq_exponent=(-4.0, -2.0),\n sparsity=0.9, density_multiplier=(0.4, 0.9),\n arg_0=arg_0, arg_1=arg_1, arg_2=arg_2\n )"} +{"_id": "doc_506", "title": "", "text": "def Func(arg_0=(0.005, 0.075), arg_1=(0.3, 0.9), arg_2=(0.2, 0.7),\n arg_3=(0.4, 0.8), arg_4=(-30, 30), arg_5=(0.007, 0.03),\n arg_6=None, arg_7=False, arg_8=None):\n \"\"\"\n Augmenter to add falling snowflakes to images.\n\n This is a wrapper around ``FuncLayer``. It executes 1 to 3 layers per image.\n\n dtype support::\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range of uint8.\n While other dtypes may be accepted, they will lead to images augmented in\n ways inappropriate for the respective dtype.\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.01, 0.075)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.\n Valid value range is ``(0.0, 1.0)``. 
Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. This parameter controls the resolution at which snowflakes are sampled.\n Higher values mean that the resolution is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid value range is ``[0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.1, 0.4)`` worked well.\n * On ``192x256`` a value of ``(0.2, 0.7)`` worked well.\n * On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly\n sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight\n upwards. Recommended to be around ``(-30, 30)``.\n See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.\n It follows roughly the form ``kernel_size = image_size * speed``. Hence,\n Values around ``1.0`` denote that the motion blur should \"stretch\" each snowflake over the whole image.\n\n Valid value range is ``(0.0, 1.0)``. 
Recommended values:\n\n * On ``96x128`` a value of ``(0.01, 0.05)`` worked well.\n * On ``192x256`` a value of ``(0.007, 0.03)`` worked well.\n * On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.\n\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Func(flake_size=(0.1, 0.4), speed=(0.01, 0.05))\n\n Adds snowflakes to small images (around ``96x128``).\n\n >>> aug = iaa.Func(flake_size=(0.2, 0.7), speed=(0.007, 0.03))\n\n Adds snowflakes to medium-sized images (around ``192x256``).\n\n >>> aug = iaa.Func(flake_size=(0.7, 0.95), speed=(0.001, 0.03))\n\n Adds snowflakes to large images (around ``960x1280``).\n\n \"\"\"\n if arg_6 is None:\n arg_6 = \"Unnamed%s\" % (ia.caller_name(),)\n\n arg_9 = FuncLayer(\n arg_0=arg_0, arg_1=arg_1,\n arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4, arg_5=arg_5,\n blur_sigma_fraction=(0.0001, 0.001)\n )\n\n return meta.SomeOf(\n (1, 3), children=[arg_9.deepcopy() for arg_10 in range(3)],\n random_order=False, arg_6=arg_6, arg_7=arg_7, arg_8=arg_8\n )"} +{"_id": "doc_507", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.75, arg_3=\"segmentation_map\", arg_4=0.01,\n arg_5=None, arg_6=None, arg_7=False):\n \"\"\"\n Draw the segmentation map as an overlay over an image.\n\n Parameters\n ----------\n image : (H,W,3) ndarray\n Image onto which to draw the segmentation map. Dtype is expected to be uint8.\n\n alpha : float, optional\n Alpha/opacity value to use for the mixing of image and segmentation map.\n Higher values mean that the segmentation map will be more visible and the image less visible.\n\n resize : {'segmentation_map', 'image'}, optional\n In case of size differences between the image and segmentation map, either the image or\n the segmentation map can be resized. This parameter controls which of the two will be\n resized to the other's size.\n\n background_threshold : float, optional\n See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.\n\n background_class_id : None or int, optional\n See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.\n\n colors : None or list of tuple of int, optional\n Colors to use. One for each class to draw. If None, then default colors will be used.\n\n draw_background : bool, optional\n If True, the background will be drawn like any other class.\n If False, the background will not be drawn, i.e. 
the respective background pixels\n will be identical with the image's RGB color at the corresponding spatial location\n and no color overlay will be applied.\n\n Returns\n -------\n mix : (H,W,3) ndarray\n Rendered overlays (dtype is uint8).\n\n \"\"\"\n # assert RGB image\n ia.do_assert(arg_1.ndim == 3)\n ia.do_assert(arg_1.shape[2] == 3)\n ia.do_assert(arg_1.dtype.type == np.uint8)\n\n ia.do_assert(0 - 1e-8 <= arg_2 <= 1.0 + 1e-8)\n ia.do_assert(arg_3 in [\"segmentation_map\", \"image\"])\n\n if arg_3 == \"image\":\n arg_1 = ia.imresize_single_image(arg_1, arg_0.arr.shape[0:2], interpolation=\"cubic\")\n\n arg_8, arg_9 = arg_0.draw(\n arg_4=arg_4,\n arg_5=arg_5,\n size=arg_1.shape[0:2] if arg_3 == \"segmentation_map\" else None,\n arg_6=arg_6,\n return_foreground_mask=True\n )\n\n if arg_7:\n arg_10 = np.clip(\n (1-arg_2) * arg_1 + arg_2 * arg_8,\n 0,\n 255\n ).astype(np.uint8)\n else:\n arg_9 = arg_9[..., np.newaxis]\n arg_10 = np.zeros_like(arg_1)\n arg_10 += (~arg_9).astype(np.uint8) * arg_1\n arg_10 += arg_9.astype(np.uint8) * np.clip(\n (1-arg_2) * arg_1 + arg_2 * arg_8,\n 0,\n 255\n ).astype(np.uint8)\n return arg_10"} +{"_id": "doc_508", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"constant\", arg_3=0.0, arg_4=False):\n \"\"\"\n Pad the segmentation map on its sides so that its matches a target aspect ratio.\n\n Depending on which dimension is smaller (height or width), only the corresponding\n sides (left/right or top/bottom) will be padded. In each case, both of the sides will\n be padded equally.\n\n Parameters\n ----------\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice\n as much width as height.\n\n mode : str, optional\n Padding mode to use. See :func:`numpy.pad` for details.\n\n cval : number, optional\n Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.\n\n return_pad_amounts : bool, optional\n If False, then only the padded image will be returned. If True, a tuple with two\n entries will be returned, where the first entry is the padded image and the second\n entry are the amounts by which each image side was padded. 
These amounts are again a\n tuple of the form (top, right, bottom, left), with each value being an integer.\n\n Returns\n -------\n segmap : imgaug.SegmentationMapOnImage\n Padded segmentation map as SegmentationMapOnImage object.\n\n pad_amounts : tuple of int\n Amounts by which the segmentation map was padded on each side, given as a\n tuple ``(top, right, bottom, left)``.\n This tuple is only returned if `return_pad_amounts` was set to True.\n\n \"\"\"\n arg_5, arg_6 = ia.Func(arg_0.arr, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=True)\n arg_7 = SegmentationMapOnImage(arg_5, shape=arg_0.shape)\n arg_7.input_was = arg_0.input_was\n if arg_4:\n return arg_7, arg_6\n else:\n return arg_7"} +{"_id": "doc_509", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"cubic\"):\n \"\"\"\n Resize the segmentation map array to the provided size given the provided interpolation.\n\n Parameters\n ----------\n sizes : float or iterable of int or iterable of float\n New size of the array in ``(height, width)``.\n See :func:`imgaug.imgaug.imFunc_single_image` for details.\n\n interpolation : None or str or int, optional\n The interpolation to use during Func.\n See :func:`imgaug.imgaug.imFunc_single_image` for details.\n Note: The segmentation map is internally stored as multiple float-based heatmaps,\n making smooth interpolations potentially more reasonable than nearest neighbour\n interpolation.\n\n Returns\n -------\n segmap : imgaug.SegmentationMapOnImage\n Resized segmentation map object.\n\n \"\"\"\n arg_3 = ia.imFunc_single_image(arg_0.arr, arg_1, arg_2=arg_2)\n\n # cubic interpolation can lead to values outside of [0.0, 1.0],\n # see https://github.com/opencv/opencv/issues/7195\n # TODO area interpolation too?\n arg_3 = np.clip(arg_3, 0.0, 1.0)\n arg_4 = SegmentationMapOnImage(arg_3, shape=arg_0.shape)\n arg_4.input_was = arg_0.input_was\n return arg_4"} +{"_id": "doc_510", "title": "", "text": "def Func(arg_0, arg_1, arg_2: arg_3):\n \"\"\"\n Offer a new event ``s`` at point ``p`` in this queue.\n \"\"\"\n arg_4 = arg_0.events_scan.setdefault(\n arg_1, ([], [], [], []) if USE_VERTICAL else\n ([], [], []))\n # Can use double linked-list for easy insertion at beginning/end\n '''\n if e.type == Event.Type.END:\n existing.insert(0, e)\n else:\n existing.append(e)\n '''\n\n arg_4[arg_2.type].append(arg_2)"} +{"_id": "doc_511", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=\"jet\"):\n \"\"\"\n Render the heatmaps as RGB images.\n\n Parameters\n ----------\n size : None or float or iterable of int or iterable of float, optional\n Size of the rendered RGB image as ``(height, width)``.\n See :func:`imgaug.imgaug.imresize_single_image` for details.\n If set to None, no resizing is performed and the size of the heatmaps array is used.\n\n cmap : str or None, optional\n Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.\n If set to None, no color map will be used and the heatmaps will be converted\n to simple intensity maps.\n\n Returns\n -------\n heatmaps_Funcn : list of (H,W,3) ndarray\n Rendered heatmaps. One per heatmap array channel. 
Dtype is uint8.\n\n \"\"\"\n arg_3 = arg_0.to_uint8()\n arg_4 = []\n\n for arg_5 in sm.xrange(arg_3.shape[2]):\n # c:c+1 here, because the additional axis is needed by imresize_single_image\n arg_6 = arg_3[..., arg_5:arg_5+1]\n\n if arg_1 is not None:\n arg_7 = ia.imresize_single_image(arg_6, arg_1, interpolation=\"nearest\")\n else:\n arg_7 = arg_6\n arg_7 = np.squeeze(arg_7).astype(np.float32) / 255.0\n\n if arg_2 is not None:\n # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)\n import matplotlib.pyplot as plt\n\n arg_8 = plt.get_cmap(arg_2)\n arg_9 = arg_8(arg_7)\n arg_9 = np.delete(arg_9, 3, 2)\n else:\n arg_9 = np.tile(arg_7[..., np.newaxis], (1, 1, 3))\n\n arg_9 = np.clip(arg_9 * 255, 0, 255).astype(np.uint8)\n\n arg_4.append(arg_9)\n return arg_4"} +{"_id": "doc_512", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.75, arg_3=\"jet\", arg_4=\"heatmaps\"):\n \"\"\"\n Draw the heatmaps as overlays over an image.\n\n Parameters\n ----------\n image : (H,W,3) ndarray\n Image onto which to draw the heatmaps. Expected to be of dtype uint8.\n\n alpha : float, optional\n Alpha/opacity value to use for the mixing of image and heatmaps.\n Higher values mean that the heatmaps will be more visible and the image less visible.\n\n cmap : str or None, optional\n Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.\n\n resize : {'heatmaps', 'image'}, optional\n In case of size differences between the image and heatmaps, either the image or\n the heatmaps can be resized. This parameter controls which of the two will be resized\n to the other's size.\n\n Returns\n -------\n mix : list of (H,W,3) ndarray\n Rendered overlays. One per heatmap array channel. Dtype is uint8.\n\n \"\"\"\n # assert RGB image\n ia.do_assert(arg_1.ndim == 3)\n ia.do_assert(arg_1.shape[2] == 3)\n ia.do_assert(arg_1.dtype.type == np.uint8)\n\n ia.do_assert(0 - 1e-8 <= arg_2 <= 1.0 + 1e-8)\n ia.do_assert(arg_4 in [\"heatmaps\", \"image\"])\n\n if arg_4 == \"image\":\n arg_1 = ia.imresize_single_image(arg_1, arg_0.arr_0to1.shape[0:2], interpolation=\"cubic\")\n\n arg_5 = arg_0.draw(\n size=arg_1.shape[0:2] if arg_4 == \"heatmaps\" else None,\n arg_3=arg_3\n )\n\n arg_6 = [\n np.clip((1-arg_2) * arg_1 + arg_2 * heatmap_i, 0, 255).astype(np.uint8)\n for heatmap_i\n in arg_5\n ]\n\n return arg_6"} +{"_id": "doc_513", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"constant\", arg_3=0.0, arg_4=False):\n \"\"\"\n Pad the heatmaps on their sides so that they match a target aspect ratio.\n\n Depending on which dimension is smaller (height or width), only the corresponding\n sides (left/right or top/bottom) will be padded. In each case, both of the sides will\n be padded equally.\n\n Parameters\n ----------\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice\n as much width as height.\n\n mode : str, optional\n Padding mode to use. See :func:`numpy.pad` for details.\n\n cval : number, optional\n Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.\n\n return_pad_amounts : bool, optional\n If False, then only the padded image will be returned. If True, a tuple with two\n entries will be returned, where the first entry is the padded image and the second\n entry are the amounts by which each image side was padded. 
These amounts are again a\n tuple of the form (top, right, bottom, left), with each value being an integer.\n\n Returns\n -------\n heatmaps : imgaug.HeatmapsOnImage\n Padded heatmaps as HeatmapsOnImage object.\n\n pad_amounts : tuple of int\n Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.\n This tuple is only returned if `return_pad_amounts` was set to True.\n\n \"\"\"\n arg_5, arg_6 = ia.Func(arg_0.arr_0to1, arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_4=True)\n arg_7 = HeatmapsOnImage.from_0to1(arg_5, shape=arg_0.shape, min_value=arg_0.min_value,\n max_value=arg_0.max_value)\n if arg_4:\n return arg_7, arg_6\n else:\n return arg_7"} +{"_id": "doc_514", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.0, arg_3=1.0):\n \"\"\"\n Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.\n\n Parameters\n ----------\n arr_0to1 : (H,W) or (H,W,C) ndarray\n Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.\n Expected dtype is float32.\n\n shape : tuple of ints\n Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the\n heatmap(s) array, unless it is identical to the image shape (note the likely\n difference between the arrays in the number of channels).\n If there is not a corresponding image, use the shape of the heatmaps array.\n\n min_value : float, optional\n Minimum value for the heatmaps that the 0-to-1 array represents. This will usually\n be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the\n underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.\n E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these\n to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``\n so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value\n range (-1.0, 1.0).\n\n max_value : float, optional\n Maximum value for the heatmaps that to 0-to-255 array represents.\n See parameter min_value for details.\n\n Returns\n -------\n heatmaps : imgaug.HeatmapsOnImage\n Heatmaps object.\n\n \"\"\"\n arg_4 = HeatmapsOnImage(arg_0, arg_1, arg_2=0.0, arg_3=1.0)\n arg_4.min_value = arg_2\n arg_4.max_value = arg_3\n return arg_4"} +{"_id": "doc_515", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create a deep copy of the Heatmaps object.\n\n Returns\n -------\n imgaug.HeatmapsOnImage\n Deep copy.\n\n \"\"\"\n return HeatmapsOnImage(arg_0.get_arr(), shape=arg_0.shape, min_value=arg_0.min_value, max_value=arg_0.max_value)"} +{"_id": "doc_516", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n See ``ToTensor`` for more details.\n\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n if not(_is_pil_image(arg_0) or _is_numpy_image(arg_0)):\n raise TypeError('pic should be PIL Image or ndarray. 
Got {}'.format(type(arg_0)))\n\n if isinstance(arg_0, np.ndarray):\n # handle numpy array\n if arg_0.ndim == 2:\n arg_0 = arg_0[:, :, None]\n\n arg_1 = torch.from_numpy(arg_0.transpose((2, 0, 1)))\n # backward compatibility\n if isinstance(arg_1, torch.ByteTensor):\n return arg_1.float().div(255)\n else:\n return arg_1\n\n if accimage is not None and isinstance(arg_0, accimage.Image):\n arg_2 = np.zeros([arg_0.channels, arg_0.height, arg_0.width], dtype=np.float32)\n arg_0.copyto(arg_2)\n return torch.from_numpy(arg_2)\n\n # handle PIL Image\n if arg_0.mode == 'I':\n arg_1 = torch.from_numpy(np.array(arg_0, np.int32, copy=False))\n elif arg_0.mode == 'I;16':\n arg_1 = torch.from_numpy(np.array(arg_0, np.int16, copy=False))\n elif arg_0.mode == 'F':\n arg_1 = torch.from_numpy(np.array(arg_0, np.float32, copy=False))\n elif arg_0.mode == '1':\n arg_1 = 255 * torch.from_numpy(np.array(arg_0, np.uint8, copy=False))\n else:\n arg_1 = torch.ByteTensor(torch.ByteStorage.from_buffer(arg_0.tobytes()))\n # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK\n if arg_0.mode == 'YCbCr':\n arg_3 = 3\n elif arg_0.mode == 'I;16':\n arg_3 = 1\n else:\n arg_3 = len(arg_0.mode)\n arg_1 = arg_1.view(arg_0.size[1], arg_0.size[0], arg_3)\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n arg_1 = arg_1.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(arg_1, torch.ByteTensor):\n return arg_1.float().div(255)\n else:\n return arg_1"} +{"_id": "doc_517", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n\n .. note::\n This transform acts out of place by default, i.e., it does not mutates the input tensor.\n\n See :class:`~torchvision.transforms.Normalize` for more details.\n\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be Funcd.\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n if not _is_tensor_image(arg_0):\n raise TypeError('tensor is not a torch image.')\n\n if not arg_3:\n arg_0 = arg_0.clone()\n\n arg_1 = torch.as_tensor(arg_1, dtype=torch.float32, device=arg_0.device)\n arg_2 = torch.as_tensor(arg_2, dtype=torch.float32, device=arg_0.device)\n arg_0.sub_(arg_1[:, None, None]).div_(arg_2[:, None, None])\n return arg_0"} +{"_id": "doc_518", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.BILINEAR):\n r\"\"\"Resize the input PIL Image to the given size.\n\n Args:\n img (PIL Image): Image to be Funcd.\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), the output size will be matched to this. If size is an int,\n the smaller edge of the image will be matched to this number maintaing\n the aspect ratio. i.e, if height > width, then image will be rescaled to\n :math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n\n Returns:\n PIL Image: Resized image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(arg_0)))\n if not (isinstance(arg_1, int) or (isinstance(arg_1, Iterable) and len(arg_1) == 2)):\n raise TypeError('Got inappropriate size arg: {}'.format(arg_1))\n\n if isinstance(arg_1, int):\n arg_5, arg_6 = arg_0.size\n if (arg_5 <= arg_6 and arg_5 == arg_1) or (arg_6 <= arg_5 and arg_6 == arg_1):\n return arg_0\n if arg_5 < arg_6:\n arg_7 = arg_1\n arg_8 = int(arg_1 * arg_6 / arg_5)\n return arg_0.Func((arg_7, arg_8), arg_2)\n else:\n arg_8 = arg_1\n arg_7 = int(arg_1 * arg_5 / arg_6)\n return arg_0.Func((arg_7, arg_8), arg_2)\n else:\n return arg_0.Func(arg_1[::-1], arg_2)"} +{"_id": "doc_519", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3='constant'):\n r\"\"\"Pad the given PIL Image on all sides with specified Funcding mode and fill value.\n\n Args:\n img (PIL Image): Image to be Funcded.\n Funcding (int or tuple): Padding on each border. If a single int is provided this\n is used to Func all borders. If tuple of length 2 is provided this is the Funcding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the Funcding for the left, top, right and bottom borders\n respectively.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the Funcding_mode is constant\n Funcding_mode: Type of Funcding. Should be: constant, edge, reflect or symmetric. Default is constant.\n\n - constant: Funcs with a constant value, this value is specified with fill\n\n - edge: Funcs with the last value on the edge of the image\n\n - reflect: Funcs with reflection of image (without repeating the last value on the edge)\n\n Funcding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: Funcs with reflection of image (repeating the last value on the edge)\n\n Funcding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n PIL Image: Padded image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(arg_0)))\n\n if not isinstance(arg_1, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate Funcding arg')\n if not isinstance(arg_2, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(arg_3, str):\n raise TypeError('Got inappropriate Funcding_mode arg')\n\n if isinstance(arg_1, Sequence) and len(arg_1) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(arg_1)))\n\n assert arg_3 in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n if arg_3 == 'constant':\n if arg_0.mode == 'P':\n arg_4 = arg_0.getpalette()\n arg_5 = ImageOps.expand(arg_0, border=arg_1, arg_2=arg_2)\n arg_5.putpalette(arg_4)\n return arg_5\n\n return ImageOps.expand(arg_0, border=arg_1, arg_2=arg_2)\n else:\n if isinstance(arg_1, int):\n arg_6 = arg_8 = arg_7 = arg_9 = arg_1\n if isinstance(arg_1, Sequence) and len(arg_1) == 2:\n arg_6 = arg_8 = arg_1[0]\n arg_7 = arg_9 = arg_1[1]\n if isinstance(arg_1, Sequence) and len(arg_1) == 4:\n arg_6 = arg_1[0]\n arg_7 = arg_1[1]\n arg_8 = arg_1[2]\n arg_9 = arg_1[3]\n\n if arg_0.mode == 'P':\n arg_4 = arg_0.getpalette()\n arg_0 = np.asarray(arg_0)\n arg_0 = np.Func(arg_0, ((arg_7, arg_9), (arg_6, arg_8)), arg_3)\n arg_0 = Image.fromarray(arg_0)\n arg_0.putpalette(arg_4)\n return arg_0\n\n arg_0 = np.asarray(arg_0)\n # RGB image\n if len(arg_0.shape) == 3:\n arg_0 = np.Func(arg_0, ((arg_7, arg_9), (arg_6, arg_8), (0, 0)), arg_3)\n # Grayscale image\n if len(arg_0.shape) == 2:\n arg_0 = np.Func(arg_0, ((arg_7, arg_9), (arg_6, arg_8)), arg_3)\n\n return Image.fromarray(arg_0)"} +{"_id": "doc_520", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Crop the given PIL Image.\n\n Args:\n img (PIL Image): Image to be Funcped.\n i (int): i in (i,j) i.e coordinates of the upper left corner.\n j (int): j in (i,j) i.e coordinates of the upper left corner.\n h (int): Height of the Funcped image.\n w (int): Width of the Funcped image.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. Got {}'.format(type(arg_0)))\n\n return arg_0.Func((arg_2, arg_1, arg_2 + arg_4, arg_1 + arg_3))"} +{"_id": "doc_521", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=arg_7.BILINEAR):\n \"\"\"Crop the given PIL Image and resize it to desired size.\n\n Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.\n\n Args:\n img (PIL Image): Image to be cropped.\n i (int): i in (i,j) i.e coordinates of the upper left corner\n j (int): j in (i,j) i.e coordinates of the upper left corner\n h (int): Height of the cropped image.\n w (int): Width of the cropped image.\n size (sequence or int): Desired output size. Same semantics as ``resize``.\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``.\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n assert _is_pil_image(arg_0), 'img should be PIL Image'\n arg_0 = crop(arg_0, arg_1, arg_2, arg_3, arg_4)\n arg_0 = resize(arg_0, arg_5, arg_6)\n return arg_0"} +{"_id": "doc_522", "title": "", "text": "def Func(arg_0):\n \"\"\"Horizontally flip the given PIL Image.\n\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Horizontall flipped image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(arg_0)))\n\n return arg_0.transpose(Image.FLIP_LEFT_RIGHT)"} +{"_id": "doc_523", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=arg_4.BICUBIC):\n \"\"\"Perform Func transform of the given PIL Image.\n\n Args:\n img (PIL Image): Image to be transformed.\n coeffs (tuple) : 8-tuple (a, b, c, d, e, f, g, h) which contains the coefficients.\n for a Func transform.\n interpolation: Default- Image.BICUBIC\n Returns:\n PIL Image: Perspectively transformed Image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. Got {}'.format(type(arg_0)))\n\n arg_6 = _get_Func_coeffs(arg_1, arg_2)\n return arg_0.transform(arg_0.size, arg_4.PERSPECTIVE, arg_6, arg_3)"} +{"_id": "doc_524", "title": "", "text": "def Func(arg_0):\n \"\"\"Vertically flip the given PIL Image.\n\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Vertically flipped image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. Got {}'.format(type(arg_0)))\n\n return arg_0.transpose(Image.FLIP_TOP_BOTTOM)"} +{"_id": "doc_525", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Crop the given PIL Image into four corners and the central crop.\n\n .. Note::\n This transform returns a tuple of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n\n Returns:\n tuple: tuple (tl, tr, bl, br, center)\n Corresponding top left, top right, bottom left, bottom right and center crop.\n \"\"\"\n if isinstance(arg_1, numbers.Number):\n arg_1 = (int(arg_1), int(arg_1))\n else:\n assert len(arg_1) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n arg_2, arg_3 = arg_0.size\n arg_4, arg_5 = arg_1\n if arg_5 > arg_2 or arg_4 > arg_3:\n raise ValueError(\"Requested crop size {} is bigger than input size {}\".format(arg_1,\n (arg_3, arg_2)))\n arg_6 = arg_0.crop((0, 0, arg_5, arg_4))\n arg_7 = arg_0.crop((arg_2 - arg_5, 0, arg_2, arg_4))\n arg_8 = arg_0.crop((0, arg_3 - arg_4, arg_5, arg_3))\n arg_9 = arg_0.crop((arg_2 - arg_5, arg_3 - arg_4, arg_2, arg_3))\n arg_10 = center_crop(arg_0, (arg_4, arg_5))\n return (arg_6, arg_7, arg_8, arg_9, arg_10)"} +{"_id": "doc_526", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Adjust brightness of an Image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n brightness_factor (float): How much to adjust the brightness. Can be\n any non negative number. 0 gives a black image, 1 gives the\n original image while 2 increases the brightness by a factor of 2.\n\n Returns:\n PIL Image: Brightness adjusted image.\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. Got {}'.format(type(arg_0)))\n\n arg_2 = ImageEnhance.Brightness(arg_0)\n arg_0 = arg_2.enhance(arg_1)\n return arg_0"} +{"_id": "doc_527", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=False, arg_4=None):\n \"\"\"Rotate the image by angle.\n\n\n Args:\n img (PIL Image): PIL Image to be Funcd.\n angle (float or int): In degrees degrees counter clockwise order.\n resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\n An optional resampling filter. 
See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output image to make it large enough to hold the entire Funcd image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. Got {}'.format(type(arg_0)))\n\n return arg_0.Func(arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_528", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=0, arg_6=None):\n \"\"\"Apply Func transformation on the image keeping image center invariant\n\n Args:\n img (PIL Image): PIL Image to be rotated.\n angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.\n translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)\n scale (float): overall scale\n shear (float): shear angle value in degrees between -180 to 180, clockwise direction.\n resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\n An optional resampling filter.\n See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)\n \"\"\"\n if not _is_pil_image(arg_0):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(arg_0)))\n\n assert isinstance(arg_2, (tuple, list)) and len(arg_2) == 2, \\\n \"Argument translate should be a list or tuple of length 2\"\n\n assert arg_3 > 0.0, \"Argument scale should be positive\"\n\n arg_7 = arg_0.size\n arg_8 = (arg_0.size[0] * 0.5 + 0.5, arg_0.size[1] * 0.5 + 0.5)\n arg_9 = _get_inverse_Func_matrix(arg_8, arg_1, arg_2, arg_3, arg_4)\n arg_10 = {\"fillcolor\": arg_6} if PILLOW_VERSION[0] == '5' else {}\n return arg_0.transform(arg_7, Image.AFFINE, arg_9, arg_5, **arg_10)"} +{"_id": "doc_529", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n if sys.version_info >= (3, 5):\n # Faster and available in Python 3.5 and above\n arg_2 = [d.name for d in os.scandir(arg_1) if d.is_dir()]\n else:\n arg_2 = [d for d in os.listdir(arg_1) if os.path.isdir(os.path.join(arg_1, d))]\n arg_2.sort()\n arg_3 = {arg_2[i]: i for i in range(len(arg_2))}\n return arg_2, arg_3"} +{"_id": "doc_530", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a Tensor containing the list of labels\n Read the file and keep only the ID of the 3D point.\n \"\"\"\n arg_2 = []\n with open(os.path.join(arg_0, arg_1), 'r') as f:\n arg_2 = [int(line.split()[0]) for line in f]\n return torch.LongTensor(arg_2)"} +{"_id": "doc_531", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(1,)):\n \"\"\"Computes the Func over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n arg_3 = max(arg_2)\n arg_4 = arg_1.size(0)\n\n arg_5, arg_6 = arg_0.topk(arg_3, 1, True, True)\n arg_6 = arg_6.t()\n arg_7 = arg_6.eq(arg_1[None])\n\n arg_8 = []\n for arg_9 in arg_2:\n arg_10 = arg_7[:arg_9].flatten().sum(dtype=torch.float32)\n arg_8.append(arg_10 * (100.0 / arg_4))\n return arg_8"} +{"_id": "doc_532", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as arg_5\n arg_1 = arg_5.print\n\n def arg_6(*arg_2, **arg_3):\n arg_4 = arg_3.pop('force', False)\n if arg_0 or arg_4:\n arg_1(*arg_2, **arg_3)\n\n arg_5.print = arg_6"} +{"_id": "doc_533", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Download a file from a url and place it in root.\n\n Args:\n url (str): URL to download file from\n root (str): Directory to place downloaded file in\n filename (str, optional): Name to save the file under. If None, use the basename of the URL\n md5 (str, optional): MD5 checksum of the download. If None, do not check\n \"\"\"\n from six.moves import urllib\n\n arg_1 = os.path.expanduser(arg_1)\n if not arg_2:\n arg_2 = os.path.basename(arg_0)\n arg_4 = os.path.join(arg_1, arg_2)\n\n makedir_exist_ok(arg_1)\n\n # downloads file\n if os.path.isfile(arg_4) and check_integrity(arg_4, arg_3):\n print('Using downloaded and verified file: ' + arg_4)\n else:\n try:\n print('Downloading ' + arg_0 + ' to ' + arg_4)\n urllib.request.urlretrieve(\n arg_0, arg_4,\n reporthook=gen_bar_updater()\n )\n except OSError:\n if arg_0[:5] == 'https':\n arg_0 = arg_0.replace('https:', 'http:')\n print('Failed download. 
Trying https -> http instead.'\n ' Downloading ' + arg_0 + ' to ' + arg_4)\n urllib.request.urlretrieve(\n arg_0, arg_4,\n reporthook=gen_bar_updater()\n )"} +{"_id": "doc_534", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"List all directories at a given root\n\n Args:\n root (str): Path to directory whose folders need to be listed\n prefix (bool, optional): If true, prepends the path to each result, otherwise\n only returns the name of the directories found\n \"\"\"\n arg_0 = os.path.expanduser(arg_0)\n arg_2 = list(\n filter(\n lambda p: os.path.isdir(os.path.join(arg_0, p)),\n os.listdir(arg_0)\n )\n )\n\n if arg_1 is True:\n arg_2 = [os.path.join(arg_0, d) for d in arg_2]\n\n return arg_2"} +{"_id": "doc_535", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"List all files ending with a suffix at a given root\n\n Args:\n root (str): Path to directory whose folders need to be listed\n suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').\n It uses the Python \"str.endswith\" method and is passed directly\n prefix (bool, optional): If true, prepends the path to each result, otherwise\n only returns the name of the files found\n \"\"\"\n arg_0 = os.path.expanduser(arg_0)\n arg_3 = list(\n filter(\n lambda p: os.path.isfile(os.path.join(arg_0, p)) and p.endswith(arg_1),\n os.listdir(arg_0)\n )\n )\n\n if arg_2 is True:\n arg_3 = [os.path.join(arg_0, d) for d in arg_3]\n\n return arg_3"} +{"_id": "doc_536", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n arg_2, arg_3 = arg_0.size\n arg_4, arg_5 = arg_1\n if arg_2 == arg_5 and arg_3 == arg_4:\n return 0, 0, arg_3, arg_2\n\n arg_6 = random.randint(0, arg_3 - arg_4)\n arg_7 = random.randint(0, arg_2 - arg_5)\n return arg_6, arg_7, arg_4, arg_5"} +{"_id": "doc_537", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width : width of the image.\n height : height of the image.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the orignal image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n arg_3 = int(arg_1 / 2)\n arg_4 = int(arg_0 / 2)\n arg_5 = (random.randint(0, int(arg_2 * arg_4)),\n random.randint(0, int(arg_2 * arg_3)))\n arg_6 = (random.randint(arg_0 - int(arg_2 * arg_4) - 1, arg_0 - 1),\n random.randint(0, int(arg_2 * arg_3)))\n arg_7 = (random.randint(arg_0 - int(arg_2 * arg_4) - 1, arg_0 - 1),\n random.randint(arg_1 - int(arg_2 * arg_3) - 1, arg_1 - 1))\n arg_8 = (random.randint(0, int(arg_2 * arg_4)),\n random.randint(arg_1 - int(arg_2 * arg_3) - 1, arg_1 - 1))\n arg_9 = [(0, 0), (arg_0 - 1, 0), (arg_0 - 1, arg_1 - 1), (0, arg_1 - 1)]\n arg_10 = [arg_5, arg_6, arg_7, arg_8]\n return arg_9, arg_10"} +{"_id": "doc_538", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n arg_3 = arg_0.size[0] * 
arg_0.size[1]\n\n for arg_4 in range(10):\n arg_5 = random.uniform(*arg_1) * arg_3\n arg_6 = (math.log(arg_2[0]), math.log(arg_2[1]))\n arg_7 = math.exp(random.uniform(*arg_6))\n\n arg_8 = int(round(math.sqrt(arg_5 * arg_7)))\n arg_9 = int(round(math.sqrt(arg_5 / arg_7)))\n\n if arg_8 <= arg_0.size[0] and arg_9 <= arg_0.size[1]:\n arg_10 = random.randint(0, arg_0.size[1] - arg_9)\n arg_11 = random.randint(0, arg_0.size[0] - arg_8)\n return arg_10, arg_11, arg_9, arg_8\n\n # Fallback to central crop\n arg_12 = arg_0.size[0] / arg_0.size[1]\n if (arg_12 < min(arg_2)):\n arg_8 = arg_0.size[0]\n arg_9 = arg_8 / min(arg_2)\n elif (arg_12 > max(arg_2)):\n arg_9 = arg_0.size[1]\n arg_8 = arg_9 * max(arg_2)\n else: # whole image\n arg_8 = arg_0.size[0]\n arg_9 = arg_0.size[1]\n arg_10 = (arg_0.size[1] - arg_9) // 2\n arg_11 = (arg_0.size[0] - arg_8) // 2\n return arg_10, arg_11, arg_9, arg_8"} +{"_id": "doc_539", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Get a randomized transform to be applied on image.\n\n Arguments are same as that of __init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n arg_4 = []\n\n if arg_0 is not None:\n arg_5 = random.uniform(arg_0[0], arg_0[1])\n arg_4.append(Lambda(lambda img: F.adjust_brightness(img, arg_5)))\n\n if arg_1 is not None:\n arg_6 = random.uniform(arg_1[0], arg_1[1])\n arg_4.append(Lambda(lambda img: F.adjust_contrast(img, arg_6)))\n\n if arg_2 is not None:\n arg_7 = random.uniform(arg_2[0], arg_2[1])\n arg_4.append(Lambda(lambda img: F.adjust_saturation(img, arg_7)))\n\n if arg_3 is not None:\n arg_8 = random.uniform(arg_3[0], arg_3[1])\n arg_4.append(Lambda(lambda img: F.adjust_hue(img, arg_8)))\n\n random.shuffle(arg_4)\n arg_9 = Compose(arg_4)\n\n return arg_9"} +{"_id": "doc_540", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Get parameters for affine transformation\n\n Returns:\n sequence: params to be passed to the affine transformation\n \"\"\"\n arg_5 = random.uniform(arg_0[0], arg_0[1])\n if arg_1 is not None:\n arg_6 = arg_1[0] * arg_4[0]\n arg_7 = arg_1[1] * arg_4[1]\n arg_8 = (np.round(random.uniform(-arg_6, arg_6)),\n np.round(random.uniform(-arg_7, arg_7)))\n else:\n arg_8 = (0, 0)\n\n if arg_2 is not None:\n arg_9 = random.uniform(arg_2[0], arg_2[1])\n else:\n arg_9 = 1.0\n\n if arg_3 is not None:\n arg_10 = random.uniform(arg_3[0], arg_3[1])\n else:\n arg_10 = 0.0\n\n return arg_5, arg_8, arg_9, arg_10"} +{"_id": "doc_541", "title": "", "text": "def Func(arg_0):\n \"\"\"Download the MNIST data if it doesn't exist in processed_folder already.\"\"\"\n\n if arg_0._check_exists():\n return\n\n makedir_exist_ok(arg_0.raw_folder)\n makedir_exist_ok(arg_0.processed_folder)\n\n # Func files\n for arg_1 in arg_0.urls:\n arg_2 = arg_1.rpartition('/')[2]\n arg_3 = os.path.join(arg_0.raw_folder, arg_2)\n Func_url(arg_1, root=arg_0.raw_folder, arg_2=arg_2, md5=None)\n arg_0.extract_gzip(gzip_path=arg_3, remove_finished=True)\n\n # process and save as torch files\n print('Processing...')\n\n arg_4 = (\n read_image_file(os.path.join(arg_0.raw_folder, 'train-images-idx3-ubyte')),\n read_label_file(os.path.join(arg_0.raw_folder, 'train-labels-idx1-ubyte'))\n )\n arg_5 = (\n read_image_file(os.path.join(arg_0.raw_folder, 't10k-images-idx3-ubyte')),\n read_label_file(os.path.join(arg_0.raw_folder, 't10k-labels-idx1-ubyte'))\n )\n with open(os.path.join(arg_0.processed_folder, arg_0.training_file), 'wb') as f:\n torch.save(arg_4, f)\n 
with open(os.path.join(arg_0.processed_folder, arg_0.test_file), 'wb') as f:\n torch.save(arg_5, f)\n\n print('Done!')"} +{"_id": "doc_542", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Returns theme name.\n\n Checks in this order:\n 1. override\n 2. cookies\n 3. settings\"\"\"\n\n if arg_0 and (arg_0 in themes or arg_0 == '__common__'):\n return arg_0\n arg_1 = request.args.get('theme', request.preferences.get_value('theme'))\n if arg_1 not in themes:\n arg_1 = default_theme\n return arg_1"} +{"_id": "doc_543", "title": "", "text": "def Func():\n \"\"\"Return Func results\"\"\"\n\n # set blocked engines\n arg_0 = request.preferences.engines.get_disabled()\n\n # parse query\n if PY3:\n arg_1 = RawTextQuery(request.form.get('q', b''), arg_0)\n else:\n arg_1 = RawTextQuery(request.form.get('q', u'').encode('utf-8'), arg_0)\n arg_1.parse_query()\n\n # check if search query is set\n if not arg_1.getSearchQuery():\n return '', 400\n\n # run Func\n arg_2 = autocomplete_backends.get(request.preferences.get_value('autocomplete'))\n\n # parse searx specific Func results like !bang\n arg_3 = searx_bang(arg_1)\n\n # normal autocompletion results only appear if max 3 inner results returned\n if len(arg_3) <= 3 and arg_2:\n # get language from cookie\n arg_4 = request.preferences.get_value('language')\n if not arg_4 or arg_4 == 'all':\n arg_4 = 'en'\n else:\n arg_4 = arg_4.split('-')[0]\n # run autocompletion\n arg_3.extend(arg_2(arg_1.getSearchQuery(), arg_4))\n\n # parse results (write :language and !engine back to result string)\n arg_5 = []\n for arg_6 in arg_3:\n arg_1.changeSearchQuery(arg_6)\n\n # add parsed result\n arg_5.append(arg_1.getFullQuery())\n\n # return Func results\n if request.form.get('format') == 'x-suggestions':\n return Response(json.dumps([arg_1.query, arg_5]),\n mimetype='application/json')\n\n return Response(json.dumps(arg_5),\n mimetype='application/json')"} +{"_id": "doc_544", "title": "", "text": "def Func():\n \"\"\"Render Func page && save user Func\"\"\"\n\n # save Func\n if request.method == 'POST':\n arg_0 = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index'))))\n try:\n request.Func.parse_form(request.form)\n except ValidationException:\n request.errors.append(gettext('Invalid settings, please edit your Func'))\n return arg_0\n return request.Func.save(arg_0)\n\n # render Func\n arg_1 = request.Func.get_value('image_proxy')\n arg_2 = request.Func.get_value('language')\n arg_3 = request.Func.engines.get_disabled()\n arg_4 = request.Func.plugins.get_enabled()\n\n # stats for Func page\n arg_5 = {}\n\n for arg_6 in categories:\n for arg_7 in categories[arg_6]:\n arg_5[arg_7.name] = {'time': None,\n 'warn_timeout': False,\n 'warn_time': False}\n if arg_7.timeout > settings['outgoing']['request_timeout']:\n arg_5[arg_7.name]['warn_timeout'] = True\n arg_5[arg_7.name]['supports_selected_language'] = _is_selected_language_supported(arg_7, request.Func)\n\n # get first element [0], the engine time,\n # and then the second element [1] : the time (the first one is the label)\n for arg_9 in get_engines_stats()[0][1]:\n arg_5[arg_9.get('name')]['time'] = round(arg_9.get('avg'), 3)\n if arg_9.get('avg') > settings['outgoing']['request_timeout']:\n arg_5[arg_9.get('name')]['warn_time'] = True\n # end of stats\n\n return render('Func.html',\n locales=settings['locales'],\n current_locale=get_locale(),\n arg_1=arg_1,\n engines_by_category=categories,\n arg_5=arg_5,\n answerers=[{'info': arg_11.self_info(), 'keywords': arg_11.keywords} for arg_11 
in answerers],\n arg_3=arg_3,\n autocomplete_backends=autocomplete_backends,\n shortcuts={arg_13: arg_12 for arg_12, arg_13 in engine_shortcuts.items()},\n themes=themes,\n plugins=plugins,\n doi_resolvers=settings['doi_resolvers'],\n current_doi_resolver=get_doi_resolver(request.args, request.Func.get_value('doi_resolver')),\n arg_4=arg_4,\n theme=get_current_theme_name(),\n Func_url_params=request.Func.get_as_url_params(),\n base_url=get_base_url(),\n Func=True)"} +{"_id": "doc_545", "title": "", "text": "def Func(arg_0):\n '''check if the searchQuery contain a bang, and create fitting autocompleter results'''\n # check if there is a query which can be parsed\n if len(arg_0.getSearchQuery()) == 0:\n return []\n\n arg_1 = []\n\n # check if current query stats with !bang\n arg_2 = arg_0.getSearchQuery()[0]\n if arg_2 == '!' or arg_2 == '?':\n if len(arg_0.getSearchQuery()) == 1:\n # show some example queries\n # TODO, check if engine is not avaliable\n arg_1.append(arg_2 + \"images\")\n arg_1.append(arg_2 + \"wikipedia\")\n arg_1.append(arg_2 + \"osm\")\n else:\n arg_3 = arg_0.getSearchQuery()[1:]\n\n # check if query starts with categorie name\n for arg_4 in categories:\n if arg_4.startswith(arg_3):\n arg_1.append(arg_2 + '{categorie}'.format(arg_4=arg_4))\n\n # check if query starts with engine name\n for arg_5 in engines:\n if arg_5.startswith(arg_3.replace('_', ' ')):\n arg_1.append(arg_2 + '{engine}'.format(arg_5=arg_5.replace(' ', '_')))\n\n # check if query starts with engine shortcut\n for arg_6 in engine_shortcuts:\n if arg_6.startswith(arg_3):\n arg_1.append(arg_2 + '{engine_shortcut}'.format(arg_6=arg_6))\n\n # check if current query stats with :bang\n elif arg_2 == ':':\n if len(arg_0.getSearchQuery()) == 1:\n # show some example queries\n arg_1.append(\":en\")\n arg_1.append(\":en_us\")\n arg_1.append(\":english\")\n arg_1.append(\":united_kingdom\")\n else:\n arg_3 = arg_0.getSearchQuery()[1:]\n\n for arg_7 in language_codes:\n arg_8, arg_9, arg_10, arg_11 = map(unicode.lower, arg_7)\n\n # check if query starts with language-id\n if arg_8.startswith(arg_3):\n if len(arg_3) <= 2:\n arg_1.append(u':{lang_id}'.format(arg_8=arg_8.split('-')[0]))\n else:\n arg_1.append(u':{lang_id}'.format(arg_8=arg_8))\n\n # check if query starts with language name\n if arg_9.startswith(arg_3) or arg_11.startswith(arg_3):\n arg_1.append(u':{lang_name}'.format(arg_9=arg_9))\n\n # check if query starts with country\n if arg_10.startswith(arg_3.replace('_', ' ')):\n arg_1.append(u':{country}'.format(arg_10=arg_10.replace(' ', '_')))\n\n # remove duplicates\n arg_12 = set(arg_1)\n\n # remove results which are already contained in the query\n for arg_13 in arg_0.query_parts:\n if arg_13 in arg_12:\n arg_12.remove(arg_13)\n\n # convert result_set back to list\n return list(arg_12)"} +{"_id": "doc_546", "title": "", "text": "def Func(\n arg_0, arg_1,\n arg_2, arg_3, arg_4):\n \"\"\"Eight-schools joint log-prob.\"\"\"\n arg_5 = tfd.Normal(loc=0., scale=10.)\n arg_6 = tfd.Normal(loc=5., scale=1.)\n arg_7 = mvn(\n loc=tf.zeros_like(arg_4),\n scale=tf.ones_like(arg_4))\n arg_8 = mvn(\n loc=(arg_2 + tf.exp(arg_3) * arg_4),\n scale=arg_1)\n return (\n arg_5.log_prob(arg_2) +\n arg_6.log_prob(arg_3) +\n arg_7.log_prob(arg_4) +\n arg_8.log_prob(arg_0))"} +{"_id": "doc_547", "title": "", "text": "def Func(\n arg_0=arg_1(5e3),\n arg_2=arg_1(3e3),\n arg_3=3,\n arg_4=0.4):\n \"\"\"Runs HMC on the eight-schools unnormalized posterior.\"\"\"\n\n arg_5 = 8\n arg_6 = tf.constant(\n [28, 8, -3, 7, -1, 1, 18, 12],\n 
dtype=np.float32,\n name='treatment_effects')\n arg_7 = tf.constant(\n [15, 10, 16, 11, 9, 11, 10, 18],\n dtype=np.float32,\n name='treatment_stddevs')\n\n def unnormalized_posterior_log_prob(\n arg_8, arg_9, arg_10):\n \"\"\"Eight-schools unnormalized log posterior.\"\"\"\n return eight_schools_joint_log_prob(\n arg_6, arg_7,\n arg_8, arg_9, arg_10)\n\n if tf.executing_eagerly():\n arg_11 = tf.function(tfp.mcmc.sample_chain)\n else:\n arg_11 = tfp.mcmc.sample_chain\n\n def computation():\n \"\"\"The benchmark computation.\"\"\"\n arg_12, arg_13 = arg_11(\n arg_0=arg_0,\n arg_2=arg_2,\n current_state=(\n tf.zeros([], name='init_avg_effect'),\n tf.zeros([], name='init_avg_stddev'),\n tf.ones([arg_5], name='init_school_effects_standard'),\n ),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=unnormalized_posterior_log_prob,\n arg_4=arg_4,\n arg_3=arg_3))\n\n return arg_13.is_accepted\n\n # Let's force evaluation of graph to ensure build time is not part of our time\n # trial.\n arg_14 = computation()\n if not tf.executing_eagerly():\n arg_15 = tf.compat.v1.Session()\n arg_15.run(arg_14)\n\n arg_16 = time.time()\n if tf.executing_eagerly():\n arg_17 = computation()\n else:\n arg_17 = arg_15.run(arg_14)\n arg_18 = time.time() - arg_16\n\n arg_19 = np.sum(arg_17)\n arg_20 = np.float32(arg_19) / np.float32(arg_0)\n\n return dict(\n iters=(arg_0 + arg_2) * arg_3,\n extras={'acceptance_rate': arg_20},\n arg_18=arg_18)"} +{"_id": "doc_548", "title": "", "text": "def Func(**arg_0):\n \"\"\"Decorator to programmatically expand the docstring.\n\n Args:\n **kwargs: Keyword arguments to set. For each key-value pair `k` and `v`,\n the key is found as `${k}` in the docstring and replaced with `v`.\n\n Returns:\n Decorated function.\n \"\"\"\n def _fn_wrapped(arg_1):\n \"\"\"Original function with modified `__doc__` attribute.\"\"\"\n arg_2 = inspect.cleandoc(arg_1.__doc__)\n for arg_3, arg_4 in six.iteritems(arg_0):\n # Capture each ${k} reference to replace with v.\n # We wrap the replacement in a function so no backslash escapes\n # are processed.\n arg_5 = r'\\$\\{' + str(arg_3) + r'\\}'\n arg_2 = re.sub(arg_5, lambda match: arg_4, arg_2) # pylint: disable=cell-var-from-loop\n arg_1.__doc__ = arg_2\n return arg_1\n return _fn_wrapped"} +{"_id": "doc_549", "title": "", "text": "def Func(arg_0):\n \"\"\"Infer the original name passed into a distribution constructor.\n\n Distributions typically follow the pattern of\n with.name_scope(name) as name:\n super(name=name)\n so we attempt to reverse the name-scope transformation to allow\n addressing of RVs by the distribution's original, user-visible\n name kwarg.\n\n Args:\n distribution: a tfd.Distribution instance.\n Returns:\n simple_name: the original name passed into the Distribution.\n\n #### Example\n\n ```\n d1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/'\n d2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/'\n Func(d2) # returns 'x'\n\n ```\n\n \"\"\"\n arg_1 = arg_0.name\n\n # turn 'scope/x/' into 'x'\n if arg_1.endswith('/'):\n arg_1 = arg_1.split('/')[-2]\n\n # turn 'x_3' into 'x'\n arg_2 = arg_1.split('_')\n if arg_2[-1].isdigit():\n arg_1 = '_'.join(arg_2[:-1])\n\n return arg_1"} +{"_id": "doc_550", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"RandomVariable constructor with a dummy name argument.\"\"\"\n # Program transformations (e.g., `make_log_joint_fn`) assume that\n # the traced constructor has `name` and `value` kwargs, enabling\n # them to override the value of an RV according to its name.\n # 
User-defined RVs inherit their name from the provided\n # distribution; this helper method exposes the name as a dummy kwarg\n # so that it's visible to program transformations.\n del arg_3 # unused\n return RandomVariable(arg_0=arg_0,\n arg_1=arg_1,\n arg_2=arg_2)"} +{"_id": "doc_551", "title": "", "text": "def Func(arg_0):\n \"\"\"Factory function to make random variable given distribution class.\"\"\"\n\n @interceptable\n @functools.wraps(arg_0, assigned=('__module__', '__name__'))\n @docstring_util.expand_docstring(\n cls=arg_0.__name__,\n doc=inspect.cleandoc(arg_0.__init__.__doc__ or ''))\n def func(*arg_1, **arg_2):\n # pylint: disable=g-doc-args\n \"\"\"Create a random variable for ${cls}.\n\n See ${cls} for more details.\n\n Returns:\n RandomVariable.\n\n #### Original Docstring for Distribution\n\n ${doc}\n \"\"\"\n # pylint: enable=g-doc-args\n arg_3 = arg_2.pop('sample_shape', ())\n arg_4 = arg_2.pop('value', None)\n return RandomVariable(distribution=arg_0(*arg_1, **arg_2),\n arg_3=arg_3,\n arg_4=arg_4)\n return func"} +{"_id": "doc_552", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute one-step-ahead predictive distributions for all timesteps.\n\n Given samples from the posterior over parameters, return the predictive\n distribution over observations at each time `T`, given observations up\n through time `T-1`.\n\n Args:\n model: An instance of `StructuralTimeSeries` representing a\n time-series model. This represents a joint distribution over\n time-series and their parameters with batch shape `[b1, ..., bN]`.\n observed_time_series: `float` `Tensor` of shape\n `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where\n `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`\n dimension may (optionally) be omitted if `num_timesteps > 1`. May\n optionally be an instance of `tfp.sts.MaskedTimeSeries` including a\n mask `Tensor` to encode the locations of missing observations.\n parameter_samples: Python `list` of `Tensors` representing posterior samples\n of model parameters, with shapes `[concat([[num_posterior_draws],\n param.prior.batch_shape, param.prior.event_shape]) for param in\n model.parameters]`. This may optionally also be a map (Python `dict`) of\n parameter names to `Tensor` values.\n\n Returns:\n forecast_dist: a `tfd.MixtureSameFamily` instance with event shape\n [num_timesteps] and\n batch shape `concat([sample_shape, model.batch_shape])`, with\n `num_posterior_draws` mixture components. 
The `t`th step represents the\n forecast distribution `p(observed_time_series[t] |\n observed_time_series[0:t-1], parameter_samples)`.\n\n #### Examples\n\n Suppose we've built a model and fit it to data using HMC:\n\n ```python\n day_of_week = tfp.sts.Seasonal(\n num_seasons=7,\n observed_time_series=observed_time_series,\n name='day_of_week')\n local_linear_trend = tfp.sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n name='local_linear_trend')\n model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],\n observed_time_series=observed_time_series)\n\n samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)\n ```\n\n Passing the posterior samples into `Func`, we construct a\n one-step-ahead predictive distribution:\n\n ```python\n Func_dist = tfp.sts.Func(\n model, observed_time_series, parameter_samples=samples)\n\n predictive_means = Func_dist.mean()\n predictive_scales = Func_dist.stddev()\n ```\n\n If using variational inference instead of HMC, we'd construct a forecast using\n samples from the variational posterior:\n\n ```python\n (variational_loss,\n variational_distributions) = tfp.sts.build_factored_variational_loss(\n model=model, observed_time_series=observed_time_series)\n\n # OMITTED: take steps to optimize variational loss\n\n samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}\n Func_dist = tfp.sts.Func(\n model, observed_time_series, parameter_samples=samples)\n ```\n\n We can visualize the forecast by plotting:\n\n ```python\n from matplotlib import pylab as plt\n def plot_Func(observed_time_series,\n forecast_mean,\n forecast_scale):\n plt.figure(figsize=(12, 6))\n num_timesteps = forecast_mean.shape[-1]\n c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)\n plt.plot(observed_time_series, label=\"observed time series\", color=c1)\n plt.plot(forecast_mean, label=\"one-step prediction\", color=c2)\n plt.fill_between(np.arange(num_timesteps),\n forecast_mean - 2 * forecast_scale,\n forecast_mean + 2 * forecast_scale,\n alpha=0.1, color=c2)\n plt.legend()\n\n plot_Func(observed_time_series,\n forecast_mean=predictive_means,\n forecast_scale=predictive_scales)\n ```\n\n To detect anomalous timesteps, we check whether the observed value at each\n step is within a 95% predictive interval, i.e., two standard deviations from\n the mean:\n\n ```python\n z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1])\n / predictive_scales[..., :-1])\n anomalous_timesteps = tf.boolean_mask(\n tf.range(1, num_timesteps),\n tf.abs(z_scores) > 2.0)\n ```\n\n \"\"\"\n\n with tf.compat.v1.name_scope(\n 'Func', values=[arg_1, arg_2]):\n\n [\n arg_1,\n arg_3\n ] = sts_util.canonicalize_observed_time_series_with_mask(\n arg_1)\n\n # Run filtering over the training timesteps to extract the\n # predictive means and variances.\n arg_4 = dist_util.prefer_static_value(\n tf.shape(input=arg_1))[-2]\n arg_5 = arg_0.make_state_space_model(\n arg_4=arg_4, param_vals=arg_2)\n (arg_6, arg_6, arg_6, arg_6, arg_6, arg_7, arg_8\n ) = arg_5.forward_filter(arg_1, mask=arg_3)\n\n # Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`\n # to a scalar time series.\n return sts_util.mix_over_posterior_draws(\n means=arg_7[..., 0],\n variances=arg_8[..., 0, 0])"} +{"_id": "doc_553", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Construct predictive distribution over future observations.\n\n Given samples from the posterior over parameters, return the predictive\n distribution over future 
observations for num_steps_Func timesteps.\n\n Args:\n model: An instance of `StructuralTimeSeries` representing a\n time-series model. This represents a joint distribution over\n time-series and their parameters with batch shape `[b1, ..., bN]`.\n observed_time_series: `float` `Tensor` of shape\n `concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where\n `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`\n dimension may (optionally) be omitted if `num_timesteps > 1`. May\n optionally be an instance of `tfp.sts.MaskedTimeSeries` including a\n mask `Tensor` to encode the locations of missing observations.\n parameter_samples: Python `list` of `Tensors` representing posterior samples\n of model parameters, with shapes `[concat([[num_posterior_draws],\n param.prior.batch_shape, param.prior.event_shape]) for param in\n model.parameters]`. This may optionally also be a map (Python `dict`) of\n parameter names to `Tensor` values.\n num_steps_Func: scalar `int` `Tensor` number of steps to Func.\n\n Returns:\n Func_dist: a `tfd.MixtureSameFamily` instance with event shape\n [num_steps_Func, 1] and batch shape\n `concat([sample_shape, model.batch_shape])`, with `num_posterior_draws`\n mixture components.\n\n #### Examples\n\n Suppose we've built a model and fit it to data using HMC:\n\n ```python\n day_of_week = tfp.sts.Seasonal(\n num_seasons=7,\n observed_time_series=observed_time_series,\n name='day_of_week')\n local_linear_trend = tfp.sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n name='local_linear_trend')\n model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],\n observed_time_series=observed_time_series)\n\n samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)\n ```\n\n Passing the posterior samples into `Func`, we construct a Func\n distribution:\n\n ```python\n Func_dist = tfp.sts.Func(model, observed_time_series,\n parameter_samples=samples,\n num_steps_Func=50)\n\n Func_mean = Func_dist.mean()[..., 0] # shape: [50]\n Func_scale = Func_dist.stddev()[..., 0] # shape: [50]\n Func_samples = Func_dist.sample(10)[..., 0] # shape: [10, 50]\n ```\n\n If using variational inference instead of HMC, we'd construct a Func using\n samples from the variational posterior:\n\n ```python\n (variational_loss,\n variational_distributions) = tfp.sts.build_factored_variational_loss(\n model=model, observed_time_series=observed_time_series)\n\n # OMITTED: take steps to optimize variational loss\n\n samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}\n Func_dist = tfp.sts.Func(model, observed_time_series,\n parameter_samples=samples,\n num_steps_Func=50)\n ```\n\n We can visualize the Func by plotting:\n\n ```python\n from matplotlib import pylab as plt\n def plot_Func(observed_time_series,\n Func_mean,\n Func_scale,\n Func_samples):\n plt.figure(figsize=(12, 6))\n\n num_steps = observed_time_series.shape[-1]\n num_steps_Func = Func_mean.shape[-1]\n num_steps_train = num_steps - num_steps_Func\n\n c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)\n plt.plot(np.arange(num_steps), observed_time_series,\n lw=2, color=c1, label='ground truth')\n\n Func_steps = np.arange(num_steps_train,\n num_steps_train+num_steps_Func)\n plt.plot(Func_steps, Func_samples.T, lw=1, color=c2, alpha=0.1)\n plt.plot(Func_steps, Func_mean, lw=2, ls='--', color=c2,\n label='Func')\n plt.fill_between(Func_steps,\n Func_mean - 2 * Func_scale,\n Func_mean + 2 * Func_scale, color=c2, alpha=0.2)\n\n plt.xlim([0, num_steps])\n 
plt.legend()\n\n plot_Func(observed_time_series,\n Func_mean=Func_mean,\n Func_scale=Func_scale,\n Func_samples=Func_samples)\n ```\n\n \"\"\"\n\n with tf.compat.v1.name_scope(\n 'Func',\n values=[arg_1, arg_2, arg_3]):\n [\n arg_1,\n arg_4\n ] = sts_util.canonicalize_observed_time_series_with_mask(\n arg_1)\n\n # Run filtering over the observed timesteps to extract the\n # latent state posterior at timestep T+1 (i.e., the final\n # filtering distribution, pushed through the transition model).\n # This is the prior for the Func model (\"today's prior\n # is yesterday's posterior\").\n arg_5 = dist_util.prefer_static_value(\n tf.shape(input=arg_1))[-2]\n arg_6 = arg_0.make_state_space_model(\n num_timesteps=arg_5, param_vals=arg_2)\n (arg_7, arg_7, arg_7, arg_8, arg_9, arg_7, arg_7\n ) = arg_6.forward_filter(arg_1, arg_4=arg_4)\n\n # Build a batch of state-space models over the Func period. Because\n # we'll use MixtureSameFamily to mix over the posterior draws, we need to\n # do some shenanigans to move the `[num_posterior_draws]` batch dimension\n # from the leftmost to the rightmost side of the model's batch shape.\n # TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an\n # arbitrary axis, and eliminate `move_dimension` calls here.\n arg_2 = arg_0._canonicalize_param_vals_as_map(arg_2) # pylint: disable=protected-access\n arg_10 = {\n param.name: dist_util.move_dimension(\n arg_2[param.name],\n 0, -(1 + _prefer_static_event_ndims(param.prior)))\n for param in arg_0.parameters}\n arg_11 = tfd.MultivariateNormalFullCovariance(\n loc=dist_util.move_dimension(arg_8[..., -1, :], 0, -2),\n covariance_matrix=dist_util.move_dimension(\n arg_9[..., -1, :, :], 0, -3))\n\n # Ugly hack: because we moved `num_posterior_draws` to the trailing (rather\n # than leading) dimension of parameters, the parameter batch shapes no\n # longer broadcast against the `constant_offset` attribute used in `sts.Sum`\n # models. We fix this by manually adding an extra broadcasting dim to\n # `constant_offset` if present.\n # The root cause of this hack is that we mucked with param dimensions above\n # and are now passing params that are 'invalid' in the sense that they don't\n # match the shapes of the model's param priors. The fix (as above) will be\n # to update MixtureSameFamily so we can avoid changing param dimensions\n # altogether.\n # TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an\n # arbitrary axis, and eliminate this hack.\n arg_12 = {}\n if hasattr(arg_0, 'constant_offset'):\n arg_12['constant_offset'] = tf.convert_to_tensor(\n value=arg_0.constant_offset,\n dtype=arg_11.dtype)[..., tf.newaxis]\n\n # We assume that any STS model that has a `constant_offset` attribute\n # will allow it to be overridden as a kwarg. 
This is currently just\n # `sts.Sum`.\n # TODO(b/120245392): when kwargs hack is removed, switch back to calling\n # the public version of `_make_state_space_model`.\n arg_13 = arg_0._make_state_space_model( # pylint: disable=protected-access\n num_timesteps=arg_3,\n param_map=arg_10,\n initial_state_prior=arg_11,\n initial_step=arg_5,\n **arg_12)\n\n arg_14 = dist_util.prefer_static_value(\n arg_13.batch_shape_tensor())[-1]\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n logits=tf.zeros([arg_14], dtype=arg_13.dtype)),\n components_distribution=arg_13)"} +{"_id": "doc_554", "title": "", "text": "def Func(arg_0, arg_1=-1, arg_2=False, arg_3=0):\n \"\"\"Returns `max` or `mask` if `max` is not finite.\"\"\"\n arg_4 = np.max(arg_0, arg_1=_astuple(arg_1), arg_2=arg_2)\n arg_5 = ~np.isfinite(arg_4)\n if arg_5.ndim > 0:\n arg_4[arg_5] = arg_3\n elif arg_5:\n arg_4 = arg_3\n return arg_4"} +{"_id": "doc_555", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None):\n \"\"\"Assert `x` has rank equal to `rank` or smaller.\n\n Example of adding a dependency to an operation:\n\n ```python\n with tf.control_dependencies([tf.Func(x, 2)]):\n output = tf.reduce_sum(x)\n ```\n\n Args:\n x: Numeric `Tensor`.\n rank: Scalar `Tensor`.\n data: The tensors to print out if the condition is False. Defaults to\n error message and first few entries of `x`.\n summarize: Print this many entries of each tensor.\n message: A string to prefix to the default message.\n name: A name for this operation (optional).\n Defaults to \"Func\".\n\n Returns:\n Op raising `InvalidArgumentError` unless `x` has specified rank or lower.\n If static checks determine `x` has correct rank, a `no_op` is returned.\n\n Raises:\n ValueError: If static checks determine `x` has wrong rank.\n \"\"\"\n with tf.compat.v2.name_scope(arg_5 or 'Func'):\n return tf.compat.v1.assert_less_equal(\n tf.rank(arg_0), arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_556", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"OneHotCategorical helper computing probs, cdf, etc over its support.\"\"\"\n with tf.compat.v1.name_scope(arg_2, 'eval_all_one_hot'):\n arg_3 = arg_1.event_shape_tensor()[-1]\n arg_4 = tf.size(input=arg_1.batch_shape_tensor())\n # Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`.\n arg_5 = tf.reshape(\n tf.eye(arg_3, dtype=arg_1.dtype),\n shape=tf.pad(\n tensor=tf.ones(arg_4, tf.int32),\n paddings=[[1, 1]],\n constant_values=arg_3))\n # Compute `fn(x)` then cyclically left-transpose one dim.\n arg_6 = tf.pad(tensor=tf.range(1, arg_4 + 1), paddings=[[0, 1]])\n return tf.transpose(a=arg_0(arg_1, arg_5), arg_6=arg_6)"} +{"_id": "doc_557", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a convert-to-tensor func, given a name, config, callable, etc.\"\"\"\n if arg_0 is None:\n return None\n\n if isinstance(arg_0, six.string_types):\n arg_0 = str(arg_0)\n return _deserialize(arg_0)\n\n if isinstance(arg_0, dict):\n return _deserialize(arg_0)\n\n if isinstance(arg_0, property):\n arg_0 = arg_0.fget\n if callable(arg_0):\n return arg_0\n\n raise ValueError('Could not interpret '\n 'convert-to-tensor function identifier:', arg_0)"} +{"_id": "doc_558", "title": "", "text": "def Func():\n \"\"\"Yields the top-most interceptor on the thread-local interceptor stack.\n\n Operations may be intercepted by multiple nested interceptors. 
Once reached,\n an operation can be forwarded through nested interceptors until resolved.\n To allow for nesting, implement interceptors by re-wrapping their first\n argument (`f`) as an `interceptable`. To avoid nesting, manipulate the\n computation without using `interceptable`.\n\n This function allows for nesting by manipulating the thread-local interceptor\n stack, so that operations are intercepted in the order of interceptor nesting.\n\n #### Examples\n\n ```python\n from tensorflow_probability import edward2 as ed\n\n def model():\n x = ed.Normal(loc=0., scale=1., name=\"x\")\n y = ed.Normal(loc=x, scale=1., name=\"y\")\n return x + y\n\n def double(f, *args, **kwargs):\n return 2. * interceptable(f)(*args, **kwargs)\n\n def set_y(f, *args, **kwargs):\n if kwargs.get(\"name\") == \"y\":\n kwargs[\"value\"] = 0.42\n return interceptable(f)(*args, **kwargs)\n\n with interception(double):\n with interception(set_y):\n z = model()\n ```\n\n This will firstly put `double` on the stack, and then `set_y`,\n resulting in the stack:\n (TOP) set_y -> double -> apply (BOTTOM)\n\n The execution of `model` is then (top lines are current stack state):\n 1) (TOP) set_y -> double -> apply (BOTTOM);\n `ed.Normal(0., 1., \"x\")` is intercepted by `set_y`, and as the name is not \"y\"\n the operation is simply forwarded to the next interceptor on the stack.\n\n 2) (TOP) double -> apply (BOTTOM);\n `ed.Normal(0., 1., \"x\")` is intercepted by `double`, to produce\n `2*ed.Normal(0., 1., \"x\")`, with the operation being forwarded down the stack.\n\n 3) (TOP) apply (BOTTOM);\n `ed.Normal(0., 1., \"x\")` is intercepted by `apply`, which simply calls the\n constructor.\n\n (At this point, the nested calls to `Func()`, produced by\n forwarding operations, exit, and the current stack is again:\n (TOP) set_y -> double -> apply (BOTTOM))\n\n 4) (TOP) set_y -> double -> apply (BOTTOM);\n `ed.Normal(0., 1., \"y\")` is intercepted by `set_y`,\n the value of `y` is set to 0.42 and the operation is forwarded down the stack.\n\n 5) (TOP) double -> apply (BOTTOM);\n `ed.Normal(0., 1., \"y\")` is intercepted by `double`, to produce\n `2*ed.Normal(0., 1., \"y\")`, with the operation being forwarded down the stack.\n\n 6) (TOP) apply (BOTTOM);\n `ed.Normal(0., 1., \"y\")` is intercepted by `apply`, which simply calls the\n constructor.\n\n The final values for `x` and `y` inside of `model()` are tensors where `x` is\n a random draw from Normal(0., 1.) doubled, and `y` is a constant 0.84, thus\n z = 2 * Normal(0., 1.) 
+ 0.84.\n \"\"\"\n try:\n arg_0 = _interceptor_stack.stack.pop()\n yield arg_0\n finally:\n _interceptor_stack.stack.append(arg_0)"} +{"_id": "doc_559", "title": "", "text": "def Func(arg_0):\n \"\"\"Decorator that wraps `func` so that its execution is intercepted.\n\n The wrapper passes `func` to the interceptor for the current thread.\n\n If there is no next interceptor, we perform an \"immediate\" call to `func`.\n That is, `func` terminates without forwarding its execution to another\n interceptor.\n\n Args:\n func: Function to wrap.\n\n Returns:\n The decorated function.\n \"\"\"\n @functools.wraps(arg_0)\n def func_wrapped(*arg_1, **arg_2):\n with get_next_interceptor() as interceptor:\n return interceptor(arg_0, *arg_1, **arg_2)\n\n return func_wrapped"} +{"_id": "doc_560", "title": "", "text": "def Func(arg_0, arg_1=2, arg_2=5.0):\n \"\"\"Generates synthetic data for binary classification.\n\n Args:\n num_examples: The number of samples to generate (scalar Python `int`).\n input_size: The input space dimension (scalar Python `int`).\n weights_prior_stddev: The prior standard deviation of the weight\n vector. (scalar Python `float`).\n\n Returns:\n random_weights: Sampled weights as a Numpy `array` of shape\n `[input_size]`.\n random_bias: Sampled bias as a scalar Python `float`.\n design_matrix: Points sampled uniformly from the cube `[-1,\n 1]^{input_size}`, as a Numpy `array` of shape `(num_examples,\n input_size)`.\n labels: Labels sampled from the logistic model `p(label=1) =\n logistic(dot(features, random_weights) + random_bias)`, as a Numpy\n `int32` `array` of shape `(num_examples, 1)`.\n \"\"\"\n arg_3 = arg_2 * np.random.randn(arg_1)\n arg_4 = np.random.randn()\n arg_5 = np.random.rand(arg_0, arg_1) * 2 - 1\n arg_6 = np.reshape(\n np.dot(arg_5, arg_3) + arg_4,\n (-1, 1))\n arg_7 = 1. 
/ (1 + np.exp(-arg_6))\n arg_8 = np.int32(arg_7 > np.random.rand(arg_0, 1))\n return arg_3, arg_4, np.float32(arg_5), arg_8"} +{"_id": "doc_561", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Build a Dataset iterator for supervised classification.\n\n Args:\n x: Numpy `array` of features, indexed by the first dimension.\n y: Numpy `array` of labels, with the same first dimension as `x`.\n batch_size: Number of elements in each training batch.\n\n Returns:\n batch_features: `Tensor` feed features, of shape\n `[batch_size] + x.shape[1:]`.\n batch_labels: `Tensor` feed of labels, of shape\n `[batch_size] + y.shape[1:]`.\n \"\"\"\n arg_3 = tf.data.Dataset.from_tensor_slices((arg_0, arg_1))\n arg_4 = arg_3.repeat().batch(arg_2)\n arg_5 = tf.compat.v1.data.make_one_shot_iterator(arg_4)\n arg_6, arg_7 = arg_5.get_next()\n return arg_6, arg_7"} +{"_id": "doc_562", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Validate `map_values` if `validate_args`==True.\"\"\"\n arg_2 = []\n\n arg_3 = 'Rank of map_values must be 1.'\n if tensorshape_util.rank(arg_0.shape) is not None:\n if tensorshape_util.rank(arg_0.shape) != 1:\n raise ValueError(arg_3)\n elif arg_1:\n arg_2.append(assert_util.assert_rank(arg_0, 1, arg_3=arg_3))\n\n arg_3 = 'Size of map_values must be greater than 0.'\n if tensorshape_util.num_elements(arg_0.shape) is not None:\n if tensorshape_util.num_elements(arg_0.shape) == 0:\n raise ValueError(arg_3)\n elif arg_1:\n arg_2.append(\n assert_util.assert_greater(\n tf.size(input=arg_0), 0, arg_3=arg_3))\n\n if arg_1:\n arg_2.append(\n assert_util.assert_equal(\n tf.math.is_strictly_increasing(arg_0),\n True,\n arg_3='map_values is not strictly increasing.'))\n\n return arg_2"} +{"_id": "doc_563", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3[arg_4[arg_5], arg_5]\n ) -> arg_4[tf.Tensor, TensorNest, TensorNest]:\n \"\"\"Calls `fn` and returns the gradients with respect to `fn`'s first output.\n\n Args:\n fn: A `TransitionOperator`.\n args: Arguments to `fn`\n\n Returns:\n ret: First output of `fn`.\n extra: Second output of `fn`.\n grads: Gradients of `ret` with respect to `args`.\n \"\"\"\n with tf.GradientTape() as tape:\n tape.watch(arg_2)\n arg_6, arg_7 = call_fn(arg_0, arg_2)\n arg_8 = tape.gradient(arg_6, arg_2)\n return arg_6, arg_7, arg_8"} +{"_id": "doc_564", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> arg_1:\n \"\"\"Maybe broadcasts `from_structure` to `to_structure`.\n\n If `from_structure` is a singleton, it is tiled to match the structure of\n `to_structure`. Note that the elements in `from_structure` are not copied if\n this tiling occurs.\n\n Args:\n from_structure: A structure.\n to_structure: A structure.\n\n Returns:\n new_from_structure: Same structure as `to_structure`.\n \"\"\"\n arg_3 = tf.nest.flatten(arg_0)\n arg_4 = tf.nest.flatten(arg_2)\n if len(arg_3) == 1:\n arg_3 *= len(arg_4)\n return tf.nest.pack_sequence_as(arg_2, arg_3)"} +{"_id": "doc_565", "title": "", "text": "def Func(arg_0: arg_1,\n arg_2: arg_3,\n arg_4: arg_5 = None\n ) -> Union[arg_1, Tuple[arg_1, arg_5]]:\n \"\"\"Transforms a log-prob function using a bijector.\n\n This takes a log-prob function and creates a new log-prob function that now\n takes takes state in the domain of the bijector, forward transforms that state\n and calls the original log-prob function. 
It then returns the log-probability\n that correctly accounts for this transformation.\n\n The forward-transformed state is pre-pended to the original log-prob\n function's extra returns and returned as the new extra return.\n\n For convenience you can also pass the initial state (in the original space),\n and this function will return the inverse transformed as the 2nd return value.\n You'd use this to initialize MCMC operators that operate in the transformed\n space.\n\n Args:\n log_prob_fn: Log prob fn.\n bijector: Bijector(s), must be of the same structure as the `log_prob_fn`\n inputs.\n init_state: Initial state, in the original space.\n\n Returns:\n transformed_log_prob_fn: Transformed log prob fn.\n transformed_init_state: If `init_state` is provided. Initial state in the\n transformed space.\n \"\"\"\n\n def wrapper(*arg_6):\n \"\"\"Transformed wrapper.\"\"\"\n arg_7 = arg_2\n\n arg_6 = tf.nest.map_structure(lambda x: 0. + x, arg_6)\n if len(arg_6) == 1:\n arg_6 = arg_6[0]\n elif isinstance(arg_7, list):\n arg_7 = tuple(arg_7)\n\n arg_8 = tf.nest.map_structure(lambda b, x: b.forward(x),\n arg_7, arg_6)\n arg_8 = arg_8 # type: Tuple[Any]\n arg_9, arg_10 = call_fn(arg_0, arg_8)\n arg_11 = tf.nest.map_structure(\n lambda x: tf.rank(x) - tf.rank(arg_9), arg_6)\n\n return arg_9 + sum(\n tf.nest.flatten(\n tf.nest.map_structure(\n lambda b, x, e: b.forward_log_det_jacobian(x, arg_11=e),\n arg_7, arg_6, arg_11))), [arg_8, arg_10]\n\n if arg_4 is None:\n return wrapper\n else:\n return wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), arg_2,\n arg_4)"} +{"_id": "doc_566", "title": "", "text": "def Func(arg_0: arg_1,\n arg_2: arg_3, arg_4: arg_5,\n arg_6: arg_5\n ) -> Tuple[arg_1, LeapFrogStepExtras]:\n \"\"\"Leapfrog `TransitionOperator`.\n\n Args:\n Func_state: LeapFrogStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n\n Returns:\n Func_state: LeapFrogStepState.\n Func_extras: LeapFrogStepExtras.\n \"\"\"\n arg_7 = arg_0.state\n arg_8 = arg_0.state_grads\n arg_9 = arg_0.momentum\n arg_2 = maybe_broadcast_structure(arg_2, arg_7)\n\n arg_7 = tf.nest.map_structure(tf.convert_to_tensor, arg_7)\n arg_9 = tf.nest.map_structure(tf.convert_to_tensor, arg_9)\n arg_7 = tf.nest.map_structure(tf.convert_to_tensor, arg_7)\n\n if arg_8 is None:\n arg_10, arg_10, arg_8 = call_and_grads(arg_4, arg_7)\n else:\n arg_8 = tf.nest.map_structure(tf.convert_to_tensor, arg_8)\n\n arg_9 = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, arg_9,\n arg_8, arg_2)\n\n arg_11, arg_12, arg_13 = call_and_grads(\n arg_6, arg_9)\n\n arg_7 = tf.nest.map_structure(lambda x, mg, s: x + mg * s, arg_7,\n arg_13, arg_2)\n\n arg_14, arg_15, arg_8 = call_and_grads(\n arg_4, arg_7)\n\n arg_9 = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, arg_9,\n arg_8, arg_2)\n\n return arg_1(arg_7, arg_8, arg_9), LeapFrogStepExtras(\n arg_14, arg_15, arg_11, arg_12)"} +{"_id": "doc_567", "title": "", "text": "def Func(arg_0: arg_1,\n arg_2: arg_1,\n arg_3: arg_4,\n arg_5=None) -> Tuple[arg_1, tf.Tensor, tf.Tensor]:\n \"\"\"Metropolis-Hastings step.\n\n This probabilistically chooses between `current_state` and `proposed_state`\n based on the `energy_change` so as to preserve detailed balance.\n\n Energy change is the negative of `log_accept_ratio`.\n\n Args:\n current_state: Current state.\n proposed_state: Proposed state.\n energy_change: E(proposed_state) - E(previous_state).\n seed: For 
reproducibility.\n\n Returns:\n new_state: The chosen state.\n is_accepted: Whether the proposed state was accepted.\n log_uniform: The random number that was used to select between the two\n states.\n \"\"\"\n arg_6 = tf.nest.flatten(arg_0)\n arg_7 = nest.flatten_up_to(arg_0, arg_2)\n # Impute the None's in the current state.\n arg_6 = [\n p if c is None else c for p, c in zip(arg_7, arg_6)\n ]\n arg_0 = tf.nest.pack_sequence_as(arg_0, arg_6)\n\n arg_0 = tf.nest.map_structure(tf.convert_to_tensor, arg_0)\n arg_2 = tf.nest.map_structure(tf.convert_to_tensor, arg_2)\n arg_3 = tf.convert_to_tensor(value=arg_3)\n\n arg_8 = -arg_3\n\n arg_9 = tf.math.log(\n tf.random.uniform(\n shape=tf.shape(input=arg_8),\n dtype=arg_8.dtype.base_dtype,\n arg_5=arg_5))\n arg_10 = arg_9 < arg_8\n\n arg_11 = mcmc_util.choose(\n arg_10, arg_2, arg_0, name='choose_next_state')\n return arg_11, arg_10, arg_9"} +{"_id": "doc_568", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6, arg_7,\n arg_8):\n \"\"\"Construct `scale` from various components.\n\n Args:\n identity_multiplier: floating point rank 0 `Tensor` representing a scaling\n done to the identity matrix.\n diag: Floating-point `Tensor` representing the diagonal matrix.`diag` has\n shape `[N1, N2, ... k]`, which represents a k x k diagonal matrix.\n tril: Floating-point `Tensor` representing the lower triangular matrix.\n `tril` has shape `[N1, N2, ... k, k]`, which represents a k x k lower\n triangular matrix.\n perturb_diag: Floating-point `Tensor` representing the diagonal matrix of\n the low rank update.\n perturb_factor: Floating-point `Tensor` representing factor matrix.\n shift: Floating-point `Tensor` representing `shift in `scale @ X + shift`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n dtype: `DType` for arg `Tensor` conversions.\n\n Returns:\n scale. In the case of scaling by a constant, scale is a\n floating point `Tensor`. 
Otherwise, scale is a `LinearOperator`.\n\n Raises:\n ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.\n \"\"\"\n arg_1 = _as_tensor(arg_1, \"identity_multiplier\",\n arg_8)\n arg_2 = _as_tensor(arg_2, \"diag\", arg_8)\n arg_3 = _as_tensor(arg_3, \"tril\", arg_8)\n arg_4 = _as_tensor(arg_4, \"perturb_diag\", arg_8)\n arg_5 = _as_tensor(arg_5, \"perturb_factor\", arg_8)\n\n # If possible, use the low rank update to infer the shape of\n # the identity matrix, when scale represents a scaled identity matrix\n # with a low rank update.\n arg_9 = None\n if arg_5 is not None:\n arg_9 = distribution_util.dimension_size(arg_5, axis=-2)\n\n if arg_0._is_only_identity_multiplier:\n if arg_7:\n return distribution_util.with_dependencies([\n assert_util.assert_none_equal(\n arg_1, tf.zeros([], arg_1.dtype),\n [\"identity_multiplier should be non-zero.\"])\n ], arg_1)\n return arg_1\n\n arg_10 = distribution_util.make_tril_scale(\n loc=arg_6,\n scale_tril=arg_3,\n scale_diag=arg_2,\n scale_identity_multiplier=arg_1,\n arg_7=arg_7,\n assert_positive=False,\n arg_9=arg_9)\n\n if arg_5 is not None:\n return tf.linalg.LinearOperatorLowRankUpdate(\n arg_10,\n u=arg_5,\n diag_update=arg_4,\n is_diag_update_positive=arg_4 is None,\n is_non_singular=True, # Implied by is_positive_definite=True.\n is_self_adjoint=True,\n is_positive_definite=True,\n is_square=True)\n\n return arg_10"} +{"_id": "doc_569", "title": "", "text": "def Func(arg_0=1., arg_1=None):\n \"\"\"Returns a callable that adds a random normal perturbation to the input.\n\n This function returns a callable that accepts a Python `list` of `Tensor`s of\n any shapes and `dtypes` representing the state parts of the `current_state`\n and a random seed. The supplied argument `scale` must be a `Tensor` or Python\n `list` of `Tensor`s representing the scale of the generated\n proposal. `scale` must broadcast with the state parts of `current_state`.\n The callable adds a sample from a zero-mean normal distribution with the\n supplied scales to each state part and returns a same-type `list` of `Tensor`s\n as the state parts of `current_state`.\n\n Args:\n scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`\n controlling the scale of the normal proposal distribution.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: 'Func'.\n\n Returns:\n Func: A callable accepting a Python `list` of `Tensor`s\n representing the state parts of the `current_state` and an `int`\n representing the random seed to be used to generate the proposal. The\n callable returns the same-type `list` of `Tensor`s as the input and\n represents the proposal for the RWM algorithm.\n \"\"\"\n def _fn(arg_2, arg_3):\n \"\"\"Adds a normal perturbation to the input state.\n\n Args:\n state_parts: A list of `Tensor`s of any shape and real dtype representing\n the state parts of the `current_state` of the Markov chain.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: `None`.\n\n Returns:\n perturbed_state_parts: A Python `list` of The `Tensor`s. 
Has the same\n shape and type as the `state_parts`.\n\n Raises:\n ValueError: if `scale` does not broadcast with `state_parts`.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_1, 'Func', values=[arg_2, arg_0, arg_3]):\n arg_4 = arg_0 if mcmc_util.is_list_like(arg_0) else [arg_0]\n if len(arg_4) == 1:\n arg_4 *= len(arg_2)\n if len(arg_2) != len(arg_4):\n raise ValueError('`scale` must broadcast with `state_parts`.')\n arg_5 = distributions.SeedStream(arg_3, salt='RandomWalkNormalFn')\n arg_6 = [\n tf.random.normal(\n mean=state_part,\n stddev=scale_part,\n shape=tf.shape(input=state_part),\n dtype=state_part.dtype.base_dtype,\n arg_3=arg_5())\n for scale_part, state_part in zip(arg_4, arg_2)\n ]\n\n return arg_6\n return _fn"} +{"_id": "doc_570", "title": "", "text": "def Func(arg_0=1., arg_1=None):\n \"\"\"Returns a callable that adds a random uniform perturbation to the input.\n\n For more details on `Func`, see\n `random_walk_normal_fn`. `scale` might\n be a `Tensor` or a list of `Tensor`s that should broadcast with state parts\n of the `current_state`. The generated uniform perturbation is sampled as a\n uniform point on the rectangle `[-scale, scale]`.\n\n Args:\n scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`\n controlling the upper and lower bound of the uniform proposal\n distribution.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: 'Func'.\n\n Returns:\n Func: A callable accepting a Python `list` of `Tensor`s\n representing the state parts of the `current_state` and an `int`\n representing the random seed used to generate the proposal. The callable\n returns the same-type `list` of `Tensor`s as the input and represents the\n proposal for the RWM algorithm.\n \"\"\"\n def _fn(arg_2, arg_3):\n \"\"\"Adds a uniform perturbation to the input state.\n\n Args:\n state_parts: A list of `Tensor`s of any shape and real dtype representing\n the state parts of the `current_state` of the Markov chain.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: `None`.\n\n Returns:\n perturbed_state_parts: A Python `list` of The `Tensor`s. 
Has the same\n shape and type as the `state_parts`.\n\n Raises:\n ValueError: if `scale` does not broadcast with `state_parts`.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_1, 'Func', values=[arg_2, arg_0, arg_3]):\n arg_4 = arg_0 if mcmc_util.is_list_like(arg_0) else [arg_0]\n if len(arg_4) == 1:\n arg_4 *= len(arg_2)\n if len(arg_2) != len(arg_4):\n raise ValueError('`scale` must broadcast with `state_parts`.')\n arg_5 = distributions.SeedStream(arg_3, salt='RandomWalkUniformFn')\n arg_6 = [\n tf.random.uniform(\n minval=state_part - scale_part,\n maxval=state_part + scale_part,\n shape=tf.shape(input=state_part),\n dtype=state_part.dtype.base_dtype,\n arg_3=arg_5())\n for scale_part, state_part in zip(arg_4, arg_2)\n ]\n return arg_6\n return _fn"} +{"_id": "doc_571", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a list of num_components batchwise probabilities.\"\"\"\n arg_2 = tf.nn.log_softmax if arg_1 else tf.nn.softmax\n arg_3 = arg_2(arg_0.cat.logits)\n arg_3 = tf.unstack(arg_3, num=arg_0.num_components, axis=-1)\n return arg_3"} +{"_id": "doc_572", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Validate `outcomes`, `logits` and `probs`'s shapes.\"\"\"\n arg_4 = []\n\n def validate_equal_last_dim(arg_5, arg_6, arg_7):\n if arg_5.shape.is_fully_defined() and arg_6.shape.is_fully_defined():\n if arg_5.shape[-1] != arg_6.shape[-1]:\n raise ValueError(arg_7)\n elif arg_3:\n arg_4.append(\n tf.compat.v1.assert_equal(\n tf.shape(input=arg_5)[-1],\n tf.shape(input=arg_6)[-1],\n arg_7=arg_7))\n\n if arg_1 is not None:\n validate_equal_last_dim(\n arg_0,\n arg_1,\n arg_7='Last dimension of outcomes and logits must be equal size.')\n if arg_2 is not None:\n validate_equal_last_dim(\n arg_0,\n arg_2,\n arg_7='Last dimension of outcomes and probs must be equal size.')\n\n arg_7 = 'Rank of outcomes must be 1.'\n if arg_0.shape.ndims is not None:\n if arg_0.shape.ndims != 1:\n raise ValueError(arg_7)\n elif arg_3:\n arg_4.append(tf.compat.v1.assert_rank(arg_0, 1, arg_7=arg_7))\n\n arg_7 = 'Size of outcomes must be greater than 0.'\n if arg_0.shape.num_elements() is not None:\n if arg_0.shape.num_elements() == 0:\n raise ValueError(arg_7)\n elif arg_3:\n arg_4.append(\n tf.compat.v1.assert_greater(\n tf.size(input=arg_0), 0, arg_7=arg_7))\n\n if arg_3:\n arg_4.append(\n tf.compat.v1.assert_equal(\n tf.math.is_strictly_increasing(arg_0),\n True,\n arg_7='outcomes is not strictly increasing.'))\n\n return arg_4"} +{"_id": "doc_573", "title": "", "text": "def Func(arg_0):\n \"\"\"Bayesian logistic regression, which returns labels given features.\"\"\"\n arg_1 = ed.MultivariateNormalDiag(\n loc=tf.zeros(arg_0.shape[1]), name=\"coeffs\")\n arg_2 = ed.Bernoulli(\n logits=tf.tensordot(arg_0, arg_1, [[1], [0]]), name=\"labels\")\n return arg_2"} +{"_id": "doc_574", "title": "", "text": "def Func():\n \"\"\"Builds the Covertype data set.\"\"\"\n import sklearn.datasets # pylint: disable=g-import-not-at-top\n arg_0 = sklearn.datasets.covtype.fetch_covtype()\n arg_1 = arg_0.data\n arg_2 = arg_0.target\n\n # Normalize features and append a column of ones for the intercept.\n arg_1 -= arg_1.mean(0)\n arg_1 /= arg_1.std(0)\n arg_1 = np.hstack([arg_1, np.ones([arg_1.shape[0], 1])])\n arg_1 = tf.cast(arg_1, dtype=tf.float32)\n\n # Binarize outcomes on whether it is a specific category.\n arg_3, arg_4 = np.unique(arg_2, return_counts=True)\n arg_5 = np.argmax(arg_4)\n arg_2 = (arg_2 == arg_5)\n arg_2 = tf.cast(arg_2, dtype=tf.int32)\n return arg_1, arg_2"} +{"_id": "doc_575", 
"title": "", "text": "def Func(arg_0, arg_1=0, arg_2=False, arg_3=None):\n \"\"\"Cholesky factor of the covariance matrix of vector-variate random samples.\n\n This function can be use to fit a multivariate normal to data.\n\n ```python\n tf.enable_eager_execution()\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n # Assume data.shape = (1000, 2). 1000 samples of a random variable in R^2.\n observed_data = read_data_samples(...)\n\n # The mean is easy\n mu = tf.reduce_mean(observed_data, axis=0)\n\n # Get the scale matrix\n L = tfp.stats.Func(observed_data)\n\n # Make the best fit multivariate normal (under maximum likelihood condition).\n mvn = tfd.MultivariateNormalTriL(loc=mu, scale_tril=L)\n\n # Plot contours of the pdf.\n xs, ys = tf.meshgrid(\n tf.linspace(-5., 5., 50), tf.linspace(-5., 5., 50), indexing='ij')\n xy = tf.stack((tf.reshape(xs, [-1]), tf.reshape(ys, [-1])), axis=-1)\n pdf = tf.reshape(mvn.prob(xy), (50, 50))\n CS = plt.contour(xs, ys, pdf, 10)\n plt.clabel(CS, inline=1, fontsize=10)\n ```\n\n Why does this work?\n Given vector-variate random variables `X = (X1, ..., Xd)`, one may obtain the\n sample covariance matrix in `R^{d x d}` (see `tfp.stats.covariance`).\n\n The [Cholesky factor](https://en.wikipedia.org/wiki/Cholesky_decomposition)\n of this matrix is analogous to standard deviation for scalar random variables:\n Suppose `X` has covariance matrix `C`, with Cholesky factorization `C = L L^T`\n Then multiplying a vector of iid random variables which have unit variance by\n `L` produces a vector with covariance `L L^T`, which is the same as `X`.\n\n ```python\n observed_data = read_data_samples(...)\n L = tfp.stats.Func(observed_data, sample_axis=0)\n\n # Make fake_data with the same covariance as observed_data.\n uncorrelated_normal = tf.random_normal(shape=(500, 10))\n fake_data = tf.linalg.matvec(L, uncorrelated_normal)\n ```\n\n Args:\n x: Numeric `Tensor`. The rightmost dimension of `x` indexes events. E.g.\n dimensions of a random vector.\n sample_axis: Scalar or vector `Tensor` designating axis holding samples.\n Default value: `0` (leftmost dimension). Cannot be the rightmost dimension\n (since this indexes events).\n keepdims: Boolean. Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'covariance'`).\n\n Returns:\n chol: `Tensor` of same `dtype` as `x`. 
The last two dimensions hold\n lower triangular matrices (the Cholesky factors).\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_3, 'Func', values=[arg_0, arg_1]):\n arg_1 = tf.convert_to_tensor(value=arg_1, dtype=tf.int32)\n arg_4 = covariance(\n arg_0, arg_1=arg_1, event_axis=-1, arg_2=arg_2)\n return tf.linalg.cholesky(arg_4)"} +{"_id": "doc_576", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=False, arg_3=None):\n \"\"\"Estimate standard deviation using samples.\n\n Given `N` samples of scalar valued random variable `X`, standard deviation may\n be estimated as\n\n ```none\n Stddev[X] := Sqrt[Var[X]],\n Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)},\n Xbar := N^{-1} sum_{n=1}^N X_n\n ```\n\n ```python\n x = tf.random_normal(shape=(100, 2, 3))\n\n # Func[i, j] is the sample standard deviation of the (i, j) batch member.\n Func = tfp.stats.Func(x, sample_axis=0)\n ```\n\n Scaling a unit normal by a standard deviation produces normal samples\n with that standard deviation.\n\n ```python\n observed_data = read_data_samples(...)\n Func = tfp.stats.Func(observed_data)\n\n # Make fake_data with the same standard deviation as observed_data.\n fake_data = Func * tf.random_normal(shape=(100,))\n ```\n\n Notice we divide by `N` (the numpy default), which does not create `NaN`\n when `N = 1`, but is slightly biased.\n\n Args:\n x: A numeric `Tensor` holding samples.\n sample_axis: Scalar or vector `Tensor` designating axis holding samples, or\n `None` (meaning all axis hold samples).\n Default value: `0` (leftmost dimension).\n keepdims: Boolean. Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'Func'`).\n\n Returns:\n Func: A `Tensor` of same `dtype` as the `x`, and rank equal to\n `rank(x) - len(sample_axis)`\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, 'Func', values=[arg_0, arg_1]):\n return tf.sqrt(variance(arg_0, arg_1=arg_1, arg_2=arg_2))"} +{"_id": "doc_577", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Rectify possibly negatively axis. 
Prefer return Python list.\"\"\"\n arg_0 = _make_list_or_1d_tensor(arg_0)\n\n arg_1 = tf.convert_to_tensor(value=arg_1, name='ndims', dtype=tf.int32)\n arg_2 = tf.get_static_value(arg_1)\n\n if _is_list_like(arg_0) and arg_2 is not None:\n # Static case\n arg_3 = []\n for arg_4 in arg_0:\n if arg_4 < 0:\n arg_4 = arg_2 + arg_4\n arg_3.append(arg_4)\n else:\n # Dynamic case\n arg_0 = tf.convert_to_tensor(value=arg_0, name='axis', dtype=tf.int32)\n arg_3 = tf.where(arg_0 >= 0, arg_0, arg_0 + arg_1)\n\n return arg_3"} +{"_id": "doc_578", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"A version of squeeze that works with dynamic axis.\"\"\"\n arg_0 = tf.convert_to_tensor(value=arg_0, name='x')\n if arg_1 is None:\n return tf.squeeze(arg_0, arg_1=None)\n arg_1 = tf.convert_to_tensor(value=arg_1, name='axis', dtype=tf.int32)\n arg_1 += tf.zeros([1], dtype=arg_1.dtype) # Make axis at least 1d.\n arg_2, arg_3 = tf.compat.v1.setdiff1d(tf.range(0, tf.rank(arg_0)), arg_1)\n return tf.reshape(arg_0, tf.gather(tf.shape(input=arg_0), arg_2))"} +{"_id": "doc_579", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Standardize input `x` to a unit normal.\"\"\"\n with tf.name_scope(\"standardize\"):\n return (arg_1 - arg_0.loc) / arg_0.scale"} +{"_id": "doc_580", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=arg_4.float32,\n arg_6=True,\n arg_7=None,\n arg_8=None):\n r\"\"\"Returns a sample from the `dim` dimensional Halton sequence.\n\n Warning: The sequence elements take values only between 0 and 1. Care must be\n taken to appropriately transform the domain of a function if it differs from\n the unit cube before evaluating integrals using Halton samples. It is also\n important to remember that quasi-random numbers without randomization are not\n a replacement for pseudo-random numbers in every context. Quasi random numbers\n are completely deterministic and typically have significant negative\n autocorrelation unless randomization is used.\n\n Computes the members of the low discrepancy Halton sequence in dimension\n `dim`. The `dim`-dimensional sequence takes values in the unit hypercube in\n `dim` dimensions. Currently, only dimensions up to 1000 are supported. The\n prime base for the k-th axes is the k-th prime starting from 2. For example,\n if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first\n element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more\n complete description of the Halton sequences see\n [here](https://en.wikipedia.org/wiki/Halton_sequence). For low discrepancy\n sequences and their applications see\n [here](https://en.wikipedia.org/wiki/Low-discrepancy_sequence).\n\n If `randomized` is true, this function produces a scrambled version of the\n Halton sequence introduced by [Owen (2017)][1]. For the advantages of\n randomization of low discrepancy sequences see [here](\n https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo).\n\n The number of samples produced is controlled by the `num_results` and\n `sequence_indices` parameters. The user must supply either `num_results` or\n `sequence_indices` but not both.\n The former is the number of samples to produce starting from the first\n element. If `sequence_indices` is given instead, the specified elements of\n the sequence are generated. 
For example, sequence_indices=tf.range(10) is\n equivalent to specifying n=10.\n\n #### Examples\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n # Produce the first 1000 members of the Halton sequence in 3 dimensions.\n num_results = 1000\n dim = 3\n sample = tfp.mcmc.Func(\n dim,\n num_results=num_results,\n seed=127)\n\n # Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional\n # hypercube.\n powers = tf.range(1.0, limit=dim + 1)\n integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))\n true_value = 1.0 / tf.reduce_prod(powers + 1.0)\n with tf.Session() as session:\n values = session.run((integral, true_value))\n\n # Produces a relative absolute error of 1.7%.\n print (\"Estimated: %f, True Value: %f\" % values)\n\n # Now skip the first 1000 samples and recompute the integral with the next\n # thousand samples. The sequence_indices argument can be used to do this.\n\n\n sequence_indices = tf.range(start=1000, limit=1000 + num_results,\n dtype=tf.int32)\n sample_leaped = tfp.mcmc.Func(\n dim,\n sequence_indices=sequence_indices,\n seed=111217)\n\n integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,\n axis=-1))\n with tf.Session() as session:\n values = session.run((integral_leaped, true_value))\n # Now produces a relative absolute error of 0.05%.\n print (\"Leaped Estimated: %f, True Value: %f\" % values)\n ```\n\n Args:\n dim: Positive Python `int` representing each sample's `event_size.` Must\n not be greater than 1000.\n num_results: (Optional) Positive scalar `Tensor` of dtype int32. The number\n of samples to generate. Either this parameter or sequence_indices must\n be specified but not both. If this parameter is None, then the behaviour\n is determined by the `sequence_indices`.\n Default value: `None`.\n sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The\n elements of the sequence to compute specified by their position in the\n sequence. The entries index into the Halton sequence starting with 0 and\n hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will\n produce the first, sixth and seventh elements of the sequence. If this\n parameter is None, then the `num_results` parameter must be specified\n which gives the number of desired samples starting from the first sample.\n Default value: `None`.\n dtype: (Optional) The dtype of the sample. One of: `float16`, `float32` or\n `float64`.\n Default value: `tf.float32`.\n randomized: (Optional) bool indicating whether to produce a randomized\n Halton sequence. If True, applies the randomization described in\n [Owen (2017)][1].\n Default value: `True`.\n seed: (Optional) Python integer to seed the random number generator. Only\n used if `randomized` is True. If not supplied and `randomized` is True,\n no seed is set.\n Default value: `None`.\n name: (Optional) Python `str` describing ops managed by this function. If\n not supplied the name of this function is used.\n Default value: \"Func\".\n\n Returns:\n halton_elements: Elements of the Halton sequence. `Tensor` of supplied dtype\n and `shape` `[num_results, dim]` if `num_results` was specified or shape\n `[s, dim]` where s is the size of `sequence_indices` if `sequence_indices`\n were specified.\n\n Raises:\n ValueError: if both `sequence_indices` and `num_results` were specified or\n if dimension `dim` is less than 1 or greater than 1000.\n\n #### References\n\n [1]: Art B. Owen. A randomized Halton algorithm in R. _arXiv preprint\n arXiv:1706.02808_, 2017. 
https://arxiv.org/abs/1706.02808\n \"\"\"\n if arg_0 < 1 or arg_0 > _MAX_DIMENSION:\n raise ValueError(\n 'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,\n arg_0))\n if (arg_1 is None) == (arg_2 is None):\n raise ValueError('Either `num_results` or `sequence_indices` must be'\n ' specified but not both.')\n\n if not arg_3.is_floating:\n raise ValueError('dtype must be of `float`-type')\n\n with arg_4.compat.v1.name_scope(\n arg_8, 'sample', values=[arg_1, arg_2]):\n # Here and in the following, the shape layout is as follows:\n # [sample dimension, event dimension, coefficient dimension].\n # The coefficient dimension is an intermediate axes which will hold the\n # weights of the starting integer when expressed in the (prime) base for\n # an event dimension.\n if arg_1 is not None:\n arg_1 = arg_4.convert_to_tensor(value=arg_1)\n if arg_2 is not None:\n arg_2 = arg_4.convert_to_tensor(value=arg_2)\n arg_9 = _get_indices(arg_1, arg_2, arg_3)\n arg_10 = arg_4.constant(_PRIMES[0:arg_0], arg_3=arg_3, shape=[arg_0, 1])\n\n arg_11 = _base_expansion_size(\n arg_4.reduce_max(input_tensor=arg_9), arg_10)\n\n arg_12 = arg_4.reduce_max(input_tensor=arg_11)\n\n # The powers of the radixes that we will need. Note that there is a bit\n # of an excess here. Suppose we need the place value coefficients of 7\n # in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits\n # for base 3. However, we can only create rectangular tensors so we\n # store both expansions in a [2, 3] tensor. This leads to the problem that\n # we might end up attempting to raise large numbers to large powers. For\n # example, base 2 expansion of 1024 has 10 digits. If we were in 10\n # dimensions, then the 10th prime (29) we will end up computing 29^10 even\n # though we don't need it. We avoid this by setting the exponents for each\n # axes to 0 beyond the maximum value needed for that dimension.\n arg_13 = arg_4.tile([arg_4.range(arg_12)], [arg_0, 1])\n\n # The mask is true for those coefficients that are irrelevant.\n arg_14 = arg_13 >= arg_11\n arg_15 = arg_4.where(\n arg_14,\n arg_4.zeros_like(arg_13),\n arg_13)\n arg_16 = arg_10 ** arg_15\n # The following computes the base b expansion of the indices. Suppose,\n # x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with\n # the vector (1, b, b^2, b^3, ...) will produce\n # (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care\n # about. Noting that all a_i < b by definition of place value expansion,\n # we see that taking the elements mod b of the above vector produces the\n # place value expansion coefficients.\n arg_17 = arg_4.math.floordiv(arg_9, arg_16)\n arg_17 *= 1. - arg_4.cast(arg_14, arg_3)\n arg_17 %= arg_10\n if not arg_6:\n arg_17 /= arg_10\n return arg_4.reduce_sum(input_tensor=arg_17 / arg_16, axis=-1)\n arg_18 = distributions.SeedStream(arg_7, salt='MCMCSampleHaltonSequence')\n arg_17 = _randomize(arg_17, arg_10, arg_7=arg_18())\n # Remove the contribution from randomizing the trailing zero for the\n # axes where max_size_by_axes < max_size. This will be accounted\n # for separately below (using zero_correction).\n arg_17 *= 1. - arg_4.cast(arg_14, arg_3)\n arg_17 /= arg_10\n arg_19 = arg_4.reduce_sum(input_tensor=arg_17 / arg_16, axis=-1)\n\n # The randomization used in Owen (2017) does not leave 0 invariant. While\n # we have accounted for the randomization of the first `max_size_by_axes`\n # coefficients, we still need to correct for the trailing zeros. 
Luckily,\n # this is equivalent to adding a uniform random value scaled so the first\n # `max_size_by_axes` coefficients are zero. The following statements perform\n # this correction.\n arg_20 = arg_4.random.uniform([arg_0, 1], arg_7=arg_18(), arg_3=arg_3)\n arg_20 /= arg_10 ** arg_11\n return arg_19 + arg_4.reshape(arg_20, [-1])"} +{"_id": "doc_581", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Uniform iid sample from the space of permutations.\n\n Draws a sample of size `num_results` from the group of permutations of degrees\n specified by the `dims` tensor. These are packed together into one tensor\n such that each row is one sample from each of the dimensions in `dims`. For\n example, if dims = [2,3] and num_results = 2, the result is a tensor of shape\n [2, 2 + 3] and the first row of the result might look like:\n [1, 0, 2, 0, 1]. The first two elements are a permutation over 2 elements\n while the next three are a permutation over 3 elements.\n\n Args:\n num_results: A positive scalar `Tensor` of integral type. The number of\n draws from the discrete uniform distribution over the permutation groups.\n dims: A 1D `Tensor` of the same dtype as `num_results`. The degree of the\n permutation groups from which to sample.\n seed: (Optional) Python integer to seed the random number generator.\n\n Returns:\n permutations: A `Tensor` of shape `[num_results, sum(dims)]` and the same\n dtype as `dims`.\n \"\"\"\n arg_3 = tf.range(arg_0)\n arg_4 = distributions.SeedStream(arg_2, salt='MCMCSampleHaltonSequence3')\n def generate_one(arg_5):\n arg_2 = arg_4()\n arg_6 = lambda _: tf.random.shuffle(tf.range(arg_5), arg_2=arg_2)\n return tf.map_fn(\n arg_6,\n arg_3,\n parallel_iterations=1 if arg_2 is not None else 10)\n return tf.concat([generate_one(arg_5) for arg_5 in tf.unstack(arg_1)],\n axis=-1)"} +{"_id": "doc_582", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Generates starting points for the Halton sequence procedure.\n\n The k'th element of the sequence is generated starting from a positive integer\n which must be distinct for each `k`. It is conventional to choose the starting\n point as `k` itself (or `k+1` if k is zero based). This function generates\n the starting integers for the required elements and reshapes the result for\n later use.\n\n Args:\n num_results: Positive scalar `Tensor` of dtype int32. The number of samples\n to generate. If this parameter is supplied, then `sequence_indices`\n should be None.\n sequence_indices: `Tensor` of dtype int32 and rank 1. The entries\n index into the Halton sequence starting with 0 and hence, must be whole\n numbers. For example, sequence_indices=[0, 5, 6] will produce the first,\n sixth and seventh elements of the sequence. If this parameter is not None\n then `n` must be None.\n dtype: The dtype of the sample. 
One of `float32` or `float64`.\n Default is `float32`.\n name: Python `str` name which describes ops created by this function.\n\n Returns:\n indices: `Tensor` of dtype `dtype` and shape = `[n, 1, 1]`.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, 'Func',\n [arg_0, arg_1]):\n if arg_1 is None:\n arg_0 = tf.cast(arg_0, arg_2=arg_2)\n arg_1 = tf.range(arg_0, arg_2=arg_2)\n else:\n arg_1 = tf.cast(arg_1, arg_2)\n\n # Shift the indices so they are 1 based.\n arg_4 = arg_1 + 1\n\n # Reshape to make space for the event dimension and the place value\n # coefficients.\n return tf.reshape(arg_4, [-1, 1, 1])"} +{"_id": "doc_583", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Computes the number of terms in the place value expansion.\n\n Let num = a0 + a1 b + a2 b^2 + ... ak b^k be the place value expansion of\n `num` in base b (ak <> 0). This function computes and returns `k+1` for each\n base `b` specified in `bases`.\n\n This can be inferred from the base `b` logarithm of `num` as follows:\n $$k = Floor(log_b (num)) + 1 = Floor( log(num) / log(b)) + 1$$\n\n Args:\n num: Scalar `Tensor` of dtype either `float32` or `float64`. The number to\n compute the base expansion size of.\n bases: `Tensor` of the same dtype as num. The bases to compute the size\n against.\n\n Returns:\n Tensor of same dtype and shape as `bases` containing the size of num when\n written in that base.\n \"\"\"\n return tf.floor(tf.math.log(arg_0) / tf.math.log(arg_1)) + 1"} +{"_id": "doc_584", "title": "", "text": "def Func(arg_0):\n # Based on\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n \"\"\"Returns sorted array of primes such that `2 <= prime < n`.\"\"\"\n arg_1 = np.array((2, 3, 5))\n if arg_0 <= 6:\n return arg_1[arg_1 < arg_0]\n arg_2 = np.ones(arg_0 // 3 + (arg_0 % 6 == 2), dtype=np.bool)\n arg_2[0] = False\n arg_3 = int(arg_0 ** 0.5) // 3 + 1\n for arg_4 in range(arg_3):\n if not arg_2[arg_4]:\n continue\n arg_5 = 3 * arg_4 + 1 | 1\n arg_2[arg_5 ** 2 // 3::2 * arg_5] = False\n arg_2[(arg_5 ** 2 + 4 * arg_5 - 2 * arg_5 * (arg_4 & 1)) // 3::2 * arg_5] = False\n return np.r_[2, 3, 3 * np.nonzero(arg_2)[0] + 1 | 1]"} +{"_id": "doc_585", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the machine epsilon for the supplied dtype.\"\"\"\n if isinstance(arg_0, tf.DType):\n arg_0 = arg_0.as_numpy_dtype()\n return np.finfo(arg_0).eps"} +{"_id": "doc_586", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=1e-6,\n arg_6=0.66,\n arg_7=5.0,\n arg_8=0.1,\n arg_9=0.9,\n arg_10=0.1,\n arg_11=50,\n arg_12=None):\n \"\"\"The Hager Zhang line search algorithm.\n\n Performs an inexact line search based on the algorithm of\n [Hager and Zhang (2006)][2].\n The univariate objective function `value_and_gradients_function` is typically\n generated by projecting a multivariate objective function along a search\n direction. Suppose the multivariate function to be minimized is\n `g(x1,x2, .. xn)`. Let (d1, d2, ..., dn) be the direction along which we wish\n to perform a line search. Then the projected univariate function to be used\n for line search is\n\n ```None\n f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a)\n ```\n\n The directional derivative along (d1, d2, ..., dn) is needed for this\n procedure. This also corresponds to the derivative of the projected function\n `f(a)` with respect to `a`. 
Note that this derivative must be negative for\n `a = 0` if the direction is a descent direction.\n\n The usual stopping criteria for the line search is the satisfaction of the\n (weak) Wolfe conditions. For details of the Wolfe conditions, see\n ref. [3]. On a finite precision machine, the exact Wolfe conditions can\n be difficult to satisfy when one is very close to the minimum and as argued\n by [Hager and Zhang (2005)][1], one can only expect the minimum to be\n determined within square root of machine precision. To improve the situation,\n they propose to replace the Wolfe conditions with an approximate version\n depending on the derivative of the function which is applied only when one\n is very close to the minimum. The following algorithm implements this\n enhanced scheme.\n\n ### Usage:\n\n Primary use of line search methods is as an internal component of a class of\n optimization algorithms (called line search based methods as opposed to\n trust region methods). Hence, the end user will typically not want to access\n line search directly. In particular, inexact line search should not be\n confused with a univariate minimization method. The stopping criteria of line\n search is the satisfaction of Wolfe conditions and not the discovery of the\n minimum of the function.\n\n With this caveat in mind, the following example illustrates the standalone\n usage of the line search.\n\n ```python\n # Define value and gradient namedtuple\n ValueAndGradient = namedtuple('ValueAndGradient', ['x', 'f', 'df'])\n # Define a quadratic target with minimum at 1.3.\n def value_and_gradients_function(x):\n return ValueAndGradient(x=x, f=(x - 1.3) ** 2, df=2 * (x-1.3))\n # Set initial step size.\n step_size = tf.constant(0.1)\n ls_result = tfp.optimizer.linesearch.Func(\n value_and_gradients_function, initial_step_size=step_size)\n # Evaluate the results.\n with tf.Session() as session:\n results = session.run(ls_result)\n # Ensure convergence.\n assert results.converged\n # If the line search converged, the left and the right ends of the\n # bracketing interval are identical.\n assert results.left.x == result.right.x\n # Print the number of evaluations and the final step size.\n print (\"Final Step Size: %f, Evaluations: %d\" % (results.left.x,\n results.func_evals))\n ```\n\n ### References:\n [1]: William Hager, Hongchao Zhang. A new conjugate gradient method with\n guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1,\n pp. 170-172. 2005.\n https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf\n\n [2]: William Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate\n gradient method with guaranteed descent. ACM Transactions on Mathematical\n Software, Vol 32., 1, pp. 113-137. 2006.\n http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf\n\n [3]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in\n Operations Research. pp 33-36. 2006\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that\n correspond to scalar tensors of real dtype containing the point at which\n the function was evaluated, the value of the function, and its\n derivative at that point. The other namedtuple fields, if present,\n should be tensors or sequences (possibly nested) of tensors.\n In usual optimization application, this function would be generated by\n projecting the multivariate objective function along some specific\n direction. 
The direction is determined by some other procedure but should\n be a descent direction (i.e. the derivative of the projected univariate\n function must be negative at 0.).\n Alternatively, the function may represent the batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned\n namedtuple should each be a tensor of shape [n], with the corresponding\n input points, function values, and derivatives at those input points.\n initial_step_size: (Optional) Scalar positive `Tensor` of real dtype, or\n a tensor of shape [n] in batching mode. The initial value (or values) to\n try to bracket the minimum. Default is `1.` as a float32.\n Note that this point need not necessarily bracket the minimum for the line\n search to work correctly but the supplied value must be greater than 0.\n A good initial value will make the search converge faster.\n value_at_initial_step: (Optional) The full return value of evaluating\n value_and_gradients_function at initial_step_size, i.e. a namedtuple with\n 'x', 'f', 'df', if already known by the caller. If supplied the value of\n `initial_step_size` will be ignored, otherwise the tuple will be computed\n by evaluating value_and_gradients_function.\n value_at_zero: (Optional) The full return value of\n value_and_gradients_function at `0.`, i.e. a namedtuple with\n 'x', 'f', 'df', if already known by the caller. If not supplied the tuple\n will be computed by evaluating value_and_gradients_function.\n converged: (Optional) In batching mode a tensor of shape [n], indicating\n batch members which have already converged and no further search should\n be performed. These batch members are also reported as converged in the\n output, and both their `left` and `right` are set to the\n `value_at_initial_step`.\n threshold_use_approximate_wolfe_condition: Scalar positive `Tensor`\n of real dtype. Corresponds to the parameter 'epsilon' in\n [Hager and Zhang (2006)][2]. Used to estimate the\n threshold at which the line search switches to approximate Wolfe\n conditions.\n shrinkage_param: Scalar positive Tensor of real dtype. Must be less than\n `1.`. Corresponds to the parameter `gamma` in\n [Hager and Zhang (2006)][2].\n If the secant**2 step does not shrink the bracketing interval by this\n proportion, a bisection step is performed to reduce the interval width.\n expansion_param: Scalar positive `Tensor` of real dtype. Must be greater\n than `1.`. Used to expand the initial interval in case it does not bracket\n a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. Corresponds to `delta` in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager and Zhang (2006)][2].\n step_size_shrink_param: Positive scalar `Tensor` of real dtype. Bounded\n above by `1`. If the supplied step size is too big (i.e. either the\n objective value or the gradient at that point is infinite), this factor\n is used to shrink the step size until it is finite.\n max_iterations: Positive scalar `Tensor` of integral dtype or None. The\n maximum number of iterations to perform in the line search. 
The number of\n iterations used to bracket the minimum are also counted against this\n parameter.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'Func' is used.\n\n Returns:\n results: A namedtuple containing the following attributes.\n converged: Boolean `Tensor` of shape [n]. Whether a point satisfying\n Wolfe/Approx wolfe was found.\n failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.\n if either the objective function or the gradient are not finite at\n an evaluation point.\n iterations: Scalar int32 `Tensor`. Number of line search iterations made.\n func_evals: Scalar int32 `Tensor`. Number of function evaluations made.\n left: A namedtuple, as returned by value_and_gradients_function,\n of the left end point of the final bracketing interval. Values are\n equal to those of `right` on batch members where converged is True.\n Otherwise, it corresponds to the last interval computed.\n right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the final bracketing interval. Values are\n equal to those of `left` on batch members where converged is True.\n Otherwise, it corresponds to the last interval computed.\n \"\"\"\n with tf.compat.v1.name_scope(arg_12, 'Func', [\n arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6,\n arg_7, arg_8, arg_9]):\n arg_13, arg_14, arg_15, arg_16 = _prepare_args(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_5)\n\n arg_17 = (hzl.is_finite(arg_13) & (arg_13.df < 0) &\n tf.math.is_finite(arg_14.x) & (arg_14.x > 0))\n\n if arg_4 is None:\n arg_18 = tf.zeros_like(arg_17) # i.e. all false.\n else:\n arg_18 = tf.convert_to_tensor(value=arg_4)\n\n arg_19 = ~arg_18 & ~arg_17\n arg_20 = ~arg_18 & arg_17\n\n # Note: _fix_step_size returns immediately if either all inputs are invalid\n # or none of the active ones need fixing.\n arg_21, arg_22, arg_23 = _fix_step_size(\n arg_0, arg_14, arg_20,\n arg_10)\n\n arg_24 = HagerZhangLineSearchResult(\n arg_4=arg_18,\n arg_19=arg_19 | arg_23,\n func_evals=arg_16 + arg_21,\n iterations=tf.convert_to_tensor(value=0),\n left=arg_13,\n right=hzl.val_where(arg_18, arg_13, arg_22))\n\n def _apply_bracket_and_search():\n \"\"\"Bracketing and searching to do for valid inputs.\"\"\"\n return _bracket_and_search(\n arg_0, arg_24, arg_15, arg_11,\n arg_6, arg_7, arg_8,\n arg_9)\n\n arg_25 = ~arg_24.failed & ~arg_24.converged\n return prefer_static.cond(\n tf.reduce_any(input_tensor=arg_25),\n _apply_bracket_and_search,\n lambda: arg_24)"} +{"_id": "doc_587", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Shrinks the input step size until the value and grad become finite.\"\"\"\n # The maximum iterations permitted are determined as the number of halvings\n # it takes to reduce 1 to 0 in the given dtype.\n arg_4 = np.ceil(-np.log2(_machine_eps(arg_1.x.dtype)))\n\n def _cond(arg_5, arg_6, arg_7):\n del arg_6 # Unused.\n return (arg_5 < arg_4) & tf.reduce_any(input_tensor=arg_7)\n\n def _body(arg_5, arg_6, arg_7):\n arg_8 = tf.where(arg_7, arg_6.x * arg_3, arg_6.x)\n arg_9 = arg_0(arg_8)\n arg_10 = arg_7 & ~hzl.is_finite(arg_9)\n return (arg_5 + 1, arg_9, arg_10)\n\n arg_7 = arg_2 & ~hzl.is_finite(arg_1)\n return tf.while_loop(\n cond=_cond, body=_body, loop_vars=(0, arg_1, arg_7))"} +{"_id": "doc_588", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7):\n \"\"\"The main loop of line search after the minimum has been bracketed.\n\n Args:\n 
value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that\n correspond to scalar tensors of real dtype containing the point at which\n the function was evaluated, the value of the function, and its\n derivative at that point. The other namedtuple fields, if present,\n should be tensors or sequences (possibly nested) of tensors.\n In usual optimization application, this function would be generated by\n projecting the multivariate objective function along some specific\n direction. The direction is determined by some other procedure but should\n be a descent direction (i.e. the derivative of the projected univariate\n function must be negative at 0.).\n Alternatively, the function may represent the batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned\n namedtuple should each be a tensor of shape [n], with the corresponding\n input points, function values, and derivatives at those input points.\n search_interval: Instance of `HagerZhangLineSearchResults` containing\n the current line search interval.\n val_0: A namedtuple as returned by value_and_gradients_function evaluated\n at `0.`. The gradient must be negative (i.e. must be a descent direction).\n f_lim: Scalar `Tensor` of float dtype.\n max_iterations: Positive scalar `Tensor` of integral dtype. The maximum\n number of iterations to perform in the line search. The number of\n iterations used to bracket the minimum are also counted against this\n parameter.\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. Corresponds to `delta` in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager and Zhang (2006)][2].\n shrinkage_param: Scalar positive Tensor of real dtype. Must be less than\n `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].\n\n Returns:\n A namedtuple containing the following fields.\n converged: Boolean `Tensor` of shape [n]. Whether a point satisfying\n Wolfe/Approx wolfe was found.\n failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.\n if either the objective function or the gradient are not finite at\n an evaluation point.\n iterations: Scalar int32 `Tensor`. Number of line search iterations made.\n func_evals: Scalar int32 `Tensor`. 
Number of function evaluations made.\n left: A namedtuple, as returned by value_and_gradients_function,\n of the left end point of the updated bracketing interval.\n right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the updated bracketing interval.\n \"\"\"\n\n def _loop_cond(arg_8):\n \"\"\"Loop condition.\"\"\"\n arg_9 = ~(arg_8.converged | arg_8.failed)\n return (arg_8.iterations <\n arg_4) & tf.reduce_any(input_tensor=arg_9)\n\n def _loop_body(arg_8):\n \"\"\"The loop body.\"\"\"\n arg_10 = hzl.secant2(\n arg_0, arg_2, arg_8, arg_3,\n arg_5, arg_6)\n arg_11 = HagerZhangLineSearchResult(\n converged=arg_10.converged,\n failed=arg_10.failed,\n iterations=arg_8.iterations + 1,\n func_evals=arg_10.num_evals,\n left=arg_10.left,\n right=arg_10.right)\n\n arg_12 = ~(arg_11.converged | arg_11.failed)\n\n def _do_check_shrinkage():\n \"\"\"Check if interval has shrinked enough.\"\"\"\n arg_13 = arg_8.right.x - arg_8.left.x\n arg_14 = arg_11.right.x - arg_11.left.x\n arg_15 = arg_14 < arg_13 * arg_7\n arg_16 = (\n _very_close(arg_8.left.f, arg_8.right.f) &\n _very_close(arg_11.left.f, arg_11.right.f))\n\n arg_17 = (\n arg_12 & arg_15 & arg_16)\n arg_18 = arg_12 & ~arg_15\n\n arg_19 = arg_11._replace(\n converged=arg_11.converged | arg_17)\n\n def _apply_inner_bisect():\n return _line_search_inner_bisection(\n arg_0, arg_19,\n arg_18, arg_3)\n\n return prefer_static.cond(\n tf.reduce_any(input_tensor=arg_18),\n _apply_inner_bisect,\n lambda: arg_19)\n\n arg_20 = prefer_static.cond(\n tf.reduce_any(input_tensor=arg_12),\n _do_check_shrinkage,\n lambda: arg_11)\n\n arg_21 = (\n ~arg_20.failed & _very_close(arg_20.left.x, arg_20.right.x))\n return [arg_20._replace(converged=arg_20.converged | arg_21)]\n\n return tf.while_loop(\n cond=_loop_cond,\n body=_loop_body,\n loop_vars=[arg_1],\n parallel_iterations=1)[0]"} +{"_id": "doc_589", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Performs bisection and updates the interval.\"\"\"\n arg_4 = (arg_1.left.x + arg_1.right.x) / 2\n arg_5 = arg_0(arg_4)\n arg_6 = hzl.is_finite(arg_5)\n\n arg_7 = arg_2 & arg_6\n arg_8 = arg_2 & ~arg_6\n arg_9 = arg_1._replace(\n failed=arg_1.failed | arg_8,\n func_evals=arg_1.func_evals + 1)\n\n def _apply_update():\n arg_10 = hzl.update(\n arg_0, arg_9.left, arg_9.right,\n arg_5, arg_3, arg_2=arg_7)\n return HagerZhangLineSearchResult(\n converged=arg_9.converged,\n failed=arg_9.failed | arg_10.failed,\n iterations=arg_9.iterations + arg_10.iteration,\n func_evals=arg_9.func_evals + arg_10.num_evals,\n left=arg_10.left,\n right=arg_10.right)\n\n return prefer_static.cond(\n tf.reduce_any(input_tensor=arg_7),\n _apply_update,\n lambda: arg_9)"} +{"_id": "doc_590", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Wrapper for tf.Print which supports lists and namedtuples for printing.\"\"\"\n arg_2 = []\n for arg_3 in arg_1:\n # Checks if it is a namedtuple.\n if hasattr(arg_3, '_fields'):\n for arg_4 in arg_3._fields:\n arg_2.extend([arg_4, _to_str(getattr(arg_3, arg_4))])\n continue\n if isinstance(arg_3, (list, tuple)):\n for arg_5 in arg_3:\n arg_2.append(_to_str(arg_5))\n continue\n arg_2.append(_to_str(arg_3))\n return tf.compat.v1.Print(arg_0, arg_2)"} +{"_id": "doc_591", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2,\n arg_3=False, arg_4=None):\n \"\"\"Use Gauss-Hermite quadrature to form quadrature on `K - 1` simplex.\n\n A `SoftmaxNormal` random variable `Y` may be generated via\n\n ```\n Y = SoftmaxCentered(X),\n X = 
Normal(normal_loc, normal_scale)\n ```\n\n Note: for a given `quadrature_size`, this method is generally less accurate\n than `quadrature_scheme_softmaxnormal_quantiles`.\n\n Args:\n normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.\n The location parameter of the Normal used to construct the SoftmaxNormal.\n normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.\n The scale parameter of the Normal used to construct the SoftmaxNormal.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the\n convex combination of affine parameters for `K` components.\n `grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.\n probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the\n associated with each grid point.\n \"\"\"\n with tf.name_scope(\n arg_4 or \"Func\"):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_4=\"normal_loc\")\n arg_5 = dtype_util.as_numpy_dtype(arg_0.dtype)\n arg_1 = tf.convert_to_tensor(\n value=arg_1, dtype=arg_5, arg_4=\"normal_scale\")\n\n arg_1 = maybe_check_quadrature_param(\n arg_1, \"normal_scale\", arg_3)\n\n arg_6, arg_7 = np.polynomial.hermite.hermgauss(deg=arg_2)\n arg_6 = arg_6.astype(arg_5)\n arg_7 = arg_7.astype(arg_5)\n arg_7 /= np.linalg.norm(arg_7, ord=1, keepdims=True)\n arg_7 = tf.convert_to_tensor(value=arg_7, arg_4=\"probs\", dtype=arg_5)\n\n arg_6 = softmax(\n -distribution_util.pad(\n (arg_0[..., tf.newaxis] +\n np.sqrt(2.) 
* arg_1[..., tf.newaxis] * arg_6),\n axis=-2,\n front=True),\n axis=-2) # shape: [B, components, deg]\n\n return arg_6, arg_7"} +{"_id": "doc_592", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Helper which checks validity of `loc` and `scale` init args.\"\"\"\n with tf.name_scope(\"check_\" + arg_1):\n arg_3 = []\n if tensorshape_util.rank(arg_0.shape) is not None:\n if tensorshape_util.rank(arg_0.shape) == 0:\n raise ValueError(\"Mixing params must be a (batch of) vector; \"\n \"{}.rank={} is not at least one.\".format(\n arg_1, tensorshape_util.rank(arg_0.shape)))\n elif arg_2:\n arg_3.append(\n assert_util.assert_rank_at_least(\n arg_0,\n 1,\n message=(\"Mixing params must be a (batch of) vector; \"\n \"{}.rank is not at least one.\".format(arg_1))))\n\n # TODO(jvdillon): Remove once we support k-mixtures.\n if tensorshape_util.with_rank_at_least(arg_0.shape, 1)[-1] is not None:\n if tf.compat.dimension_value(arg_0.shape[-1]) != 1:\n raise NotImplementedError(\"Currently only bimixtures are supported; \"\n \"{}.shape[-1]={} is not 1.\".format(\n arg_1,\n tf.compat.dimension_value(\n arg_0.shape[-1])))\n elif arg_2:\n arg_3.append(\n assert_util.assert_equal(\n tf.shape(input=arg_0)[-1],\n 1,\n message=(\"Currently only bimixtures are supported; \"\n \"{}.shape[-1] is not 1.\".format(arg_1))))\n\n if arg_3:\n return distribution_util.with_dependencies(arg_3, arg_0)\n return arg_0"} +{"_id": "doc_593", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper which interpolates between two locs.\"\"\"\n if len(arg_1) != 2:\n raise NotImplementedError(\"Currently only bimixtures are supported; \"\n \"len(scale)={} is not 2.\".format(len(arg_1)))\n arg_2 = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(arg_0.shape, 1)[-1])\n if arg_2 is None:\n raise ValueError(\"Num quadrature grid points must be known prior \"\n \"to graph execution.\")\n with tf.name_scope(\"Func\"):\n if arg_1 is None or arg_1[0] is None and arg_1[1] is None:\n return [None]*arg_2\n # shape: [B, 1, k, deg]\n arg_3 = arg_0[..., tf.newaxis, :, :]\n arg_1 = [\n arg_4[..., tf.newaxis] # shape: [B, e, 1]\n if arg_4 is not None else None for arg_4 in arg_1\n ]\n if arg_1[0] is None:\n arg_4 = arg_3[..., 1, :] * arg_1[1] # shape: [B, e, deg]\n elif arg_1[1] is None:\n arg_4 = arg_3[..., 0, :] * arg_1[0] # shape: [B, e, deg]\n else:\n arg_5 = arg_1[0] - arg_1[1]\n arg_4 = arg_3[..., 0, :] * arg_5 + arg_1[1] # shape: [B, e, deg]\n return [arg_4[..., arg_6] for arg_6 in range(arg_2)]"} +{"_id": "doc_594", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper which interpolates between two scales.\"\"\"\n if len(arg_1) != 2:\n raise NotImplementedError(\"Currently only bimixtures are supported; \"\n \"len(scale)={} is not 2.\".format(len(arg_1)))\n arg_2 = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(arg_0.shape, 1)[-1])\n if arg_2 is None:\n raise ValueError(\"Num quadrature grid points must be known prior \"\n \"to graph execution.\")\n with tf.name_scope(\"Func\"):\n return [linop_add_lib.add_operators([\n linop_scale(arg_0[..., arg_3, arg_5], arg_4)\n for arg_3, arg_4 in enumerate(arg_1)\n ])[0] for arg_5 in range(arg_2)]"} +{"_id": "doc_595", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Multiply tensor of vectors by matrices assuming values stored are logs.\"\"\"\n\n return tf.reduce_logsumexp(input_tensor=arg_0[..., tf.newaxis] + arg_1, axis=-2)"} +{"_id": "doc_596", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Tabulate log probabilities from a 
batch of distributions.\"\"\"\n\n arg_2 = tf.reshape(tf.range(arg_0),\n tf.concat([[arg_0],\n tf.ones_like(arg_1.batch_shape_tensor())],\n axis=0))\n return distribution_util.move_dimension(arg_1.log_prob(arg_2), 0, -1)"} +{"_id": "doc_597", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Compute marginal posterior distribution for each state.\n\n This function computes, for each time step, the marginal\n conditional probability that the hidden Markov model was in\n each possible state given the observations that were made\n at each time step.\n So if the hidden states are `z[0],...,z[num_steps - 1]` and\n the observations are `x[0], ..., x[num_steps - 1]`, then\n this function computes `P(z[i] | x[0], ..., x[num_steps - 1])`\n for all `i` from `0` to `num_steps - 1`.\n\n This operation is sometimes called smoothing. It uses a form\n of the forward-backward algorithm.\n\n Note: the behavior of this function is undefined if the\n `observations` argument represents impossible observations\n from the model.\n\n Args:\n observations: A tensor representing a batch of observations\n made on the hidden Markov model. The rightmost dimension of this tensor\n gives the steps in a sequence of observations from a single sample from\n the hidden Markov model. The size of this dimension should match the\n `num_steps` parameter of the hidden Markov model object. The other\n dimensions are the dimensions of the batch and these are broadcast with\n the hidden Markov model's parameters.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"HiddenMarkovModel\".\n\n Returns:\n posterior_marginal: A `Categorical` distribution object representing the\n marginal probability of the hidden Markov model being in each state at\n each step. The rightmost dimension of the `Categorical` distributions\n batch will equal the `num_steps` parameter providing one marginal\n distribution for each step. 
The other dimensions are the dimensions\n corresponding to the batch of observations.\n\n Raises:\n ValueError: if rightmost dimension of `observations` does not\n have size `num_steps`.\n \"\"\"\n\n with tf.name_scope(arg_2 or \"Func\"):\n with tf.control_dependencies(arg_0._runtime_assertions):\n arg_3 = tf.shape(input=arg_1)\n\n with arg_0._observation_shape_preconditions(arg_3):\n arg_4 = arg_3[\n :-1 - arg_0._underlying_event_rank]\n arg_5 = arg_3[\n -1 - arg_0._underlying_event_rank:]\n\n arg_6 = tf.broadcast_dynamic_shape(arg_4,\n arg_0.batch_shape_tensor())\n arg_7 = tf.broadcast_to(arg_0._log_init,\n tf.concat([arg_6,\n [arg_0._num_states]],\n axis=0))\n arg_8 = arg_0._log_trans\n\n arg_1 = tf.broadcast_to(arg_1,\n tf.concat([arg_6,\n arg_5],\n axis=0))\n arg_9 = tf.rank(arg_1)\n arg_10 = arg_0._underlying_event_rank\n arg_1 = distribution_util.move_dimension(\n arg_1, arg_9 - arg_10 - 1, 0)\n arg_1 = tf.expand_dims(\n arg_1,\n arg_9 - arg_10)\n arg_11 = arg_0._observation_distribution.log_prob(\n arg_1)\n\n arg_12 = tf.zeros_like(arg_7)\n\n def forward_step(arg_13, arg_14):\n return _log_vector_matrix(arg_13,\n arg_8) + arg_14\n\n arg_15 = arg_7 + arg_11[0]\n\n arg_16 = tf.scan(forward_step, arg_11[1:],\n initializer=arg_15,\n arg_2=\"forward_log_probs\")\n\n arg_16 = tf.concat([[arg_15], arg_16], axis=0)\n\n def backward_step(arg_13, arg_14):\n return _log_matrix_vector(arg_8,\n arg_14 + arg_13)\n\n arg_17 = tf.scan(\n backward_step,\n arg_11[1:],\n initializer=arg_12,\n reverse=True,\n arg_2=\"backward_log_adjoint_probs\")\n\n arg_18 = tf.reduce_logsumexp(\n input_tensor=arg_16[-1], axis=-1)\n\n arg_17 = tf.concat([arg_17,\n [arg_12]], axis=0)\n\n arg_19 = arg_16 + arg_17\n\n arg_20 = distribution_util.move_dimension(\n arg_19 - arg_18[..., tf.newaxis], 0, -2)\n\n return categorical.Categorical(logits=arg_20)"} +{"_id": "doc_598", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Chooses a random direction in the event space.\"\"\"\n arg_3 = distributions.SeedStream(arg_2, salt='Func')\n # Chooses the random directions across each of the input components.\n arg_4 = [\n tf.random.normal(\n tf.shape(input=current_state_part), dtype=tf.float32, arg_2=arg_3())\n for current_state_part in arg_0\n ]\n\n # Sum squares over all of the input components. Note this takes all\n # components into account.\n arg_5 = sum(\n tf.reduce_sum(\n input_tensor=rnd_direction**2.,\n axis=tf.range(arg_1, tf.rank(rnd_direction)),\n keepdims=True) for rnd_direction in arg_4)\n\n # Normalizes the random direction fragments.\n arg_4 = [rnd_direction / tf.sqrt(arg_5)\n for rnd_direction in arg_4]\n\n return arg_4"} +{"_id": "doc_599", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6=None,\n arg_7=None):\n \"\"\"Applies a single iteration of slice sampling update.\n\n Applies hit and run style slice sampling. Chooses a uniform random direction\n on the unit sphere in the event space. Applies the one dimensional slice\n sampling update along that direction.\n\n Args:\n target_log_prob_fn: Python callable which takes an argument like\n `*current_state_parts` and returns its (possibly unnormalized) log-density\n under the target distribution.\n current_state_parts: Python `list` of `Tensor`s representing the current\n state(s) of the Markov chain(s). The first `independent_chain_ndims` of\n the `Tensor`(s) index different chains.\n step_sizes: Python `list` of `Tensor`s. Provides a measure of the width\n of the density. Used to find the slice bounds. 
Must broadcast with the\n shape of `current_state_parts`.\n max_doublings: Integer number of doublings to allow while locating the slice\n boundaries.\n current_target_log_prob: `Tensor` representing the value of\n `target_log_prob_fn(*current_state_parts)`. The only reason to specify\n this argument is to reduce TF graph size.\n batch_rank: Integer. The number of axes in the state that correspond to\n independent batches.\n seed: Python integer to seed random number generators.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'find_slice_bounds').\n\n Returns:\n proposed_state_parts: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. Has same shape as\n input `current_state_parts`.\n proposed_target_log_prob: `Tensor` representing the value of\n `target_log_prob_fn` at `next_state`.\n bounds_satisfied: Boolean `Tensor` of the same shape as the log density.\n True indicates whether the an interval containing the slice for that\n batch was found successfully.\n direction: `Tensor` or Python list of `Tensors`s representing the direction\n along which the slice was sampled. Has the same shape and dtype(s) as\n `current_state_parts`.\n upper_bounds: `Tensor` of batch shape and the dtype of the input state. The\n upper bounds of the slices along the sampling direction.\n lower_bounds: `Tensor` of batch shape and the dtype of the input state. The\n lower bounds of the slices along the sampling direction.\n \"\"\"\n with tf.compat.v1.name_scope(arg_7, 'sample_next', [\n arg_1, arg_2, arg_3, arg_4,\n arg_5\n ]):\n # First step: Choose a random direction.\n # Direction is a list of tensors. The i'th tensor should have the same shape\n # as the i'th state part.\n arg_8 = _choose_random_direction(arg_1,\n arg_5=arg_5,\n arg_6=arg_6)\n\n # Interpolates the step sizes for the chosen direction.\n # Applies an ellipsoidal interpolation to compute the step direction for\n # the chosen direction. Suppose we are given step sizes for each direction.\n # Label these s_1, s_2, ... s_k. These are the step sizes to use if moving\n # in a direction parallel to one of the axes. Consider an ellipsoid which\n # intercepts the i'th axis at s_i. The step size for a direction specified\n # by the unit vector (n_1, n_2 ...n_k) is then defined as the intersection\n # of the line through this vector with this ellipsoid.\n #\n # One can show that the length of the vector from the origin to the\n # intersection point is given by:\n # 1 / sqrt(n_1^2 / s_1^2 + n_2^2 / s_2^2 + ...).\n #\n # Proof:\n # The equation of the ellipsoid is:\n # Sum_i [x_i^2 / s_i^2 ] = 1. Let n be a unit direction vector. Points\n # along the line given by n may be parameterized as alpha*n where alpha is\n # the distance along the vector. Plugging this into the equation for the\n # ellipsoid, we get:\n # alpha^2 ( n_1^2 / s_1^2 + n_2^2 / s_2^2 + ...) = 1\n # so alpha = \\sqrt { \\frac{1} { ( n_1^2 / s_1^2 + n_2^2 / s_2^2 + ...) } }\n arg_9 = [tf.range(arg_5, tf.rank(dirn_part))\n for dirn_part in arg_8]\n\n arg_10 = [\n tf.reduce_sum(\n input_tensor=(dirn_part / arg_11)**2, axis=arg_9[i])\n for i, (arg_11, dirn_part) in enumerate(zip(arg_2, arg_8))\n ]\n arg_11 = tf.math.rsqrt(tf.add_n(arg_10))\n # Computes the rank of a tensor. 
Uses the static rank if possible.\n def _get_rank(arg_12):\n return (len(arg_12.shape.as_list()) if arg_12.shape.dims is not None\n else tf.rank(arg_12))\n arg_13 = [_get_rank(part) for part in arg_1]\n\n def _step_along_direction(arg_14):\n \"\"\"Converts the scalar alpha into an n-dim vector with full state info.\n\n Computes x_0 + alpha * direction where x_0 is the current state and\n direction is the direction chosen above.\n\n Args:\n alpha: A tensor of shape equal to the batch dimensions of\n `current_state_parts`.\n\n Returns:\n state_parts: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) for a given alpha and a given chosen\n direction. Has the same shape as `current_state_parts`.\n \"\"\"\n arg_15 = [_right_pad(arg_14, final_rank=part_rank)\n for part_rank in arg_13]\n\n arg_16 = [state_part + padded_alpha\n * direction_part for state_part, direction_part,\n padded_alpha in\n zip(arg_1, arg_8, arg_15)]\n return arg_16\n\n def projected_target_log_prob_fn(arg_14):\n \"\"\"The target log density projected along the chosen direction.\n\n Args:\n alpha: A tensor of shape equal to the batch dimensions of\n `current_state_parts`.\n\n Returns:\n Target log density evaluated at x_0 + alpha * direction where x_0 is the\n current state and direction is the direction chosen above. Has the same\n shape as `alpha`.\n \"\"\"\n return arg_0(*_step_along_direction(arg_14))\n\n arg_17 = tf.zeros_like(arg_4,\n dtype=arg_1[0].dtype.base_dtype)\n [\n arg_18,\n arg_19,\n arg_20,\n arg_21,\n arg_22\n ] = ssu.slice_sampler_one_dim(projected_target_log_prob_fn,\n x_initial=arg_17,\n arg_3=arg_3,\n arg_11=arg_11, arg_6=arg_6)\n return [\n _step_along_direction(arg_18),\n arg_19,\n arg_20,\n arg_8,\n arg_21,\n arg_22\n ]"} +{"_id": "doc_600", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3='target_log_prob'):\n \"\"\"Helper which computes `fn_result` if needed.\"\"\"\n arg_1 = (list(arg_1) if mcmc_util.is_list_like(arg_1)\n else [arg_1])\n if arg_2 is None:\n arg_2 = arg_0(*arg_1)\n if not arg_2.dtype.is_floating:\n raise TypeError('`{}` must be a `Tensor` with `float` `dtype`.'.format(\n arg_3))\n return arg_2"} +{"_id": "doc_601", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Pads the shape of x to the right to be of rank final_rank.\n\n Expands the dims of `x` to the right such that its rank is equal to\n final_rank. For example, if `x` is of shape [1, 5, 7, 2] and `final_rank` is\n 7, we return padded_x, which is of shape [1, 5, 7, 2, 1, 1, 1].\n\n Args:\n x: The tensor whose shape is to be padded.\n final_rank: Scalar int32 `Tensor` or Python `int`. The desired rank of x.\n\n Returns:\n padded_x: A tensor of rank final_rank.\n \"\"\"\n arg_2 = tf.concat(\n [tf.shape(input=arg_0),\n tf.ones(arg_1 - tf.rank(arg_0), dtype=tf.int32)],\n axis=0)\n arg_3 = None\n if arg_0.shape.is_fully_defined() and isinstance(arg_1, int):\n arg_3 = arg_0.shape.as_list()\n arg_4 = arg_1 - len(arg_3)\n arg_3.extend([1] * arg_4)\n\n arg_5 = tf.reshape(arg_0, arg_3 or arg_2)\n return arg_5"} +{"_id": "doc_602", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Runs one iteration of Slice Sampler.\n\n Args:\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). 
The first `r` dimensions\n index independent chains,\n `r = tf.rank(target_log_prob_fn(*current_state))`.\n previous_kernel_results: `collections.namedtuple` containing `Tensor`s\n representing values from previous calls to this function (or from the\n `bootstrap_results` function.)\n\n Returns:\n next_state: Tensor or Python list of `Tensor`s representing the state(s)\n of the Markov chain(s) after taking exactly one step. Has same type and\n shape as `current_state`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n\n Raises:\n ValueError: if there isn't one `step_size` or a list with same length as\n `current_state`.\n TypeError: if `not target_log_prob.dtype.is_floating`.\n \"\"\"\n with tf.compat.v1.name_scope(\n name=mcmc_util.make_name(arg_0.name, 'slice', 'Func'),\n values=[\n arg_0.step_size, arg_0.max_doublings, arg_0._seed_stream,\n arg_1, arg_2.target_log_prob\n ]):\n with tf.compat.v1.name_scope('initialize'):\n [\n arg_3,\n arg_4,\n arg_5\n ] = _prepare_args(\n arg_0.target_log_prob_fn,\n arg_1,\n arg_0.step_size,\n arg_2.target_log_prob,\n maybe_expand=True)\n\n arg_6 = tf.convert_to_tensor(\n value=arg_0.max_doublings, dtype=tf.int32, name='max_doublings')\n\n arg_7 = distribution_util.prefer_static_rank(\n arg_5)\n\n [\n arg_8,\n arg_9,\n arg_10,\n arg_11,\n arg_12,\n arg_13\n ] = _sample_next(\n arg_0.target_log_prob_fn,\n arg_3,\n arg_4,\n arg_6,\n arg_5,\n arg_7,\n seed=arg_0._seed_stream()\n )\n\n def maybe_flatten(arg_14):\n return arg_14 if mcmc_util.is_list_like(arg_1) else arg_14[0]\n\n return [\n maybe_flatten(arg_8),\n SliceSamplerKernelResults(\n target_log_prob=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13\n ),\n ]"} +{"_id": "doc_603", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=(),\n arg_3=None,\n arg_4=None):\n \"\"\"Build a loss function for variational inference in STS models.\n\n Variational inference searches for the distribution within some family of\n approximate posteriors that minimizes a divergence between the approximate\n posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting\n inference to optimization, it's generally much faster than sampling-based\n inference algorithms such as HMC. The tradeoff is that the approximating\n family rarely contains the true posterior, so it may miss important aspects of\n posterior structure (in particular, dependence between variables) and should\n not be blindly trusted. Results may vary; it's generally wise to compare to\n HMC to evaluate whether inference quality is sufficient for your task at hand.\n\n This method constructs a loss function for variational inference using the\n Kullback-Liebler divergence `KL[q(z) || p(z|observed_time_series)]`, with an\n approximating family given by independent Normal distributions transformed to\n the appropriate parameter space for each parameter. Minimizing this loss (the\n negative ELBO) maximizes a lower bound on the log model evidence `-log\n p(observed_time_series)`. This is equivalent to the 'mean-field' method\n implemented in [1]. and is a standard approach. The resulting posterior\n approximations are unimodal; they will tend to underestimate posterior\n uncertainty when the true posterior contains multiple modes (the `KL[q||p]`\n divergence encourages choosing a single mode) or dependence between variables.\n\n Args:\n model: An instance of `StructuralTimeSeries` representing a\n time-series model. 
This represents a joint distribution over\n time-series and their parameters with batch shape `[b1, ..., bN]`.\n observed_time_series: `float` `Tensor` of shape\n `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where\n `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`\n dimension may (optionally) be omitted if `num_timesteps > 1`. May\n optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes\n a mask `Tensor` to specify timesteps with missing observations.\n init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial\n states to optimize in parallel.\n Default value: `()`. (i.e., just run a single optimization).\n seed: Python integer to seed the random number generator.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'Func').\n\n Returns:\n variational_loss: `float` `Tensor` of shape\n `concat([init_batch_shape, model.batch_shape])`, encoding a stochastic\n estimate of an upper bound on the negative model evidence `-log p(y)`.\n Minimizing this loss performs variational inference; the gap between the\n variational bound and the true (generally unknown) model evidence\n corresponds to the divergence `KL[q||p]` between the approximate and true\n posterior.\n variational_distributions: `collections.OrderedDict` giving\n the approximate posterior for each model parameter. The keys are\n Python `str` parameter names in order, corresponding to\n `[param.name for param in model.parameters]`. The values are\n `tfd.Distribution` instances with batch shape\n `concat([init_batch_shape, model.batch_shape])`; these will typically be\n of the form `tfd.TransformedDistribution(tfd.Normal(...),\n bijector=param.bijector)`.\n\n #### Examples\n\n Assume we've built a structural time-series model:\n\n ```python\n day_of_week = tfp.sts.Seasonal(\n num_seasons=7,\n observed_time_series=observed_time_series,\n name='day_of_week')\n local_linear_trend = tfp.sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n name='local_linear_trend')\n model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],\n observed_time_series=observed_time_series)\n ```\n\n To run variational inference, we simply construct the loss and optimize\n it:\n\n ```python\n (variational_loss,\n variational_distributions) = tfp.sts.Func(\n model=model, observed_time_series=observed_time_series)\n\n train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(200):\n _, loss_ = sess.run((train_op, variational_loss))\n\n if step % 20 == 0:\n print(\"step {} loss {}\".format(step, loss_))\n\n posterior_samples_ = sess.run({\n param_name: q.sample(50)\n for param_name, q in variational_distributions.items()})\n ```\n\n As a more complex example, we might try to avoid local optima by optimizing\n from multiple initializations in parallel, and selecting the result with the\n lowest loss:\n\n ```python\n (variational_loss,\n variational_distributions) = tfp.sts.Func(\n model=model, observed_time_series=observed_time_series,\n init_batch_shape=[10])\n\n train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(200):\n _, loss_ = sess.run((train_op, variational_loss))\n\n if step % 20 == 0:\n print(\"step {} losses {}\".format(step, loss_))\n\n # Draw multiple samples to reduce Monte Carlo error in the 
optimized\n # variational bounds.\n avg_loss = np.mean(\n [sess.run(variational_loss) for _ in range(25)], axis=0)\n best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32)\n ```\n\n #### References\n\n [1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and\n David M. Blei. Automatic Differentiation Variational Inference. In\n _Journal of Machine Learning Research_, 2017.\n https://arxiv.org/abs/1603.00788\n\n \"\"\"\n\n with tf.compat.v1.name_scope(\n arg_4, 'Func',\n values=[arg_1]) as arg_4:\n arg_3 = tfd.SeedStream(\n arg_3, salt='StructuralTimeSeries_Func')\n\n arg_5 = collections.OrderedDict()\n arg_6 = []\n for arg_7 in arg_0.parameters:\n def initial_loc_fn(arg_7):\n return sample_uniform_initial_state(\n arg_7, return_constrained=True,\n init_sample_shape=arg_2,\n arg_3=arg_3())\n arg_8 = _build_trainable_posterior(arg_7, initial_loc_fn=initial_loc_fn)\n arg_5[arg_7.name] = arg_8\n arg_6.append(arg_8.sample(arg_3=arg_3()))\n\n # Multiple initializations (similar to HMC chains) manifest as an extra\n # param batch dimension, so we need to add corresponding batch dimension(s)\n # to `observed_time_series`.\n arg_1 = sts_util.pad_batch_dimension_for_multiple_chains(\n arg_1, arg_0, chain_batch_shape=arg_2)\n\n # Construct the variational bound.\n arg_9 = arg_0.joint_log_prob(arg_1)\n arg_10 = arg_9(*arg_6)\n arg_11 = tf.reduce_sum(\n input_tensor=[\n -arg_8.log_prob(sample) for (arg_8, sample) in zip(\n arg_5.values(), arg_6)\n ],\n axis=0)\n arg_12 = -(arg_10 + arg_11) # -ELBO\n\n return arg_12, arg_5"} +{"_id": "doc_604", "title": "", "text": "def Func(arg_0, arg_1=200, arg_2=None):\n \"\"\"Run an optimizer within the graph to minimize a loss function.\"\"\"\n arg_2 = tf.compat.v1.train.AdamOptimizer(\n 0.1) if arg_2 is None else arg_2\n\n def train_loop_body(arg_3):\n arg_4 = arg_2.minimize(\n arg_0 if tf.executing_eagerly() else arg_0())\n return tf.tuple(tensors=[tf.add(arg_3, 1)], control_inputs=[arg_4])\n\n arg_5 = tf.compat.v1.while_loop(\n cond=lambda arg_3: arg_3 < arg_1,\n body=train_loop_body,\n loop_vars=[tf.constant(0)],\n return_same_structure=True)[0] # Always return a single op.\n return arg_5"} +{"_id": "doc_605", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute mean and variance, accounting for a mask.\n\n Args:\n time_series_tensor: float `Tensor` time series of shape\n `concat([batch_shape, [num_timesteps]])`.\n broadcast_mask: bool `Tensor` of the same shape as `time_series`.\n Returns:\n mean: float `Tensor` of shape `batch_shape`.\n variance: float `Tensor` of shape `batch_shape`.\n \"\"\"\n arg_2 = tf.cast(\n tf.reduce_sum(input_tensor=tf.cast(~arg_1, tf.int32), axis=-1),\n arg_0.dtype)\n\n # Manually compute mean and variance, excluding masked entries.\n arg_3 = (tf.reduce_sum(input_tensor=tf.where(\n arg_1,\n tf.zeros_like(arg_0),\n arg_0), axis=-1) / arg_2)\n arg_4 = (tf.reduce_sum(input_tensor=tf.where(\n arg_1,\n tf.zeros_like(arg_0),\n (arg_0 - arg_3[..., tf.newaxis]) ** 2), axis=-1)\n / arg_2)\n return arg_3, arg_4"} +{"_id": "doc_606", "title": "", "text": "def Func(arg_0):\n \"\"\"Get broadcast batch shape from distributions, statically if possible.\"\"\"\n\n # Static case\n arg_1 = arg_0[0].batch_shape\n for arg_2 in arg_0:\n arg_1 = tf.broadcast_static_shape(arg_1,\n arg_2.batch_shape)\n if arg_1.is_fully_defined():\n return arg_1.as_list()\n\n # Fallback on dynamic.\n arg_1 = arg_0[0].batch_shape_tensor()\n for arg_2 in arg_0:\n arg_1 = tf.broadcast_dynamic_shape(arg_1,\n arg_2.batch_shape_tensor())\n\n 
return tf.convert_to_tensor(value=arg_1)"} +{"_id": "doc_607", "title": "", "text": "def Func(arg_0):\n \"\"\"Combine MultivariateNormals into a factored joint distribution.\n\n Given a list of multivariate normal distributions\n `dist[i] = Normal(loc[i], scale[i])`, construct the joint\n distribution given by concatenating independent samples from these\n distributions. This is multivariate normal with mean vector given by the\n concatenation of the component mean vectors, and block-diagonal covariance\n matrix in which the blocks are the component covariances.\n\n Note that for computational efficiency, multivariate normals are represented\n by a 'scale' (factored covariance) linear operator rather than the full\n covariance matrix.\n\n Args:\n distributions: Python `iterable` of MultivariateNormal distribution\n instances (e.g., `tfd.MultivariateNormalDiag`,\n `tfd.MultivariateNormalTriL`, etc.). These must be broadcastable to a\n consistent batch shape, but may have different event shapes\n (i.e., defined over spaces of different dimension).\n\n Returns:\n joint_distribution: An instance of `tfd.MultivariateNormalLinearOperator`\n representing the joint distribution constructed by concatenating\n an independent sample from each input distributions.\n \"\"\"\n\n arg_1 = [tensor for distribution in arg_0\n for tensor in distribution._graph_parents] # pylint: disable=protected-access\n with tf.compat.v1.name_scope('Func', values=arg_1):\n\n # We explicitly broadcast the `locs` so that we can concatenate them.\n # We don't have direct numerical access to the `scales`, which are arbitrary\n # linear operators, but `LinearOperatorBlockDiag` appears to do the right\n # thing without further intervention.\n arg_2 = tf.debugging.assert_same_float_dtype(arg_0)\n arg_3 = tf.ones(broadcast_batch_shape(arg_0),\n arg_2=arg_2)[..., tf.newaxis]\n return MultivariateNormalLinearOperator(\n loc=tf.concat([arg_4.mean() * arg_3 for arg_4 in arg_0],\n axis=-1),\n scale=tfl.LinearOperatorBlockDiag([arg_4.scale for arg_4 in arg_0],\n is_square=True))"} +{"_id": "doc_608", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute statistics of a provided time series, as heuristic initialization.\n\n Args:\n observed_time_series: `Tensor` representing a time series, or batch of time\n series, of shape either `batch_shape + [num_timesteps, 1]` or\n `batch_shape + [num_timesteps]` (allowed if `num_timesteps > 1`).\n\n Returns:\n observed_mean: `Tensor` of shape `batch_shape`, giving the empirical\n mean of each time series in the batch.\n observed_stddev: `Tensor` of shape `batch_shape`, giving the empirical\n standard deviation of each time series in the batch.\n observed_initial_centered: `Tensor of shape `batch_shape`, giving the\n initial value of each time series in the batch after centering\n (subtracting the mean).\n \"\"\"\n\n with tf.compat.v1.name_scope(\n 'Func', values=[arg_0]):\n\n [\n arg_0,\n arg_1\n ] = canonicalize_observed_time_series_with_mask(arg_0)\n\n arg_2 = arg_0[..., 0]\n if arg_1 is None:\n arg_3, arg_4 = tf.nn.moments(\n x=arg_2, axes=-1)\n arg_5 = arg_2[..., 0]\n else:\n arg_6 = tf.broadcast_to(tf.cast(arg_1, tf.bool),\n tf.shape(input=arg_2))\n arg_3, arg_4 = (\n missing_values_util.moments_of_masked_time_series(\n arg_2, arg_6=arg_6))\n try:\n arg_5 = (\n missing_values_util.initial_value_of_masked_time_series(\n arg_2, arg_6=arg_6))\n except NotImplementedError:\n tf.compat.v1.logging.warn(\n 'Cannot compute initial values for a masked time series'\n 'with dynamic shape; using the 
mean instead. This will'\n 'affect heuristic priors and may change the results of'\n 'inference.')\n arg_5 = arg_3\n\n arg_7 = tf.sqrt(arg_4)\n arg_8 = arg_5 - arg_3\n return arg_3, arg_7, arg_8"} +{"_id": "doc_609", "title": "", "text": "def Func(arg_0):\n \"\"\"Ensures `observed_time_series_tensor` has a trailing dimension of size 1.\n\n The `tfd.LinearGaussianStateSpaceModel` Distribution has event shape of\n `[num_timesteps, observation_size]`, but canonical BSTS models\n are univariate, so their observation_size is always `1`. The extra trailing\n dimension gets annoying, so this method allows arguments with or without the\n extra dimension. There is no ambiguity except in the trivial special case\n where `num_timesteps = 1`; this can be avoided by specifying any unit-length\n series in the explicit `[num_timesteps, 1]` style.\n\n Most users should not call this method directly, and instead call\n `canonicalize_observed_time_series_with_mask`, which handles converting\n to `Tensor` and specifying an optional missingness mask.\n\n Args:\n observed_time_series_tensor: `Tensor` of shape\n `batch_shape + [num_timesteps, 1]` or `batch_shape + [num_timesteps]`,\n where `num_timesteps > 1`.\n\n Returns:\n expanded_time_series: `Tensor` of shape `batch_shape + [num_timesteps, 1]`.\n \"\"\"\n\n with tf.compat.v1.name_scope(\n 'maybe_expand_trailing_dim', values=[arg_0]):\n if (arg_0.shape.ndims is not None and\n tf.compat.dimension_value(\n arg_0.shape[-1]) is not None):\n arg_1 = (\n arg_0\n if arg_0.shape[-1] == 1\n else arg_0[..., tf.newaxis])\n else:\n arg_1 = tf.cond(\n pred=tf.equal(tf.shape(input=arg_0)[-1], 1),\n true_fn=lambda: arg_0,\n false_fn=lambda: arg_0[..., tf.newaxis])\n return arg_1"} +{"_id": "doc_610", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Construct a predictive normal distribution that mixes over posterior draws.\n\n Args:\n means: float `Tensor` of shape\n `[num_posterior_draws, ..., num_timesteps]`.\n variances: float `Tensor` of shape\n `[num_posterior_draws, ..., num_timesteps]`.\n\n Returns:\n mixture_dist: `tfd.MixtureSameFamily(tfd.Independent(tfd.Normal))` instance\n representing a uniform mixture over the posterior samples, with\n `batch_shape = ...` and `event_shape = [num_timesteps]`.\n\n \"\"\"\n # The inputs `means`, `variances` have shape\n # `concat([\n # [num_posterior_draws],\n # sample_shape,\n # batch_shape,\n # [num_timesteps]])`\n # Because MixtureSameFamily mixes over the rightmost batch dimension,\n # we need to move the `num_posterior_draws` dimension to be rightmost\n # in the batch shape. 
This requires use of `Independent` (to preserve\n # `num_timesteps` as part of the event shape) and `move_dimension`.\n # TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an\n # arbitrary axis, and eliminate `move_dimension` calls here.\n\n with tf.compat.v1.name_scope(\n 'Func', values=[arg_0, arg_1]):\n arg_2 = dist_util.prefer_static_value(\n tf.shape(input=arg_0))[0]\n\n arg_3 = tfd.Independent(\n distribution=tfd.Normal(\n loc=dist_util.move_dimension(arg_0, 0, -2),\n scale=tf.sqrt(dist_util.move_dimension(arg_1, 0, -2))),\n reinterpreted_batch_ndims=1)\n\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n logits=tf.zeros([arg_2],\n dtype=arg_3.dtype)),\n components_distribution=arg_3)"} +{"_id": "doc_611", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Uses arg names to resolve distribution names.\"\"\"\n if arg_1 is None:\n arg_1 = []\n else:\n arg_1 = arg_1.copy()\n arg_3 = len(arg_0)\n arg_1.extend([None]*(arg_3 - len(arg_1)))\n for arg_4, arg_5 in enumerate(reversed(arg_0)):\n if not arg_5:\n continue # There's no args to analyze.\n arg_6 = arg_3 - arg_4 - 1\n for arg_7, arg_8 in enumerate(arg_5):\n arg_1[arg_6 - arg_7 - 1] = arg_8\n arg_7 = 0\n for arg_4 in range(len(arg_1)):\n arg_6 = arg_3 - arg_4 - 1\n if arg_1[arg_6] is None:\n arg_1[arg_6] = arg_2 if arg_7 == 0 else arg_2 + str(arg_7)\n arg_7 += 1\n return tuple(arg_1)"} +{"_id": "doc_612", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Calculate the KL divergence between two `JointDistributionSequential`s.\n\n Args:\n d0: instance of a `JointDistributionSequential` object.\n d1: instance of a `JointDistributionSequential` object.\n name: (optional) Name to use for created operations.\n Default value: `\"kl_joint_joint\"`.\n\n Returns:\n kl_joint_joint: `Tensor` The sum of KL divergences between elemental\n distributions of two joint distributions.\n\n Raises:\n ValueError: when joint distributions have a different number of elemental\n distributions.\n ValueError: when either joint distribution has a distribution with dynamic\n dependency, i.e., when either joint distribution is not a collection of\n independent distributions.\n \"\"\"\n if len(arg_0._dist_fn_wrapped) != len(arg_1._dist_fn_wrapped): # pylint: disable=protected-access\n raise ValueError(\n 'Can only compute KL divergence between when each has the'\n 'same number of component distributions.')\n if (not all(arg_3 is None for arg_3 in arg_0._dist_fn_args) or # pylint: disable=protected-access\n not all(arg_3 is None for arg_3 in arg_1._dist_fn_args)): # pylint: disable=protected-access\n raise ValueError(\n 'Can only compute KL divergence when all distributions are '\n 'independent.')\n with tf.name_scope(arg_2 or 'kl_jointseq_jointseq'):\n return sum(kullback_leibler.kl_divergence(arg_4(), arg_5())\n for arg_4, arg_5 in zip(arg_0._dist_fn_wrapped, arg_1._dist_fn_wrapped))"} +{"_id": "doc_613", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates `dist_fn`, `dist_fn_wrapped`, `dist_fn_args`.\"\"\"\n if not isinstance(arg_1, collections.Sequence):\n raise TypeError('`model` must be `list`-like (saw: {}).'.format(\n type(arg_1).__name__))\n arg_0._dist_fn = arg_1\n arg_0._dist_fn_wrapped, arg_0._dist_fn_args = zip(*[\n _unify_call_signature(i, dist_fn)\n for i, dist_fn in enumerate(arg_1)])"} +{"_id": "doc_614", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='x'):\n \"\"\"Creates a `tuple` of `tuple`s of dependencies.\n\n This function is **experimental**. 
That said, we encourage its use\n and ask that you report problems to `tfprobability@tensorflow.org`.\n\n Args:\n distribution_names: `list` of `str` or `None` names corresponding to each\n of `model` elements. (`None`s are expanding into the\n appropriate `str`.)\n leaf_name: `str` used when no maker depends on a particular\n `model` element.\n\n Returns:\n graph: `tuple` of `(str tuple)` pairs representing the name of each\n distribution (maker) and the names of its dependencies.\n\n #### Example\n\n ```python\n d = tfd.JointDistributionSequential([\n tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),\n lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),\n tfd.Normal(loc=0, scale=2.),\n lambda n, g: tfd.Normal(loc=n, scale=g),\n ])\n d.Func()\n # ==> (\n # ('e', ()),\n # ('g', ('e',)),\n # ('n', ()),\n # ('x', ('n', 'g')),\n # )\n ```\n\n \"\"\"\n # This function additionally depends on:\n # self._dist_fn_args\n # self._dist_fn_wrapped\n # TODO(b/129008220): Robustify this procedure. Eg, handle collisions better,\n # ignore args prefixed with `_`.\n if arg_1 is None or any(arg_0._dist_fn_args):\n arg_1 = _resolve_distribution_names(\n arg_0._dist_fn_args, arg_1, arg_2)\n if len(set(arg_1)) != len(arg_1):\n raise ValueError('Distribution names must be unique: {}'.format(\n arg_1))\n if len(arg_1) != len(arg_0._dist_fn_wrapped):\n raise ValueError('Distribution names must be 1:1 with `rvs`.')\n return tuple(zip(arg_1,\n tuple(() if arg_3 is None else arg_3 for arg_3 in arg_0._dist_fn_args)))"} +{"_id": "doc_615", "title": "", "text": "def Func(arg_0):\n \"\"\"Decorator function for argument bounds checking.\n\n This decorator is meant to be used with methods that require the first\n argument to be in the support of the distribution. If `validate_args` is\n `True`, the method is wrapped with an assertion that the first argument is\n greater than or equal to `loc`, since the support of the half-Cauchy\n distribution is given by `[loc, infinity)`.\n\n\n Args:\n f: method to be decorated.\n\n Returns:\n Returns a decorated method that, when `validate_args` attribute of the class\n is `True`, will assert that all elements in the first argument are within\n the support of the distribution before executing the original method.\n \"\"\"\n @functools.wraps(arg_0)\n def _check_arg_and_apply_f(*arg_1, **arg_2):\n arg_3 = arg_1[0]\n arg_4 = arg_1[1]\n with tf.control_dependencies([\n assert_util.assert_greater_equal(\n arg_4, arg_3.loc, message=\"x is not in the support of the distribution\")\n ] if arg_3.validate_args else []):\n return arg_0(*arg_1, **arg_2)\n return _check_arg_and_apply_f"} +{"_id": "doc_616", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Visualizes sequences as TensorBoard summaries.\n\n Args:\n seqs: A tensor of shape [n, t, h, w, c].\n name: String name of this summary.\n num: Integer for the number of examples to visualize. 
Defaults to\n all examples.\n \"\"\"\n arg_0 = tf.clip_by_value(arg_0, 0., 1.)\n arg_0 = tf.unstack(arg_0[:arg_2])\n arg_3 = [tf.concat(tf.unstack(seq), 1) for seq in arg_0]\n arg_3 = tf.expand_dims(tf.concat(arg_3, 0), 0)\n tf.compat.v2.summary.image(\n arg_1,\n arg_3,\n max_outputs=1,\n step=tf.compat.v1.train.get_or_create_global_step())"} +{"_id": "doc_617", "title": "", "text": "def Func(arg_0, arg_1, arg_2=3, arg_3=\"reconstruction\"):\n \"\"\"Visualizes the reconstruction of inputs in TensorBoard.\n\n Args:\n inputs: A tensor of the original inputs, of shape [batch, timesteps,\n h, w, c].\n reconstruct: A tensor of a reconstruction of inputs, of shape\n [batch, timesteps, h, w, c].\n num: Integer for the number of examples to visualize.\n name: String name of this summary.\n \"\"\"\n arg_1 = tf.clip_by_value(arg_1, 0., 1.)\n arg_4 = tf.concat((arg_0[:arg_2], arg_1[:arg_2]), axis=0)\n image_summary(arg_4, arg_3)"} +{"_id": "doc_618", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"dist_params\"):\n \"\"\"Summarize the parameters of a distribution.\n\n Args:\n dist: A Distribution object with mean and standard deviation\n parameters.\n name: The name of the distribution.\n name_scope: The name scope of this summary.\n \"\"\"\n with tf.compat.v1.name_scope(arg_2):\n tf.compat.v2.summary.histogram(\n arg_1=\"{}/{}\".format(arg_1, \"mean\"),\n data=arg_0.mean(),\n step=tf.compat.v1.train.get_or_create_global_step())\n tf.compat.v2.summary.histogram(\n arg_1=\"{}/{}\".format(arg_1, \"stddev\"),\n data=arg_0.stddev(),\n step=tf.compat.v1.train.get_or_create_global_step())"} +{"_id": "doc_619", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Runs the model to generate a distribution for a single timestep.\n\n This generates a batched MultivariateNormalDiag distribution using\n the output of the recurrent model at the current timestep to\n parameterize the distribution.\n\n Args:\n inputs: The sampled value of `z` at the previous timestep, i.e.,\n `z_{t-1}`, of shape [..., dimensions].\n `z_0` should be set to the empty matrix.\n state: A tuple containing the (hidden, cell) state.\n\n Returns:\n A tuple of a MultivariateNormalDiag distribution, and the state of\n the recurrent function at the end of the current timestep. The\n distribution will have event shape [dimensions], batch shape\n [...], and sample shape [sample_shape, ..., dimensions].\n \"\"\"\n # In order to allow the user to pass in a single example without a batch\n # dimension, we always expand the input to at least two dimensions, then\n # fix the output shape to remove the batch dimension if necessary.\n arg_3 = arg_1.shape\n if len(arg_3) < 2:\n arg_1 = tf.reshape(arg_1, [1, -1])\n arg_4, arg_2 = arg_0.lstm_cell(arg_1, arg_2)\n arg_4 = arg_0.output_layer(arg_4)\n arg_5 = tf.concat((arg_3[:-1], tf.shape(input=arg_4)[-1:]),\n 0)\n arg_4 = tf.reshape(arg_4, arg_5)\n arg_6 = arg_4[..., :arg_0.dimensions]\n arg_7 = tf.nn.softplus(arg_4[..., arg_0.dimensions:]) + 1e-5 # keep > 0\n return tfd.MultivariateNormalDiag(arg_6=arg_6, arg_7=arg_7), arg_2"} +{"_id": "doc_620", "title": "", "text": "def Func(arg_0):\n \"\"\"Static batch shape of models represented by this component.\n\n Returns:\n batch_shape: A `tf.TensorShape` giving the broadcast batch shape of\n all model parameters. This should match the batch shape of\n derived state space models, i.e.,\n `self.make_state_space_model(...).batch_shape`. 
It may be partially\n defined or unknown.\n \"\"\"\n Func = tf.TensorShape([])\n for arg_2 in arg_0.parameters:\n Func = tf.broadcast_static_shape(\n Func, arg_2.prior.batch_shape)\n return Func"} +{"_id": "doc_621", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=0):\n \"\"\"Instantiate this model as a Distribution over specified `num_timesteps`.\n\n Args:\n num_timesteps: Python `int` number of timesteps to model.\n param_vals: a list of `Tensor` parameter values in order corresponding to\n `self.parameters`, or a dict mapping from parameter names to values.\n initial_state_prior: an optional `Distribution` instance overriding the\n default prior on the model's initial state. This is used in forecasting\n (\"today's prior is yesterday's posterior\").\n initial_step: optional `int` specifying the initial timestep to model.\n This is relevant when the model contains time-varying components,\n e.g., holidays or seasonality.\n\n Returns:\n dist: a `LinearGaussianStateSpaceModel` Distribution object.\n \"\"\"\n return arg_0._Func(\n arg_1=arg_1,\n param_map=arg_0._canonicalize_param_vals_as_map(arg_2),\n arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_622", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=0,\n arg_3=(),\n arg_4=(),\n arg_5=None):\n \"\"\"Sample from the joint prior over model parameters and trajectories.\n\n Args:\n num_timesteps: Scalar `int` `Tensor` number of timesteps to model.\n initial_step: Optional scalar `int` `Tensor` specifying the starting\n timestep.\n Default value: 0.\n params_sample_shape: Number of possible worlds to sample iid from the\n parameter prior, or more generally, `Tensor` `int` shape to fill with\n iid samples.\n Default value: [] (i.e., draw a single sample and don't expand the\n shape).\n trajectories_sample_shape: For each sampled set of parameters, number\n of trajectories to sample, or more generally, `Tensor` `int` shape to\n fill with iid samples.\n Default value: [] (i.e., draw a single sample and don't expand the\n shape).\n seed: Python `int` random seed.\n\n Returns:\n trajectories: `float` `Tensor` of shape\n `trajectories_sample_shape + params_sample_shape + [num_timesteps, 1]`\n containing all sampled trajectories.\n param_samples: list of sampled parameter value `Tensor`s, in order\n corresponding to `self.parameters`, each of shape\n `params_sample_shape + prior.batch_shape + prior.event_shape`.\n \"\"\"\n\n arg_5 = distributions.SeedStream(\n arg_5, salt='StructuralTimeSeries_Func')\n\n with tf.compat.v1.name_scope(\n 'Func',\n values=[arg_1, arg_3, arg_4]):\n arg_6 = [\n p.prior.sample(arg_3, arg_5=arg_5(), name=p.name)\n for p in arg_0.parameters\n ]\n arg_7 = arg_0.make_state_space_model(\n arg_1=arg_1,\n arg_2=arg_2,\n param_vals=arg_6)\n return arg_7.sample(arg_4, arg_5=arg_5()), arg_6"} +{"_id": "doc_623", "title": "", "text": "def Func(arg_0, arg_1=-1, arg_2='ASCENDING', arg_3=False, arg_4=None): # pylint: disable=unused-argument\n \"\"\"Numpy implementation of `tf.argsort`.\"\"\"\n if arg_2 == 'ASCENDING':\n pass\n elif arg_2 == 'DESCENDING':\n arg_0 = np.negative(arg_0)\n else:\n raise ValueError('Unrecognized direction: {}.'.format(arg_2))\n return np.argsort(arg_0, arg_1, kind='stable' if arg_3 else 'quicksort')"} +{"_id": "doc_624", "title": "", "text": "def Func(arg_0, arg_1=-1, arg_2='ASCENDING', arg_3=False, arg_4=None): # pylint: disable=unused-argument\n \"\"\"Numpy implementation of `tf.sort`.\"\"\"\n if arg_2 == 'ASCENDING':\n pass\n elif arg_2 == 'DESCENDING':\n arg_0 = 
np.negative(arg_0)\n else:\n raise ValueError('Unrecognized direction: {}.'.format(arg_2))\n arg_5 = np.sort(arg_0, arg_1, kind='stable' if arg_3 else 'quicksort')\n if arg_2 == 'DESCENDING':\n return np.negative(arg_5)\n return arg_5"} +{"_id": "doc_625", "title": "", "text": "def Func(arg_0, arg_1=\"Func\"):\n \"\"\"Normal distribution function.\n\n Returns the area under the Gaussian probability density function, integrated\n from minus infinity to x:\n\n ```\n 1 / x\n Func(x) = ---------- | exp(-0.5 t**2) dt\n sqrt(2 pi) /-inf\n\n = 0.5 (1 + erf(x / sqrt(2)))\n = 0.5 erfc(x / sqrt(2))\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"Func\").\n\n Returns:\n Func: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with tf.name_scope(arg_1):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"x\")\n if dtype_util.as_numpy_dtype(arg_0.dtype) not in [np.float32, np.float64]:\n raise TypeError(\n \"x.dtype=%s is not handled, see docstring for supported types.\"\n % arg_0.dtype)\n return _Func(arg_0)"} +{"_id": "doc_626", "title": "", "text": "def Func(arg_0):\n \"\"\"Implements ndtr core logic.\"\"\"\n arg_1 = tf.constant(\n 0.5 * np.sqrt(2.), dtype=arg_0.dtype, name=\"half_sqrt_2\")\n arg_2 = arg_0 * arg_1\n arg_3 = tf.abs(arg_2)\n arg_4 = tf.where(\n tf.less(arg_3, arg_1), 1. + tf.math.erf(arg_2),\n tf.where(tf.greater(arg_2, 0.), 2. - tf.math.erfc(arg_3), tf.math.erfc(arg_3)))\n return 0.5 * arg_4"} +{"_id": "doc_627", "title": "", "text": "def Func(arg_0, arg_1=\"Func\"):\n \"\"\"The inverse of the CDF of the Normal distribution function.\n\n Returns x such that the area under the pdf from minus infinity to x is equal\n to p.\n\n A piece-wise rational approximation is done for the function.\n This is a port of the implementation in netlib.\n\n Args:\n p: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"Func\").\n\n Returns:\n x: `Tensor` with `dtype=p.dtype`.\n\n Raises:\n TypeError: if `p` is not floating-type.\n \"\"\"\n\n with tf.name_scope(arg_1):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"p\")\n if dtype_util.as_numpy_dtype(arg_0.dtype) not in [np.float32, np.float64]:\n raise TypeError(\n \"p.dtype=%s is not handled, see docstring for supported types.\"\n % arg_0.dtype)\n return _Func(arg_0)"} +{"_id": "doc_628", "title": "", "text": "def Func(arg_0, arg_1=3, arg_2=\"Func\"):\n \"\"\"Log Normal distribution function.\n\n For details of the Normal distribution function see `ndtr`.\n\n This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or\n using an asymptotic series. Specifically:\n - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on\n `log(1-x) ~= -x, x << 1`.\n - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique\n and take a log.\n - For `x <= lower_segment`, we use the series approximation of erf to compute\n the log CDF directly.\n\n The `lower_segment` is set based on the precision of the input:\n\n ```\n lower_segment = { -20, x.dtype=float64\n { -10, x.dtype=float32\n upper_segment = { 8, x.dtype=float64\n { 5, x.dtype=float32\n ```\n\n When `x < lower_segment`, the `ndtr` asymptotic series approximation is:\n\n ```\n ndtr(x) = scale * (1 + sum) + R_N\n scale = exp(-0.5 x**2) / (-x sqrt(2 pi))\n sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}\n R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})\n ```\n\n where `(2n-1)!! 
= (2n-1) (2n-3) (2n-5) ... (3) (1)` is a\n [double-factorial](https://en.wikipedia.org/wiki/Double_factorial).\n\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n series_order: Positive Python `integer`. Maximum depth to\n evaluate the asymptotic expansion. This is the `N` above.\n name: Python string. A name for the operation (default=\"Func\").\n\n Returns:\n Func: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n TypeError: if `series_order` is a not Python `integer.`\n ValueError: if `series_order` is not in `[0, 30]`.\n \"\"\"\n if not isinstance(arg_1, int):\n raise TypeError(\"series_order must be a Python integer.\")\n if arg_1 < 0:\n raise ValueError(\"series_order must be non-negative.\")\n if arg_1 > 30:\n raise ValueError(\"series_order must be <= 30.\")\n\n with tf.name_scope(arg_2):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_2=\"x\")\n\n if dtype_util.base_equal(arg_0.dtype, tf.float64):\n arg_3 = LOGNDTR_FLOAT64_LOWER\n arg_4 = LOGNDTR_FLOAT64_UPPER\n elif dtype_util.base_equal(arg_0.dtype, tf.float32):\n arg_3 = LOGNDTR_FLOAT32_LOWER\n arg_4 = LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError(\"x.dtype=%s is not supported.\" % arg_0.dtype)\n\n # The basic idea here was ported from:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n # We copy the main idea, with a few changes\n # * For x >> 1, and X ~ Normal(0, 1),\n # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],\n # which extends the range of validity of this function.\n # * We use one fixed series_order for all of 'x', rather than adaptive.\n # * Our docstring properly reflects that this is an asymptotic series, not a\n # Taylor series. We also provided a correct bound on the remainder.\n # * We need to use the max/min in the _Func_lower arg to avoid nan when\n # x=0. This happens even though the branch is unchosen because when x=0\n # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan\n # regardless of whether dy is finite. Note that the minimum is a NOP if\n # the branch is chosen.\n return tf.where(\n tf.greater(arg_0, arg_4),\n -_ndtr(-arg_0), # log(1-x) ~= -x, x << 1\n tf.where(\n tf.greater(arg_0, arg_3),\n tf.math.log(_ndtr(tf.maximum(arg_0, arg_3))),\n _Func_lower(tf.minimum(arg_0, arg_3), arg_1)))"} +{"_id": "doc_629", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Calculates the asymptotic series used in log_ndtr.\"\"\"\n arg_2 = dtype_util.as_numpy_dtype(arg_0.dtype)\n if arg_1 <= 0:\n return arg_2(1)\n arg_3 = tf.square(arg_0)\n arg_4 = tf.zeros_like(arg_0)\n arg_5 = tf.zeros_like(arg_0)\n arg_6 = arg_3 # Start with x^{2*1} = x^{2*n} with n = 1.\n for arg_7 in range(1, arg_1 + 1):\n arg_8 = arg_2(_double_factorial(2 * arg_7 - 1)) / arg_6\n if arg_7 % 2:\n arg_5 += arg_8\n else:\n arg_4 += arg_8\n arg_6 *= arg_3\n return 1. + arg_4 - arg_5"} +{"_id": "doc_630", "title": "", "text": "def Func(arg_0, arg_1=\"Func\"):\n \"\"\"Log Laplace distribution function.\n\n This function calculates `Log[L(x)]`, where `L(x)` is the cumulative\n distribution function of the Laplace distribution, i.e.\n\n ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```\n\n For numerical accuracy, `L(x)` is computed in different ways depending on `x`,\n\n ```\n x <= 0:\n Log[L(x)] = Log[0.5] + x, which is exact\n\n 0 < x:\n Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. 
A name for the operation (default=\"log_ndtr\").\n\n Returns:\n `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n \"\"\"\n\n with tf.name_scope(arg_1):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"x\")\n\n # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.\n arg_2 = -np.log(2.) + arg_0\n\n # safe_exp_neg_x = exp{-x} for x > 0, but is\n # bounded above by 1, which avoids\n # log[1 - 1] = -inf for x = log(1/2), AND\n # exp{-x} --> inf, for x << -1\n arg_3 = tf.exp(-tf.abs(arg_0))\n\n # log1p(z) = log(1 + z) approx z for |z| << 1. This approxmation is used\n # internally by log1p, rather than being done explicitly here.\n arg_4 = tf.math.log1p(-0.5 * arg_3)\n\n return tf.where(arg_0 < 0., arg_2, arg_4)"} +{"_id": "doc_631", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Joint log probability function.\"\"\"\n arg_4 = (1. / tf.reduce_mean(input_tensor=arg_0))\n arg_5 = tfd.Exponential(rate=arg_4)\n\n arg_6 = tfd.Uniform()\n\n arg_7 = tf.gather(\n [arg_1, arg_2],\n indices=tf.cast(\n arg_3 * tf.cast(tf.size(input=arg_0), dtype=tf.float32) <= tf.cast(\n tf.range(tf.size(input=arg_0)), dtype=tf.float32),\n dtype=tf.int32))\n arg_8 = tfd.Poisson(rate=arg_7)\n\n return (arg_5.log_prob(arg_1) + arg_5.log_prob(arg_2) +\n arg_6.log_prob(arg_3) +\n tf.reduce_sum(input_tensor=arg_8.log_prob(arg_0)))"} +{"_id": "doc_632", "title": "", "text": "def Func(\n arg_0=arg_1(3e3),\n arg_2=arg_1(3e3),\n arg_3=3):\n \"\"\"Runs HMC on the text-messages unnormalized posterior.\"\"\"\n\n if not tf.executing_eagerly():\n tf.compat.v1.reset_default_graph()\n\n # Build a static, pretend dataset.\n arg_4 = tf.cast(\n tf.concat(\n [tfd.Poisson(rate=15.).sample(43),\n tfd.Poisson(rate=25.).sample(31)],\n axis=0),\n dtype=tf.float32)\n if tf.executing_eagerly():\n arg_4 = arg_4.numpy()\n else:\n with tf.compat.v1.Session():\n arg_4 = arg_4.eval()\n\n # Define a closure over our joint_log_prob.\n def unnormalized_log_posterior(arg_5, arg_6, arg_7):\n return text_messages_joint_log_prob(arg_4, arg_5, arg_6, arg_7)\n\n if tf.executing_eagerly():\n arg_8 = tf.function(tfp.mcmc.sample_chain)\n else:\n arg_8 = tfp.mcmc.sample_chain\n\n # Initialize the step_size. 
(It will be automatically adapted.)\n arg_9 = tf.compat.v2.Variable(\n name='step_size',\n initial_value=tf.constant(0.05, dtype=tf.float32),\n trainable=False)\n\n def computation():\n \"\"\"The benchmark computation.\"\"\"\n\n arg_10 = [\n tf.constant(arg_4.mean(), name='init_lambda1'),\n tf.constant(arg_4.mean(), name='init_lambda2'),\n tf.constant(0.5, name='init_tau'),\n ]\n\n arg_11 = [\n tfp.bijectors.Exp(), # Maps a positive real to R.\n tfp.bijectors.Exp(), # Maps a positive real to R.\n tfp.bijectors.Sigmoid(), # Maps [0,1] to R.\n ]\n\n arg_12, arg_13 = arg_8(\n arg_0=arg_0,\n arg_2=arg_2,\n current_state=arg_10,\n kernel=tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=unnormalized_log_posterior,\n arg_3=arg_3,\n arg_9=arg_9,\n step_size_update_fn=\n tfp.mcmc.make_simple_step_size_update_policy(arg_2),\n state_gradients_are_stopped=True),\n bijector=arg_11))\n\n return arg_13.inner_results.is_accepted\n\n # Let's force evaluation of graph to ensure build time is not part of our time\n # trial.\n arg_14 = computation()\n if not tf.executing_eagerly():\n arg_15 = tf.compat.v1.Session()\n arg_15.run(tf.compat.v1.global_variables_initializer())\n arg_15.run(arg_14)\n\n arg_16 = time.time()\n if tf.executing_eagerly():\n arg_17 = computation()\n else:\n arg_17 = arg_15.run(arg_14)\n arg_18 = time.time() - arg_16\n\n arg_19 = np.sum(arg_17)\n arg_20 = np.float32(arg_19) / np.float32(arg_0)\n\n return dict(\n iters=(arg_0 + arg_2) * arg_3,\n extras={'acceptance_rate': arg_20},\n arg_18=arg_18)"} +{"_id": "doc_633", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Compute the marginal of this GP over function values at `index_points`.\n\n Args:\n index_points: `float` `Tensor` representing finite (batch of) vector(s) of\n points in the index set over which the GP is defined. Shape has the form\n `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e` is the number\n (size) of index points in each batch. Ultimately this distribution\n corresponds to a `e`-dimensional multivariate normal. The batch shape\n must be broadcastable with `kernel.batch_shape` and any batch dims\n yielded by `mean_fn`.\n\n Returns:\n marginal: a `Normal` or `MultivariateNormalLinearOperator` distribution,\n according to whether `index_points` consists of one or many index\n points, respectively.\n \"\"\"\n with arg_0._name_scope('Func'):\n # TODO(cgs): consider caching the result here, keyed on `index_points`.\n arg_1 = arg_0._get_index_points(arg_1)\n arg_2 = arg_0._compute_covariance(arg_1)\n arg_3 = arg_0._mean_fn(arg_1)\n # If we're sure the number of index points is 1, we can just construct a\n # scalar Normal. 
This has computational benefits and supports things like\n # CDF that aren't otherwise straightforward to provide.\n if arg_0._is_univariate_marginal(arg_1):\n arg_4 = tf.sqrt(arg_2)\n # `loc` has a trailing 1 in the shape; squeeze it.\n arg_3 = tf.squeeze(arg_3, axis=-1)\n return normal.Normal(\n arg_3=arg_3,\n arg_4=arg_4,\n validate_args=arg_0._validate_args,\n allow_nan_stats=arg_0._allow_nan_stats,\n name='marginal_distribution')\n else:\n arg_4 = tf.linalg.LinearOperatorLowerTriangular(\n tf.linalg.cholesky(_add_diagonal_shift(arg_2, arg_0.jitter)),\n is_non_singular=True,\n name='GaussianProcessScaleLinearOperator')\n return mvn_linear_operator.MultivariateNormalLinearOperator(\n arg_3=arg_3,\n arg_4=arg_4,\n validate_args=arg_0._validate_args,\n allow_nan_stats=arg_0._allow_nan_stats,\n name='marginal_distribution')"} +{"_id": "doc_634", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return `index_points` if not None, else `self._index_points`.\n\n Args:\n index_points: if given, this is what is returned; else,\n `self._index_points`\n\n Returns:\n index_points: the given arg, if not None, else the class member\n `self._index_points`.\n\n Rases:\n ValueError: if `index_points` and `self._index_points` are both `None`.\n \"\"\"\n if arg_0._index_points is None and arg_1 is None:\n raise ValueError(\n 'This GaussianProcess instance was not instantiated with a value for '\n 'index_points. One must therefore be provided when calling sample, '\n 'log_prob, and other such methods. In particular, one can\\'t compute '\n 'KL divergences to/from an instance of `GaussianProccess` with '\n 'unspecified `index_points` directly. Instead, use the '\n '`get_marginal_distribution` function, which takes `index_points` as '\n 'an argument and returns a `Normal` or '\n '`MultivariateNormalLinearOperator` instance, whose KL can be '\n 'computed.')\n return arg_1 if arg_1 is not None else arg_0._index_points"} +{"_id": "doc_635", "title": "", "text": "def Func(arg_0,\n arg_1=2,\n arg_2=None,\n arg_3=arg_4.float32):\n \"\"\"Creates an stacked IAF bijector.\n\n This bijector operates on vector-valued events.\n\n Args:\n total_event_size: Number of dimensions to operate over.\n num_hidden_layers: How many hidden layers to use in each IAF.\n seed: Random seed for the initializers.\n dtype: DType for the variables.\n\n Returns:\n bijector: The created bijector.\n \"\"\"\n\n arg_2 = tfd.SeedStream(arg_2, 'Func')\n\n def make_iaf():\n \"\"\"Create an IAF.\"\"\"\n arg_6 = arg_4.compat.v2.keras.initializers.VarianceScaling(\n 2 * 0.01, arg_2=arg_2() % (2**31 - 1))\n\n arg_7 = tfb.AutoregressiveLayer(\n params=2,\n event_shape=[arg_0],\n hidden_units=[arg_0] * arg_1,\n activation=arg_4.nn.elu,\n kernel_initializer=arg_6,\n arg_3=arg_3)\n\n def shift_and_scale(arg_8):\n # TODO(siege): Something is losing the static shape.\n arg_8.set_shape(\n arg_8.shape.merge_with([None] * (arg_8.shape.ndims - 1) + [arg_0]))\n return arg_4.unstack(arg_7(arg_8), num=2, axis=-1)\n\n return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))\n\n def make_swap():\n \"\"\"Create an swap.\"\"\"\n arg_9 = list(reversed(range(arg_0)))\n return tfb.Permute(arg_9)\n\n arg_10 = make_iaf()\n arg_10 = make_swap()(arg_10)\n arg_10 = make_iaf()(arg_10)\n arg_10 = make_swap()(arg_10)\n arg_10 = make_iaf()(arg_10)\n arg_10 = make_swap()(arg_10)\n\n return arg_10"} +{"_id": "doc_636", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Runs one iteration of NeuTra.\n\n Args:\n current_state: `Tensor` or Python `list` of 
`Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n previous_kernel_results: `collections.namedtuple` containing `Tensor`s\n representing values from previous calls to this function (or from the\n `bootstrap_results` function.)\n\n Returns:\n next_state: Tensor or Python list of `Tensor`s representing the state(s)\n of the Markov chain(s) after taking exactly one step. Has same type and\n shape as `current_state`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n \"\"\"\n\n @tfp.mcmc.internal.util.make_innermost_setter\n def set_num_leapfrog_steps(arg_3, arg_4):\n return arg_3._replace(\n accepted_results=arg_3.accepted_results._replace(\n arg_4=arg_4))\n\n arg_5 = arg_2.new_step_size\n arg_2 = set_num_leapfrog_steps(\n arg_2, arg_0._num_leapfrog_steps(arg_5))\n\n arg_6, arg_3 = arg_0._kernel.Func(\n arg_0._flatten_state(arg_1), arg_2)\n return arg_0._unflatten_state(arg_6), arg_3"} +{"_id": "doc_637", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Trains the bijector and creates initial `previous_kernel_results`.\n\n The supplied `state` is only used to determine the number of chains to run\n in parallel_iterations\n\n Args:\n state: `Tensor` or Python `list` of `Tensor`s representing the initial\n state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*state))`.\n\n Returns:\n kernel_results: Instance of\n `UncalibratedHamiltonianMonteCarloKernelResults` inside\n `MetropolisHastingsResults` inside `TransformedTransitionKernelResults`\n inside `SimpleStepSizeAdaptationResults`.\n \"\"\"\n\n def loss():\n arg_2 = arg_0._flattened_variational_distribution()\n # TODO(siege): How to seed this?\n arg_3 = arg_2.sample(arg_0.train_batch_size)\n return tf.reduce_mean(\n input_tensor=arg_2.log_prob(arg_3) -\n arg_0._flattened_target_log_prob(arg_3),\n axis=-1)\n\n arg_4 = tf.convert_to_tensor(value=arg_0.learning_rate, arg_5=arg_0._dtype)\n arg_5 = arg_4.dtype\n\n arg_6 = tf.compat.v2.optimizers.schedules.PiecewiseConstantDecay(\n list(arg_0.num_train_steps *\n np.array([0.2, 0.8]).astype(arg_5.as_numpy_dtype())),\n [arg_4, arg_4 * 0.1, arg_4 * 0.01])\n\n arg_7 = tf.compat.v2.optimizers.Adam(arg_6)\n\n @tf.function(autograph=False)\n def train_step():\n with tf.GradientTape() as tape:\n arg_8 = loss()\n arg_9 = tape.watched_variables()\n arg_10 = tape.gradient(arg_8, arg_9)\n arg_11 = list(zip(arg_10, arg_9))\n arg_7.apply_gradients(arg_11)\n return arg_8\n\n for arg_12 in range(arg_0.num_train_steps):\n arg_8 = train_step()\n tf.debugging.assert_all_finite(\n arg_8, 'NeuTra loss is NaN at step {}'.format(arg_12))\n if arg_0.train_debug_fn:\n # pylint: disable=not-callable\n arg_0.train_debug_fn(arg_0, arg_12, arg_8)\n\n arg_13 = tf.nest.flatten(arg_1)\n arg_14 = tf.nest.flatten(arg_0.state_shape)\n arg_15 = tf.shape(input=arg_13[0])[:-arg_14[0].ndims]\n\n return arg_0._kernel.Func(\n arg_0._flattened_variational_distribution().sample(\n arg_15, seed=arg_0.seed))"} +{"_id": "doc_638", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convenience function analogous to tf.squared_difference.\"\"\"\n arg_2 = arg_0 - arg_1\n return arg_2[..., tf.newaxis, :] * arg_2[..., tf.newaxis]"} +{"_id": "doc_639", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Performs distributional transform of the mixture samples.\n\n Distributional transform removes the 
parameters from samples of a\n multivariate distribution by applying conditional CDFs:\n (F(x_1), F(x_2 | x1_), ..., F(x_d | x_1, ..., x_d-1))\n (the indexing is over the \"flattened\" event dimensions).\n The result is a sample of product of Uniform[0, 1] distributions.\n\n We assume that the components are factorized, so the conditional CDFs become\n F(x_i | x_1, ..., x_i-1) = sum_k w_i^k F_k (x_i),\n where w_i^k is the posterior mixture weight: for i > 0\n w_i^k = w_k prob_k(x_1, ..., x_i-1) / sum_k' w_k' prob_k'(x_1, ..., x_i-1)\n and w_0^k = w_k is the mixture probability of the k-th component.\n\n Arguments:\n x: Sample of mixture distribution\n\n Returns:\n Result of the distributional transform\n \"\"\"\n\n if tensorshape_util.rank(arg_1.shape) is None:\n # tf.nn.softmax raises an error when applied to inputs of undefined rank.\n raise ValueError(\"Distributional transform does not support inputs of \"\n \"undefined rank.\")\n\n # Obtain factorized components distribution and assert that it's\n # a scalar distribution.\n if isinstance(arg_0._components_distribution, independent.Independent):\n arg_2 = arg_0._components_distribution.distribution\n else:\n arg_2 = arg_0._components_distribution\n\n with tf.control_dependencies([\n assert_util.assert_equal(\n arg_2.is_scalar_event(),\n True,\n message=\"`univariate_components` must have scalar event\")\n ]):\n arg_3 = arg_0._pad_sample_dims(arg_1) # [S, B, 1, E]\n arg_4 = arg_2.log_prob(arg_3) # [S, B, k, E]\n arg_5 = arg_2.cdf(arg_3) # [S, B, k, E]\n\n # log prob_k (x_1, ..., x_i-1)\n arg_6 = tf.reshape(\n tf.math.cumsum(\n # [S*prod(B)*k, prod(E)]\n tf.reshape(arg_4, [-1, arg_0._event_size]),\n exclusive=True,\n axis=-1),\n tf.shape(input=arg_4)) # [S, B, k, E]\n\n arg_7 = distribution_utils.pad_mixture_dimensions(\n arg_0.mixture_distribution.logits, arg_0, arg_0.mixture_distribution,\n arg_0._event_ndims) # [B, k, 1]\n\n # Logits of the posterior weights: log w_k + log prob_k (x_1, ..., x_i-1)\n arg_8 = arg_7 + arg_6\n\n arg_9 = tensorshape_util.rank(arg_1.shape) - arg_0._event_ndims\n arg_10 = tf.nn.softmax(arg_8,\n axis=arg_9)\n return tf.reduce_sum(\n input_tensor=arg_10 * arg_5, axis=arg_9)"} +{"_id": "doc_640", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3):\n \"\"\"Utility method to decompose a joint posterior into components.\n\n Args:\n model: `tfp.sts.Sum` instance defining an additive STS model.\n posterior_means: float `Tensor` of shape `concat(\n [[num_posterior_draws], batch_shape, num_timesteps, latent_size])`\n representing the posterior mean over latents in an\n `AdditiveStateSpaceModel`.\n posterior_covs: float `Tensor` of shape `concat(\n [[num_posterior_draws], batch_shape, num_timesteps,\n latent_size, latent_size])`\n representing the posterior marginal covariances over latents in an\n `AdditiveStateSpaceModel`.\n parameter_samples: Python `list` of `Tensors` representing posterior\n samples of model parameters, with shapes `[concat([\n [num_posterior_draws], param.prior.batch_shape,\n param.prior.event_shape]) for param in model.parameters]`. This may\n optionally also be a map (Python `dict`) of parameter names to\n `Tensor` values.\n\n Returns:\n component_dists: A `collections.OrderedDict` instance mapping\n component StructuralTimeSeries instances (elements of `model.components`)\n to `tfd.Distribution` instances representing the posterior marginal\n distributions on the process modeled by each component. 
Each distribution\n has batch shape matching that of `posterior_means`/`posterior_covs`, and\n event shape of `[num_timesteps]`.\n \"\"\"\n\n try:\n arg_0.components\n except AttributeError:\n raise ValueError('Model decomposed into components must be an instance of'\n '`tfp.sts.Sum` (passed model {})'.format(arg_0))\n\n with tf.compat.v1.name_scope('decompose_from_posterior_marginals'):\n\n # Extract the component means/covs from the joint latent posterior.\n arg_4 = [arg_10.latent_size for arg_10 in arg_0.components]\n arg_5 = tf.split(arg_1, arg_4, axis=-1)\n arg_6 = _split_covariance_into_marginals(\n arg_2, arg_4)\n\n # Instantiate per-component state space models, and use them to push the\n # posterior means/covs through the observation model for each component.\n arg_7 = dist_util.prefer_static_value(\n tf.shape(input=arg_1))[-2]\n arg_8 = arg_0.make_component_state_space_models(\n arg_7=arg_7,\n param_vals=arg_3)\n arg_9 = collections.OrderedDict()\n for (arg_10, arg_11,\n arg_12, arg_13) in zip(arg_0.components, arg_8,\n arg_5, arg_6):\n arg_14, arg_15 = (\n arg_11.latents_to_observations(\n latent_means=arg_12,\n latent_covs=arg_13))\n\n # Using the observation means and covs, build a mixture distribution\n # that integrates over the posterior draws.\n arg_9[arg_10] = sts_util.mix_over_posterior_draws(\n means=arg_14[..., 0],\n variances=arg_15[..., 0, 0])\n return arg_9"} +{"_id": "doc_641", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Decompose a forecast distribution into contributions from each component.\n\n Args:\n model: An instance of `tfp.sts.Sum` representing a structural time series\n model.\n forecast_dist: A `Distribution` instance returned by `tfp.sts.forecast()`.\n (specifically, must be a `tfd.MixtureSameFamily` over a\n `tfd.LinearGaussianStateSpaceModel` parameterized by posterior samples).\n parameter_samples: Python `list` of `Tensors` representing posterior samples\n of model parameters, with shapes `[concat([[num_posterior_draws],\n param.prior.batch_shape, param.prior.event_shape]) for param in\n model.parameters]`. This may optionally also be a map (Python `dict`) of\n parameter names to `Tensor` values.\n Returns:\n component_forecasts: A `collections.OrderedDict` instance mapping\n component StructuralTimeSeries instances (elements of `model.components`)\n to `tfd.Distribution` instances representing the marginal forecast for\n each component. 
Each distribution has batch and event shape matching\n `forecast_dist` (specifically, the event shape is\n `[num_steps_forecast]`).\n\n #### Examples\n\n Suppose we've built a model, fit it to data, and constructed a forecast\n distribution:\n\n ```python\n day_of_week = tfp.sts.Seasonal(\n num_seasons=7,\n observed_time_series=observed_time_series,\n name='day_of_week')\n local_linear_trend = tfp.sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n name='local_linear_trend')\n model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],\n observed_time_series=observed_time_series)\n\n num_steps_forecast = 50\n samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)\n forecast_dist = tfp.sts.forecast(model, observed_time_series,\n parameter_samples=samples,\n num_steps_forecast=num_steps_forecast)\n ```\n\n To extract the forecast for individual components, pass the forecast\n distribution into `Funcs`:\n\n ```python\n component_forecasts = Func(\n model, forecast_dist, samples)\n\n # Component mean and stddev have shape `[num_steps_forecast]`.\n day_of_week_effect_mean = forecast_components[day_of_week].mean()\n day_of_week_effect_stddev = forecast_components[day_of_week].stddev()\n ```\n\n Using the component forecasts, we can visualize the uncertainty for each\n component:\n\n ```\n from matplotlib import pylab as plt\n num_components = len(component_forecasts)\n xs = np.arange(num_steps_forecast)\n fig = plt.figure(figsize=(12, 3 * num_components))\n for i, (component, component_dist) in enumerate(component_forecasts.items()):\n\n # If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.\n component_mean = component_dist.mean().numpy()\n component_stddev = component_dist.stddev().numpy()\n\n ax = fig.add_subplot(num_components, 1, 1 + i)\n ax.plot(xs, component_mean, lw=2)\n ax.fill_between(xs,\n component_mean - 2 * component_stddev,\n component_mean + 2 * component_stddev,\n alpha=0.5)\n ax.set_title(component.name)\n ```\n\n \"\"\"\n\n with tf.compat.v1.name_scope('Func'):\n try:\n arg_3 = arg_1.components_distribution\n arg_4, arg_5 = arg_3._joint_mean() # pylint: disable=protected-access\n arg_6, arg_5 = arg_3._joint_covariances() # pylint: disable=protected-access\n except AttributeError as e:\n raise ValueError(\n 'Forecast distribution must be a MixtureSameFamily of'\n 'LinearGaussianStateSpaceModel distributions, such as returned by'\n '`tfp.sts.forecast()`. (saw exception: {})'.format(e))\n\n # Since `parameter_samples` will have sample shape `[num_posterior_draws]`,\n # we need to move the `num_posterior_draws` dimension of the forecast\n # moments from the trailing batch dimension, where it's currently put by\n # `sts.forecast`, back to the leading (sample shape) dimension.\n arg_4 = dist_util.move_dimension(\n arg_4, source_idx=-3, dest_idx=0)\n arg_6 = dist_util.move_dimension(\n arg_6, source_idx=-4, dest_idx=0)\n\n return _decompose_from_posterior_marginals(\n arg_0, arg_4, arg_6, arg_2)"} +{"_id": "doc_642", "title": "", "text": "def Func(arg_0):\n \"\"\"Get tensor that the random variable corresponds to.\"\"\"\n if arg_0._Func is None:\n try:\n arg_0._Func = arg_0.distribution.sample(arg_0.sample_shape_tensor())\n except NotImplementedError:\n raise NotImplementedError(\n \"sample is not implemented for {0}. 
You must either pass in the \"\n \"Func argument or implement sample for {0}.\"\n .format(arg_0.distribution.__class__.__name__))\n return arg_0._Func"} +{"_id": "doc_643", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"In a session, computes and returns the value of this random variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used.\n\n Args:\n session: tf.BaseSession.\n The `tf.Session` to use to Funcuate this random variable. If\n none, the default session is used.\n feed_dict: dict.\n A dictionary that maps `tf.Tensor` objects to feed values. See\n `tf.Session.run()` for a description of the valid feed values.\n\n Returns:\n Value of the random variable.\n\n #### Examples\n\n ```python\n x = Normal(0.0, 1.0)\n with tf.Session() as sess:\n # Usage passing the session explicitly.\n print(x.Func(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(x.Func())\n ```\n \"\"\"\n return arg_0.value.Func(arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_644", "title": "", "text": "def Func(arg_0):\n \"\"\"Value as NumPy array, only available for TF Eager.\"\"\"\n if not isinstance(arg_0.value, ops.EagerTensor):\n raise NotImplementedError(\"value argument must be a EagerTensor.\")\n\n return arg_0.value.Func()"} +{"_id": "doc_645", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Posterior Normal distribution with conjugate prior on the mean.\n\n This model assumes that `n` observations (with sum `s`) come from a\n Normal with unknown mean `loc` (described by the Normal `prior`)\n and known variance `scale**2`. The \"known scale posterior\" is\n the distribution of the unknown `loc`.\n\n Accepts a prior Normal distribution object, having parameters\n `loc0` and `scale0`, as well as known `scale` values of the predictive\n distribution(s) (also assumed Normal),\n and statistical estimates `s` (the sum(s) of the observations) and\n `n` (the number(s) of observations).\n\n Returns a posterior (also Normal) distribution object, with parameters\n `(loc', scale'**2)`, where:\n\n ```\n mu ~ N(mu', sigma'**2)\n sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),\n mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.\n ```\n\n Distribution parameters from `prior`, as well as `scale`, `s`, and `n`.\n will broadcast in the case of multidimensional sets of parameters.\n\n Args:\n prior: `Normal` object of type `dtype`:\n the prior distribution having parameters `(loc0, scale0)`.\n scale: tensor of type `dtype`, taking values `scale > 0`.\n The known stddev parameter(s).\n s: Tensor of type `dtype`. The sum(s) of observations.\n n: Tensor of type `int`. The number(s) of observations.\n\n Returns:\n A new Normal posterior distribution object for the unknown observation\n mean `loc`.\n\n Raises:\n TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a\n Normal object.\n \"\"\"\n if not isinstance(arg_0, normal.Normal):\n raise TypeError(\"Expected prior to be an instance of type Normal\")\n\n if arg_2.dtype != arg_0.dtype:\n raise TypeError(\n \"Observation sum s.dtype does not match prior dtype: %s vs. 
%s\"\n % (arg_2.dtype, arg_0.dtype))\n\n arg_3 = tf.cast(arg_3, arg_0.dtype)\n arg_4 = tf.square(arg_0.scale)\n arg_5 = tf.square(arg_1)\n arg_6 = 1.0/(1/arg_4 + arg_3/arg_5)\n return normal.Normal(\n loc=(arg_0.loc / arg_4 + arg_2 / arg_5) * arg_6,\n arg_1=tf.sqrt(arg_6))"} +{"_id": "doc_646", "title": "", "text": "def Func(arg_0,\n arg_1=False,\n arg_2=arg_3.nn.relu,\n arg_6=None,\n *arg_7, # pylint: disable=keyword-arg-before-vararg\n **arg_8):\n \"\"\"Build a scale-and-shift function using a multi-layer neural network.\n\n This will be wrapped in a make_template to ensure the variables are only\n created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`\n dimensional outputs `loc` (\"mu\") and `log_scale` (\"alpha\").\n\n The default template does not support conditioning and will raise an\n exception if `condition_kwargs` are passed to it. To use conditioning in\n real nvp bijector, implement a conditioned shift/scale template that\n handles the `condition_kwargs`.\n\n Arguments:\n hidden_layers: Python `list`-like of non-negative integer, scalars\n indicating the number of units in each hidden layer. Default: `[512, 512].\n shift_only: Python `bool` indicating if only the `shift` term shall be\n computed (i.e. NICE bijector). Default: `False`.\n activation: Activation function (callable). Explicitly setting to `None`\n implies a linear activation.\n name: A name for ops managed by this function. Default:\n \"Func\".\n *args: `tf.layers.dense` arguments.\n **kwargs: `tf.layers.dense` keyword arguments.\n\n Returns:\n shift: `Float`-like `Tensor` of shift terms (\"mu\" in\n [Papamakarios et al. (2016)][1]).\n log_scale: `Float`-like `Tensor` of log(scale) terms (\"alpha\" in\n [Papamakarios et al. (2016)][1]).\n\n Raises:\n NotImplementedError: if rightmost dimension of `inputs` is unknown prior to\n graph execution, or if `condition_kwargs` is not empty.\n\n #### References\n\n [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked\n Autoregressive Flow for Density Estimation. In _Neural Information\n Processing Systems_, 2017. 
https://arxiv.org/abs/1705.07057\n \"\"\"\n\n with arg_3.compat.v2.name_scope(arg_6 or \"Func\"):\n\n def _fn(arg_9, arg_10, **arg_11):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if arg_11:\n raise NotImplementedError(\n \"Conditioning not implemented in the default template.\")\n\n if tensorshape_util.rank(arg_9.shape) == 1:\n arg_9 = arg_9[arg_3.newaxis, ...]\n arg_12 = lambda arg_9: arg_9[0]\n else:\n arg_12 = lambda arg_9: arg_9\n for arg_13 in arg_0:\n arg_9 = arg_3.compat.v1.layers.dense(\n inputs=arg_9,\n arg_13=arg_13,\n arg_2=arg_2,\n *arg_7, # pylint: disable=keyword-arg-before-vararg\n **arg_8)\n arg_9 = arg_3.compat.v1.layers.dense(\n inputs=arg_9,\n arg_13=(1 if arg_1 else 2) * arg_10,\n arg_2=None,\n *arg_7, # pylint: disable=keyword-arg-before-vararg\n **arg_8)\n if arg_1:\n return arg_12(arg_9), None\n arg_14, arg_15 = arg_3.split(arg_9, 2, axis=-1)\n return arg_12(arg_14), arg_12(arg_15)\n\n return arg_3.compat.v1.make_template(\"Func\", _fn)"} +{"_id": "doc_647", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Returns a batch of points chosen uniformly from the unit hypersphere.\"\"\"\n # This works because the Gaussian distribution is spherically symmetric.\n # raw shape: shape + [dimension]\n arg_4 = normal.Normal(\n loc=dtype_util.as_numpy_dtype(arg_2)(0),\n scale=dtype_util.as_numpy_dtype(arg_2)(1)).sample(\n tf.concat([arg_1, [arg_0]], axis=0), arg_3=arg_3())\n arg_5 = arg_4 / tf.norm(tensor=arg_4, ord=2, axis=-1)[..., tf.newaxis]\n return arg_5"} +{"_id": "doc_648", "title": "", "text": "def Func(arg_0, arg_1='log_normalization'):\n \"\"\"Returns the log normalization of an LKJ distribution.\n\n Args:\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n log_z: A Tensor of the same shape and dtype as `concentration`, containing\n the corresponding log normalizers.\n \"\"\"\n # The formula is from D. Lewandowski et al [1], p. 
1999, from the\n # proof that eqs 16 and 17 are equivalent.\n with tf.name_scope(arg_1 or 'log_normalization_lkj'):\n arg_2 = np.log(np.pi)\n arg_3 = tf.zeros_like(arg_0.concentration)\n for arg_4 in range(1, arg_0.dimension):\n arg_3 += arg_2 * (arg_4 / 2.)\n arg_3 += tf.math.lgamma(arg_0.concentration +\n (arg_0.dimension - 1 - arg_4) / 2.)\n arg_3 -= tf.math.lgamma(arg_0.concentration + (arg_0.dimension - 1) / 2.)\n return arg_3"} +{"_id": "doc_649", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Returns explict dtype from `args_list` if exists, else preferred_dtype.\"\"\"\n arg_2 = None\n arg_1 = (None if arg_1 is None\n else tf.as_dtype(arg_1))\n for arg_3 in tf.nest.flatten(arg_0):\n if hasattr(arg_3, 'dtype'):\n arg_4 = tf.as_dtype(arg_3.dtype)\n else:\n continue\n if arg_2 is None:\n arg_2 = arg_4\n elif arg_2 != arg_4:\n raise TypeError('Found incompatible dtypes, {} and {}.'.format(arg_2, arg_4))\n if arg_2 is None and arg_1 is None:\n return None\n return (arg_1 if arg_2 is None else arg_2).as_numpy_dtype"} +{"_id": "doc_650", "title": "", "text": "def Func(arg_0):\n \"\"\"Factory for implementing summary statistics, eg, mean, stddev, mode.\"\"\"\n def _fn(arg_1, **arg_2):\n \"\"\"Implements summary statistic, eg, mean, stddev, mode.\"\"\"\n arg_3 = getattr(arg_1.distribution, arg_0)(**arg_2)\n arg_4 = prefer_static.concat([\n arg_1.distribution.batch_shape_tensor(),\n prefer_static.ones(prefer_static.rank_from_shape(arg_1.sample_shape),\n dtype=arg_1.sample_shape.dtype),\n arg_1.distribution.event_shape_tensor(),\n ], axis=0)\n arg_3 = tf.reshape(arg_3, arg_4=arg_4)\n arg_4 = prefer_static.concat([\n arg_1.distribution.batch_shape_tensor(),\n arg_1.sample_shape,\n arg_1.distribution.event_shape_tensor(),\n ], axis=0)\n return tf.broadcast_to(arg_3, arg_4)\n return _fn"} +{"_id": "doc_651", "title": "", "text": "def Func(arg_0,\n arg_1=0.,\n arg_2=None,\n arg_3=None):\n \"\"\"Estimate a lower bound on effective sample size for each independent chain.\n\n Roughly speaking, \"effective sample size\" (ESS) is the size of an iid sample\n with the same variance as `state`.\n\n More precisely, given a stationary sequence of possibly correlated random\n variables `X_1, X_2,...,X_N`, each identically distributed ESS is the number\n such that\n\n ```Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.```\n\n If the sequence is uncorrelated, `ESS = N`. In general, one should expect\n `ESS <= N`, with more highly correlated sequences having smaller `ESS`.\n\n Args:\n states: `Tensor` or list of `Tensor` objects. Dimension zero should index\n identically distributed states.\n filter_threshold: `Tensor` or list of `Tensor` objects.\n Must broadcast with `state`. The auto-correlation sequence is truncated\n after the first appearance of a term less than `filter_threshold`.\n Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,\n setting to any number less than `-1` has the same effect.\n filter_beyond_lag: `Tensor` or list of `Tensor` objects. Must be\n `int`-like and scalar valued. The auto-correlation sequence is truncated\n to this length. Setting to `None` means we do not filter based on number\n of lags.\n name: `String` name to prepend to created ops.\n\n Returns:\n ess: `Tensor` or list of `Tensor` objects. The effective sample size of\n each component of `states`. 
Shape will be `states.shape[1:]`.\n\n Raises:\n ValueError: If `states` and `filter_threshold` or `states` and\n `filter_beyond_lag` are both lists with different lengths.\n\n #### Examples\n\n We use ESS to estimate standard error.\n\n ```\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])\n\n # Get 1000 states from one chain.\n states = tfp.mcmc.sample_chain(\n num_burnin_steps=200,\n num_results=1000,\n current_state=tf.constant([0., 0.]),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=0.05,\n num_leapfrog_steps=20))\n states.shape\n ==> (1000, 2)\n\n ess = Func(states)\n ==> Shape (2,) Tensor\n\n mean, variance = tf.nn.moments(states, axis=0)\n standard_error = tf.sqrt(variance / ess)\n ```\n\n Some math shows that, with `R_k` the auto-correlation sequence,\n `R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have\n\n ```ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]```\n\n This function estimates the above by first estimating the auto-correlation.\n Since `R_k` must be estimated using only `N - k` samples, it becomes\n progressively noisier for larger `k`. For this reason, the summation over\n `R_k` should be truncated at some number `filter_beyond_lag < N`. Since many\n MCMC methods generate chains where `R_k > 0`, a reasonable criteria is to\n truncate at the first index where the estimated auto-correlation becomes\n negative.\n\n The arguments `filter_beyond_lag`, `filter_threshold` are filters intended to\n remove noisy tail terms from `R_k`. They combine in an \"OR\" manner meaning\n terms are removed if they were to be filtered under the `filter_beyond_lag` OR\n `filter_threshold` criteria.\n \"\"\"\n arg_4 = _is_list_like(arg_0)\n\n # Convert all args to lists.\n if not arg_4:\n arg_0 = [arg_0]\n\n arg_2 = _broadcast_maybelist_arg(arg_0, arg_2,\n 'filter_beyond_lag')\n arg_1 = _broadcast_maybelist_arg(arg_0, arg_1,\n 'filter_threshold')\n\n # Process items, one at a time.\n with tf.compat.v1.name_scope(arg_3, 'Func'):\n arg_5 = [\n _Func_single_state(s, ml, mlt)\n for (s, ml, mlt) in zip(arg_0, arg_2, arg_1)\n ]\n\n if arg_4:\n return arg_5\n return arg_5[0]"} +{"_id": "doc_652", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2):\n \"\"\"ESS computation for one single Tensor argument.\"\"\"\n\n with tf.compat.v1.name_scope(\n 'effective_sample_size_single_state',\n values=[arg_0, arg_1, arg_2]):\n\n arg_0 = tf.convert_to_tensor(value=arg_0, name='states')\n arg_3 = arg_0.dtype\n\n # filter_beyond_lag == None ==> auto_corr is the full sequence.\n arg_4 = stats.auto_correlation(\n arg_0, axis=0, max_lags=arg_1)\n if arg_2 is not None:\n arg_2 = tf.convert_to_tensor(\n value=arg_2, dtype=arg_3, name='filter_threshold')\n # Get a binary mask to zero out values of auto_corr below the threshold.\n # mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,\n # mask[i, ...] = 0, otherwise.\n # So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]\n # Building step by step,\n # Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.\n # Step 1: mask = [False, False, True, False]\n arg_5 = arg_4 < arg_2\n # Step 2: mask = [0, 0, 1, 1]\n arg_5 = tf.cast(arg_5, dtype=arg_3)\n # Step 3: mask = [0, 0, 1, 2]\n arg_5 = tf.cumsum(arg_5, axis=0)\n # Step 4: mask = [1, 1, 0, 0]\n arg_5 = tf.maximum(1. 
- arg_5, 0.)\n arg_4 *= arg_5\n\n # With R[k] := auto_corr[k, ...],\n # ESS = N / {1 + 2 * Sum_{k=1}^N (N - k) / N * R[k]}\n # = N / {-1 + 2 * Sum_{k=0}^N (N - k) / N * R[k]} (since R[0] = 1)\n # approx N / {-1 + 2 * Sum_{k=0}^M (N - k) / N * R[k]}\n # where M is the filter_beyond_lag truncation point chosen above.\n\n # Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total\n # ndims the same as auto_corr\n arg_6 = _axis_size(arg_0, axis=0)\n arg_7 = tf.range(0., _axis_size(arg_4, axis=0))\n arg_8 = (arg_6 - arg_7) / arg_6\n if arg_4.shape.ndims is not None:\n arg_9 = [-1] + [1] * (arg_4.shape.ndims - 1)\n else:\n arg_9 = tf.concat(\n ([-1],\n tf.ones([tf.rank(arg_4) - 1], dtype=tf.int32)),\n axis=0)\n arg_8 = tf.reshape(arg_8, arg_9)\n\n return arg_6 / (-1 +\n 2 * tf.reduce_sum(input_tensor=arg_8 * arg_4, axis=0))"} +{"_id": "doc_653", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"potential_scale_reduction for one single state `Tensor`.\"\"\"\n with tf.compat.v1.name_scope(\n 'potential_scale_reduction_single_state',\n values=[arg_0, arg_1]):\n # We assume exactly one leading dimension indexes e.g. correlated samples\n # from each Markov chain.\n arg_0 = tf.convert_to_tensor(value=arg_0, name='state')\n arg_2 = 1\n\n arg_3 = tf.range(0, arg_2)\n arg_4 = tf.range(arg_2,\n arg_2 + arg_1)\n arg_5 = tf.range(\n 0, arg_2 + arg_1)\n\n arg_6 = _axis_size(arg_0, arg_3)\n arg_7 = _axis_size(arg_0, arg_4)\n\n # In the language of Brooks and Gelman (1998),\n # B / n is the between chain variance, the variance of the chain means.\n # W is the within sequence variance, the mean of the chain variances.\n arg_8 = _reduce_variance(\n tf.reduce_mean(input_tensor=arg_0, axis=arg_3, keepdims=True),\n arg_5,\n biased=False)\n arg_9 = tf.reduce_mean(\n input_tensor=_reduce_variance(\n arg_0, arg_3, keepdims=True, biased=True),\n axis=arg_5)\n\n # sigma^2_+ is an estimate of the true variance, which would be unbiased if\n # each chain was drawn from the target. c.f. \"law of total variance.\"\n arg_10 = arg_9 + arg_8\n\n return ((arg_7 + 1.) / arg_7) * arg_10 / arg_9 - (arg_6 - 1.) / (arg_7 * arg_6)"} +{"_id": "doc_654", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Broadcast a listable secondary_arg to that of states.\"\"\"\n if _is_list_like(arg_1):\n if len(arg_1) != len(arg_0):\n raise ValueError('Argument `%s` was a list of different length ({}) than '\n '`states` ({})'.format(arg_2, len(arg_0)))\n else:\n arg_1 = [arg_1] * len(arg_0)\n\n return arg_1"} +{"_id": "doc_655", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2,\n arg_3=False, arg_4=None):\n \"\"\"Use LogNormal quantiles to form quadrature on positive-reals.\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n grid: (Batch of) length-`quadrature_size` vectors representing the\n `log_rate` parameters of a `Poisson`.\n probs: (Batch of) length-`quadrature_size` vectors representing the\n weight associate with each `grid` value.\n \"\"\"\n with tf.name_scope(arg_4 or \"Func\"):\n # Create a LogNormal distribution.\n arg_5 = transformed_distribution.TransformedDistribution(\n distribution=normal.Normal(arg_0=arg_0, arg_1=arg_1),\n bijector=exp_bijector.Exp(),\n arg_3=arg_3)\n arg_6 = tensorshape_util.rank(arg_5.batch_shape)\n if arg_6 is None:\n arg_6 = tf.shape(input=arg_5.batch_shape_tensor())[0]\n\n def _compute_quantiles():\n \"\"\"Helper to build quantiles.\"\"\"\n # Omit {0, 1} since they might lead to Inf/NaN.\n arg_7 = tf.zeros([], dtype=arg_5.dtype)\n arg_8 = tf.linspace(arg_7, 1., arg_2 + 3)[1:-1]\n # Expand edges so its broadcast across batch dims.\n arg_8 = tf.reshape(\n arg_8,\n shape=tf.concat(\n [[-1], tf.ones([arg_6], dtype=tf.int32)], axis=0))\n arg_9 = arg_5.quantile(arg_8)\n # Cyclically permute left by one.\n arg_10 = tf.concat([tf.range(1, 1 + arg_6), [0]], axis=0)\n arg_9 = tf.transpose(a=arg_9, arg_10=arg_10)\n return arg_9\n arg_9 = _compute_quantiles()\n\n # Compute grid as quantile midpoints.\n arg_11 = (arg_9[..., :-1] + arg_9[..., 1:]) / 2.\n # Set shape hints.\n arg_12 = tensorshape_util.concatenate(arg_5.batch_shape,\n [arg_2])\n tensorshape_util.set_shape(arg_11, arg_12)\n\n # By construction probs is constant, i.e., `1 / quadrature_size`. This is\n # important, because non-constant probs leads to non-reparameterizable\n # samples.\n arg_13 = tf.fill(\n dims=[arg_2], value=1. / tf.cast(arg_2, arg_5.dtype))\n\n return arg_11, arg_13"} +{"_id": "doc_656", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Helper to merge which handles merging one value.\"\"\"\n if arg_1 is None:\n return arg_2\n if arg_2 is None:\n return arg_1\n if (arg_1 == arg_2) if arg_3 else (arg_1 is arg_2):\n return arg_1\n raise ValueError(\"Incompatible values: %s != %s\" % (arg_1, arg_2))"} +{"_id": "doc_657", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Converts nested `tuple`, `list`, or `dict` to nested `tuple`.\"\"\"\n if isinstance(arg_1, dict):\n return arg_0.Func(tuple(sorted(arg_1.items())))\n elif isinstance(arg_1, (list, tuple)):\n return tuple(map(arg_0.Func, arg_1))\n\n return arg_1"} +{"_id": "doc_658", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4=None):\n \"\"\"Computes the doubling increments for the left end point.\n\n The doubling procedure expands an initial interval to find a superset of the\n true slice. At each doubling iteration, the interval width is doubled to\n either the left or the right hand side with equal probability.\n If, initially, the left end point is at `L(0)` and the width of the\n interval is `w(0)`, then the left end point and the width at the\n k-th iteration (denoted L(k) and w(k) respectively) are given by the following\n recursions:\n\n ```none\n w(k) = 2 * w(k-1)\n L(k) = L(k-1) - w(k-1) * X_k, X_k ~ Bernoulli(0.5)\n or, L(0) - L(k) = w(0) Sum(2^i * X(i+1), 0 <= i < k)\n ```\n\n This function computes the sequence of `L(0)-L(k)` and `w(k)` for k between 0\n and `max_doublings` independently for each chain.\n\n Args:\n batch_shape: Positive int32 `tf.Tensor`. The batch shape.\n max_doublings: Scalar positive int32 `tf.Tensor`. 
The maximum number of\n doublings to consider.\n step_size: A real `tf.Tensor` with shape compatible with [num_chains].\n The size of the initial interval.\n seed: (Optional) positive int. The random seed. If None, no seed is set.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'find_slice_bounds').\n\n Returns:\n left_increments: A tensor of shape (max_doublings+1, batch_shape). The\n relative position of the left end point after the doublings.\n widths: A tensor of shape (max_doublings+1, ones_like(batch_shape)). The\n widths of the intervals at each stage of the doubling.\n \"\"\"\n with tf.compat.v1.name_scope(arg_4, 'left_doubling_increments',\n [arg_0, arg_1, arg_2]):\n\n arg_2 = tf.convert_to_tensor(value=arg_2)\n arg_5 = arg_2.dtype.base_dtype\n # Output shape of the left increments tensor.\n arg_6 = tf.concat(([arg_1 + 1], arg_0), axis=0)\n # A sample realization of X_k.\n arg_7 = distributions.Bernoulli(0.5, arg_5=arg_5).sample(\n sample_shape=arg_6, arg_3=arg_3)\n\n # The widths of the successive intervals. Starts with 1.0 and ends with\n # 2^max_doublings.\n arg_8 = tf.cast(2 ** tf.range(0, arg_1+1), arg_5=arg_5)\n # Output shape of the `widths` tensor.\n arg_9 = tf.concat(([arg_1 + 1],\n tf.ones_like(arg_0)), axis=0)\n arg_8 = tf.reshape(arg_8, shape=arg_9)\n # Widths shape is [max_doublings + 1, 1, 1, 1...].\n arg_10 = arg_8 * arg_2\n\n # Take the cumulative sum of the left side increments in slice width to give\n # the resulting distance from the inital lower bound.\n arg_11 = tf.cumsum(arg_10 * arg_7, exclusive=True, axis=0)\n return arg_11, arg_10"} +{"_id": "doc_659", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Finds the index of the optimal set of bounds for each chain.\n\n For each chain, finds the smallest set of bounds for which both edges lie\n outside the slice. This is equivalent to the point at which a for loop\n implementation (P715 of Neal (2003)) of the algorithm would terminate.\n\n Performs the following calculation, where i is the number of doublings that\n have been performed and k is the max number of doublings:\n\n (2 * k - i) * flag + i\n\n The argmax of the above returns the earliest index where the bounds were\n outside the slice and if there is no such point, the widest bounds.\n\n Args:\n x: A tensor of shape (max_doublings+1, batch_shape). Type int32, with value\n 0 or 1. Indicates if this set of bounds is outside the slice.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'find_slice_bounds').\n\n Returns:\n indices: A tensor of shape batch_shape. Type int32, with the index of the\n first set of bounds outside the slice and if there are none, the index of\n the widest set.\n \"\"\"\n with tf.compat.v1.name_scope(arg_1, 'find_best_interval_idx', [arg_0]):\n # Returns max_doublings + 1. Positive int32.\n arg_2 = tf.shape(input=arg_0)[0]\n arg_3 = arg_0.dtype.base_dtype\n # Factors by which to multiply the flag. Corresponds to (2 * k - i) above.\n arg_4 = tf.range(2 * arg_2, arg_2, -1, arg_3=arg_3)[:, tf.newaxis]\n # Factors by which to shift the flag. Corresponds to i above. 
Ensures the\n # widest bounds are selected if there are no bounds outside the slice.\n arg_5 = tf.range(arg_2, arg_3=arg_3)[:, tf.newaxis]\n arg_6 = tf.argmax(input=arg_4 * arg_0 + arg_5, axis=0, output_type=arg_3)\n return arg_6"} +{"_id": "doc_660", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None,\n arg_6=None):\n \"\"\"Returns the bounds of the slice at each stage of doubling procedure.\n\n Precomputes the x coordinates of the left (L) and right (R) endpoints of the\n interval `I` produced in the \"doubling\" algorithm [Neal 2003][1] P713. Note\n that we simultaneously compute all possible doubling values for each chain,\n for the reason that at small-medium densities, the gains from parallel\n evaluation might cause a speed-up, but this will be benchmarked against the\n while loop implementation.\n\n Args:\n x_initial: `tf.Tensor` of any shape and any real dtype consumable by\n `target_log_prob`. The initial points.\n target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as\n `x_initial` and returning a tensor of the same shape. The log density of\n the target distribution.\n log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the\n same dtype as returned by `target_log_prob`. The log of the height of the\n slice for each chain. The values must be bounded above by\n `target_log_prob(x_initial)`.\n max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of\n doublings to consider.\n step_size: `tf.Tensor` with same dtype as and shape compatible with\n `x_initial`. The size of the initial interval.\n seed: (Optional) positive int. The random seed. If None, no seed is set.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'find_slice_bounds').\n\n Returns:\n upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper\n bounds for each chain.\n lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower\n bounds for each chain.\n both_ok: A tensor of shape `x_initial` and boolean dtype. Indicates if both\n the chosen upper and lower bound lie outside of the slice.\n\n #### References\n\n [1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31,\n No. 3 , 705-767.\n https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_6, 'Func',\n [arg_0, arg_2, arg_3, arg_4]):\n arg_7 = distributions.SeedStream(arg_5, salt='Func')\n arg_0 = tf.convert_to_tensor(value=arg_0)\n arg_8 = tf.shape(input=arg_0)\n arg_9 = arg_4.dtype.base_dtype\n arg_10 = arg_0 + arg_4 * tf.random.uniform(\n arg_8, minval=-1.0, maxval=0.0, arg_9=arg_9, arg_5=arg_7())\n\n # Compute the increments by which we need to step the upper and lower bounds\n # part of the doubling procedure.\n arg_11, arg_12 = _left_doubling_increments(\n arg_8, arg_3, arg_4, arg_5=arg_7())\n # The left and right end points. 
Shape (max_doublings+1,) + batch_shape.\n arg_10 -= arg_11\n arg_13 = arg_10 + arg_12\n\n # Test if these end points lie outside of the slice.\n # Checks if the end points of the slice are outside the graph of the pdf.\n arg_14 = tf.map_fn(arg_1, arg_10)\n arg_15 = tf.map_fn(arg_1, arg_13)\n arg_16 = arg_14 < arg_2\n arg_17 = arg_15 < arg_2\n arg_18 = arg_16 & arg_17\n\n arg_19 = tf.reshape(arg_18, [arg_3 + 1, -1])\n\n arg_20 = _find_best_interval_idx(\n tf.cast(arg_19, arg_9=tf.int32))\n\n # Formats the above index as required to use with gather_nd.\n arg_21 = tf.stack(\n [arg_20,\n tf.range(tf.size(input=arg_20))],\n axis=1,\n arg_6='point_index_gather')\n arg_22 = tf.reshape(arg_10, [arg_3 + 1, -1])\n arg_23 = tf.reshape(arg_13, [arg_3 + 1, -1])\n # The x values of the uppper and lower bounds of the slices for each chain.\n arg_24 = tf.reshape(tf.gather_nd(arg_22, arg_21),\n arg_8)\n arg_25 = tf.reshape(tf.gather_nd(arg_23, arg_21),\n arg_8)\n arg_18 = tf.reduce_any(input_tensor=arg_18, axis=0)\n return arg_25, arg_24, arg_18"} +{"_id": "doc_661", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5, arg_6=None,\n arg_7=None):\n \"\"\"Samples from the slice by applying shrinkage for rejected points.\n\n Implements the one dimensional slice sampling algorithm of Neal (2003), with a\n doubling algorithm (Neal 2003 P715 Fig. 4), which doubles the size of the\n interval at each iteration and shrinkage (Neal 2003 P716 Fig. 5), which\n reduces the width of the slice when a selected point is rejected, by setting\n the relevant bound that that value. Randomly sampled points are checked for\n two criteria: that they lie within the slice and that they pass the\n acceptability check (Neal 2003 P717 Fig. 6), which tests that the new state\n could have generated the previous one.\n\n Args:\n x_initial: A tensor of any shape. The initial positions of the chains. This\n function assumes that all the dimensions of `x_initial` are batch\n dimensions (i.e. the event shape is `[]`).\n target_log_prob: Callable accepting a tensor like `x_initial` and returning\n a tensor containing the log density at that point of the same shape.\n log_slice_heights: Tensor of the same shape and dtype as the return value\n of `target_log_prob` when applied to `x_initial`. The log of the height of\n the chosen slice.\n step_size: A tensor of shape and dtype compatible with `x_initial`. The min\n interval size in the doubling algorithm.\n lower_bounds: Tensor of same shape and dtype as `x_initial`. Slice lower\n bounds for each chain.\n upper_bounds: Tensor of same shape and dtype as `x_initial`. Slice upper\n bounds for each chain.\n seed: (Optional) positive int. The random seed. If None, no seed is set.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'find_slice_bounds').\n\n Returns:\n x_proposed: A tensor of the same shape and dtype as `x_initial`. 
The next\n proposed state of the chain.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_7, 'sample_with_shrinkage',\n [arg_0, arg_2, arg_3, arg_4, arg_5]):\n arg_8 = distributions.SeedStream(arg_6, salt='Func')\n # Keeps track of whether an acceptable sample has been found for the chain.\n arg_9 = tf.zeros_like(arg_0, dtype=tf.bool)\n arg_10 = lambda arg_9, *ignored_args: ~tf.reduce_all(input_tensor=arg_9)\n arg_11 = tf.identity(arg_0)\n arg_12 = tf.shape(input=arg_0)\n arg_13 = arg_0.dtype.base_dtype\n def _body(arg_9, arg_14, arg_15, arg_11):\n \"\"\"Iterates until every chain has found a suitable next state.\"\"\"\n arg_16 = tf.random.uniform(\n arg_12, dtype=arg_13, arg_6=arg_8())\n arg_17 = tf.where(~arg_9, arg_14 + arg_16 * (arg_15 - arg_14), arg_11)\n arg_18 = _test_acceptance(arg_0, arg_1=arg_1,\n decided=arg_9,\n arg_2=arg_2,\n arg_17=arg_17, arg_3=arg_3,\n arg_4=arg_14, arg_5=arg_15)\n arg_19 = arg_2 < arg_1(arg_17)\n arg_20 = arg_19 & arg_18\n arg_21 = arg_9 | arg_20\n # Note that it might seem that we are moving the left and right end points\n # even if the point has been accepted (which is contrary to the stated\n # algorithm in Neal). However, this does not matter because the endpoints\n # for points that have been already accepted are not used again so it\n # doesn't matter what we do with them.\n arg_22 = tf.where(arg_17 < arg_0, arg_17, arg_14)\n arg_23 = tf.where(arg_17 >= arg_0, arg_17, arg_15)\n return arg_21, arg_22, arg_23, arg_17\n\n return tf.while_loop(\n arg_10=arg_10,\n body=_body,\n loop_vars=(arg_9, arg_4, arg_5, arg_11))[-1]"} +{"_id": "doc_662", "title": "", "text": "def Func(**arg_0):\n \"\"\"Creates a value-setting interceptor.\n\n This function creates an interceptor that sets values of Edward2 random\n variable objects. This is useful for a range of tasks, including conditioning\n on observed data, sampling from posterior predictive distributions, and as a\n building block of inference primitives such as computing log joint\n probabilities (see examples below).\n\n Args:\n **model_kwargs: dict of str to Tensor. Keys are the names of random\n variables in the model to which this interceptor is being applied. Values\n are Tensors to set their value to. Variables not included in this dict\n will not be set and will maintain their existing value semantics (by\n default, a sample from the parent-conditional distribution).\n\n Returns:\n set_values: function that sets the value of intercepted ops.\n\n #### Examples\n\n Consider for illustration a model with latent `z` and\n observed `x`, and a corresponding trainable posterior model:\n\n ```python\n num_observations = 10\n def model():\n z = ed.Normal(loc=0, scale=1., name='z') # log rate\n x = ed.Poisson(rate=tf.exp(z) * tf.ones(num_observations), name='x')\n return x\n\n def variational_model():\n return ed.Normal(loc=tf.Variable(0.),\n scale=tf.nn.softplus(tf.Variable(-4.)),\n name='z') # for simplicity, match name of the model RV.\n ```\n\n We can use a value-setting interceptor to condition the model on observed\n data. 
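The `while_loop` in `doc_661` implements the shrinkage procedure of Neal (2003, Fig. 5). A minimal single-chain NumPy sketch of the same idea follows; it omits the acceptability test (Fig. 6) handled by `_test_acceptance` above, and all names are illustrative:

```python
import numpy as np

def shrinkage_sample(x0, log_prob, log_slice_height, lo, hi, rng):
    # Draw uniformly from [lo, hi]; on rejection, shrink the bound on the
    # same side as the rejected point and try again.
    while True:
        x = rng.uniform(lo, hi)
        if log_prob(x) > log_slice_height:
            return x
        if x < x0:
            lo = x
        else:
            hi = x
```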
This approach is slightly more cumbersome than that of partially\n evaluating the complete log-joint function, but has the potential advantage\n that it returns a new model callable, which may be used to sample downstream\n variables, passed into additional transformations, etc.\n\n ```python\n x_observed = np.array([6, 3, 1, 8, 7, 0, 6, 4, 7, 5])\n def observed_model():\n with ed.interception(Func(x=x_observed)):\n model()\n observed_log_joint_fn = ed.make_log_joint_fn(observed_model)\n\n # After fixing 'x', the observed log joint is now only a function of 'z'.\n # This enables us to define a variational lower bound,\n # `E_q[ log p(x, z) - log q(z)]`, simply by evaluating the observed and\n # variational log joints at variational samples.\n variational_log_joint_fn = ed.make_log_joint_fn(variational_model)\n with ed.tape() as variational_sample: # Sample trace from variational model.\n variational_model()\n elbo_loss = -(observed_log_joint_fn(**variational_sample) -\n variational_log_joint_fn(**variational_sample))\n ```\n\n After performing inference by minimizing the variational loss, a value-setting\n interceptor enables simulation from the posterior predictive distribution:\n\n ```python\n with ed.tape() as posterior_samples: # tape is a map {rv.name : rv}\n variational_model()\n with ed.interception(ed.Func(**posterior_samples)):\n x = model()\n # x is a sample from p(X | Z = z') where z' ~ q(z) (the variational model)\n ```\n\n As another example, using a value setter inside of `ed.tape` enables\n computing the log joint probability, by setting all variables to\n posterior values and then accumulating the log probs of those values under\n the induced parent-conditional distributions. This is one way that we could\n have implemented `ed.make_log_joint_fn`:\n\n ```python\n def make_log_joint_fn_demo(model):\n def log_joint_fn(**model_kwargs):\n with ed.tape() as model_tape:\n with ed.Func(**model_kwargs):\n model()\n\n # accumulate sum_i log p(X_i = x_i | X_{:i-1} = x_{:i-1})\n log_prob = 0.\n for rv in model_tape.values():\n log_prob += tf.reduce_sum(rv.log_prob(rv.value))\n\n return log_prob\n return log_joint_fn\n ```\n\n \"\"\"\n def set_values(arg_1, *arg_2, **arg_3):\n \"\"\"Sets random variable values to its aligned value.\"\"\"\n arg_4 = arg_3.get(\"name\")\n if arg_4 in arg_0:\n arg_3[\"value\"] = arg_0[arg_4]\n return interceptable(arg_1)(*arg_2, **arg_3)\n return set_values"} +{"_id": "doc_663", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Filters inputs to be compatible with function `f`'s signature.\n\n Args:\n f: Function according to whose input signature we filter arguments.\n src_kwargs: Keyword arguments to filter according to `f`.\n\n Returns:\n kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s\n signature.\n \"\"\"\n if hasattr(arg_0, \"_func\"): # functions returned by tf.make_template\n arg_0 = arg_0._func # pylint: disable=protected-access\n\n try: # getargspec was deprecated in Python 3.6\n arg_2 = inspect.getfullargspec(arg_0)\n except AttributeError:\n arg_2 = inspect.getargspec(arg_0)\n\n arg_3 = {k: v for k, v in six.iteritems(arg_1) if k in arg_2.args}\n return arg_3"} +{"_id": "doc_664", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Network block for VGG.\"\"\"\n arg_5 = tfp.layers.Convolution2DFlipout(\n arg_1,\n arg_2,\n padding='same',\n arg_4=arg_4)(arg_0)\n arg_5 = tf.keras.layers.BatchNormalization()(arg_5)\n arg_5 = tf.keras.layers.Activation('relu')(arg_5)\n\n arg_5 = 
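The signature-filtering trick in `doc_663` is plain `inspect` usage and works standalone. A small sketch with illustrative names:

```python
import inspect

def filter_kwargs(f, src_kwargs):
    # Keep only the keyword arguments that appear in f's signature.
    argnames = inspect.getfullargspec(f).args
    return {k: v for k, v in src_kwargs.items() if k in argnames}

def g(a, b):
    return a + b

print(filter_kwargs(g, {'a': 1, 'b': 2, 'c': 3}))  # -> {'a': 1, 'b': 2}
```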
tfp.layers.Convolution2DFlipout(\n arg_1,\n arg_2,\n padding='same',\n arg_4=arg_4)(arg_5)\n arg_5 = tf.keras.layers.BatchNormalization()(arg_5)\n arg_5 = tf.keras.layers.Activation('relu')(arg_5)\n\n arg_5 = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2), strides=arg_3)(arg_5)\n return arg_5"} +{"_id": "doc_665", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n arg_9=1000.,\n arg_10=None):\n \"\"\"Builds a tree at a given tree depth and at a given state.\n\n The `current` state is immediately adjacent to, but outside of,\n the subtrajectory spanned by the returned `forward` and `reverse` states.\n\n Args:\n value_and_gradients_fn: Python callable which takes an argument like\n `*current_state` and returns a tuple of its (possibly unnormalized)\n log-density under the target distribution and its gradient with respect to\n each state.\n current_state: List of `Tensor`s representing the current states of the\n NUTS trajectory.\n current_target_log_prob: Scalar `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`.\n current_grads_target_log_prob: List of `Tensor`s representing gradient of\n `current_target_log_prob` with respect to `current_state`. Must have same\n shape as `current_state`.\n current_momentum: List of `Tensor`s representing the momentums of\n `current_state`. Must have same shape as `current_state`.\n direction: int that is either -1 or 1. It determines whether to perform\n leapfrog integration backwards (reverse) or forward in time respectively.\n depth: non-negative int that indicates how deep of a tree to build.\n Each call to `Func` takes `2**depth` leapfrog steps.\n step_size: List of `Tensor`s representing the step sizes for the leapfrog\n integrator. Must have same shape as `current_state`.\n log_slice_sample: The log of an auxiliary slice variable. It is used\n together with `max_simulation_error` to avoid simulating trajectories with\n too much numerical error.\n max_simulation_error: Maximum simulation error to tolerate before\n terminating the trajectory. Simulation error is the\n `log_slice_sample` minus the log-joint probability at the simulated state.\n seed: Integer to seed the random number generator.\n\n Returns:\n reverse_state: List of `Tensor`s representing the \"reverse\" states of the\n NUTS trajectory. Has same shape as `current_state`.\n reverse_target_log_prob: Scalar `Tensor` representing the value of\n `target_log_prob_fn` at the `reverse_state`.\n reverse_grads_target_log_prob: List of `Tensor`s representing gradient of\n `reverse_target_log_prob` with respect to `reverse_state`. Has same shape\n as `reverse_state`.\n reverse_momentum: List of `Tensor`s representing the momentums of\n `reverse_state`. Has same shape as `reverse_state`.\n forward_state: List of `Tensor`s representing the \"forward\" states of the\n NUTS trajectory. Has same shape as `current_state`.\n forward_target_log_prob: Scalar `Tensor` representing the value of\n `target_log_prob_fn` at the `forward_state`.\n forward_grads_target_log_prob: List of `Tensor`s representing gradient of\n `forward_target_log_prob` with respect to `forward_state`. Has same shape\n as `forward_state`.\n forward_momentum: List of `Tensor`s representing the momentums of\n `forward_state`. Has same shape as `forward_state`.\n next_state: List of `Tensor`s representing the next states of the NUTS\n trajectory. 
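The VGG block in `doc_664` is ordinary Keras functional-API wiring around `tfp.layers.Convolution2DFlipout`. The sketch below shows the same block shape with a deterministic `Conv2D` stand-in (an assumption for illustration, not the corpus code, which uses the Flipout variational layer):

```python
import tensorflow as tf

def conv_block(x, filters, kernel_size, strides):
    # conv -> batch norm -> ReLU, twice, then 2x2 max pooling.
    for _ in range(2):
        x = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('relu')(x)
    return tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=strides)(x)
```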
Has same shape as `current_state`.\n next_target_log_prob: Scalar `Tensor` representing the value of\n `target_log_prob_fn` at `next_state`.\n next_grads_target_log_prob: List of `Tensor`s representing the gradient of\n `next_target_log_prob` with respect to `next_state`.\n num_states: Number of acceptable candidate states in the subtree. A state is\n acceptable if it is \"in the slice\", that is, if its log-joint probability\n with its momentum is greater than `log_slice_sample`.\n continue_trajectory: bool determining whether to continue the simulation\n trajectory. The trajectory is continued if no U-turns are encountered\n within the built subtree, and if the log-probability accumulation due to\n integration error does not exceed `max_simulation_error`.\n \"\"\"\n if arg_6 == 0: # base case\n # Take a leapfrog step. Terminate the tree-building if the simulation\n # error from the leapfrog integrator is too large. States discovered by\n # continuing the simulation are likely to have very low probability.\n [\n arg_11,\n arg_12,\n arg_13,\n arg_14,\n ] = _leapfrog(\n arg_0=arg_0,\n arg_1=arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_7=arg_5 * arg_7)\n arg_15 = _log_joint(arg_12, arg_14)\n arg_16 = tf.cast(arg_15 > arg_8, dtype=tf.int32)\n arg_17 = (arg_15 >\n arg_8 - arg_9)\n return [\n arg_11,\n arg_12,\n arg_13,\n arg_14,\n arg_11,\n arg_12,\n arg_13,\n arg_14,\n arg_11,\n arg_12,\n arg_13,\n arg_16,\n arg_17,\n ]\n\n # Build a tree at the current state.\n arg_18 = tfd.SeedStream(arg_10, \"build_tree\")\n [\n arg_19,\n arg_20,\n arg_21,\n arg_22,\n arg_23,\n arg_24,\n arg_25,\n arg_26,\n arg_11,\n arg_12,\n arg_13,\n arg_16,\n arg_17,\n ] = Func(arg_0=arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6 - 1,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_10=arg_18())\n if arg_17:\n # If the just-built subtree did not terminate, build a second subtree at\n # the forward or reverse state, as appropriate.\n if arg_5 < 0:\n [\n arg_19,\n arg_20,\n arg_21,\n arg_22,\n arg_27,\n arg_27,\n arg_27,\n arg_27,\n arg_28,\n arg_29,\n arg_30,\n arg_31,\n arg_32,\n ] = Func(\n arg_0=arg_0,\n arg_1=arg_19,\n arg_2=arg_20,\n arg_3=arg_21,\n arg_4=arg_22,\n arg_5=arg_5,\n arg_6=arg_6 - 1,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_10=arg_18())\n else:\n [\n arg_27,\n arg_27,\n arg_27,\n arg_27,\n arg_23,\n arg_24,\n arg_25,\n arg_26,\n arg_28,\n arg_29,\n arg_30,\n arg_31,\n arg_32,\n ] = Func(\n arg_0=arg_0,\n arg_1=arg_23,\n arg_2=arg_24,\n arg_3=arg_25,\n arg_4=arg_26,\n arg_5=arg_5,\n arg_6=arg_6 - 1,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_10=arg_18())\n\n # Propose either `next_state` (which came from the first subtree and so is\n # nearby) or the new forward/reverse state (which came from the second\n # subtree and so is far away).\n arg_16 += arg_31\n arg_33 = _random_bernoulli(\n [],\n probs=arg_31 / arg_16,\n dtype=tf.bool,\n arg_10=arg_18())\n if arg_33:\n arg_11 = arg_28\n arg_12 = arg_29\n arg_13 = arg_30\n\n # Continue the NUTS trajectory if the far subtree did not terminate either,\n # and if the reverse-most and forward-most states do not exhibit a U-turn.\n arg_34 = tf.logical_and(\n _has_no_u_turn(arg_23, arg_19, arg_26),\n _has_no_u_turn(arg_23, arg_19, arg_22))\n arg_17 = arg_32 and arg_34\n\n return [\n arg_19,\n arg_20,\n arg_21,\n arg_22,\n arg_23,\n arg_24,\n arg_25,\n arg_26,\n arg_11,\n arg_12,\n arg_13,\n arg_16,\n arg_17,\n ]"} +{"_id": "doc_666", "title": "", "text": "def Func(arg_0):\n \"\"\"Wraps value and gradients function to assist with None 
gradients.\"\"\"\n @functools.wraps(arg_0)\n def func_wrapped(*arg_1, **arg_2):\n \"\"\"Wrapped function which checks for None gradients.\"\"\"\n arg_3, arg_4 = arg_0(*arg_1, **arg_2)\n if any(arg_5 is None for arg_5 in arg_4):\n raise ValueError(\"Gradient is None for a state.\")\n return arg_3, arg_4\n return func_wrapped"} +{"_id": "doc_667", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"If two given states and momentum do not exhibit a U-turn pattern.\"\"\"\n arg_3 = sum([\n tf.reduce_sum(input_tensor=(s1 - s2) * m)\n for s1, s2, m in zip(arg_0, arg_1, arg_2)\n ])\n return arg_3 > 0"} +{"_id": "doc_668", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4):\n \"\"\"Runs one step of leapfrog integration.\"\"\"\n arg_5 = [\n m + 0.5 * step * g for m, step, g in\n zip(arg_3, arg_4, arg_2)]\n arg_6 = [\n s + step * m for s, step, m in\n zip(arg_1, arg_4, arg_5)]\n arg_7, arg_8 = arg_0(\n *arg_6)\n arg_9 = [\n m + 0.5 * step * g for m, step, g in\n zip(arg_5, arg_4, arg_8)]\n return [\n arg_6,\n arg_7,\n arg_8,\n arg_9,\n ]"} +{"_id": "doc_669", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Log-joint probability given a state's log-probability and momentum.\"\"\"\n arg_2 = -sum(\n [tf.reduce_sum(input_tensor=0.5 * (m**2.)) for m in arg_1])\n return arg_0 + arg_2"} +{"_id": "doc_670", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.int32, arg_5=None, arg_6=None):\n \"\"\"Returns samples from a Bernoulli distribution.\"\"\"\n with arg_3.compat.v1.name_scope(arg_6, \"random_bernoulli\", [arg_0, arg_1]):\n arg_1 = arg_3.convert_to_tensor(value=arg_1)\n arg_7 = arg_3.random.uniform(arg_0, arg_2=arg_1.dtype, arg_5=arg_5)\n return arg_3.cast(arg_3.less(arg_7, arg_1), arg_2)"} +{"_id": "doc_671", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4):\n \"\"\"Creates multivariate standard `Normal` distribution.\n\n Args:\n dtype: Type of parameter's event.\n shape: Python `list`-like representing the parameter's event shape.\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n\n Returns:\n Multivariate standard `Normal` distribution.\n \"\"\"\n del arg_2, arg_3, arg_4 # unused\n arg_5 = tfd.Normal(loc=tf.zeros(arg_1, arg_0), scale=arg_0.as_numpy_dtype(1))\n arg_6 = tf.size(input=arg_5.batch_shape_tensor())\n return tfd.Independent(arg_5, reinterpreted_batch_ndims=arg_6)"} +{"_id": "doc_672", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Deserializes the Keras-serialized function.\n\n (De)serializing Python functions from/to bytecode is unsafe. Therefore we\n also use the function's type as an anonymous function ('lambda') or named\n function in the Python environment ('function'). In the latter case, this lets\n us use the Python scope to obtain the function rather than reload it from\n bytecode. (Note that both cases are brittle!)\n\n Keras-deserialized functions do not perform lexical scoping. 
Any modules that\n  the function requires must be imported within the function itself.\n\n  This serialization mimics the implementation in `tf.keras.layers.Lambda`.\n\n  Args:\n    serial: Serialized Keras object: typically a dict, string, or bytecode.\n    function_type: Python string denoting 'function' or 'lambda'.\n\n  Returns:\n    function: Function the serialized Keras object represents.\n\n  #### Examples\n\n  ```python\n  serial, function_type = serialize_function(lambda x: x)\n  function = Func(serial, function_type)\n  assert function(2.3) == 2.3  # function is identity\n  ```\n\n  \"\"\"\n  if arg_1 == 'function':\n    # Simple lookup in custom objects\n    arg_2 = tf.keras.utils.deserialize_keras_object(arg_0)\n  elif arg_1 == 'lambda':\n    # Unsafe deserialization from bytecode\n    arg_2 = generic_utils.func_load(arg_0)\n  else:\n    raise TypeError('Unknown function type:', arg_1)\n  return arg_2"}
{"_id": "doc_673", "title": "", "text": "def Func(arg_0):\n  \"\"\"Serializes function for Keras.\n\n  (De)serializing Python functions from/to bytecode is unsafe. Therefore we\n  return the function's type as an anonymous function ('lambda') or named\n  function in the Python environment ('function'). In the latter case, this lets\n  us use the Python scope to obtain the function rather than reload it from\n  bytecode. (Note that both cases are brittle!)\n\n  This serialization mimics the implementation in `tf.keras.layers.Lambda`.\n\n  Args:\n    func: Python function to serialize.\n\n  Returns:\n    (serial, function_type): Serialized object, which is a tuple of its\n    bytecode (if function is anonymous) or name (if function is named), and its\n    function type.\n  \"\"\"\n  if isinstance(arg_0, types.LambdaType):\n    return generic_utils.func_dump(arg_0), 'lambda'\n  return arg_0.__name__, 'function'"}
{"_id": "doc_674", "title": "", "text": "def Func(arg_0, arg_1):\n  \"\"\"Broadcasts `from_structure` to `to_structure`.\n\n  This is useful for downstream usage of `zip` or `tf.nest.map_structure`.\n\n  If `from_structure` is a singleton, it is tiled to match the structure of\n  `to_structure`. Note that the elements in `from_structure` are not copied if\n  this tiling occurs.\n\n  Args:\n    to_structure: A structure.\n    from_structure: A structure.\n\n  Returns:\n    new_from_structure: Same structure as `to_structure`.\n\n  #### Example:\n\n  ```python\n  a_structure = ['a', 'b', 'c']\n  b_structure = Func(a_structure, 'd')\n  # -> ['d', 'd', 'd']\n  c_structure = tf.nest.map_structure(\n      lambda a, b: a + b, a_structure, b_structure)\n  # -> ['ad', 'bd', 'cd']\n  ```\n  \"\"\"\n  arg_2 = tf.nest.flatten(arg_1)\n  if len(arg_2) == 1:\n    arg_1 = tf.nest.map_structure(lambda _: arg_2[0],\n                                  arg_0)\n  return arg_1"}
{"_id": "doc_675", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n  \"\"\"Eagerly converts struct to Tensor, recursing upon failure.\"\"\"\n  if arg_1 is not None or not tf.nest.is_nested(arg_0):\n    return tf.convert_to_tensor(arg_0, arg_1=arg_1)\n\n  if _maybe_convertible_to_tensor(arg_0):\n    try:\n      # Try converting the structure wholesale.\n      return tf.convert_to_tensor(value=arg_0, arg_2=arg_2)\n    except (ValueError, TypeError):\n      # Unfortunately Eager/Graph mode don't agree on the error type.\n      pass\n  # Try converting all of its children.\n  arg_3 = _get_shallow_structure(arg_0)\n  return nest.map_structure_up_to(\n      arg_3, lambda s: Func(s, arg_2=arg_2), arg_0)"}
{"_id": "doc_676", "title": "", "text": "def Func():\n  \"\"\"Returns `Tensor` attributes related to shape and Python builtins.\"\"\"\n  # Enable \"Tensor semantics\" for distributions.\n  # See tensorflow/python/framework/ops.py `class Tensor` for details.\n  arg_0 = dict()\n  # Setup overloadable operators and white-listed members / properties.\n  arg_0.update((arg_1, _wrap_method(tf.Tensor, arg_1))\n               for arg_1 in tf.Tensor.OVERLOADABLE_OPERATORS.union({'__iter__'}))\n  # Copy some members straight-through.\n  arg_0.update((arg_1, getattr(tf.Tensor, arg_1))\n               for arg_1 in {'__nonzero__', '__bool__', '__array_priority__'})\n  return arg_0"}
{"_id": "doc_677", "title": "", "text": "def Func(arg_0, arg_1):\n  \"\"\"Creates the mixture of Gaussians prior distribution.\n\n  Args:\n    latent_size: The dimensionality of the latent representation.\n    mixture_components: Number of elements of the mixture.\n\n  Returns:\n    random_prior: A `tfd.Distribution` instance representing the distribution\n    over encodings in the absence of any evidence.\n  \"\"\"\n  if arg_1 == 1:\n    # See the module docstring for why we don't learn the parameters here.\n    return tfd.MultivariateNormalDiag(\n        arg_2=tf.zeros([arg_0]),\n        scale_identity_multiplier=1.0)\n\n  arg_2 = tf.compat.v1.get_variable(\n      name=\"loc\", shape=[arg_1, arg_0])\n  arg_3 = tf.compat.v1.get_variable(\n      name=\"raw_scale_diag\", shape=[arg_1, arg_0])\n  arg_4 = tf.compat.v1.get_variable(\n      name=\"mixture_logits\", shape=[arg_1])\n\n  return tfd.MixtureSameFamily(\n      components_distribution=tfd.MultivariateNormalDiag(\n          arg_2=arg_2,\n          scale_diag=tf.nn.softplus(arg_3)),\n      mixture_distribution=tfd.Categorical(logits=arg_4),\n      name=\"prior\")"}
{"_id": "doc_678", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n  \"\"\"Helper utility to make a field of images.\"\"\"\n  arg_3 = tf.shape(input=arg_0)\n  arg_4 = arg_3[-3]\n  arg_5 = arg_3[-2]\n  arg_6 = arg_3[-1]\n  arg_0 = tf.reshape(arg_0, (-1, arg_4, arg_5, arg_6))\n  arg_7 = tf.shape(input=arg_0)[0]\n  arg_1 = tf.minimum(arg_1, arg_7)\n  arg_2 = tf.minimum(arg_7 // arg_1, arg_2)\n  arg_0 = arg_0[:arg_1 * arg_2]\n  arg_0 = tf.reshape(arg_0, (arg_1, arg_2, arg_4, arg_5, arg_6))\n  arg_0 = tf.transpose(a=arg_0, perm=[0, 2, 1, 3, 4])\n  arg_0 = tf.reshape(arg_0,
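The structure broadcasting in `doc_674` reduces to a one-liner for flat lists. A simplified pure-Python sketch (the corpus version handles arbitrary `tf.nest` structures; this one assumes lists and tuples only):

```python
def broadcast_structure(to_structure, from_structure):
    # If `from_structure` is a single element, repeat it (by reference,
    # not by copy) to match the length of `to_structure`.
    if not isinstance(from_structure, (list, tuple)):
        return [from_structure] * len(to_structure)
    return list(from_structure)

print(broadcast_structure(['a', 'b', 'c'], 'd'))  # -> ['d', 'd', 'd']
```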
[1, arg_1 * arg_4, arg_2 * arg_5, arg_6])\n return arg_0"} +{"_id": "doc_679", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Downloads a file.\"\"\"\n arg_2 = os.path.join(arg_0, arg_1)\n if tf.io.gfile.exists(arg_2):\n return arg_2\n if not tf.io.gfile.exists(arg_0):\n tf.io.gfile.makedirs(arg_0)\n arg_3 = os.path.join(ROOT_PATH, arg_1)\n print(\"Downloading %s to %s\" % (arg_3, arg_2))\n urllib.request.urlretrieve(arg_3, arg_2)\n return arg_2"} +{"_id": "doc_680", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Helper to validate block sizes.\"\"\"\n arg_3 = arg_0.shape\n if tensorshape_util.is_fully_defined(arg_3):\n if (tensorshape_util.rank(arg_3) != 1 or\n (tensorshape_util.num_elements(arg_3) != len(arg_1))):\n raise ValueError(\n '`block_sizes` must be `None`, or a vector of the same length as '\n '`bijectors`. Got a `Tensor` with shape {} and `bijectors` of '\n 'length {}'.format(arg_3, len(arg_1)))\n return arg_0\n elif arg_2:\n arg_4 = ('`block_sizes` must be `None`, or a vector of the same length '\n 'as `bijectors`.')\n with tf.control_dependencies([\n assert_util.assert_equal(\n tf.size(input=arg_0), len(arg_1), arg_4=arg_4),\n assert_util.assert_equal(tf.rank(arg_0), 1)\n ]):\n return tf.identity(arg_0)\n else:\n return arg_0"} +{"_id": "doc_681", "title": "", "text": "def Func(arg_0, arg_1=arg_2.compat.v1.layers.dense, arg_7=None):\n \"\"\"Constructs a trainable `tfd.Bernoulli` distribution.\n\n This function creates a Bernoulli distribution parameterized by logits.\n Using default args, this function is mathematically equivalent to:\n\n ```none\n Y = Bernoulli(logits=matmul(W, x) + b)\n\n where,\n W in R^[d, n]\n b in R^d\n ```\n\n #### Examples\n\n This function can be used as a [logistic regression](\n https://en.wikipedia.org/wiki/Logistic_regression) loss.\n\n ```python\n # This example fits a logistic regression loss.\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n # Create fictitious training data.\n dtype = np.float32\n n = 3000 # number of samples\n x_size = 4 # size of single x\n def make_training_data():\n np.random.seed(142)\n x = np.random.randn(n, x_size).astype(dtype)\n w = np.random.randn(x_size).astype(dtype)\n b = np.random.randn(1).astype(dtype)\n true_logits = np.tensordot(x, w, axes=[[-1], [-1]]) + b\n noise = np.random.logistic(size=n).astype(dtype)\n y = dtype(true_logits + noise > 0.)\n return y, x\n y, x = make_training_data()\n\n # Build TF graph for fitting Bernoulli maximum likelihood estimator.\n Func = tfp.trainable_distributions.Func(x)\n loss = -tf.reduce_mean(Func.log_prob(y))\n train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)\n mse = tf.reduce_mean(tf.squared_difference(y, Func.mean()))\n init_op = tf.global_variables_initializer()\n\n # Run graph 1000 times.\n num_steps = 1000\n loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.\n mse_ = np.zeros(num_steps)\n with tf.Session() as sess:\n sess.run(init_op)\n for it in xrange(loss_.size):\n _, loss_[it], mse_[it] = sess.run([train_op, loss, mse])\n if it % 200 == 0 or it == loss_.size - 1:\n print(\"iteration:{} loss:{} mse:{}\".format(it, loss_[it], mse_[it]))\n\n # ==> iteration:0 loss:0.635675370693 mse:0.222526371479\n # iteration:200 loss:0.440077394247 mse:0.143687799573\n # iteration:400 loss:0.440077394247 mse:0.143687844276\n # iteration:600 loss:0.440077394247 mse:0.143687844276\n # iteration:800 loss:0.440077424049 mse:0.143687844276\n # iteration:999 loss:0.440077424049 mse:0.143687844276\n ```\n\n 
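The reshape/transpose/reshape dance in `doc_678` lays a batch of images out as a grid. A NumPy sketch of the same index gymnastics (illustrative names; drops the leading singleton batch dimension the corpus version keeps):

```python
import numpy as np

def pack_images(images, rows, cols):
    # images: (N, H, W, C). Keep the first rows*cols frames and lay them
    # out in a (rows*H, cols*W, C) grid via reshape + transpose.
    n, h, w, c = images.shape
    images = images[:rows * cols].reshape(rows, cols, h, w, c)
    images = images.transpose(0, 2, 1, 3, 4)       # (rows, H, cols, W, C)
    return images.reshape(rows * h, cols * w, c)

grid = pack_images(np.zeros((12, 8, 8, 1)), rows=3, cols=4)
print(grid.shape)  # -> (24, 32, 1)
```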
Args:\n x: `Tensor` with floating type. Must have statically defined rank and\n statically known right-most dimension.\n layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and\n returns a transformation of `x` with shape\n `tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.\n Default value: `tf.layers.dense`.\n name: A `name_scope` name for operations created by this function.\n Default value: `None` (i.e., \"Func\").\n\n Returns:\n Func: An instance of `tfd.Bernoulli`.\n \"\"\"\n with arg_2.compat.v1.name_scope(arg_7, 'Func', [arg_0]):\n arg_0 = arg_2.convert_to_tensor(value=arg_0, arg_7='x')\n arg_8 = arg_2.squeeze(arg_1(arg_0, 1), axis=-1)\n return tfd.Bernoulli(arg_8=arg_8)"} +{"_id": "doc_682", "title": "", "text": "def Func(arg_0,\n arg_1=arg_2.compat.v1.layers.dense,\n arg_7=lambda arg_0: arg_0,\n arg_8=1.,\n arg_9=None):\n \"\"\"Constructs a trainable `tfd.Normal` distribution.\n\n\n This function creates a Normal distribution parameterized by loc and scale.\n Using default args, this function is mathematically equivalent to:\n\n ```none\n Y = Normal(loc=matmul(W, x) + b, scale=1)\n\n where,\n W in R^[d, n]\n b in R^d\n ```\n\n #### Examples\n\n This function can be used as a [linear regression](\n https://en.wikipedia.org/wiki/Linear_regression) loss.\n\n ```python\n # This example fits a linear regression loss.\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n # Create fictitious training data.\n dtype = np.float32\n n = 3000 # number of samples\n x_size = 4 # size of single x\n def make_training_data():\n np.random.seed(142)\n x = np.random.randn(n, x_size).astype(dtype)\n w = np.random.randn(x_size).astype(dtype)\n b = np.random.randn(1).astype(dtype)\n true_mean = np.tensordot(x, w, axes=[[-1], [-1]]) + b\n noise = np.random.randn(n).astype(dtype)\n y = true_mean + noise\n return y, x\n y, x = make_training_data()\n\n # Build TF graph for fitting Normal maximum likelihood estimator.\n Func = tfp.trainable_distributions.Func(x)\n loss = -tf.reduce_mean(Func.log_prob(y))\n train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)\n mse = tf.reduce_mean(tf.squared_difference(y, Func.mean()))\n init_op = tf.global_variables_initializer()\n\n # Run graph 1000 times.\n num_steps = 1000\n loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.\n mse_ = np.zeros(num_steps)\n with tf.Session() as sess:\n sess.run(init_op)\n for it in xrange(loss_.size):\n _, loss_[it], mse_[it] = sess.run([train_op, loss, mse])\n if it % 200 == 0 or it == loss_.size - 1:\n print(\"iteration:{} loss:{} mse:{}\".format(it, loss_[it], mse_[it]))\n\n # ==> iteration:0 loss:6.34114170074 mse:10.8444051743\n # iteration:200 loss:1.40146839619 mse:0.965059816837\n # iteration:400 loss:1.40052902699 mse:0.963181257248\n # iteration:600 loss:1.40052902699 mse:0.963181257248\n # iteration:800 loss:1.40052902699 mse:0.963181257248\n # iteration:999 loss:1.40052902699 mse:0.963181257248\n ```\n\n Args:\n x: `Tensor` with floating type. Must have statically defined rank and\n statically known right-most dimension.\n layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and\n returns a transformation of `x` with shape\n `tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.\n Default value: `tf.layers.dense`.\n loc_fn: Python `callable` which transforms the `loc` parameter. Takes a\n (batch of) length-`dims` vectors and returns a `Tensor` of same shape and\n `dtype`.\n Default value: `lambda x: x`.\n scale_fn: Python `callable` or `Tensor`. 
If a `callable` transforms the\n `scale` parameters; if `Tensor` is the `tfd.Normal` `scale` argument.\n Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same\n size. (Taking a `callable` or `Tensor` is how `tf.Variable` intializers\n behave.)\n Default value: `1`.\n name: A `name_scope` name for operations created by this function.\n Default value: `None` (i.e., \"Func\").\n\n Returns:\n Func: An instance of `tfd.Normal`.\n \"\"\"\n with arg_2.compat.v1.name_scope(arg_9, 'Func', [arg_0]):\n arg_0 = arg_2.convert_to_tensor(value=arg_0, arg_9='x')\n if callable(arg_8):\n arg_10 = arg_1(arg_0, 2)\n arg_11 = arg_7(arg_10[..., 0])\n arg_12 = arg_8(arg_10[..., 1])\n else:\n arg_10 = arg_2.squeeze(arg_1(arg_0, 1), axis=-1)\n arg_11 = arg_7(arg_10)\n arg_12 = arg_2.cast(arg_8, arg_11.dtype.base_dtype)\n return tfd.Normal(arg_11=arg_11, arg_12=arg_12)"} +{"_id": "doc_683", "title": "", "text": "def Func(arg_0,\n arg_1=arg_2.compat.v1.layers.dense,\n arg_7=lambda arg_0: arg_0,\n arg_8=None):\n \"\"\"Constructs a trainable `tfd.Poisson` distribution.\n\n This function creates a Poisson distribution parameterized by log rate.\n Using default args, this function is mathematically equivalent to:\n\n ```none\n Y = Poisson(log_rate=matmul(W, x) + b)\n\n where,\n W in R^[d, n]\n b in R^d\n ```\n\n #### Examples\n\n This can be used as a [Poisson regression](\n https://en.wikipedia.org/wiki/Poisson_regression) loss.\n\n ```python\n # This example fits a Func regression loss.\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n # Create fictitious training data.\n dtype = np.float32\n n = 3000 # number of samples\n x_size = 4 # size of single x\n def make_training_data():\n np.random.seed(142)\n x = np.random.randn(n, x_size).astype(dtype)\n w = np.random.randn(x_size).astype(dtype)\n b = np.random.randn(1).astype(dtype)\n true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b\n y = np.random.Func(lam=np.exp(true_log_rate)).astype(dtype)\n return y, x\n y, x = make_training_data()\n\n # Build TF graph for fitting Poisson maximum likelihood estimator.\n Func = tfp.trainable_distributions.Func(x)\n loss = -tf.reduce_mean(Func.log_prob(y))\n train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)\n mse = tf.reduce_mean(tf.squared_difference(y, Func.mean()))\n init_op = tf.global_variables_initializer()\n\n # Run graph 1000 times.\n num_steps = 1000\n loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.\n mse_ = np.zeros(num_steps)\n with tf.Session() as sess:\n sess.run(init_op)\n for it in xrange(loss_.size):\n _, loss_[it], mse_[it] = sess.run([train_op, loss, mse])\n if it % 200 == 0 or it == loss_.size - 1:\n print(\"iteration:{} loss:{} mse:{}\".format(it, loss_[it], mse_[it]))\n\n # ==> iteration:0 loss:37.0814208984 mse:6359.41259766\n # iteration:200 loss:1.42010736465 mse:40.7654914856\n # iteration:400 loss:1.39027583599 mse:8.77660560608\n # iteration:600 loss:1.3902695179 mse:8.78443241119\n # iteration:800 loss:1.39026939869 mse:8.78443622589\n # iteration:999 loss:1.39026939869 mse:8.78444766998\n ```\n\n Args:\n x: `Tensor` with floating type. 
Must have statically defined rank and\n statically known right-most dimension.\n layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and\n returns a transformation of `x` with shape\n `tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.\n Default value: `tf.layers.dense`.\n log_rate_fn: Python `callable` which transforms the `log_rate` parameter.\n Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same\n shape and `dtype`.\n Default value: `lambda x: x`.\n name: A `name_scope` name for operations created by this function.\n Default value: `None` (i.e., \"Func\").\n\n Returns:\n Func: An instance of `tfd.Poisson`.\n \"\"\"\n with arg_2.compat.v1.name_scope(arg_8, 'Func', [arg_0]):\n arg_0 = arg_2.convert_to_tensor(value=arg_0, arg_8='x')\n arg_9 = arg_7(arg_2.squeeze(arg_1(arg_0, 1), axis=-1))\n return tfd.Poisson(arg_9=arg_9)"} +{"_id": "doc_684", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3,\n arg_4=None):\n \"\"\"Compute diffusion drift at the current location `current_state`.\n\n The drift of the diffusion at is computed as\n\n ```none\n 0.5 * `step_size` * volatility_parts * `target_log_prob_fn(current_state)`\n + `step_size` * `grads_volatility`\n ```\n\n where `volatility_parts` = `volatility_fn(current_state)**2` and\n `grads_volatility` is a gradient of `volatility_parts` at the `current_state`.\n\n Args:\n step_size_parts: Python `list` of `Tensor`s representing the step size for\n Euler-Maruyama method. Must broadcast with the shape of\n `volatility_parts`. Larger step sizes lead to faster progress, but\n too-large step sizes make rejection exponentially more likely. When\n possible, it's often helpful to match per-variable step sizes to the\n standard deviations of the target distribution in each variable.\n volatility_parts: Python `list` of `Tensor`s representing the value of\n `volatility_fn(*state_parts)`.\n grads_volatility: Python list of `Tensor`s representing the value of the\n gradient of `volatility_parts**2` wrt the state of the chain.\n grads_target_log_prob: Python list of `Tensor`s representing\n gradient of `target_log_prob_fn(*state_parts`) wrt `state_parts`. Must\n have same shape as `volatility_parts`.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'malaFunc').\n\n Returns:\n drift_parts: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. Has same shape as\n input `current_state_parts`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_4, 'malaFunc', [\n arg_0, arg_1, arg_2, arg_3\n ]):\n\n arg_5 = []\n\n for arg_6, arg_7, arg_8, arg_9 in (\n zip(arg_0,\n arg_1,\n arg_2,\n arg_3)):\n arg_10 = tf.square(arg_7)\n arg_11 = 0.5 * arg_6 * (arg_10 * arg_9\n + arg_8)\n arg_5.append(arg_11)\n\n return arg_5"} +{"_id": "doc_685", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_8=None):\n r\"\"\"Helper to `kernel` which computes the log acceptance-correction.\n\n Computes `log_acceptance_correction` as described in `MetropolisHastings`\n class. The proposal density is normal. 
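The drift formula stated in `doc_684`'s docstring is a per-part elementwise expression. A NumPy sketch for a single state part (illustrative names, restating only the docstring's formula):

```python
import numpy as np

def mala_drift(step_size, volatility, grad_volatility_sq, grad_log_prob):
    # 0.5 * eps * (sigma^2 * d/dx log p(x) + d/dx sigma^2(x)), per state part.
    return 0.5 * step_size * (np.square(volatility) * grad_log_prob
                              + grad_volatility_sq)
```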
More specifically,\n\n ```none\n q(proposed_state | current_state) \\sim N(current_state + current_drift,\n step_size * current_volatility**2)\n\n q(current_state | proposed_state) \\sim N(proposed_state + proposed_drift,\n step_size * proposed_volatility**2)\n ```\n\n The `log_acceptance_correction` is then\n\n ```none\n log_acceptance_correctio = q(current_state | proposed_state)\n - q(proposed_state | current_state)\n ```\n\n Args:\n current_state_parts: Python `list` of `Tensor`s representing the value(s) of\n the current state of the chain.\n proposed_state_parts: Python `list` of `Tensor`s representing the value(s)\n of the proposed state of the chain. Must broadcast with the shape of\n `current_state_parts`.\n current_volatility_parts: Python `list` of `Tensor`s representing the value\n of `volatility_fn(*current_volatility_parts)`. Must broadcast with the\n shape of `current_state_parts`.\n proposed_volatility_parts: Python `list` of `Tensor`s representing the value\n of `volatility_fn(*proposed_volatility_parts)`. Must broadcast with the\n shape of `current_state_parts`\n current_drift_parts: Python `list` of `Tensor`s representing value of the\n drift `_get_drift(*current_state_parts, ..)`. Must broadcast with the\n shape of `current_state_parts`.\n proposed_drift_parts: Python `list` of `Tensor`s representing value of the\n drift `_get_drift(*proposed_drift_parts, ..)`. Must broadcast with the\n shape of `current_state_parts`.\n step_size_parts: Python `list` of `Tensor`s representing the step size for\n Euler-Maruyama method. Must broadcast with the shape of\n `current_state_parts`.\n independent_chain_ndims: Scalar `int` `Tensor` representing the number of\n leftmost `Tensor` dimensions which index independent chains.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'compute_log_acceptance_correction').\n\n Returns:\n log_acceptance_correction: `Tensor` representing the `log`\n acceptance-correction. 
(See docstring for mathematical definition.)\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_8, 'compute_log_acceptance_correction', [\n arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5,\n arg_6, arg_7\n ]):\n\n arg_9 = []\n arg_10 = []\n\n for [\n arg_11,\n arg_12,\n arg_13,\n arg_14,\n arg_15,\n arg_16,\n arg_17,\n ] in zip(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n ):\n arg_18 = tf.range(arg_7, tf.rank(arg_11))\n\n arg_19 = arg_12 - arg_11\n\n arg_13 *= tf.sqrt(arg_17)\n\n arg_20 = (arg_19 - arg_15) / arg_13\n\n arg_14 *= tf.sqrt(arg_17)\n # Compute part of `q(proposed_state | current_state)`\n arg_20 = (\n tf.reduce_sum(\n input_tensor=mcmc_util.safe_sum(\n [tf.math.log(arg_13),\n 0.5 * (arg_20**2)]),\n arg_18=arg_18))\n arg_9.append(-arg_20)\n\n # Compute part of `q(current_state | proposed_state)`\n arg_21 = (arg_19 + arg_16) / arg_14\n arg_21 = (\n tf.reduce_sum(\n input_tensor=mcmc_util.safe_sum(\n [tf.math.log(arg_14), 0.5 * (arg_21**2)]),\n arg_18=arg_18))\n arg_10.append(-arg_21)\n\n # Compute `q(proposed_state | current_state)`\n arg_22 = tf.reduce_sum(\n input_tensor=tf.stack(arg_9, arg_18=-1), arg_18=-1)\n # Compute `q(current_state | proposed_state)`\n arg_23 = tf.reduce_sum(\n input_tensor=tf.stack(arg_10, arg_18=-1), arg_18=-1)\n\n return mcmc_util.safe_sum([arg_23,\n -arg_22])"} +{"_id": "doc_686", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=10):\n \"\"\"Helper which computes `volatility_fn` results and grads, if needed.\"\"\"\n arg_6 = list(arg_1) if mcmc_util.is_list_like(arg_1) else [arg_1]\n arg_7 = arg_3 is None\n\n # Convert `volatility_fn_results` to a list\n if arg_2 is None:\n arg_2 = arg_0(*arg_6)\n\n arg_2 = (list(arg_2)\n if mcmc_util.is_list_like(arg_2)\n else [arg_2])\n if len(arg_2) == 1:\n arg_2 *= len(arg_6)\n if len(arg_6) != len(arg_2):\n raise ValueError('`volatility_fn` should return a tensor or a list '\n 'of the same length as `current_state`.')\n\n # The shape of 'volatility_parts' needs to have the number of chains as a\n # leading dimension. For determinism we broadcast 'volatility_parts' to the\n # shape of `state_parts` since each dimension of `state_parts` could have a\n # different volatility value.\n\n arg_2 = _maybe_broadcast_volatility(arg_2,\n arg_6)\n if arg_3 is None:\n [\n arg_8,\n arg_3,\n ] = diag_jacobian(\n xs=arg_6,\n ys=arg_2,\n arg_4=arg_4,\n arg_5=arg_5,\n fn=arg_0)\n\n # Compute gradient of `volatility_parts**2`\n if arg_7:\n arg_3 = [\n 2. 
* g * volatility if g is not None else tf.zeros_like(\n fn_arg, dtype=fn_arg.dtype.base_dtype)\n for g, volatility, fn_arg in zip(\n arg_3, arg_2, arg_6)\n ]\n\n return arg_2, arg_3"} +{"_id": "doc_687", "title": "", "text": "def Func(arg_0,\n arg_1):\n \"\"\"Helper to broadcast `volatility_parts` to the shape of `state_parts`.\"\"\"\n return [arg_2 + tf.zeros_like(arg_3, dtype=arg_3.dtype.base_dtype)\n for arg_2, arg_3 in zip(arg_0, arg_1)]"} +{"_id": "doc_688", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Calls `fn`, appropriately reshaping its input `x` and output.\"\"\"\n # Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs`\n # because it is possible the user provided extra kwargs would itself\n # have `fn` and/or `x` as a key.\n with tf.control_dependencies(arg_0._runtime_assertions +\n arg_0._validate_sample_arg(arg_2)):\n arg_4, arg_5 = arg_0._sample_shape(arg_2)\n arg_6 = tf.concat(\n [\n arg_4,\n arg_0.distribution.batch_shape_tensor(),\n arg_0.event_shape_tensor(),\n ],\n axis=0)\n arg_7 = tf.reshape(arg_2, arg_6)\n arg_8 = arg_1(arg_7, **arg_3) if arg_3 else arg_1(arg_7)\n arg_9 = tf.concat(\n [\n arg_4,\n arg_0._batch_shape_unexpanded,\n ], axis=0)\n arg_8 = tf.reshape(arg_8, arg_9)\n if (tensorshape_util.rank(arg_5) is not None and\n tensorshape_util.rank(arg_0.batch_shape) is not None):\n arg_9 = tensorshape_util.concatenate(arg_5,\n arg_0.batch_shape)\n tensorshape_util.set_shape(arg_8, arg_9)\n return arg_8"} +{"_id": "doc_689", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None):\n \"\"\"Calls `fn` and appropriately reshapes its output.\"\"\"\n # Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs`\n # because it is possible the user provided extra kwargs would itself\n # have `fn`, `event_shape_list`, `static_event_shape_list` and/or\n # `extra_kwargs` as keys.\n with tf.control_dependencies(arg_0._runtime_assertions):\n if arg_2 is None:\n arg_2 = [arg_0._event_shape_tensor()]\n if arg_3 is None:\n arg_3 = [arg_0.event_shape]\n arg_5 = tf.concat(\n [arg_0._batch_shape_unexpanded] + arg_2, axis=0)\n arg_6 = tf.reshape(arg_1(**arg_4) if arg_4 else arg_1(),\n arg_5)\n if (tensorshape_util.rank(arg_0.batch_shape) is not None and\n tensorshape_util.rank(arg_0.event_shape) is not None):\n arg_7 = tf.TensorShape([])\n for arg_8 in arg_3:\n arg_7 = tensorshape_util.concatenate(arg_7, arg_8)\n arg_9 = tensorshape_util.concatenate(\n arg_0.batch_shape, arg_7)\n tensorshape_util.set_shape(arg_6, arg_9)\n return arg_6"} +{"_id": "doc_690", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"The binomial cumulative distribution function.\n\n Args:\n k: floating point `Tensor`.\n n: floating point `Tensor`.\n p: floating point `Tensor`.\n\n Returns:\n `sum_{j=0}^k p^j (1 - p)^(n - j)`.\n \"\"\"\n # Trick for getting safe backprop/gradients into n, k when\n # betainc(a = 0, ..) 
= nan\n # Write:\n # where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))\n arg_3 = tf.ones_like(arg_1 - arg_0)\n arg_4 = tf.equal(arg_0, arg_1)\n arg_5 = tf.where(arg_4, arg_3, arg_1 - arg_0)\n arg_6 = tf.math.betainc(a=arg_5, b=arg_0 + 1, x=1 - arg_2)\n return tf.where(arg_4, arg_3, arg_6)"} +{"_id": "doc_691", "title": "", "text": "def Func(arg_0, arg_1=(), arg_2=None, arg_3=None):\n \"\"\"Executes `model`, creating both samples and distributions.\"\"\"\n arg_4 = []\n arg_5 = []\n arg_2 = seed_stream.SeedStream('JointDistributionCoroutine', arg_2)\n arg_6 = arg_0._model()\n arg_7 = 0\n arg_8 = next(arg_6)\n try:\n while True:\n arg_9 = arg_8.distribution if isinstance(arg_8, arg_0.Root) else arg_8\n arg_4.append(arg_9)\n if (arg_3 is not None and len(arg_3) > arg_7 and\n arg_3[arg_7] is not None):\n arg_2()\n arg_10 = arg_3[arg_7]\n else:\n arg_10 = arg_9.sample(\n arg_1=arg_1 if isinstance(arg_8, arg_0.Root) else (),\n arg_2=arg_2())\n arg_5.append(arg_10)\n arg_7 += 1\n arg_8 = arg_6.send(arg_10)\n except StopIteration:\n pass\n return arg_4, arg_5"} +{"_id": "doc_692", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Latent Dirichlet Allocation in terms of its generative process.\n\n The model posits a distribution over bags of words and is parameterized by\n a concentration and the topic-word probabilities. It collapses per-word\n topic assignments.\n\n Args:\n concentration: A Tensor of shape [1, num_topics], which parameterizes the\n Dirichlet prior over topics.\n topics_words: A Tensor of shape [num_topics, num_words], where each row\n (topic) denotes the probability of each word being in that topic.\n\n Returns:\n bag_of_words: A random variable capturing a sample from the model, of shape\n [1, num_words]. It represents one generated document as a bag of words.\n \"\"\"\n arg_2 = ed.Dirichlet(arg_0=arg_0, name=\"topics\")\n arg_3 = tf.matmul(arg_2, arg_1)\n # The observations are bags of words and therefore not one-hot. However,\n # log_prob of OneHotCategorical computes the probability correctly in\n # this case.\n arg_4 = ed.OneHotCategorical(probs=arg_3, name=\"bag_of_words\")\n return arg_4"} +{"_id": "doc_693", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"20 newsgroups as a tf.data.Dataset.\"\"\"\n arg_4 = np.load(download(arg_0, FILE_TEMPLATE.format(split=arg_1)))\n # The last row is empty in both train and test.\n arg_4 = arg_4[:-1]\n\n # Each row is a list of word ids in the document. We first convert this to\n # sparse COO matrix (which automatically sums the repeating words). Then,\n # we convert this COO matrix to CSR format which allows for fast querying of\n # documents.\n arg_5 = arg_4.shape[0]\n arg_6 = np.array([(row_idx, column_idx)\n for row_idx, row in enumerate(arg_4)\n for column_idx in row])\n arg_7 = scipy.sparse.coo_matrix(\n (np.ones(arg_6.shape[0]), (arg_6[:, 0], arg_6[:, 1])),\n shape=(arg_5, arg_2),\n dtype=np.float32)\n arg_7 = arg_7.tocsr()\n\n arg_8 = tf.data.Dataset.range(arg_5)\n\n # For training, we shuffle each epoch and repeat the epochs.\n if arg_3:\n arg_8 = arg_8.shuffle(arg_5).repeat()\n\n # Returns a single document as a dense TensorFlow tensor. 
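The identity and the NaN guard in `doc_690` carry over directly to SciPy, whose `scipy.special.betainc(a, b, x)` is the same regularized incomplete beta function. A sketch with illustrative names:

```python
import numpy as np
from scipy import special

def binomial_cdf(k, n, p):
    # sum_{j<=k} C(n, j) p^j (1-p)^(n-j) via betainc(n-k, k+1, 1-p),
    # guarding the k == n case where betainc(a=0, ...) is NaN.
    ones = np.ones_like(n - k, dtype=float)
    k_eq_n = (k == n)
    safe_dn = np.where(k_eq_n, ones, n - k)
    out = special.betainc(safe_dn, k + 1, 1 - p)
    return np.where(k_eq_n, ones, out)

print(binomial_cdf(np.array(2.), np.array(4.), np.array(0.5)))  # -> 0.6875
```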
The dataset is\n # stored as a sparse matrix outside of the graph.\n def get_row_py_func(arg_9):\n def get_row_python(arg_10):\n return np.squeeze(np.array(arg_7[arg_10].todense()), axis=0)\n\n arg_11 = tf.compat.v1.py_func(\n get_row_python, [arg_9], tf.float32, stateful=False)\n arg_11.set_shape((arg_2,))\n return arg_11\n\n arg_8 = arg_8.map(get_row_py_func)\n return arg_8"} +{"_id": "doc_694", "title": "", "text": "def Func(arg_0):\n \"\"\"Builds fake data for unit testing.\"\"\"\n arg_1 = 1000\n arg_2 = [str(i) for i in range(arg_1)]\n\n arg_3 = np.random.randint(\n 10, size=(arg_0, arg_1)).astype(np.float32)\n\n def train_input_fn():\n arg_4 = tf.data.Dataset.from_tensor_slices(arg_3)\n arg_4 = arg_4.batch(arg_0).repeat()\n return tf.compat.v1.data.make_one_shot_iterator(arg_4).get_next()\n\n def eval_input_fn():\n arg_4 = tf.data.Dataset.from_tensor_slices(arg_3)\n arg_4 = arg_4.batch(arg_0)\n return tf.compat.v1.data.make_one_shot_iterator(arg_4).get_next()\n\n return train_input_fn, eval_input_fn, arg_2"} +{"_id": "doc_695", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Builds iterators for train and evaluation data.\n\n Each object is represented as a bag-of-words vector.\n\n Arguments:\n data_dir: Folder in which to store the data.\n batch_size: Batch size for both train and evaluation.\n\n Returns:\n train_input_fn: A function that returns an iterator over the training data.\n eval_input_fn: A function that returns an iterator over the evaluation data.\n vocabulary: A mapping of word's integer index to the corresponding string.\n \"\"\"\n\n with open(download(arg_0, \"vocab.pkl\"), \"r\") as f:\n arg_2 = pickle.load(f)\n arg_3 = len(arg_2)\n\n arg_4 = [None] * arg_3\n for arg_5, arg_6 in arg_2.items():\n arg_4[arg_6] = arg_5\n\n # Build an iterator over training batches.\n def train_input_fn():\n arg_7 = newsgroups_dataset(\n arg_0, \"train\", arg_3, shuffle_and_repeat=True)\n # Prefetching makes training about 1.5x faster.\n arg_7 = arg_7.batch(arg_1).prefetch(32)\n return tf.compat.v1.data.make_one_shot_iterator(arg_7).get_next()\n\n # Build an iterator over the heldout set.\n def eval_input_fn():\n arg_7 = newsgroups_dataset(\n arg_0, \"test\", arg_3, shuffle_and_repeat=False)\n arg_7 = arg_7.batch(arg_1)\n return tf.compat.v1.data.make_one_shot_iterator(arg_7).get_next()\n\n return train_input_fn, eval_input_fn, arg_4"} +{"_id": "doc_696", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4):\n \"\"\"Add control dependencies to the commmitment loss to update the codebook.\n\n Args:\n vector_quantizer: An instance of the VectorQuantizer class.\n one_hot_assignments: The one-hot vectors corresponding to the matched\n codebook entry for each code in the batch.\n codes: A `float`-like `Tensor` containing the latent vectors to be compared\n to the codebook.\n commitment_loss: The commitment loss from comparing the encoder outputs to\n their neighboring codebook entries.\n decay: Decay factor for exponential moving average.\n\n Returns:\n commitment_loss: Commitment loss with control dependencies.\n \"\"\"\n # Use an exponential moving average to update the codebook.\n arg_5 = moving_averages.assign_moving_average(\n arg_0.ema_count,\n tf.reduce_sum(input_tensor=arg_1, axis=[0, 1]),\n arg_4,\n zero_debias=False)\n arg_6 = moving_averages.assign_moving_average(\n arg_0.ema_means,\n tf.reduce_sum(\n input_tensor=tf.expand_dims(arg_2, 2) *\n tf.expand_dims(arg_1, 3),\n axis=[0, 1]),\n arg_4,\n zero_debias=False)\n\n # Add small value to avoid dividing by 
zero.\n arg_7 = arg_5 + 1e-5\n with tf.control_dependencies([arg_3]):\n arg_8 = tf.compat.v1.assign(\n arg_0.codebook,\n arg_6 / arg_7[..., tf.newaxis])\n with tf.control_dependencies([arg_8]):\n return tf.identity(arg_3)"} +{"_id": "doc_697", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper method to save a grid of images to a PNG file.\n\n Args:\n x: A numpy array of shape [n_images, height, width].\n fname: The filename to write to (including extension).\n \"\"\"\n arg_2 = arg_0.shape[0]\n arg_3 = figure.Figure(figsize=(arg_2, 1), frameon=False)\n arg_4 = backend_agg.FigureCanvasAgg(arg_3)\n for arg_5 in range(arg_2):\n arg_6 = arg_3.add_subplot(1, arg_2, arg_5+1)\n arg_6.imshow(arg_0[arg_5].squeeze(),\n interpolation=\"none\",\n cmap=cm.get_cmap(\"binary\"))\n arg_6.axis(\"off\")\n arg_4.print_figure(arg_1, format=\"png\")\n print(\"saved %s\" % arg_1)"} +{"_id": "doc_698", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a `np.dtype` based on this `dtype`.\"\"\"\n arg_0 = tf.as_dtype(arg_0)\n if hasattr(arg_0, 'Func'):\n return arg_0.Func\n return arg_0"} +{"_id": "doc_699", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns whether this is a boolean data type.\"\"\"\n arg_0 = tf.as_dtype(arg_0)\n if hasattr(arg_0, 'Func'):\n return arg_0.Func\n # We use `kind` because:\n # np.issubdtype(np.uint8, np.bool) == True.\n return np.dtype(arg_0).kind == 'b'"} +{"_id": "doc_700", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns whether this is a complex floating point type.\"\"\"\n arg_0 = tf.as_dtype(arg_0)\n if hasattr(arg_0, 'Func'):\n return arg_0.Func\n return np.issubdtype(np.dtype(arg_0), np.complex)"} +{"_id": "doc_701", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the string Func for this `dtype`.\"\"\"\n arg_0 = tf.as_dtype(arg_0)\n if hasattr(arg_0, 'Func'):\n return arg_0.Func\n if hasattr(arg_0, '__Func__'):\n return arg_0.__Func__\n return str(arg_0)"} +{"_id": "doc_702", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"Validate and return float type based on `tensors` and `dtype`.\n\n For ops such as matrix multiplication, inputs and weights must be of the\n same float type. This function validates that all `tensors` are the same type,\n validates that type is `dtype` (if supplied), and returns the type. Type must\n be a floating point type. If neither `tensors` nor `dtype` is supplied,\n the function will return `dtypes.float32`.\n\n Args:\n tensors: Tensors of input values. 
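The codebook update in `doc_696` hinges on `assign_moving_average` with `zero_debias=False`, which is a plain exponential moving average. A NumPy sketch of that update rule (illustrative names; the control-dependency plumbing is TF-specific and omitted):

```python
import numpy as np

def ema_update(ema, value, decay):
    # Without zero-debiasing: ema <- decay * ema + (1 - decay) * value.
    return decay * ema + (1.0 - decay) * value

counts = ema_update(np.ones(4), np.array([0., 2., 4., 6.]), decay=0.99)
print(counts)  # -> [0.99 1.01 1.03 1.05]
```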
Can include `None` elements, which will\n be ignored.\n dtype: Expected type.\n\n Returns:\n Validated type.\n\n Raises:\n ValueError: if neither `tensors` nor `dtype` is supplied, or result is not\n float, or the common type of the inputs is not a floating point type.\n \"\"\"\n if arg_0:\n arg_1 = _assert_same_base_type(arg_0, arg_1)\n if not arg_1:\n arg_1 = tf.float32\n elif not is_floating(arg_1):\n raise ValueError('Expected floating point type, got {}.'.format(arg_1))\n return arg_1"} +{"_id": "doc_703", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4):\n \"\"\"Creates the condition function pair for a reflection to be accepted.\"\"\"\n def _replace_worst_with_reflected():\n arg_5 = _replace_at_index(arg_0, arg_2, arg_3)\n arg_6 = _replace_at_index(arg_1, arg_2,\n arg_4)\n return False, arg_5, arg_6, 0\n return _replace_worst_with_reflected"} +{"_id": "doc_704", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7):\n \"\"\"Creates the condition function pair for an expansion.\"\"\"\n def _expand_and_maybe_replace():\n \"\"\"Performs the expansion step.\"\"\"\n arg_8 = arg_6 + arg_7 * (arg_4 - arg_6)\n arg_9 = arg_0(arg_8)\n arg_10 = (arg_9 <\n arg_5)\n arg_11 = lambda: (arg_8, arg_9)\n arg_12 = lambda: (arg_4, arg_5)\n arg_13, arg_14 = prefer_static.cond(\n arg_10, arg_11, arg_12)\n arg_15 = _replace_at_index(arg_1, arg_3, arg_13)\n arg_16 = _replace_at_index(arg_2,\n arg_3,\n arg_14)\n return False, arg_15, arg_16, 1\n return _expand_and_maybe_replace"} +{"_id": "doc_705", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n arg_9,\n arg_10):\n \"\"\"Creates the condition function pair for an outside contraction.\"\"\"\n def _contraction():\n \"\"\"Performs a contraction.\"\"\"\n arg_11 = arg_3 + arg_8 * (arg_6 - arg_3)\n arg_12 = arg_0(arg_11)\n arg_13 = arg_12 <= arg_7\n def _accept_contraction():\n arg_14 = _replace_at_index(arg_1, arg_5, arg_11)\n arg_15 = _replace_at_index(\n arg_2,\n arg_5,\n arg_12)\n return (False,\n arg_14,\n arg_15,\n 1)\n\n def _reject_contraction():\n return _shrink_towards_best(arg_0,\n arg_1,\n arg_4,\n arg_9,\n arg_10)\n\n return prefer_static.cond(arg_13,\n _accept_contraction,\n _reject_contraction)\n return _contraction"} +{"_id": "doc_706", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5):\n \"\"\"Returns True if the simplex has converged.\n\n If the simplex size is smaller than the `position_tolerance` or the variation\n of the function value over the vertices of the simplex is smaller than the\n `func_tolerance` return True else False.\n\n Args:\n simplex: `Tensor` of real dtype. The simplex to test for convergence. For\n more details, see the docstring for `initial_simplex` argument\n of `minimize`.\n best_vertex: `Tensor` of real dtype and rank one less than `simplex`. The\n vertex with the best (i.e. smallest) objective value.\n best_objective: Scalar `Tensor` of real dtype. The best (i.e. smallest)\n value of the objective function at a vertex.\n worst_objective: Scalar `Tensor` of same dtype as `best_objective`. The\n worst (i.e. largest) value of the objective function at a vertex.\n func_tolerance: Scalar positive `Tensor`. The tolerance for the variation\n of the objective function value over the simplex. If the variation over\n the simplex vertices is below this threshold, convergence is True.\n position_tolerance: Scalar positive `Tensor`. 
The algorithm stops if the\n lengths (under the supremum norm) of edges connecting to the best vertex\n are below this threshold.\n\n Returns:\n has_converged: A scalar boolean `Tensor` indicating whether the algorithm\n is deemed to have converged.\n \"\"\"\n arg_6 = tf.abs(arg_3 -\n arg_2) < arg_4\n arg_7 = tf.reduce_max(\n input_tensor=tf.abs(arg_0 - arg_1)) < arg_5\n return arg_6 | arg_7"} +{"_id": "doc_707", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6):\n \"\"\"Computes the initial simplex and the objective values at the simplex.\n\n Args:\n objective_function: A Python callable that accepts a point as a\n real `Tensor` and returns a `Tensor` of real dtype containing\n the value of the function at that point. The function\n to be evaluated at the simplex. If `batch_evaluate_objective` is `True`,\n the callable may be evaluated on a `Tensor` of shape `[n+1] + s `\n where `n` is the dimension of the problem and `s` is the shape of a\n single point in the domain (so `n` is the size of a `Tensor`\n representing a single point).\n In this case, the expected return value is a `Tensor` of shape `[n+1]`.\n initial_simplex: None or `Tensor` of real dtype. The initial simplex to\n start the search. If supplied, should be a `Tensor` of shape `[n+1] + s`\n where `n` is the dimension of the problem and `s` is the shape of a\n single point in the domain. Each row (i.e. the `Tensor` with a given\n value of the first index) is interpreted as a vertex of a simplex and\n hence the rows must be affinely independent. If not supplied, an axes\n aligned simplex is constructed using the `initial_vertex` and\n `step_sizes`. Only one and at least one of `initial_simplex` and\n `initial_vertex` must be supplied.\n initial_vertex: None or `Tensor` of real dtype and any shape that can\n be consumed by the `objective_function`. A single point in the domain that\n will be used to construct an axes aligned initial simplex.\n step_sizes: None or `Tensor` of real dtype and shape broadcasting\n compatible with `initial_vertex`. Supplies the simplex scale along each\n axes. Only used if `initial_simplex` is not supplied. See the docstring\n of `minimize` for more details.\n objective_at_initial_simplex: None or rank `1` `Tensor` of real dtype.\n The value of the objective function at the initial simplex.\n May be supplied only if `initial_simplex` is\n supplied. If not supplied, it will be computed.\n objective_at_initial_vertex: None or scalar `Tensor` of real dtype. The\n value of the objective function at the initial vertex. May be supplied\n only if the `initial_vertex` is also supplied.\n batch_evaluate_objective: Python `bool`. If True, the objective function\n will be evaluated on all the vertices of the simplex packed into a\n single tensor. If False, the objective will be mapped across each\n vertex separately.\n\n Returns:\n prepared_args: A tuple containing the following elements:\n dimension: Scalar `Tensor` of `int32` dtype. The dimension of the problem\n as inferred from the supplied arguments.\n num_vertices: Scalar `Tensor` of `int32` dtype. The number of vertices\n in the simplex.\n simplex: A `Tensor` of same dtype as `initial_simplex`\n (or `initial_vertex`). The first component of the shape of the\n `Tensor` is `num_vertices` and each element represents a vertex of\n the simplex.\n objective_at_simplex: A `Tensor` of same dtype as the dtype of the\n return value of objective_function. The shape is a vector of size\n `num_vertices`. 
The objective function evaluated at the simplex.\n num_evaluations: An `int32` scalar `Tensor`. The number of points on\n which the objective function was evaluated.\n\n Raises:\n ValueError: If any of the following conditions hold\n 1. If none or more than one of `initial_simplex` and `initial_vertex` are\n supplied.\n 2. If `initial_simplex` and `step_sizes` are both specified.\n \"\"\"\n if arg_4 is not None and arg_1 is None:\n raise ValueError('`objective_at_initial_simplex` specified but the'\n '`initial_simplex` was not.')\n\n if arg_5 is not None and arg_2 is None:\n raise ValueError('`objective_at_initial_vertex` specified but the'\n '`initial_vertex` was not.')\n\n # The full simplex was specified.\n if arg_1 is not None:\n if arg_2 is not None:\n raise ValueError('Both `initial_simplex` and `initial_vertex` specified.'\n ' Only one of the two should be specified.')\n\n if arg_3 is not None:\n raise ValueError('`step_sizes` must not be specified when an'\n ' `initial_simplex` has been specified.')\n return Func_with_initial_simplex(arg_0,\n arg_1,\n arg_4,\n arg_6)\n\n if arg_2 is None:\n raise ValueError('One of `initial_simplex` or `initial_vertex`'\n ' must be supplied')\n\n if arg_3 is None:\n arg_3 = _default_step_sizes(arg_2)\n\n return Func_with_initial_vertex(arg_0,\n arg_2,\n arg_3,\n arg_5,\n arg_6)"} +{"_id": "doc_708", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Evaluates the objective function at the specified initial simplex.\"\"\"\n arg_1 = tf.convert_to_tensor(value=arg_1)\n\n # If d is the dimension of the problem, the number of vertices in the\n # simplex should be d+1. From this, we can infer the number of dimensions\n # as n - 1 where n is the number of vertices specified.\n arg_4 = tf.shape(input=arg_1)[0]\n arg_5 = arg_4 - 1\n arg_6 = 0\n\n if arg_2 is None:\n arg_2, arg_7 = _evaluate_objective_multiple(\n arg_0, arg_1, arg_3)\n arg_6 += arg_7\n arg_2 = tf.convert_to_tensor(\n value=arg_2)\n return (arg_5,\n arg_4,\n arg_1,\n arg_2,\n arg_6)"} +{"_id": "doc_709", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4):\n \"\"\"Constructs a standard axes aligned simplex.\"\"\"\n arg_5 = tf.size(input=arg_1)\n arg_6 = arg_5 + 1\n arg_7 = tf.reshape(\n tf.eye(arg_5, arg_5, dtype=arg_1.dtype.base_dtype),\n tf.concat([[arg_5], tf.shape(input=arg_1)], axis=0))\n\n # If step_sizes does not broadcast to initial_vertex, the multiplication\n # in the second term will fail.\n arg_8 = arg_1 + arg_2 * arg_7\n arg_9 = tf.concat([tf.expand_dims(arg_1, axis=0),\n arg_8], axis=0)\n arg_10 = 0\n # Evaluate the objective function at the simplex vertices.\n if arg_3 is None:\n arg_3 = arg_0(arg_1)\n arg_10 += 1\n\n arg_11, arg_12 = _evaluate_objective_multiple(\n arg_0, arg_8, arg_4)\n arg_10 += arg_12\n\n arg_13 = tf.concat(\n [\n tf.expand_dims(arg_3, axis=0),\n arg_11\n ], axis=0)\n\n return (arg_5,\n arg_6,\n arg_9,\n arg_13,\n arg_10)"} +{"_id": "doc_710", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2):\n \"\"\"Evaluates the objective function on a batch of points.\n\n If `batch_evaluate_objective` is True, returns\n `objective function(arg_batch)` else it maps the `objective_function`\n across the `arg_batch`.\n\n Args:\n objective_function: A Python callable that accepts a single `Tensor` of\n rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype\n containing the value of the function at that point. 
If\n `batch_evaluate_objective` is `True`, the callable may be evaluated on\n a `Tensor` of shape `[batch_size] + s ` where `batch_size` is the\n size of the batch of args. In this case, the expected return value is a\n `Tensor` of shape `[batch_size]`.\n arg_batch: A `Tensor` of real dtype. The batch of arguments at which to\n evaluate the `objective_function`. If `batch_evaluate_objective` is False,\n `arg_batch` will be unpacked along the zeroth axis and the\n `objective_function` will be applied to each element.\n batch_evaluate_objective: `bool`. Whether the `objective_function` can\n evaluate a batch of arguments at once.\n\n Returns:\n A tuple containing:\n objective_values: A `Tensor` of real dtype and shape `[batch_size]`.\n The value of the objective function evaluated at the supplied\n `arg_batch`.\n num_evaluations: An `int32` scalar `Tensor` containing the number of\n points on which the objective function was evaluated (i.e. `batch_size`).\n \"\"\"\n arg_3 = tf.shape(input=arg_1)[0]\n if arg_2:\n return arg_0(arg_1), arg_3\n return tf.map_fn(arg_0, arg_1), arg_3"} +{"_id": "doc_711", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3=10, arg_4=\"\"):\n \"\"\"Save a PNG plot visualizing posterior uncertainty on heldout data.\n\n Args:\n input_vals: A `float`-like Numpy `array` of shape\n `[num_heldout] + IMAGE_SHAPE`, containing heldout input images.\n probs: A `float`-like Numpy array of shape `[num_monte_carlo,\n num_heldout, num_classes]` containing Monte Carlo samples of\n class probabilities for each heldout sample.\n fname: Python `str` filename to save the plot to.\n n: Python `int` number of datapoints to visualize.\n title: Python `str` title for the plot.\n \"\"\"\n arg_5 = figure.Figure(figsize=(9, 3*arg_3))\n arg_6 = backend_agg.FigureCanvasAgg(arg_5)\n for arg_7 in range(arg_3):\n arg_8 = arg_5.add_subplot(arg_3, 3, 3*arg_7 + 1)\n arg_8.imshow(arg_0[arg_7, :].reshape(IMAGE_SHAPE[:-1]), interpolation=\"None\")\n\n arg_8 = arg_5.add_subplot(arg_3, 3, 3*arg_7 + 2)\n for arg_9 in arg_1:\n sns.barplot(np.arange(10), arg_9[arg_7, :], alpha=0.1, arg_8=arg_8)\n arg_8.set_ylim([0, 1])\n arg_8.set_title(\"posterior samples\")\n\n arg_8 = arg_5.add_subplot(arg_3, 3, 3*arg_7 + 3)\n sns.barplot(np.arange(10), np.mean(arg_1[:, arg_7, :], axis=0), arg_8=arg_8)\n arg_8.set_ylim([0, 1])\n arg_8.set_title(\"predictive probs\")\n arg_5.suptitle(arg_4)\n arg_5.tight_layout()\n\n arg_6.print_figure(arg_2, format=\"png\")\n print(\"saved {}\".format(arg_2))"} +{"_id": "doc_712", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Instantiates an initializer from a configuration dictionary.\"\"\"\n return arg_0(**{\n 'initializers': [tf.compat.v2.initializers.deserialize(arg_2)\n for arg_2 in arg_1.get('initializers', [])],\n 'sizes': arg_1.get('sizes', []),\n 'validate_args': arg_1.get('validate_args', False),\n })"} +{"_id": "doc_713", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Compute the log of the exponentially weighted moving mean of the exp.\n\n If `log_value` is a draw from a stationary random variable, this function\n approximates `log(E[exp(log_value)])`, i.e., a weighted log-sum-exp. 
More\n precisely, a `tf.Variable`, `log_mean_exp_var`, is updated by `log_value`\n using the following identity:\n\n ```none\n log_mean_exp_var =\n = log(decay exp(log_mean_exp_var) + (1 - decay) exp(log_value))\n = log(exp(log_mean_exp_var + log(decay)) + exp(log_value + log1p(-decay)))\n = log_mean_exp_var\n + log( exp(log_mean_exp_var - log_mean_exp_var + log(decay))\n + exp(log_value - log_mean_exp_var + log1p(-decay)))\n = log_mean_exp_var\n + log_sum_exp([log(decay), log_value - log_mean_exp_var + log1p(-decay)]).\n ```\n\n In addition to numerical stability, this formulation is advantageous because\n `log_mean_exp_var` can be updated in a lock-free manner, i.e., using\n `assign_add`. (Note: the updates are not thread-safe; it's just that the\n update to the tf.Variable is presumed efficient due to being lock-free.)\n\n Args:\n log_mean_exp_var: `float`-like `Variable` representing the log of the\n exponentially weighted moving mean of the exp. Same shape as `log_value`.\n log_value: `float`-like `Tensor` representing a new (streaming) observation.\n Same shape as `log_mean_exp_var`.\n decay: A `float`-like `Tensor`. The moving mean decay. Typically close to\n `1.`, e.g., `0.999`.\n name: Optional name of the returned operation.\n\n Returns:\n log_mean_exp_var: A reference to the input 'Variable' tensor with the\n `log_value`-updated log of the exponentially weighted moving mean of exp.\n\n Raises:\n TypeError: if `log_mean_exp_var` does not have float type `dtype`.\n TypeError: if `log_mean_exp_var`, `log_value`, `decay` have different\n `base_dtype`.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, \"Func\",\n [arg_0, arg_1, arg_2]):\n # We want to update the variable in a numerically stable and lock-free way.\n # To do this, observe that variable `x` updated by `v` is:\n # x = log(w exp(x) + (1-w) exp(v))\n # = log(exp(x + log(w)) + exp(v + log1p(-w)))\n # = x + log(exp(x - x + log(w)) + exp(v - x + log1p(-w)))\n # = x + lse([log(w), v - x + log1p(-w)])\n with tf.compat.v1.colocate_with(arg_0):\n arg_4 = arg_0.dtype.base_dtype\n if not arg_4.is_floating:\n raise TypeError(\n \"log_mean_exp_var.base_dtype({}) does not have float type \"\n \"`dtype`.\".format(arg_4.name))\n arg_1 = tf.convert_to_tensor(\n value=arg_1, dtype=arg_4, arg_3=\"log_value\")\n arg_2 = tf.convert_to_tensor(value=arg_2, dtype=arg_4, arg_3=\"decay\")\n arg_5 = (arg_1 - arg_0)[tf.newaxis, ...]\n arg_6 = tf.concat([\n tf.math.log(arg_2) * tf.ones_like(arg_5),\n arg_5 + tf.math.log1p(-arg_2)\n ],\n axis=0)\n arg_6 = tf.reduce_logsumexp(input_tensor=arg_6, axis=0)\n return arg_0.assign_add(arg_6)"} +{"_id": "doc_714", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Ensures non-scalar input has at least one column.\n\n Example:\n If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`.\n\n If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged.\n\n If `x = 1` then the output is unchanged.\n\n Args:\n x: `Tensor`.\n\n Returns:\n columnar_x: `Tensor` with at least two dimensions.\n \"\"\"\n if tensorshape_util.rank(arg_1.shape) is not None:\n if tensorshape_util.rank(arg_1.shape) == 1:\n arg_1 = arg_1[tf.newaxis, :]\n return arg_1\n arg_2 = tf.shape(input=arg_1)\n arg_3 = tf.concat([\n arg_2[:-1],\n distribution_util.pick_vector(\n tf.equal(tf.rank(arg_1), 1), [1], np.array([], dtype=np.int32)),\n arg_2[-1:],\n ], 0)\n return tf.reshape(arg_1, arg_3)"} +{"_id": "doc_715", "title": "", "text": "def Func(arg_0, arg_1=arg_2.float32, arg_4=None, arg_5=None):\n \"\"\"Generates `Tensor` consisting of `-1` or 
`+1`, chosen uniformly at random.\n\n For more details, see [Rademacher distribution](\n https://en.wikipedia.org/wiki/Rademacher_distribution).\n\n Args:\n shape: Vector-shaped, `int` `Tensor` representing shape of output.\n dtype: (Optional) TF `dtype` representing `dtype` of output.\n seed: (Optional) Python integer to seed the random number generator.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'Func').\n\n Returns:\n rademacher: `Tensor` with specified `shape` and `dtype` consisting of `-1`\n or `+1` chosen uniformly-at-random.\n \"\"\"\n with arg_2.compat.v1.name_scope(arg_5, 'Func', [arg_0, arg_4]):\n # Choose the dtype to cause `2 * random_bernoulli - 1` to run in the same\n # memory (host or device) as the downstream cast will want to put it. The\n # convention on GPU is that int32 are in host memory and int64 are in device\n # memory.\n arg_6 = arg_2.int64 if arg_2.as_dtype(arg_1) != arg_2.int32 else arg_2.int32\n arg_7 = arg_2.random.uniform(\n arg_0, minval=0, maxval=2, arg_1=arg_6, arg_4=arg_4)\n return arg_2.cast(2 * arg_7 - 1, arg_1)"} +{"_id": "doc_716", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=arg_3.float32, arg_5=None, arg_6=None):\n \"\"\"Generates `Tensor` of positive reals drawn from a Rayleigh distribution.\n\n The probability density function of a Rayleigh distribution with `scale`\n parameter is given by:\n\n ```none\n f(x) = x scale**-2 exp(-x**2 0.5 scale**-2)\n ```\n\n For more details, see [Rayleigh distribution](\n https://en.wikipedia.org/wiki/Rayleigh_distribution)\n\n Args:\n shape: Vector-shaped, `int` `Tensor` representing shape of output.\n scale: (Optional) Positive `float` `Tensor` representing `Rayleigh` scale.\n Default value: `None` (i.e., `scale = 1.`).\n dtype: (Optional) TF `dtype` representing `dtype` of output.\n Default value: `tf.float32`.\n seed: (Optional) Python integer to seed the random number generator.\n Default value: `None` (i.e., no seed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'Func').\n\n Returns:\n rayleigh: `Tensor` with specified `shape` and `dtype` consisting of positive\n real values drawn from a Rayleigh distribution with specified `scale`.\n \"\"\"\n with arg_3.compat.v1.name_scope(arg_6, 'Func', [arg_0, arg_1, arg_5]):\n if arg_1 is not None:\n # It's important to expand the shape to match scale's, otherwise we won't\n # have independent draws.\n arg_1 = arg_3.convert_to_tensor(value=arg_1, arg_2=arg_2, arg_6='scale')\n arg_0 = arg_3.broadcast_dynamic_shape(arg_0, arg_3.shape(input=arg_1))\n arg_7 = arg_3.sqrt(-2. * arg_3.math.log(\n arg_3.random.uniform(arg_0, minval=0, maxval=1, arg_2=arg_2, arg_5=arg_5)))\n if arg_1 is None:\n return arg_7\n return arg_7 * arg_1"} +{"_id": "doc_717", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Convenience function which chooses the condition based on the predicate.\"\"\"\n # Note: This function is only valid if all of pred, cond_true, and cond_false\n # are scalars. 
This means its semantics are arguably more like tf.cond than\n # tf.where even though we use tf.where to implement it.\n arg_3 = tf.get_static_value(tf.convert_to_tensor(value=arg_0))\n if arg_3 is None:\n return tf.where(arg_0, arg_1, arg_2)\n return arg_1 if arg_3 else arg_2"} +{"_id": "doc_718", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n **arg_5):\n \"\"\"Finish computation of log_prob on one element of the inverse image.\"\"\"\n arg_2 = arg_0._maybe_rotate_dims(arg_2, rotate_right=True)\n arg_6 = arg_0.distribution.log_prob(arg_2, **arg_5)\n if arg_0._is_maybe_event_override:\n arg_6 = tf.reduce_sum(\n input_tensor=arg_6, axis=arg_0._reduce_event_indices)\n arg_6 += tf.cast(arg_3, arg_6.dtype)\n if arg_0._is_maybe_event_override and isinstance(arg_4, int):\n tensorshape_util.set_shape(\n arg_6,\n tf.broadcast_static_shape(\n tensorshape_util.with_rank_at_least(arg_1.shape, 1)[:-arg_4],\n arg_0.batch_shape))\n return arg_6"} +{"_id": "doc_719", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Helper which rolls left event_dims left or right event_dims right.\"\"\"\n arg_3 = tf.get_static_value(arg_0._needs_rotation)\n if arg_3 is not None and not arg_3:\n return arg_1\n arg_4 = prefer_static.rank(arg_1)\n arg_5 = (arg_4 - arg_0._rotate_ndims) if arg_2 else arg_0._rotate_ndims\n arg_6 = prefer_static.concat([\n prefer_static.range(arg_5, arg_4), prefer_static.range(0, arg_5)], axis=0)\n return tf.transpose(a=arg_1, arg_6=arg_6)"} +{"_id": "doc_720", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6=None):\n r\"\"\"Inverse of tf.nn.batch_normalization.\n\n Args:\n x: Input `Tensor` of arbitrary dimensionality.\n mean: A mean `Tensor`.\n variance: A variance `Tensor`.\n offset: An offset `Tensor`, often denoted `beta` in equations, or\n None. If present, will be added to the normalized tensor.\n scale: A scale `Tensor`, often denoted `gamma` in equations, or\n `None`. 
If present, the scale is applied to the normalized tensor.\n variance_epsilon: A small `float` added to the minibatch `variance` to\n prevent dividing by zero.\n name: A name for this operation (optional).\n\n Returns:\n batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`.\n \"\"\"\n with tf.compat.v2.name_scope(arg_6 or \"undo_batchnorm\"):\n # inv = tf.rsqrt(variance + variance_epsilon)\n # if scale is not None:\n # inv *= scale\n # return x * inv + (\n # offset - mean * inv if offset is not None else -mean * inv)\n arg_7 = tf.sqrt(arg_2 + arg_5)\n if arg_4 is not None:\n arg_7 /= arg_4\n arg_8 = arg_0 * arg_7 + (\n arg_1 - arg_3 * arg_7 if arg_3 is not None else arg_1)\n return arg_8"} +{"_id": "doc_721", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check for valid BatchNormalization layer.\n\n Args:\n layer: Instance of `tf.layers.BatchNormalization`.\n Raises:\n ValueError: If batchnorm_layer argument is not an instance of\n `tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or\n if `batchnorm_layer.virtual_batch_size` is specified.\n \"\"\"\n if (not isinstance(arg_1, tf.keras.layers.BatchNormalization) and\n not isinstance(arg_1, tf.compat.v1.layers.BatchNormalization)):\n raise ValueError(\n \"batchnorm_layer must be an instance of BatchNormalization layer.\")\n if arg_1.renorm:\n raise ValueError(\"BatchNorm Bijector does not support renormalization.\")\n if arg_1.virtual_batch_size:\n raise ValueError(\n \"BatchNorm Bijector does not support virtual batch sizes.\")"} +{"_id": "doc_722", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Applies a single slicing step to `dist`, returning a new instance.\"\"\"\n if len(arg_2) == 1 and arg_2[0] == Ellipsis:\n # The path used by Distribution.copy: batch_slice(...args..., Ellipsis)\n arg_4 = {}\n else:\n arg_4 = _slice_params_to_dict(arg_0, arg_1, arg_2)\n arg_4.update(arg_3)\n arg_5 = dict(arg_0.parameters, **arg_4)\n arg_6 = type(arg_0)(**arg_5)\n return arg_6"} +{"_id": "doc_723", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None,\n arg_10=True,\n arg_11=None,\n arg_12=None):\n \"\"\"Runs multiple Fisher scoring steps.\n\n Args:\n model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row\n represents a sample's features.\n response: (Batch of) vector-shaped `Tensor` where each element represents a\n sample's observed response (to the corresponding row of features). Must\n have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance which implicitly\n characterizes a negative log-likelihood loss by specifying the\n distribution's `mean`, `gradient_mean`, and `variance`.\n model_coefficients_start: Optional (batch of) vector-shaped `Tensor`\n representing the initial model coefficients, one for each column in\n `model_matrix`. 
Must have same `dtype` as `model_matrix`.\n Default value: Zeros.\n predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`\n matching `response`; represents `offset` shifted initial linear\n predictions based on `model_coefficients_start`.\n Default value: `offset` if `model_coefficients is None`, and\n `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`\n otherwise.\n l2_regularizer: Optional scalar `Tensor` representing L2 regularization\n penalty, i.e.,\n `loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.\n Default value: `None` (i.e., no L2 regularization).\n dispersion: Optional (batch of) `Tensor` representing `response` dispersion,\n i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.\n Must broadcast with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n offset: Optional `Tensor` representing constant shift applied to\n `predicted_linear_response`. Must broadcast to `response`.\n Default value: `None` (i.e., `tf.zeros_like(response)`).\n convergence_criteria_fn: Python `callable` taking:\n `is_converged_previous`, `iter_`, `model_coefficients_previous`,\n `predicted_linear_response_previous`, `model_coefficients_next`,\n `predicted_linear_response_next`, `response`, `model`, `dispersion` and\n returning a `bool` `Tensor` indicating that Fisher scoring has converged.\n See `convergence_criteria_small_relative_norm_weights_change` as an\n example function.\n Default value: `None` (i.e.,\n `convergence_criteria_small_relative_norm_weights_change`).\n learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative\n progress. Typically only needed if optimization diverges, should be no\n larger than `1` and typically very close to `1`.\n Default value: `None` (i.e., `1`).\n fast_unsafe_numerics: Optional Python `bool` indicating if faster, less\n numerically accurate methods can be employed for computing the weighted\n least-squares solution.\n Default value: `True` (i.e., \"fast but possibly diminished accuracy\").\n maximum_iterations: Optional maximum number of iterations of Fisher scoring\n to run; \"and-ed\" with result of `convergence_criteria_fn`.\n Default value: `None` (i.e., `infinity`).\n name: Python `str` used as name prefix to ops created by this function.\n Default value: `\"Func\"`.\n\n Returns:\n model_coefficients: (Batch of) vector-shaped `Tensor`; represents the\n Functed model coefficients, one for each column in `model_matrix`.\n predicted_linear_response: `response`-shaped `Tensor` representing linear\n predictions based on new `model_coefficients`, i.e.,\n `tf.linalg.matvec(model_matrix, model_coefficients) + offset`.\n is_converged: `bool` `Tensor` indicating that the returned\n `model_coefficients` met the `convergence_criteria_fn` criteria within the\n `maximum_iterations` limit.\n iter_: `int32` `Tensor` indicating the number of iterations taken.\n\n #### Example\n\n ```python\n from __future__ import print_function\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n def make_dataset(n, d, link, scale=1., dtype=np.float32):\n model_coefficients = tfd.Uniform(\n low=np.array(-1, dtype),\n high=np.array(1, dtype)).sample(d, seed=42)\n radius = np.sqrt(2.)\n model_coefficients *= radius / tf.linalg.norm(model_coefficients)\n model_matrix = tfd.Normal(\n loc=np.array(0, dtype),\n scale=np.array(1, dtype)).sample([n, d], seed=43)\n scale = tf.convert_to_tensor(scale, dtype)\n 
linear_response = tf.tensordot(\n model_matrix, model_coefficients, axes=[[1], [0]])\n if link == 'linear':\n response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)\n elif link == 'probit':\n response = tf.cast(\n tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,\n dtype)\n elif link == 'logit':\n response = tfd.Bernoulli(logits=linear_response).sample(seed=44)\n else:\n raise ValueError('unrecognized true link: {}'.format(link))\n return model_matrix, response, model_coefficients\n\n X, Y, w_true = make_dataset(n=int(1e6), d=100, link='probit')\n\n w, linear_response, is_converged, num_iter = tfp.glm.Func(\n model_matrix=X,\n response=Y,\n model=tfp.glm.BernoulliNormalCDF())\n log_likelihood = tfp.glm.BernoulliNormalCDF().log_prob(Y, linear_response)\n\n with tf.Session() as sess:\n [w_, linear_response_, is_converged_, num_iter_, Y_, w_true_,\n log_likelihood_] = sess.run([\n w, linear_response, is_converged, num_iter, Y, w_true,\n log_likelihood])\n\n print('is_converged: ', is_converged_)\n print(' num_iter: ', num_iter_)\n print(' accuracy: ', np.mean((linear_response_ > 0.) == Y_))\n print(' deviance: ', 2. * np.mean(log_likelihood_))\n print('||w0-w1||_2 / (1+||w0||_2): ', (np.linalg.norm(w_true_ - w_, ord=2) /\n (1. + np.linalg.norm(w_true_, ord=2))))\n\n # ==>\n # is_converged: True\n # num_iter: 6\n # accuracy: 0.804382\n # deviance: -0.820746600628\n # ||w0-w1||_2 / (1+||w0||_2): 0.00619245105309\n ```\n\n \"\"\"\n arg_13 = [arg_0, arg_1, arg_3,\n arg_4, arg_6, arg_7,\n arg_9, arg_11]\n with tf.compat.v1.name_scope(arg_12, 'Func', arg_13):\n [\n arg_0,\n arg_1,\n arg_3,\n arg_4,\n arg_7,\n ] = prepare_args(\n arg_0,\n arg_1,\n arg_3,\n arg_4,\n arg_7)\n if arg_8 is None:\n arg_8 = (\n convergence_criteria_small_relative_norm_weights_change())\n\n def _body(\n arg_14,\n arg_15,\n arg_16,\n arg_17):\n \"\"\"`tf.while_loop` body.\"\"\"\n arg_18, arg_19 = Func_one_step(\n arg_0,\n arg_1,\n arg_2,\n arg_16,\n arg_17,\n arg_5,\n arg_6,\n arg_7,\n arg_9,\n arg_10)\n arg_20 = arg_8(\n arg_14=arg_14,\n arg_15=arg_15,\n arg_16=arg_16,\n arg_17=arg_17,\n arg_18=arg_18,\n arg_19=arg_19,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_6=arg_6)\n return [\n arg_20,\n arg_15 + 1,\n arg_18,\n arg_19,\n ]\n\n # while not converged:\n # Func_one_step\n [\n arg_21,\n arg_15,\n arg_22,\n arg_23,\n ] = tf.while_loop(\n cond=lambda arg_21, *args: tf.logical_not(arg_21),\n body=_body,\n loop_vars=[\n tf.zeros([], np.bool), # is_converged\n tf.zeros([], np.int32), # iter_\n arg_3,\n arg_4,\n ],\n arg_11=arg_11)\n\n return [\n arg_22,\n arg_23,\n arg_21,\n arg_15\n ]"} +{"_id": "doc_724", "title": "", "text": "def Func(\n arg_0=1e-5,\n arg_1=2):\n \"\"\"Returns Python `callable` which indicates fitting procedure has converged.\n\n Writing old, new `model_coefficients` as `w0`, `w1`, this function\n defines convergence as,\n\n ```python\n relative_euclidean_norm = (tf.norm(w0 - w1, ord=2, axis=-1) /\n (1. + tf.norm(w0, ord=2, axis=-1)))\n reduce_all(relative_euclidean_norm < tolerance)\n ```\n\n where `tf.norm(x, ord=2)` denotes the [Euclidean norm](\n https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `x`.\n\n Args:\n tolerance: `float`-like `Tensor` indicating convergence, i.e., when\n max relative Euclidean norm weights difference < tolerance`.\n Default value: `1e-5`.\n norm_order: Order of the norm. 
Default value: `2` (i.e., \"Euclidean norm\".)\n\n Returns:\n convergence_criteria_fn: Python `callable` which returns `bool` `Tensor`\n indicating fitting procedure has converged. (See inner function\n specification for argument signature.)\n Default value: `1e-5`.\n \"\"\"\n def convergence_criteria_fn(\n arg_2, # pylint: disable=unused-argument\n arg_3,\n arg_4,\n arg_5, # pylint: disable=unused-argument\n arg_6,\n arg_7, # pylint: disable=unused-argument\n arg_8, # pylint: disable=unused-argument\n arg_9, # pylint: disable=unused-argument\n arg_10): # pylint: disable=unused-argument\n \"\"\"Returns `bool` `Tensor` indicating if fitting procedure has converged.\n\n Args:\n is_converged_previous: \"old\" convergence results.\n iter_: Iteration number.\n model_coefficients_previous: \"old\" `model_coefficients`.\n predicted_linear_response_previous: \"old\" `predicted_linear_response`.\n model_coefficients_next: \"new\" `model_coefficients`.\n predicted_linear_response_next: \"new\" `predicted_linear_response`.\n response: (Batch of) vector-shaped `Tensor` where each element represents\n a sample's observed response (to the corresponding row of features).\n Must have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance used to construct the\n negative log-likelihood loss, gradient, and expected Hessian (i.e., the\n Fisher information matrix).\n dispersion: `Tensor` representing `response` dispersion, i.e., as in:\n `p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast\n with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n\n Returns:\n is_converged: `bool` `Tensor`.\n \"\"\"\n arg_11 = (\n tf.norm(\n tensor=arg_4 - arg_6,\n ord=arg_1,\n axis=-1) /\n (1. +\n tf.norm(tensor=arg_4, ord=arg_1, axis=-1)))\n return (arg_3 > 0) & tf.reduce_all(\n input_tensor=arg_11 < arg_0)\n\n return convergence_criteria_fn"} +{"_id": "doc_725", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None):\n \"\"\"Helper to `fit` which sanitizes input args.\n\n Args:\n model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row\n represents a sample's features.\n response: (Batch of) vector-shaped `Tensor` where each element represents a\n sample's observed response (to the corresponding row of features). Must\n have same `dtype` as `model_matrix`.\n model_coefficients: Optional (batch of) vector-shaped `Tensor` representing\n the model coefficients, one for each column in `model_matrix`. 
Must have\n same `dtype` as `model_matrix`.\n Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`.\n predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching\n `response`; represents `offset` shifted initial linear predictions based\n on current `model_coefficients`.\n Default value: `offset` if `model_coefficients is None`, and\n `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`\n otherwise.\n offset: Optional `Tensor` with `shape`, `dtype` matching `response`;\n represents constant shift applied to `predicted_linear_response`.\n Default value: `None` (i.e., `tf.zeros_like(response)`).\n name: Python `str` used as name prefix to ops created by this function.\n Default value: `\"Func\"`.\n\n Returns:\n model_matrix: A `Tensor` with `shape`, `dtype` and values of the\n `model_matrix` argument.\n response: A `Tensor` with `shape`, `dtype` and values of the\n `response` argument.\n model_coefficients_start: A `Tensor` with `shape`, `dtype` and\n values of the `model_coefficients_start` argument if specified.\n A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix`\n containing the default starting point otherwise.\n predicted_linear_response: A `Tensor` with `shape`, `dtype` and\n values of the `predicted_linear_response` argument if specified.\n A `Tensor` with `shape`, `dtype` matching `response` containing the\n default value otherwise.\n offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument\n if specified or `None` otherwise.\n \"\"\"\n arg_6 = [arg_0, arg_1, arg_2,\n arg_3, arg_4]\n with tf.compat.v1.name_scope(arg_5, 'Func', arg_6):\n arg_7 = dtype_util.common_dtype(arg_6, np.float32)\n\n arg_0 = tf.convert_to_tensor(\n value=arg_0, arg_7=arg_7, arg_5='model_matrix')\n\n if arg_4 is not None:\n arg_4 = tf.convert_to_tensor(value=arg_4, arg_7=arg_7, arg_5='offset')\n\n arg_1 = tf.convert_to_tensor(\n value=arg_1, arg_7=arg_7, arg_5='response')\n\n arg_8 = arg_2 is None\n if arg_8:\n # User did not supply model coefficients; assume they're all zero.\n arg_9 = tf.shape(input=arg_0)[:-2]\n arg_10 = tf.shape(input=arg_0)[-1]\n arg_2 = tf.zeros(\n shape=tf.concat([arg_9, [arg_10]], axis=0),\n arg_7=arg_7, arg_5='model_coefficients')\n else:\n # User did supply model coefficients; convert to Tensor in case it's\n # numpy or literal.\n arg_2 = tf.convert_to_tensor(\n value=arg_2, arg_7=arg_7, arg_5='model_coefficients')\n\n if arg_3 is None:\n if arg_8:\n # Since we're using zeros for model_coefficients, we know the predicted\n # linear response will also be all zeros.\n if arg_4 is None:\n arg_3 = tf.zeros_like(\n arg_1, arg_7, arg_5='predicted_linear_response')\n else:\n arg_3 = tf.broadcast_to(\n arg_4,\n tf.shape(input=arg_1),\n arg_5='predicted_linear_response')\n else:\n # We were given model_coefficients but not the predicted linear\n # response.\n arg_3 = calculate_linear_predictor(\n arg_0, arg_2, arg_4)\n else:\n arg_3 = tf.convert_to_tensor(\n value=arg_3,\n arg_7=arg_7,\n arg_5='predicted_linear_response')\n\n return [\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n ]"} +{"_id": "doc_726", "title": "", "text": "def Func(arg_0):\n \"\"\"Helper function for statically evaluating predicates in `cond`.\"\"\"\n if arg_0 in {0, 1}: # Accept 1/0 as valid boolean values\n arg_1 = bool(arg_0)\n elif isinstance(arg_0, bool):\n arg_1 = arg_0\n elif isinstance(arg_0, tf.Tensor):\n arg_1 = tf.get_static_value(arg_0)\n\n # TODO(jamieas): remove the dependency on `pywrap_tensorflow`.\n # pylint: 
disable=protected-access\n if arg_1 is None:\n arg_1 = c_api.TF_TryEvaluateConstant_wrapper(arg_0.graph._c_graph,\n arg_0._as_tf_output())\n # pylint: enable=protected-access\n\n else:\n raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. '\n 'Found instead: {}'.format(arg_0))\n return arg_1"} +{"_id": "doc_727", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Computes `rank` given a `Tensor`'s `shape`.\"\"\"\n\n if arg_1 is None:\n arg_2 = (arg_0() if callable(arg_0)\n else arg_0)\n if (hasattr(arg_2, 'shape') and\n hasattr(arg_2.shape, 'num_elements')):\n arg_3 = tensorshape_util.num_elements(arg_2.shape)\n else:\n arg_3 = len(arg_2)\n arg_4 = lambda: tf.size(input=arg_2)\n else:\n arg_3 = tensorshape_util.rank(arg_1)\n arg_4 = lambda: tf.size(input=arg_0() # pylint: disable=g-long-lambda\n if callable(arg_0)\n else arg_0)\n return arg_4() if arg_3 is None else arg_3"} +{"_id": "doc_728", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3='smart_Func'):\n \"\"\"Like tf.Func, except attempts to statically evaluate predicates.\n\n If any predicate in `pred_fn_pairs` is a bool or has a constant value, the\n associated callable will be called or omitted depending on its value.\n Otherwise this functions like tf.Func.\n\n Args:\n pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a\n callable which returns a list of tensors.\n default: Optional callable that returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the first pair whose predicate evaluated to True, or\n those returned by `default` if none does.\n\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/dictionary.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n \"\"\"\n return control_flow_ops._Func_helper( # pylint: disable=protected-access\n cond, arg_0, arg_1, arg_2, arg_3, allow_python_preds=True)"} +{"_id": "doc_729", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Helper function to standardize op scope.\"\"\"\n with tf.compat.v1.name_scope(arg_0.name):\n with tf.compat.v1.name_scope(\n arg_1, arg_2, arg_3=arg_3 or []) as scope:\n yield scope"} +{"_id": "doc_730", "title": "", "text": "def Func(arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=False,\n arg_5=False,\n arg_6=None,\n arg_7=None):\n \"\"\"Creates a LinearOperator representing a diagonal matrix.\n\n Args:\n loc: Floating-point `Tensor`. This is used for inferring shape in the case\n where only `scale_identity_multiplier` is set.\n scale_diag: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal\n matrix. When `None` no diagonal term is added to the LinearOperator.\n scale_identity_multiplier: floating point rank 0 `Tensor` representing a\n scaling done to the identity matrix. When `scale_identity_multiplier =\n scale_diag = scale_tril = None` then `scale += IdentityMatrix`. 
Otherwise\n no scaled-identity-matrix is added to `scale`.\n shape_hint: scalar integer `Tensor` representing a hint at the dimension of\n the identity matrix when only `scale_identity_multiplier` is set.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness.\n assert_positive: Python `bool` indicating whether LinearOperator should be\n checked for being positive definite.\n name: Python `str` name given to ops managed by this object.\n dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall\n back to a compatible dtype across all of `loc`, `scale_diag`, and\n `scale_identity_multiplier`.\n\n Returns:\n `LinearOperator` representing a lower triangular matrix.\n\n Raises:\n ValueError: If only `scale_identity_multiplier` is set and `loc` and\n `shape_hint` are both None.\n \"\"\"\n\n def _maybe_attach_assertion(arg_8):\n if not arg_4:\n return arg_8\n if arg_5:\n return with_dependencies([\n assert_util.assert_positive(\n arg_8, message=\"diagonal part must be positive\"),\n ], arg_8)\n return with_dependencies([\n assert_util.assert_none_equal(\n arg_8, tf.zeros([], arg_8.dtype), message=\"diagonal part must be non-zero\")\n ], arg_8)\n\n with tf.name_scope(arg_6 or \"Func\"):\n if arg_7 is None:\n arg_7 = dtype_util.common_dtype(\n [arg_0, arg_1, arg_2],\n preferred_dtype=tf.float32)\n arg_0 = _convert_to_tensor(arg_0, arg_6=\"loc\", arg_7=arg_7)\n arg_1 = _convert_to_tensor(arg_1, arg_6=\"scale_diag\", arg_7=arg_7)\n arg_2 = _convert_to_tensor(\n arg_2,\n arg_6=\"scale_identity_multiplier\",\n arg_7=arg_7)\n\n if arg_1 is not None:\n if arg_2 is not None:\n arg_1 += arg_2[..., tf.newaxis]\n return tf.linalg.LinearOperatorDiag(\n diag=_maybe_attach_assertion(arg_1),\n is_non_singular=True,\n is_self_adjoint=True,\n is_positive_definite=arg_5)\n\n if arg_0 is None and arg_3 is None:\n raise ValueError(\"Cannot infer `event_shape` unless `loc` or \"\n \"`shape_hint` is specified.\")\n\n arg_9 = arg_3\n del arg_3\n if arg_9 is None:\n arg_9 = tf.compat.dimension_value(arg_0.shape[-1])\n if arg_9 is None:\n arg_9 = tf.shape(input=arg_0)[-1]\n\n if arg_2 is None:\n return tf.linalg.LinearOperatorIdentity(\n arg_9=arg_9,\n arg_7=arg_7,\n is_self_adjoint=True,\n is_positive_definite=True,\n assert_proper_shapes=arg_4)\n\n return tf.linalg.LinearOperatorScaledIdentity(\n arg_9=arg_9,\n multiplier=_maybe_attach_assertion(arg_2),\n is_non_singular=True,\n is_self_adjoint=True,\n is_positive_definite=arg_5,\n assert_proper_shapes=arg_4)"} +{"_id": "doc_731", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"Func\"):\n \"\"\"Infer distribution batch and event shapes from a location and scale.\n\n Location and scale family distributions determine their batch/event shape by\n broadcasting the `loc` and `scale` args. This helper does that broadcast,\n statically if possible.\n\n Batch shape broadcasts as per the normal rules.\n We allow the `loc` event shape to broadcast up to that of `scale`. We do not\n allow `scale`'s event shape to change. Therefore, the last dimension of `loc`\n must either be size `1`, or the same as `scale.range_dimension`.\n\n See `MultivariateNormalLinearOperator` for a usage example.\n\n Args:\n loc: `Tensor` (already converted to tensor) or `None`. 
If `None`, or\n `rank(loc)==0`, both batch and event shape are determined by `scale`.\n scale: A `LinearOperator` instance.\n name: A string name to prepend to created ops.\n\n Returns:\n batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.\n event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.\n\n Raises:\n ValueError: If the last dimension of `loc` is determined statically to be\n different than the range of `scale`.\n \"\"\"\n if arg_0 is not None and tensorshape_util.rank(arg_0.shape) == 0:\n arg_0 = None # scalar loc is irrelevant to determining batch/event shape.\n with tf.name_scope(arg_2):\n # Get event shape.\n arg_3 = arg_1.range_dimension_tensor()\n arg_4 = tf.get_static_value(arg_3)\n arg_5 = (None if arg_0 is None\n else tf.compat.dimension_value(arg_0.shape[-1]))\n\n if arg_4 is not None and arg_5 is not None:\n # Static check that event shapes match.\n if arg_5 != 1 and arg_5 != arg_4:\n raise ValueError(\n \"Event size of 'scale' ({}) could not be broadcast up to that \"\n \"of 'loc' ({}).\".format(arg_4, arg_5))\n elif arg_5 is not None and arg_5 != 1:\n arg_4 = arg_5\n\n if arg_4 is None:\n arg_6 = arg_3[tf.newaxis]\n else:\n arg_6 = tf.convert_to_tensor(\n value=np.reshape(arg_4, [1]),\n dtype=tf.int32,\n arg_2=\"event_shape\")\n\n # Get batch shape.\n arg_7 = arg_1.batch_shape_tensor()\n if arg_0 is not None:\n arg_8 = tensorshape_util.with_rank_at_least(arg_0.shape, 1)[:-1]\n if tensorshape_util.rank(\n arg_0.shape) is None or not tensorshape_util.is_fully_defined(\n arg_8):\n arg_8 = tf.shape(input=arg_0)[:-1]\n else:\n arg_8 = tf.convert_to_tensor(\n value=arg_8, dtype=tf.int32, arg_2=\"loc_batch_shape\")\n # This is defined in the core util module.\n arg_7 = prefer_static_broadcast_shape(arg_7, arg_8) # pylint: disable=undefined-variable\n arg_7 = tf.convert_to_tensor(\n value=arg_7, dtype=tf.int32, arg_2=\"batch_shape\")\n\n return arg_7, arg_6"} +{"_id": "doc_732", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns `True` if `scale` is a `LinearOperator` that is known to be diag.\n\n Args:\n scale: `LinearOperator` instance.\n\n Returns:\n Python `bool`.\n\n Raises:\n TypeError: If `scale` is not a `LinearOperator`.\n \"\"\"\n if not isinstance(arg_0, tf.linalg.LinearOperator):\n raise TypeError(\"Expected argument 'scale' to be instance of LinearOperator\"\n \". Found: %s\" % arg_0)\n return (isinstance(arg_0, tf.linalg.LinearOperatorIdentity) or\n isinstance(arg_0, tf.linalg.LinearOperatorScaledIdentity) or\n isinstance(arg_0, tf.linalg.LinearOperatorDiag))"} +{"_id": "doc_733", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Convenience function that chooses one of two values based on the predicate.\n\n This utility is equivalent to a version of `tf.where` that accepts only a\n scalar predicate and computes its result statically when possible. It may also\n be used in place of `tf.cond` when both branches yield a `Tensor` of the same\n shape; the operational difference is that `tf.cond` uses control flow to\n evaluate only the branch that's needed, while `tf.where` (and thus\n this method) may evaluate both branches before the predicate's truth is known.\n This means that `tf.cond` is preferred when one of the branches is expensive\n to evaluate (like performing a large matmul), while this method is preferred\n when both branches are cheap, e.g., constants. 
In the latter case, we expect\n this method to be substantially faster than `tf.cond` on GPU and to give\n similar performance on CPU.\n\n Args:\n pred: Scalar `bool` `Tensor` predicate.\n true_value: `Tensor` to return if `pred` is `True`.\n false_value: `Tensor` to return if `pred` is `False`. Must have the same\n shape as `true_value`.\n name: Python `str` name given to ops managed by this object.\n\n Returns:\n result: a `Tensor` (or `Tensor`-convertible Python value) equal to\n `true_value` if `pred` evaluates to `True` and `false_value` otherwise.\n If the condition can be evaluated statically, the result returned is one\n of the input Python values, with no graph side effects.\n \"\"\"\n with tf.name_scope(arg_3 or \"Func\"):\n arg_0 = tf.convert_to_tensor(\n value=arg_0, dtype_hint=tf.bool, arg_3=\"pred\")\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_3=\"true_value\")\n arg_2 = tf.convert_to_tensor(value=arg_2, arg_3=\"false_value\")\n arg_4 = tf.get_static_value(arg_0)\n if arg_4 is None:\n return tf.where(arg_0, arg_1, arg_2)\n return arg_1 if arg_4 else arg_2"} +{"_id": "doc_734", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Move a single tensor dimension within its shape.\n\n This is a special case of `tf.transpose()`, which applies\n arbitrary permutations to tensor dimensions.\n\n Args:\n x: Tensor of rank `ndims`.\n source_idx: Integer index into `x.shape` (negative indexing is supported).\n dest_idx: Integer index into `x.shape` (negative indexing is supported).\n\n Returns:\n x_perm: Tensor of rank `ndims`, in which the dimension at original\n index `source_idx` has been moved to new index `dest_idx`, with\n all other dimensions retained in their original order.\n\n Example:\n\n ```python\n x = tf.placeholder(shape=[200, 30, 4, 1, 6])\n x_perm = _Func(x, 1, 1) # no-op\n x_perm = _Func(x, 0, 3) # result shape [30, 4, 1, 200, 6]\n x_perm = _Func(x, 0, -2) # equivalent to previous\n x_perm = _Func(x, 4, 2) # result shape [200, 30, 6, 4, 1]\n ```\n \"\"\"\n arg_3 = prefer_static_rank(arg_0)\n arg_4 = dtype_util.common_dtype([arg_1, arg_2],\n preferred_dtype=tf.int32)\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_4=arg_4)\n arg_2 = tf.convert_to_tensor(value=arg_2, arg_4=arg_4)\n\n # Handle negative indexing.\n arg_1 = pick_scalar_condition(arg_1 < 0, arg_3 + arg_1,\n arg_1)\n arg_2 = pick_scalar_condition(arg_2 < 0, arg_3 + arg_2, arg_2)\n\n # Construct the appropriate permutation of dimensions, depending\n # whether the source is before or after the destination.\n def move_left_permutation():\n return prefer_static_value(\n tf.concat([\n tf.range(0, arg_2, arg_4=arg_4), [arg_1],\n tf.range(arg_2, arg_1, arg_4=arg_4),\n tf.range(arg_1 + 1, arg_3, arg_4=arg_4)\n ],\n axis=0))\n\n def move_right_permutation():\n return prefer_static_value(\n tf.concat([\n tf.range(0, arg_1, arg_4=arg_4),\n tf.range(arg_1 + 1, arg_2 + 1, arg_4=arg_4), [arg_1],\n tf.range(arg_2 + 1, arg_3, arg_4=arg_4)\n ],\n axis=0))\n\n def x_permuted():\n return tf.transpose(\n a=arg_0,\n perm=prefer_static.cond(arg_1 < arg_2,\n move_right_permutation,\n move_left_permutation))\n\n # One final conditional to handle the special case where source\n # and destination indices are equal.\n return prefer_static.cond(tf.equal(arg_1, arg_2),\n lambda: arg_0, x_permuted)"} +{"_id": "doc_735", "title": "", "text": "def Func(\n arg_0, arg_1=\"Func\"):\n \"\"\"Assert x is a non-negative tensor, and optionally of integers.\"\"\"\n with tf.name_scope(arg_1):\n arg_0 = tf.convert_to_tensor(value=arg_0, 
arg_1=\"x\")\n arg_2 = [\n assert_util.assert_non_negative(\n arg_0, message=\"'{}' must be non-negative.\".format(arg_0)),\n ]\n if not dtype_util.is_integer(arg_0.dtype):\n arg_2 += [\n assert_integer_form(\n arg_0,\n message=\"'{}' cannot contain fractional components.\".format(arg_0)),\n ]\n return with_dependencies(arg_2, arg_0)"} +{"_id": "doc_736", "title": "", "text": "def Func(arg_0):\n \"\"\"Helper returning True if dtype is known to be unsigned.\"\"\"\n return {\n tf.bool: True,\n tf.uint8: True,\n tf.uint16: True,\n }.get(arg_0.base_dtype, False)"} +{"_id": "doc_737", "title": "", "text": "def Func(arg_0):\n \"\"\"Helper returning True if dtype is known to be signed.\"\"\"\n return {\n tf.float16: True,\n tf.float32: True,\n tf.float64: True,\n tf.int8: True,\n tf.int16: True,\n tf.int32: True,\n tf.int64: True,\n }.get(arg_0.base_dtype, False)"} +{"_id": "doc_738", "title": "", "text": "def Func(arg_0):\n \"\"\"Helper returning the largest integer exactly representable by dtype.\"\"\"\n if not _is_known_dtype(arg_0):\n raise TypeError(\"Unrecognized dtype: {}\".format(arg_0.name))\n if arg_0.is_floating:\n return int(2**(np.finfo(arg_0.as_numpy_dtype).nmant + 1))\n if arg_0.is_integer:\n return np.iinfo(arg_0.as_numpy_dtype).max\n if arg_0.base_dtype == tf.bool:\n return int(1)\n # We actually can't land here but keep the case for completeness.\n raise TypeError(\"Unrecognized dtype: {}\".format(arg_0.name))"} +{"_id": "doc_739", "title": "", "text": "def Func(arg_0):\n \"\"\"Helper returning the smallest integer exactly representable by dtype.\"\"\"\n if not _is_known_dtype(arg_0):\n raise TypeError(\"Unrecognized dtype: {}\".format(arg_0.name))\n if _is_known_unsigned_by_dtype(arg_0):\n return 0\n return -1 * _largest_integer_by_dtype(arg_0)"} +{"_id": "doc_740", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"Func\"):\n \"\"\"Circularly moves dims left or right.\n\n Effectively identical to:\n\n ```python\n numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))\n ```\n\n When `validate_args=False` additional graph-runtime checks are\n performed. These checks entail moving data from GPU to CPU.\n\n Example:\n\n ```python\n x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].\n Func(x, -1).shape == [2, 3, 4, 1]\n Func(x, -2).shape == [3, 4, 1, 2]\n Func(x, 1).shape == [4, 1, 2, 3]\n Func(x, 2).shape == [3, 4, 1, 2]\n Func(x, 7).shape == Func(x, 3).shape # [2, 3, 4, 1]\n Func(x, -7).shape == Func(x, -3).shape # [4, 1, 2, 3]\n ```\n\n Args:\n x: `Tensor`.\n shift: `Tensor`. Number of dimensions to transpose left (shift<0) or\n transpose right (shift>0).\n name: Python `str`. 
The name to give this op.\n\n Returns:\n rotated_x: Input `Tensor` with dimensions circularly rotated by shift.\n\n Raises:\n TypeError: if shift is not integer type.\n \"\"\"\n with tf.name_scope(arg_2):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_2=\"x\")\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_2=\"shift\")\n # We do not assign back to preserve constant-ness.\n assert_util.assert_integer(arg_1)\n arg_3 = tf.get_static_value(arg_1)\n arg_4 = tensorshape_util.rank(arg_0.shape)\n if arg_4 is not None and arg_3 is not None:\n if arg_4 < 2:\n return arg_0\n arg_3 = np.sign(arg_3) * (\n abs(arg_3) % arg_4)\n if arg_3 == 0:\n return arg_0\n arg_5 = np.roll(np.arange(arg_4), arg_3)\n return tf.transpose(a=arg_0, arg_5=arg_5)\n else:\n # Consider if we always had a positive shift, and some specified\n # direction.\n # When shifting left we want the new array:\n # last(x, n-shift) + first(x, shift)\n # and if shifting right then we want:\n # last(x, shift) + first(x, n-shift)\n # Observe that last(a) == slice(a, n) and first(a) == slice(0, a).\n # Also, we can encode direction and shift as one: direction * shift.\n # Combining these facts, we have:\n # a = cond(shift<0, -shift, n-shift)\n # last(x, n-a) + first(x, a) == x[a:n] + x[0:a]\n # Finally, we transform shift by modulo length so it can be specified\n # independently from the array upon which it operates (like python).\n arg_4 = tf.rank(arg_0)\n arg_1 = tf.where(\n tf.less(arg_1, 0), -arg_1 % arg_4,\n arg_4 - arg_1 % arg_4)\n arg_6 = tf.range(0, arg_1)\n arg_7 = tf.range(arg_1, arg_4)\n arg_5 = tf.concat([arg_7, arg_6], 0)\n return tf.transpose(a=arg_0, arg_5=arg_5)"} +{"_id": "doc_741", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"Func\"):\n \"\"\"Picks possibly different length row `Tensor`s based on condition.\n\n Value `Tensor`s should have exactly one dimension.\n\n If `cond` is a python Boolean or `tf.constant` then either `true_vector` or\n `false_vector` is immediately returned. I.e., no graph nodes are created and\n no validation happens.\n\n Args:\n cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.\n true_vector: `Tensor` of one dimension. Returned when cond is `True`.\n false_vector: `Tensor` of one dimension. Returned when cond is `False`.\n name: Python `str`. 
The name to give this op.\n Example: ```python Func(tf.less(0, 5), tf.range(10, 12), tf.range(15,\n 18)) # [10, 11] Func(tf.less(5, 0), tf.range(10, 12), tf.range(15,\n 18)) # [15, 16, 17] ```\n\n Returns:\n true_or_false_vector: `Tensor`.\n\n Raises:\n TypeError: if `cond.dtype != tf.bool`\n TypeError: if `cond` is not a constant and\n `true_vector.dtype != false_vector.dtype`\n \"\"\"\n with tf.name_scope(arg_3):\n arg_0 = tf.convert_to_tensor(\n value=arg_0, dtype_hint=tf.bool, arg_3=\"cond\")\n if arg_0.dtype != tf.bool:\n raise TypeError(\n \"{}.dtype={} which is not {}\".format(arg_0, arg_0.dtype, tf.bool))\n\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_3=\"true_vector\")\n arg_2 = tf.convert_to_tensor(value=arg_2, arg_3=\"false_vector\")\n if arg_1.dtype != arg_2.dtype:\n raise TypeError(\n \"{}.dtype={} does not match {}.dtype={}\".format(\n arg_1, arg_1.dtype, arg_2, arg_2.dtype))\n\n arg_4 = tf.get_static_value(arg_0)\n if arg_4 is not None:\n return arg_1 if arg_4 else arg_2\n arg_5 = tf.shape(input=arg_1)[0]\n return tf.slice(\n tf.concat([arg_1, arg_2], 0), [tf.where(arg_0, 0, arg_5)],\n [tf.where(arg_0, arg_5, -1)])"} +{"_id": "doc_742", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate a new seed, from the given seed and salt.\"\"\"\n if arg_0 is None:\n return None\n arg_2 = (str(arg_0) + arg_1).encode(\"utf-8\")\n return int(hashlib.md5(arg_2).hexdigest()[:8], 16) & 0x7FFFFFFF"} +{"_id": "doc_743", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Creates a matrix with values set above, below, and on the diagonal.\n\n Example:\n\n ```python\n Func(below=[1., 2., 3.],\n diag=[4., 5., 6., 7.],\n above=[8., 9., 10.])\n # ==> array([[ 4., 8., 0., 0.],\n # [ 1., 5., 9., 0.],\n # [ 0., 2., 6., 10.],\n # [ 0., 0., 3., 7.]], dtype=float32)\n ```\n\n Warning: This Op is intended for convenience, not efficiency.\n\n Args:\n below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below\n diagonal part. `None` is logically equivalent to `below = 0`.\n diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal\n part. `None` is logically equivalent to `diag = 0`.\n above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above\n diagonal part. `None` is logically equivalent to `above = 0`.\n name: Python `str`. 
The name to give this op.\n\n Returns:\n Func: `Tensor` with values set above, below and on the diagonal.\n\n Raises:\n ValueError: if all inputs are `None`.\n \"\"\"\n\n def _pad(arg_4):\n \"\"\"Prepends and appends a zero to every vector in a batch of vectors.\"\"\"\n arg_5 = tf.concat([tf.shape(input=arg_4)[:-1], [1]], axis=0)\n arg_6 = tf.zeros(arg_5, dtype=arg_4.dtype)\n return tf.concat([arg_6, arg_4, arg_6], axis=-1)\n\n def _add(*arg_4):\n \"\"\"Adds list of Tensors, ignoring `None`.\"\"\"\n arg_7 = None\n for arg_8 in arg_4:\n if arg_8 is None:\n continue\n elif arg_7 is None:\n arg_7 = arg_8\n else:\n arg_7 += arg_8\n if arg_7 is None:\n raise ValueError(\"Must specify at least one of `below`, `diag`, `above`.\")\n return arg_7\n\n with tf.name_scope(arg_3 or \"Func\"):\n if arg_0 is not None:\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_3=\"below\")\n arg_0 = tf.linalg.diag(_pad(arg_0))[..., :-1, 1:]\n if arg_1 is not None:\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_3=\"diag\")\n arg_1 = tf.linalg.diag(arg_1)\n if arg_2 is not None:\n arg_2 = tf.convert_to_tensor(value=arg_2, arg_3=\"above\")\n arg_2 = tf.linalg.diag(_pad(arg_2))[..., 1:, :-1]\n # TODO(jvdillon): Consider using scatter_nd instead of creating three full\n # matrices.\n return _add(arg_0, arg_1, arg_2)"} +{"_id": "doc_744", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None):\n \"\"\"Validates quadrature grid, probs or computes them as necessary.\n\n Args:\n quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s\n representing the sample points and the corresponding (possibly\n normalized) weight. When `None`, defaults to:\n `np.polynomial.hermite.hermgauss(deg=8)`.\n dtype: The expected `dtype` of `grid` and `probs`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s\n representing the sample points and the corresponding (possibly\n normalized) weight.\n\n Raises:\n ValueError: if `quadrature_grid_and_probs is not None` and\n `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`\n \"\"\"\n with tf.name_scope(arg_3 or \"Func\"):\n if arg_0 is None:\n arg_4, arg_5 = np.polynomial.hermite.hermgauss(deg=8)\n arg_4 = arg_4.astype(dtype_util.as_numpy_dtype(arg_1))\n arg_5 = arg_5.astype(dtype_util.as_numpy_dtype(arg_1))\n arg_5 /= np.linalg.norm(arg_5, ord=1, keepdims=True)\n arg_4 = tf.convert_to_tensor(value=arg_4, arg_3=\"grid\", arg_1=arg_1)\n arg_5 = tf.convert_to_tensor(value=arg_5, arg_3=\"probs\", arg_1=arg_1)\n return arg_4, arg_5\n\n arg_4, arg_5 = tuple(arg_0)\n arg_4 = tf.convert_to_tensor(value=arg_4, arg_3=\"grid\", arg_1=arg_1)\n arg_5 = tf.convert_to_tensor(\n value=arg_5, arg_3=\"unnormalized_probs\", arg_1=arg_1)\n arg_5 /= tf.norm(tensor=arg_5, ord=1, axis=-1, keepdims=True, arg_3=\"probs\")\n\n def _static_event_size(arg_6):\n \"\"\"Returns the static size of a specific dimension or `None`.\"\"\"\n return tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(arg_6.shape, 1)[-1])\n\n arg_7, arg_8 = _static_event_size(arg_5), _static_event_size(arg_4)\n if arg_7 is not None and arg_8 is not None:\n if arg_7 != arg_8:\n raise ValueError(\"`quadrature_grid_and_probs` must be a `tuple` of \"\n \"same-length zero-th-dimension `Tensor`s \"\n \"(saw lengths {}, {})\".format(arg_7, arg_8))\n elif arg_2:\n arg_9 = [\n assert_util.assert_equal(\n dimension_size(arg_5, axis=-1),\n dimension_size(arg_4, axis=-1),\n message=(\"`quadrature_grid_and_probs` must be a `tuple` of \"\n \"same-length zero-th-dimension `Tensor`s\")),\n ]\n with tf.control_dependencies(arg_9):\n arg_4 = tf.identity(arg_4)\n arg_5 = tf.identity(arg_5)\n return arg_4, arg_5"} +{"_id": "doc_745", "title": "", "text": "def Func():\n \"\"\"Returns parent frame arguments.\n\n When called inside a function, returns a dictionary with the caller's function\n arguments. These are positional arguments and keyword arguments (**kwargs),\n while variable arguments (*varargs) are excluded.\n\n When called at global scope, this will return an empty dictionary, since there\n are no arguments.\n\n WARNING: If caller function argument names are overloaded before invoking\n this method, then values will reflect the overloaded value. For this reason,\n we recommend calling `Func` at the beginning of the\n function.\n \"\"\"\n # All arguments and the names used for *varargs, and **kwargs\n arg_0, arg_1, arg_2, arg_3 = (\n tf_inspect._inspect.getargvalues( # pylint: disable=protected-access\n # Get the first frame of the caller of this method.\n tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access\n\n # Remove the *varargs, and flatten the **kwargs. Both are\n # nested lists.\n arg_3.pop(arg_1, {})\n arg_4 = arg_3.pop(arg_2, {})\n\n arg_5 = {}\n # Copy over arguments and their values. 
In general, local_vars\n # may contain more than just the arguments, since this method\n # can be called anywhere in a function.\n for arg_6 in arg_0:\n arg_5[arg_6] = arg_3.pop(arg_6)\n arg_5.update(arg_4)\n\n return arg_5"} +{"_id": "doc_746", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=False):\n \"\"\"Transform a 0-D or 1-D `Tensor` to be 1-D.\n\n For user convenience, many parts of the TensorFlow Probability API accept\n inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed\n to the API as either `5` or `[5]`. This function can be used to transform\n such an argument to always be 1-D.\n\n NOTE: Python or NumPy values will be converted to `Tensor`s with standard type\n inference/conversion. In particular, an empty list or tuple will become an\n empty `Tensor` with dtype `float32`. Callers should convert values to\n `Tensor`s before calling this function if different behavior is desired\n (e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).\n\n Args:\n x: A 0-D or 1-D `Tensor`.\n tensor_name: Python `str` name for `Tensor`s created by this function.\n op_name: Python `str` name for `Op`s created by this function.\n validate_args: Python `bool, default `False`. When `True`, arguments may be\n checked for validity at execution time, possibly degrading runtime\n performance. When `False`, invalid inputs may silently render incorrect\n outputs.\n Returns:\n vector: a 1-D `Tensor`.\n \"\"\"\n with tf.name_scope(arg_2 or \"Func\"):\n arg_0 = tf.convert_to_tensor(value=arg_0, name=\"x\")\n arg_4 = tensorshape_util.rank(arg_0.shape)\n\n if arg_4 is None:\n # Maybe expand ndims from 0 to 1.\n if arg_3:\n arg_0 = with_dependencies([\n assert_util.assert_rank_at_most(\n arg_0, 1, message=\"Input is neither scalar nor vector.\")\n ], arg_0)\n arg_4 = tf.rank(arg_0)\n arg_5 = pick_vector(\n tf.equal(arg_4, 0), np.array([1], dtype=np.int32), tf.shape(input=arg_0))\n return tf.reshape(arg_0, arg_5)\n\n elif arg_4 == 0:\n # Definitely expand ndims from 0 to 1.\n arg_6 = tf.get_static_value(arg_0)\n if arg_6 is not None:\n return tf.convert_to_tensor(\n value=dtype_util.as_numpy_dtype(arg_0.dtype)([arg_6]),\n name=arg_1)\n\n else:\n return tf.reshape(arg_0, [1])\n\n elif arg_4 != 1:\n raise ValueError(\"Input is neither scalar nor vector.\")\n\n # ndims == 1\n return arg_0"} +{"_id": "doc_747", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None):\n \"\"\"Checks that `rightmost_transposed_ndims` is valid.\"\"\"\n with tf.name_scope(arg_2 or 'maybe_validate_rightmost_transposed_ndims'):\n arg_3 = []\n if not dtype_util.is_integer(arg_0.dtype):\n raise TypeError('`rightmost_transposed_ndims` must be integer type.')\n\n if tensorshape_util.rank(arg_0.shape) is not None:\n if tensorshape_util.rank(arg_0.shape) != 0:\n raise ValueError('`rightmost_transposed_ndims` must be a scalar, '\n 'saw rank: {}.'.format(\n tensorshape_util.rank(\n arg_0.shape)))\n elif arg_1:\n arg_3 += [assert_util.assert_rank(arg_0, 0)]\n\n arg_4 = tf.get_static_value(\n arg_0)\n arg_5 = '`rightmost_transposed_ndims` must be non-negative.'\n if arg_4 is not None:\n if arg_4 < 0:\n raise ValueError(arg_5[:-1] + ', saw: {}.'.format(\n arg_4))\n elif arg_1:\n arg_3 += [\n assert_util.assert_non_negative(\n arg_0, message=arg_5)\n ]\n\n return arg_3"} +{"_id": "doc_748", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Checks that `perm` is valid.\"\"\"\n with tf.name_scope(arg_2 or 'maybe_validate_perm'):\n arg_3 = []\n if not 
dtype_util.is_integer(arg_0.dtype):\n raise TypeError('`perm` must be integer type')\n\n arg_4 = '`perm` must be a vector.'\n if tensorshape_util.rank(arg_0.shape) is not None:\n if tensorshape_util.rank(arg_0.shape) != 1:\n raise ValueError(\n arg_4[:-1] +\n ', saw rank: {}.'.format(tensorshape_util.rank(arg_0.shape)))\n elif arg_1:\n arg_3 += [assert_util.assert_rank(arg_0, 1, message=arg_4)]\n\n arg_5 = tf.get_static_value(arg_0)\n arg_4 = '`perm` must be a valid permutation vector.'\n if arg_5 is not None:\n if not np.all(np.arange(np.size(arg_5)) == np.sort(arg_5)):\n raise ValueError(arg_4[:-1] + ', saw: {}.'.format(arg_5))\n elif arg_1:\n arg_3 += [\n assert_util.assert_equal(\n tf.sort(arg_0), tf.range(tf.size(input=arg_0)), message=arg_4)\n ]\n\n return arg_3"} +{"_id": "doc_749", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the concatenation of the dimension in `x` and `other`.\n\n *Note:* If either `x` or `other` is completely unknown, concatenation will\n discard information about the other shape. In future, we might support\n concatenation that preserves this information for use with slicing.\n\n For more details, see `help(tf.TensorShape.Func)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n other: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n new_shape: an object like `x` whose elements are the concatenation of the\n dimensions in `x` and `other`.\n \"\"\"\n return type(arg_0)(tf.TensorShape(arg_0).Func(arg_1))"} +{"_id": "doc_750", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a list of dimension sizes, or `None` if `rank` is unknown.\n\n For more details, see `help(tf.TensorShape.Func)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n shape_as_list: list of sizes or `None` values representing each\n dimensions size if known. 
A size is `tf.Dimension` if input is a\n `tf.TensorShape` and an `int` otherwise.\n \"\"\"\n if isinstance(arg_0, tf.TensorShape):\n return arg_0.Func\n arg_1 = tf.TensorShape(arg_0).Func\n return None if arg_1 is None else list(map(tf.compat.dimension_value, arg_1))"} +{"_id": "doc_751", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a shape combining the information in `x` and `other`.\n\n The dimensions in `x` and `other` are merged elementwise, according to the\n rules defined for `tf.Dimension.Func()`.\n\n For more details, see `help(tf.TensorShape.Func)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n other: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n merged_shape: shape having `type(x)` containing the combined information of\n `x` and `other`.\n\n Raises:\n ValueError: If `x` and `other` are not compatible.\n \"\"\"\n return type(arg_0)(tf.TensorShape(arg_0).Func(arg_1))"} +{"_id": "doc_752", "title": "", "text": "def Func(arg_0, arg_1): # pylint: disable=redefined-outer-name\n \"\"\"Returns a shape based on `x` with at least the given `rank`.\n\n For more details, see `help(tf.TensorShape.Func)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n rank: An `int` representing the minimum rank of `x` or else an assertion is\n raised.\n\n Returns:\n shape: a shape having `type(x)` but guaranteed to have at least the given\n rank (or else an assertion was raised).\n\n Raises:\n ValueError: If `x` does not represent a shape with at least the given\n `rank`.\n \"\"\"\n return type(arg_0)(tf.TensorShape(arg_0).Func(arg_1))"} +{"_id": "doc_753", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None):\n \"\"\"Check that source and target shape match, statically if possible.\"\"\"\n\n arg_3 = tf.TensorShape(arg_3)\n if tensorshape_util.is_fully_defined(\n arg_1) and tensorshape_util.is_fully_defined(arg_3):\n if arg_1 != arg_3:\n raise ValueError(\"{}: required shape {} but found {}\".\n format(arg_0, arg_3, arg_1))\n return None\n else:\n if arg_4 is None:\n if tensorshape_util.is_fully_defined(arg_3):\n arg_4 = tensorshape_util.as_list(arg_3)\n else:\n raise ValueError(\"{}: cannot infer target shape: no dynamic shape \"\n \"specified and static shape {} is not fully defined\".\n format(arg_0, arg_3))\n return assert_util.assert_equal(\n arg_2,\n arg_4,\n message=(\"{}: required shape {}\".format(arg_0, arg_3)))"} +{"_id": "doc_754", "title": "", "text": "def Func(arg_0):\n \"\"\"Build a callable that perform one step for backward smoothing.\n\n Args:\n get_transition_matrix_for_timestep: callable taking a timestep\n as an integer `Tensor` argument, and returning a `LinearOperator`\n of shape `[latent_size, latent_size]`.\n\n Returns:\n backward_pass_step: a callable that updates a BackwardPassState\n from timestep `t` to `t-1`.\n \"\"\"\n\n def backward_pass_step(arg_1,\n arg_2):\n \"\"\"Run a single step of backward smoothing.\"\"\"\n\n (arg_3, arg_4,\n arg_5, arg_6) = arg_2\n arg_7 = arg_0(arg_1.timestep)\n\n arg_8 = arg_1.backward_mean\n arg_9 = arg_1.backward_cov\n\n arg_10, arg_11 = backward_smoothing_update(\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_8,\n arg_9,\n arg_7)\n\n return BackwardPassState(backward_mean=arg_10,\n backward_cov=arg_11,\n timestep=arg_1.timestep-1)\n\n return backward_pass_step"} +{"_id": "doc_755", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Build a callable that performs one step of Kalman 
filtering.\n\n Args:\n get_transition_matrix_for_timestep: callable taking a timestep\n as an integer `Tensor` argument, and returning a `LinearOperator`\n of shape `[latent_size, latent_size]`.\n get_transition_noise_for_timestep: callable taking a timestep as\n an integer `Tensor` argument, and returning a\n `MultivariateNormalLinearOperator` of event shape\n `[latent_size]`.\n get_observation_matrix_for_timestep: callable taking a timestep\n as an integer `Tensor` argument, and returning a `LinearOperator`\n of shape `[observation_size, observation_size]`.\n get_observation_noise_for_timestep: callable taking a timestep as\n an integer `Tensor` argument, and returning a\n `MultivariateNormalLinearOperator` of event shape\n `[observation_size]`.\n\n Returns:\n kalman_filter_step: a callable that updates a KalmanFilterState\n from timestep `t-1` to `t`.\n \"\"\"\n\n def kalman_filter_step(arg_4, arg_5):\n \"\"\"Run a single step of Kalman filtering.\n\n Args:\n state: A `KalmanFilterState` object representing the previous\n filter state at time `t-1`.\n elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.\n `x_t` is a `Tensor` with rightmost shape dimensions\n `[observation_size, 1]` representing the vector observed at time `t`,\n and `mask_t` is a `Tensor` with rightmost dimensions`[1, 1]`\n representing the observation mask at time `t`. Both `x_t` and `mask_t`\n may have batch dimensions, which must be compatible with the batch\n dimensions of `state.predicted_mean` and `state.predictived_cov`\n respectively. If `mask_t` is not provided, it is assumed to be `None`.\n\n Returns:\n new_state: A `KalmanFilterState` object representing the new\n filter state at time `t`.\n \"\"\"\n\n if isinstance(arg_5, tuple):\n arg_6, arg_7 = arg_5\n else:\n arg_6 = arg_5\n arg_7 = None\n\n arg_8 = arg_2(arg_4.timestep)\n arg_9 = arg_3(arg_4.timestep)\n if arg_7 is not None:\n # Before running the update, fill in masked observations using the prior\n # expectation. 
The precise filled value shouldn't matter since updates\n # from masked elements will not be selected below, but we need to ensure\n # that any results we incidently compute on masked values are at least\n # finite (not inf or NaN) so that they don't screw up gradient propagation\n # through `tf.where`, as described in\n # https://github.com/tensorflow/tensorflow/issues/2540.\n # We fill with the prior expectation because any fixed value such as zero\n # might be arbitrarily unlikely under the prior, leading to overflow in\n # the updates, but the prior expectation should always be a\n # 'reasonable' observation.\n arg_10 = _propagate_mean(arg_4.predicted_mean,\n arg_8,\n arg_9) * tf.ones_like(arg_6)\n arg_6 = tf.where(\n tf.broadcast_to(arg_7, tf.shape(input=arg_10)), arg_10,\n tf.broadcast_to(arg_6, tf.shape(input=arg_10)))\n\n # Given predicted mean u_{t|t-1} and covariance P_{t|t-1} from the\n # previous step, incorporate the observation x_t, producing the\n # filtered mean u_t and covariance P_t.\n (arg_11,\n arg_12,\n arg_13) = linear_gaussian_update(\n arg_4.predicted_mean, arg_4.predicted_cov,\n arg_8, arg_9,\n arg_6)\n\n # Compute the marginal likelihood p(x_{t} | x_{:t-1}) for this\n # observation.\n arg_14 = arg_13.log_prob(arg_6[..., 0])\n\n if arg_7 is not None:\n arg_11 = tf.where(\n tf.broadcast_to(arg_7, tf.shape(input=arg_11)),\n arg_4.predicted_mean, arg_11)\n arg_12 = tf.where(\n tf.broadcast_to(arg_7, tf.shape(input=arg_12)),\n arg_4.predicted_cov, arg_12)\n arg_14 = tf.where(\n tf.broadcast_to(arg_7[..., 0, 0],\n tf.shape(input=arg_14)),\n tf.zeros_like(arg_14),\n arg_14)\n\n # Run the filtered posterior through the transition\n # model to predict the next time step:\n # u_{t|t-1} = F_t u_{t-1} + b_t\n # P_{t|t-1} = F_t P_{t-1} F_t' + Q_t\n arg_15, arg_16 = kalman_transition(\n arg_11,\n arg_12,\n arg_0(arg_4.timestep),\n arg_1(arg_4.timestep))\n\n return KalmanFilterState(\n arg_11, arg_12,\n arg_15, arg_16,\n arg_13.mean()[..., tf.newaxis],\n arg_13.covariance(),\n arg_14,\n arg_4.timestep+1)\n\n return kalman_filter_step"} +{"_id": "doc_756", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Conjugate update for a linear Gaussian model.\n\n Given a normal prior on a latent variable `z`,\n `p(z) = N(prior_mean, prior_cov) = N(u, P)`,\n for which we observe a linear Gaussian transformation `x`,\n `p(x|z) = N(H * z + c, R)`,\n the posterior is also normal:\n `p(z|x) = N(u*, P*)`.\n\n We can write this update as\n x_expected = H * u + c # pushforward prior mean\n S = R + H * P * H' # pushforward prior cov\n K = P * H' * S^{-1} # optimal Kalman gain\n u* = u + K * (x_observed - x_expected) # posterior mean\n P* = (I - K * H) * P (I - K * H)' + K * R * K' # posterior cov\n (see, e.g., https://en.wikipedia.org/wiki/Kalman_filter#Update)\n\n Args:\n prior_mean: `Tensor` with event shape `[latent_size, 1]` and\n potential batch shape `B = [b1, ..., b_n]`.\n prior_cov: `Tensor` with event shape `[latent_size, latent_size]`\n and batch shape `B` (matching `prior_mean`).\n observation_matrix: `LinearOperator` with shape\n `[observation_size, latent_size]` and batch shape broadcastable\n to `B`.\n observation_noise: potentially-batched\n `MultivariateNormalLinearOperator` instance with event shape\n `[observation_size]` and batch shape broadcastable to `B`.\n x_observed: potentially batched `Tensor` with event shape\n `[observation_size, 1]` and batch shape `B`.\n\n Returns:\n posterior_mean: `Tensor` with event shape `[latent_size, 1]` and\n batch 
shape `B`.\n posterior_cov: `Tensor` with event shape `[latent_size,\n latent_size]` and batch shape `B`.\n predictive_dist: the prior predictive distribution `p(x|z)`,\n as a `Distribution` instance with event\n shape `[observation_size]` and batch shape `B`. This will\n typically be `tfd.MultivariateNormalTriL`, but when\n `observation_size=1` we return a `tfd.Independent(tfd.Normal)`\n instance as an optimization.\n \"\"\"\n\n # If observations are scalar, we can avoid some matrix ops.\n arg_5 = (\n tf.compat.dimension_value(arg_2.shape[-2]) == 1)\n\n # Push the predicted mean for the latent state through the\n # observation model\n arg_6 = _propagate_mean(arg_0,\n arg_2,\n arg_3)\n\n # Push the predictive covariance of the latent state through the\n # observation model:\n # S = R + H * P * H'.\n # We use a temporary variable for H * P,\n # reused below to compute Kalman gain.\n arg_7 = arg_2.matmul(arg_1)\n arg_8 = (\n arg_2.matmul(arg_7, adjoint_arg=True)\n + arg_3.covariance())\n\n # Compute optimal Kalman gain:\n # K = P * H' * S^{-1}\n # Since both S and P are cov matrices, thus symmetric,\n # we can take the transpose and reuse our previous\n # computation:\n # = (S^{-1} * H * P)'\n # = (S^{-1} * tmp_obs_cov) '\n # = (S \\ tmp_obs_cov)'\n if arg_5:\n arg_9 = arg_7/arg_8\n else:\n arg_10 = tf.linalg.cholesky(arg_8)\n arg_9 = tf.linalg.cholesky_solve(arg_10,\n arg_7)\n\n # Compute the posterior mean, incorporating the observation.\n # u* = u + K (x_observed - x_expected)\n arg_11 = (arg_0 +\n tf.linalg.matmul(arg_9, arg_4 - arg_6,\n adjoint_a=True))\n\n # For the posterior covariance, we could use the simple update\n # P* = P - K * H * P\n # but this is prone to numerical issues because it subtracts a\n # value from a PSD matrix. We choose instead to use the more\n # expensive Jordan form update\n # P* = (I - K H) * P * (I - K H)' + K R K'\n # which always produces a PSD result. 
This uses\n # tmp_term = (I - K * H)'\n # as an intermediate quantity.\n arg_12 = -arg_2.matmul(arg_9, adjoint=True) # -K * H\n arg_12 = tf.linalg.set_diag(arg_12, tf.linalg.diag_part(arg_12) + 1)\n arg_13 = (\n tf.linalg.matmul(\n arg_12, tf.linalg.matmul(arg_1, arg_12), adjoint_a=True)\n + tf.linalg.matmul(arg_9,\n tf.linalg.matmul(\n arg_3.covariance(), arg_9),\n adjoint_a=True))\n\n if arg_5:\n # A plain Normal would have event shape `[]`; wrapping with Independent\n # ensures `event_shape=[1]` as required.\n arg_14 = independent.Independent(\n normal.Normal(loc=arg_6[..., 0],\n scale=tf.sqrt(arg_8[..., 0])),\n reinterpreted_batch_ndims=1)\n\n # Minor hack to define the covariance, so that `predictive_dist` can pass as\n # an MVNTriL-like object.\n arg_14.covariance = lambda: arg_8\n else:\n arg_14 = mvn_tril.MultivariateNormalTriL(\n loc=arg_6[..., 0],\n scale_tril=arg_10)\n\n return arg_11, arg_13, arg_14"} +{"_id": "doc_757", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3):\n \"\"\"Propagate a filtered distribution through a transition model.\"\"\"\n\n arg_4 = _propagate_mean(arg_0,\n arg_2,\n arg_3)\n arg_5 = _propagate_cov(arg_1,\n arg_2,\n arg_3)\n return arg_4, arg_5"} +{"_id": "doc_758", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Build a callable that performs one step of Kalman mean recursion.\n\n Args:\n get_transition_matrix_for_timestep: callable taking a timestep\n as an integer `Tensor` argument, and returning a `LinearOperator`\n of shape `[latent_size, latent_size]`.\n get_transition_noise_for_timestep: callable taking a timestep as\n an integer `Tensor` argument, and returning a\n `MultivariateNormalLinearOperator` of event shape\n `[latent_size]`.\n get_observation_matrix_for_timestep: callable taking a timestep\n as an integer `Tensor` argument, and returning a `LinearOperator`\n of shape `[observation_size, observation_size]`.\n get_observation_noise_for_timestep: callable taking a timestep as\n an integer `Tensor` argument, and returning a\n `MultivariateNormalLinearOperator` of event shape\n `[observation_size]`.\n\n Returns:\n kalman_mean_step: a callable that computes latent state and\n observation means at time `t`, given latent mean at time `t-1`.\n \"\"\"\n\n def mean_step(arg_4, arg_5):\n \"\"\"Single step of prior mean recursion.\"\"\"\n arg_6, arg_7 = arg_4\n\n arg_8 = _propagate_mean(arg_6,\n arg_0(arg_5 - 1),\n arg_1(arg_5 - 1))\n arg_9 = _propagate_mean(arg_8,\n arg_2(arg_5),\n arg_3(arg_5))\n return (arg_8, arg_9)\n\n return mean_step"} +{"_id": "doc_759", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Propagate a mean through linear Gaussian transformation.\"\"\"\n return arg_1.matmul(arg_0) + arg_2.mean()[..., tf.newaxis]"} +{"_id": "doc_760", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Propagate covariance through linear Gaussian transformation.\"\"\"\n # For linop A and input cov P, returns `A P A' + dist.cov()`\n return arg_1.matmul(arg_1.matmul(arg_0), adjoint_arg=True) + arg_2.covariance()"} +{"_id": "doc_761", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4):\n \"\"\"Run the backward pass in Kalman smoother.\n\n The backward smoothing is using Rauch, Tung and Striebel smoother as\n as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:\n A Probabilistic Perspective, The MIT Press. 
The inputs are returned by\n `forward_filter` function.\n\n Args:\n filtered_means: Means of the per-timestep filtered marginal\n distributions p(z_t | x_{:t}), as a Tensor of shape\n `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.\n filtered_covs: Covariances of the per-timestep filtered marginal\n distributions p(z_t | x_{:t}), as a Tensor of shape\n `batch_shape + [num_timesteps, latent_size, latent_size]`.\n predicted_means: Means of the per-timestep predictive\n distributions over latent states, p(z_{t+1} | x_{:t}), as a\n Tensor of shape `sample_shape(x) + batch_shape +\n [num_timesteps, latent_size]`.\n predicted_covs: Covariances of the per-timestep predictive\n distributions over latent states, p(z_{t+1} | x_{:t}), as a\n Tensor of shape `batch_shape + [num_timesteps, latent_size,\n latent_size]`.\n\n Returns:\n posterior_means: Means of the smoothed marginal distributions\n p(z_t | x_{1:T}), as a Tensor of shape\n `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,\n which is of the same shape as filtered_means.\n posterior_covs: Covariances of the smoothed marginal distributions\n p(z_t | x_{1:T}), as a Tensor of shape\n `batch_shape + [num_timesteps, latent_size, latent_size]`.\n which is of the same shape as filtered_covs.\n \"\"\"\n with tf.name_scope(\"backward_pass\"):\n arg_1 = tf.convert_to_tensor(\n value=arg_1, name=\"filtered_means\")\n arg_2 = tf.convert_to_tensor(\n value=arg_2, name=\"filtered_covs\")\n arg_3 = tf.convert_to_tensor(\n value=arg_3, name=\"predicted_means\")\n arg_4 = tf.convert_to_tensor(\n value=arg_4, name=\"predicted_covs\")\n\n # To scan over time dimension, we need to move 'num_timesteps' from the\n # event shape to the initial dimension of the tensor.\n arg_1 = distribution_util.move_dimension(arg_1, -2, 0)\n arg_2 = distribution_util.move_dimension(arg_2, -3, 0)\n arg_3 = distribution_util.move_dimension(arg_3, -2, 0)\n arg_4 = distribution_util.move_dimension(arg_4, -3, 0)\n\n # The means are assumed to be vectors. Adding a dummy index to\n # ensure the `matmul` op working smoothly.\n arg_1 = arg_1[..., tf.newaxis]\n arg_3 = arg_3[..., tf.newaxis]\n\n arg_5 = arg_3[-1, ...]\n arg_6 = arg_4[-1, ...]\n\n arg_7 = tf.shape(input=arg_1)[0]\n arg_8 = BackwardPassState(\n backward_mean=arg_5,\n backward_cov=arg_6,\n timestep=arg_0.initial_step + arg_7 - 1)\n\n arg_9 = build_backward_pass_step(\n arg_0.get_transition_matrix_for_timestep)\n\n # For backward pass, it scans the `elems` from last to first.\n arg_10 = tf.scan(arg_9,\n elems=(arg_1,\n arg_2,\n arg_3,\n arg_4),\n initializer=arg_8,\n reverse=True)\n\n # Move the time dimension back into the event shape.\n arg_11 = distribution_util.move_dimension(\n arg_10.backward_mean[..., 0], 0, -2)\n arg_12 = distribution_util.move_dimension(\n arg_10.backward_cov, 0, -3)\n\n return (arg_11, arg_12)"} +{"_id": "doc_762", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Draw a joint sample from the prior over latents and observations.\"\"\"\n\n with tf.name_scope(\"sample_n_joint\"):\n arg_3 = seed_stream.SeedStream(\n arg_2, salt=\"LinearGaussianStateSpaceModel_sample_n_joint\")\n\n arg_4 = distribution_util.prefer_static_value(\n tf.concat([[arg_1], arg_0.batch_shape_tensor()],\n axis=0))\n\n # Sample the initial timestep from the prior. 
Since we want\n # this sample to have full batch shape (not just the batch shape\n # of the self.initial_state_prior object which might in general be\n # smaller), we augment the sample shape to include whatever\n # extra batch dimensions are required.\n with tf.control_dependencies(arg_0.runtime_assertions):\n arg_5 = arg_0.initial_state_prior.sample(\n sample_shape=_augment_sample_shape(\n arg_0.initial_state_prior,\n arg_4,\n arg_0.validate_args),\n arg_2=arg_3())\n\n # Add a dummy dimension so that matmul() does matrix-vector\n # multiplication.\n arg_5 = arg_5[..., tf.newaxis]\n\n arg_6 = (\n arg_0.get_observation_matrix_for_timestep(arg_0.initial_step))\n arg_7 = (\n arg_0.get_observation_noise_for_timestep(arg_0.initial_step))\n\n arg_8 = arg_6.matmul(\n arg_5)\n arg_9 = (arg_8 +\n arg_7.sample(\n sample_shape=_augment_sample_shape(\n arg_7,\n arg_4,\n arg_0.validate_args),\n arg_2=arg_3())[..., tf.newaxis])\n\n arg_10 = build_kalman_sample_step(\n arg_0.get_transition_matrix_for_timestep,\n arg_0.get_transition_noise_for_timestep,\n arg_0.get_observation_matrix_for_timestep,\n arg_0.get_observation_noise_for_timestep,\n full_sample_and_batch_shape=arg_4,\n arg_3=arg_3,\n validate_args=arg_0.validate_args)\n\n # Scan over all timesteps to sample latents and observations.\n (arg_11, arg_12) = tf.scan(\n arg_10,\n elems=tf.range(arg_0.initial_step+1, arg_0.final_step),\n initializer=(arg_5, arg_9))\n\n # Combine the initial sampled timestep with the remaining timesteps.\n arg_11 = tf.concat([arg_5[tf.newaxis, ...],\n arg_11], axis=0)\n arg_12 = tf.concat([arg_9[tf.newaxis, ...],\n arg_12], axis=0)\n\n # Put dimensions back in order. The samples we've computed are\n # ordered by timestep, with shape `[num_timesteps, num_samples,\n # batch_shape, size, 1]` where `size` represents `latent_size`\n # or `observation_size` respectively. But timesteps are really\n # part of each probabilistic event, so we need to return a Tensor\n # of shape `[num_samples, batch_shape, num_timesteps, size]`.\n arg_11 = tf.squeeze(arg_11, -1)\n arg_11 = distribution_util.move_dimension(arg_11, 0, -2)\n arg_12 = tf.squeeze(arg_12, -1)\n arg_12 = distribution_util.move_dimension(arg_12, 0, -2)\n\n return arg_11, arg_12"} +{"_id": "doc_763", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Run a Kalman smoother to return posterior mean and cov.\n\n Note that the returned values `smoothed_means` depend on the observed\n time series `x`, while the `smoothed_covs` are independent\n of the observed series; i.e., they depend only on the model itself.\n This means that the mean values have shape `concat([sample_shape(x),\n batch_shape, [num_timesteps, {latent/observation}_size]])`,\n while the covariances have shape `concat[(batch_shape, [num_timesteps,\n {latent/observation}_size, {latent/observation}_size]])`, which\n does not depend on the sample shape.\n\n This function only performs smoothing. If the user wants the\n intermediate values, which are returned by filtering pass `forward_filter`,\n one could get it by:\n ```\n (log_likelihoods,\n filtered_means, filtered_covs,\n predicted_means, predicted_covs,\n observation_means, observation_covs) = model.forward_filter(x)\n smoothed_means, smoothed_covs = model.backward_smoothing_pass(x)\n ```\n where `x` is an observation sequence.\n\n Args:\n x: a float-type `Tensor` with rightmost dimensions\n `[num_timesteps, observation_size]` matching\n `self.event_shape`. 
Additional dimensions must match or be\n broadcastable to `self.batch_shape`; any further dimensions\n are interpreted as a sample shape.\n mask: optional bool-type `Tensor` with rightmost dimension\n `[num_timesteps]`; `True` values specify that the value of `x`\n at that timestep is masked, i.e., not conditioned on. Additional\n dimensions must match or be broadcastable to `self.batch_shape`; any\n further dimensions must match or be broadcastable to the sample\n shape of `x`.\n Default value: `None`.\n\n Returns:\n smoothed_means: Means of the per-timestep smoothed\n distributions over latent states, p(x_{t} | x_{:T}), as a\n Tensor of shape `sample_shape(x) + batch_shape +\n [num_timesteps, observation_size]`.\n smoothed_covs: Covariances of the per-timestep smoothed\n distributions over latent states, p(x_{t} | x_{:T}), as a\n Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,\n observation_size, observation_size]`. Note that the covariances depend\n only on the model and the mask, not on the data, so this may have fewer\n dimensions than `filtered_means`.\n \"\"\"\n\n with tf.name_scope(\"smooth\"):\n arg_1 = tf.convert_to_tensor(value=arg_1, name=\"x\")\n (arg_3, arg_4, arg_5,\n arg_6, arg_7, arg_3, arg_3) = arg_0.forward_filter(\n arg_1, arg_2=arg_2)\n\n (arg_8, arg_9) = arg_0.backward_smoothing_pass(\n arg_4, arg_5,\n arg_6, arg_7)\n\n return (arg_8, arg_9)"} +{"_id": "doc_764", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute prior means for all variables via dynamic programming.\n\n Returns:\n latent_means: Prior means of latent states `z_t`, as a `Tensor`\n of shape `batch_shape + [num_timesteps, latent_size]`\n observation_means: Prior covariance matrices of observations\n `x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,\n observation_size]`\n \"\"\"\n\n with tf.name_scope(\"mean_joint\"):\n\n # The initial timestep is a special case, since we sample the\n # latent state from the prior rather than the transition model.\n\n with tf.control_dependencies(arg_0.runtime_assertions):\n # Broadcast to ensure we represent the full batch shape.\n arg_1 = _broadcast_to_shape(\n arg_0.initial_state_prior.mean()[..., tf.newaxis],\n tf.concat([arg_0.batch_shape_tensor(),\n [arg_0.latent_size, 1]], axis=0))\n\n arg_2 = _propagate_mean(\n arg_1,\n arg_0.get_observation_matrix_for_timestep(arg_0.initial_step),\n arg_0.get_observation_noise_for_timestep(arg_0.initial_step))\n\n arg_3 = build_kalman_mean_step(\n arg_0.get_transition_matrix_for_timestep,\n arg_0.get_transition_noise_for_timestep,\n arg_0.get_observation_matrix_for_timestep,\n arg_0.get_observation_noise_for_timestep)\n\n # Scan over all timesteps following the initial step.\n (arg_4, arg_5) = tf.scan(\n arg_3,\n elems=tf.range(arg_0.initial_step+1, arg_0.final_step),\n initializer=(arg_1, arg_2))\n\n # Squish the initial step back on top of the other (scanned) timesteps\n arg_4 = tf.concat([arg_1[tf.newaxis, ...],\n arg_4], axis=0)\n arg_5 = tf.concat([arg_2[tf.newaxis, ...],\n arg_5], axis=0)\n\n # Put dimensions back in order. 
The samples we've computed have\n # shape `[num_timesteps, batch_shape, size, 1]`, where `size`\n # is the dimension of the latent or observation spaces\n # respectively, but we want to return values with shape\n # `[batch_shape, num_timesteps, size]`.\n arg_4 = tf.squeeze(arg_4, -1)\n arg_4 = distribution_util.move_dimension(arg_4, 0, -2)\n arg_5 = tf.squeeze(arg_5, -1)\n arg_5 = distribution_util.move_dimension(\n arg_5, 0, -2)\n\n return arg_4, arg_5"} +{"_id": "doc_765", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute prior covariances for all variables via dynamic programming.\n\n Returns:\n latent_covs: Prior covariance matrices of latent states `z_t`, as\n a `Tensor` of shape `batch_shape + [num_timesteps,\n latent_size, latent_size]`\n observation_covs: Prior covariance matrices of observations\n `x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,\n observation_size, observation_size]`\n \"\"\"\n\n with tf.name_scope(\"covariance_joint\"):\n\n with tf.control_dependencies(arg_0.runtime_assertions):\n arg_1 = _broadcast_to_shape(\n arg_0.initial_state_prior.covariance(),\n tf.concat([arg_0.batch_shape_tensor(),\n [arg_0.latent_size, arg_0.latent_size]], axis=0))\n\n arg_2 = _propagate_cov(\n arg_1,\n arg_0.get_observation_matrix_for_timestep(arg_0.initial_step),\n arg_0.get_observation_noise_for_timestep(arg_0.initial_step))\n\n arg_3 = build_kalman_cov_step(\n arg_0.get_transition_matrix_for_timestep,\n arg_0.get_transition_noise_for_timestep,\n arg_0.get_observation_matrix_for_timestep,\n arg_0.get_observation_noise_for_timestep)\n\n # Scan over all timesteps following the initial step.\n (arg_4, arg_5) = tf.scan(\n arg_3,\n elems=tf.range(arg_0.initial_step+1, arg_0.final_step),\n initializer=(arg_1, arg_2))\n\n # Squish the initial step back on top of the other (scanned) timesteps\n arg_4 = tf.concat([arg_1[tf.newaxis, ...],\n arg_4], axis=0)\n arg_5 = tf.concat([arg_2[tf.newaxis, ...],\n arg_5], axis=0)\n\n # Put dimensions back in order. The samples we've computed have\n # shape `[num_timesteps, batch_shape, size, size]`, where `size`\n # is the dimension of the state or observation spaces\n # respectively, but we want to return values with shape\n # `[batch_shape, num_timesteps, size, size]`.\n arg_4 = distribution_util.move_dimension(arg_4, 0, -3)\n arg_5 = distribution_util.move_dimension(\n arg_5, 0, -3)\n return arg_4, arg_5"} +{"_id": "doc_766", "title": "", "text": "def Func(arg_0):\n \"\"\"Create a deep copy of fn.\n\n Args:\n fn: a callable\n\n Returns:\n A `FunctionType`: a deep copy of fn.\n\n Raises:\n TypeError: if `fn` is not a callable.\n \"\"\"\n if not callable(arg_0):\n raise TypeError(\"fn is not callable: {}\".format(arg_0))\n # The blessed way to copy a function. copy.deepcopy fails to create a\n # non-reference copy. 
Since:\n # types.FunctionType == type(lambda: None),\n # and the docstring for the function type states:\n #\n # function(code, globals[, name[, argdefs[, closure]]])\n #\n # Create a function object from a code object and a dictionary.\n # ...\n #\n # Here we can use this to create a new function with the old function's\n # code, globals, closure, etc.\n return types.FunctionType(\n code=arg_0.__code__, globals=arg_0.__globals__,\n name=arg_0.__name__, argdefs=arg_0.__defaults__,\n closure=arg_0.__closure__)"} +{"_id": "doc_767", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Removes `dict` keys which have have `self` as value.\"\"\"\n return {arg_2: arg_3 for arg_2, arg_3 in arg_0.items() if arg_3 is not arg_1}"} +{"_id": "doc_768", "title": "", "text": "def Func(arg_0):\n \"\"\"Recursively replace `dict`s with `_PrettyDict`.\"\"\"\n # We use \"PrettyDict\" because collections.OrderedDict repr/str has the word\n # \"OrderedDict\" in it. We only want to print \"OrderedDict\" if in fact the\n # input really is an OrderedDict.\n if isinstance(arg_0, dict):\n return _PrettyDict({\n arg_1: Func(arg_2)\n for arg_1, arg_2 in arg_0.items()})\n if (isinstance(arg_0, collections.Sequence) and\n not isinstance(arg_0, six.string_types)):\n arg_3 = (Func(x_) for x_ in arg_0)\n arg_4 = (isinstance(arg_0, tuple) and\n hasattr(arg_0, \"_asdict\") and\n hasattr(arg_0, \"_fields\"))\n return type(arg_0)(*arg_3) if arg_4 else type(arg_0)(arg_3)\n if isinstance(arg_0, collections.Mapping):\n return type(arg_0)(**{arg_1: Func(arg_2)\n for arg_1, arg_2 in arg_0.items()})\n return arg_0"} +{"_id": "doc_769", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"Helper to `maybe_call_fn_and_grads`.\"\"\"\n with tf.compat.v1.name_scope(arg_4, 'value_and_gradients',\n [arg_1, arg_2, arg_3]):\n\n def _convert_to_tensor(arg_5, arg_4):\n arg_6 = lambda arg_7: arg_7 if arg_7 is None else tf.convert_to_tensor(\n value=arg_7, arg_4=arg_4)\n return [arg_6(arg_7) for arg_7 in arg_5] if is_list_like(arg_5) else arg_6(arg_5)\n\n arg_1 = (list(arg_1) if is_list_like(arg_1)\n else [arg_1])\n arg_1 = _convert_to_tensor(arg_1, 'fn_arg')\n\n if arg_2 is None:\n arg_2 = arg_0(*arg_1)\n if arg_3 is None and tf.executing_eagerly():\n # Ensure we disable bijector cacheing in eager mode.\n # TODO(b/72831017): Remove this once bijector cacheing is fixed for\n # eager mode.\n arg_1 = [0 + arg_5 for arg_5 in arg_1]\n\n arg_2 = _convert_to_tensor(arg_2, 'fn_result')\n\n if arg_3 is not None:\n arg_3 = _convert_to_tensor(arg_3, 'fn_grad')\n return arg_2, arg_3\n\n if is_list_like(arg_2) and len(arg_2) == len(arg_1):\n # Compute the block diagonal of Jacobian.\n # TODO(b/79158574): Guard this calculation by an arg which explicitly\n # requests block diagonal Jacobian calculation.\n def fn_slice(arg_8):\n \"\"\"Needed to prevent `cell-var-from-loop` pylint warning.\"\"\"\n return lambda arg_5: arg_0(*(arg_1[:arg_8] + [arg_5] + arg_1[arg_8+1:]))\n arg_3 = [\n tfp_mathFunc(fn_slice(arg_8), arg_1[arg_8])[1]\n for arg_8 in range(len(arg_2))\n ]\n else:\n arg_9, arg_3 = tfp_mathFunc(arg_0, arg_1)\n\n return arg_2, arg_3"} +{"_id": "doc_770", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=True,\n arg_5=None):\n \"\"\"Calls `fn` and computes the gradient of the result wrt `args_list`.\"\"\"\n with tf.compat.v1.name_scope(arg_5, 'Func',\n [arg_1, arg_2, arg_3]):\n arg_1 = (list(arg_1) if is_list_like(arg_1)\n else [arg_1])\n arg_2, arg_3 = _value_and_gradients(arg_0, 
arg_1, arg_2, arg_3)\n if not all(arg_6.dtype.is_floating\n for arg_6 in (arg_2 if is_list_like(arg_2) else [arg_2])): # pylint: disable=superfluous-parens\n raise TypeError('Function result must be a `Tensor` with `float` '\n '`dtype`.')\n if len(arg_1) != len(arg_3):\n raise ValueError('Function args must be in one-to-one correspondence '\n 'with grads.')\n if arg_4 and any(arg_7 is None for arg_7 in arg_3):\n raise ValueError('Encountered `None` gradient.\\n'\n ' fn_arg_list: {}\\n'\n ' grads: {}'.format(arg_1, arg_3))\n return arg_2, arg_3"} +{"_id": "doc_771", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=10, arg_4=None):\n \"\"\"Construct a for loop, preferring a python loop if `n` is staticaly known.\n\n Given `loop_num_iter` and `body_fn`, return an op corresponding to executing\n `body_fn` `loop_num_iter` times, feeding previous outputs of `body_fn` into\n the next iteration.\n\n If `loop_num_iter` is statically known, the op is constructed via python for\n loop, and otherwise a `tf.while_loop` is used.\n\n Args:\n loop_num_iter: `Integer` `Tensor` representing the number of loop\n iterations.\n body_fn: Callable to be executed `loop_num_iter` times.\n initial_loop_vars: Listlike object of `Tensors` to be passed in to\n `body_fn`'s first execution.\n parallel_iterations: The number of iterations allowed to run in parallel.\n It must be a positive integer. See `tf.while_loop` for more details.\n Default value: `10`.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"Func\").\n Returns:\n result: `Tensor` representing applying `body_fn` iteratively `n` times.\n \"\"\"\n with tf.compat.v1.name_scope(arg_4, 'Func',\n [arg_0, arg_2]):\n arg_5 = tf.get_static_value(arg_0)\n if (arg_5 is None or tf.executing_eagerly() or\n control_flow_util.GraphOrParentsInXlaContext(\n tf.compat.v1.get_default_graph())):\n # Cast to int32 to run the comparison against i in host memory,\n # where while/LoopCond needs it.\n arg_0 = tf.cast(arg_0, dtype=tf.int32)\n return tf.while_loop(\n cond=lambda i, *args: i < arg_0,\n body=lambda i, *args: [i + 1] + list(arg_1(*args)),\n loop_vars=[np.int32(0)] + arg_2,\n arg_3=arg_3\n )[1:]\n arg_6 = arg_2\n for arg_7 in range(arg_5):\n arg_6 = arg_1(*arg_6)\n return arg_6"} +{"_id": "doc_772", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=10,\n arg_5=None):\n \"\"\"A simplified version of `tf.scan` that has configurable tracing.\n\n This function repeatedly calls `loop_fn(state, elem)`, where `state` is the\n `initial_state` during the first iteration, and the return value of `loop_fn`\n for every iteration thereafter. `elem` is a slice of `elements` along the\n first dimension, accessed in order. Additionally, it calls `trace_fn` on the\n return value of `loop_fn`. 
The `Tensor`s in return values of `trace_fn` are\n stacked and returned from this function, such that the first dimension of\n those `Tensor`s matches the size of `elems`.\n\n Args:\n loop_fn: A callable that takes in a `Tensor` or a nested collection of\n `Tensor`s with the same structure as `initial_state`, a slice of `elems`\n and returns the same structure as `initial_state`.\n initial_state: A `Tensor` or a nested collection of `Tensor`s passed to\n `loop_fn` in the first iteration.\n elems: A `Tensor` that is split along the first dimension and each element\n of which is passed to `loop_fn`.\n trace_fn: A callable that takes in the return value of `loop_fn` and returns\n a `Tensor` or a nested collection of `Tensor`s.\n parallel_iterations: Passed to the internal `tf.while_loop`.\n name: Name scope used in this function. Default: 'Func'.\n\n Returns:\n final_state: The final return value of `loop_fn`.\n trace: The same structure as the return value of `trace_fn`, but with each\n `Tensor` being a stack of the corresponding `Tensors` in the return value\n of `trace_fn` for each slice of `elems`.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_5, 'Func', [arg_1, arg_2]), tf.compat.v1.variable_scope(\n tf.compat.v1.get_variable_scope()) as vs:\n if vs.caching_device is None and not tf.executing_eagerly():\n vs.set_caching_device(lambda op: op.device)\n\n arg_1 = tf.nest.map_structure(\n lambda arg_15: tf.convert_to_tensor(value=arg_15, arg_5='initial_state'),\n arg_1)\n arg_2 = tf.convert_to_tensor(value=arg_2, arg_5='elems')\n\n arg_6 = arg_2.shape[0]\n if tf.compat.dimension_value(arg_6) is None:\n arg_7 = tf.shape(input=arg_2)[0]\n else:\n arg_7 = tf.convert_to_tensor(\n value=arg_6, dtype=tf.int32, arg_5='length')\n\n # This is an TensorArray in part because of XLA, which had trouble with\n # non-statically known indices. I.e. 
elems[i] errored, but\n # elems_array.read(i) worked.\n arg_8 = tf.TensorArray(\n arg_2.dtype, size=arg_7, element_shape=arg_2.shape[1:])\n arg_8 = arg_8.unstack(arg_2)\n\n arg_9 = tf.nest.map_structure(\n lambda arg_15: tf.TensorArray(arg_15.dtype, size=arg_7, element_shape=arg_15.shape),\n arg_3(arg_1))\n\n def _body(arg_10, arg_11, arg_9):\n arg_11 = arg_0(arg_11, arg_8.read(arg_10))\n arg_9 = tf.nest.pack_sequence_as(arg_9, [\n a.write(arg_10, v) for a, v in zip(\n tf.nest.flatten(arg_9), tf.nest.flatten(arg_3(arg_11)))\n ])\n return arg_10 + 1, arg_11, arg_9\n\n arg_12, arg_13, arg_9 = tf.while_loop(\n cond=lambda arg_10, *args: arg_10 < arg_7,\n body=_body,\n loop_vars=(0, arg_1, arg_9),\n arg_4=arg_4)\n\n arg_14 = tf.nest.map_structure(lambda arg_15: arg_15.stack(), arg_9)\n\n # Restore the static length if we know it.\n def _merge_static_length(arg_15):\n arg_15.set_shape(tf.TensorShape(arg_6).concatenate(arg_15.shape[1:]))\n return arg_15\n\n arg_14 = tf.nest.map_structure(_merge_static_length, arg_14)\n return arg_13, arg_14"} +{"_id": "doc_773", "title": "", "text": "def Func(arg_0):\n \"\"\"Wraps a setter so it applies to the inner-most results in `kernel_results`.\n\n The wrapped setter unwraps `kernel_results` and applies `setter` to the first\n results without an `inner_results` attribute.\n\n Args:\n setter: A callable that takes the kernel results as well as some `*args` and\n `**kwargs` and returns a modified copy of those kernel results.\n\n Returns:\n new_setter: A wrapped `setter`.\n \"\"\"\n\n @functools.wraps(arg_0)\n def _new_setter(arg_1, *arg_2, **arg_3):\n \"\"\"Wrapped setter.\"\"\"\n arg_4 = []\n while hasattr(arg_1, 'inner_results'):\n arg_4.append(arg_1)\n arg_1 = arg_1.inner_results\n\n arg_5 = arg_0(arg_1, *arg_2, **arg_3)\n for arg_6 in reversed(arg_4):\n arg_5 = arg_6._replace(\n inner_results=arg_5)\n\n return arg_5\n\n return _new_setter"} +{"_id": "doc_774", "title": "", "text": "def Func(arg_0):\n \"\"\"Enables the `store_parameters_in_results` parameter in a chain of kernels.\n\n This is a temporary utility for use during the transition period of the\n parameter storage methods.\n\n Args:\n kernel: A TransitionKernel.\n\n Returns:\n kernel: The same kernel, but recreated with `store_parameters_in_results`\n recursively set to `True` in its parameters and its inner kernels (as\n appropriate).\n \"\"\"\n arg_1 = []\n while hasattr(arg_0, 'parameters') and 'inner_kernel' in arg_0.parameters:\n arg_1.append(arg_0)\n arg_0 = arg_0.parameters['inner_kernel']\n\n def _recreate_kernel(arg_0, arg_2):\n arg_3 = arg_0.parameters.copy()\n arg_3.update(arg_2)\n if 'store_parameters_in_results' in arg_3:\n arg_3['store_parameters_in_results'] = True\n with deprecation.silence():\n return type(arg_0)(**arg_3)\n\n if hasattr(arg_0, 'parameters'):\n arg_0 = _recreate_kernel(arg_0, {})\n\n for arg_4 in reversed(arg_1):\n arg_4 = _recreate_kernel(arg_4, {'inner_kernel': arg_0})\n arg_0 = arg_4\n\n return arg_0"} +{"_id": "doc_775", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check that a shape Tensor is int-type and otherwise sane.\"\"\"\n if not dtype_util.is_integer(arg_0.dtype):\n raise TypeError('{} dtype ({}) should be `int`-like.'.format(\n arg_0, dtype_util.name(arg_0.dtype)))\n\n arg_2 = []\n\n arg_3 = '`{}` rank should be <= 1.'\n if tensorshape_util.rank(arg_0.shape) is not None:\n if tensorshape_util.rank(arg_0.shape) > 1:\n raise ValueError(arg_3.format(arg_0))\n elif arg_1:\n arg_2.append(assert_util.assert_less(\n tf.rank(arg_0), 2, 
arg_3=arg_3.format(arg_0)))\n\n arg_4 = tf.get_static_value(arg_0)\n\n arg_3 = '`{}` elements must have at most one `-1`.'\n if arg_4 is not None:\n if sum(arg_4 == -1) > 1:\n raise ValueError(arg_3.format(arg_0))\n elif arg_1:\n arg_2.append(assert_util.assert_less(\n tf.reduce_sum(input_tensor=tf.cast(tf.equal(arg_0, -1), tf.int32)),\n 2,\n arg_3=arg_3.format(arg_0)))\n\n arg_3 = '`{}` elements must be either positive integers or `-1`.'\n if arg_4 is not None:\n if np.any(arg_4 < -1):\n raise ValueError(arg_3.format(arg_0))\n elif arg_1:\n arg_2.append(assert_util.assert_greater(\n arg_0, -2, arg_3=arg_3.format(arg_0)))\n\n return arg_2"} +{"_id": "doc_776", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5,\n arg_6):\n \"\"\"Performs the line search step of the BFGS search procedure.\n\n Uses hager_zhang line search procedure to compute a suitable step size\n to advance the current `state.position` along the given `search_direction`.\n Also, if the line search is successful, updates the `state.position` by\n taking the corresponding step.\n\n Args:\n state: A namedtuple instance holding values for the current state of the\n search procedure. The state must include the fields: `position`,\n `objective_value`, `objective_gradient`, `num_iterations`,\n `num_objective_evaluations`, `converged` and `failed`.\n value_and_gradients_function: A Python callable that accepts a point as a\n real `Tensor` of shape `[..., n]` and returns a tuple of two tensors of\n the same dtype: the objective function value, a real `Tensor` of shape\n `[...]`, and its derivative, another real `Tensor` of shape `[..., n]`.\n search_direction: A real `Tensor` of shape `[..., n]`. The direction along\n which to perform line search.\n grad_tolerance: Scalar `Tensor` of real dtype. Specifies the gradient\n tolerance for the procedure.\n f_relative_tolerance: Scalar `Tensor` of real dtype. Specifies the\n tolerance for the relative change in the objective value.\n x_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance for the\n change in the position.\n stopping_condition: A Python function that takes as input two Boolean\n tensors of shape `[...]`, and returns a Boolean scalar tensor. 
The input\n tensors are `converged` and `failed`, indicating the current status of\n each respective batch member; the return value states whether the\n algorithm should stop.\n Returns:\n A copy of the input state with the following fields updated:\n converged: a Boolean `Tensor` of shape `[...]` indicating whether the\n convergence criteria has been met.\n failed: a Boolean `Tensor` of shape `[...]` indicating whether the line\n search procedure failed to converge, or if either the updated gradient\n or objective function are no longer finite.\n num_iterations: Increased by 1.\n num_objective_evaluations: Increased by the number of times that the\n objective function got evaluated.\n position, objective_value, objective_gradient: If line search succeeded,\n updated by computing the new position and evaluating the objective\n function at that position.\n \"\"\"\n arg_7 = _restrict_along_direction(\n arg_1, arg_0.position, arg_2)\n arg_8 = tf.reduce_sum(\n input_tensor=arg_0.objective_gradient * arg_2, axis=-1)\n arg_9 = ValueAndGradient(x=_broadcast(0, arg_0.position),\n f=arg_0.objective_value,\n df=arg_8,\n full_gradient=arg_0.objective_gradient)\n arg_10 = arg_0.failed | arg_0.converged\n arg_11 = linesearch.hager_zhang(\n arg_7,\n initial_step_size=_broadcast(1, arg_0.position),\n value_at_zero=arg_9,\n converged=arg_10) # No search needed for these.\n\n arg_12 = update_fields(\n arg_0,\n failed=arg_0.failed | ~arg_11.converged,\n num_iterations=arg_0.num_iterations + 1,\n num_objective_evaluations=(\n arg_0.num_objective_evaluations + arg_11.func_evals))\n\n def _do_update_position():\n # For inactive batch members `left.x` is zero. However, their\n # `search_direction` might also be undefined, so we can't rely on\n # multiplication by zero to produce a `position_delta` of zero.\n arg_13 = tf.where(\n arg_10,\n tf.zeros_like(arg_2),\n arg_2 * tf.expand_dims(arg_11.left.x, axis=-1))\n return _update_position(\n arg_12,\n arg_13,\n arg_11.left.f,\n arg_11.left.full_gradient,\n arg_3, arg_4, arg_5)\n\n return prefer_static.cond(\n arg_6(arg_0.converged, arg_0.failed),\n true_fn=lambda: arg_12,\n false_fn=_do_update_position)"} +{"_id": "doc_777", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6):\n \"\"\"Updates the state advancing its position by a given position_delta.\"\"\"\n arg_7 = arg_0.failed | ~tf.math.is_finite(arg_2) | ~tf.reduce_all(\n input_tensor=tf.math.is_finite(arg_3), axis=-1)\n\n arg_8 = arg_0.position + arg_1\n arg_9 = ~arg_7 & _check_convergence(arg_0.position,\n arg_8,\n arg_0.objective_value,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6)\n return update_fields(\n arg_0,\n arg_9=arg_0.converged | arg_9,\n arg_7=arg_7,\n position=arg_8,\n objective_value=arg_2,\n objective_gradient=arg_3)"} +{"_id": "doc_778", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7):\n \"\"\"Checks if the algorithm satisfies the convergence criteria.\"\"\"\n arg_8 = norm(arg_4, dims=1) <= arg_5\n arg_9 = norm(arg_1 - arg_0, dims=1) <= arg_7\n arg_10 = (norm(arg_3 - arg_2, dims=0) <=\n arg_6 * arg_2)\n return arg_8 | arg_9 | arg_10"} +{"_id": "doc_779", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Broadcast a value to match the batching dimensions of a target.\n\n If necessary the value is converted into a tensor. 
Both value and target\n should be of the same dtype.\n\n Args:\n value: A value to broadcast.\n target: A `Tensor` of shape [b1, ..., bn, d].\n\n Returns:\n A `Tensor` of shape [b1, ..., bn] and same dtype as the target.\n \"\"\"\n return tf.broadcast_to(\n tf.convert_to_tensor(arg_0=arg_0, dtype=arg_1.dtype),\n distribution_util.prefer_static_shape(arg_1)[:-1])"} +{"_id": "doc_780", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"field_name from kernel_results or kernel_results.accepted_results.\"\"\"\n if hasattr(arg_0, arg_1):\n return getattr(arg_0, arg_1)\n if hasattr(arg_0, 'accepted_results'):\n return getattr(arg_0.accepted_results, arg_1)\n raise TypeError('Cannot extract %s from %s' % (arg_1, arg_0))"} +{"_id": "doc_781", "title": "", "text": "def Func(arg_0):\n \"\"\"Makes a function which applies a list of Bijectors' `inverse`s.\"\"\"\n if not mcmc_util.is_list_like(arg_0):\n arg_0 = [arg_0]\n def fn(arg_1):\n return [arg_2.inverse(arg_3)\n for arg_2, arg_3 in zip(arg_0, arg_1)]\n return fn"} +{"_id": "doc_782", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Like tf.where but works on namedtuples.\"\"\"\n if isinstance(arg_1, tf.Tensor):\n return tf.where(arg_0, arg_1, arg_2)\n elif isinstance(arg_1, tuple):\n arg_3 = type(arg_1)\n return arg_3(*(Func(arg_0, arg_4, arg_5) for arg_4, arg_5 in zip(arg_1, arg_2)))\n else:\n raise Exception(TypeError)"} +{"_id": "doc_783", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=0.1,\n arg_5=0.9,\n arg_6=None):\n \"\"\"Performs the secant square procedure of Hager Zhang.\n\n Given an interval that brackets a root, this procedure performs an update of\n both end points using two intermediate points generated using the secant\n interpolation. For details see the steps S1-S4 in [Hager and Zhang (2006)][2].\n\n The interval [a, b] must satisfy the opposite slope conditions described in\n the documentation for `update`.\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns an object that can be converted to a namedtuple.\n The namedtuple should have fields 'f' and 'df' that correspond to scalar\n tensors of real dtype containing the value of the function and its\n derivative at that point. The other namedtuple fields, if present,\n should be tensors or sequences (possibly nested) of tensors.\n In usual optimization application, this function would be generated by\n projecting the multivariate objective function along some specific\n direction. The direction is determined by some other procedure but should\n be a descent direction (i.e. the derivative of the projected univariate\n function must be negative at 0.).\n Alternatively, the function may represent the batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and the fields 'f' and 'df' in the returned\n namedtuple should each be a tensor of shape [n], with the corresponding\n function values and derivatives at the input points.\n val_0: A namedtuple, as returned by value_and_gradients_function evaluated\n at `0.`.\n search_interval: A namedtuple describing the current search interval,\n must include the fields:\n - converged: Boolean `Tensor` of shape [n], indicating batch members\n where search has already converged. 
Interval for these batch members\n won't be modified.\n - failed: Boolean `Tensor` of shape [n], indicating batch members\n where search has already failed. Interval for these batch members\n wont be modified.\n - iterations: Scalar int32 `Tensor`. Number of line search iterations\n so far.\n - func_evals: Scalar int32 `Tensor`. Number of function evaluations\n so far.\n - left: A namedtuple, as returned by value_and_gradients_function,\n of the left end point of the current search interval.\n - right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the current search interval.\n f_lim: Scalar `Tensor` of real dtype. The function value threshold for\n the approximate Wolfe conditions to be checked.\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. Corresponds to 'delta' in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager and Zhang (2006)][2].\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'Func' is used.\n\n Returns:\n A namedtuple containing the following fields.\n active: A boolean `Tensor` of shape [n]. Used internally by the procedure\n to indicate batch members on which there is work left to do.\n converged: A boolean `Tensor` of shape [n]. Indicates whether a point\n satisfying the Wolfe conditions has been found. If this is True, the\n interval will be degenerate (i.e. `left` and `right` below will be\n identical).\n failed: A boolean `Tensor` of shape [n]. Indicates if invalid function or\n gradient values were encountered (i.e. infinity or NaNs).\n num_evals: A scalar int32 `Tensor`. The total number of function\n evaluations made.\n left: Return value of value_and_gradients_function at the updated left\n end point of the interval.\n right: Return value of value_and_gradients_function at the updated right\n end point of the interval.\n \"\"\"\n with tf.compat.v1.name_scope(arg_6, 'Func', [\n arg_1, arg_2, arg_3, arg_4,\n arg_5]):\n # This will always be s.t. 
left <= c <= right\n arg_7 = arg_0(\n _secant(arg_2.left, arg_2.right))\n arg_8 = arg_2.failed | ~is_finite(arg_7)\n arg_9 = arg_2.converged | (~arg_8 & _satisfies_wolfe(\n arg_1, arg_7, arg_3, arg_4, arg_5))\n arg_10 = arg_9 & ~arg_2.converged\n arg_11 = val_where(arg_10, arg_7, arg_2.left)\n arg_12 = val_where(arg_10, arg_7, arg_2.right)\n\n arg_13 = _Secant2Result(\n active=~arg_8 & ~arg_9,\n arg_9=arg_9,\n arg_8=arg_8,\n num_evals=arg_2.func_evals + 1,\n left=arg_11,\n right=arg_12)\n\n def _apply_Func_inner():\n return _Func_inner(\n arg_0,\n arg_13,\n arg_1,\n arg_7,\n arg_3,\n arg_4,\n arg_5)\n\n return prefer_static.cond(\n tf.reduce_any(input_tensor=arg_13.active),\n _apply_Func_inner,\n lambda: arg_13)"} +{"_id": "doc_784", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6):\n \"\"\"Helper function for secant-square step.\"\"\"\n # Fail if `val_c` is no longer finite.\n arg_7 = arg_1.active & ~is_finite(arg_3)\n arg_8 = arg_1.active & ~arg_7\n arg_9 = arg_1.failed | arg_7\n\n # We converge when we find a point satisfying the Wolfe conditions, in those\n # cases we set `val_left = val_right = val_c`.\n arg_10 = arg_8 & _satisfies_wolfe(\n arg_2, arg_3, arg_4, arg_5, arg_6)\n arg_11 = val_where(arg_10, arg_3, arg_1.left)\n arg_12 = val_where(arg_10, arg_3, arg_1.right)\n arg_13 = arg_1.converged | arg_10\n arg_8 = arg_8 & ~arg_10\n\n # If any active batch members remain, we apply the `update` function to\n # squeeze further their corresponding left/right bracketing interval.\n def _apply_update():\n arg_14 = update(\n arg_0, arg_11, arg_12, arg_3, arg_4,\n arg_8=arg_8)\n return _Secant2Result(\n arg_8=tf.zeros_like(arg_8), # End of secant2, no actives anymore.\n arg_13=arg_13,\n arg_9=arg_9 | arg_14.failed,\n num_evals=arg_1.num_evals + arg_14.num_evals,\n left=arg_14.left,\n right=arg_14.right)\n\n # Otherwise just return the current results.\n def _default():\n return _Secant2Result(\n arg_8=arg_8,\n arg_13=arg_13,\n arg_9=arg_9,\n num_evals=arg_1.num_evals,\n left=arg_11,\n right=arg_12)\n\n return prefer_static.cond(\n tf.reduce_any(input_tensor=arg_8), _apply_update, _default)"} +{"_id": "doc_785", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=5.0):\n \"\"\"Brackets the minimum given an initial starting point.\n\n Applies the Hager Zhang Funcing algorithm to find an interval containing\n a region with points satisfying Wolfe conditions. Uses the supplied initial\n step size 'c', the right end point of the provided search interval, to find\n such an interval. The only condition on 'c' is that it should be positive.\n For more details see steps B0-B3 in [Hager and Zhang (2006)][2].\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple containing the value filed `f` of the\n function and its derivative value field `df` at that point.\n Alternatively, the function may representthe batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and return a tuple of two tensors of shape [n], the\n function values and the corresponding derivatives at the input points.\n search_interval: A namedtuple describing the current search interval,\n must include the fields:\n - converged: Boolean `Tensor` of shape [n], indicating batch members\n where search has already converged. 
Interval for these batch members\n wont be modified.\n - failed: Boolean `Tensor` of shape [n], indicating batch members\n where search has already failed. Interval for these batch members\n wont be modified.\n - iterations: Scalar int32 `Tensor`. Number of line search iterations\n so far.\n - func_evals: Scalar int32 `Tensor`. Number of function evaluations\n so far.\n - left: A namedtuple, as returned by value_and_gradients_function\n evaluated at 0, the left end point of the current interval.\n - right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the current interval (labelled 'c' above).\n f_lim: real `Tensor` of shape [n]. The function value threshold for\n the approximate Wolfe conditions to be checked for each batch member.\n max_iterations: Int32 scalar `Tensor`. The maximum number of iterations\n permitted. The limit applies equally to all batch members.\n expansion_param: Scalar positive `Tensor` of real dtype. Must be greater\n than `1.`. Used to expand the initial interval in case it does not Func\n a minimum.\n\n Returns:\n A namedtuple with the following fields.\n iteration: An int32 scalar `Tensor`. The number of iterations performed.\n Bounded above by `max_iterations` parameter.\n stopped: A boolean `Tensor` of shape [n]. True for those batch members\n where the algorithm terminated before reaching `max_iterations`.\n failed: A boolean `Tensor` of shape [n]. True for those batch members\n where an error was encountered during Funcing.\n num_evals: An int32 scalar `Tensor`. The number of times the objective\n function was evaluated.\n left: Return value of value_and_gradients_function at the updated left\n end point of the interval found.\n right: Return value of value_and_gradients_function at the updated right\n end point of the interval found.\n \"\"\"\n arg_5 = arg_1.failed | arg_1.converged\n\n # If the slope at right end point is positive, step B1 in [2], then the given\n # initial points already Func a minimum.\n arg_6 = arg_1.right.df >= 0\n\n # Bisection is needed, step B2, if right end point almost works as a new left\n # end point but the objective value is too high.\n arg_7 = (\n arg_1.right.df < 0) & (arg_1.right.f > arg_2)\n\n # In these three cases Funcing is already `stopped` and there is no need\n # to perform further evaluations. Otherwise the Funcing loop is needed to\n # expand the interval, step B3, until the conditions are met.\n arg_8 = _IntermediateResult(\n iteration=arg_1.iterations,\n arg_15=arg_5 | arg_6 | arg_7,\n arg_13=arg_1.failed,\n num_evals=arg_1.func_evals,\n arg_11=arg_1.left,\n arg_12=arg_1.right)\n\n def _loop_cond(arg_9):\n return (arg_9.iteration <\n arg_3) & ~tf.reduce_all(input_tensor=arg_9.stopped)\n\n def _loop_body(arg_9):\n \"\"\"Main body of Funcing loop.\"\"\"\n # The loop maintains the invariant that curr.stopped is true if we have\n # either: failed, successfully Funced, or not yet Funced but needs\n # bisect. On the only remaining case, step B3 in [2]. 
case we need to\n # expand and update the left/right values appropriately.\n arg_10 = arg_0(arg_4 * arg_9.right.x)\n arg_11 = val_where(arg_9.stopped, arg_9.left, arg_9.right)\n arg_12 = val_where(arg_9.stopped, arg_9.right, arg_10)\n\n # Updated the failed, Funced, and needs_bisect conditions.\n arg_13 = arg_9.failed | ~is_finite(arg_12)\n arg_6 = arg_12.df >= 0\n arg_7 = (arg_12.df < 0) & (arg_12.f > arg_2)\n return [_IntermediateResult(\n iteration=arg_9.iteration + 1,\n arg_15=arg_9.stopped | arg_13 | arg_6 | arg_7,\n arg_13=arg_13,\n num_evals=arg_9.num_evals + 1,\n arg_11=arg_11,\n arg_12=arg_12)]\n\n arg_14 = tf.while_loop(\n cond=_loop_cond, body=_loop_body, loop_vars=[arg_8])[0]\n\n # For entries where bisect is still needed, mark them as not yet stopped,\n # reset the left end point, and run `_bisect` on them.\n arg_7 = (\n (arg_14.right.df < 0) & (arg_14.right.f > arg_2))\n arg_15 = arg_5 | arg_14.failed | ~arg_7\n arg_11 = val_where(arg_15, arg_14.left, arg_1.left)\n arg_16 = arg_14._replace(arg_15=arg_15, arg_11=arg_11)\n return _bisect(arg_0, arg_16, arg_2)"} +{"_id": "doc_786", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Bisects an interval and updates to satisfy opposite slope conditions.\n\n Corresponds to the step U3 in [Hager and Zhang (2006)][2].\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple containing the value filed `f` of the\n function and its derivative value field `df` at that point.\n Alternatively, the function may representthe batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and return a tuple of two tensors of shape [n], the\n function values and the corresponding derivatives at the input points.\n initial_left: Return value of value_and_gradients_function at the left end\n point of the current bracketing interval.\n initial_right: Return value of value_and_gradients_function at the right end\n point of the current bracketing interval.\n f_lim: real `Tensor` of shape [n]. The function value threshold for\n the approximate Wolfe conditions to be checked for each batch member.\n\n Returns:\n A namedtuple containing the following fields:\n iteration: An int32 scalar `Tensor`. The number of iterations performed.\n Bounded above by `max_iterations` parameter.\n stopped: A boolean scalar `Tensor`. True if the Func algorithm\n terminated.\n failed: A scalar boolean tensor. Indicates whether the objective function\n failed to produce a finite value.\n num_evals: A scalar int32 tensor. 
The number of value and gradients\n function evaluations.\n left: Return value of value_and_gradients_function at the left end\n point of the bracketing interval found.\n right: Return value of value_and_gradients_function at the right end\n point of the bracketing interval found.\n \"\"\"\n arg_4 = ~is_finite(arg_1, arg_2)\n arg_5 = (arg_2.df < 0) & (arg_2.f > arg_3)\n arg_6 = _IntermediateResult(\n iteration=tf.convert_to_tensor(value=0),\n stopped=arg_4 | ~arg_5,\n arg_4=arg_4,\n num_evals=tf.convert_to_tensor(value=0),\n left=arg_1,\n right=arg_2)\n return _Func(arg_0, arg_6, arg_3)"} +{"_id": "doc_787", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Actual implementation of bisect given initial_args in a _BracketResult.\"\"\"\n def _loop_cond(arg_3):\n # TODO(b/112524024): Also take into account max_iterations.\n return ~tf.reduce_all(input_tensor=arg_3.stopped)\n\n def _loop_body(arg_3):\n \"\"\"Narrow down interval to satisfy opposite slope conditions.\"\"\"\n arg_4 = arg_0((arg_3.left.x + arg_3.right.x) / 2)\n\n # Fail if function values at mid point are no longer finite; or left/right\n # points are so close to it that we can't distinguish them any more.\n arg_5 = (arg_3.failed | ~is_finite(arg_4) |\n tf.equal(arg_4.x, arg_3.left.x) | tf.equal(arg_4.x, arg_3.right.x))\n\n # If mid point has a negative slope and the function value at that point is\n # small enough, we can use it as a new left end point to narrow down the\n # interval. If mid point has a positive slope, then we have found a suitable\n # right end point to bracket a minima within opposite slopes. Otherwise, the\n # mid point has a negative slope but the function value at that point is too\n # high to work as left end point, we are in the same situation in which we\n # started the loop so we just update the right end point and continue.\n arg_6 = ~(arg_3.stopped | arg_5)\n arg_7 = (arg_4.df < 0) & (arg_4.f <= arg_2)\n arg_8 = val_where(arg_6 & arg_7, arg_4, arg_3.left)\n arg_9 = val_where(arg_6 & ~arg_7, arg_4, arg_3.right)\n\n # We're done when the right end point has a positive slope.\n arg_10 = arg_3.stopped | arg_5 | (arg_9.df >= 0)\n\n return [_IntermediateResult(\n iteration=arg_3.iteration,\n arg_10=arg_10,\n arg_5=arg_5,\n num_evals=arg_3.num_evals + 1,\n arg_8=arg_8,\n arg_9=arg_9)]\n\n # The interval needs updating if the right end point has a negative slope and\n # the value of the function at that point is too high. It is not a valid left\n # end point but along with the current left end point, it encloses another\n # minima. The loop above tries to narrow the interval so that it satisfies the\n # opposite slope conditions.\n return tf.while_loop(\n cond=_loop_cond, body=_loop_body, loop_vars=[arg_1])[0]"} +{"_id": "doc_788", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Checks if the supplied values are finite.\n\n Args:\n val_1: A namedtuple instance with the function value and derivative,\n as returned e.g. by value_and_gradients_function evaluations.\n val_2: (Optional) A namedtuple instance with the function value and\n derivative, as returned e.g. 
by value_and_gradients_function evaluations.\n\n Returns:\n Func: Scalar boolean `Tensor` indicating whether the function value\n and the derivative in `val_1` (and optionally in `val_2`) are all finite.\n \"\"\"\n arg_2 = tf.math.Func(arg_0.f) & tf.math.Func(arg_0.df)\n if arg_1 is not None:\n return arg_2 & tf.math.Func(arg_1.f) & tf.math.Func(\n arg_1.df)\n return arg_2"} +{"_id": "doc_789", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4):\n \"\"\"Checks whether the Wolfe or approx Wolfe conditions are satisfied.\n\n The Wolfe conditions are a set of stopping criteria for an inexact line search\n algorithm. Let f(a) be the function value along the search direction and\n df(a) the derivative along the search direction evaluated a distance 'a'.\n Here 'a' is the distance along the search direction. The Wolfe conditions are:\n\n ```None\n f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)\n df(a) >= sigma * df(0) (Weak curvature condition)\n ```\n `delta` and `sigma` are two user supplied parameters satisfying:\n `0 < delta < sigma <= 1.`. In the following, delta is called\n `sufficient_decrease_param` and sigma is called `curvature_param`.\n\n On a finite precision machine, the Wolfe conditions are difficult to satisfy\n when one is close to the minimum. Hence, Hager-Zhang propose replacing\n the sufficient decrease condition with the following condition on the\n derivative in the vicinity of a minimum.\n\n ```None\n df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)\n ```\n This condition is only used if one is near the minimum. This is tested using\n\n ```None\n f(a) <= f(0) + epsilon * |f(0)|\n ```\n The following function checks both the Wolfe and approx Wolfe conditions.\n Here, `epsilon` is a small positive constant. In the following, the argument\n `f_lim` corresponds to the product: epsilon * |f(0)|.\n\n Args:\n val_0: A namedtuple, as returned by value_and_gradients_function\n evaluated at 0.\n val_c: A namedtuple, as returned by value_and_gradients_function\n evaluated at the point to be tested.\n f_lim: Scalar `Tensor` of real dtype. The function value threshold for\n the approximate Wolfe conditions to be checked.\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. Corresponds to 'delta' in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager Zhang (2005)][1].\n\n Returns:\n is_satisfied: A scalar boolean `Tensor` which is True if either the\n Wolfe or approximate Wolfe conditions are satisfied.\n \"\"\"\n arg_5 = (arg_3 * arg_0.df >=\n (arg_1.f - arg_0.f) / arg_1.x)\n arg_6 = arg_1.df >= arg_4 * arg_0.df\n arg_7 = arg_5 & arg_6\n arg_8 = arg_1.f <= arg_2\n arg_9 = ((2 * arg_3 - 1) * arg_0.df\n >= arg_1.df)\n arg_10 = arg_8 & arg_9 & arg_6\n arg_11 = arg_7 | arg_10\n return arg_11"} +{"_id": "doc_790", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the secant interpolation for the minimum.\n\n The secant method is a technique for finding roots of nonlinear functions.\n When finding the minimum, one applies the secant method to the derivative\n of the function.\n For an arbitrary function and a bounding interval, the secant approximation\n can produce the next point which is outside the bounding interval. 
However,\n with the assumption of opposite slope condtion on the interval [a,b] the new\n point c is always bracketed by [a,b]. Note that by assumption,\n f'(a) < 0 and f'(b) > 0.\n Hence c is a weighted average of a and b and thus always in [a, b].\n\n Args:\n val_a: A namedtuple with the left end point, function value and derivative,\n of the current interval (i.e. a).\n val_b: A namedtuple with the right end point, function value and derivative,\n of the current interval (i.e. b).\n\n Returns:\n approx_minimum: A scalar real `Tensor`. An approximation to the point\n at which the derivative vanishes.\n \"\"\"\n return (arg_0.x * arg_1.df - arg_1.x * arg_0.df) / (arg_1.df - arg_0.df)"}
{"_id": "doc_791", "title": "", "text": "def Func(arg_0,\n arg_1=0.75,\n arg_2=0.01,\n arg_3=0.01,\n arg_4=None):\n \"\"\"Create a function implementing a step-size update policy.\n\n The simple policy increases or decreases the `step_size_var` based on the\n average of `exp(minimum(0., log_accept_ratio))`. It is based on\n [Section 4.2 of Andrieu and Thoms (2008)](\n https://people.eecs.berkeley.edu/~jordan/sail/readings/andrieu-thoms.pdf).\n\n The `num_adaptation_steps` argument is set independently of any burnin\n for the overall chain. In general, adaptation prevents the chain from\n reaching a stationary distribution, so obtaining consistent samples requires\n `num_adaptation_steps` be set to a value [somewhat smaller](\n http://andrewgelman.com/2017/12/15/burn-vs-warm-iterative-simulation-algorithms/#comment-627745)\n than the number of burnin steps. However, it may sometimes be helpful to set\n `num_adaptation_steps` to a larger value during development in order to\n inspect the behavior of the chain during adaptation.\n\n Args:\n num_adaptation_steps: Scalar `int` `Tensor` number of initial steps to\n during which to adjust the step size. This may be greater, less than, or\n equal to the number of burnin steps. If `None`, the step size is adapted\n on every step (note this breaks stationarity of the chain!).\n target_rate: Scalar `Tensor` representing desired `accept_ratio`.\n Default value: `0.75` (i.e., [center of asymptotically optimal\n rate](https://arxiv.org/abs/1411.6669)).\n decrement_multiplier: `Tensor` representing amount to downscale current\n `step_size`.\n Default value: `0.01`.\n increment_multiplier: `Tensor` representing amount to upscale current\n `step_size`.\n Default value: `0.01`.\n step_counter: Scalar `int` `Variable` specifying the current step.
The step\n size is adapted iff `step_counter < num_adaptation_steps`.\n Default value: if `None`, an internal variable\n `step_size_adaptation_step_counter` is created and initialized to `-1`.\n\n Returns:\n step_size_simple_update_fn: Callable that takes args\n `step_size_var, kernel_results` and returns updated step size(s).\n \"\"\"\n if arg_4 is None and arg_0 is not None:\n arg_4 = tf.compat.v1.get_variable(\n name='step_size_adaptation_step_counter',\n initializer=np.array(-1, dtype=np.int32),\n # Specify the dtype for variable sharing to work correctly\n # (b/120599991).\n dtype=tf.int32,\n trainable=False,\n use_resource=True)\n\n def step_size_simple_update_fn(arg_5, arg_6):\n \"\"\"Updates (list of) `step_size` using a standard adaptive MCMC procedure.\n\n Args:\n step_size_var: (List of) `tf.Variable`s representing the per `state_part`\n HMC `step_size`.\n kernel_results: `collections.namedtuple` containing `Tensor`s\n representing values from most recent call to `one_step`.\n\n Returns:\n step_size_assign: (List of) `Tensor`(s) representing updated\n `step_size_var`(s).\n \"\"\"\n\n if arg_6 is None:\n if mcmc_util.is_list_like(arg_5):\n return [tf.identity(arg_7) for arg_7 in arg_5]\n return tf.identity(arg_5)\n arg_8 = tf.math.log(\n tf.cast(\n tf.size(input=arg_6.log_accept_ratio),\n arg_6.log_accept_ratio.dtype))\n arg_9 = tf.reduce_logsumexp(\n input_tensor=tf.minimum(arg_6.log_accept_ratio, 0.)) - arg_8\n arg_10 = tf.where(\n arg_9 < tf.cast(\n tf.math.log(arg_1), arg_9.dtype),\n -arg_2 / (1. + arg_2),\n arg_3)\n\n def build_assign_op():\n if mcmc_util.is_list_like(arg_5):\n return [\n arg_7.assign_add(arg_7 * tf.cast(arg_10, arg_7.dtype))\n for arg_7 in arg_5\n ]\n return arg_5.assign_add(\n arg_5 * tf.cast(arg_10, arg_5.dtype))\n\n if arg_0 is None:\n return build_assign_op()\n else:\n with tf.control_dependencies([arg_4.assign_add(1)]):\n return tf.cond(\n pred=arg_4 < arg_0,\n true_fn=build_assign_op,\n false_fn=lambda: arg_5)\n\n return step_size_simple_update_fn"} +{"_id": "doc_792", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates initial `previous_kernel_results` using a supplied `state`.\"\"\"\n arg_2 = arg_0._impl.Func(arg_1)\n if arg_0.step_size_update_fn is not None:\n arg_3 = arg_0.step_size_update_fn(arg_0.step_size, None) # pylint: disable=not-callable\n arg_2 = arg_2._replace(\n extra=HamiltonianMonteCarloExtraKernelResults(\n arg_3=arg_3))\n return arg_2"} +{"_id": "doc_793", "title": "", "text": "def Func(arg_0,\n arg_1=10,\n arg_2=-9.0,\n arg_3=0.1,\n arg_4=0.2):\n \"\"\"Constructs a ResNet18 model.\n\n Args:\n input_shape: A `tuple` indicating the Tensor shape.\n num_classes: `int` representing the number of class labels.\n kernel_posterior_scale_mean: Python `int` number for the kernel\n posterior's scale (log variance) mean. The smaller the mean the closer\n is the initialization to a deterministic network.\n kernel_posterior_scale_stddev: Python `float` number for the initial kernel\n posterior's scale stddev.\n ```\n q(W|x) ~ N(mu, var),\n log_var ~ N(kernel_posterior_scale_mean, kernel_posterior_scale_stddev)\n ````\n kernel_posterior_scale_constraint: Python `float` number for the log value\n to constrain the log variance throughout training.\n i.e. 
log_var <= log(kernel_posterior_scale_constraint).\n\n Returns:\n tf.keras.Model.\n \"\"\"\n\n arg_5 = [64, 128, 256, 512]\n arg_6 = [3, 3, 3, 3]\n arg_7 = [1, 2, 2, 2]\n\n def _untransformed_scale_constraint(arg_8):\n return tf.clip_by_value(arg_8, -1000,\n tf.math.log(arg_4))\n\n arg_9 = tfp.layers.default_mean_field_normal_fn(\n untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(\n mean=arg_2,\n stddev=arg_3),\n untransformed_scale_constraint=_untransformed_scale_constraint)\n\n arg_10 = tf.keras.layers.Input(shape=arg_0, dtype='float32')\n arg_11 = tfp.layers.Convolution2DFlipout(\n 64,\n 3,\n arg_7=1,\n padding='same',\n arg_9=arg_9)(arg_10)\n\n for arg_12 in range(len(arg_6)):\n arg_11 = _resnet_block(\n arg_11,\n arg_5[arg_12],\n arg_6[arg_12],\n arg_7[arg_12],\n arg_9)\n\n arg_11 = tf.keras.layers.BatchNormalization()(arg_11)\n arg_11 = tf.keras.layers.Activation('relu')(arg_11)\n arg_11 = tf.keras.layers.AveragePooling2D(4, 1)(arg_11)\n arg_11 = tf.keras.layers.Flatten()(arg_11)\n\n arg_11 = tfp.layers.DenseFlipout(\n arg_1,\n arg_9=arg_9)(arg_11)\n\n arg_13 = tf.keras.Model(inputs=arg_10, outputs=arg_11, name='resnet18')\n return arg_13"} +{"_id": "doc_794", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create the encoder function.\n\n Args:\n activation: Activation function to use.\n num_topics: The number of topics.\n layer_sizes: The number of hidden units per layer in the encoder.\n\n Returns:\n encoder: A `callable` mapping a bag-of-words `Tensor` to a\n `tfd.Distribution` instance over topics.\n \"\"\"\n arg_3 = tf.keras.Sequential()\n for arg_4 in arg_2:\n arg_3.add(\n tf.keras.layers.Dense(\n arg_4,\n arg_0=arg_0,\n kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n arg_3.add(\n tf.keras.layers.Dense(\n arg_1,\n arg_0=tf.nn.softplus,\n kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n\n def encoder(arg_5):\n arg_6 = _clip_dirichlet_parameters(arg_3(arg_5))\n return tfd.Dirichlet(concentration=arg_6,\n name=\"topics_posterior\")\n\n return encoder"} +{"_id": "doc_795", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create the decoder function.\n\n Args:\n num_topics: The number of topics.\n num_words: The number of words.\n\n Returns:\n decoder: A `callable` mapping a `Tensor` of encodings to a\n `tfd.Distribution` instance over words.\n \"\"\"\n arg_2 = tf.compat.v1.get_variable(\n \"topics_words_logits\",\n shape=[arg_0, arg_1],\n initializer=tf.compat.v1.glorot_normal_initializer())\n arg_3 = tf.nn.softmax(arg_2, axis=-1)\n\n def decoder(arg_4):\n arg_5 = tf.matmul(arg_4, arg_3)\n # The observations are bag of words and therefore not one-hot. However,\n # log_prob of OneHotCategorical computes the probability correctly in\n # this case.\n return tfd.OneHotCategorical(probs=arg_5,\n name=\"bag_of_words\")\n\n return decoder, arg_3"} +{"_id": "doc_796", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=0,\n arg_5=0,\n arg_6=lambda arg_1, arg_7: arg_7,\n arg_8=False,\n arg_9=10,\n arg_10=None,\n):\n \"\"\"Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.\n\n This function samples from an Markov chain at `current_state` and whose\n stationary distribution is governed by the supplied `TransitionKernel`\n instance (`kernel`).\n\n This function can sample from multiple chains, in parallel. 
(Whether or not\n there are multiple chains is dictated by the `kernel`.)\n\n The `current_state` can be represented as a single `Tensor` or a `list` of\n `Tensors` which collectively represent the current state.\n\n Since MCMC states are correlated, it is sometimes desirable to produce\n additional intermediate states, and then discard them, ending up with a set of\n states with decreased autocorrelation. See [Owen (2017)][1]. Such \"thinning\"\n is made possible by setting `num_steps_between_results > 0`. The chain then\n takes `num_steps_between_results` extra steps between the steps that make it\n into the results. The extra steps are never materialized (in calls to\n `sess.run`), and thus do not increase memory requirements.\n\n Warning: when setting a `seed` in the `kernel`, ensure that `Func`'s\n `parallel_iterations=1`, otherwise results will not be reproducible.\n\n In addition to returning the chain state, this function supports tracing of\n auxiliary variables used by the kernel. The traced values are selected by\n specifying `trace_fn`. By default, all kernel results are traced but in the\n future the default will be changed to no results being traced, so plan\n accordingly. See below for some examples of this feature.\n\n Args:\n num_results: Integer number of Markov chain draws.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s).\n previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s\n representing internal calculations made within the previous call to this\n function (or as returned by `bootstrap_results`).\n kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step\n of the Markov chain.\n num_burnin_steps: Integer number of chain steps to take before starting to\n collect results.\n Default value: 0 (i.e., no burn-in).\n num_steps_between_results: Integer number of chain steps between collecting\n a result. Only one out of every `num_steps_between_samples + 1` steps is\n included in the returned results. The number of returned chain states is\n still equal to `num_results`. Default value: 0 (i.e., no thinning).\n trace_fn: A callable that takes in the current chain state and the previous\n kernel results and return a `Tensor` or a nested collection of `Tensor`s\n that is then traced along with the chain state.\n return_final_kernel_results: If `True`, then the final kernel results are\n returned alongside the chain state and the trace specified by the\n `trace_fn`.\n parallel_iterations: The number of iterations allowed to run in parallel. It\n must be a positive integer. See `tf.while_loop` for more details.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"mcmc_Func\").\n\n Returns:\n checkpointable_states_and_trace: if `return_final_kernel_results` is\n `True`. The return value is an instance of\n `CheckpointableStatesAndTrace`.\n all_states: if `return_final_kernel_results` is `False` and `trace_fn` is\n `None`. The return value is a `Tensor` or Python list of `Tensor`s\n representing the state(s) of the Markov chain(s) at each result step. Has\n same shape as input `current_state` but with a prepended\n `num_results`-size dimension.\n states_and_trace: if `return_final_kernel_results` is `False` and\n `trace_fn` is not `None`. 
The return value is an instance of\n `StatesAndTrace`.\n\n #### Examples\n\n ##### Sample from a diagonal-variance Gaussian.\n\n I.e.,\n\n ```none\n for i=1..n:\n x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood\n ```\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n dims = 10\n true_stddev = np.sqrt(np.linspace(1., 3., dims))\n likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)\n\n states = tfp.mcmc.Func(\n num_results=1000,\n num_burnin_steps=500,\n current_state=tf.zeros(dims),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=likelihood.log_prob,\n step_size=0.5,\n num_leapfrog_steps=2),\n trace_fn=None)\n\n sample_mean = tf.reduce_mean(states, axis=0)\n # ==> approx all zeros\n\n sample_stddev = tf.sqrt(tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0))\n # ==> approx equal true_stddev\n ```\n\n ##### Sampling from factor-analysis posteriors with known factors.\n\n I.e.,\n\n ```none\n # prior\n w ~ MultivariateNormal(loc=0, scale=eye(d))\n for i=1..n:\n # likelihood\n x[i] ~ Normal(loc=w^T F[i], scale=1)\n ```\n\n where `F` denotes factors.\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n # Specify model.\n def make_prior(dims):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims))\n\n def make_likelihood(weights, factors):\n return tfd.MultivariateNormalDiag(\n loc=tf.matmul(weights, factors, adjoint_b=True))\n\n def joint_log_prob(num_weights, factors, x, w):\n return (make_prior(num_weights).log_prob(w) +\n make_likelihood(w, factors).log_prob(x))\n\n def unnormalized_log_posterior(w):\n # Posterior is proportional to: `p(W, X=x | factors)`.\n return joint_log_prob(num_weights, factors, x, w)\n\n # Setup data.\n num_weights = 10 # == d\n num_factors = 40 # == n\n num_chains = 100\n\n weights = make_prior(num_weights).sample(1)\n factors = tf.random_normal([num_factors, num_weights])\n x = make_likelihood(weights, factors).sample()\n\n # Sample from Hamiltonian Monte Carlo Markov Chain.\n\n # Get `num_results` samples from `num_chains` independent chains.\n chains_states, kernels_results = tfp.mcmc.Func(\n num_results=1000,\n num_burnin_steps=500,\n current_state=tf.zeros([num_chains, num_weights], name='init_weights'),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=unnormalized_log_posterior,\n step_size=0.1,\n num_leapfrog_steps=2))\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])\n # ==> approx equal to weights\n\n sample_var = tf.reduce_mean(\n tf.squared_difference(chains_states, sample_mean),\n axis=[0, 1])\n # ==> less than 1\n ```\n\n ##### Custom tracing functions.\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n likelihood = tfd.Normal(loc=0., scale=1.)\n\n def Func(trace_fn):\n return tfp.mcmc.Func(\n num_results=1000,\n num_burnin_steps=500,\n current_state=0.,\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=likelihood.log_prob,\n step_size=0.5,\n num_leapfrog_steps=2),\n trace_fn=trace_fn)\n\n def trace_log_accept_ratio(states, previous_kernel_results):\n return previous_kernel_results.log_accept_ratio\n\n def trace_everything(states, previous_kernel_results):\n return previous_kernel_results\n\n _, log_accept_ratio = Func(trace_fn=trace_log_accept_ratio)\n _, kernel_results = Func(trace_fn=trace_everything)\n\n acceptance_prob = 
tf.exp(tf.minimum(log_accept_ratio_, 0.))\n # Equivalent to, but more efficient than:\n acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio_, 0.))\n ```\n\n #### References\n\n [1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.\n _Technical Report_, 2017.\n http://statweb.stanford.edu/~owen/reports/bestthinning.pdf\n \"\"\"\n if not arg_3.is_calibrated:\n warnings.warn(\"supplied `TransitionKernel` is not calibrated. Markov \"\n \"chain may not converge to intended target distribution.\")\n with tf.compat.v1.name_scope(\n arg_10, \"mcmc_Func\",\n [arg_0, arg_4, arg_5]):\n arg_0 = tf.convert_to_tensor(\n value=arg_0, dtype=tf.int32, arg_10=\"num_results\")\n arg_4 = tf.convert_to_tensor(\n value=arg_4, dtype=tf.int32, arg_10=\"num_burnin_steps\")\n arg_5 = tf.convert_to_tensor(\n value=arg_5,\n dtype=tf.int32,\n arg_10=\"num_steps_between_results\")\n arg_1 = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, arg_10=\"current_state\"),\n arg_1)\n if arg_2 is None:\n arg_2 = arg_3.bootstrap_results(arg_1)\n\n if arg_6 is None:\n # It simplifies the logic to use a dummy function here.\n arg_6 = lambda *args: ()\n arg_11 = True\n else:\n arg_11 = False\n if arg_6 is Func.__defaults__[4]:\n warnings.warn(\"Tracing all kernel results by default is deprecated. Set \"\n \"the `trace_fn` argument to None (the future default \"\n \"value) or an explicit callback that traces the values \"\n \"you are interested in.\")\n\n def _trace_scan_fn(arg_12, arg_13):\n arg_14, arg_15 = mcmc_util.smart_for_loop(\n loop_num_iter=arg_13,\n body_fn=arg_3.one_step,\n initial_loop_vars=list(arg_12),\n arg_9=arg_9)\n return arg_14, arg_15\n\n (arg_16, arg_17), (arg_18, arg_19) = mcmc_util.trace_scan(\n loop_fn=_trace_scan_fn,\n initial_state=(arg_1, arg_2),\n elems=tf.one_hot(\n indices=0,\n depth=arg_0,\n on_value=1 + arg_4,\n off_value=1 + arg_5,\n dtype=tf.int32),\n # pylint: disable=g-long-lambda\n arg_6=lambda arg_12: (arg_12[0],\n arg_6(*arg_12)),\n # pylint: enable=g-long-lambda\n arg_9=arg_9)\n\n if arg_8:\n return CheckpointableStatesAndTrace(\n arg_18=arg_18,\n arg_19=arg_19,\n arg_17=arg_17)\n else:\n if arg_11:\n return arg_18\n else:\n return StatesAndTrace(arg_18=arg_18, arg_19=arg_19)"} +{"_id": "doc_797", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"A multi-layered topic model over a documents-by-terms matrix.\"\"\"\n arg_4 = ed.Gamma(0.1, 0.3, sample_shape=[arg_2[2], arg_2[1]], name=\"w2\")\n arg_5 = ed.Gamma(0.1, 0.3, sample_shape=[arg_2[1], arg_2[0]], name=\"w1\")\n arg_6 = ed.Gamma(0.1, 0.3, sample_shape=[arg_2[0], arg_1], name=\"w0\")\n\n arg_7 = ed.Gamma(0.1, 0.1, sample_shape=[arg_0, arg_2[2]], name=\"z2\")\n arg_8 = ed.Gamma(arg_3, arg_3 / tf.matmul(arg_7, arg_4), name=\"z1\")\n arg_9 = ed.Gamma(arg_3, arg_3 / tf.matmul(arg_8, arg_5), name=\"z0\")\n arg_10 = ed.Poisson(tf.matmul(arg_9, arg_6), name=\"x\")\n return arg_10"} +{"_id": "doc_798", "title": "", "text": "def Func(arg_0, arg_1=1e-3, arg_2=1e-5, arg_3=None):\n \"\"\"Learnable Gamma via concentration and scale parameterization.\"\"\"\n with tf.compat.v1.variable_scope(None, default_name=\"Func\"):\n arg_4 = tf.compat.v1.get_variable(\n \"unconstrained_concentration\",\n arg_0,\n initializer=tf.compat.v1.initializers.random_normal(\n mean=0.5, stddev=0.1))\n arg_5 = tf.compat.v1.get_variable(\n \"unconstrained_scale\",\n arg_0,\n initializer=tf.compat.v1.initializers.random_normal(stddev=0.1))\n arg_6 = tf.maximum(tf.nn.softplus(arg_4),\n arg_1)\n arg_7 = 
tf.maximum(1. / tf.nn.softplus(arg_5), 1. / arg_2)\n arg_8 = ed.Gamma(arg_6=arg_6, arg_7=arg_7, arg_3=arg_3)\n return arg_8"} +{"_id": "doc_799", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the KL function registered for classes a and b.\"\"\"\n arg_2 = tf_inspect.getmro(arg_0)\n arg_3 = tf_inspect.getmro(arg_1)\n arg_4 = None\n arg_5 = None\n for arg_6, arg_7 in enumerate(arg_2):\n for arg_8, arg_9 in enumerate(arg_3):\n arg_10 = arg_6 + arg_8\n arg_11 = _DIVERGENCES.get((arg_7, arg_9), None)\n if not arg_5 or (arg_11 and arg_10 < arg_4):\n arg_4 = arg_10\n arg_5 = arg_11\n return arg_5"} +{"_id": "doc_800", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns an image tensor.\"\"\"\n arg_1 = tf.io.read_file(arg_0)\n arg_2 = tf.image.decode_image(arg_1, channels=CHANNELS)\n arg_2 = tf.image.convert_image_dtype(arg_2, tf.float32)\n return arg_2"} +{"_id": "doc_801", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Creates a character sprite from a set of attribute sprites.\"\"\"\n arg_4 = arg_0.dtype\n arg_5 = tf.cast(arg_1[..., -1:] <= 0, arg_4)\n arg_6 = tf.cast(arg_2[..., -1:] <= 0, arg_4)\n arg_7 = tf.cast(arg_3[..., -1:] <= 0, arg_4)\n arg_8 = (arg_0 * arg_5) + arg_1\n arg_8 = (arg_8 * arg_6) + arg_2\n arg_8 = (arg_8 * arg_7) + arg_3\n return arg_8"} +{"_id": "doc_802", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=8):\n \"\"\"Creates a random sequence.\"\"\"\n arg_4 = tf.random.uniform([], maxval=arg_1[1], dtype=tf.int32)\n return create_seq(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_803", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=3,\n arg_4=8, arg_5=False, arg_6=False):\n \"\"\"Creates a tf.data pipeline for the sprites dataset.\n\n Args:\n characters: A list of (skin, hair, top, pants) tuples containing\n relative paths to the sprite png image for each attribute.\n actions: A list of Actions.\n directions: A list of Directions.\n channels: Number of image channels to yield.\n length: Desired length of the sequences.\n shuffle: Whether or not to shuffle the characters and sequences\n start frame.\n fake_data: Boolean for whether or not to yield synthetic data.\n\n Returns:\n A tf.data.Dataset yielding (seq, skin label index, hair label index,\n top label index, pants label index, action label index, skin label\n name, hair label_name, top label name, pants label name, action\n label name) tuples.\n \"\"\"\n if arg_6:\n arg_7 = tf.random.normal([HEIGHT, WIDTH, CHANNELS])\n else:\n arg_8 = download_sprites()\n\n arg_9 = [action.name for action in arg_1]\n arg_10 = [(action.start_row, action.frames) for action in arg_1]\n\n arg_11 = [direction.row_offset for direction in arg_2]\n\n arg_12 = tf.data.Dataset.from_tensor_slices(arg_0)\n arg_13 = tf.data.Dataset.from_tensor_slices(arg_9).repeat()\n arg_14 = tf.data.Dataset.from_tensor_slices(arg_10).repeat()\n arg_15 = tf.data.Dataset.from_tensor_slices(arg_11).repeat()\n\n if arg_5:\n arg_12 = arg_12.shuffle(len(arg_0))\n\n arg_16 = tf.data.Dataset.zip((arg_12, arg_13, arg_14, arg_15))\n\n arg_17 = tf.contrib.lookup.index_table_from_tensor(sorted(SKIN_COLORS))\n arg_18 = tf.contrib.lookup.index_table_from_tensor(sorted(HAIRSTYLES))\n arg_19 = tf.contrib.lookup.index_table_from_tensor(sorted(TOPS))\n arg_20 = tf.contrib.lookup.index_table_from_tensor(sorted(PANTS))\n arg_21 = tf.contrib.lookup.index_table_from_tensor(sorted(arg_9))\n\n def process_example(arg_22, arg_23, arg_24, arg_25):\n \"\"\"Processes a dataset row.\"\"\"\n arg_26 = arg_22[0]\n arg_27 = arg_22[1]\n 
arg_28 = arg_22[2]\n arg_29 = arg_22[3]\n\n if arg_6:\n arg_30 = arg_7\n else:\n arg_31 = read_image(arg_8 + os.sep + arg_26)\n arg_32 = read_image(arg_8 + os.sep + arg_27)\n arg_33 = read_image(arg_8 + os.sep + arg_28)\n arg_34 = read_image(arg_8 + os.sep + arg_29)\n arg_30 = create_character(arg_31, arg_32, arg_33, arg_34)\n\n if arg_5:\n arg_35 = create_random_seq(arg_30, arg_24, arg_25, arg_4)\n else:\n arg_35 = create_seq(arg_30, arg_24, arg_25, arg_4)\n arg_35 = arg_35[..., :arg_3] # limit output channels\n\n arg_36 = arg_17.lookup(arg_26)\n arg_37 = arg_18.lookup(arg_27)\n arg_38 = arg_19.lookup(arg_28)\n arg_39 = arg_20.lookup(arg_29)\n arg_40 = arg_21.lookup(arg_23)\n\n return (arg_35, arg_36, arg_37, arg_38, arg_39, arg_40,\n arg_26, arg_27, arg_28, arg_29, arg_23)\n\n arg_16 = arg_16.map(process_example)\n return arg_16"} +{"_id": "doc_804", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks that `distributions` satisfies all assumptions.\"\"\"\n arg_3 = []\n\n if not _is_iterable(arg_0) or not arg_0:\n raise ValueError('`distributions` must be a list of one or more '\n 'distributions.')\n\n if arg_1 is None:\n arg_4 = [\n dtype_util.base_dtype(arg_6.dtype)\n for arg_6 in arg_0\n if arg_6.dtype is not None\n ]\n if arg_4[1:] != arg_4[:-1]:\n raise TypeError('Distributions must have same dtype; found: {}.'.format(\n set(dtype_util.name(arg_5) for arg_5 in arg_4)))\n\n # Validate event_ndims.\n for arg_6 in arg_0:\n if tensorshape_util.rank(arg_6.event_shape) is not None:\n if tensorshape_util.rank(arg_6.event_shape) != 1:\n raise ValueError('`Distribution` must be vector variate, '\n 'found event nimds: {}.'.format(\n tensorshape_util.rank(arg_6.event_shape)))\n elif arg_2:\n arg_3.append(\n assert_util.assert_equal(\n 1, tf.size(input=arg_6.event_shape_tensor()),\n message='`Distribution` must be vector variate.'))\n\n arg_7 = [arg_6.batch_shape for arg_6 in arg_0]\n if all(tensorshape_util.is_fully_defined(arg_8) for arg_8 in arg_7):\n if arg_7[1:] != arg_7[:-1]:\n raise ValueError('Distributions must have the same `batch_shape`; '\n 'found: {}.'.format(arg_7))\n elif arg_2:\n arg_7 = [\n tensorshape_util.as_list(arg_6.batch_shape) # pylint: disable=g-complex-comprehension\n if tensorshape_util.is_fully_defined(arg_6.batch_shape) else\n arg_6.batch_shape_tensor() for arg_6 in arg_0\n ]\n arg_3.extend(\n assert_util.assert_equal( # pylint: disable=g-complex-comprehension\n arg_9, arg_10,\n message='Distribution `batch_shape`s must be identical.')\n for arg_9, arg_10 in zip(arg_7[1:], arg_7[:-1]))\n\n return arg_3"} +{"_id": "doc_805", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=arg_6.int32,\n arg_8=None):\n \"\"\"Counts the number of occurrences of each value in an integer array `arr`.\n\n Works like `tf.math.bincount`, but provides an `axis` kwarg that specifies\n dimensions to reduce over. With\n `~axis = [i for i in range(arr.ndim) if i not in axis]`,\n this function returns a `Tensor` of shape `[K] + arr.shape[~axis]`.\n\n If `minlength` and `maxlength` are not given, `K = tf.reduce_max(arr) + 1`\n if `arr` is non-empty, and 0 otherwise.\n If `weights` are non-None, then index `i` of the output stores the sum of the\n value in `weights` at each index where the corresponding value in `arr` is\n `i`.\n\n Args:\n arr: An `int32` `Tensor` of non-negative values.\n weights: If non-None, must be the same shape as arr. 
For each value in\n `arr`, the bin will be incremented by the corresponding weight instead of\n 1.\n minlength: If given, ensures the output has length at least `minlength`,\n padding with zeros at the end if necessary.\n maxlength: If given, skips values in `arr` that are equal or greater than\n `maxlength`, ensuring that the output has length at most `maxlength`.\n axis: A `0-D` or `1-D` `int32` `Tensor` (with static values) designating\n dimensions in `arr` to reduce over.\n `Default value:` `None`, meaning reduce over all dimensions.\n dtype: If `weights` is None, determines the type of the output bins.\n name: A name scope for the associated operations (optional).\n\n Returns:\n A vector with the same dtype as `weights` or the given `dtype`. The bin\n values.\n \"\"\"\n with arg_6.compat.v1.name_scope(\n arg_8, 'Func', values=[arg_0, arg_1, arg_2, arg_3,\n arg_4]):\n if arg_4 is None:\n return arg_6.math.bincount(\n arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_5=arg_5)\n\n arg_0 = arg_6.convert_to_tensor(value=arg_0, arg_5=arg_6.int32, arg_8='arr')\n arg_9 = _get_static_ndims(arg_0, expect_static=True)\n\n arg_4 = _make_static_axis_non_negative_list(arg_4, arg_9)\n\n # ~axis from docstring. Dims in arr that are not in axis.\n arg_10 = sorted(set(range(arg_9)).difference(arg_4))\n\n # If we're reducing over everything, just use standard bincount.\n if not arg_10:\n return arg_6.math.bincount(\n arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_5=arg_5)\n\n # Move dims in ~axis to the left, so we can tf.map_fn bincount over them,\n # Producing counts for every index I in ~axis.\n # Thus, flat_arr is not totally flat, it just has the dims in ~axis\n # flattened.\n arg_11 = _move_dims_to_flat_end(arg_0, arg_10, arg_9, right_end=False)\n\n # tf.map_fn over dim 0.\n if arg_1 is None:\n\n def one_bincount(arg_12):\n return arg_6.math.bincount(\n arg_12,\n arg_1=None,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_5=arg_5)\n\n arg_13 = arg_6.map_fn(one_bincount, elems=arg_11, arg_5=arg_5)\n else:\n arg_1 = arg_6.convert_to_tensor(value=arg_1, arg_8='weights')\n _get_static_ndims(arg_1, expect_static=True, expect_ndims=arg_9)\n arg_14 = _move_dims_to_flat_end(\n arg_1, arg_10, arg_9, right_end=False)\n\n def one_bincount(arg_15):\n arg_12, arg_16 = arg_15\n return arg_6.math.bincount(\n arg_12,\n arg_1=arg_16,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_5=arg_5)\n\n arg_13 = arg_6.map_fn(\n one_bincount, elems=[arg_11, arg_14], arg_5=arg_1.dtype)\n\n # flat_counts.shape = [prod(~axis), K], because map_fn stacked on axis 0.\n # bincount needs to have the K bins in axis 0, so transpose...\n arg_17 = arg_6.transpose(a=arg_13, perm=[1, 0])\n\n # Throw in this assert, to ensure shape assumptions are correct.\n _get_static_ndims(arg_17, expect_ndims=2, expect_static=True)\n\n # not_axis_shape = arr.shape[~axis]\n arg_18 = arg_6.gather(arg_6.shape(input=arg_0), indices=arg_10)\n\n # The first index of flat_counts_t indexes bins 0,..,K-1, the rest are ~axis\n arg_19 = arg_6.concat([[-1], arg_18], arg_4=0)\n\n return arg_6.reshape(arg_17, arg_19)"} +{"_id": "doc_806", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=False,\n arg_3=False,\n arg_4=None,\n arg_5=None):\n \"\"\"Bin values into discrete intervals.\n\n Given `edges = [c0, ..., cK]`, defining intervals\n `I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,\n This function returns `bins`, such that:\n `edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.\n\n Args:\n x: Numeric `N-D` `Tensor` with `N > 0`.\n edges: `Tensor` of 
same `dtype` as `x`. The first dimension indexes edges\n of intervals. Must either be `1-D` or have\n `x.shape[1:] == edges.shape[1:]`. If `rank(edges) > 1`, `edges[k]`\n designates a shape `edges.shape[1:]` `Tensor` of bin edges for the\n corresponding dimensions of `x`.\n extend_lower_interval: Python `bool`. If `True`, extend the lowest\n interval `I0` to `(-inf, c1]`.\n extend_upper_interval: Python `bool`. If `True`, extend the upper\n interval `I_{K-1}` to `[c_{K-1}, +inf)`.\n dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.\n This effects the output values when `x` is below/above the intervals,\n which will be `-1/K+1` for `int` types and `NaN` for `float`s.\n At indices where `x` is `NaN`, the output values will be `0` for `int`\n types and `NaN` for floats.\n name: A Python string name to prepend to created ops. Default: 'Func'\n\n Returns:\n bins: `Tensor` with same `shape` as `x` and `dtype`.\n Has whole number values. `bins[i] = k` means the `x[i]` falls into the\n `kth` bin, ie, `edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.\n\n Raises:\n ValueError: If `edges.shape[0]` is determined to be less than 2.\n\n #### Examples\n\n Cut a `1-D` array\n\n ```python\n x = [0., 5., 6., 10., 20.]\n edges = [0., 5., 10.]\n tfp.stats.Func(x, edges)\n ==> [0., 0., 1., 1., np.nan]\n ```\n\n Cut `x` into its deciles\n\n ```python\n x = tf.random_uniform(shape=(100, 200))\n decile_edges = tfp.stats.quantiles(x, num_quantiles=10)\n bins = tfp.stats.Func(x, edges=decile_edges)\n bins.shape\n ==> (100, 200)\n tf.reduce_mean(bins == 0.)\n ==> approximately 0.1\n tf.reduce_mean(bins == 1.)\n ==> approximately 0.1\n ```\n\n \"\"\"\n # TFP users may be surprised to see the \"action\" in the leftmost dim of\n # edges, rather than the rightmost (event) dim. Why?\n # 1. Most likely you created edges by getting quantiles over samples, and\n # quantile/percentile return these edges in the leftmost (sample) dim.\n # 2. Say you have event_shape = [5], then we expect the bin will be different\n # for all 5 events, so the index of the bin should not be in the event dim.\n with tf.compat.v1.name_scope(\n arg_5, default_name='Func', values=[arg_0, arg_1]):\n arg_6 = dtype_util.common_dtype([arg_0, arg_1],\n preferred_dtype=tf.float32)\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_5='edges', arg_4=arg_6)\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_5='x', arg_4=arg_6)\n\n if (tf.compat.dimension_value(arg_1.shape[0]) is not None and\n tf.compat.dimension_value(arg_1.shape[0]) < 2):\n raise ValueError(\n 'First dimension of `edges` must have length > 1 to index 1 or '\n 'more bin. Found: {}'.format(arg_1.shape))\n\n arg_7 = arg_1.shape.ndims == 1 and arg_0.shape.ndims > 1\n\n if arg_7:\n arg_8 = tf.shape(input=arg_0)\n arg_0 = tf.reshape(arg_0, [-1])\n\n if arg_4 is None:\n arg_4 = arg_6\n arg_4 = tf.as_dtype(arg_4)\n\n # Move first dims into the rightmost.\n arg_9 = distribution_util.rotate_transpose(arg_0, shift=-1)\n arg_10 = distribution_util.rotate_transpose(arg_1, shift=-1)\n\n # If...\n # x_permed = [0, 1, 6., 10]\n # edges = [0, 5, 10.]\n # ==> almost_output = [0, 1, 2, 2]\n arg_11 = arg_4 if arg_4 in [tf.int32, tf.int64] else None\n arg_12 = tf.searchsorted(\n sorted_sequence=arg_10,\n values=arg_9,\n side='right',\n out_type=arg_11)\n # Move the rightmost dims back to the leftmost.\n arg_13 = tf.cast(\n distribution_util.rotate_transpose(arg_12, shift=1),\n arg_4)\n\n # In above example, we want [0, 0, 1, 1], so correct this here.\n arg_14 = tf.clip_by_value(arg_13 - 1, tf.cast(0, arg_4),\n tf.cast(tf.shape(input=arg_1)[0] - 2, arg_4))\n\n if not arg_2:\n arg_15 = np.nan if arg_4.is_floating else -1\n arg_14 = tf.where(arg_0 < tf.expand_dims(arg_1[0], 0),\n tf.fill(tf.shape(input=arg_0), tf.cast(arg_15, arg_4)),\n arg_14)\n\n if not arg_3:\n arg_16 = np.nan if arg_4.is_floating else tf.shape(input=arg_1)[0] - 1\n arg_14 = tf.where(arg_0 > tf.expand_dims(arg_1[-1], 0),\n tf.fill(tf.shape(input=arg_0), tf.cast(arg_16, arg_4)), arg_14)\n\n if arg_7:\n arg_14 = tf.reshape(arg_14, arg_8)\n\n return arg_14"}
{"_id": "doc_807", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=False,\n arg_4=False,\n arg_5=None,\n arg_6=None):\n \"\"\"Count how often `x` falls in intervals defined by `edges`.\n\n Given `edges = [c0, ..., cK]`, defining intervals\n `I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,\n This function counts how often `x` falls into each interval.\n\n Values of `x` outside of the intervals cause errors. Consider using\n `extend_lower_interval`, `extend_upper_interval` to deal with this.\n\n Args:\n x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not\n `None`, must have statically known number of dimensions. The\n `axis` kwarg determines which dimensions index iid samples.\n Other dimensions of `x` index \"events\" for which we will compute different\n Funcs.\n edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges\n of intervals. Must either be `1-D` or have `edges.shape[1:]` the same\n as the dimensions of `x` excluding `axis`.\n If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`\n `Tensor` of interval edges for the corresponding dimensions of `x`.\n axis: Optional `0-D` or `1-D` integer `Tensor` with constant\n values. The axis in `x` that index iid samples.\n `Default value:` `None` (treat every dimension as sample dimension).\n extend_lower_interval: Python `bool`. If `True`, extend the lowest\n interval `I0` to `(-inf, c1]`.\n extend_upper_interval: Python `bool`. If `True`, extend the upper\n interval `I_{K-1}` to `[c_{K-1}, +inf)`.\n dtype: The output type (`int32` or `int64`).
`Default value:` `x.dtype`.\n name: A Python string name to prepend to created ops.\n `Default value:` 'Func'\n\n Returns:\n counts: `Tensor` of type `dtype` and, with\n `~axis = [i for i in range(arr.ndim) if i not in axis]`,\n `counts.shape = [edges.shape[0]] + x.shape[~axis]`.\n With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times\n event(s) fell into the `kth` interval of `edges`.\n\n #### Examples\n\n ```python\n # x.shape = [1000, 2]\n # x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).\n x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],\n axis=-1)\n\n # edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].\n edges = [0., 0.5, 1.0, 1.5, 2.0]\n\n tfp.stats.Func(x, edges)\n ==> approximately [500, 500, 500, 500]\n\n tfp.stats.Func(x, edges, axis=0)\n ==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]\n ```\n\n \"\"\"\n with tf.compat.v1.name_scope(arg_6, 'Func', values=[arg_0, arg_1, arg_2]):\n\n # Tensor conversions.\n arg_7 = dtype_util.common_dtype([arg_0, arg_1], preferred_dtype=tf.float32)\n\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_6='x', arg_5=arg_7)\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_6='edges', arg_5=arg_7)\n\n # Move dims in axis to the left end as one flattened dim.\n # After this, x.shape = [n_samples] + E.\n if arg_2 is None:\n arg_0 = tf.reshape(arg_0, shape=[-1])\n else:\n arg_8 = _get_static_ndims(\n arg_0, expect_static=True, expect_ndims_at_least=1)\n arg_2 = _make_static_axis_non_negative_list(arg_2, arg_8)\n if not arg_2:\n raise ValueError('`axis` cannot be empty. Found: {}'.format(arg_2))\n arg_0 = _move_dims_to_flat_end(arg_0, arg_2, arg_8, right_end=False)\n\n # bins.shape = x.shape = [n_samples] + E,\n # and bins[i] is a shape E Tensor of the bins that sample `i` fell into.\n # E is the \"event shape\", which is [] if axis is None.\n arg_9 = find_bins(\n arg_0,\n arg_1=arg_1,\n # If not extending intervals, then values outside the edges will return\n # -1, which gives an error when fed to bincount.\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=tf.int32)\n\n # TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.\n arg_10 = count_integers(\n arg_9,\n # Ensure we get correct output, even if x did not fall into every bin\n minlength=tf.shape(input=arg_1)[0] - 1,\n maxlength=tf.shape(input=arg_1)[0] - 1,\n arg_2=0,\n arg_5=arg_5 or arg_7)\n arg_11 = tf.compat.dimension_value(arg_1.shape[0])\n if arg_11 is not None:\n arg_10.set_shape(\n tf.TensorShape([arg_11 - 1]).concatenate(arg_10.shape[1:]))\n return arg_10"} +{"_id": "doc_808", "title": "", "text": "def Func(arg_0,\n arg_1=False,\n arg_2=None,\n arg_3=None,\n arg_4=None):\n \"\"\"Get static number of dimensions and assert that some expectations are met.\n\n This function returns the number of dimensions 'ndims' of x, as a Python int.\n\n The optional expect arguments are used to check the ndims of x, but this is\n only done if the static ndims of x is not None.\n\n Args:\n x: A Tensor.\n expect_static: Expect `x` to have statically defined `ndims`.\n expect_ndims: Optional Python integer. If provided, assert that x has\n number of dimensions equal to this.\n expect_ndims_no_more_than: Optional Python integer. If provided, assert\n that x has no more than this many dimensions.\n expect_ndims_at_least: Optional Python integer. 
If provided, assert that x\n has at least this many dimensions.\n\n Returns:\n ndims: A Python integer.\n\n Raises:\n ValueError: If any of the expectations above are violated.\n \"\"\"\n arg_5 = arg_0.shape.ndims\n if arg_5 is None:\n arg_6 = tf.get_static_value(tf.shape(input=arg_0))\n if arg_6 is not None:\n arg_5 = arg_6.ndim\n\n if arg_5 is None:\n if arg_1:\n raise ValueError(\n 'Expected argument `x` to have statically defined `ndims`. Found: ' %\n arg_0)\n return\n\n if arg_2 is not None:\n arg_7 = ('Expected argument `x` to have ndims %s. Found tensor %s'\n % (arg_2, arg_0))\n if arg_5 != arg_2:\n raise ValueError(arg_7)\n\n if arg_4 is not None:\n arg_8 = (\n 'Expected argument `x` to have ndims >= %d. Found tensor %s' %\n (arg_4, arg_0))\n if arg_5 < arg_4:\n raise ValueError(arg_8)\n\n if arg_3 is not None:\n arg_9 = (\n 'Expected argument `x` to have ndims <= %d. Found tensor %s' %\n (arg_3, arg_0))\n if arg_5 > arg_3:\n raise ValueError(arg_9)\n\n return arg_5"} +{"_id": "doc_809", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Insert the dims in `axis` back as singletons after being removed.\n\n Args:\n x: `Tensor`.\n axis: Python list of integers.\n\n Returns:\n `Tensor` with same values as `x`, but additional singleton dimensions.\n \"\"\"\n for arg_2 in sorted(arg_1):\n arg_0 = tf.expand_dims(arg_0, arg_1=arg_2)\n return arg_0"} +{"_id": "doc_810", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert possibly negatively indexed axis to non-negative list of ints.\n\n Args:\n axis: Integer Tensor.\n ndims: Number of dimensions into which axis indexes.\n\n Returns:\n A list of non-negative Python integers.\n\n Raises:\n ValueError: If `axis` is not statically defined.\n \"\"\"\n arg_0 = distribution_util.make_non_negative_axis(arg_0, arg_1)\n\n arg_2 = tf.get_static_value(arg_0)\n if arg_2 is None:\n raise ValueError(\n 'Expected argument `axis` to be statically available. Found: %s' %\n arg_0)\n\n # Make at least 1-D.\n arg_0 = arg_2 + np.zeros([1], dtype=arg_2.dtype)\n\n return list(int(arg_3) for arg_3 in arg_0)"} +{"_id": "doc_811", "title": "", "text": "def Func(arg_0):\n \"\"\"Use `top_k` to sort a `Tensor` along the last dimension.\"\"\"\n arg_1, arg_2 = tf.nn.top_k(arg_0, k=tf.shape(input=arg_0)[-1])\n arg_1.set_shape(arg_0.shape)\n return arg_1"} +{"_id": "doc_812", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=0):\n \"\"\"Build an ordered list of Distribution instances for component models.\n\n Args:\n num_timesteps: Python `int` number of timesteps to model.\n param_vals: a list of `Tensor` parameter values in order corresponding to\n `self.parameters`, or a dict mapping from parameter names to values.\n initial_step: optional `int` specifying the initial timestep to model.\n This is relevant when the model contains time-varying components,\n e.g., holidays or seasonality.\n\n Returns:\n component_ssms: a Python list of `LinearGaussianStateSpaceModel`\n Distribution objects, in order corresponding to `self.components`.\n \"\"\"\n\n with tf.compat.v1.name_scope('Func'):\n\n # List the model parameters in canonical order\n arg_4 = arg_0._canonicalize_param_vals_as_map(arg_2)\n arg_5 = [arg_4[p.name] for p in arg_0.parameters]\n\n # Build SSMs for each component model. 
We process the components in\n # canonical order, extracting the parameters for each component from the\n # (ordered) list of parameters.\n arg_6 = arg_5[1:]\n arg_7 = []\n for arg_8 in arg_0.components:\n arg_9 = len(arg_8.parameters)\n arg_10 = arg_6[:arg_9]\n arg_6 = arg_6[arg_9:]\n\n arg_7.append(\n arg_8.make_state_space_model(\n arg_1,\n arg_2=arg_10,\n arg_3=arg_3))\n\n return arg_7"} +{"_id": "doc_813", "title": "", "text": "def Func(arg_0, arg_1=1., arg_2=False, arg_3=None):\n \"\"\"The Amari-alpha Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n When `self_normalized = True`, the Amari-alpha Csiszar-function is:\n\n ```none\n f(u) = { -log(u) + (u - 1), alpha = 0\n { u log(u) - (u - 1), alpha = 1\n { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise\n ```\n\n When `self_normalized = False` the `(u - 1)` terms are omitted.\n\n Warning: when `alpha != 0` and/or `self_normalized = True` this function makes\n non-log-space calculations and may therefore be numerically unstable for\n `|logu| >> 0`.\n\n For more information, see:\n A. Cichocki and S. Amari. \"Families of Alpha-Beta-and GammaDivergences:\n Flexible and Robust Measures of Similarities.\" Entropy, vol. 12, no. 6, pp.\n 1532-1568, 2010.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)\n self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When\n `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even\n when `p, q` are unnormalized measures.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function evaluated\n at `u = exp(logu)`.\n\n Raises:\n TypeError: if `alpha` is `None` or a `Tensor`.\n TypeError: if `self_normalized` is `None` or a `Tensor`.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, \"Func\", [arg_0]):\n if arg_1 is None or tf.is_tensor(arg_1):\n raise TypeError(\"`alpha` cannot be `None` or `Tensor` type.\")\n if (arg_2 is None or tf.is_tensor(arg_2)):\n raise TypeError(\"`self_normalized` cannot be `None` or `Tensor` type.\")\n\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_3=\"logu\")\n\n if arg_1 == 0.:\n arg_4 = -arg_0\n elif arg_1 == 1.:\n arg_4 = tf.exp(arg_0) * arg_0\n else:\n arg_4 = tf.math.expm1(arg_1 * arg_0) / (arg_1 * (arg_1 - 1.))\n\n if not arg_2:\n return arg_4\n\n if arg_1 == 0.:\n return arg_4 + tf.math.expm1(arg_0)\n elif arg_1 == 1.:\n return arg_4 - tf.math.expm1(arg_0)\n else:\n return arg_4 - tf.math.expm1(arg_0) / (arg_1 - 1.)"} +{"_id": "doc_814", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"The reverse Kullback-Leibler Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n When `self_normalized = True`, the KL-reverse Csiszar-function is:\n\n ```none\n f(u) = -log(u) + (u - 1)\n ```\n\n When `self_normalized = False` the `(u - 1)` term is omitted.\n\n Observe that as an f-Divergence, this Csiszar-function implies:\n\n ```none\n D_f[p, q] = KL[q, p]\n ```\n\n The KL is \"reverse\" because in maximum likelihood we think of minimizing `q`\n as in `KL[p, q]`.\n\n Warning: when self_normalized = True` this function makes non-log-space\n calculations and may therefore be numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n self_normalized: 
Python `bool` indicating whether `f'(u=1)=0`. When\n `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even\n when `p, q` are unnormalized measures.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at\n `u = exp(logu)`.\n\n Raises:\n TypeError: if `self_normalized` is `None` or a `Tensor`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_2, \"Func\", [arg_0]):\n return amari_alpha(arg_0, alpha=0., arg_1=arg_1)"} +{"_id": "doc_815", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"The Jensen-Shannon Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:\n\n ```none\n f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)\n ```\n\n When `self_normalized = False` the `(u + 1) log(2)` term is omitted.\n\n Observe that as an f-Divergence, this Csiszar-function implies:\n\n ```none\n D_f[p, q] = KL[p, m] + KL[q, m]\n m(x) = 0.5 p(x) + 0.5 q(x)\n ```\n\n In a sense, this divergence is the \"reverse\" of the Arithmetic-Geometric\n f-Divergence.\n\n This Csiszar-function induces a symmetric f-Divergence, i.e.,\n `D_f[p, q] = D_f[q, p]`.\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n For more information, see:\n Lin, J. \"Divergence measures based on the Shannon entropy.\" IEEE Trans.\n Inf. Th., 37, 145-151, 1991.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When\n `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even\n when `p, q` are unnormalized measures.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function\n evaluated at `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_2, \"Func\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_2=\"logu\")\n arg_3 = arg_0.dtype.as_numpy_dtype\n arg_4 = tf.nn.softplus(arg_0)\n if arg_1:\n arg_4 -= np.log(2).astype(arg_3)\n return tf.exp(arg_0) * arg_0 - (1. 
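`doc_814` above notes that `f(u) = -log(u)` makes the induced f-divergence `D_f[p, q] = KL[q, p]`. A quick NumPy Monte-Carlo check of that identity on two unit-variance Gaussians, whose reverse KL is 0.5 in closed form (an illustrative sketch, not the library routine):

```python
import numpy as np

rng = np.random.default_rng(0)

# q = N(0, 1), p = N(1, 1); KL[q, p] = 0.5 * (mu_p - mu_q)**2 = 0.5.
z = rng.normal(0., 1., size=100_000)
log_q = -0.5 * z**2 - 0.5 * np.log(2. * np.pi)
log_p = -0.5 * (z - 1.)**2 - 0.5 * np.log(2. * np.pi)

logu = log_p - log_q      # log(p(z) / q(z)) with z ~ q
kl_qp = np.mean(-logu)    # f(u) = -log(u)  =>  D_f[p, q] = KL[q, p]
print(kl_qp)              # ~0.5
```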
+ tf.exp(arg_0)) * arg_4"} +{"_id": "doc_816", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"The Pearson Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n The Pearson Csiszar-function is:\n\n ```none\n f(u) = (u - 1)**2\n ```\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at\n `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_1, \"Func\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"logu\")\n return tf.square(tf.math.expm1(arg_0))"} +{"_id": "doc_817", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"The Squared-Hellinger Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n The Squared-Hellinger Csiszar-function is:\n\n ```none\n f(u) = (sqrt(u) - 1)**2\n ```\n\n This Csiszar-function induces a symmetric f-Divergence, i.e.,\n `D_f[p, q] = D_f[q, p]`.\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function\n evaluated at `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_1, \"Func\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"logu\")\n return pearson(0.5 * arg_0)"} +{"_id": "doc_818", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"The T-Power Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n When `self_normalized = True` the T-Power Csiszar-function is:\n\n ```none\n f(u) = s [ u**t - 1 - t(u - 1) ]\n s = { -1 0 < t < 1\n { +1 otherwise\n ```\n\n When `self_normalized = False` the `- t(u - 1)` term is omitted.\n\n This is similar to the `amari_alpha` Csiszar-function, with the associated\n divergence being the same up to factors depending only on `t`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n t: `Tensor` of same `dtype` as `logu` and broadcastable shape.\n self_normalized: Python `bool` indicating whether `f'(u=1)=0`.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function evaluated\n at `u = exp(logu)`.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, \"Func\", [arg_0, arg_1]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_3=\"logu\")\n arg_1 = tf.convert_to_tensor(value=arg_1, dtype=arg_0.dtype.base_dtype, arg_3=\"t\")\n arg_4 = tf.math.expm1(arg_1 * arg_0)\n if arg_2:\n arg_4 -= arg_1 * tf.math.expm1(arg_0)\n arg_4 *= tf.where(tf.logical_and(0. 
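`doc_817` above evaluates the squared-Hellinger function as `pearson(0.5 * logu)`; the underlying identity `(sqrt(u) - 1)**2 = (exp(0.5 * logu) - 1)**2` is easy to confirm numerically (illustrative sketch):

```python
import numpy as np

logu = np.linspace(-3., 3., 7)
pearson_at_half = np.expm1(0.5 * logu) ** 2              # pearson(0.5 * logu)
sq_hellinger = (np.sqrt(np.exp(logu)) - 1.) ** 2         # (sqrt(u) - 1)**2
np.testing.assert_allclose(sq_hellinger, pearson_at_half)
```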
< arg_1, arg_1 < 1.),\n -tf.ones_like(arg_1),\n tf.ones_like(arg_1))\n return arg_4"} +{"_id": "doc_819", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"The log1p-abs Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n The Log1p-Abs Csiszar-function is:\n\n ```none\n f(u) = u**(sign(u-1)) - 1\n ```\n\n This function is so-named because it was invented from the following recipe.\n Choose a convex function g such that g(0)=0 and solve for f:\n\n ```none\n log(1 + f(u)) = g(log(u)).\n <=>\n f(u) = exp(g(log(u))) - 1\n ```\n\n That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis\n is `log`-domain.\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function evaluated\n at `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_1, \"Func\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"logu\")\n return tf.math.expm1(tf.abs(arg_0))"} +{"_id": "doc_820", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"The Jeffreys Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n The Jeffreys Csiszar-function is:\n\n ```none\n f(u) = 0.5 ( u log(u) - log(u) )\n = 0.5 kl_forward + 0.5 kl_reverse\n = symmetrized_csiszar_function(kl_reverse)\n = symmetrized_csiszar_function(kl_forward)\n ```\n\n This Csiszar-function induces a symmetric f-Divergence, i.e.,\n `D_f[p, q] = D_f[q, p]`.\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func_of_u: `float`-like `Tensor` of the Csiszar-function evaluated\n at `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_1, \"Func\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"logu\")\n return 0.5 * tf.math.expm1(arg_0) * arg_0"} +{"_id": "doc_821", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"The Modified-GAN Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n When `self_normalized = True` the modified-GAN (Generative/Adversarial\n Network) Csiszar-function is:\n\n ```none\n f(u) = log(1 + u) - log(u) + 0.5 (u - 1)\n ```\n\n When `self_normalized = False` the `0.5 (u - 1)` is omitted.\n\n The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with\n `self_normalized = False`).\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n self_normalized: Python `bool` indicating whether `f'(u=1)=0`. 
When\n `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even\n when `p, q` are unnormalized measures.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated\n at `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_2, \"chi_square\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_2=\"logu\")\n arg_3 = tf.nn.softplus(arg_0) - arg_0\n if arg_1:\n arg_3 += 0.5 * tf.math.expm1(arg_0)\n return arg_3"} +{"_id": "doc_822", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Calculates the dual Csiszar-function in log-space.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n The Csiszar-dual is defined as:\n\n ```none\n f^*(u) = u f(1 / u)\n ```\n\n where `f` is some other Csiszar-function.\n\n For example, the dual of `kl_reverse` is `kl_forward`, i.e.,\n\n ```none\n f(u) = -log(u)\n f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)\n ```\n\n The dual of the dual is the original function:\n\n ```none\n f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)\n ```\n\n Warning: this function makes non-log-space calculations and may therefore be\n numerically unstable for `|logu| >> 0`.\n\n Args:\n logu: `float`-like `Tensor` representing `log(u)` from above.\n csiszar_function: Python `callable` representing a Csiszar-function over\n log-domain.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of\n `f` at `u = exp(logu)`.\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_2, \"Func\", [arg_0]):\n return tf.exp(arg_0) * arg_1(-arg_0)"} +{"_id": "doc_823", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=None,\n arg_6=None):\n \"\"\"Monte-Carlo approximation of the Csiszar f-Divergence.\n\n A Csiszar-function is a member of,\n\n ```none\n F = { f:R_+ to R : f convex }.\n ```\n\n The Csiszar f-Divergence for Csiszar-function f is given by:\n\n ```none\n D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ]\n ~= m**-1 sum_j^m f( p(x_j) / q(x_j) ),\n where x_j ~iid q(X)\n ```\n\n Tricks: Reparameterization and Score-Gradient\n\n When q is \"reparameterized\", i.e., a diffeomorphic transformation of a\n parameterless distribution (e.g.,\n `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and\n expectation, i.e.,\n `grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}`\n and `s_i = f(x_i), x_i ~iid q(X)`.\n\n However, if q is not reparameterized, TensorFlow's gradient will be incorrect\n since the chain-rule stops at samples of unreparameterized distributions. 
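`doc_822` above defines the Csiszar dual in log-space as `exp(logu) * f(-logu)`. A NumPy sketch confirming the docstring's worked example that the dual of `kl_reverse` is the forward KL function `u log(u)` (names are illustrative):

```python
import numpy as np

def kl_reverse(logu):           # f(u) = -log(u), in log-space
    return -logu

def dual(f, logu):              # f^*(u) = u f(1/u)  =>  exp(logu) * f(-logu)
    return np.exp(logu) * f(-logu)

logu = np.linspace(-2., 2., 9)
# kl_forward is f(u) = u log(u), i.e. exp(logu) * logu in log-space.
np.testing.assert_allclose(dual(kl_reverse, logu), np.exp(logu) * logu)
```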
In\n this circumstance using the Score-Gradient trick results in an unbiased\n gradient, i.e.,\n\n ```none\n grad[ E_q[f(X)] ]\n = grad[ int dx q(x) f(x) ]\n = int dx grad[ q(x) f(x) ]\n = int dx [ q'(x) f(x) + q(x) f'(x) ]\n = int dx q(x) [q'(x) / q(x) f(x) + f'(x) ]\n = int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ]\n = E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ]\n ```\n\n Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is\n usually preferable to set `use_reparametrization = True`.\n\n Example Application:\n\n The Csiszar f-Divergence is a useful framework for variational inference.\n I.e., observe that,\n\n ```none\n f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] )\n <= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ]\n := D_f[p(x, Z), q(Z | x)]\n ```\n\n The inequality follows from the fact that the \"perspective\" of `f`, i.e.,\n `(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and\n `t` is a real. Since the above framework includes the popular Evidence Lower\n BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework\n \"Evidence Divergence Bound Optimization\" (EDBO).\n\n Args:\n f: Python `callable` representing a Csiszar-function in log-space, i.e.,\n takes `p_log_prob(q_samples) - q.log_prob(q_samples)`.\n p_log_prob: Python `callable` taking (a batch of) samples from `q` and\n returning the natural-log of the probability under distribution `p`.\n (In variational inference `p` is the joint distribution.)\n q: `tf.Distribution`-like instance; must implement:\n `reparameterization_type`, `sample(n, seed)`, and `log_prob(x)`.\n (In variational inference `q` is the approximate posterior distribution.)\n num_draws: Integer scalar number of draws used to approximate the\n f-Divergence expectation.\n use_reparametrization: Python `bool`. When `None` (the default),\n automatically set to:\n `q.reparameterization_type == tfd.FULLY_REPARAMETERIZED`.\n When `True` uses the standard Monte-Carlo average. When `False` uses the\n score-gradient trick. (See above for details.) When `False`, consider\n using `csiszar_vimco`.\n seed: Python `int` seed for `q.sample`.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n Func: `float`-like `Tensor` Monte Carlo\n approximation of the Csiszar f-Divergence.\n\n Raises:\n ValueError: if `q` is not a reparameterized distribution and\n `use_reparametrization = True`. A distribution `q` is said to be\n \"reparameterized\" when its samples are generated by transforming the\n samples of another distribution which does not depend on the\n parameterization of `q`. This property ensures the gradient (with respect\n to parameters) is valid.\n TypeError: if `p_log_prob` is not a Python `callable`.\n \"\"\"\n arg_7 = tf.nest.flatten(arg_2.reparameterization_type)\n with tf.compat.v1.name_scope(arg_6, \"Func\",\n [arg_3]):\n if arg_4 is None:\n arg_4 = all(\n arg_8 == tfd.FULLY_REPARAMETERIZED\n for arg_8 in arg_7)\n elif (arg_4 and\n any(arg_8 != tfd.FULLY_REPARAMETERIZED\n for arg_8 in arg_7)):\n # TODO(jvdillon): Consider only raising an exception if the gradient is\n # requested.\n raise ValueError(\n \"Distribution `q` must be reparameterized, i.e., a diffeomorphic \"\n \"transformation of a parameterless distribution. 
(Otherwise this \"\n \"function has a biased gradient.)\")\n if not callable(arg_1):\n raise TypeError(\"`p_log_prob` must be a Python `callable` function.\")\n return monte_carlo.expectation(\n arg_0=lambda q_samples: arg_0(arg_1(q_samples) - arg_2.log_prob(q_samples)),\n samples=arg_2.sample(arg_3, arg_5=arg_5),\n log_prob=arg_2.log_prob, # Only used if use_reparametrization=False.\n arg_4=arg_4)"} +{"_id": "doc_824", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Helper to `csiszar_vimco`; computes `log_avg_u`, `log_sooavg_u`.\n\n `axis = 0` of `logu` is presumed to correspond to iid samples from `q`, i.e.,\n\n ```none\n logu[j] = log(u[j])\n u[j] = p(x, h[j]) / q(h[j] | x)\n h[j] iid~ q(H | x)\n ```\n\n Args:\n logu: Floating-type `Tensor` representing `log(p(x, h) / q(h | x))`.\n name: Python `str` name prefixed to Ops created by this function.\n\n Returns:\n log_avg_u: `logu.dtype` `Tensor` corresponding to the natural-log of the\n average of `u`. The sum of the gradient of `log_avg_u` is `1`.\n log_sooavg_u: `logu.dtype` `Tensor` characterized by the natural-log of the\n average of `u`` except that the average swaps-out `u[i]` for the\n leave-`i`-out Geometric-average. The mean of the gradient of\n `log_sooavg_u` is `1`. Mathematically `log_sooavg_u` is,\n ```none\n log_sooavg_u[i] = log(Avg{h[j ; i] : j=0, ..., m-1})\n h[j ; i] = { u[j] j!=i\n { GeometricAverage{u[k] : k != i} j==i\n ```\n\n \"\"\"\n with tf.compat.v1.name_scope(arg_1, \"Func\", [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=\"logu\")\n\n arg_2 = tf.compat.dimension_value(arg_0.shape.with_rank_at_least(1)[0])\n if arg_2 is None:\n arg_2 = tf.shape(input=arg_0)[0]\n arg_3 = tf.math.log(tf.cast(arg_2, dtype=arg_0.dtype))\n arg_4 = tf.cast(arg_2 - 1, dtype=arg_0.dtype)\n else:\n arg_3 = np.log(arg_2).astype(arg_0.dtype.as_numpy_dtype)\n arg_4 = np.asarray(arg_2 - 1, dtype=arg_0.dtype.as_numpy_dtype)\n\n # Throughout we reduce across axis=0 since this is presumed to be iid\n # samples.\n\n arg_5 = tf.reduce_max(input_tensor=arg_0, axis=0)\n arg_6 = tf.reduce_logsumexp(\n input_tensor=arg_0 - arg_5, axis=0)\n\n # log_loosum_u[i] =\n # = logsumexp(logu[j] : j != i)\n # = log( exp(logsumexp(logu)) - exp(logu[i]) )\n # = log( exp(logsumexp(logu - logu[i])) exp(logu[i]) - exp(logu[i]))\n # = logu[i] + log(exp(logsumexp(logu - logu[i])) - 1)\n # = logu[i] + log(exp(logsumexp(logu) - logu[i]) - 1)\n # = logu[i] + softplus_inverse(logsumexp(logu) - logu[i])\n arg_7 = arg_6 + (arg_5 - arg_0)\n # We use `d != 0` rather than `d > 0.` because `d < 0.` should never\n # happens; if it does we want to complain loudly (which `softplus_inverse`\n # will).\n arg_8 = tf.not_equal(arg_7, 0.)\n arg_9 = tf.where(arg_8, arg_7, tf.ones_like(arg_7))\n arg_10 = arg_0 + tfd.softplus_inverse(arg_9)\n\n arg_11 = np.array(np.inf, dtype=arg_0.dtype.as_numpy_dtype)\n\n # When not(d_ok) and is_positive_and_largest then we manually compute the\n # log_loosum_u. (We can efficiently do this for any one point but not all,\n # hence we still need the above calculation.) 
This is good because when\n # this condition is met, we cannot use the above calculation; its -inf.\n # We now compute the log-leave-out-max-sum, replicate it to every\n # point and make sure to select it only when we need to.\n arg_12 = tf.logical_and(\n arg_0 > 0.,\n tf.equal(arg_0, arg_5[tf.newaxis, ...]))\n arg_13 = tf.reduce_logsumexp(\n input_tensor=tf.where(arg_12,\n tf.fill(tf.shape(input=arg_0), -arg_11), arg_0),\n axis=0,\n keepdims=True)\n arg_13 = tf.tile(\n arg_13,\n multiples=1 + tf.pad(tensor=[arg_2 - 1], paddings=[[0, tf.rank(arg_0) - 1]]))\n\n arg_14 = tf.where(arg_12, arg_13,\n tf.fill(tf.shape(input=arg_7), -arg_11))\n\n arg_15 = tf.where(arg_8, arg_10, arg_14)\n\n # The swap-one-out-sum (\"soosum\") is n different sums, each of which\n # replaces the i-th item with the i-th-left-out average, i.e.,\n # soo_sum_u[i] = [exp(logu) - exp(logu[i])] + exp(mean(logu[!=i]))\n # = exp(log_loosum_u[i]) + exp(looavg_logu[i])\n arg_16 = (tf.reduce_sum(input_tensor=arg_0, axis=0) - arg_0) / arg_4\n arg_17 = tf.reduce_logsumexp(\n input_tensor=tf.stack([arg_15, arg_16]), axis=0)\n\n arg_18 = arg_6 + arg_5 - arg_3\n arg_19 = arg_17 - arg_3\n\n arg_18.set_shape(arg_0.shape.with_rank_at_least(1)[1:])\n arg_19.set_shape(arg_0.shape)\n\n return arg_18, arg_19"} +{"_id": "doc_825", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Like batch_gather, but broadcasts to the left of axis.\"\"\"\n # batch_gather assumes...\n # params.shape = [A1,...,AN, B1,...,BM]\n # indices.shape = [A1,...,AN, C]\n # which gives output of shape\n # [A1,...,AN, C, B1,...,BM]\n # Here we broadcast dims of each to the left of `axis` in params, and left of\n # the rightmost dim in indices, e.g. we can\n # have\n # params.shape = [A1,...,AN, B1,...,BM]\n # indices.shape = [a1,...,aN, C],\n # where ai broadcasts with Ai.\n\n # leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN].\n arg_3 = tf.broadcast_dynamic_shape(\n tf.shape(input=arg_0)[:arg_2],\n tf.shape(input=arg_1)[:-1])\n arg_0 += tf.zeros(\n tf.concat((arg_3, tf.shape(input=arg_0)[arg_2:]), arg_2=0),\n dtype=arg_0.dtype)\n arg_1 += tf.zeros(\n tf.concat((arg_3, tf.shape(input=arg_1)[-1:]), arg_2=0),\n dtype=arg_1.dtype)\n return tf.compat.v1.batch_gather(arg_0, arg_1)"} +{"_id": "doc_826", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Broadcasts the event or distribution parameters.\"\"\"\n if dtype_util.is_integer(arg_0.dtype):\n pass\n elif dtype_util.is_floating(arg_0.dtype):\n # When `validate_args=True` we've already ensured int/float casting\n # is closed.\n arg_0 = tf.cast(arg_0, dtype=tf.int32)\n else:\n raise TypeError(\"`value` should have integer `dtype` or \"\n \"`self.dtype` ({})\".format(arg_2))\n arg_3 = (\n tensorshape_util.rank(arg_1.shape) is not None and\n tensorshape_util.is_fully_defined(arg_1.shape[:-1]) and\n tensorshape_util.is_fully_defined(arg_0.shape))\n if not arg_3 or arg_1.shape[:-1] != arg_0.shape:\n arg_1 *= tf.ones_like(arg_0[..., tf.newaxis],\n dtype=arg_1.dtype)\n arg_4 = tf.shape(input=arg_1)[:-1]\n arg_0 *= tf.ones(arg_4, dtype=arg_0.dtype)\n if tensorshape_util.rank(arg_1.shape) is not None:\n tensorshape_util.set_shape(arg_0, arg_1.shape[:-1])\n\n return arg_0, arg_1"} +{"_id": "doc_827", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6='Func'):\n r\"\"\"Importance sampling with a positive function, in log-space.\n\n With \\\\(p(z) := exp^{log_p(z)}\\\\), and \\\\(f(z) = exp{log_f(z)}\\\\),\n this `Op` returns\n\n 
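The `log_loosum_u` derivation in `doc_824` above reduces a leave-one-out log-sum to `logu[i] + log(expm1(logsumexp(logu) - logu[i]))`. A NumPy check of that identity against direct deletion (sketch; assumes SciPy for `logsumexp`):

```python
import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(0)
logu = rng.normal(size=7)
n = logu.shape[0]

print(logsumexp(logu) - np.log(n))   # log_avg_u: log of the plain average

# log_loosum_u[i] = log(sum_{j != i} exp(logu[j]))
#                 = logu[i] + log(expm1(logsumexp(logu) - logu[i]))
loo = logu + np.log(np.expm1(logsumexp(logu) - logu))
direct = np.array([logsumexp(np.delete(logu, i)) for i in range(n)])
np.testing.assert_allclose(loo, direct)
```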
\\\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\\\)\n \\\\(\\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\\\)\n \\\\(= Log[E_p[f(Z)]]\\\\)\n\n This integral is done in log-space with max-subtraction to better handle the\n often extreme values that `f(z) p(z) / q(z)` can take on.\n\n In contrast to `expectation_importance_sampler`, this `Op` returns values in\n log-space.\n\n\n User supplies either `Tensor` of samples `z`, or number of samples to draw `n`\n\n Args:\n log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with\n shape broadcastable to `q.batch_shape`.\n For example, `log_f` works \"just like\" `sampling_dist_q.log_prob`.\n log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with\n shape broadcastable to `q.batch_shape`.\n For example, `log_p` works \"just like\" `q.log_prob`.\n sampling_dist_q: The sampling distribution.\n `tfp.distributions.Distribution`.\n `float64` `dtype` recommended.\n `log_p` and `q` should be supported on the same set.\n z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.\n n: Integer `Tensor`. Number of samples to generate if `z` is not provided.\n seed: Python integer to seed the random number generator.\n name: A name to give this `Op`.\n\n Returns:\n Logarithm of the importance sampling estimate. `Tensor` with `shape` equal\n to batch shape of `q`, and `dtype` = `q.dtype`.\n \"\"\"\n arg_7 = arg_2\n with tf.name_scope(arg_6):\n arg_3 = _get_samples(arg_7, arg_3, arg_4, arg_5)\n arg_8 = arg_0(arg_3) + arg_1(arg_3) - arg_7.log_prob(arg_3)\n return _logspace_mean(arg_8)"} +{"_id": "doc_828", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Broadcasts the event or samples.\"\"\"\n # This is the shape of self.samples, without the samples axis, i.e. the shape\n # of the result of a call to dist.sample(). This way we can broadcast it with\n # event to get a properly-sized event, then add the singleton dim back at\n # -event_ndims - 1.\n arg_3 = tf.concat(\n [tf.shape(input=arg_1)[:-arg_2 - 1],\n tf.shape(input=arg_1)[tf.rank(arg_1) - arg_2:]],\n axis=0)\n arg_0 *= tf.ones(arg_3, dtype=arg_0.dtype)\n arg_0 = tf.expand_dims(arg_0, axis=-arg_2 - 1)\n arg_1 *= tf.ones_like(arg_0, dtype=arg_1.dtype)\n\n return arg_0, arg_1"} +{"_id": "doc_829", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=1e-8,\n arg_3=0,\n arg_4=0,\n arg_5=None,\n arg_6=50,\n arg_7=1,\n arg_8=None,\n arg_9=None):\n \"\"\"Applies the BFGS algorithm to Func a differentiable function.\n\n Performs unconstrained minimization of a differentiable function using the\n BFGS scheme. 
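`doc_827` above estimates `Log[E_p[f(Z)]]` by importance sampling in log-space with max-subtraction handled by `logsumexp`. A hedged NumPy/SciPy sketch against a closed-form target, `E_{N(0,1)}[exp(Z)] = exp(1/2)`, so the log-estimate should land near 0.5:

```python
import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

rng = np.random.default_rng(0)
n = 200_000

q = norm(loc=1., scale=1.)            # proposal distribution
z = q.rvs(size=n, random_state=rng)

log_f = z                              # f(z) = exp(z)  =>  log f(z) = z
log_p = norm(0., 1.).logpdf(z)         # target p = N(0, 1)

# Log[ n^-1 * sum_i f(z_i) p(z_i) / q(z_i) ]  ~=  Log[ E_p[f(Z)] ]
estimate = logsumexp(log_f + log_p - q.logpdf(z)) - np.log(n)
print(estimate)                        # ~0.5
```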
For details of the algorithm, see [Nocedal and Wright(2006)][1].\n\n ### Usage:\n\n The following example demonstrates the BFGS optimizer attempting to find the\n minimum for a simple two dimensional quadratic objective function.\n\n ```python\n minimum = np.array([1.0, 1.0]) # The center of the quadratic bowl.\n scales = np.array([2.0, 3.0]) # The scales along the two axes.\n\n # The objective function and the gradient.\n def quadratic(x):\n value = tf.reduce_sum(scales * (x - minimum) ** 2)\n return value, tf.gradients(value, x)[0]\n\n start = tf.constant([0.6, 0.8]) # Starting point for the search.\n optim_results = tfp.optimizer.bfgs_Func(\n quadratic, initial_position=start, tolerance=1e-8)\n\n with tf.Session() as session:\n results = session.run(optim_results)\n # Check that the search converged\n assert(results.converged)\n # Check that the argmin is close to the actual value.\n np.testing.assert_allclose(results.position, minimum)\n # Print out the total number of function evaluations it took. Should be 6.\n print (\"Function evaluations: %d\" % results.num_objective_evaluations)\n ```\n\n ### References:\n [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in\n Operations Research. pp 136-140. 2006\n http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf\n\n Args:\n value_and_gradients_function: A Python callable that accepts a point as a\n real `Tensor` and returns a tuple of `Tensor`s of real dtype containing\n the value of the function and its gradient at that point. The function\n to be Funcd. The input should be of shape `[..., n]`, where `n` is\n the size of the domain of input points, and all others are batching\n dimensions. The first component of the return value should be a real\n `Tensor` of matching shape `[...]`. The second component (the gradient)\n should also be of shape `[..., n]` like the input value to the function.\n initial_position: real `Tensor` of shape `[..., n]`. The starting point, or\n points when using batching dimensions, of the search procedure. At these\n points the function value and the gradient norm should be finite.\n tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance\n for the procedure. If the supremum norm of the gradient vector is below\n this number, the algorithm is stopped.\n x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the\n position between one iteration and the next is smaller than this number,\n the algorithm is stopped.\n f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change\n in the objective value between one iteration and the next is smaller\n than this value, the algorithm is stopped.\n initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype\n as the components of the output of the `value_and_gradients_function`.\n If specified, the shape should broadcastable to shape `[..., n, n]`; e.g.\n if a single `[n, n]` matrix is provided, it will be automatically\n broadcasted to all batches. Alternatively, one can also specify a\n different hessian estimate for each batch member.\n For the correctness of the algorithm, it is required that this parameter\n be symmetric and positive definite. Specifies the starting estimate for\n the inverse of the Hessian at the initial point. If not specified,\n the identity matrix is used as the starting estimate for the\n inverse Hessian.\n max_iterations: Scalar positive int32 `Tensor`. 
The maximum number of\n iterations for BFGS updates.\n parallel_iterations: Positive integer. The number of iterations allowed to\n run in parallel.\n stopping_condition: (Optional) A Python function that takes as input two\n Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor.\n The input tensors are `converged` and `failed`, indicating the current\n status of each respective batch member; the return value states whether\n the algorithm should stop. The default is tfp.optimizer.converged_all\n which only stops when all batch members have either converged or failed.\n An alternative is tfp.optimizer.converged_any which stops as soon as one\n batch member has converged, or when all have failed.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'Func' is used.\n\n Returns:\n optimizer_results: A namedtuple containing the following items:\n converged: boolean tensor of shape `[...]` indicating for each batch\n member whether the minimum was found within tolerance.\n failed: boolean tensor of shape `[...]` indicating for each batch\n member whether a line search step failed to find a suitable step size\n satisfying Wolfe conditions. In the absence of any constraints on the\n number of objective evaluations permitted, this value will\n be the complement of `converged`. However, if there is\n a constraint and the search stopped due to available\n evaluations being exhausted, both `failed` and `converged`\n will be simultaneously False.\n num_objective_evaluations: The total number of objective\n evaluations performed.\n position: A tensor of shape `[..., n]` containing the last argument value\n found during the search from each starting point. If the search\n converged, then this value is the argmin of the objective function.\n objective_value: A tensor of shape `[...]` with the value of the\n objective function at the `position`. If the search converged, then\n this is the (local) minimum of the objective function.\n objective_gradient: A tensor of shape `[..., n]` containing the gradient\n of the objective function at the `position`. If the search converged\n the max-norm of this tensor should be below the tolerance.\n inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the\n inverse of the estimated Hessian.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_9, 'Func',\n [arg_1, arg_2, arg_5]):\n arg_1 = tf.convert_to_tensor(\n value=arg_1, arg_9='initial_position')\n arg_10 = arg_1.dtype.base_dtype\n arg_2 = tf.convert_to_tensor(\n value=arg_2, arg_10=arg_10, arg_9='grad_tolerance')\n arg_4 = tf.convert_to_tensor(\n value=arg_4, arg_10=arg_10, arg_9='f_relative_tolerance')\n arg_3 = tf.convert_to_tensor(\n value=arg_3, arg_10=arg_10, arg_9='x_tolerance')\n arg_6 = tf.convert_to_tensor(\n value=arg_6, arg_9='max_iterations')\n\n arg_11 = distribution_util.prefer_static_shape(arg_1)\n arg_12, arg_13 = arg_11[:-1], arg_11[-1]\n\n if arg_8 is None:\n arg_8 = bfgs_utils.converged_all\n\n # Control inputs are an optional list of tensors to evaluate before\n # the start of the search procedure. 
These can be used to assert the\n # validity of inputs to the search procedure.\n arg_14 = None\n\n if arg_5 is None:\n # Create a default initial inverse Hessian.\n arg_15 = tf.eye(arg_13,\n arg_12=arg_12,\n arg_10=arg_10,\n arg_9='initial_inv_hessian')\n else:\n # If an initial inverse Hessian is supplied, compute some control inputs\n # to ensure that it is positive definite and symmetric.\n arg_15 = tf.convert_to_tensor(\n value=arg_5,\n arg_10=arg_10,\n arg_9='initial_inv_hessian')\n arg_14 = _inv_hessian_control_inputs(arg_15)\n arg_16 = tf.concat([arg_12, [arg_13, arg_13]], 0)\n arg_15 = tf.broadcast_to(arg_15, arg_16)\n\n # The `state` here is a `BfgsOptimizerResults` tuple with values for the\n # current state of the algorithm computation.\n def _cond(arg_17):\n \"\"\"Continue if iterations remain and stopping condition is not met.\"\"\"\n return ((arg_17.num_iterations < arg_6) &\n tf.logical_not(arg_8(arg_17.converged, arg_17.failed)))\n\n def _body(arg_17):\n \"\"\"Main optimization loop.\"\"\"\n arg_18 = _get_search_direction(arg_17.inverse_hessian_estimate,\n arg_17.objective_gradient)\n arg_19 = tf.reduce_sum(\n input_tensor=arg_17.objective_gradient * arg_18, axis=-1)\n\n # If the derivative at the start point is not negative, recompute the\n # search direction with the initial inverse Hessian.\n arg_20 = (~arg_17.failed & ~arg_17.converged &\n (arg_19 >= 0))\n\n arg_21 = _get_search_direction(\n arg_15, arg_17.objective_gradient)\n\n arg_22 = tf.where(\n arg_20, arg_21, arg_18)\n arg_23 = tf.where(\n arg_20, arg_15, arg_17.inverse_hessian_estimate)\n\n # Replace the hessian estimate in the state, in case it had to be reset.\n arg_24 = bfgs_utils.update_fields(\n arg_17, inverse_hessian_estimate=arg_23)\n\n arg_25 = bfgs_utils.line_search_step(\n arg_24,\n arg_0, arg_22,\n arg_2, arg_4, arg_3, arg_8)\n\n # Update the inverse Hessian if needed and continue.\n return [_update_inv_hessian(arg_24, arg_25)]\n\n arg_26 = bfgs_utils.get_initial_state_args(\n arg_0,\n arg_1,\n arg_2,\n arg_14)\n arg_26['inverse_hessian_estimate'] = arg_15\n arg_27 = BfgsOptimizerResults(**arg_26)\n return tf.while_loop(\n cond=_cond,\n body=_body,\n loop_vars=[arg_27],\n arg_7=arg_7)[0]"} +{"_id": "doc_830", "title": "", "text": "def Func(arg_0):\n \"\"\"Computes control inputs to validate a provided inverse Hessian.\n\n These ensure that the provided inverse Hessian is positive definite and\n symmetric.\n\n Args:\n inv_hessian: The starting estimate for the inverse of the Hessian at the\n initial point.\n\n Returns:\n A list of tf.Assert ops suitable for use with tf.control_dependencies.\n \"\"\"\n # The easiest way to validate if the inverse Hessian is positive definite is\n # to compute its Cholesky decomposition.\n arg_1 = tf.reduce_all(\n input_tensor=tf.math.is_finite(tf.linalg.cholesky(arg_0)),\n axis=[-1, -2])\n\n # Then check that the supplied inverse Hessian is symmetric.\n arg_2 = tf.equal(bfgs_utils.norm(\n arg_0 - _batch_transpose(arg_0), dims=2), 0)\n\n # Simply adding a control dependencies on these results is not enough to\n # trigger them, we need to add asserts on the results.\n return [tf.Assert(arg_1,\n ['Initial inverse Hessian is not positive definite.',\n arg_0]),\n tf.Assert(arg_2,\n ['Initial inverse Hessian is not symmetric',\n arg_0])]"} +{"_id": "doc_831", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update the BGFS state by computing the next inverse hessian estimate.\"\"\"\n # Only update the inverse Hessian if not already failed or converged.\n arg_2 = 
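The quadratic example in `doc_829`'s docstring has a known argmin, so it can be cross-checked independently. A sketch using SciPy's BFGS under the same value-and-gradient convention (`jac=True` means the callable returns `(value, gradient)`); this is an outside sanity check, not part of the corpus:

```python
import numpy as np
from scipy.optimize import minimize

minimum = np.array([1.0, 1.0])   # the center of the quadratic bowl
scales = np.array([2.0, 3.0])    # the scales along the two axes

def quadratic(x):
    value = np.sum(scales * (x - minimum) ** 2)
    grad = 2. * scales * (x - minimum)
    return value, grad

res = minimize(quadratic, x0=np.array([0.6, 0.8]),
               jac=True, method='BFGS', tol=1e-8)
np.testing.assert_allclose(res.x, minimum, atol=1e-6)
```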
~arg_1.converged & ~arg_1.failed\n\n # Compute the normalization term (y^T . s), should not update if is singular.\n arg_3 = arg_1.objective_gradient - arg_0.objective_gradient\n arg_4 = arg_1.position - arg_0.position\n arg_5 = tf.reduce_sum(\n input_tensor=arg_3 * arg_4, axis=-1)\n arg_2 = arg_2 & ~tf.equal(arg_5, 0)\n\n def _doFunc():\n arg_6 = _bfgs_inv_hessian_update(\n arg_3, arg_4, arg_5,\n arg_0.inverse_hessian_estimate)\n return bfgs_utils.update_fields(\n arg_1,\n inverse_hessian_estimate=tf.where(arg_2,\n arg_6,\n arg_0.inverse_hessian_estimate))\n\n return prefer_static.cond(\n tf.reduce_any(input_tensor=arg_2),\n _doFunc,\n lambda: arg_1)"} +{"_id": "doc_832", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"Applies the BFGS update to the inverse Hessian estimate.\n\n The BFGS update rule is (note A^T denotes the transpose of a vector/matrix A).\n\n ```None\n rho = 1/(grad_delta^T * position_delta)\n U = (I - rho * position_delta * grad_delta^T)\n H_1 = U * H_0 * U^T + rho * position_delta * position_delta^T\n ```\n\n Here, `H_0` is the inverse Hessian estimate at the previous iteration and\n `H_1` is the next estimate. Note that `*` should be interpreted as the\n matrix multiplication (with the understanding that matrix multiplication for\n scalars is usual multiplication and for matrix with vector is the action of\n the matrix on the vector.).\n\n The implementation below utilizes an expanded version of the above formula\n to avoid the matrix multiplications that would be needed otherwise. By\n expansion it is easy to see that one only needs matrix-vector or\n vector-vector operations. The expanded version is:\n\n ```None\n f = 1 + rho * (grad_delta^T * H_0 * grad_delta)\n H_1 - H_0 = - rho * [position_delta * (H_0 * grad_delta)^T +\n (H_0 * grad_delta) * position_delta^T] +\n rho * f * [position_delta * position_delta^T]\n ```\n\n All the terms in square brackets are matrices and are constructed using\n vector outer products. All the other terms on the right hand side are scalars.\n Also worth noting that the first and second lines are both rank 1 updates\n applied to the current inverse Hessian estimate.\n\n Args:\n grad_delta: Real `Tensor` of shape `[..., n]`. The difference between the\n gradient at the new position and the old position.\n position_delta: Real `Tensor` of shape `[..., n]`. The change in position\n from the previous iteration to the current one.\n normalization_factor: Real `Tensor` of shape `[...]`. Should be equal to\n `grad_delta^T * position_delta`, i.e. `1/rho` as defined above.\n inv_hessian_estimate: Real `Tensor` of shape `[..., n, n]`. The previous\n estimate of the inverse Hessian. Should be positive definite and\n symmetric.\n\n Returns:\n A tuple containing the following fields\n is_valid: A Boolean `Tensor` of shape `[...]` indicating batch members\n where the update succeeded. The update can fail if the position change\n becomes orthogonal to the gradient change.\n next_inv_hessian_estimate: A `Tensor` of shape `[..., n, n]`. The next\n Hessian estimate updated using the BFGS update scheme. 
If the\n `inv_hessian_estimate` is symmetric and positive definite, the\n `next_inv_hessian_estimate` is guaranteed to satisfy the same\n conditions.\n \"\"\"\n # The quadratic form: y^T.H.y; where H is the inverse Hessian and y is the\n # gradient change.\n arg_4 = _mul_right(arg_3, arg_0)\n arg_5 = tf.reduce_sum(\n input_tensor=arg_4 * arg_0, axis=-1)\n\n # The first rank 1 update term requires the outer product: s.y^T.\n arg_6 = _tensor_product(arg_1, arg_4)\n\n def _expand_scalar(arg_7):\n # Expand dimensions of a batch of scalars to multiply or divide a matrix.\n return arg_7[..., tf.newaxis, tf.newaxis]\n\n # Symmetrize\n arg_6 += _tensor_product(arg_4, arg_1)\n arg_8 = _tensor_product(arg_1, arg_1)\n with tf.control_dependencies([arg_8]):\n arg_8 *= _expand_scalar(\n 1 + arg_5 / arg_2)\n\n return (arg_3 +\n (arg_8 - arg_6) / _expand_scalar(arg_2))"} +{"_id": "doc_833", "title": "", "text": "def Func(arg_0):\n \"\"\"Transpose a possibly batched matrix.\n\n Args:\n mat: A `tf.Tensor` of shape `[..., n, m]`.\n\n Returns:\n A tensor of shape `[..., m, n]` with matching batch dimensions.\n \"\"\"\n arg_1 = distribution_util.prefer_static_rank(arg_0)\n arg_2 = tf.range(arg_1)\n arg_2 = tf.concat([arg_2[:-2], [arg_2[-1], arg_2[-2]]], axis=0)\n return tf.transpose(a=arg_0, arg_2=arg_2)"} +{"_id": "doc_834", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Maybe add `ndims` ones to `x.shape` on the right.\n\n If `ndims` is zero, this is a no-op; otherwise, we will create and return a\n new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the\n right side. If the shape of `x` is known statically, the shape of the return\n value will be as well.\n\n Args:\n x: The `Tensor` we'll return a reshaping of.\n ndims: Python `integer` number of ones to pad onto `x.shape`.\n Returns:\n If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`\n with `ndims` ones concatenated on the right side. If possible, returns a\n `Tensor` whose shape is known statically.\n Raises:\n ValueError: if `ndims` is not a Python `integer` greater than or equal to\n zero.\n \"\"\"\n if not (isinstance(arg_1, int) and arg_1 >= 0):\n raise ValueError(\n '`ndims` must be a Python `integer` greater than zero. Got: {}'\n .format(arg_1))\n if arg_1 == 0:\n return arg_0\n arg_0 = tf.convert_to_tensor(value=arg_0)\n arg_2 = arg_0.shape\n arg_3 = distribution_util.pad(\n tf.shape(input=arg_0), axis=0, back=True, value=1, count=arg_1)\n arg_0 = tf.reshape(arg_0, arg_3)\n arg_0.set_shape(arg_2.concatenate([1]*arg_1))\n return arg_0"} +{"_id": "doc_835", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return `Tensor` with right-most ndims summed.\n\n Args:\n x: the `Tensor` whose right-most `ndims` dimensions to sum\n ndims: number of right-most dimensions to sum.\n\n Returns:\n A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most\n dimensions. If the shape of `x` is statically known, the result will also\n have statically known shape. 
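`doc_832` above expands the BFGS update `H_1 = U H_0 U^T + rho s s^T` with `U = I - rho s y^T` into matrix-vector operations. A direct NumPy transcription of the unexpanded rule, plus a check of the secant condition `H_1 y = s` that any BFGS update must satisfy (illustrative sketch):

```python
import numpy as np

def bfgs_inv_hessian_update(y, s, h0):
    """One BFGS update of the inverse-Hessian estimate h0.

    y: gradient change, s: position change, both of shape [n].
    """
    rho = 1. / np.dot(y, s)
    u = np.eye(y.shape[0]) - rho * np.outer(s, y)
    return u @ h0 @ u.T + rho * np.outer(s, s)

rng = np.random.default_rng(0)
y, s = rng.normal(size=3), rng.normal(size=3)
h1 = bfgs_inv_hessian_update(y, s, np.eye(3))
# Secant condition: the updated estimate maps y back to s.
np.testing.assert_allclose(h1 @ y, s)
```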
Otherwise, the resulting shape will only be\n known at runtime.\n \"\"\"\n arg_0 = tf.convert_to_tensor(value=arg_0)\n if arg_0.shape.ndims is not None:\n arg_2 = tf.range(arg_0.shape.ndims - arg_1, arg_0.shape.ndims)\n else:\n arg_2 = tf.range(tf.rank(arg_0) - arg_1, tf.rank(arg_0))\n return tf.reduce_sum(input_tensor=arg_0, axis=arg_2)"} +{"_id": "doc_836", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"A sqrt function whose gradient at zero is very large but finite.\n\n Args:\n x: a `Tensor` whose sqrt is to be computed.\n name: a Python `str` prefixed to all ops created by this function.\n Default `None` (i.e., \"Func\").\n\n Returns:\n sqrt: the square root of `x`, with an overridden gradient at zero\n grad: a gradient function, which is the same as sqrt's gradient everywhere\n except at zero, where it is given a large finite value, instead of `inf`.\n\n Raises:\n TypeError: if `tf.convert_to_tensor(x)` is not a `float` type.\n\n Often in kernel functions, we need to compute the L2 norm of the difference\n between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the\n case where `x` and `y` are identical, e.g., on the diagonal of a kernel\n matrix, we get `NaN`s when we take gradients with respect to the inputs. To\n see, this consider the forward pass:\n\n ```\n [x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] -->\n (x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2))\n ```\n\n When we backprop through this forward pass, the `sqrt` yields an `inf` because\n `grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at\n the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get\n `0 * inf`, which is `NaN`.\n\n We'd like to avoid these `NaN`s, since they infect the rest of the connected\n computation graph. Practically, when two inputs to a kernel function are\n equal, we are in one of two scenarios:\n 1. We are actually computing k(x, x), in which case norm(x - x) is\n identically zero, independent of x. In this case, we'd like the\n gradient to reflect this independence: it should be zero.\n 2. We are computing k(x, y), and x just *happens* to have the same value\n as y. The gradient at such inputs is in fact ill-defined (there is a\n cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are,\n however, an infinite number of sub-gradients, all of which are valid at\n all such inputs. By symmetry, there is exactly one which is \"special\":\n zero, and we elect to use that value here. In practice, having two\n identical inputs to a kernel matrix is probably a pathological\n situation to be avoided, but that is better resolved at a higher level\n than this.\n\n To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine\n the gradient at zero. We assign it to be a very large value, specifically\n the sqrt of the max value of the floating point dtype of the input. 
We use\n the sqrt (as opposed to just using the max floating point value) to avoid\n potential overflow when combining this value with others downstream.\n \"\"\"\n with tf.compat.v1.name_scope(arg_1, 'Func', [arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1='x')\n if not arg_0.dtype.is_floating:\n raise TypeError('Input `x` must be floating type.')\n def grad(arg_2):\n arg_3 = np.sqrt(np.finfo(arg_0.dtype.as_numpy_dtype()).max)\n arg_4 = tf.where(\n tf.equal(arg_0, 0), tf.fill(tf.shape(input=arg_0), arg_3),\n 0.5 * tf.math.rsqrt(arg_0))\n return arg_2 * arg_4\n return tf.sqrt(arg_0), grad"} +{"_id": "doc_837", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=10,\n arg_3=1e-8,\n arg_4=0,\n arg_5=0,\n arg_6=None,\n arg_7=50,\n arg_8=1,\n arg_9=None,\n arg_10=None):\n \"\"\"Applies the L-BFGS algorithm to Func a differentiable function.\n\n Performs unconstrained minimization of a differentiable function using the\n L-BFGS scheme. See [Nocedal and Wright(2006)][1] for details of the algorithm.\n\n ### Usage:\n\n The following example demonstrates the L-BFGS optimizer attempting to find the\n minimum for a simple high-dimensional quadratic objective function.\n\n ```python\n # A high-dimensional quadratic bowl.\n ndims = 60\n minimum = np.ones([ndims], dtype='float64')\n scales = np.arange(ndims, dtype='float64') + 1.0\n\n # The objective function and the gradient.\n def quadratic(x):\n value = tf.reduce_sum(scales * (x - minimum) ** 2)\n return value, tf.gradients(value, x)[0]\n\n start = np.arange(ndims, 0, -1, dtype='float64')\n optim_results = tfp.optimizer.lbfgs_Func(\n quadratic, initial_position=start, num_correction_pairs=10,\n tolerance=1e-8)\n\n with tf.Session() as session:\n results = session.run(optim_results)\n # Check that the search converged\n assert(results.converged)\n # Check that the argmin is close to the actual value.\n np.testing.assert_allclose(results.position, minimum)\n ```\n\n ### References:\n\n [1] Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series\n in Operations Research. pp 176-180. 2006\n\n http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf\n\n Args:\n value_and_gradients_function: A Python callable that accepts a point as a\n real `Tensor` and returns a tuple of `Tensor`s of real dtype containing\n the value of the function and its gradient at that point. The function\n to be Funcd. The input is of shape `[..., n]`, where `n` is the size\n of the domain of input points, and all others are batching dimensions.\n The first component of the return value is a real `Tensor` of matching\n shape `[...]`. The second component (the gradient) is also of shape\n `[..., n]` like the input value to the function.\n initial_position: Real `Tensor` of shape `[..., n]`. The starting point, or\n points when using batching dimensions, of the search procedure. At these\n points the function value and the gradient norm should be finite.\n num_correction_pairs: Positive integer. Specifies the maximum number of\n (position_delta, gradient_delta) correction pairs to keep as implicit\n approximation of the Hessian matrix.\n tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance\n for the procedure. If the supremum norm of the gradient vector is below\n this number, the algorithm is stopped.\n x_tolerance: Scalar `Tensor` of real dtype. 
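`doc_836` above replaces the infinite gradient of `sqrt` at zero with a large finite value, `sqrt(dtype max)`. A minimal TF2 sketch of the same idea via `tf.custom_gradient` (assumed TF2 eager; a sketch, not the library implementation):

```python
import numpy as np
import tensorflow as tf

@tf.custom_gradient
def safe_sqrt(x):
    def grad(dy):
        # sqrt of the dtype max, to avoid overflow when combined downstream.
        big = np.sqrt(np.finfo(x.dtype.as_numpy_dtype).max)
        finite_grad = tf.where(tf.equal(x, 0.),
                               tf.fill(tf.shape(x), big),
                               0.5 * tf.math.rsqrt(x))
        return dy * finite_grad
    return tf.sqrt(x), grad

x = tf.constant([0., 4.])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = safe_sqrt(x)
print(tape.gradient(y, x))  # [very large but finite, 0.25] -- no inf/NaN at 0.
```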
If the absolute change in the\n position between one iteration and the next is smaller than this number,\n the algorithm is stopped.\n f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change\n in the objective value between one iteration and the next is smaller\n than this value, the algorithm is stopped.\n initial_inverse_hessian_estimate: None. Option currently not supported.\n max_iterations: Scalar positive int32 `Tensor`. The maximum number of\n iterations for L-BFGS updates.\n parallel_iterations: Positive integer. The number of iterations allowed to\n run in parallel.\n stopping_condition: (Optional) A Python function that takes as input two\n Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor.\n The input tensors are `converged` and `failed`, indicating the current\n status of each respective batch member; the return value states whether\n the algorithm should stop. The default is tfp.optimizer.converged_all\n which only stops when all batch members have either converged or failed.\n An alternative is tfp.optimizer.converged_any which stops as soon as one\n batch member has converged, or when all have failed.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'Func' is used.\n\n Returns:\n optimizer_results: A namedtuple containing the following items:\n converged: Scalar boolean tensor indicating whether the minimum was\n found within tolerance.\n failed: Scalar boolean tensor indicating whether a line search\n step failed to find a suitable step size satisfying Wolfe\n conditions. In the absence of any constraints on the\n number of objective evaluations permitted, this value will\n be the complement of `converged`. However, if there is\n a constraint and the search stopped due to available\n evaluations being exhausted, both `failed` and `converged`\n will be simultaneously False.\n num_objective_evaluations: The total number of objective\n evaluations performed.\n position: A tensor containing the last argument value found\n during the search. If the search converged, then\n this value is the argmin of the objective function.\n objective_value: A tensor containing the value of the objective\n function at the `position`. If the search converged, then this is\n the (local) minimum of the objective function.\n objective_gradient: A tensor containing the gradient of the objective\n function at the `position`. 
If the search converged the\n max-norm of this tensor should be below the tolerance.\n position_deltas: A tensor encoding information about the latest\n changes in `position` during the algorithm execution.\n gradient_deltas: A tensor encoding information about the latest\n changes in `objective_gradient` during the algorithm execution.\n \"\"\"\n if arg_6 is not None:\n raise NotImplementedError(\n 'Support of initial_inverse_hessian_estimate arg not yet implemented')\n\n if arg_9 is None:\n arg_9 = bfgs_utils.converged_all\n\n with tf.compat.v1.name_scope(arg_10, 'Func', [arg_1, arg_3]):\n arg_1 = tf.convert_to_tensor(\n value=arg_1, arg_10='initial_position')\n arg_11 = arg_1.dtype.base_dtype\n arg_3 = tf.convert_to_tensor(\n value=arg_3, arg_11=arg_11, arg_10='grad_tolerance')\n arg_5 = tf.convert_to_tensor(\n value=arg_5, arg_11=arg_11, arg_10='f_relative_tolerance')\n arg_4 = tf.convert_to_tensor(\n value=arg_4, arg_11=arg_11, arg_10='x_tolerance')\n arg_7 = tf.convert_to_tensor(\n value=arg_7, arg_10='max_iterations')\n\n # The `state` here is a `LBfgsOptimizerResults` tuple with values for the\n # current state of the algorithm computation.\n def _cond(arg_12):\n \"\"\"Continue if iterations remain and stopping condition is not met.\"\"\"\n return ((arg_12.num_iterations < arg_7) &\n tf.logical_not(arg_9(arg_12.converged, arg_12.failed)))\n\n def _body(arg_13):\n \"\"\"Main optimization loop.\"\"\"\n arg_14 = _get_search_direction(arg_13)\n\n # TODO(b/120134934): Check if the derivative at the start point is not\n # negative, if so then reset position/gradient deltas and recompute\n # search direction.\n\n arg_15 = bfgs_utils.line_search_step(\n arg_13,\n arg_0, arg_14,\n arg_3, arg_5, arg_4, arg_9)\n\n # If not failed or converged, update the Hessian estimate.\n arg_16 = ~(arg_15.converged | arg_15.failed)\n arg_17 = bfgs_utils.update_fields(\n arg_15,\n position_deltas=_queue_push(\n arg_13.position_deltas, arg_16,\n arg_15.position - arg_13.position),\n gradient_deltas=_queue_push(\n arg_13.gradient_deltas, arg_16,\n arg_15.objective_gradient - arg_13.objective_gradient))\n return [arg_17]\n\n arg_18 = _get_initial_state(arg_0,\n arg_1,\n arg_2,\n arg_3)\n return tf.while_loop(\n cond=_cond,\n body=_body,\n loop_vars=[arg_18],\n arg_8=arg_8)[0]"} +{"_id": "doc_838", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Create LBfgsOptimizerResults with initial state of search procedure.\"\"\"\n arg_4 = bfgs_utils.get_initial_state_args(\n arg_0,\n arg_1,\n arg_3)\n arg_5 = _make_empty_queue_for(arg_2, arg_1)\n arg_4.update(position_deltas=arg_5, gradient_deltas=arg_5)\n return LBfgsOptimizerResults(**arg_4)"} +{"_id": "doc_839", "title": "", "text": "def Func(arg_0):\n \"\"\"Computes the search direction to follow at the current state.\n\n On the `k`-th iteration of the main L-BFGS algorithm, the state has collected\n the most recent `m` correction pairs in position_deltas and gradient_deltas,\n where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`.\n\n Assuming these, the code below is an implementation of the L-BFGS two-loop\n recursion algorithm given by [Nocedal and Wright(2006)][1]:\n\n ```None\n q_direction = objective_gradient\n for i in reversed(range(m)): # First loop.\n inv_rho[i] = gradient_deltas[i]^T * position_deltas[i]\n alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i]\n q_direction = q_direction - alpha[i] * gradient_deltas[i]\n\n kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] /\n 
gradient_deltas[-1]^T * gradient_deltas[-1])\n r_direction = kth_inv_hessian_factor * I * q_direction\n\n for i in range(m): # Second loop.\n beta = gradient_deltas[i]^T * r_direction / inv_rho[i]\n r_direction = r_direction + position_deltas[i] * (alpha[i] - beta)\n\n return -r_direction # Approximates - H_k * objective_gradient.\n ```\n\n Args:\n state: A `LBfgsOptimizerResults` tuple with the current state of the\n search procedure.\n\n Returns:\n A real `Tensor` of the same shape as the `state.position`. The direction\n along which to perform line search.\n \"\"\"\n # The number of correction pairs that have been collected so far.\n arg_1 = tf.minimum(\n arg_0.num_iterations,\n distribution_util.prefer_static_shape(arg_0.position_deltas)[0])\n\n def _two_loop_algorithm():\n \"\"\"L-BFGS two-loop algorithm.\"\"\"\n # Correction pairs are always appended to the end, so only the latest\n # `num_elements` vectors have valid position/gradient deltas.\n arg_2 = arg_0.position_deltas[-arg_1:]\n arg_3 = arg_0.gradient_deltas[-arg_1:]\n\n # Pre-compute all `inv_rho[i]`s.\n arg_4 = tf.reduce_sum(\n input_tensor=arg_3 * arg_2, axis=-1)\n\n def first_loop(arg_5, arg_6):\n arg_7, arg_8 = arg_5\n arg_9, arg_10, arg_11 = arg_6\n arg_12 = tf.reduce_sum(\n input_tensor=arg_9 * arg_8, axis=-1) / arg_11\n arg_13 = tf.expand_dims(arg_12, axis=-1) * arg_10\n return (arg_12, arg_8 - arg_13)\n\n # Run first loop body computing and collecting `alpha[i]`s, while also\n # computing the updated `q_direction` at each step.\n arg_14 = tf.zeros_like(arg_4[0])\n arg_15, arg_16 = tf.scan(\n first_loop, [arg_2, arg_3, arg_4],\n initializer=(arg_14, arg_0.objective_gradient), reverse=True)\n\n # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse\n # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.\n arg_17 = arg_4[-1] / tf.reduce_sum(\n input_tensor=arg_3[-1] * arg_3[-1], axis=-1)\n arg_18 = tf.expand_dims(arg_17, axis=-1) * arg_16[0]\n\n def second_loop(arg_18, arg_6):\n arg_12, arg_9, arg_10, arg_11 = arg_6\n arg_19 = tf.reduce_sum(\n input_tensor=arg_10 * arg_18, axis=-1) / arg_11\n arg_13 = tf.expand_dims(arg_12 - arg_19, axis=-1) * arg_9\n return arg_18 + arg_13\n\n # Finally, run second loop body computing the updated `r_direction` at each\n # step.\n arg_20 = tf.scan(\n second_loop, [arg_15, arg_2, arg_3, arg_4],\n initializer=arg_18)\n return -arg_20[-1]\n\n return prefer_static.cond(tf.equal(arg_1, 0),\n (lambda: -arg_0.objective_gradient),\n _two_loop_algorithm)"} +{"_id": "doc_840", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors.\n\n For example:\n\n ```python\n element = tf.constant([[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]])\n\n # A queue capable of holding 3 elements.\n Func(3, element)\n # => [[[ 0., 0., 0., 0., 0.],\n # [ 0., 0., 0., 0., 0.]],\n #\n # [[ 0., 0., 0., 0., 0.],\n # [ 0., 0., 0., 0., 0.]],\n #\n # [[ 0., 0., 0., 0., 0.],\n # [ 0., 0., 0., 0., 0.]]]\n ```\n\n Args:\n k: A positive scalar integer, number of elements that each queue will hold.\n element: A `tf.Tensor`, only its shape and dtype information are relevant.\n\n Returns:\n A zero-filed `tf.Tensor` of shape `(k,) + tf.shape(element)` and same dtype\n as `element`.\n \"\"\"\n arg_2 = tf.concat(\n [[arg_0], distribution_util.prefer_static_shape(arg_1)], axis=0)\n return tf.zeros(arg_2, dtype=arg_1.dtype.base_dtype)"} +{"_id": "doc_841", "title": "", "text": "def Func(arg_0):\n \"\"\"Computes whether each square matrix 
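The two-loop recursion pseudocode in `doc_839` above translates nearly line-for-line into NumPy. A sketch for a single (non-batched) problem, with the correction pairs stored oldest-first (illustrative, under those assumptions):

```python
import numpy as np

def two_loop_direction(grad, s_list, y_list):
    """L-BFGS two-loop recursion: approximates -H_k @ grad.

    s_list / y_list: recent position / gradient deltas, oldest first.
    """
    q = grad.copy()
    alphas = []
    for s, y in zip(reversed(s_list), reversed(y_list)):  # first loop
        rho = 1. / np.dot(y, s)
        a = rho * np.dot(s, q)
        alphas.append(a)
        q -= a * y
    # Initial estimate H^0_k = gamma_k * I, gamma_k = (y^T s) / (y^T y).
    s, y = s_list[-1], y_list[-1]
    r = (np.dot(y, s) / np.dot(y, y)) * q
    for (s, y), a in zip(zip(s_list, y_list), reversed(alphas)):  # second loop
        rho = 1. / np.dot(y, s)
        b = rho * np.dot(y, r)
        r += (a - b) * s
    return -r

g = np.array([1., -2.])
s_hist = [np.array([0.1, 0.0]), np.array([0.0, 0.2])]
y_hist = [np.array([0.2, 0.0]), np.array([0.0, 0.1])]
print(two_loop_direction(g, s_hist, y_hist))
```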
in the input is positive semi-definite.\n\n Args:\n x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.\n\n Returns:\n mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each\n scalar is 1 if the corresponding matrix was PSD, otherwise 0.\n \"\"\"\n # Allegedly\n # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite\n # it is more efficient to test for positive semi-definiteness by\n # trying to compute the Cholesky decomposition -- the matrix is PSD\n # if you succeed and not PSD if you fail. However, TensorFlow's\n # Cholesky raises an exception if _any_ of the input matrices are\n # not PSD, from which I don't know how to extract _which ones_, so I\n # proceed by explicitly computing all the eigenvalues and checking\n # whether they are all positive or not.\n #\n # Also, as was discussed in the answer, it is somewhat dangerous to\n # treat SPD-ness as binary in floating-point arithmetic. Cholesky\n # factorization can complete and 'look' like everything is fine\n # (e.g., O(1) entries and a diagonal of all ones) but the matrix can\n # have an exponential condition number.\n arg_1, arg_2 = tf.linalg.eigh(arg_0)\n return tf.cast(\n tf.reduce_min(input_tensor=arg_1, axis=-1) >= 0, dtype=arg_0.dtype)"} +{"_id": "doc_842", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Returns rejection samples from trying to get good correlation matrices.\n\n The proposal being rejected from is the uniform distribution on\n \"correlation-like\" matrices. We say a matrix is \"correlation-like\"\n if it is a symmetric square matrix with all entries between -1 and 1\n (inclusive) and 1s on the main diagonal. Of these, the ones that\n are positive semi-definite are exactly the correlation matrices.\n\n The rejection algorithm, then, is to sample a `Tensor` of\n `sample_shape` correlation-like matrices of dimensions `dim` by\n `dim`, and check each one for (i) being a correlation matrix (i.e.,\n PSD), and (ii) having determinant at least the corresponding entry\n of `det_bounds`.\n\n Args:\n det_bounds: A `Tensor` of lower bounds on the determinants of\n acceptable matrices. The shape must broadcast with `sample_shape`.\n dim: A Python `int` dimension of correlation matrices to sample.\n sample_shape: Python `tuple` of `int` shape of the samples to\n compute, excluding the two matrix dimensions.\n dtype: The `dtype` in which to do the computation.\n seed: Random seed.\n\n Returns:\n weights: A `Tensor` of shape `sample_shape`. Each entry is 0 if the\n corresponding matrix was not a correlation matrix, or had too\n small of a determinant. Otherwise, the entry is the\n multiplicative inverse of the density of proposing that matrix\n uniformly, i.e., the volume of the set of `dim` by `dim`\n correlation-like matrices.\n volume: The volume of the set of `dim` by `dim` correlation-like\n matrices.\n \"\"\"\n with tf.compat.v1.name_scope(\"rejection_sampler\"):\n arg_5 = _uniform_correlation_like_matrix(\n arg_1, arg_2, arg_3, arg_4=arg_4)\n arg_6 = 2. 
** (arg_1 * (arg_1 - 1) / 2.)\n # The density of proposing any given point is 1 / rej_proposal_volume;\n # The weight of that point should be scaled by\n # 1 / density = rej_proposal_volume.\n arg_7 = arg_6 * _psd_mask(\n arg_5) * _det_large_enough_mask(arg_5, arg_0)\n return arg_7, arg_6"} +{"_id": "doc_843", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Computes a confidence interval for the mean of the given 1-D distribution.\n\n Assumes (and checks) that the given distribution is Bernoulli, i.e.,\n takes only two values. This licenses using the CDF of the binomial\n distribution for the confidence, which is tighter (for extreme\n probabilities) than the DKWM inequality. The method is known as the\n [Clopper-Pearson method]\n (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval).\n\n Assumes:\n\n - The given samples were drawn iid from the distribution of interest.\n\n - The given distribution is a Bernoulli, i.e., supported only on\n low and high.\n\n Guarantees:\n\n - The probability (over the randomness of drawing the given sample)\n that the true mean is outside the returned interval is no more\n than the given error_rate.\n\n Args:\n samples: `np.ndarray` of samples drawn iid from the distribution\n of interest.\n error_rate: Python `float` admissible rate of mistakes.\n\n Returns:\n low: Lower bound of confidence interval.\n high: Upper bound of confidence interval.\n\n Raises:\n ValueError: If `samples` has rank other than 1 (batch semantics\n are not implemented), or if `samples` contains values other than\n `low` or `high` (as that makes the distribution not Bernoulli).\n \"\"\"\n # TODO(b/78025336) Migrate this confidence interval function\n # to statistical_testing.py. In order to do that\n # - Get the binomial CDF from the Binomial distribution\n # - Implement scalar root finding in TF. Batch bisection search\n # shouldn't be too hard, and is definitely good enough for this\n # problem. Batching the Brent algorithm (from scipy) that is used\n # here may be more involved, but may also not be necessary---it's\n # only used here because scipy made it convenient. 
In particular,\n # robustness is more important than speed here, which may make\n # bisection search actively better.\n # - The rest is just a matter of rewriting in the appropriate style.\n if optimize is None or stats is None:\n raise ValueError(\n \"Scipy is required for computing Clopper-Pearson confidence intervals\")\n if len(arg_0.shape) != 1:\n raise ValueError(\"Batch semantics not implemented\")\n arg_2 = len(arg_0)\n arg_3 = np.amin(arg_0)\n arg_4 = np.amax(arg_0)\n arg_5 = np.count_nonzero(arg_0 - arg_3)\n arg_6 = np.count_nonzero(arg_0 - arg_4)\n if arg_5 + arg_6 != arg_2:\n arg_7 = np.unique(arg_0)\n arg_8 = (\"Purportedly Bernoulli distribution had distinct samples\"\n \" {}, {}, and {}\".format(arg_7[0], arg_7[1], arg_7[2]))\n raise ValueError(arg_8)\n def p_small_enough(arg_9):\n arg_10 = stats.binom.logcdf(arg_5, arg_2, arg_9)\n return arg_10 - np.log(arg_1 / 2.)\n def p_big_enough(arg_9):\n arg_10 = stats.binom.logsf(arg_5, arg_2, arg_9)\n return arg_10 - np.log(arg_1 / 2.)\n arg_11 = optimize.brentq(\n p_small_enough, float(arg_5) / arg_2, 1., rtol=1e-9)\n arg_12 = optimize.brentq(\n p_big_enough, 0., float(arg_5) / arg_2, rtol=1e-9)\n arg_13 = arg_3 + (arg_4 - arg_3) * arg_12\n arg_14 = arg_3 + (arg_4 - arg_3) * arg_11\n return (arg_13, arg_14)"} +{"_id": "doc_844", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Computes the von Mises CDF and its derivative via series expansion.\"\"\"\n # Keep the number of terms as a float. It should be a small integer, so\n # exactly representable as a float.\n arg_2 = tf.cast(arg_2, arg_3=arg_3)\n\n def loop_body(arg_4, arg_5, arg_6, arg_7, arg_8):\n \"\"\"One iteration of the series loop.\"\"\"\n\n arg_9 = 2. * arg_4 / arg_1 + arg_5\n arg_10 = -2. * arg_4 / arg_1 ** 2 + arg_6\n arg_5 = 1. / arg_9\n arg_6 = -arg_10 / arg_9 ** 2\n\n arg_11 = tf.sin(arg_4 * arg_0) / arg_4 + arg_7\n arg_7 = arg_5 * arg_11\n arg_8 = (arg_6 * arg_11 +\n arg_5 * arg_8)\n arg_4 -= 1.\n\n return arg_4, arg_5, arg_6, arg_7, arg_8\n\n (arg_12, arg_12, arg_12, arg_7, arg_8) = tf.while_loop(\n cond=lambda arg_4, *arg_12: arg_4 > 0.,\n body=loop_body,\n loop_vars=(\n arg_2, # n\n tf.zeros_like(arg_0, name=\"rn\"),\n tf.zeros_like(arg_0, name=\"drn_dconcentration\"),\n tf.zeros_like(arg_0, name=\"vn\"),\n tf.zeros_like(arg_0, name=\"dvn_dconcentration\"),\n ),\n )\n\n arg_13 = .5 + arg_0 / (2. * np.pi) + arg_7 / np.pi\n arg_14 = arg_8 / np.pi\n\n # Clip the result to [0, 1].\n arg_15 = tf.clip_by_value(arg_13, 0., 1.)\n # The clipped values do not depend on concentration anymore, so set their\n # derivative to zero.\n arg_14 *= tf.cast((arg_13 >= 0.) & (arg_13 <= 1.), arg_3)\n\n return arg_15, arg_14"} +{"_id": "doc_845", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Computes the von Mises CDF and its derivative via Normal approximation.\"\"\"\n\n def cdf_func(arg_1):\n \"\"\"A helper function that is passed to value_and_gradient.\"\"\"\n # z is an \"almost Normally distributed\" random variable.\n arg_3 = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(arg_1)) *\n tf.sin(.5 * arg_0))\n\n # This is the correction described in [1] which reduces the error\n # of the Normal approximation.\n arg_4 = arg_3 ** 2\n arg_5 = arg_4 * arg_3\n arg_6 = arg_4 ** 2\n arg_7 = 24. * arg_1\n arg_8 = 56.\n\n arg_9 = arg_3 - arg_5 / ((arg_7 - 2. * arg_4 - 16.) / 3. -\n (arg_6 + (7. / 4.) * arg_4 + 167. / 2.) 
/ (arg_7 - arg_8 - arg_4 + 3.)) ** 2\n\n arg_10 = normal.Normal(tf.cast(0., arg_2), tf.cast(1., arg_2))\n\n return arg_10.cdf(arg_9)\n\n return value_and_gradient(cdf_func, arg_1)"} +{"_id": "doc_846", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=0.5,\n arg_4=0.9,\n arg_5=None,\n arg_6=None):\n \"\"\"Performs one step of the differential evolution algorithm.\n\n Args:\n objective_function: A Python callable that accepts a batch of possible\n solutions and returns the values of the objective function at those\n arguments as a rank 1 real `Tensor`. This specifies the function to be\n minimized. The input to this callable may be either a single `Tensor`\n or a Python `list` of `Tensor`s. The signature must match the format of\n the argument `population`. (i.e. objective_function(*population) must\n return the value of the function to be minimized).\n population: `Tensor` or Python `list` of `Tensor`s representing the\n current population vectors. Each `Tensor` must be of the same real dtype.\n The first dimension indexes individual population members while the\n rest of the dimensions are consumed by the value function. For example,\n if the population is a single `Tensor` of shape [n, m1, m2], then `n` is\n the population size and the output of `objective_function` applied to the\n population is a `Tensor` of shape [n]. If the population is a python\n list of `Tensor`s then each `Tensor` in the list should have the first\n axis of a common size, say `n` and `objective_function(*population)`\n should return a `Tensor of shape [n]. The population must have at least\n 4 members for the algorithm to work correctly.\n population_values: A `Tensor` of rank 1 and real dtype. The result of\n applying `objective_function` to the `population`. If not supplied it is\n computed using the `objective_function`.\n Default value: None.\n differential_weight: Real scalar `Tensor`. Must be positive and less than\n 2.0. The parameter controlling the strength of mutation.\n Default value: 0.5\n crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The\n probability of recombination per site.\n Default value: 0.9\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: None.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'Func' is\n used.\n Default value: None\n\n Returns:\n A sequence containing the following elements (in order):\n next_population: A `Tensor` or Python `list` of `Tensor`s of the same\n structure as the input population. The population at the next generation.\n next_population_values: A `Tensor` of same shape and dtype as input\n `population_values`. The function values for the `next_population`.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_6, 'Func',\n [arg_1, arg_2, arg_3, arg_4]):\n arg_1, arg_7 = _ensure_list(arg_1)\n if arg_2 is None:\n arg_2 = arg_0(*arg_1)\n arg_8 = tf.shape(input=arg_1[0])[0]\n arg_9 = distributions.SeedStream(arg_5, salt='Func')\n arg_10 = _get_mixing_indices(arg_8, arg_5=arg_9())\n # Construct the mutated solution vectors. 
There is one for each member of\n # the population.\n arg_11 = _get_mutants(arg_1,\n arg_8,\n arg_10,\n arg_3)\n # Perform recombination between the parents and the mutants.\n arg_12 = _binary_crossover(arg_1,\n arg_8,\n arg_11,\n arg_4,\n arg_5=arg_9())\n arg_13 = arg_0(*arg_12)\n if arg_2 is None:\n arg_2 = arg_0(*arg_1)\n\n arg_14 = tf.zeros_like(arg_2) + np.inf\n\n arg_2 = tf.where(\n tf.math.is_nan(arg_2), x=arg_14, y=arg_2)\n\n arg_15 = arg_13 < arg_2\n arg_16 = [\n tf.where(arg_15, x=candidates_part, y=population_part)\n for candidates_part, population_part in zip(arg_12, arg_1)\n ]\n arg_17 = tf.where(arg_15, x=arg_13, y=arg_2)\n\n return arg_16, arg_17"} +{"_id": "doc_847", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n arg_9,\n arg_10):\n \"\"\"Processes initial args.\"\"\"\n arg_11 = False\n if arg_2 is not None:\n arg_2, arg_11 = _ensure_list(arg_2)\n\n if arg_1 is not None:\n arg_1, arg_11 = _ensure_list(arg_1)\n\n arg_12 = _get_starting_population(arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_10=arg_10)\n\n arg_8 = tf.convert_to_tensor(\n value=arg_8, dtype=arg_12[0].dtype.base_dtype)\n\n arg_9 = tf.convert_to_tensor(value=arg_9)\n arg_13 = arg_0(*arg_12)\n if arg_5 is not None:\n arg_5 = tf.convert_to_tensor(value=arg_5)\n arg_6 = tf.convert_to_tensor(\n value=arg_6, dtype=arg_13.dtype.base_dtype)\n arg_7 = tf.convert_to_tensor(\n value=arg_7, dtype=arg_12[0].dtype.base_dtype)\n return (arg_11,\n arg_12,\n arg_13,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n arg_9)"} +{"_id": "doc_848", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Finds the population member with the lowest value.\"\"\"\n arg_2 = tf.math.reduce_min(input_tensor=arg_1)\n arg_3 = tf.where(tf.math.equal(arg_1, arg_2))[0, 0]\n\n return ([arg_4[arg_3] for arg_4 in arg_0],\n arg_2)"} +{"_id": "doc_849", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Computes the mutatated vectors for each population member.\n\n Args:\n population: Python `list` of `Tensor`s representing the\n current population vectors. Each `Tensor` must be of the same real dtype.\n The first dimension of each `Tensor` indexes individual\n population members. For example, if the population is a list with a\n single `Tensor` of shape [n, m1, m2], then `n` is the population size and\n the shape of an individual solution is [m1, m2].\n If there is more than one element in the population, then each `Tensor`\n in the list should have the first axis of the same size.\n population_size: Scalar integer `Tensor`. The size of the population.\n mixing_indices: `Tensor` of integral dtype and shape [n, 3] where `n` is the\n number of members in the population. Each element of the `Tensor` must be\n a valid index into the first dimension of the population (i.e range\n between `0` and `n-1` inclusive).\n differential_weight: Real scalar `Tensor`. Must be positive and less than\n 2.0. The parameter controlling the strength of mutation.\n\n Returns:\n mutants: `Tensor` or Python `list` of `Tensor`s of the same shape and dtype\n as the input population. 
The mutated vectors.\n \"\"\"\n arg_2 = tf.reshape(arg_2, [-1])\n arg_4 = tf.stack([1.0, arg_3, -arg_3])\n def _mutant_part(arg_5):\n arg_6 = tf.gather(arg_5, arg_2)\n arg_6 = tf.transpose(\n a=tf.reshape(arg_6, [arg_1, 3, -1]), perm=[0, 2, 1])\n return tf.math.reduce_sum(input_tensor=arg_6 * arg_4, axis=-1)\n\n return [_mutant_part(arg_5) for arg_5 in arg_0]"} +{"_id": "doc_850", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Generates an array of indices suitable for mutation operation.\n\n The mutation operation in differential evolution requires that for every\n element of the population, three distinct other elements be chosen to produce\n a trial candidate. This function generates an array of shape [size, 3]\n satisfying the properties that:\n (a). array[i, :] does not contain the index 'i'.\n (b). array[i, :] does not contain any overlapping indices.\n (c). All elements in the array are between 0 and size - 1 inclusive.\n\n Args:\n size: Scalar integer `Tensor`. The number of samples as well as a the range\n of the indices to sample from.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: `None`.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: 'get_mixing_indices'.\n\n Returns:\n sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing\n samples without replacement between 0 and size - 1 (inclusive) with the\n `i`th row not including the number `i`.\n \"\"\"\n with tf.compat.v1.name_scope(\n arg_2, default_name='get_mixing_indices', values=[arg_0]):\n arg_0 = tf.convert_to_tensor(value=arg_0)\n arg_3 = arg_0.dtype\n arg_4 = distributions.SeedStream(arg_1, salt='get_mixing_indices')\n arg_5 = tf.random.uniform([arg_0],\n maxval=arg_0-1,\n arg_3=arg_3,\n arg_1=arg_4())\n arg_6 = tf.random.uniform([arg_0],\n maxval=arg_0-2,\n arg_3=arg_3,\n arg_1=arg_4())\n arg_7 = tf.random.uniform([arg_0],\n maxval=arg_0-3,\n arg_3=arg_3,\n arg_1=arg_4())\n\n # Shift second if it is on top of or to the right of first\n arg_6 = tf.where(arg_5 < arg_6, x=arg_6, y=arg_6 + 1)\n arg_8 = tf.math.minimum(arg_5, arg_6)\n arg_9 = tf.math.maximum(arg_5, arg_6)\n # Shift the third one so it does not coincide with either the first or the\n # second number. Assuming first < second, shift by 1 if the number is in\n # [first, second) and by 2 if the number is greater than or equal to the\n # second.\n arg_7 = tf.where(arg_7 < arg_8, x=arg_7, y=arg_7 + 1)\n arg_7 = tf.where(arg_7 < arg_9, x=arg_7, y=arg_7 + 1)\n arg_10 = tf.stack([arg_5, arg_6, arg_7], axis=1)\n arg_11 = tf.expand_dims(tf.range(arg_0), axis=-1)\n arg_10 = tf.where(arg_10 < arg_11, x=arg_10, y=arg_10 + 1)\n return arg_10"} +{"_id": "doc_851", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts the input arg to a list if it is not a list already.\n\n Args:\n tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to\n convert to a list of `Tensor`s.\n\n Returns:\n A tuple of two elements. The first is a Python list of `Tensor`s containing\n the original arguments. 
The second is a boolean indicating whether\n the original argument was a list or tuple already.\n \"\"\"\n if isinstance(arg_0, (list, tuple)):\n return list(arg_0), True\n return [arg_0], False"} +{"_id": "doc_852", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Gets a Tensor of type `dtype`, 0 if `tol` is None, validation optional.\"\"\"\n if arg_0 is None:\n return tf.convert_to_tensor(value=0, arg_1=arg_1)\n\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_1=arg_1)\n if arg_2:\n arg_0 = distribution_util.with_dependencies([\n assert_util.assert_non_negative(\n arg_0, message=\"Argument 'tol' must be non-negative\")\n ], arg_0)\n return arg_0"} +{"_id": "doc_853", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Soft Thresholding operator.\n\n This operator is defined by the equations\n\n ```none\n { x[i] - gamma, x[i] > gamma\n SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma\n { x[i] + gamma, x[i] < -gamma\n ```\n\n In the context of proximal gradient methods, we have\n\n ```none\n SoftThreshold(x, gamma) = prox_{gamma L1}(x)\n ```\n\n where `prox` is the proximity operator. Thus the soft thresholding operator\n is used in proximal gradient descent for optimizing a smooth function with\n (non-smooth) L1 regularization, as outlined below.\n\n The proximity operator is defined as:\n\n ```none\n prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z },\n ```\n\n where `r` is a (weakly) convex function, not necessarily differentiable.\n Because the L2 norm is strictly convex, the above argmin is unique.\n\n One important application of the proximity operator is as follows. Let `L` be\n a convex and differentiable function with Lipschitz-continuous gradient. Let\n `R` be a convex lower semicontinuous function which is possibly\n nondifferentiable. Let `gamma` be an arbitrary positive real. Then\n\n ```none\n x_star = argmin{ L(x) + R(x) : x }\n ```\n\n if and only if the fixed-point equation is satisfied:\n\n ```none\n x_star = prox_{gamma R}(x_star - gamma grad L(x_star))\n ```\n\n Proximal gradient descent thus typically consists of choosing an initial value\n `x^{(0)}` and repeatedly applying the update\n\n ```none\n x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)}))\n ```\n\n where `gamma` is allowed to vary from iteration to iteration. Specializing to\n the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly\n applying the update\n\n ```\n x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma)\n ```\n\n (This idea can also be extended to second-order approximations, although the\n multivariate case does not have a known closed form like above.)\n\n Args:\n x: `float` `Tensor` representing the input to the SoftThreshold function.\n threshold: nonnegative scalar, `float` `Tensor` representing the radius of\n the interval on which each coordinate of SoftThreshold takes the value\n zero. Denoted `gamma` above.\n name: Python string indicating the name of the TensorFlow operation.\n Default value: `'Func'`.\n\n Returns:\n softthreshold: `float` `Tensor` with the same shape and dtype as `x`,\n representing the value of the SoftThreshold function.\n\n #### References\n\n [1]: Yu, Yao-Liang. The Proximity Operator.\n https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf\n\n [2]: Wikipedia Contributors. 
Proximal gradient methods for learning.\n _Wikipedia, The Free Encyclopedia_, 2018.\n https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning\n\n \"\"\"\n # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator\n with tf.compat.v1.name_scope(arg_2, 'Func', [arg_0, arg_1]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_2='x')\n arg_1 = tf.convert_to_tensor(\n value=arg_1, dtype=arg_0.dtype, arg_2='threshold')\n return tf.sign(arg_0) * tf.maximum(tf.abs(arg_0) - arg_1, 0.)"} +{"_id": "doc_854", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=None):\n \"\"\"Clips values to a specified min and max while leaving gradient unaltered.\n\n Like `tf.clip_by_value`, this function returns a tensor of the same type and\n shape as input `t` but with values clamped to be no smaller than to\n `clip_value_min` and no larger than `clip_value_max`. Unlike\n `tf.clip_by_value`, the gradient is unaffected by this op, i.e.,\n\n ```python\n tf.gradients(tfp.math.Func(x), x)[0]\n # ==> ones_like(x)\n ```\n\n Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for\n correct results.\n\n Args:\n t: A `Tensor`.\n clip_value_min: A scalar `Tensor`, or a `Tensor` with the same shape\n as `t`. The minimum value to clip by.\n clip_value_max: A scalar `Tensor`, or a `Tensor` with the same shape\n as `t`. The maximum value to clip by.\n name: A name for the operation (optional).\n Default value: `'Func'`.\n\n Returns:\n clipped_t: A clipped `Tensor`.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, 'Func',\n [arg_0, arg_1, arg_2]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_3='t')\n arg_4 = tf.clip_by_value(arg_0, arg_1, arg_2)\n return arg_0 + tf.stop_gradient(arg_4 - arg_0)"} +{"_id": "doc_855", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Build an iterator over training batches.\"\"\"\n\n arg_2 = tf.data.Dataset.from_tensor_slices(arg_0)\n arg_3 = arg_2.shuffle(\n 50000, reshuffle_each_iteration=True).repeat().batch(arg_1)\n arg_4 = tf.compat.v1.data.make_one_shot_iterator(arg_3)\n arg_5 = arg_4.get_next()\n return arg_5"} +{"_id": "doc_856", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Converts a sequence of productions into a string of terminal symbols.\n\n Args:\n productions: Tensor of shape [1, num_productions, num_production_rules].\n Slices along the `num_productions` dimension represent one-hot vectors.\n\n Returns:\n str that concatenates all terminal symbols from `productions`.\n\n Raises:\n ValueError: If the first production rule does not begin with\n `self.start_symbol`.\n \"\"\"\n arg_2 = []\n for arg_3 in tf.unstack(arg_1, axis=1):\n arg_4, arg_5 = arg_0.production_rules[tf.argmax(input=arg_3, axis=-1)]\n if not arg_2: # first iteration\n if arg_4 != arg_0.start_symbol:\n raise ValueError(\"`productions` must begin with `self.start_symbol`.\")\n arg_2 = arg_5\n else:\n # Greedily unroll the nonterminal symbols based on the first occurrence\n # in a linear sequence.\n arg_6 = arg_2.index(arg_4)\n arg_2 = arg_2[:arg_6] + arg_5 + arg_2[arg_6 + 1:]\n arg_7 = \"\".join(arg_2)\n return arg_7"} +{"_id": "doc_857", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Runs the model forward to generate a sequence of productions.\n\n Args:\n inputs: Unused.\n\n Returns:\n productions: Tensor of shape [1, num_productions, num_production_rules].\n Slices along the `num_productions` dimension represent one-hot vectors.\n \"\"\"\n del arg_1 # unused\n arg_2 = 
ed.MultivariateNormalDiag(loc=tf.zeros(arg_0.latent_size),\n sample_shape=1,\n name=\"latent_code\")\n arg_3 = arg_0.lstm.zero_state(1, dtype=tf.float32)\n arg_4 = 0\n arg_5 = []\n arg_6 = [arg_0.grammar.start_symbol]\n while arg_6:\n arg_7 = arg_6.pop()\n arg_8, arg_3 = arg_0.lstm(arg_2, arg_3)\n arg_9 = (arg_0.output_layer(arg_8) +\n arg_0.grammar.mask(arg_7, on_value=0., off_value=-1e9))\n arg_10 = ed.OneHotCategorical(arg_9=arg_9,\n name=\"production_\" + str(arg_4))\n arg_11, arg_12 = arg_0.grammar.production_rules[tf.argmax(\n input=arg_10, axis=-1)]\n for arg_7 in arg_12:\n if arg_7 in arg_0.grammar.nonterminal_symbols:\n arg_6.append(arg_7)\n arg_5.append(arg_10)\n arg_4 += 1\n return tf.stack(arg_5, axis=1)"} +{"_id": "doc_858", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Runs the model forward to return a stochastic encoding.\n\n Args:\n inputs: Tensor of shape [1, num_productions, num_production_rules]. It is\n a sequence of productions of length `num_productions`. Each production\n is a one-hot vector of length `num_production_rules`: it determines\n which production rule the production corresponds to.\n\n Returns:\n latent_code_posterior: A random variable capturing a sample from the\n variational distribution, of shape [1, self.latent_size].\n \"\"\"\n arg_2 = arg_0.encoder_net(tf.cast(arg_1, tf.float32))\n return ed.MultivariateNormalDiag(\n loc=arg_2[..., :arg_0.latent_size],\n scale_diag=tf.nn.softplus(arg_2[..., arg_0.latent_size:]),\n name=\"latent_code_posterior\")"} +{"_id": "doc_859", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Integral of the `hat` function, used for sampling.\n\n We choose a `hat` function, h(x) = x^(-power), which is a continuous\n (unnormalized) density touching each positive integer at the (unnormalized)\n pmf. 
This function implements `hat` integral: H(x) = int_x^inf h(t) dt;\n which is needed for sampling purposes.\n\n Arguments:\n x: A Tensor of points x at which to evaluate H(x).\n\n Returns:\n A Tensor containing evaluation H(x) at x.\n \"\"\"\n arg_1 = tf.cast(arg_1, arg_0.power.dtype)\n arg_2 = arg_0.power - 1.\n return tf.exp((-arg_2) * tf.math.log1p(arg_1) - tf.math.log(arg_2))"} +{"_id": "doc_860", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Inverse function of _hat_integral.\"\"\"\n arg_1 = tf.cast(arg_1, arg_0.power.dtype)\n arg_2 = arg_0.power - 1.\n return tf.math.expm1(-(tf.math.log(arg_2) + tf.math.log(arg_1)) / arg_2)"} +{"_id": "doc_861", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=None):\n \"\"\"Compute the matrix rank; the number of non-zero SVD singular values.\n\n Arguments:\n a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\n pseudo-inverted.\n tol: Threshold below which the singular value is counted as \"zero\".\n Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).\n validate_args: When `True`, additional assertions might be embedded in the\n graph.\n Default value: `False` (i.e., no graph assertions are added).\n name: Python `str` prefixed to ops created by this function.\n Default value: \"Func\".\n\n Returns:\n Func: (Batch of) `int32` scalars representing the number of non-zero\n singular values.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, 'Func', [arg_0, arg_1]):\n arg_0 = tf.convert_to_tensor(value=arg_0, dtype_hint=tf.float32, arg_3='a')\n arg_4 = _maybe_validate_matrix(arg_0, arg_2)\n if arg_4:\n with tf.control_dependencies(arg_4):\n arg_0 = tf.identity(arg_0)\n arg_5 = tf.linalg.svd(arg_0, compute_uv=False)\n if arg_1 is None:\n if arg_0.shape[-2:].is_fully_defined():\n arg_6 = np.max(arg_0.shape[-2:].as_list())\n else:\n arg_6 = tf.reduce_max(input_tensor=tf.shape(input=arg_0)[-2:])\n arg_7 = np.finfo(arg_0.dtype.as_numpy_dtype).eps\n arg_1 = (arg_7 * tf.cast(arg_6, arg_0.dtype) *\n tf.reduce_max(input_tensor=arg_5, axis=-1, keepdims=True))\n return tf.reduce_sum(input_tensor=tf.cast(arg_5 > arg_1, tf.int32), axis=-1)"} +{"_id": "doc_862", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=None):\n \"\"\"Compute the Moore-Penrose pseudo-inverse of a matrix.\n\n Calculate the [generalized inverse of a matrix](\n https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its\n singular-value decomposition (SVD) and including all large singular values.\n\n The pseudo-inverse of a matrix `A`, is defined as: \"the matrix that 'solves'\n [the least-squares problem] `A @ x = b`,\" i.e., if `x_hat` is a solution, then\n `A_Func` is the matrix such that `x_hat = A_Func @ b`. It can be shown that if\n `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then\n `A_Func = V @ inv(Sigma) U^T`. [(Strang, 1980)][1]\n\n This function is analogous to [`numpy.linalg.Func`](\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.Func.html).\n It differs only in default value of `rcond`. In `numpy.linalg.Func`, the\n default `rcond` is `1e-15`. Here the default is\n `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.\n\n Args:\n a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\n pseudo-inverted.\n rcond: `Tensor` of small singular value cutoffs. Singular values smaller\n (in modulus) than `rcond` * largest_singular_value (again, in modulus) are\n set to zero. Must broadcast against `tf.shape(a)[:-2]`.\n Default value: `10. 
* max(num_rows, num_cols) * np.finfo(a.dtype).eps`.\n validate_args: When `True`, additional assertions might be embedded in the\n graph.\n Default value: `False` (i.e., no graph assertions are added).\n name: Python `str` prefixed to ops created by this function.\n Default value: \"Func\".\n\n Returns:\n a_Func: The pseudo-inverse of input `a`. Has same shape as `a` except\n rightmost two dimensions are transposed.\n\n Raises:\n TypeError: if input `a` does not have `float`-like `dtype`.\n ValueError: if input `a` has fewer than 2 dimensions.\n\n #### Examples\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n a = tf.constant([[1., 0.4, 0.5],\n [0.4, 0.2, 0.25],\n [0.5, 0.25, 0.35]])\n tf.matmul(tfp.math.Func(a), a)\n # ==> array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], dtype=float32)\n\n a = tf.constant([[1., 0.4, 0.5, 1.],\n [0.4, 0.2, 0.25, 2.],\n [0.5, 0.25, 0.35, 3.]])\n tf.matmul(tfp.math.Func(a), a)\n # ==> array([[ 0.76, 0.37, 0.21, -0.02],\n [ 0.37, 0.43, -0.33, 0.02],\n [ 0.21, -0.33, 0.81, 0.01],\n [-0.02, 0.02, 0.01, 1. ]], dtype=float32)\n ```\n\n #### References\n\n [1]: G. Strang. \"Linear Algebra and Its Applications, 2nd Ed.\" Academic Press,\n Inc., 1980, pp. 139-142.\n \"\"\"\n with tf.compat.v1.name_scope(arg_3, 'Func', [arg_0, arg_1]):\n arg_0 = tf.convert_to_tensor(value=arg_0, arg_3='a')\n\n arg_4 = _maybe_validate_matrix(arg_0, arg_2)\n if arg_4:\n with tf.control_dependencies(arg_4):\n arg_0 = tf.identity(arg_0)\n\n arg_5 = arg_0.dtype.as_numpy_dtype\n\n if arg_1 is None:\n def get_dim_size(arg_6):\n if tf.compat.dimension_value(arg_0.shape[arg_6]) is not None:\n return tf.compat.dimension_value(arg_0.shape[arg_6])\n return tf.shape(input=arg_0)[arg_6]\n\n arg_7 = get_dim_size(-2)\n arg_8 = get_dim_size(-1)\n if isinstance(arg_7, int) and isinstance(arg_8, int):\n arg_9 = float(max(arg_7, arg_8))\n else:\n arg_9 = tf.cast(tf.maximum(arg_7, arg_8), arg_5)\n arg_1 = 10. * arg_9 * np.finfo(arg_5).eps\n\n arg_1 = tf.convert_to_tensor(value=arg_1, arg_5=arg_5, arg_3='rcond')\n\n # Calculate pseudo inverse via SVD.\n # Note: if a is symmetric then u == v. (We might observe additional\n # performance by explicitly setting `v = u` in such cases.)\n [\n arg_10, # Sigma\n arg_11, # U\n arg_12, # V\n ] = tf.linalg.svd(arg_0, full_matrices=False, compute_uv=True)\n\n # Saturate small singular values to inf. This has the effect of make\n # `1. 
/ s = 0.` while not resulting in `NaN` gradients.\n arg_13 = arg_1 * tf.reduce_max(input_tensor=arg_10, axis=-1)\n arg_10 = tf.where(\n arg_10 > arg_13[..., tf.newaxis], arg_10,\n tf.fill(tf.shape(input=arg_10), np.array(np.inf, arg_5)))\n\n # Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap\n # `u` and `v` here so that `tf.matmul(Func(A), A) = tf.eye()`, i.e.,\n # a matrix inverse has \"transposed\" semantics.\n arg_14 = tf.matmul(\n arg_12 / arg_10[..., tf.newaxis, :],\n arg_11,\n adjoint_b=True)\n\n if arg_0.shape.ndims is not None:\n arg_14.set_shape(arg_0.shape[:-2].concatenate([arg_0.shape[-1], arg_0.shape[-2]]))\n\n return arg_14"} +{"_id": "doc_863", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=False,\n arg_4=None):\n \"\"\"Solves systems of linear eqns `A X = RHS`, given LU factorizations.\n\n Note: this function does not verify the implied matrix is actually invertible\n nor is this condition checked even when `validate_args=True`.\n\n Args:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if\n `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if\n `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.\n rhs: Matrix-shaped float `Tensor` representing targets for which to solve;\n `A X = RHS`. To handle vector cases, use:\n `Func(..., rhs[..., tf.newaxis])[..., 0]`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness. Note: this function does not verify the implied matrix is\n actually invertible, even when `validate_args=True`.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., \"Func\").\n\n Returns:\n x: The `X` in `A @ X = RHS`.\n\n #### Examples\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n x = [[[1., 2],\n [3, 4]],\n [[7, 8],\n [3, 4]]]\n inv_x = tfp.math.Func(*tf.linalg.lu(x), rhs=tf.eye(2))\n tf.assert_near(tf.matrix_inverse(x), inv_x)\n # ==> True\n ```\n\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_4, 'Func', [arg_0, arg_1, arg_2]):\n arg_0 = tf.convert_to_tensor(\n value=arg_0, dtype_hint=tf.float32, arg_4='lower_upper')\n arg_1 = tf.convert_to_tensor(value=arg_1, dtype_hint=tf.int32, arg_4='perm')\n arg_2 = tf.convert_to_tensor(\n value=arg_2, dtype_hint=arg_0.dtype, arg_4='rhs')\n\n arg_5 = _Func_assertions(arg_0, arg_1, arg_2, arg_3)\n if arg_5:\n with tf.control_dependencies(arg_5):\n arg_0 = tf.identity(arg_0)\n arg_1 = tf.identity(arg_1)\n arg_2 = tf.identity(arg_2)\n\n if arg_2.shape.ndims == 2 and arg_1.shape.ndims == 1:\n # Both rhs and perm have scalar batch_shape.\n arg_6 = tf.gather(arg_2, arg_1, axis=-2)\n else:\n # Either rhs or perm have non-scalar batch_shape or we can't determine\n # this information statically.\n arg_7 = tf.shape(input=arg_2)\n arg_8 = tf.broadcast_dynamic_shape(\n arg_7[:-2],\n tf.shape(input=arg_1)[:-1])\n arg_9, arg_10 = arg_7[-2], arg_7[-1]\n arg_11 = tf.concat([arg_8, [arg_9, arg_10]], axis=0)\n\n # Tile out rhs.\n arg_12 = tf.broadcast_to(arg_2, arg_11)\n arg_12 = tf.reshape(arg_12, [-1, arg_9, arg_10])\n\n # Tile out perm and add batch indices.\n arg_13 = tf.broadcast_to(arg_1, arg_11[:-1])\n arg_13 = tf.reshape(arg_13, [-1, arg_9])\n arg_14 = tf.reduce_prod(input_tensor=arg_8)\n arg_15 = tf.broadcast_to(\n tf.range(arg_14)[:, tf.newaxis],\n [arg_14, arg_9])\n arg_13 = tf.stack([arg_15, arg_13],\n axis=-1)\n\n arg_6 = 
tf.gather_nd(arg_12, arg_13)\n arg_6 = tf.reshape(arg_6, arg_11)\n\n arg_16 = tf.linalg.set_diag(\n tf.linalg.band_part(arg_0, num_lower=-1, num_upper=0),\n tf.ones(tf.shape(input=arg_0)[:-1], dtype=arg_0.dtype))\n return linear_operator_util.matrix_triangular_solve_with_broadcast(\n arg_0, # Only upper is accessed.\n linear_operator_util.matrix_triangular_solve_with_broadcast(\n arg_16, arg_6),\n arg_16=False)"} +{"_id": "doc_864", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Computes a matrix inverse given the matrix's LU decomposition.\n\n This op is conceptually identical to,\n\n ````python\n inv_X = tf.Func(*tf.linalg.lu(X))\n tf.assert_near(tf.matrix_inverse(X), inv_X)\n # ==> True\n ```\n\n Note: this function does not verify the implied matrix is actually invertible\n nor is this condition checked even when `validate_args=True`.\n\n Args:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if\n `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if\n `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness. Note: this function does not verify the implied matrix is\n actually invertible, even when `validate_args=True`.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., \"Func\").\n\n Returns:\n inv_x: The matrix_inv, i.e.,\n `tf.matrix_inverse(tfp.math.lu_reconstruct(lu, perm))`.\n\n #### Examples\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n x = [[[3., 4], [1, 2]],\n [[7., 8], [3, 4]]]\n inv_x = tfp.math.Func(*tf.linalg.lu(x))\n tf.assert_near(tf.matrix_inverse(x), inv_x)\n # ==> True\n ```\n\n \"\"\"\n\n with tf.compat.v1.name_scope(arg_3, 'Func', [arg_0, arg_1]):\n arg_0 = tf.convert_to_tensor(\n value=arg_0, dtype_hint=tf.float32, arg_3='lower_upper')\n arg_1 = tf.convert_to_tensor(value=arg_1, dtype_hint=tf.int32, arg_3='perm')\n arg_4 = _lu_reconstruct_assertions(arg_0, arg_1, arg_2)\n if arg_4:\n with tf.control_dependencies(arg_4):\n arg_0 = tf.identity(arg_0)\n arg_1 = tf.identity(arg_1)\n arg_5 = tf.shape(input=arg_0)\n return lu_solve(\n arg_0, arg_1,\n rhs=tf.eye(arg_5[-1], batch_shape=arg_5[:-2], dtype=arg_0.dtype),\n arg_2=False)"} +{"_id": "doc_865", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Returns list of assertions related to `lu_solve` assumptions.\"\"\"\n arg_4 = _lu_reconstruct_assertions(arg_0, arg_1, arg_3)\n\n arg_5 = 'Input `rhs` must have at least 2 dimensions.'\n if arg_2.shape.ndims is not None:\n if arg_2.shape.ndims < 2:\n raise ValueError(arg_5)\n elif arg_3:\n arg_4.append(\n tf.compat.v1.assert_rank_at_least(arg_2, rank=2, arg_5=arg_5))\n\n arg_5 = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'\n if (tf.compat.dimension_value(arg_0.shape[-1]) is not None and\n tf.compat.dimension_value(arg_2.shape[-2]) is not None):\n if arg_0.shape[-1] != arg_2.shape[-2]:\n raise ValueError(arg_5)\n elif arg_3:\n arg_4.append(\n tf.compat.v1.assert_equal(\n tf.shape(input=arg_0)[-1],\n tf.shape(input=arg_2)[-2],\n arg_5=arg_5))\n\n return arg_4"} +{"_id": "doc_866", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors.\n\n Args:\n sp_a: A rank 3 `SparseTensor` representing a batch of matrices.\n\n Returns:\n 
sp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype\n as `sparse_or_matrix`, of shape [B * M, B * N] where `sp_a` has shape\n [B, M, N]. Each [M, N] batch of `sp_a` is lined up along the diagonal.\n \"\"\"\n # Construct the matrix [[M, N], [1, 0], [0, 1]] which would map the index\n # (b, i, j) to (Mb + i, Nb + j). This effectively creates a block-diagonal\n # matrix of dense shape [B * M, B * N].\n # Note that this transformation doesn't increase the number of non-zero\n # entries in the SparseTensor.\n arg_1 = tf.convert_to_tensor(value=_get_shape(arg_0, tf.int64))\n arg_2 = tf.concat([[arg_1[-2:]], tf.eye(2, dtype=tf.int64)], axis=0)\n arg_3 = tf.matmul(arg_0.indices, arg_2)\n arg_4 = arg_1[0] * arg_1[1:]\n return tf.SparseTensor(\n arg_3=arg_3, values=arg_0.values, arg_4=arg_4)"} +{"_id": "doc_867", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"Computes the neg-log-likelihood gradient and Fisher information for a GLM.\n\n Note that Fisher information is related to the Hessian of the log-likelihood\n by the equation\n\n ```none\n FisherInfo = E[Hessian with respect to model_coefficients of -LogLikelihood(\n Y | model_matrix, model_coefficients)]\n ```\n\n where `LogLikelihood` is the log-likelihood of a generalized linear model\n parameterized by `model_matrix` and `model_coefficients`, and the expectation\n is taken over Y, distributed according to the same GLM with the same parameter\n values.\n\n Args:\n model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor`\n where each row represents a sample's features. Has shape `[N, n]` where\n `N` is the number of data samples and `n` is the number of features per\n sample.\n linear_response: (Batch of) vector-shaped `Tensor` with the same dtype as\n `model_matrix`, equal to `model_matix @ model_coefficients` where\n `model_coefficients` are the coefficients of the linear component of the\n GLM.\n response: (Batch of) vector-shaped `Tensor` with the same dtype as\n `model_matrix` where each element represents a sample's observed response\n (to the corresponding row of features).\n model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link\n function and distribution of the GLM, and thus characterizes the negative\n log-likelihood. 
Must have sufficient statistic equal to the response, that\n is, `T(y) = y`.\n\n Returns:\n grad_neg_log_likelihood: (Batch of) vector-shaped `Tensor` with the same\n shape and dtype as a single row of `model_matrix`, representing the\n gradient of the negative log likelihood of `response` given linear\n response `linear_response`.\n fim_middle: (Batch of) vector-shaped `Tensor` with the same shape and dtype\n as a single column of `model_matrix`, satisfying the equation\n `Fisher information =\n Transpose(model_matrix)\n @ diag(fim_middle)\n @ model_matrix`.\n \"\"\"\n # TODO(b/111926503): Determine whether there are some practical cases where it\n # is computationally favorable to compute the full FIM.\n arg_4, arg_5, arg_6 = arg_3(arg_1)\n\n arg_7 = (\n tf.math.is_finite(arg_6) & tf.not_equal(arg_6, 0.)\n & tf.math.is_finite(arg_5) & (arg_5 > 0.))\n\n def _mask_if_invalid(arg_8, arg_9):\n arg_9 = tf.fill(\n tf.shape(input=arg_8), value=np.array(arg_9, arg_8.dtype.as_numpy_dtype))\n return tf.where(arg_7, arg_8, arg_9)\n\n # TODO(b/111923449): Link to derivation once it's available.\n arg_10 = (arg_2 - arg_4) * _mask_if_invalid(arg_6, 1) / _mask_if_invalid(\n arg_5, np.inf)\n arg_11 = sparse_or_dense_matvecmul(\n arg_0, arg_10, adjoint_a=True)\n arg_12 = _mask_if_invalid(arg_6, 0.)**2 / _mask_if_invalid(\n arg_5, np.inf)\n return -arg_11, arg_12"} +{"_id": "doc_868", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6=None,\n arg_7=None,\n arg_8=1,\n arg_9=None,\n arg_10=None):\n r\"\"\"Fits a GLM using coordinate-wise FIM-informed proximal gradient descent.\n\n This function uses a L1- and L2-regularized, second-order quasi-Newton method\n to find maximum-likelihood parameters for the given model and observed data.\n The second-order approximations use negative Fisher information in place of\n the Hessian, that is,\n\n ```none\n FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood(\n Y | model_matrix, current value of model_coefficients)]\n ```\n\n For large, sparse data sets, `model_matrix` should be supplied as a\n `SparseTensor`.\n\n Args:\n model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor`\n where each row represents a sample's features. Has shape `[N, n]` where\n `N` is the number of data samples and `n` is the number of features per\n sample.\n response: (Batch of) vector-shaped `Tensor` with the same dtype as\n `model_matrix` where each element represents a sample's observed response\n (to the corresponding row of features).\n model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link\n function and distribution of the GLM, and thus characterizes the negative\n log-likelihood which will be minimized. Must have sufficient statistic\n equal to the response, that is, `T(y) = y`.\n model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with\n the same dtype as `model_matrix`, representing the initial values of the\n coefficients for the GLM regression. 
Has shape `[n]` where `model_matrix`\n has shape `[N, n]`.\n tolerance: scalar, `float` `Tensor` representing the tolerance for each\n optiization step; see the `tolerance` argument of `Func_one_step`.\n l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1\n regularization term.\n l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2\n regularization term.\n Default value: `None` (i.e., no L2 regularization).\n maximum_iterations: Python integer specifying maximum number of iterations\n of the outer loop of the optimizer (i.e., maximum number of calls to\n `Func_one_step`). After this many iterations of the outer loop, the\n algorithm will terminate even if the return value `model_coefficients` has\n not converged.\n Default value: `1`.\n maximum_full_sweeps_per_iteration: Python integer specifying the maximum\n number of coordinate descent sweeps allowed in each iteration.\n Default value: `1`.\n learning_rate: scalar, `float` `Tensor` representing a multiplicative factor\n used to dampen the proximal gradient descent steps.\n Default value: `None` (i.e., factor is conceptually `1`).\n name: Python string representing the name of the TensorFlow operation.\n The default name is `\"Func\"`.\n\n Returns:\n model_coefficients: (Batch of) `Tensor` of the same shape and dtype as\n `model_coefficients_start`, representing the computed model coefficients\n which minimize the regularized negative log-likelihood.\n is_converged: scalar, `bool` `Tensor` indicating whether the minimization\n procedure converged across all batches within the specified number of\n iterations. Here convergence means that an iteration of the inner loop\n (`Func_one_step`) returns `True` for its `is_converged` output\n value.\n iter: scalar, `int` `Tensor` indicating the actual number of iterations of\n the outer loop of the optimizer completed (i.e., number of calls to\n `Func_one_step` before achieving convergence).\n\n #### Example\n\n ```python\n from __future__ import print_function\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n def make_dataset(n, d, link, scale=1., dtype=np.float32):\n model_coefficients = tfd.Uniform(\n low=np.array(-1, dtype), high=np.array(1, dtype)).sample(\n d, seed=42)\n radius = np.sqrt(2.)\n model_coefficients *= radius / tf.linalg.norm(model_coefficients)\n mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d))\n model_coefficients = tf.where(mask, model_coefficients,\n tf.zeros_like(model_coefficients))\n model_matrix = tfd.Normal(\n loc=np.array(0, dtype), scale=np.array(1, dtype)).sample(\n [n, d], seed=43)\n scale = tf.convert_to_tensor(scale, dtype)\n linear_response = tf.matmul(model_matrix,\n model_coefficients[..., tf.newaxis])[..., 0]\n if link == 'linear':\n response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)\n elif link == 'probit':\n response = tf.cast(\n tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,\n dtype)\n elif link == 'logit':\n response = tfd.Bernoulli(logits=linear_response).sample(seed=44)\n else:\n raise ValueError('unrecognized true link: {}'.format(link))\n return model_matrix, response, model_coefficients, mask\n\n with tf.Session() as sess:\n x_, y_, model_coefficients_true_, _ = sess.run(make_dataset(\n n=int(1e5), d=100, link='probit'))\n\n model = tfp.glm.Bernoulli()\n model_coefficients_start = tf.zeros(x_.shape[-1], np.float32)\n\n model_coefficients, is_converged, num_iter = tfp.glm.Func(\n 
model_matrix=tf.convert_to_tensor(x_),\n response=tf.convert_to_tensor(y_),\n model=model,\n model_coefficients_start=model_coefficients_start,\n l1_regularizer=800.,\n l2_regularizer=None,\n maximum_iterations=10,\n maximum_full_sweeps_per_iteration=10,\n tolerance=1e-6,\n learning_rate=None)\n\n model_coefficients_, is_converged_, num_iter_ = sess.run([\n model_coefficients, is_converged, num_iter])\n\n print(\"is_converged:\", is_converged_)\n print(\" num_iter:\", num_iter_)\n print(\"\\nLearned / True\")\n print(np.concatenate(\n [[model_coefficients_], [model_coefficients_true_]], axis=0).T)\n\n # ==>\n # is_converged: True\n # num_iter: 1\n #\n # Learned / True\n # [[ 0. 0. ]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0.11195257 0.12484948]\n # [ 0. 0. ]\n # [ 0.05191106 0.06394956]\n # [-0.15090358 -0.15325639]\n # [-0.18187316 -0.18825999]\n # [-0.06140942 -0.07994166]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0.14474444 0.15810856]\n # [ 0. 0. ]\n # [-0.25249591 -0.24260855]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [-0.03888761 -0.06755984]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [-0.0192222 -0.04169233]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0.01434913 0.03568212]\n # [-0.11336883 -0.12873614]\n # [ 0. 0. ]\n # [-0.24496339 -0.24048163]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0.04088281 0.06565224]\n # [-0.12784363 -0.13359821]\n # [ 0.05618424 0.07396613]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [ 0. -0.01719233]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [-0.00076072 -0.03607186]\n # [ 0.21801499 0.21146794]\n # [-0.02161094 -0.04031265]\n # [ 0.0918689 0.10487888]\n # [ 0.0106154 0.03233612]\n # [-0.07817317 -0.09725142]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [-0.23725343 -0.24194022]\n # [ 0. 0. ]\n # [-0.08725718 -0.1048776 ]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [-0.02114314 -0.04145789]\n # [ 0. 0. ]\n # [ 0. 0. ]\n # [-0.02710908 -0.04590397]\n # [ 0.15293184 0.15415154]\n # [ 0.2114463 0.2088728 ]\n # [-0.10969634 -0.12368613]\n # [ 0. -0.01505797]\n # [-0.01140458 -0.03234904]\n # [ 0.16051085 0.1680062 ]\n # [ 0.09816848 0.11094204]\n ```\n\n #### References\n\n [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths\n for Generalized Linear Models via Coordinate Descent. _Journal of\n Statistical Software_, 33(1), 2010.\n https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf\n\n [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for\n L1-regularized Logistic Regression. 
_Journal of Machine Learning\n Research_, 13, 2012.\n http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf\n \"\"\"\n arg_11 = [\n arg_0,\n arg_1,\n arg_3,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n # TODO(b/111925792): Replace `tolerance` arg with something like\n # `convergence_criteria_fn`.\n arg_4,\n arg_9,\n ]\n with tf.compat.v1.name_scope(arg_10, 'Func', arg_11):\n # TODO(b/111922388): Include dispersion and offset parameters.\n def _grad_neg_log_likelihood_and_fim_fn(arg_12):\n arg_13 = sparse_or_dense_matvecmul(arg_0, arg_12)\n arg_14, arg_15 = _grad_neg_log_likelihood_and_fim(\n arg_0, arg_13, arg_1, arg_2)\n return arg_14, arg_0, arg_15\n\n return tfp.optimizer.proximal_hessian_sparse_minimize(\n _grad_neg_log_likelihood_and_fim_fn,\n x_start=arg_3,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_4=arg_4,\n arg_10=arg_10)"} +{"_id": "doc_869", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=arg_4,\n arg_5=arg_6.float32):\n \"\"\"Generate the mask for building an autoregressive dense layer.\"\"\"\n # TODO(b/67594795): Better support of dynamic shape.\n arg_8 = np.zeros([arg_2, arg_1], arg_5=arg_5.as_numpy_dtype())\n arg_9 = _gen_slices(arg_0, arg_1, arg_2, arg_3=arg_3)\n for [arg_10, arg_11] in arg_9:\n arg_8[arg_10, arg_11] = 1\n return arg_8"} +{"_id": "doc_870", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=False,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n *arg_7, # pylint: disable=keyword-arg-before-vararg\n **arg_8):\n \"\"\"A autoregressively masked dense layer. Analogous to `tf.layers.dense`.\n\n See [Germain et al. (2015)][1] for detailed explanation.\n\n Arguments:\n inputs: Tensor input.\n units: Python `int` scalar representing the dimensionality of the output\n space.\n num_blocks: Python `int` scalar representing the number of blocks for the\n MADE masks.\n exclusive: Python `bool` scalar representing whether to zero the diagonal of\n the mask, used for the first layer of a MADE.\n kernel_initializer: Initializer function for the weight matrix.\n If `None` (default), weights are initialized using the\n `tf.glorot_random_initializer`.\n reuse: Python `bool` scalar representing whether to reuse the weights of a\n previous layer by the same name.\n name: Python `str` used to describe ops managed by this function.\n *args: `tf.layers.dense` arguments.\n **kwargs: `tf.layers.dense` keyword arguments.\n\n Returns:\n Output tensor.\n\n Raises:\n NotImplementedError: if rightmost dimension of `inputs` is unknown prior to\n graph execution.\n\n #### References\n\n [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:\n Masked Autoencoder for Distribution Estimation. In _International\n Conference on Machine Learning_, 2015. 
https://arxiv.org/abs/1502.03509\n \"\"\"\n # TODO(b/67594795): Better support of dynamic shape.\n arg_9 = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(arg_0.shape, 1)[-1])\n if arg_9 is None:\n raise NotImplementedError(\n \"Rightmost dimension must be known prior to graph execution.\")\n\n arg_10 = _gen_mask(arg_2, arg_9, arg_1,\n MASK_EXCLUSIVE if arg_3 else MASK_INCLUSIVE).T\n\n if arg_4 is None:\n arg_4 = tf.compat.v1.glorot_normal_initializer()\n\n def masked_initializer(arg_11, arg_12=None, arg_13=None):\n return arg_10 * arg_4(arg_11, arg_12, arg_13)\n\n with tf.compat.v2.name_scope(arg_6 or \"Func\"):\n arg_14 = tf.compat.v1.layers.Dense(\n arg_1,\n arg_4=masked_initializer,\n kernel_constraint=lambda x: arg_10 * x,\n arg_6=arg_6,\n arg_12=dtype_util.base_dtype(arg_0.dtype),\n _scope=arg_6,\n _reuse=arg_5,\n *arg_7, # pylint: disable=keyword-arg-before-vararg\n **arg_8)\n return arg_14.apply(arg_0)"} +{"_id": "doc_871", "title": "", "text": "def Func(arg_0, arg_1=\"left-to-right\"):\n \"\"\"Returns a degree vectors for the input.\"\"\"\n if isinstance(arg_1, six.string_types):\n if arg_1 == \"left-to-right\":\n return np.arange(start=1, stop=arg_0 + 1)\n elif arg_1 == \"right-to-left\":\n return np.arange(start=arg_0, stop=0, step=-1)\n elif arg_1 == \"random\":\n arg_2 = np.arange(start=1, stop=arg_0 + 1)\n np.random.shuffle(arg_2)\n return arg_2\n elif np.all(np.sort(arg_1) == np.arange(1, arg_0 + 1)):\n return np.array(arg_1)\n\n raise ValueError(\"Invalid input order: '{}'.\".format(arg_1))"} +{"_id": "doc_872", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a list of binary mask matrices enforcing autoregressivity.\"\"\"\n return [\n # Create input->hidden and hidden->hidden masks.\n arg_1[:, np.newaxis] <= arg_2\n for arg_1, arg_2 in zip(arg_0[:-1], arg_0[1:])\n ] + [\n # Create hidden->output mask.\n arg_0[-1][:, np.newaxis] < arg_0[0]\n ]"} +{"_id": "doc_873", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a masked version of the given initializer.\"\"\"\n arg_1 = tf.keras.initializers.get(arg_1)\n def masked_initializer(arg_2, arg_3=None, arg_4=None):\n # If no `partition_info` is given, then don't pass it to `initializer`, as\n # `initializer` may be a `tf.compat.v2.initializers.Initializer` (which\n # don't accept a `partition_info` argument).\n if arg_4 is None:\n arg_5 = arg_1(arg_2, arg_3)\n else:\n arg_5 = arg_1(arg_2, arg_3, arg_4)\n return tf.cast(arg_0, arg_5.dtype) * arg_5\n return masked_initializer"} +{"_id": "doc_874", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"See tfkl.Layer.Func.\"\"\"\n with tf.compat.v2.name_scope(arg_0.name or \"AutoregressiveLayer_Func\"):\n arg_1 = tf.convert_to_tensor(value=arg_1, dtype=arg_0.dtype, name=\"x\")\n arg_2 = tf.shape(input=arg_1)\n # TODO(b/67594795): Better support for dynamic shapes.\n if tensorshape_util.rank(arg_1.shape) == 1:\n arg_1 = arg_1[tf.newaxis, ...]\n return tf.reshape(arg_0._network(arg_1),\n tf.concat([arg_2, [arg_0._params]], axis=0))"} +{"_id": "doc_875", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"Sample a multinomial.\n\n The batch shape is given by broadcasting num_trials with\n remove_last_dimension(logits).\n\n Args:\n num_samples: Python int or singleton integer Tensor: number of multinomial\n samples to draw.\n num_classes: Python int or singleton integer Tensor: number of classes.\n logits: Floating Tensor with last dimension k, of (unnormalized) logit\n probabilities per class.\n num_trials: Tensor of 
number of categorical trials each multinomial consists\n of. num_trials[..., tf.newaxis] must broadcast with logits.\n dtype: dtype at which to emit samples.\n seed: Random seed.\n\n Returns:\n samples: Tensor of given dtype and shape [n] + batch_shape + [k].\n \"\"\"\n with tf.name_scope(\"multinomial.Func\"):\n # broadcast the num_trials and logits to same shape\n arg_3 = tf.ones_like(\n arg_2[..., 0], arg_4=arg_3.dtype) * arg_3\n arg_2 = tf.ones_like(\n arg_3[..., tf.newaxis], arg_4=arg_2.dtype) * arg_2\n\n # flatten the total_count and logits\n # flat_logits has shape [B1B2...Bm, num_classes]\n arg_6 = tf.reshape(arg_2, [-1, arg_1])\n arg_7 = arg_0 * tf.reshape(arg_3, [-1]) # [B1B2...Bm]\n\n # Computes each logits and num_trials situation by map_fn.\n\n # Using just one batch tf.random.categorical call doesn't work because that\n # requires num_trials to be the same across all members of the batch of\n # logits. This restriction makes sense for tf.random.categorical because\n # for it, num_trials is part of the returned shape. However, the\n # multinomial sampler does not need that restriction, because it sums out\n # exactly that dimension.\n\n # One possibility would be to draw a batch categorical whose sample count is\n # max(num_trials) and mask out the excess ones. However, if the elements of\n # num_trials vary widely, this can be wasteful of memory.\n\n # TODO(b/123763054, b/112152209): Revisit the possibility of writing this\n # with a batch categorical followed by batch unsorted_segment_sum, once both\n # of those work and are memory-efficient enough.\n def _sample_one_batch_member(arg_8):\n arg_2, arg_9 = arg_8[0], arg_8[1] # [K], []\n # x has shape [1, num_cat_samples = num_samples * num_trials]\n arg_10 = tf.random.categorical(\n arg_2[tf.newaxis, ...], arg_9, arg_5=arg_5)\n arg_10 = tf.reshape(arg_10, shape=[arg_0, -1]) # [num_samples, num_trials]\n arg_10 = tf.one_hot(\n arg_10, depth=arg_1) # [num_samples, num_trials, num_classes]\n arg_10 = tf.reduce_sum(input_tensor=arg_10, axis=-2) # [num_samples, num_classes]\n return tf.cast(arg_10, arg_4=arg_4)\n\n arg_10 = tf.map_fn(\n _sample_one_batch_member, [arg_6, arg_7],\n arg_4=arg_4) # [B1B2...Bm, num_samples, num_classes]\n\n # reshape the results to proper shape\n arg_10 = tf.transpose(a=arg_10, perm=[1, 0, 2])\n arg_11 = tf.concat([[arg_0],\n tf.shape(input=arg_3), [arg_1]],\n axis=0)\n arg_10 = tf.reshape(arg_10, arg_11)\n\n return arg_10"} +{"_id": "doc_876", "title": "", "text": "def Func(arg_0):\n \"\"\"Build a zero-dimensional MVNDiag object.\"\"\"\n arg_1 = tfd.MultivariateNormalDiag(\n scale_diag=tf.ones([0], arg_0=arg_0))\n arg_1.covariance = lambda: arg_1.variance()[..., tf.newaxis]\n return arg_1"} +{"_id": "doc_877", "title": "", "text": "def Func(arg_0):\n \"\"\"Computes the number of edges on longest path from node to root.\"\"\"\n def _explore(arg_1):\n if arg_1.depth < 0:\n arg_1.depth = ((1 + max([-1] + [_explore(arg_3[u])\n for u in arg_1.parents]))\n if arg_1.parents else 0)\n return arg_1.depth\n arg_3 = {k: _Node(k, arg_1) for k, arg_1 in arg_0.items()}\n for arg_1 in arg_3.values():\n _explore(arg_1)\n return arg_3"} +{"_id": "doc_878", "title": "", "text": "def Func(arg_0):\n \"\"\"Creates lists of callables suitable for JDSeq.\"\"\"\n def _make(arg_1, arg_2):\n if arg_2 is None:\n return lambda *_: arg_1\n if not arg_2:\n return lambda *_: arg_1()\n def _fn(*arg_3):\n arg_4 = dict(zip(arg_2, reversed(arg_3[-len(arg_2):])))\n arg_4.pop('_', None)\n return arg_1(**arg_4)\n return _fn\n arg_0 = 
_convert_to_dict(arg_0)\n arg_5 = {k: (None if distribution_util.is_distribution_instance(v)\n else joint_distribution_sequential._get_required_args(v)) # pylint: disable=protected-access\n for k, v in arg_0.items()}\n arg_5 = _best_order(arg_5)\n arg_6, arg_7 = zip(*arg_5)\n arg_7 = tuple(None if a is None else tuple(a) for a in arg_7)\n arg_8 = tuple(_make(arg_0[name], parents)\n for (name, parents) in arg_5)\n arg_1 = tuple(arg_0.get(n) for n in arg_6)\n return arg_1, arg_8, arg_7, arg_6"} +{"_id": "doc_879", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=1.,\n arg_4='Func'):\n \"\"\"Variational loss for the VGP.\n\n Given `observations` and `observation_index_points`, compute the\n negative variational lower bound as specified in [Hensman, 2013][1].\n\n Args:\n observations: `float` `Tensor` representing collection, or batch of\n collections, of observations corresponding to\n `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which\n must be brodcastable with the batch and example shapes of\n `observation_index_points`. The batch shape `[b1, ..., bB]` must be\n broadcastable with the shapes of all other batched parameters\n (`kernel.batch_shape`, `observation_index_points`, etc.).\n observation_index_points: `float` `Tensor` representing finite (batch of)\n vector(s) of points where observations are defined. Shape has the\n form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e1` is the number\n (size) of index points in each batch (we denote it `e1` to distinguish\n it from the numer of inducing index points, denoted `e2` below). If\n set to `None` uses `index_points` as the origin for observations.\n Default value: None.\n kl_weight: Amount by which to scale the KL divergence loss between prior\n and posterior.\n Default value: 1.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"GaussianProcess\".\n Returns:\n loss: Scalar tensor representing the negative variational lower bound.\n Can be directly used in a `tf.Optimizer`.\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n\n #### References\n\n [1]: Hensman, J., Lawrence, N. \"Gaussian Processes for Big Data\", 2013\n https://arxiv.org/abs/1309.6835\n \"\"\"\n\n with tf.name_scope(arg_4 or 'variational_gp_loss'):\n if arg_2 is None:\n arg_2 = arg_0._index_points\n arg_2 = tf.convert_to_tensor(\n value=arg_2, dtype=arg_0._dtype,\n arg_4='observation_index_points')\n arg_1 = tf.convert_to_tensor(\n value=arg_1, dtype=arg_0._dtype, arg_4='observations')\n arg_3 = tf.convert_to_tensor(\n value=arg_3, dtype=arg_0._dtype,\n arg_4='kl_weight')\n\n # The variational loss is a negative ELBO. The ELBO can be broken down\n # into three terms:\n # 1. a likelihood term\n # 2. 
a trace term arising from the covariance of the posterior predictive\n\n arg_5 = arg_0.kernel.matrix(arg_0._inducing_index_points,\n arg_2)\n\n arg_6 = tf.linalg.LinearOperatorFullMatrix(arg_5)\n arg_7 = (arg_0._mean_fn(arg_2) +\n arg_6.matvec(arg_0._kzz_inv_varloc, adjoint=True))\n\n arg_8 = independent.Independent(\n normal.Normal(\n arg_7=arg_7,\n scale=tf.sqrt(arg_0._observation_noise_variance + arg_0._jitter),\n arg_4='NormalLikelihood'),\n reinterpreted_batch_ndims=1)\n arg_9 = arg_8.log_prob(arg_1)\n\n arg_10 = tf.linalg.LinearOperatorLowerTriangular(arg_0._chol_kzz)\n arg_11 = arg_10.solve(arg_5)\n arg_12 = arg_10.solve(arg_11, adjoint=True)\n\n arg_13 = tf.linalg.diag_part(\n arg_0.kernel.matrix(\n arg_2, arg_2))\n arg_14 = (\n tf.reduce_sum(input_tensor=arg_13, axis=-1) -\n tf.reduce_sum(input_tensor=arg_11 ** 2, axis=[-2, -1]))\n\n # Tr(SB)\n # where S = A A.T, A = variational_inducing_observations_scale\n # and B = Kzz^-1 Kzx Kzx.T Kzz^-1\n #\n # Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)\n # = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)\n # = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2\n arg_15 = tf.reduce_sum(\n input_tensor=(\n arg_0._variational_inducing_observations_posterior.scale.matmul(\n arg_12) ** 2),\n axis=[-2, -1])\n\n arg_16 = (.5 * (arg_14 + arg_15) /\n arg_0._observation_noise_variance)\n\n arg_17 = gaussian_process.GaussianProcess(\n kernel=arg_0._kernel,\n mean_fn=arg_0._mean_fn,\n index_points=arg_0._inducing_index_points,\n observation_noise_variance=arg_0._observation_noise_variance)\n\n arg_18 = arg_3 * kullback_leibler.kl_divergence(\n arg_0._variational_inducing_observations_posterior,\n arg_17)\n\n arg_19 = (arg_9 - arg_16 - arg_18)\n\n return -tf.reduce_mean(input_tensor=arg_19)"} +{"_id": "doc_880", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None,\n arg_6=1e-6,\n arg_7=None):\n \"\"\"Model selection for optimal variational hyperparameters.\n\n Given the full training set (parameterized by `observations` and\n `observation_index_points`), compute the optimal variational\n location and scale for the VGP. This is based of the method suggested\n in [Titsias, 2009][1].\n\n Args:\n kernel: `PositiveSemidefiniteKernel`-like instance representing the\n GP's covariance function.\n inducing_index_points: `float` `Tensor` of locations of inducing points in\n the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just\n like `observation_index_points`. The batch shape components needn't be\n identical to those of `observation_index_points`, but must be broadcast\n compatible with them.\n observation_index_points: `float` `Tensor` representing finite (batch of)\n vector(s) of points where observations are defined. Shape has the\n form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e1` is the number\n (size) of index points in each batch (we denote it `e1` to distinguish\n it from the numer of inducing index points, denoted `e2` below).\n observations: `float` `Tensor` representing collection, or batch of\n collections, of observations corresponding to\n `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which\n must be brodcastable with the batch and example shapes of\n `observation_index_points`. 
The batch shape `[b1, ..., bB]` must be\n broadcastable with the shapes of all other batched parameters\n (`kernel.batch_shape`, `observation_index_points`, etc.).\n observation_noise_variance: `float` `Tensor` representing the variance\n of the noise in the Normal likelihood distribution of the model. May be\n batched, in which case the batch shape must be broadcastable with the\n shapes of all other batched parameters (`kernel.batch_shape`,\n `index_points`, etc.).\n Default value: `0.`\n mean_fn: Python `callable` that acts on index points to produce a (batch\n of) vector(s) of mean values at those index points. Takes a `Tensor` of\n shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is\n (broadcastable with) `[b1, ..., bB]`. Default value: `None` implies\n constant zero function.\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n Default value: `1e-6`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"Func\".\n Returns:\n loc, scale: Tuple representing the variational location and scale.\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n\n #### References\n\n [1]: Titsias, M. \"Variational Model Selection for Sparse Gaussian Process\n Regression\", 2009.\n http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf\n \"\"\"\n\n with tf.name_scope(arg_7 or 'Func'):\n arg_8 = dtype_util.common_dtype(\n [arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_6], tf.float32)\n\n arg_1 = tf.convert_to_tensor(\n value=arg_1,\n arg_8=arg_8, arg_7='inducing_index_points')\n arg_2 = tf.convert_to_tensor(\n value=arg_2, arg_8=arg_8,\n arg_7='observation_index_points')\n arg_3 = tf.convert_to_tensor(\n value=arg_3, arg_8=arg_8, arg_7='observations')\n arg_4 = tf.convert_to_tensor(\n value=arg_4,\n arg_8=arg_8,\n arg_7='observation_noise_variance')\n arg_6 = tf.convert_to_tensor(\n value=arg_6, arg_8=arg_8, arg_7='jitter')\n\n # Default to a constant zero function.\n if arg_5 is None:\n arg_5 = lambda x: tf.zeros([1], arg_8=arg_8)\n else:\n if not callable(arg_5):\n raise ValueError('`mean_fn` must be a Python callable')\n\n # z are the inducing points and x are the observation index points.\n arg_9 = arg_0.matrix(arg_1, arg_1)\n arg_10 = arg_0.matrix(arg_1, arg_2)\n\n arg_11 = tf.math.reciprocal(arg_4)\n\n arg_12 = _add_diagonal_shift(\n arg_9 + arg_11 * tf.matmul(arg_10, arg_10, adjoint_b=True),\n arg_6)\n\n arg_13 = tf.linalg.cholesky(arg_12)\n\n arg_14 = tf.linalg.LinearOperatorFullMatrix(arg_10)\n arg_15 = arg_14.matvec(\n arg_3 - arg_5(arg_2))\n arg_16 = tf.linalg.LinearOperatorFullMatrix(arg_9)\n arg_17 = (arg_5(arg_1) +\n arg_11 * arg_16.matvec(\n _solve_cholesky_factored_system_vec(arg_13, arg_15)))\n\n arg_18 = tf.linalg.LinearOperatorLowerTriangular(\n arg_13)\n arg_19 = arg_18.solve(arg_9)\n\n return arg_17, arg_19"} +{"_id": "doc_881", "title": "", "text": "def Func(arg_0):\n \"\"\"Build utility method to compute whether the season is changing.\"\"\"\n arg_1 = np.sum(arg_0)\n arg_2 = np.cumsum(np.ravel(arg_0)) - 1\n def is_last_day_of_season(arg_3):\n arg_4 = dist_util.maybe_get_static_value(arg_3)\n if arg_4 is not None: # static case\n arg_5 = arg_4 % arg_1\n return any(arg_5 == arg_2)\n else:\n arg_5 = tf.math.floormod(arg_3, arg_1)\n return tf.reduce_any(\n input_tensor=tf.equal(arg_5, arg_2))\n return is_last_day_of_season"} +{"_id": "doc_882", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2,\n arg_3=None, arg_4=None):\n 
\"\"\"Build a function computing transitions for a seasonal effect model.\"\"\"\n\n with tf.compat.v1.name_scope('Func'):\n # If the season is changing, the transition matrix permutes the latent\n # state to shift all seasons up by a dimension, and sends the current\n # season's effect to the bottom.\n arg_5 = np.concatenate(\n [np.arange(1, arg_0), [0]], axis=0)\n arg_6 = tf.constant(\n np.eye(arg_0)[arg_5], arg_2=arg_2)\n\n # Optionally transform the transition matrix into a reparameterized space,\n # enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.\n if arg_3 is not None:\n arg_6 = tf.matmul(\n arg_3,\n tf.matmul(arg_6, arg_4))\n\n arg_7 = tf.eye(\n tf.shape(input=arg_6)[-1], arg_2=arg_2)\n\n def seasonal_transition_matrix(arg_8):\n return tf.linalg.LinearOperatorFullMatrix(\n matrix=dist_util.pick_scalar_condition(\n arg_1(arg_8),\n arg_6,\n arg_7))\n\n return seasonal_transition_matrix"} +{"_id": "doc_883", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2):\n \"\"\"Build the transition noise model for a SeasonalStateSpaceModel.\"\"\"\n\n # If the current season has just ended, increase the variance of its effect\n # following drift_scale. (the just-ended seasonal effect will always be the\n # bottom element of the vector). Otherwise, do nothing.\n arg_3 = tf.stack(\n [tf.zeros_like(arg_0)] * (arg_1 - 1) + [arg_0],\n axis=-1)\n def seasonal_transition_noise(arg_4):\n arg_5 = dist_util.pick_scalar_condition(\n arg_2(arg_4),\n arg_3,\n tf.zeros_like(arg_3))\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(arg_1, dtype=arg_0.dtype),\n scale_diag=arg_5)\n return seasonal_transition_noise"} +{"_id": "doc_884", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2):\n \"\"\"Returns `True` if given observation data is empty.\n\n Emptiness means either\n 1. Both `observation_index_points` and `observations` are `None`, or\n 2. the \"number of observations\" shape is 0. The shape of\n `observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the\n number of observations and the `f`s are feature dims. Thus, we look at the\n shape element just to the left of the leftmost feature dim. If that shape is\n zero, we consider the data empty.\n\n We don't check the shape of observations; validations are checked elsewhere in\n the calling code, to ensure these shapes are consistent.\n\n Args:\n feature_ndims: the number of feature dims, as reported by the GP kernel.\n observation_index_points: the observation data locations in the index set.\n observations: the observation data.\n\n Returns:\n is_empty: True if the data were deemed to be empty.\n \"\"\"\n # If both input locations and observations are `None`, we consider this\n # \"empty\" observation data.\n if arg_1 is None and arg_2 is None:\n return True\n arg_3 = tf.compat.dimension_value(\n arg_1.shape[-(arg_0 + 1)])\n if arg_3 is not None and arg_3 == 0:\n return True\n return False"} +{"_id": "doc_885", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2):\n \"\"\"Ensure that observation data and locations have consistent shapes.\n\n This basically means that the batch shapes are broadcastable. 
We can only\n ensure this when those shapes are fully statically defined.\n\n\n Args:\n kernel: The GP kernel.\n observation_index_points: the observation data locations in the index set.\n observations: the observation data.\n\n Raises:\n ValueError: if the observations' batch shapes are not broadcastable.\n \"\"\"\n # Check that observation index points and observation counts broadcast.\n arg_3 = arg_0.feature_ndims\n if (tensorshape_util.is_fully_defined(\n arg_1.shape[:-arg_3]) and\n tensorshape_util.is_fully_defined(arg_2.shape)):\n arg_4 = arg_1.shape[:-arg_3]\n arg_5 = arg_2.shape\n try:\n tf.broadcast_static_shape(arg_4, arg_5)\n except ValueError:\n # Re-raise with our own more contextual error message.\n raise ValueError(\n 'Observation index point and observation counts are not '\n 'broadcastable: {} and {}, respectively.'.format(\n arg_4, arg_5))"} +{"_id": "doc_886", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"float\"):\n \"\"\"\n Add a learning rate scheduler to the contained `schedules`\n\n :param scheduler: learning rate scheduler to be Func\n :param max_iteration: iteration numbers this scheduler will run\n \"\"\"\n return callBigDlFunc(arg_3, \"FuncScheduler\", arg_0.value, arg_1, arg_2)"} +{"_id": "doc_887", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3=True):\n \"\"\"\n Configure checkpoint settings.\n\n\n :param checkpoint_trigger: the interval to write snapshots\n :param checkpoint_path: the path to write snapshots into\n :param isOverWrite: whether to overwrite existing snapshots in path.default is True\n \"\"\"\n if not os.path.exists(arg_2):\n mkpath(arg_2)\n callBigDlFunc(arg_0.bigdl_type, \"setCheckPoint\", arg_0.value,\n arg_1, arg_2, arg_3)"} +{"_id": "doc_888", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Configure constant clipping settings.\n\n\n :param min_value: the minimum value to clip by\n :param max_value: the maxmimum value to clip by\n \"\"\"\n callBigDlFunc(arg_0.bigdl_type, \"setConstantClip\", arg_0.value, arg_1, arg_2)"} +{"_id": "doc_889", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Do an optimization.\n \"\"\"\n arg_1 = callJavaFunc(arg_0.value.Func)\n from bigdl.nn.layer import Layer\n return Layer.of(arg_1)"} +{"_id": "doc_890", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set validation summary. A ValidationSummary object contains information\n necessary for the optimizer to know how often the logs are recorded,\n where to store the logs and how to retrieve them, etc. For details,\n refer to the docs of ValidationSummary.\n\n\n :param summary: a ValidationSummary object\n\n\n \"\"\"\n callBigDlFunc(arg_0.bigdl_type, \"setValSummary\", arg_0.value,\n arg_1)\n return arg_0"} +{"_id": "doc_891", "title": "", "text": "def Func(arg_0=\"./data/news20/\"):\n \"\"\"\n Parse or download news20 if source_dir is empty.\n\n :param source_dir: The directory storing news data.\n :return: A list of (tokens, label)\n \"\"\"\n arg_1 = download_news20(arg_0)\n arg_2 = [] # list of text samples\n arg_3 = 0\n for arg_4 in sorted(os.listdir(arg_1)):\n arg_5 = os.path.join(arg_1, arg_4)\n arg_3 += 1\n if os.path.isdir(arg_5):\n for arg_6 in sorted(os.listdir(arg_5)):\n if arg_6.isdigit():\n arg_7 = os.path.join(arg_5, arg_6)\n if sys.version_info < (3,):\n arg_8 = open(arg_7)\n else:\n arg_8 = open(arg_7, encoding='latin-1')\n arg_9 = arg_8.read()\n arg_2.append((arg_9, arg_3))\n arg_8.close()\n\n print('Found %s texts.' 
% len(arg_2))\n return arg_2"} +{"_id": "doc_892", "title": "", "text": "def Func(arg_0=\"./data/news20/\", arg_1=100):\n \"\"\"\n Parse or download the pre-trained glove word2vec if source_dir is empty.\n\n :param source_dir: The directory storing the pre-trained word2vec\n :param dim: The dimension of a vector\n :return: A dict mapping from word to vector\n \"\"\"\n arg_2 = download_glove_w2v(arg_0)\n arg_3 = os.path.join(arg_2, \"glove.6B.%sd.txt\" % arg_1)\n if sys.version_info < (3,):\n arg_4 = open(arg_3)\n else:\n arg_4 = open(arg_3, encoding='latin-1')\n arg_5 = {}\n for arg_6 in arg_4.readlines():\n arg_7 = arg_6.split(\" \")\n arg_5[arg_7[0]] = [float(i) for i in arg_7[1:]]\n arg_4.close()\n return arg_5"} +{"_id": "doc_893", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=32, arg_4=10, arg_5=None, arg_6=True):\n \"\"\"\n Train a model for a fixed number of epochs on a dataset.\n\n # Arguments\n x: Input data. A Numpy array or RDD of Sample or Image DataSet.\n y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet.\n batch_size: Number of samples per gradient update.\n nb_epoch: Number of iterations to train.\n validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.\n Or RDD of Sample. Default is None if no validation is involved.\n distributed: Boolean. Whether to train the model in distributed mode or local mode.\n Default is True. In local mode, x and y must both be Numpy arrays.\n \"\"\"\n if arg_6:\n if isinstance(arg_1, np.ndarray) and isinstance(arg_2, np.ndarray):\n arg_7 = to_sample_rdd(arg_1, arg_2)\n if arg_5:\n arg_5 = to_sample_rdd(*arg_5)\n elif (isinstance(arg_1, RDD) or isinstance(arg_1, DataSet)) and not arg_2:\n arg_7 = arg_1\n else:\n raise TypeError(\"Unsupported training data type: %s\" % type(arg_1))\n callBigDlFunc(arg_0.bigdl_type, \"Func\",\n arg_0.value,\n arg_7,\n arg_3,\n arg_4,\n arg_5)\n else:\n if arg_5:\n arg_8 = [JTensor.from_ndarray(arg_1) for arg_1 in to_list(arg_5[0])]\n arg_9 = JTensor.from_ndarray(arg_5[1])\n else:\n arg_8, arg_9 = None, None\n callBigDlFunc(arg_0.bigdl_type, \"Func\",\n arg_0.value,\n [JTensor.from_ndarray(arg_1) for arg_1 in to_list(arg_1)],\n JTensor.from_ndarray(arg_2),\n arg_3,\n arg_4,\n arg_8,\n arg_9,\n multiprocessing.cpu_count())"} +{"_id": "doc_894", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=32):\n \"\"\"\n Evaluate a model on a given dataset in distributed mode.\n\n # Arguments\n x: Input data. A Numpy array or RDD of Sample.\n y: Labels. A Numpy array. 
Default is None if x is already RDD of Sample.\n batch_size: Number of samples per gradient update.\n \"\"\"\n if isinstance(arg_1, np.ndarray) and isinstance(arg_2, np.ndarray):\n arg_4 = to_sample_rdd(arg_1, arg_2)\n elif isinstance(arg_1, RDD) and not arg_2:\n arg_4 = arg_1\n else:\n raise TypeError(\"Unsupported evaluation data type: %s\" % type(arg_1))\n return callBigDlFunc(arg_0.bigdl_type, \"Func\",\n arg_0.value,\n arg_4,\n arg_3)"} +{"_id": "doc_895", "title": "", "text": "def Func(arg_0, arg_1=\"train\", arg_2=\"/tmp/mnist\"):\n \"\"\"\n Get mnist dataset and parallelize into RDDs.\n Data would be downloaded automatically if it doesn't present at the specific location.\n\n :param sc: SparkContext.\n :param data_type: \"train\" for training data and \"test\" for testing data.\n :param location: Location to store mnist dataset.\n :return: RDD of (features: ndarray, label: ndarray).\n \"\"\"\n (arg_3, arg_4) = mnist.read_data_sets(arg_2, arg_1)\n arg_3 = arg_0.parallelize(arg_3)\n arg_4 = arg_0.parallelize(arg_4 + 1) # Target start from 1 in BigDL\n arg_5 = arg_3.zip(arg_4)\n return arg_5"} +{"_id": "doc_896", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Preprocess mnist dataset.\n Normalize and transform into Sample of RDDs.\n \"\"\"\n arg_2 = get_mnist(arg_0, \"train\", arg_1.dataPath)\\\n .map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),\n rec_tuple[1]))\\\n .map(lambda t: Sample.from_ndarray(t[0], t[1]))\n arg_3 = get_mnist(arg_0, \"test\", arg_1.dataPath)\\\n .map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TEST_MEAN, mnist.TEST_STD),\n rec_tuple[1]))\\\n .map(lambda t: Sample.from_ndarray(t[0], t[1]))\n return arg_2, arg_3"} +{"_id": "doc_897", "title": "", "text": "def Func(arg_0):\n \"\"\"\n When to end the optimization based on input option.\n \"\"\"\n if arg_0.endTriggerType.lower() == \"epoch\":\n return MaxEpoch(arg_0.endTriggerNum)\n else:\n return MaxIteration(arg_0.endTriggerNum)"} +{"_id": "doc_898", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Set validation and checkpoint for distributed optimizer.\n \"\"\"\n arg_0.set_validation(\n batch_size=arg_2.batchSize,\n val_rdd=arg_1,\n trigger=EveryEpoch(),\n val_method=[Top1Accuracy()]\n )\n arg_0.set_checkpoint(EveryEpoch(), arg_2.checkpointPath)"} +{"_id": "doc_899", "title": "", "text": "def Func(arg_0):\n \"\"\" Return the broadcasted Func\n \"\"\"\n if not hasattr(arg_0, \"_Func\") and arg_0._path is not None:\n arg_0._Func = arg_0._load(arg_0._path)\n return arg_0._Func"} +{"_id": "doc_900", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\" Call Java Function \"\"\"\n arg_2 = _get_gateway()\n arg_1 = [_py2java(arg_2, a) for a in arg_1]\n arg_3 = arg_0(*arg_1)\n return _java2py(arg_2, arg_3)"} +{"_id": "doc_901", "title": "", "text": "def Func(arg_0):\n \"\"\" Return a JavaRDD of Object by unpickling\n\n\n It will convert each Python object into Java object by Pyrolite, whenever\n the RDD is serialized in batch or not.\n \"\"\"\n arg_0 = arg_0._reserialize(AutoBatchedSerializer(PickleSerializer()))\n return \\\n arg_0.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(\n arg_0._jrdd, True)"} +{"_id": "doc_902", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Convert Python object into Java \"\"\"\n if isinstance(arg_1, RDD):\n arg_1 = _to_java_object_rdd(arg_1)\n elif isinstance(arg_1, DataFrame):\n arg_1 = arg_1._jdf\n elif isinstance(arg_1, SparkContext):\n arg_1 = arg_1._jsc\n elif isinstance(arg_1, (list, 
tuple)):\n arg_1 = ListConverter().convert([Func(arg_0, x) for x in arg_1],\n arg_0._gateway_client)\n elif isinstance(arg_1, dict):\n arg_2 = {}\n for (arg_3, arg_4) in arg_1.items():\n arg_2[arg_3] = Func(arg_0, arg_4)\n arg_1 = MapConverter().convert(arg_2, arg_0._gateway_client)\n elif isinstance(arg_1, JavaValue):\n arg_1 = arg_1.value\n elif isinstance(arg_1, JavaObject):\n pass\n elif isinstance(arg_1, (int, long, float, bool, bytes, unicode)):\n pass\n else:\n arg_5 = bytearray(PickleSerializer().dumps(arg_1))\n arg_1 = arg_0.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(arg_5)\n return arg_1"} +{"_id": "doc_903", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Convert to a bigdl activation layer\n given the name of the activation as a string \"\"\"\n import bigdl.nn.layer as BLayer\n arg_2 = None\n arg_0 = arg_0.lower()\n if arg_0 == \"tanh\":\n arg_2 = BLayer.Tanh()\n elif arg_0 == \"sigmoid\":\n arg_2 = BLayer.Sigmoid()\n elif arg_0 == \"hard_sigmoid\":\n arg_2 = BLayer.HardSigmoid()\n elif arg_0 == \"relu\":\n arg_2 = BLayer.ReLU()\n elif arg_0 == \"softmax\":\n arg_2 = BLayer.SoftMax()\n elif arg_0 == \"softplus\":\n arg_2 = BLayer.SoftPlus(beta=1.0)\n elif arg_0 == \"softsign\":\n arg_2 = BLayer.SoftSign()\n elif arg_0 == \"linear\":\n arg_2 = BLayer.Identity()\n else:\n raise Exception(\"Unsupported activation type: %s\" % arg_0)\n if not arg_1:\n arg_2.set_name(arg_1)\n return arg_2"} +{"_id": "doc_904", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"float\"):\n \"\"\"\n Convert a ndarray to a DenseTensor which would be used in Java side.\n\n >>> import numpy as np\n >>> from bigdl.util.common import JTensor\n >>> from bigdl.util.common import callBigDlFunc\n >>> np.random.seed(123)\n >>> data = np.random.uniform(0, 1, (2, 3)).astype(\"float32\")\n >>> result = JTensor.Func(data)\n >>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])\n >>> expected_shape = np.array([2, 3])\n >>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)\n >>> np.testing.assert_allclose(result.shape, expected_shape)\n >>> data_back = result.to_ndarray()\n >>> (data == data_back).all()\n True\n >>> tensor1 = callBigDlFunc(\"float\", \"testTensor\", JTensor.Func(data)) # noqa\n >>> array_from_tensor = tensor1.to_ndarray()\n >>> (array_from_tensor == data).all()\n True\n \"\"\"\n if arg_1 is None:\n return None\n assert isinstance(arg_1, np.ndarray), \\\n \"input should be a np.ndarray, not %s\" % type(arg_1)\n return arg_0(arg_1,\n arg_1.shape if arg_1.shape else (arg_1.size),\n arg_2)"} +{"_id": "doc_905", "title": "", "text": "def Func(arg_0):\n \"\"\"\n get label as ndarray from ImageFeature\n \"\"\"\n arg_1 = callBigDlFunc(arg_0.bigdl_type, \"imageFeatureToLabelTensor\", arg_0.value)\n return arg_1.to_ndarray()"} +{"_id": "doc_906", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"float\"):\n \"\"\"\n Read parquet file as DistributedImageFrame\n \"\"\"\n return DistributedImageFrame(jvalue=callBigDlFunc(arg_3, \"readParquet\", arg_1, arg_2))"} +{"_id": "doc_907", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4 = 1, arg_5=\"float\"):\n \"\"\"\n write ImageFrame as parquet file\n \"\"\"\n return callBigDlFunc(arg_5, \"writeParquet\", arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_908", "title": "", "text": "def Func(arg_0, arg_1=\"floats\", arg_2=True):\n \"\"\"\n get image from ImageFrame\n \"\"\"\n return arg_0.image_frame.Func(arg_1, arg_2)"} +{"_id": 
"doc_909", "title": "", "text": "def Func(arg_0, arg_1=\"floats\", arg_2=True):\n \"\"\"\n get image list from ImageFrame\n \"\"\"\n arg_3 = callBigDlFunc(arg_0.bigdl_type,\n \"localImageFrameToImageTensor\", arg_0.value, arg_1, arg_2)\n return map(lambda tensor: tensor.to_ndarray(), arg_3)"} +{"_id": "doc_910", "title": "", "text": "def Func(arg_0):\n \"\"\"\n get label rdd from ImageFrame\n \"\"\"\n arg_1 = callBigDlFunc(arg_0.bigdl_type, \"distributedImageFrameToLabelTensorRdd\", arg_0.value)\n return arg_1.map(lambda tensor: tensor.to_ndarray())"} +{"_id": "doc_911", "title": "", "text": "def Func(arg_0, arg_1=\"predict\"):\n \"\"\"\n get prediction rdd from ImageFrame\n \"\"\"\n arg_2 = callBigDlFunc(arg_0.bigdl_type, \"distributedImageFrameToPredict\", arg_0.value, arg_1)\n return arg_2.map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None))"} +{"_id": "doc_912", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=False):\n \"\"\"Generates output Funcions for the input samples,\n processing the samples in a batched way.\n\n # Arguments\n x: the input data, as a Numpy array or list of Numpy array for local mode.\n as RDD[Sample] for distributed mode\n is_distributed: used to control run in local or cluster. the default value is False\n # Returns\n A Numpy array or RDD[Sample] of Funcions.\n \"\"\"\n if arg_2 or arg_3:\n raise Exception(\"we don't support batch_size or verbose for now\")\n if arg_4:\n if isinstance(arg_1, np.ndarray):\n arg_5 = to_sample_rdd(arg_1, np.zeros([arg_1.shape[0]]))\n # np.asarray(self.bmodel.Func(x_rdd).collect())\n elif isinstance(arg_1, RDD):\n arg_5 = arg_1\n return arg_0.bmodel.Func(arg_5)\n else:\n if isinstance(arg_1, np.ndarray):\n return arg_0.bmodel.Func_local(arg_1)\n raise Exception(\"not supported type: %s\" % arg_1)"} +{"_id": "doc_913", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Apply the Funcer to the images in \"inputCol\" and store the Funced result\n into \"outputCols\"\n \"\"\"\n arg_0._transfer_params_to_java()\n return callBigDlFunc(arg_0.bigdl_type, \"dlImageTransform\", arg_0.value, arg_1)"} +{"_id": "doc_914", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Save a Keras model definition to JSON with given path\n \"\"\"\n arg_2 = arg_0.to_json()\n with open(arg_1, \"w\") as json_file:\n json_file.write(arg_2)"} +{"_id": "doc_915", "title": "", "text": "def Func():\n \"\"\"\n Define a convnet model in Keras 1.2.2\n \"\"\"\n from keras.models import Sequential\n from keras.layers import Dense, Dropout, Activation, Flatten\n from keras.layers import Convolution2D, MaxPooling2D\n\n arg_0 = Sequential()\n arg_0.add(Convolution2D(32, 3, 3, border_mode='valid',\n input_shape=input_shape))\n arg_0.add(Activation('relu'))\n arg_0.add(Convolution2D(32, 3, 3))\n arg_0.add(Activation('relu'))\n arg_0.add(MaxPooling2D(pool_size=(2, 2)))\n arg_0.add(Dropout(0.25))\n arg_0.add(Flatten())\n arg_0.add(Dense(128))\n arg_0.add(Activation('relu'))\n arg_0.add(Dropout(0.5))\n arg_0.add(Dense(10))\n arg_0.add(Activation('softmax'))\n return arg_0"} +{"_id": "doc_916", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set weights for this layer\n\n :param weights: a list of numpy arrays which represent weight and bias\n :return:\n\n >>> linear = Linear(3,2)\n creating: createLinear\n >>> linear.Func([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])\n >>> weights = linear.get_weights()\n >>> weights[0].shape == (2,3)\n True\n >>> 
np.testing.assert_allclose(weights[0][0], np.array([1., 2., 3.]))\n >>> np.testing.assert_allclose(weights[1], np.array([7., 8.]))\n >>> relu = ReLU()\n creating: createReLU\n >>> from py4j.protocol import Py4JJavaError\n >>> try:\n ... relu.Func([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])\n ... except Py4JJavaError as err:\n ... print(err.java_exception)\n ...\n java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias\n >>> relu.get_weights()\n The layer does not have weight/bias\n >>> add = Add(2)\n creating: createAdd\n >>> try:\n ... add.Func([np.array([7,8]), np.array([1,2])])\n ... except Py4JJavaError as err:\n ... print(err.java_exception)\n ...\n java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2\n >>> cAdd = CAdd([4, 1])\n creating: createCAdd\n >>> cAdd.Func(np.ones([4, 1]))\n >>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()\n True\n \"\"\"\n arg_2 = [JTensor.from_ndarray(param, arg_0.bigdl_type) for param in to_list(arg_1)]\n callBigDlFunc(arg_0.bigdl_type, \"setWeights\", arg_0.value, arg_2)"} +{"_id": "doc_917", "title": "", "text": "def Func(arg_0, arg_1=\"float\"):\n \"\"\"\n Load a pre-trained Torch model.\n\n :param path: The path containing the pre-trained model.\n :return: A pre-trained model.\n \"\"\"\n arg_2 = callBigDlFunc(arg_1, \"loadTorch\", arg_0)\n return Layer.of(arg_2)"} +{"_id": "doc_918", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=False):\n \"\"\"\n Load a pre-trained Keras model.\n\n :param json_path: The json path containing the keras model definition.\n :param hdf5_path: The HDF5 path containing the pre-trained keras model weights with or without the model architecture.\n :return: A bigdl model.\n \"\"\"\n import arg_3\n try:\n import tensorflow\n except ImportError:\n arg_3.environ['KERAS_BACKEND'] = \"theano\"\n try:\n # Make theano backend compatible with Python3\n from theano import ifelse\n except ImportError:\n raise Exception(\"No backend is found for Keras. 
\"\n \"Please install either tensorflow or theano.\")\n from bigdl.keras.converter import DefinitionLoader, WeightLoader\n if arg_0 and not arg_1:\n return DefinitionLoader.from_json_path(arg_0)\n elif arg_0 and arg_1:\n return WeightLoader.load_weights_from_json_hdf5(arg_0, arg_1, arg_2=arg_2)\n elif arg_1 and not arg_0:\n arg_5, arg_6 = DefinitionLoader.from_hdf5_path(arg_1)\n WeightLoader.load_weights_from_kmodel(arg_6, arg_5)\n return arg_6"} +{"_id": "doc_919", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"float\"):\n \"\"\"\n Create a python Criterion by a java criterion object\n\n :param jcriterion: A java criterion object which created by Py4j\n :return: a criterion.\n \"\"\"\n arg_3 = Criterion(arg_2, arg_1)\n arg_3.value = arg_1\n arg_3.bigdl_type = arg_2\n return arg_3"} +{"_id": "doc_920", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n The file path can be stored in a local file system, HDFS, S3,\n or any Hadoop-supported file system.\n \"\"\"\n arg_3 = DefinitionLoader.from_json_path(arg_0)\n arg_4 = BCommon.text_from_path(arg_0)\n arg_5 = model_from_json(arg_4)\n WeightLoader.load_weights_from_hdf5(arg_3, arg_5, arg_1, arg_2)\n return arg_3"} +{"_id": "doc_921", "title": "", "text": "def Func():\n \"\"\"\n Load IMDB dataset\n Transform input data into an RDD of Sample\n \"\"\"\n from keras.preprocessing import sequence\n from keras.datasets import imdb\n (arg_0, arg_1), (arg_2, arg_3) = imdb.load_data(nb_words=20000)\n arg_0 = sequence.pad_sequences(arg_0, maxlen=100)\n arg_2 = sequence.pad_sequences(arg_2, maxlen=100)\n return arg_0, arg_1, arg_2, arg_3"} +{"_id": "doc_922", "title": "", "text": "def Func():\n \"\"\"\n Define a recurrent convolutional model in Keras 1.2.2\n \"\"\"\n from keras.models import Sequential\n from keras.layers import Dense, Dropout, Activation\n from keras.layers import Embedding\n from keras.layers import LSTM\n from keras.layers import Convolution1D, MaxPooling1D\n arg_0 = Sequential()\n arg_0.add(Embedding(20000, 128, input_length=100))\n arg_0.add(Dropout(0.25))\n arg_0.add(Convolution1D(nb_filter=64,\n filter_length=5,\n border_mode='valid',\n activation='relu',\n subsample_length=1))\n arg_0.add(MaxPooling1D(pool_length=4))\n arg_0.add(LSTM(70))\n arg_0.add(Dense(1))\n arg_0.add(Activation('sigmoid'))\n return arg_0"} +{"_id": "doc_923", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a list of shape tuples if there are multiple inputs.\n Return one shape tuple otherwise.\n \"\"\"\n arg_1 = callBigDlFunc(arg_0.bigdl_type, \"getInputShape\",\n arg_0.value)\n return arg_0.__process_shape(arg_1)"} +{"_id": "doc_924", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a list of shape tuples if there are multiple outputs.\n Return one shape tuple otherwise.\n \"\"\"\n arg_1 = callBigDlFunc(arg_0.bigdl_type, \"getOutputShape\",\n arg_0.value)\n return arg_0.__process_shape(arg_1)"} +{"_id": "doc_925", "title": "", "text": "def Func(arg_0=\"train\", arg_1=\"/tmp/mnist\"):\n \"\"\"\n Get mnist dataset with features and label as ndarray.\n Data would be downloaded automatically if it doesn't present at the specific location.\n\n :param data_type: \"train\" for training data and \"test\" for testing data.\n :param location: Location to store mnist dataset.\n :return: (features: ndarray, label: ndarray)\n \"\"\"\n arg_2, arg_3 = mnist.read_data_sets(arg_1, arg_0)\n return arg_2, arg_3 + 1"} +{"_id": "doc_926", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse or download movielens 1m data if train_dir 
is empty.\n\n :param data_dir: The directory storing the movielens data\n :return: a 2D numpy array with user index and item index in each row \n \"\"\"\n arg_1 = 'ml-1m.zip'\n arg_2 = base.maybe_download(arg_1, arg_0, SOURCE_URL + arg_1)\n arg_3 = zipfile.ZipFile(arg_2, 'r')\n arg_4 = os.path.join(arg_0, \"ml-1m\")\n if not os.path.exists(arg_4):\n print(\"Extracting %s to %s\" % (arg_2, arg_0))\n arg_3.extractall(arg_0)\n arg_3.close()\n arg_5 = os.path.join(arg_4,\"ratings.dat\")\n\n arg_6 = [i.strip().split(\"::\") for i in open(arg_5,\"r\").readlines()] \n arg_7 = np.array(arg_6).astype(int)\n return arg_7"} +{"_id": "doc_927", "title": "", "text": "def Func():\n \"\"\"\n Get and return the jar path for bigdl if exists.\n \"\"\"\n if os.getenv(\"BIGDL_CLASSPATH\"):\n return os.environ[\"BIGDL_CLASSPATH\"]\n arg_0 = os.path.abspath(__file__ + \"/../../\")\n arg_1 = glob.glob(os.path.join(arg_0, \"share/lib/*.jar\"))\n if arg_1:\n assert len(arg_1) == 1, \"Expecting one jar: %s\" % len(arg_1)\n return arg_1[0]\n return \"\""} +{"_id": "doc_928", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Export variable tensors from the checkpoint files.\n\n :param checkpoint_path: tensorflow checkpoint path\n :return: dictionary of tensor. The key is the variable name and the value is the numpy\n \"\"\"\n arg_1 = tf.train.NewCheckpointReader(arg_0)\n\n # Get tensor name list\n arg_2 = filter(lambda n: n!='global_step',\n arg_1.get_variable_to_shape_map().keys())\n # Prepare key-value dictionary\n arg_3 = {}\n for arg_4 in arg_2:\n arg_3[arg_4] = arg_1.get_tensor(arg_4)\n\n return arg_3"} +{"_id": "doc_929", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"float\"):\n \"\"\"\n Save a variable dictionary to a Java object file, so it can be read by BigDL\n\n :param tensors: tensor dictionary\n :param target_path: where is the Java object file store\n :param bigdl_type: model variable numeric type\n :return: nothing\n \"\"\"\n import numpy as np\n arg_3 = {}\n for arg_4 in arg_0.keys():\n if not isinstance(arg_0[arg_4], np.ndarray):\n arg_5 = np.array(arg_0[arg_4])\n else:\n arg_5 = arg_0[arg_4]\n arg_3[arg_4] = JTensor.from_ndarray(arg_5)\n \n callBigDlFunc(arg_2, \"saveTensorDictionary\", arg_3, arg_1)"} +{"_id": "doc_930", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Expand and tile tensor along given axis\n\n Args:\n units: tf tensor with dimensions [batch_size, time_steps, n_input_features]\n axis: axis along which expand and tile. 
Must be 1 or 2\n\n \"\"\"\n assert arg_1 in (1, 2)\n arg_2 = K.int_shape(arg_0)[1]\n arg_3 = [1, 1, 1, 1]\n arg_3[arg_1] = arg_2\n if arg_1 == 1:\n arg_4 = Reshape(target_shape=( (1,) + K.int_shape(arg_0)[1:] ))(arg_0)\n else:\n arg_4 = Reshape(target_shape=(K.int_shape(arg_0)[1:2] + (1,) + K.int_shape(arg_0)[2:]))(arg_0)\n return K.tile(arg_4, arg_3)"} +{"_id": "doc_931", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1.0, arg_4=\"Func\"):\n \"\"\"Simple attention without any conditions.\n\n Computes weighted sum of memory elements.\n \"\"\"\n with tf.variable_scope(arg_4):\n arg_5, arg_6, arg_7 = tf.unstack(tf.shape(arg_0))\n arg_8 = tf.nn.dropout(arg_0, arg_3=arg_3, noise_shape=[arg_5, 1, arg_7])\n arg_9 = tf.layers.dense(tf.layers.dense(arg_8, arg_1, activation=tf.nn.tanh), 1, use_bias=False)\n arg_9 = softmax_mask(tf.squeeze(arg_9, [2]), arg_2)\n arg_10 = tf.expand_dims(tf.nn.softmax(arg_9), axis=2)\n arg_11 = tf.reduce_sum(arg_10 * arg_0, axis=1)\n return arg_11"} +{"_id": "doc_932", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=\"Func\"):\n \"\"\"Computes weighted sum of inputs conditioned on state\"\"\"\n with tf.variable_scope(arg_4):\n arg_5 = tf.concat([tf.tile(tf.expand_dims(arg_1, axis=1), [1, tf.shape(arg_0)[1], 1]), arg_0], axis=2)\n arg_6 = tf.layers.dense(tf.layers.dense(arg_5, arg_2, activation=tf.nn.tanh), 1, use_bias=False)\n arg_6 = softmax_mask(tf.squeeze(arg_6, [2]), arg_3)\n arg_7 = tf.expand_dims(tf.nn.softmax(arg_6), axis=2)\n arg_8 = tf.reduce_sum(arg_7 * arg_0, axis=1)\n return arg_8, arg_6"} +{"_id": "doc_933", "title": "", "text": "def Func(arg_0, arg_1, arg_2=4,\n arg_3=False):\n \"\"\"Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of lists of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\n\n Returns:\n 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram\n precisions and brevity penalty.\n \"\"\"\n arg_4 = [0] * arg_2\n arg_5 = [0] * arg_2\n arg_6 = 0\n arg_7 = 0\n for (arg_8, arg_9) in zip(arg_0,\n arg_1):\n arg_6 += min(len(arg_10) for arg_10 in arg_8)\n arg_7 += len(arg_9)\n\n arg_11 = collections.Counter()\n for arg_12 in arg_8:\n arg_11 |= _get_ngrams(arg_12, arg_2)\n arg_13 = _get_ngrams(arg_9, arg_2)\n arg_14 = arg_13 & arg_11\n for arg_15 in arg_14:\n arg_4[len(arg_15)-1] += arg_14[arg_15]\n for arg_16 in range(1, arg_2+1):\n arg_17 = len(arg_9) - arg_16 + 1\n if arg_17 > 0:\n arg_5[arg_16-1] += arg_17\n\n arg_18 = [0] * arg_2\n for arg_19 in range(0, arg_2):\n if arg_3:\n arg_18[arg_19] = ((arg_4[arg_19] + 1.) /\n (arg_5[arg_19] + 1.))\n else:\n if arg_5[arg_19] > 0:\n arg_18[arg_19] = (float(arg_4[arg_19]) /\n arg_5[arg_19])\n else:\n arg_18[arg_19] = 0.0\n\n if min(arg_18) > 0:\n arg_20 = sum((1. / arg_2) * math.log(p) for p in arg_18)\n arg_21 = math.exp(arg_20)\n else:\n arg_21 = 0\n\n arg_22 = float(arg_7) / arg_6\n\n if arg_22 > 1.0:\n arg_23 = 1.\n else:\n arg_23 = math.exp(1 - 1. 
/ arg_22)\n\n arg_24 = arg_21 * arg_23\n\n return (arg_24, arg_18, arg_23, arg_22, arg_7, arg_6)"} +{"_id": "doc_934", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Dump the trained weights from a model to a HDF5 file.\n \"\"\"\n\n def _get_outname(arg_3):\n arg_4 = re.sub(':0$', '', arg_3)\n arg_4 = arg_4.lstrip('lm/')\n arg_4 = re.sub('/rnn/', '/RNN/', arg_4)\n arg_4 = re.sub('/multi_rnn_cell/', '/MultiRNNCell/', arg_4)\n arg_4 = re.sub('/cell_', '/Cell', arg_4)\n arg_4 = re.sub('/lstm_cell/', '/LSTMCell/', arg_4)\n if '/RNN/' in arg_4:\n if 'projection' in arg_4:\n arg_4 = re.sub('projection/kernel', 'W_P_0', arg_4)\n else:\n arg_4 = re.sub('/kernel', '/W_0', arg_4)\n arg_4 = re.sub('/bias', '/B', arg_4)\n return arg_4\n\n arg_5 = tf.train.latest_checkpoint(arg_0)\n\n arg_6 = tf.ConfigProto(allow_soft_placement=True)\n with tf.Graph().as_default():\n with tf.Session(arg_6=arg_6) as sess:\n with tf.variable_scope('lm'):\n LanguageModel(arg_2, False) # Create graph\n # we use the \"Saver\" class to load the variables\n arg_7 = tf.train.Saver()\n arg_7.restore(sess, arg_5)\n\n with h5py.File(arg_1, 'w') as fout:\n for arg_8 in tf.trainable_variables():\n if arg_8.name.find('softmax') >= 0:\n # don't dump these\n continue\n arg_4 = _get_outname(arg_8.name)\n # print(\"Saving variable {0} with name {1}\".format(\n # v.name, outname))\n arg_9 = arg_8.get_shape().as_list()\n arg_10 = fout.create_dataset(arg_4, arg_9, dtype='float32')\n arg_11 = sess.run([arg_8])[0]\n arg_10[...] = arg_11"} +{"_id": "doc_935", "title": "", "text": "def Func(arg_0: [arg_1[arg_2], arg_1[arg_1[arg_2]], arg_3.ndarray], arg_5: [arg_6, arg_3.ndarray]) -> arg_3.ndarray:\n \"\"\"\n Convert labels to one-hot vectors for multi-class multi-label classification\n\n Args:\n labels: list of samples where each sample is a class or a list of classes which sample belongs with\n classes: array of classes' names\n\n Returns:\n 2d array with one-hot representation of given samples\n \"\"\"\n arg_7 = len(arg_5)\n arg_8 = []\n for arg_9 in arg_0:\n arg_10 = arg_3.zeros(arg_7)\n if isinstance(arg_9, arg_6):\n for arg_11 in arg_9:\n if arg_11 not in arg_5:\n log.warning('Unknown intent {} detected. 
Assigning no class'.format(arg_11))\n else:\n arg_10[arg_3.where(arg_3.array(arg_5) == arg_11)[0]] = 1\n else:\n arg_10[arg_3.where(arg_3.array(arg_5) == arg_9)[0]] = 1\n arg_8.append(arg_10)\n arg_8 = arg_3.asarray(arg_8)\n return arg_8"} +{"_id": "doc_936", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"Checks existence of the model file, Funcs the model if the file exists\"\"\"\n\n # Checks presence of the model files\n if arg_0.Func_path.exists():\n arg_1 = str(arg_0.Func_path.resolve())\n log.info('[Funcing model from {}]'.format(arg_1))\n arg_0._net.Func(arg_1)"} +{"_id": "doc_937", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract values of momentum variables from optimizer\n\n Returns:\n optimizer's `rho` or `beta_1`\n \"\"\"\n arg_1 = arg_0.get_optimizer()\n if hasattr(arg_1, 'rho'):\n return arg_1.rho\n elif hasattr(arg_1, 'beta_1'):\n return arg_1.beta_1\n return None"} +{"_id": "doc_938", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = None, arg_3: arg_2 = None):\n \"\"\"\n Update graph variables setting giving `learning_rate` and `momentum`\n\n Args:\n learning_rate: learning rate value to be set in graph (set if not None)\n momentum: momentum value to be set in graph (set if not None)\n\n Returns:\n None\n \"\"\"\n if arg_1 is not None:\n K.set_value(arg_0.get_learning_rate_variable(), arg_1)\n # log.info(f\"Learning rate = {learning_rate}\")\n if arg_3 is not None:\n K.set_value(arg_0.get_momentum_variable(), arg_3)"} +{"_id": "doc_939", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3 = False,\n arg_4: arg_5[arg_1] = None) -> Tuple[arg_1]:\n \"\"\"Converts word to a tuple of symbols, optionally converts it to lowercase\n and adds capitalization label.\n\n Args:\n word: input word\n to_lower: whether to lowercase\n append_case: whether to add case mark\n ('' for first capital and '' for all caps)\n\n Returns:\n a preprocessed word\n \"\"\"\n if all(arg_6.isupper() for arg_6 in arg_0) and len(arg_0) > 1:\n arg_7 = \"\"\n elif arg_0[0].isupper():\n arg_7 = \"\"\n else:\n arg_7 = None\n if arg_2:\n arg_0 = arg_0.lower()\n if arg_0.isdigit():\n arg_8 = [\"\"]\n elif arg_0.startswith(\"http://\") or arg_0.startswith(\"www.\"):\n arg_8 = [\"\"]\n else:\n arg_8 = list(arg_0)\n if arg_2 and arg_7 is not None:\n if arg_4 == \"first\":\n arg_8 = [arg_7] + arg_8\n elif arg_4 == \"last\":\n arg_8 = arg_8 + [arg_7]\n return tuple(arg_8)"} +{"_id": "doc_940", "title": "", "text": "def Func(arg_0: arg_1.Tensor,\n arg_3: arg_4,\n arg_5=3,\n arg_6=False,\n arg_7=False,\n arg_8=None,\n arg_9=False):\n \"\"\" Number of convolutional layers stacked on top of each other\n\n Args:\n units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\n n_hidden_list: list with number of hidden units at the ouput of each layer\n filter_width: width of the kernel in tokens\n use_batch_norm: whether to use batch normalization between layers\n use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. 
] for layers 1, 2, 3, 4 ...\n training_ph: boolean placeholder determining whether is training phase now or not.\n It is used only for batch normalization to determine whether to use\n current batch average (std) or memory stored average (std)\n add_l2_losses: whether to add l2 losses on network kernels to\n tf.GraphKeys.REGULARIZATION_LOSSES or not\n\n Returns:\n units: tensor at the output of the last convolutional layer\n \"\"\"\n arg_10 = arg_1.nn.l2_loss if arg_9 else None\n for arg_11, arg_12 in enumerate(arg_3):\n if arg_7:\n arg_13 = 2 ** arg_11\n else:\n arg_13 = 1\n arg_0 = arg_1.layers.conv1d(arg_0,\n arg_12,\n arg_5,\n padding='same',\n arg_13=arg_13,\n kernel_initializer=INITIALIZER(),\n kernel_regularizer=arg_10)\n if arg_6:\n assert arg_8 is not None\n arg_0 = arg_1.layers.batch_normalization(arg_0, training=arg_8)\n arg_0 = arg_1.nn.relu(arg_0)\n return arg_0"} +{"_id": "doc_941", "title": "", "text": "def Func(arg_0: arg_1.Tensor,\n arg_3: arg_4,\n arg_5=3,\n arg_6=False,\n arg_7=False,\n arg_8=None):\n \"\"\" Highway convolutional network. Skip connection with gating\n mechanism.\n\n Args:\n units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\n n_hidden_list: list with number of hidden units at the output of each layer\n filter_width: width of the kernel in tokens\n use_batch_norm: whether to use batch normalization between layers\n use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...\n training_ph: boolean placeholder determining whether is training phase now or not.\n It is used only for batch normalization to determine whether to use\n current batch average (std) or memory stored average (std)\n Returns:\n units: tensor at the output of the last convolutional layer\n with dimensionality [None, n_tokens, n_hidden_list[-1]]\n \"\"\"\n\n for arg_9, arg_10 in enumerate(arg_3):\n arg_11 = arg_0\n # Projection if needed\n if arg_11.get_shape().as_list()[-1] != arg_10:\n arg_11 = arg_1.layers.dense(arg_11, arg_10)\n if arg_7:\n arg_12 = 2 ** arg_9\n else:\n arg_12 = 1\n arg_0 = arg_1.layers.conv1d(arg_0,\n arg_10,\n arg_5,\n padding='same',\n arg_12=arg_12,\n kernel_initializer=INITIALIZER())\n if arg_6:\n arg_0 = arg_1.layers.batch_normalization(arg_0, training=arg_8)\n arg_13 = arg_1.layers.dense(arg_11, 1, activation=arg_1.sigmoid, kernel_initializer=INITIALIZER())\n arg_11 = arg_13 * arg_11 + (1 - arg_13) * arg_0\n arg_11 = arg_1.nn.relu(arg_11)\n arg_0 = arg_11\n return arg_0"} +{"_id": "doc_942", "title": "", "text": "def Func(arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4: arg_5 = None,\n arg_6=True):\n \"\"\" Token embedding layer. Create matrix of for token embeddings.\n Can be initialized with given matrix (for example pre-trained\n with word2ve algorithm\n\n Args:\n token_indices: token indices tensor of type tf.int32\n token_embedding_matrix: matrix of embeddings with dimensionality\n [n_tokens, embeddings_dimension]\n n_tokens: total number of unique tokens\n token_embedding_dim: dimensionality of embeddings, typical 100..300\n name: embedding matrix name (variable name)\n trainable: whether to set the matrix trainable or not\n\n Returns:\n embedded_tokens: tf tensor of size [B, T, E], where B - batch size\n T - number of tokens, E - token_embedding_dim\n \"\"\"\n if arg_1 is not None:\n arg_7 = arg_1\n if arg_6:\n Warning('Matrix of embeddings is passed to the Func, '\n 'possibly there is a pre-trained embedding matrix. 
'\n 'Embeddings paramenters are set to Trainable!')\n else:\n arg_7 = np.random.randn(arg_2, arg_3).astype(np.float32) / np.sqrt(arg_3)\n arg_8 = tf.Variable(arg_7, arg_4=arg_4, arg_6=arg_6)\n arg_9 = tf.nn.embedding_lookup(arg_8, arg_0)\n return arg_9"} +{"_id": "doc_943", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=False,\n arg_4=None, arg_5=None, arg_6='Func', arg_7=False):\n \"\"\" Fast CuDNN GRU implementation\n\n Args:\n units: tf.Tensor with dimensions [B x T x F], where\n B - batch size\n T - number of tokens\n F - features\n\n n_hidden: dimensionality of hidden state\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n seq_lengths: tensor of sequence lengths with dimension [B]\n n_layers: number of layers\n input_initial_h: initial hidden state, tensor\n name: name of the variable scope to use\n reuse:whether to reuse already initialized variable\n\n Returns:\n h - all hidden states along T dimension,\n tf.Tensor with dimensionality [B x T x F]\n h_last - last hidden state, tf.Tensor with dimensionality [B x H]\n \"\"\"\n with tf.variable_scope(arg_6, arg_7=arg_7):\n arg_8 = tf.contrib.cudnn_rnn.CudnnGRU(num_layers=arg_2,\n num_units=arg_1)\n\n if arg_3:\n arg_9 = tf.get_variable('init_h', [arg_2, 1, arg_1])\n arg_9 = tf.tile(arg_9, (1, tf.shape(arg_0)[0], 1))\n else:\n arg_9 = tf.zeros([arg_2, tf.shape(arg_0)[0], arg_1])\n\n arg_10 = arg_5 or arg_9\n\n arg_11, arg_12 = arg_8(tf.transpose(arg_0, (1, 0, 2)), (arg_10, ))\n arg_11 = tf.transpose(arg_11, (1, 0, 2))\n arg_12 = tf.squeeze(arg_12, axis=0)[-1] # extract last layer state\n\n # Extract last states if they are provided\n if arg_4 is not None:\n arg_13 = tf.stack([tf.range(tf.shape(arg_11)[0]), arg_4-1], axis=1)\n arg_12 = tf.gather_nd(arg_11, arg_13)\n\n return arg_11, arg_12"} +{"_id": "doc_944", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=False,\n arg_4=None, arg_5=None, arg_6='cudnn_gru', arg_7=False):\n \"\"\" CuDNN Compatible GRU implementation.\n It should be used to load models saved with CudnnGRUCell to run on CPU.\n\n Args:\n units: tf.Tensor with dimensions [B x T x F], where\n B - batch size\n T - number of tokens\n F - features\n\n n_hidden: dimensionality of hidden state\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n seq_lengths: tensor of sequence lengths with dimension [B]\n n_layers: number of layers\n input_initial_h: initial hidden state, tensor\n name: name of the variable scope to use\n reuse:whether to reuse already initialized variable\n\n Returns:\n h - all hidden states along T dimension,\n tf.Tensor with dimensionality [B x T x F]\n h_last - last hidden state, tf.Tensor with dimensionality [B x H]\n \"\"\"\n with tf.variable_scope(arg_6, arg_7=arg_7):\n\n if arg_3:\n arg_8 = tf.get_variable('init_h', [arg_2, 1, arg_1])\n arg_8 = tf.tile(arg_8, (1, tf.shape(arg_0)[0], 1))\n else:\n arg_8 = tf.zeros([arg_2, tf.shape(arg_0)[0], arg_1])\n\n arg_9 = arg_5 or arg_8\n\n with tf.variable_scope('cudnn_gru', arg_7=arg_7):\n def single_cell(): return tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(arg_1)\n arg_10 = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(arg_2)])\n\n arg_0 = tf.transpose(arg_0, (1, 0, 2))\n\n arg_11, arg_12 = tf.nn.dynamic_rnn(arg_10=arg_10, inputs=arg_0, time_major=True,\n initial_state=tuple(tf.unstack(arg_9, axis=0)))\n arg_11 = tf.transpose(arg_11, (1, 0, 
2))\n\n arg_12 = arg_12[-1] # h_last is tuple: n_layers x batch_size x n_hidden\n\n # Extract last states if they are provided\n if arg_4 is not None:\n arg_13 = tf.stack([tf.range(tf.shape(arg_11)[0]), arg_4-1], axis=1)\n arg_12 = tf.gather_nd(arg_11, arg_13)\n\n return arg_11, arg_12"} +{"_id": "doc_945", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None, arg_4=None, arg_5=None,\n arg_6=None, arg_7='cudnn_lstm', arg_8=False):\n \"\"\" CuDNN Compatible LSTM implementation.\n It should be used to load models saved with CudnnLSTMCell to run on CPU.\n\n Args:\n units: tf.Tensor with dimensions [B x T x F], where\n B - batch size\n T - number of tokens\n F - features\n n_hidden: dimensionality of hidden state\n n_layers: number of layers\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n seq_lengths: tensor of sequence lengths with dimension [B]\n initial_h: optional initial hidden state, masks trainable_initial_states\n if provided\n initial_c: optional initial cell state, masks trainable_initial_states\n if provided\n name: name of the variable scope to use\n reuse:whether to reuse already initialized variable\n\n\n Returns:\n h - all hidden states along T dimension,\n tf.Tensor with dimensionality [B x T x F]\n h_last - last hidden state, tf.Tensor with dimensionality [B x H]\n where H - number of hidden units\n c_last - last cell state, tf.Tensor with dimensionality [B x H]\n where H - number of hidden units\n \"\"\"\n\n with tf.variable_scope(arg_7, arg_8=arg_8):\n if arg_3:\n arg_9 = tf.get_variable('init_h', [arg_2, 1, arg_1])\n arg_9 = tf.tile(arg_9, (1, tf.shape(arg_0)[0], 1))\n arg_10 = tf.get_variable('init_c', [arg_2, 1, arg_1])\n arg_10 = tf.tile(arg_10, (1, tf.shape(arg_0)[0], 1))\n else:\n arg_9 = arg_10 = tf.zeros([arg_2, tf.shape(arg_0)[0], arg_1])\n\n arg_5 = arg_5 or arg_9\n arg_6 = arg_6 or arg_10\n\n with tf.variable_scope('cudnn_lstm', arg_8=arg_8):\n def single_cell(): return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(arg_1)\n\n arg_11 = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(arg_2)])\n\n arg_0 = tf.transpose(arg_0, (1, 0, 2))\n\n arg_12 = tuple([tf.nn.rnn_cell.LSTMStateTuple(ic, ih) for ih, ic in\n zip(tf.unstack(arg_5, axis=0), tf.unstack(arg_6, axis=0))])\n\n arg_13, arg_14 = tf.nn.dynamic_rnn(arg_11=arg_11, inputs=arg_0, time_major=True, initial_state=arg_12)\n\n arg_13 = tf.transpose(arg_13, (1, 0, 2))\n arg_15 = arg_14[-1].h\n arg_16 = arg_14[-1].c\n\n # Extract last states if they are provided\n if arg_4 is not None:\n arg_17 = tf.stack([tf.range(tf.shape(arg_13)[0]), arg_4-1], axis=1)\n arg_15 = tf.gather_nd(arg_13, arg_17)\n\n return arg_13, (arg_15, arg_16)"} +{"_id": "doc_946", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=1,\n arg_4=False,\n arg_5='Func',\n arg_6=False):\n \"\"\" Fast CuDNN Bi-GRU implementation\n\n Args:\n units: tf.Tensor with dimensions [B x T x F], where\n B - batch size\n T - number of tokens\n F - features\n n_hidden: dimensionality of hidden state\n seq_lengths: number of tokens in each sample in the batch\n n_layers: number of layers\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n name: name of the variable scope to use\n reuse:whether to reuse already initialized variable\n\n\n Returns:\n h - all hidden states along T dimension,\n tf.Tensor with dimensionality [B x T x F]\n h_last - last 
hidden state, tf.Tensor with dimensionality [B x H * 2]\n where H - number of hidden units\n \"\"\"\n\n with tf.variable_scope(arg_5, arg_6=arg_6):\n if arg_2 is None:\n arg_2 = tf.ones([tf.shape(arg_0)[0]], dtype=tf.int32) * tf.shape(arg_0)[1]\n with tf.variable_scope('Forward'):\n arg_7, arg_8 = cudnn_gru_wrapper(arg_0,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_2=arg_2,\n arg_6=arg_6)\n\n with tf.variable_scope('Backward'):\n arg_9 = tf.reverse_sequence(arg_0, arg_2=arg_2, seq_dim=1, batch_dim=0)\n arg_10, arg_11 = cudnn_gru_wrapper(arg_9,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_2=arg_2,\n arg_6=arg_6)\n arg_10 = tf.reverse_sequence(arg_10, arg_2=arg_2, seq_dim=1, batch_dim=0)\n\n return (arg_7, arg_10), (arg_8, arg_11)"} +{"_id": "doc_947", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=1,\n arg_4=False,\n arg_5='cudnn_bi_gru',\n arg_6=False):\n \"\"\" Fast CuDNN Bi-LSTM implementation\n\n Args:\n units: tf.Tensor with dimensions [B x T x F], where\n B - batch size\n T - number of tokens\n F - features\n n_hidden: dimensionality of hidden state\n seq_lengths: number of tokens in each sample in the batch\n n_layers: number of layers\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n name: name of the variable scope to use\n reuse:whether to reuse already initialized variable\n\n Returns:\n h - all hidden states along T dimension,\n tf.Tensor with dimensionality [B x T x F]\n h_last - last hidden state, tf.Tensor with dimensionality [B x H * 2]\n where H - number of hidden units\n c_last - last cell state, tf.Tensor with dimensionality [B x H * 2]\n where H - number of hidden units\n \"\"\"\n with tf.variable_scope(arg_5, arg_6=arg_6):\n if arg_2 is None:\n arg_2 = tf.ones([tf.shape(arg_0)[0]], dtype=tf.int32) * tf.shape(arg_0)[1]\n with tf.variable_scope('Forward'):\n arg_7, (arg_8, arg_9) = cudnn_lstm_wrapper(arg_0,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_2=arg_2)\n\n with tf.variable_scope('Backward'):\n arg_10 = tf.reverse_sequence(arg_0, arg_2=arg_2, seq_dim=1, batch_dim=0)\n arg_11, (arg_12, arg_13) = cudnn_lstm_wrapper(arg_10,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_2=arg_2)\n\n arg_11 = tf.reverse_sequence(arg_11, arg_2=arg_2, seq_dim=1, batch_dim=0)\n return (arg_7, arg_11), ((arg_8, arg_9), (arg_12, arg_13))"} +{"_id": "doc_948", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=2,\n arg_4=1.0,\n arg_5=False,\n arg_6=False,\n arg_7='Func',\n arg_8=False):\n \"\"\" Fast CuDNN Stacked Bi-GRU implementation\n\n Args:\n units: tf.Tensor with dimensions [B x T x F], where\n B - batch size\n T - number of tokens\n F - features\n n_hidden: dimensionality of hidden state\n seq_lengths: number of tokens in each sample in the batch\n n_stacks: number of stacked Bi-GRU\n keep_prob: dropout keep_prob between Bi-GRUs (intra-layer dropout)\n concat_stacked_outputs: return last Bi-GRU output or concat outputs from every Bi-GRU,\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n name: name of the variable scope to use\n reuse: whether to reuse already initialized variable\n\n\n Returns:\n h - all hidden states along T dimension,\n tf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]\n \"\"\"\n if arg_2 is None:\n arg_2 = tf.ones([tf.shape(arg_0)[0]], dtype=tf.int32) * tf.shape(arg_0)[1]\n\n arg_9 = [arg_0]\n\n with tf.variable_scope(arg_7, 
arg_8=arg_8):\n for arg_10 in range(arg_3):\n\n if arg_10 == 0:\n arg_11 = arg_9[-1]\n else:\n arg_11 = variational_dropout(arg_9[-1], arg_4=arg_4)\n\n (arg_12, arg_13), arg_14 = cudnn_bi_gru(arg_11, arg_1, arg_2,\n n_layers=1,\n arg_6=arg_6,\n arg_7='{}_cudnn_bi_gru'.format(arg_10),\n arg_8=arg_8)\n\n arg_9.append(tf.concat([arg_12, arg_13], axis=2))\n\n if arg_5:\n return tf.concat(arg_9[1:], axis=2)\n\n return arg_9[-1]"} +{"_id": "doc_949", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(1,)):\n \"\"\" Dropout with the same drop mask for all fixed_mask_dims\n\n Args:\n units: a tensor, usually with shapes [B x T x F], where\n B - batch size\n T - tokens dimension\n F - feature dimension\n keep_prob: keep probability\n fixed_mask_dims: in these dimensions the mask will be the same\n\n Returns:\n dropped units tensor\n \"\"\"\n arg_3 = tf.shape(arg_0)\n arg_4 = [arg_3[n] for n in range(len(arg_0.shape))]\n for arg_5 in arg_2:\n arg_4[arg_5] = 1\n return tf.nn.dropout(arg_0, arg_1, arg_4)"} +{"_id": "doc_950", "title": "", "text": "def Func(arg_0):\n \"\"\"Builds the network using Keras.\n \"\"\"\n arg_1 = kl.Input(shape=(None, MAX_WORD_LENGTH+2), dtype=\"int32\")\n arg_2 = [arg_1]\n arg_3 = arg_0._Func_word_cnn(arg_1)\n if len(arg_0.word_vectorizers) > 0:\n arg_4 = [kl.Input(shape=(None, input_dim), dtype=\"float32\")\n for input_dim, dense_dim in arg_0.word_vectorizers]\n arg_2.extend(arg_4)\n arg_5 = [kl.Dense(dense_dim)(arg_4[i])\n for i, (_, dense_dim) in enumerate(arg_0.word_vectorizers)]\n arg_3 = kl.Concatenate()([arg_3] + arg_5)\n arg_6, arg_7 = arg_0._Func_basic_network(arg_3)\n arg_8 = {\"optimizer\": ko.nadam(lr=0.002, clipnorm=5.0),\n \"loss\": \"categorical_crossentropy\", \"metrics\": [\"accuracy\"]}\n arg_0.model_ = Model(arg_2, arg_6)\n arg_0.model_.compile(**arg_8)\n if arg_0.verbose > 0:\n arg_0.model_.summary(print_fn=log.info)\n return arg_0"} +{"_id": "doc_951", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Builds word-level network\n \"\"\"\n arg_1 = kl.Lambda(kb.one_hot, arguments={\"num_classes\": arg_0.symbols_number_},\n output_shape=lambda x: tuple(x) + (arg_0.symbols_number_,))(arg_1)\n arg_2 = kl.Dense(arg_0.char_embeddings_size, use_bias=False)(arg_1)\n arg_3 = []\n arg_0.char_output_dim_ = 0\n for arg_5, arg_6 in zip(arg_0.char_window_size, arg_0.char_filters):\n arg_7 = arg_2\n arg_8 = (min(arg_0.char_filter_multiple * arg_5, 200)\n if arg_6 is None else arg_6)\n for arg_9 in range(arg_0.char_conv_layers - 1):\n arg_7 = kl.Conv2D(arg_8, (1, arg_5),\n padding=\"same\", activation=\"relu\",\n data_format=\"channels_last\")(arg_7)\n if arg_0.conv_dropout > 0.0:\n arg_7 = kl.Dropout(arg_0.conv_dropout)(arg_7)\n arg_7 = kl.Conv2D(arg_8, (1, arg_5),\n padding=\"same\", activation=\"relu\",\n data_format=\"channels_last\")(arg_7)\n arg_3.append(arg_7)\n arg_0.char_output_dim_ += arg_8\n if len(arg_3) > 1:\n arg_10 = kl.Concatenate(axis=-1)(arg_3)\n else:\n arg_10 = arg_3[0]\n arg_11 = kl.Lambda(kb.max, arguments={\"axis\": -2})(arg_10)\n if arg_0.intermediate_dropout > 0.0:\n arg_11 = kl.Dropout(arg_0.intermediate_dropout)(arg_11)\n for arg_12 in range(arg_0.char_highway_layers - 1):\n arg_11 = Highway(activation=\"relu\")(arg_11)\n if arg_0.highway_dropout > 0.0:\n arg_11 = kl.Dropout(arg_0.highway_dropout)(arg_11)\n arg_13 = Highway(activation=\"relu\")(arg_11)\n return arg_13"} +{"_id": "doc_952", "title": "", "text": "def Func(arg_0, arg_1: arg_2[arg_3, arg_4],\n arg_5: arg_6 = False) -> List[List[str]]:\n \"\"\"\n Makes predictions on a single 
batch\n\n Args:\n data: a batch of word sequences together with additional inputs\n return_indexes: whether to return tag indexes in vocabulary or tags themselves\n\n Returns:\n a batch of label sequences\n \"\"\"\n arg_7 = arg_0._transform_batch(arg_1)\n arg_8, arg_9 = len(arg_7[0]), [len(arg_14) for arg_14 in arg_1[0]]\n arg_10 = arg_0.model_.Func(arg_7)\n arg_11 = np.argmax(arg_10, axis=-1)\n arg_12: List[List[str]] = [None] * arg_8\n for arg_13, (arg_14, arg_15) in enumerate(zip(arg_11, arg_9)):\n arg_14 = arg_14[:arg_15]\n arg_12[arg_13] = arg_14 if arg_5 else arg_0.tags.idxs2toks(arg_14)\n return arg_12"} +{"_id": "doc_953", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4 =None) -> np.ndarray:\n \"\"\"Transforms a sentence to Numpy array, which will be the network input.\n\n Args:\n sent: input sentence\n bucket_length: the width of the bucket\n\n Returns:\n A 3d array, answer[i][j][k] contains the index of k-th letter\n in j-th word of i-th input sentence.\n \"\"\"\n arg_3 = arg_3 or len(arg_1)\n arg_5 = np.zeros(shape=(arg_3, MAX_WORD_LENGTH+2), dtype=np.int32)\n for arg_6, arg_7 in enumerate(arg_1):\n arg_5[arg_6, 0] = arg_0.tags.tok2idx(\"BEGIN\")\n arg_8 = min(len(arg_7), MAX_WORD_LENGTH)\n for arg_9, arg_10 in enumerate(arg_7[-arg_8:]):\n arg_5[arg_6, arg_9+1] = arg_0.symbols.tok2idx(arg_10)\n arg_5[arg_6, arg_8+1] = arg_0.tags.tok2idx(\"END\")\n arg_5[arg_6, arg_8+2:] = arg_0.tags.tok2idx(\"PAD\")\n return arg_5"} +{"_id": "doc_954", "title": "", "text": "def Func(arg_0: arg_1[arg_2], arg_3: arg_1[arg_2],\n arg_4: arg_5=(1,), arg_6=arg_7.method1,\n arg_9=False, arg_10=True) -> float:\n \"\"\"Calculate BLEU score\n\n Parameters:\n y_true: list of reference tokens\n y_predicted: list of query tokens\n weights: n-gram weights\n smoothing_function: SmoothingFunction\n auto_reweigh: Option to re-normalize the weights uniformly\n penalty: either enable brevity penalty or not\n\n Return:\n BLEU score\n \"\"\"\n\n arg_11 = sentence_bleu([arg_0], arg_3, arg_4, arg_6, arg_9)\n\n arg_12 = len(arg_3)\n arg_13 = arg_12\n arg_14 = closest_ref_length([arg_0], arg_12)\n\n arg_15 = brevity_penalty(arg_14, arg_13)\n\n if arg_10 is True or arg_15 == 0:\n return arg_11\n\n return arg_11/arg_15"} +{"_id": "doc_955", "title": "", "text": "def Func(arg_0: arg_1) -> bool:\n \"\"\"Verify signature certificate URL against Amazon Alexa requirements.\n\n Each call of Agent passes incoming utterances batch through skills filter,\n agent skills, skills processor. 
Batch of dialog IDs can be provided, in\n other case utterances indexes in incoming batch are used as dialog IDs.\n\n Args:\n url: Signature certificate URL from SignatureCertChainUrl HTTP header.\n\n Returns:\n result: True if verification was successful, False if not.\n \"\"\"\n arg_2 = urlsplit(arg_0)\n\n arg_3: arg_1 = arg_2.scheme\n arg_4: arg_1 = arg_2.netloc\n arg_5: arg_1 = arg_2.path\n\n try:\n arg_6 = arg_2.port\n except ValueError:\n arg_6 = None\n\n arg_7 = (arg_3.lower() == 'https' and\n arg_4.lower().split(':')[0] == 's3.amazonaws.com' and\n arg_5.startswith('/echo.api/') and\n (arg_6 == 443 or arg_6 is None))\n\n return arg_7"} +{"_id": "doc_956", "title": "", "text": "def Func(arg_0: arg_1) -> List[crypto.X509]:\n \"\"\"Extracts pycrypto X509 objects from SSL certificates chain string.\n\n Args:\n certs_txt: SSL certificates chain string.\n\n Returns:\n result: List of pycrypto X509 objects.\n \"\"\"\n arg_2 = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'\n arg_0 = re.findall(arg_2, arg_0, flags=re.DOTALL)\n arg_3 = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in arg_0]\n return arg_3"} +{"_id": "doc_957", "title": "", "text": "def Func(arg_0: arg_1[arg_2.X509], arg_4: arg_2.X509) -> bool:\n \"\"\"Verifies if Amazon and additional certificates creates chain of trust to a root CA.\n\n Args:\n certs_chain: List of pycrypto X509 intermediate certificates from signature chain URL.\n amazon_cert: Pycrypto X509 Amazon certificate.\n\n Returns:\n result: True if verification was successful, False if not.\n \"\"\"\n arg_5 = arg_2.X509Store()\n\n # add certificates from Amazon provided certs chain\n for arg_6 in arg_0:\n arg_5.add_cert(arg_6)\n\n # add CA certificates\n arg_7 = ssl.get_default_verify_paths()\n\n arg_8 = arg_7.cafile\n arg_8 = Path(arg_8).resolve() if arg_8 else None\n\n arg_9 = arg_7.capath\n arg_9 = Path(arg_9).resolve() if arg_9 else None\n\n arg_10 = [arg_11 for arg_11 in arg_9.iterdir()] if arg_9 else []\n if arg_8:\n arg_10.append(arg_8)\n\n for arg_11 in arg_10:\n arg_11: Path\n if arg_11.is_file():\n with arg_11.open('r', encoding='ascii') as crt_f:\n arg_12 = crt_f.read()\n arg_13 = extract_certs(arg_12)\n for arg_6 in arg_13:\n arg_5.add_cert(arg_6)\n\n # add CA certificates (Windows)\n arg_14 = ssl.create_default_context()\n arg_15 = arg_14.get_ca_certs(binary_form=True)\n arg_16 = '\\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in arg_15])\n arg_13 = extract_certs(arg_16)\n for arg_17 in arg_13:\n arg_5.add_cert(arg_17)\n\n arg_18 = arg_2.X509StoreContext(arg_5, arg_4)\n\n try:\n arg_18.verify_certificate()\n arg_19 = True\n except arg_2.X509StoreContextError:\n arg_19 = False\n\n return arg_19"} +{"_id": "doc_958", "title": "", "text": "def Func(arg_0: arg_1.X509, arg_3: arg_4, arg_5: arg_6) -> bool:\n \"\"\"Verifies Alexa request signature.\n\n Args:\n amazon_cert: Pycrypto X509 Amazon certificate.\n signature: Base64 decoded Alexa request signature from Signature HTTP header.\n request_body: full HTTPS request body\n Returns:\n result: True if verification was successful, False if not.\n \"\"\"\n arg_3 = base64.b64decode(arg_3)\n\n try:\n arg_1.verify(arg_0, arg_3, arg_5, 'sha1')\n arg_7 = True\n except arg_1.Error:\n arg_7 = False\n\n return arg_7"} +{"_id": "doc_959", "title": "", "text": "def Func(arg_0: arg_1) -> Optional[crypto.X509]:\n \"\"\"Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.\n\n Args:\n signature_chain_url: Signature certificate URL from 
SignatureCertChainUrl HTTP header.\n Returns:\n result: Amazon certificate if verification was successful, None if not.\n \"\"\"\n try:\n arg_2 = requests.get(arg_0)\n except requests.exceptions.ConnectionError as e:\n log.error(f'Amazon signature chain get error: {e}')\n return None\n\n arg_3 = arg_2.text\n arg_4 = extract_certs(arg_3)\n\n arg_5: crypto.X509 = arg_4.pop(0)\n\n # verify signature chain url\n arg_6 = verify_sc_url(arg_0)\n if not arg_6:\n log.error(f'Amazon signature url {signature_chain_url} was not verified')\n\n # verify not expired\n arg_7 = not arg_5.has_expired()\n if not arg_7:\n log.error(f'Amazon certificate ({signature_chain_url}) expired')\n\n # verify subject alternative names\n arg_8 = verify_sans(arg_5)\n if not arg_8:\n log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')\n\n # verify certs chain\n arg_9 = Funcs_chain(arg_4, arg_5)\n if not arg_9:\n log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')\n\n arg_10 = (arg_6 and arg_7 and arg_8 and arg_9)\n\n return arg_5 if arg_10 else None"} +{"_id": "doc_960", "title": "", "text": "def Func(arg_0) -> list:\n \"\"\"Returns list of Telegram compatible states of the RichMessage\n instance nested controls.\n\n Returns:\n Func_controls: Telegram representation of RichMessage instance nested\n controls.\n \"\"\"\n arg_1 = [control.Func() for control in arg_0.controls]\n return arg_1"} +{"_id": "doc_961", "title": "", "text": "def Func():\n \"\"\"DeepPavlov console configuration utility.\"\"\"\n arg_0 = parser.parse_args()\n arg_1 = get_settings_path()\n\n if arg_0.default:\n if populate_settings_dir(force=True):\n print(f'Populated {path} with default settings files')\n else:\n print(f'{path} is already a default settings directory')\n else:\n print(f'Current DeepPavlov settings path: {path}')"} +{"_id": "doc_962", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Constructs function encapsulated in the graph.\"\"\"\n @wraps(arg_0)\n def _wrapped(*arg_2, **arg_3):\n with arg_1.as_default():\n return arg_0(*arg_2, **arg_3)\n return _wrapped"} +{"_id": "doc_963", "title": "", "text": "def Func(arg_0: [arg_1, arg_2.ndarray], arg_4: [arg_1, arg_2.ndarray]) -> float:\n \"\"\"\n Calculate Func in terms of absolute coincidence\n\n Args:\n y_true: array of true values\n y_predicted: array of predicted values\n\n Returns:\n portion of absolutely coincidental samples\n \"\"\"\n arg_5 = len(arg_0)\n arg_6 = sum([y1 == y2 for y1, y2 in zip(arg_0, arg_4)])\n return arg_6 / arg_5 if arg_5 else 0"} +{"_id": "doc_964", "title": "", "text": "def Func() -> DefaultAgent:\n \"\"\"Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.\n\n This is agent building tutorial. 
You can use this .py file to check how hello-bot agent works.\n\n Returns:\n agent: Agent capable of handling several simple greetings.\n \"\"\"\n arg_0 = PatternMatchingSkill(['Hello world'], patterns=['hi', 'hello', 'good day'])\n arg_1 = PatternMatchingSkill(['Goodbye world', 'See you around'], patterns=['bye', 'chao', 'see you'])\n arg_2 = PatternMatchingSkill(['I don\\'t understand, sorry', 'I can say \"Hello world\"'])\n\n arg_3 = DefaultAgent([arg_0, arg_1, arg_2], skills_processor=HighestConfidenceSelector())\n\n return arg_3"} +{"_id": "doc_965", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Takes an array of integers and transforms it\n to an array of one-hot encoded vectors\n \"\"\"\n arg_2 = np.eye(arg_1, dtype=int)\n return arg_2[arg_0]"} +{"_id": "doc_966", "title": "", "text": "def Func(arg_0: arg_1 = False) -> arg_1:\n \"\"\"\n Populate settings directory with default settings files\n\n Args:\n force: if ``True``, replace existing settings files with default ones\n\n Returns:\n ``True`` if any files were copied and ``False`` otherwise\n \"\"\"\n arg_2 = False\n if _default_settings_path == _settings_path:\n return arg_2\n\n for arg_3 in list(_default_settings_path.glob('**/*.json')):\n arg_4 = _settings_path / arg_3.relative_to(_default_settings_path)\n if not arg_0 and arg_4.exists():\n continue\n arg_2 = True\n arg_4.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(arg_3, arg_4)\n return arg_2"} +{"_id": "doc_967", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = ('Optimizer',)) -> None:\n \"\"\"Load model parameters from self.Func_path\"\"\"\n if not hasattr(arg_0, 'sess'):\n raise RuntimeError('Your TensorFlow model {} must'\n ' have sess attribute!'.format(arg_0.__class__.__name__))\n arg_3 = str(arg_0.Func_path.resolve())\n # Check presence of the model files\n if tf.train.checkpoint_exists(arg_3):\n log.info('[Funcing model from {}]'.format(arg_3))\n # Exclude optimizer variables from saved variables\n arg_4 = arg_0._get_saveable_variables(arg_1)\n arg_5 = tf.train.Saver(arg_4)\n arg_5.restore(arg_0.sess, arg_3)"} +{"_id": "doc_968", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n **arg_7):\n \"\"\"\n Get train operation for given loss\n\n Args:\n loss: loss, tf tensor or scalar\n learning_rate: scalar or placeholder.\n clip_norm: clip gradients norm by clip_norm.\n learnable_scopes: which scopes are trainable (None for all).\n optimizer: instance of tf.train.Optimizer, default Adam.\n **kwargs: parameters passed to tf.train.Optimizer object\n (scalars or placeholders).\n\n Returns:\n train_op\n \"\"\"\n if arg_6 is None:\n arg_8 = tf.variable_scope('Optimizer')\n else:\n arg_8 = tf.variable_scope(arg_6)\n with arg_8:\n if arg_5 is None:\n arg_9 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n else:\n arg_9 = []\n for arg_10 in arg_5:\n arg_9.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=arg_10))\n\n if arg_3 is None:\n arg_3 = tf.train.AdamOptimizer\n\n # For batch norm it is necessary to update running averages\n arg_11 = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(arg_11):\n\n def clip_if_not_none(arg_12):\n if arg_12 is not None:\n return tf.clip_by_norm(arg_12, arg_4)\n\n arg_13 = arg_3(arg_2, **arg_7)\n arg_14 = arg_13.compute_gradients(arg_1, var_list=arg_9)\n if arg_4 is not None:\n arg_14 = [(clip_if_not_none(arg_12), var)\n for arg_12, var in arg_14]\n arg_15 = arg_13.apply_gradients(arg_14)\n return arg_15"} 
+{"_id": "doc_969", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True, arg_4=True):\n \"\"\"\n Finds all dictionary words in d-window from word\n \"\"\"\n if not all((arg_5 in arg_0.alphabet\n or (arg_5 == \" \" and arg_0.allow_spaces)) for arg_5 in arg_1):\n return []\n # raise ValueError(\"{0} contains an incorrect symbol\".format(word))\n return arg_0._trie_Func(\n arg_1, arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_970", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"Initiates self-destruct timer.\"\"\"\n arg_0.timer = Timer(arg_0.config['conversation_lifetime'], arg_0.self_destruct_callback)\n arg_0.timer.start()"} +{"_id": "doc_971", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> list:\n \"\"\"Infers DeepPavlov agent with raw user input extracted from Alexa request.\n\n Args:\n utterance: Raw user input extracted from Alexa request.\n Returns:\n response: DeepPavlov agent response.\n \"\"\"\n if arg_0.stateful:\n arg_1 = [[arg_1], [arg_0.key]]\n else:\n arg_1 = [[arg_1]]\n\n arg_3: list = arg_0.agent(*arg_1)\n\n return arg_3"} +{"_id": "doc_972", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2) -> arg_2:\n \"\"\"Populates generated response with additional data conforming Alexa response specification.\n\n Args:\n response: Raw user input extracted from Alexa request.\n request: Alexa request.\n Returns:\n response: Response conforming Alexa response specification.\n \"\"\"\n arg_4 = deepcopy(arg_0.response_template)\n arg_4['sessionAttributes']['sessionId'] = arg_3['session']['sessionId']\n\n for arg_5, arg_6 in arg_4.items():\n if arg_5 not in arg_1.keys():\n arg_1[arg_5] = arg_6\n\n return arg_1"} +{"_id": "doc_973", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> arg_2:\n \"\"\"Handles LaunchRequest Alexa request.\n\n Args:\n request: Alexa request.\n Returns:\n response: \"response\" part of response dict conforming Alexa specification.\n \"\"\"\n arg_3 = {\n 'response': {\n 'shouldEndSession': False,\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': arg_0.config['start_message']\n },\n 'card': {\n 'type': 'Simple',\n 'content': arg_0.config['start_message']\n }\n }\n }\n\n arg_3 = arg_0._generate_response(arg_3, arg_1)\n\n return arg_3"} +{"_id": "doc_974", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> arg_2:\n \"\"\"Handles all unsupported types of Alexa requests. 
Returns standard message.\n\n Args:\n request: Alexa request.\n Returns:\n response: \"response\" part of response dict conforming Alexa specification.\n \"\"\"\n arg_3 = {\n 'response': {\n 'shouldEndSession': False,\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': arg_0.config['unsupported_message']\n },\n 'card': {\n 'type': 'Simple',\n 'content': arg_0.config['unsupported_message']\n }\n }\n }\n\n arg_3 = arg_0._generate_response(arg_3, arg_1)\n\n return arg_3"} +{"_id": "doc_975", "title": "", "text": "def Func(arg_0: arg_1[arg_2.ndarray]) -> float:\n \"\"\" Calculates perplexity by loss\n\n Args:\n losses: list of numpy arrays of model losses\n\n Returns:\n perplexity : float\n \"\"\"\n arg_4 = arg_2.mean(arg_0)\n return float(arg_2.exp(arg_4))"} +{"_id": "doc_976", "title": "", "text": "def Func(arg_0: arg_1[arg_2, arg_3, arg_4], arg_5: arg_2 = 'infer',\n arg_6: arg_7 = False, arg_8: arg_7 = False,\n arg_9: arg_10[arg_11] = None) -> Chainer:\n \"\"\"Build and return the model described in corresponding configuration file.\"\"\"\n arg_0 = parse_config(arg_0)\n\n if arg_9:\n arg_9: list = pickle.loads(arg_9)\n\n if arg_8:\n deep_download(arg_0)\n\n import_packages(arg_0.get('metadata', {}).get('imports', []))\n\n arg_12 = arg_0['chainer']\n\n arg_13 = Chainer(arg_12['in'], arg_12['out'], arg_12.get('in_y'))\n\n for arg_14 in arg_12['pipe']:\n if arg_6 and ('fit_on' in arg_14 or 'in_y' in arg_14):\n try:\n arg_14['load_path'] = arg_14['save_path']\n except KeyError:\n log.warning('No \"save_path\" parameter for the {} component, so \"load_path\" will not be renewed'\n .format(arg_14.get('class_name', arg_14.get('ref', 'UNKNOWN'))))\n\n if arg_9 and 'in' in arg_14:\n arg_15 = arg_9.pop(0)\n else:\n arg_15 = None\n\n arg_16 = from_params(arg_14, arg_5=arg_5, arg_9=arg_15)\n\n if 'in' in arg_14:\n arg_17 = arg_14['in']\n arg_18 = arg_14['out']\n arg_19 = arg_14.get('in_y', None)\n arg_20 = arg_14.get('main', False)\n arg_13.append(arg_16, arg_17, arg_18, arg_19, arg_20)\n\n return arg_13"} +{"_id": "doc_977", "title": "", "text": "def Func(arg_0: arg_1[arg_2, arg_3, arg_4]) -> None:\n \"\"\"Start interaction with the model described in corresponding configuration file.\"\"\"\n arg_5 = build_model(arg_0)\n\n while True:\n arg_6 = []\n for arg_7 in arg_5.in_x:\n arg_6.append((input('{}::'.format(arg_7)),))\n # check for exit command\n if arg_6[-1][0] in {'exit', 'stop', 'quit', 'q'}:\n return\n\n arg_8 = arg_5(*arg_6)\n if len(arg_5.out_params) > 1:\n arg_8 = zip(*arg_8)\n\n print('>>', *arg_8)"} +{"_id": "doc_978", "title": "", "text": "def Func(arg_0: arg_1[arg_2, arg_3], arg_4=False,\n arg_5: arg_6 = arg_7, arg_8: arg_6 = arg_9,\n arg_10: arg_6 = arg_11, arg_12: arg_6 = -1,\n arg_13: arg_14 = False) -> List[Tuple[List, arg_1[List, None]]]:\n \"\"\"Reads input file in CONLL-U format\n\n Args:\n infile: a path to a file\n word_column: column containing words (default=1)\n pos_column: column containing part-of-speech labels (default=3)\n tag_column: column containing fine-grained tags (default=5)\n max_sents: maximal number of sents to read\n read_only_words: whether to read only words\n\n Returns:\n a list of sentences. 
Each item contains a word sequence and a tag sequence, which is ``None``\n in case ``read_only_words = True``\n \"\"\"\n arg_15, arg_16, arg_17 = [], [], []\n if arg_4:\n arg_5, arg_13 = 0, True\n with open(arg_0, \"r\", encoding=\"utf8\") as fin:\n for arg_18 in fin:\n arg_18 = arg_18.strip()\n if arg_18.startswith(\"#\"):\n continue\n if arg_18 == \"\":\n if len(arg_16) > 0:\n if arg_13:\n arg_17 = None\n arg_15.append((arg_16, arg_17))\n arg_17, arg_16 = [], []\n if len(arg_15) == arg_12:\n break\n continue\n arg_19 = arg_18.split(\"\\t\")\n arg_20 = arg_19[0]\n if not arg_4 and not arg_20.isdigit():\n continue\n arg_16.append(arg_19[arg_5])\n if not arg_13:\n arg_21, arg_22 = arg_19[arg_8], arg_19[arg_10]\n arg_22 = arg_21 if arg_22 == \"_\" else \"{},{}\".format(arg_21, arg_22)\n arg_17.append(arg_22)\n if len(arg_16) > 0:\n if arg_13:\n arg_17 = None\n arg_15.append((arg_16, arg_17))\n return arg_15"} +{"_id": "doc_979", "title": "", "text": "def Func(arg_0: arg_1) -> Callable[..., Any]:\n \"\"\"Decorator for metric registration.\"\"\"\n def decorate(arg_2):\n arg_3 = arg_2.__module__ + ':' + arg_2.__name__\n if arg_0 in arg_4 and arg_4[arg_0] != arg_3:\n log.warning('\"{}\" is already registered as a metric name, the old function will be ignored'\n .format(arg_0))\n arg_4[arg_0] = arg_3\n return arg_2\n return decorate"} +{"_id": "doc_980", "title": "", "text": "def Func(arg_0: arg_1[arg_2], arg_3: arg_1[arg_2],\n arg_4: arg_2 = 0.9, arg_5: arg_2 = 10.0) -> arg_2:\n \"\"\"\n Find the best value according to given losses\n\n Args:\n values: list of considered values\n losses: list of obtained loss values corresponding to `values`\n max_loss_div: maximal divergence of loss to be considered significant\n min_val_div: minimum divergence of loss to be considered significant\n\n Returns:\n best value divided by `min_val_div`\n \"\"\"\n assert len(arg_0) == len(arg_3), \"lengths of values and losses should be equal\"\n arg_6 = np.argmin(arg_3)\n for arg_7 in range(arg_6 - 1, 0, -1):\n if (arg_3[arg_7] * arg_4 > arg_3[arg_6]) or\\\n (arg_0[arg_7] * arg_5 < arg_0[arg_6]):\n return arg_0[arg_7 + 1]\n return arg_0[arg_6] / arg_5"} +{"_id": "doc_981", "title": "", "text": "def Func(arg_0, arg_1: arg_2[arg_3], arg_4: arg_5) -> Union[arg_2[np.ndarray], np.ndarray]:\n \"\"\"\n Embed one text sample\n\n Args:\n tokens: tokenized text sample\n mean: whether to return mean embedding of tokens per sample\n\n Returns:\n list of embedded tokens or array of mean values\n \"\"\"\n arg_6 = []\n for arg_7 in arg_1:\n try:\n arg_8 = arg_0.tok2emb[arg_7]\n except KeyError:\n try:\n arg_8 = arg_0._get_word_vector(arg_7)\n except KeyError:\n arg_8 = np.zeros(arg_0.dim, dtype=np.float32)\n arg_0.tok2emb[arg_7] = arg_8\n arg_6.append(arg_8)\n\n if arg_4 is None:\n arg_4 = arg_0.mean\n\n if arg_4:\n arg_10 = [et for et in arg_6 if np.any(et)]\n if arg_10:\n return np.mean(arg_10, axis=0)\n return np.zeros(arg_0.dim, dtype=np.float32)\n\n return arg_6"} +{"_id": "doc_982", "title": "", "text": "def Func():\n \"\"\"parses requirements from requirements.txt\"\"\"\n arg_0 = os.path.join(__location__, 'requirements.txt')\n with open(arg_0, encoding='utf8') as f:\n arg_1 = [line.strip() for line in f if not line.strip().startswith('#')]\n\n arg_2 = []\n arg_3 = []\n for arg_4 in arg_1:\n if '://' in arg_4:\n arg_3.append(arg_4)\n else:\n arg_2.append(arg_4)\n return {'install_requires': arg_2, 'dependency_links': arg_3}"} +{"_id": "doc_983", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Exports a TF-Hub 
module\n \"\"\"\n\n arg_3 = make_module_spec(arg_2, str(arg_0))\n\n try:\n with tf.Graph().as_default():\n arg_4 = hub.Module(arg_3)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n if arg_1.exists():\n shutil.rmtree(arg_1)\n arg_4.export(str(arg_1), sess)\n finally:\n pass"} +{"_id": "doc_984", "title": "", "text": "def Func() -> EcommerceAgent:\n \"\"\"Make an agent\n\n Returns:\n agent: created Ecommerce agent\n \"\"\"\n\n arg_0 = find_config('tfidf_retrieve')\n arg_1 = build_model(arg_0)\n arg_2 = EcommerceAgent(skills=[arg_1])\n return arg_2"} +{"_id": "doc_985", "title": "", "text": "def Func():\n \"\"\"Parse parameters and run ms bot framework\"\"\"\n\n arg_0 = parser.parse_args()\n run_ms_bot_framework_server(agent_generator=make_agent,\n app_id=arg_0.ms_id,\n app_secret=arg_0.ms_secret,\n stateful=True)"} +{"_id": "doc_986", "title": "", "text": "def Func(arg_0: [arg_1[arg_2[arg_3, arg_4]]], arg_5: arg_3, arg_6=True):\n \"\"\"Download a file from URL to one or several target locations\n\n Args:\n dest_file_path: path or list of paths to the file destination files (including file name)\n source_url: the source URL\n force_Func: Func file if it already exists, or not\n\n \"\"\"\n\n if isinstance(arg_0, list):\n arg_7 = [arg_4(path) for path in arg_0]\n else:\n arg_7 = [arg_4(arg_0).absolute()]\n\n if not arg_6:\n arg_8 = list(arg_7)\n arg_7 = []\n for arg_9 in arg_8:\n if arg_9.exists():\n log.info(f'File already exists in {p}')\n else:\n arg_7.append(arg_9)\n\n if arg_7:\n arg_10 = os.getenv('DP_CACHE_DIR')\n arg_11 = False\n if arg_10:\n arg_12 = arg_4(arg_10) / md5(arg_5.encode('utf8')).hexdigest()[:15]\n arg_11 = arg_12.exists()\n else:\n arg_12 = arg_7.pop()\n\n if not arg_11:\n arg_12.parent.mkdir(parents=True, exist_ok=True)\n\n simple_Func(arg_5, arg_12)\n else:\n log.info(f'Found cached {source_url} in {first_dest_path}')\n\n for arg_13 in arg_7:\n arg_13.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(arg_3(arg_12), arg_3(arg_13))"} +{"_id": "doc_987", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Simple tar archive extractor\n\n Args:\n file_path: path to the tar file to be extracted\n extract_folder: folder to which the files will be extracted\n\n \"\"\"\n arg_0 = Path(arg_0)\n if arg_1 is None:\n arg_1 = arg_0.parent\n arg_1 = Path(arg_1)\n arg_2 = tarfile.open(arg_0)\n arg_2.extractall(arg_1)\n arg_2.close()"} +{"_id": "doc_988", "title": "", "text": "def Func(arg_0: arg_1, arg_2: [arg_3, arg_1], arg_4=None):\n \"\"\"Download and extract .tar.gz or .gz file to one or several target locations.\n The archive is deleted if extraction was successful.\n\n Args:\n url: URL for file downloading\n download_path: path to the directory where downloaded file will be stored\n until the end of extraction\n extract_paths: path or list of paths where contents of archive will be extracted\n \"\"\"\n arg_5 = arg_3(urlparse(arg_0).path).name\n arg_2 = arg_3(arg_2)\n\n if arg_4 is None:\n arg_4 = [arg_2]\n elif isinstance(arg_4, list):\n arg_4 = [arg_3(path) for path in arg_4]\n else:\n arg_4 = [arg_3(arg_4)]\n\n arg_6 = os.getenv('DP_CACHE_DIR')\n arg_7 = False\n if arg_6:\n arg_6 = arg_3(arg_6)\n arg_8 = md5(arg_0.encode('utf8')).hexdigest()[:15]\n arg_9 = arg_6 / arg_8\n arg_10 = arg_6 / (arg_8 + '_extracted')\n arg_7 = arg_10.exists()\n if not arg_7 and not arg_9.exists():\n simple_download(arg_0, arg_9)\n else:\n arg_9 = arg_2 / arg_5\n simple_download(arg_0, arg_9)\n arg_10 = arg_4.pop()\n\n if not arg_7:\n log.info('Extracting {} 
archive into {}'.format(arg_9, arg_10))\n arg_10.mkdir(parents=True, exist_ok=True)\n\n if arg_5.endswith('.tar.gz'):\n untar(arg_9, arg_10)\n elif arg_5.endswith('.gz'):\n ungzip(arg_9, arg_10 / arg_3(arg_5).with_suffix('').name)\n elif arg_5.endswith('.zip'):\n with zipfile.ZipFile(arg_9, 'r') as zip_ref:\n zip_ref.extractall(arg_10)\n else:\n raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')\n\n if not arg_6:\n arg_9.unlink()\n\n for arg_11 in arg_4:\n for arg_12 in arg_10.iterdir():\n arg_13 = arg_11 / arg_12.name\n if arg_12.is_dir():\n copytree(arg_12, arg_13)\n else:\n arg_11.mkdir(parents=True, exist_ok=True)\n shutil.copy(arg_1(arg_12), arg_1(arg_13))"} +{"_id": "doc_989", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> None:\n \"\"\"Updates dict recursively\n\n You need to use this function to update dictionary if depth of editing_dict is more then 1\n\n Args:\n editable_dict: dictionary, that will be edited\n editing_dict: dictionary, that contains edits\n Returns:\n None\n \"\"\"\n for arg_3, arg_4 in arg_2.items():\n if isinstance(arg_4, collections.Mapping):\n Func(arg_0.get(arg_3, {}), arg_4)\n else:\n arg_0[arg_3] = arg_4"} +{"_id": "doc_990", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Given a URL, set or replace a query parameter and return the modified URL.\n\n Args:\n url: a given URL\n param_name: the parameter name to add\n param_value: the parameter value\n Returns:\n URL with the added parameter\n\n \"\"\"\n arg_3, arg_4, arg_5, arg_6, arg_7 = urlsplit(arg_0)\n arg_8 = parse_qs(arg_6)\n\n arg_8[arg_1] = [arg_2]\n arg_9 = urlencode(arg_8, doseq=True)\n\n return urlunsplit((arg_3, arg_4, arg_5, arg_9, arg_7))"} +{"_id": "doc_991", "title": "", "text": "def Func(arg_0) -> dict:\n \"\"\"Returns Amazon Alexa compatible state of the PlainText instance.\n\n Creating Amazon Alexa response blank with populated \"outputSpeech\" and\n \"card sections.\n\n Returns:\n response: Amazon Alexa representation of PlainText state.\n \"\"\"\n arg_1 = {\n 'response': {\n 'shouldEndSession': False,\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': arg_0.content},\n 'card': {\n 'type': 'Simple',\n 'content': arg_0.content\n }\n }\n }\n\n return arg_1"} +{"_id": "doc_992", "title": "", "text": "def Func(arg_0) -> dict:\n \"\"\"Returns Func compatible state of the Button instance.\n\n Returns:\n control_Func: Json representation of Button state.\n \"\"\"\n arg_1 = {}\n arg_1['name'] = arg_0.name\n arg_1['callback'] = arg_0.callback\n arg_0.control_Func['content'] = arg_1\n return arg_0.control_Func"} +{"_id": "doc_993", "title": "", "text": "def Func(arg_0) -> dict:\n \"\"\"Returns Func compatible state of the ButtonsFrame instance.\n\n Returns Func compatible state of the ButtonsFrame instance including\n all nested buttons.\n\n Returns:\n control_Func: Json representation of ButtonsFrame state.\n \"\"\"\n arg_1 = {}\n\n if arg_0.text:\n arg_1['text'] = arg_0.text\n\n arg_1['controls'] = [control.Func() for control in arg_0.content]\n\n arg_0.control_Func['content'] = arg_1\n\n return arg_0.control_Func"} +{"_id": "doc_994", "title": "", "text": "def Func(arg_0) -> dict:\n \"\"\"Returns MS Bot Framework compatible state of the ButtonsFrame instance.\n\n Creating MS Bot Framework activity blank with RichCard in \"attachments\". 
RichCard\n is populated with CardActions corresponding buttons embedded in ButtonsFrame.\n\n Returns:\n control_json: MS Bot Framework representation of ButtonsFrame state.\n \"\"\"\n arg_1 = {}\n\n arg_2 = [button.Func() for button in arg_0.content]\n arg_1['buttons'] = arg_2\n\n if arg_0.text:\n arg_1['title'] = arg_0.text\n\n arg_3 = [\n {\n \"contentType\": \"application/vnd.microsoft.card.thumbnail\",\n \"content\": arg_1\n }\n ]\n\n arg_4 = {}\n arg_4['type'] = 'message'\n arg_4['attachments'] = arg_3\n\n return arg_4"} +{"_id": "doc_995", "title": "", "text": "def Func(arg_0: arg_1[arg_2], arg_3: arg_1[arg_1[arg_4.ndarray]], arg_6: arg_2):\n \"\"\"\n Calculates recall at k ranking metric.\n\n Args:\n y_true: Labels. Not used in the calculation of the metric.\n y_predicted: Predictions.\n Each prediction contains ranking score of all ranking candidates for the particular data sample.\n It is supposed that the ranking score for the true candidate goes first in the prediction.\n\n Returns:\n Recall at k\n \"\"\"\n arg_7 = float(len(arg_3))\n arg_8 = arg_4.array(arg_3)\n arg_8 = arg_4.flip(arg_4.argsort(arg_8, -1), -1)[:, :arg_6]\n arg_9 = 0\n for arg_10 in arg_8:\n if 0 in arg_10:\n arg_9 += 1\n return float(arg_9) / arg_7"} +{"_id": "doc_996", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3[arg_4, arg_5[arg_4, arg_6, arg_7, arg_8, None]]) -> arg_1:\n \"\"\"Recursively apply config's variables values to its property\"\"\"\n if isinstance(arg_0, arg_4):\n return arg_0.format(**arg_2)\n elif isinstance(arg_0, list):\n return [Func(arg_0, arg_2) for arg_0 in arg_0]\n elif isinstance(arg_0, dict):\n return {arg_9: Func(arg_10, arg_2) for arg_9, arg_10 in arg_0.items()}\n else:\n return arg_0"} +{"_id": "doc_997", "title": "", "text": "def Func(arg_0: arg_1[arg_2, arg_3]) -> arg_3:\n \"\"\"Convert relative paths to absolute with resolving user directory.\"\"\"\n return arg_3(arg_0).expanduser().resolve()"} +{"_id": "doc_998", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3 = 'infer', arg_4: arg_5 = None, **arg_6) -> Component:\n \"\"\"Builds and returns the Component from corresponding dictionary of parameters.\"\"\"\n # what is passed in json:\n arg_7 = {k: _resolve(v) for k, v in arg_0.items()}\n\n # get component by reference (if any)\n if 'ref' in arg_7:\n try:\n arg_8 = arg_13[arg_7['ref']]\n if arg_4 is not None:\n arg_8.deserialize(arg_4)\n return arg_8\n except KeyError:\n arg_9 = ConfigError('Component with id \"{id}\" was referenced but not initialized'\n .format(id=arg_7['ref']))\n log.exception(arg_9)\n raise arg_9\n\n elif 'config_path' in arg_7:\n from deeppavlov.core.commands.infer import build_model\n arg_10 = arg_13.copy()\n arg_13.clear()\n arg_11 = parse_config(expand_path(arg_7['config_path']))\n arg_12 = build_model(arg_11, arg_4=arg_4)\n arg_13.clear()\n arg_13.update(arg_10)\n try:\n arg_13[arg_7['id']] = arg_12 \n except KeyError:\n pass\n return arg_12\n\n arg_14 = arg_7.pop('class_name', None)\n if not arg_14:\n arg_9 = ConfigError('Component config has no `class_name` nor `ref` fields')\n log.exception(arg_9)\n raise arg_9\n arg_15 = get_model(arg_14)\n\n # find the submodels params recursively\n arg_7 = {k: _init_param(v, arg_2) for k, v in arg_7.items()}\n\n try:\n arg_16 = inspect.getfullargspec(arg_15)\n if 'mode' in arg_16.args+arg_16.kwonlyargs or arg_16.varkw is not None:\n arg_6['mode'] = arg_2\n\n arg_8 = arg_15(**dict(arg_7, **arg_6))\n try:\n arg_13[arg_7['id']] = arg_8\n except KeyError:\n pass\n except Exception:\n 
log.exception(\"Exception in {}\".format(arg_15))\n raise\n\n if arg_4 is not None:\n arg_8.deserialize(arg_4)\n return arg_8"} +{"_id": "doc_999", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"Thread Func method implementation.\"\"\"\n while True:\n arg_1 = arg_0.input_queue.get()\n arg_2 = arg_0._handle_request(arg_1)\n arg_0.output_queue.put(arg_2)"} +{"_id": "doc_1000", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> None:\n \"\"\"Deletes Conversation instance.\n\n Args:\n conversation_key: Conversation key.\n \"\"\"\n if arg_1 in arg_0.conversations.keys():\n del arg_0.conversations[arg_1]\n log.info(f'Deleted conversation, key: {conversation_key}')"} +{"_id": "doc_1001", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"Conducts cleanup of periodical certificates with expired validation.\"\"\"\n arg_0.timer = Timer(REFRESH_VALID_CERTS_PERIOD_SECS, arg_0.Func)\n arg_0.timer.start()\n\n arg_2 = []\n\n for arg_3, arg_4 in arg_0.valid_certificates.items():\n arg_4: ValidatedCert = arg_4\n arg_5: datetime = arg_4.expiration_timestamp\n if datetime.utcnow() > arg_5:\n arg_2.append(arg_3)\n\n for arg_6 in arg_2:\n del arg_0.valid_certificates[arg_6]\n log.info(f'Validation period of {expired_cert_url} certificate expired')"} +{"_id": "doc_1002", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_5) -> bool:\n \"\"\"Conducts series of Alexa request verifications against Amazon Alexa requirements.\n\n Args:\n signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.\n signature: Base64 decoded Alexa request signature from Signature HTTP header.\n request_body: full HTTPS request body\n Returns:\n result: True if verification was successful, False if not.\n \"\"\"\n if arg_1 not in arg_0.valid_certificates.keys():\n arg_6: X509 = verify_cert(arg_1)\n if arg_6:\n arg_7: timedelta = arg_0.config['amazon_cert_lifetime']\n arg_8 = datetime.utcnow() + arg_7\n arg_9 = ValidatedCert(cert=arg_6, arg_8=arg_8)\n arg_0.valid_certificates[arg_1] = arg_9\n log.info(f'Certificate {signature_chain_url} validated')\n else:\n log.error(f'Certificate {signature_chain_url} validation failed')\n return False\n else:\n arg_9: ValidatedCert = arg_0.valid_certificates[arg_1]\n arg_6: X509 = arg_9.cert\n\n if verify_signature(arg_6, arg_3, arg_4):\n arg_11 = True\n else:\n log.error(f'Failed signature verification for request: {request_body.decode(\"utf-8\", \"replace\")}')\n arg_11 = False\n\n return arg_11"} +{"_id": "doc_1003", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract full regularization path explored during lambda search from glm model.\n\n :param model: source lambda search model\n \"\"\"\n arg_1 = h2o.api(\"GET /3/GetGLMRegPath\", data={\"model\": arg_0._model_json[\"model_id\"][\"name\"]})\n arg_2 = arg_1.pop(\"coefficient_names\")\n arg_3 = {\n \"lambdas\": arg_1[\"lambdas\"],\n \"explained_deviance_train\": arg_1[\"explained_deviance_train\"],\n \"explained_deviance_valid\": arg_1[\"explained_deviance_valid\"],\n \"coefficients\": [dict(zip(arg_2, y)) for y in arg_1[\"coefficients\"]],\n }\n if \"coefficients_std\" in arg_1:\n arg_3[\"coefficients_std\"] = [dict(zip(arg_2, y)) for y in arg_1[\"coefficients_std\"]]\n return arg_3"} +{"_id": "doc_1004", "title": "", "text": "def Func(arg_0, arg_1, arg_2=.5):\n \"\"\"\n Create a custom GLM model using the given coefficients.\n\n Needs to be passed source model trained on the dataset to extract the dataset information from.\n\n :param model: source model, used for 
extracting dataset information\n :param coefs: dictionary containing model coefficients\n :param threshold: (optional, only for binomial) decision threshold used for classification\n \"\"\"\n arg_3 = h2o.api(\n \"POST /3/MakeGLMModel\",\n data={\"model\": arg_0._model_json[\"model_id\"][\"name\"],\n \"names\": list(arg_1.keys()),\n \"beta\": list(arg_1.values()),\n \"threshold\": arg_2}\n )\n arg_4 = H2OGeneralizedLinearEstimator()\n arg_4._resolve_model(arg_3[\"model_id\"][\"name\"], arg_3)\n return arg_4"} +{"_id": "doc_1005", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Determine if the H2O cluster is running or not.\n\n :returns: True if the cluster is up; False otherwise\n \"\"\"\n try:\n if h2o.connection().local_server and not h2o.connection().local_server.Func(): return False\n h2o.api(\"GET /\")\n return True\n except (H2OConnectionError, H2OServerError):\n return False"} +{"_id": "doc_1006", "title": "", "text": "def Func(arg_0):\n \"\"\"List all jobs performed by the cluster.\"\"\"\n arg_1 = h2o.api(\"GET /3/Jobs\")\n arg_2 = [[\"type\"], [\"dest\"], [\"description\"], [\"status\"]]\n for arg_3 in arg_1[\"jobs\"]:\n arg_4 = arg_3[\"dest\"]\n arg_2[0].append(arg_0._translate_job_type(arg_4[\"type\"]))\n arg_2[1].append(arg_4[\"name\"])\n arg_2[2].append(arg_3[\"description\"])\n arg_2[3].append(arg_3[\"status\"])\n return arg_2"} +{"_id": "doc_1007", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the list of all known timezones.\"\"\"\n from h2o.expr import ExprNode\n return h2o.H2OFrame._expr(expr=ExprNode(\"listTimeZones\"))._frame()"} +{"_id": "doc_1008", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Update information in this object from another H2OCluster instance.\n\n :param H2OCluster other: source of the new information for this object.\n \"\"\"\n arg_0._props = arg_1._props\n arg_0._retrieved_at = arg_1._retrieved_at\n arg_1._props = {}\n arg_1._retrieved_at = None"} +{"_id": "doc_1009", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parameters for metalearner algorithm\n\n Type: ``dict`` (default: ``None``).\n Example: metalearner_gbm_params = {'max_depth': 2, 'col_sample_rate': 0.3}\n \"\"\"\n if arg_0._parms.get(\"Func\") != None:\n arg_1 = ast.literal_eval(arg_0._parms.get(\"Func\"))\n for arg_2 in arg_1:\n if len(arg_1[arg_2]) == 1: #single parameter\n arg_1[arg_2] = arg_1[arg_2][0]\n return arg_1\n else:\n return arg_0._parms.get(\"Func\")"} +{"_id": "doc_1010", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=10, arg_4=0.5):\n '''Repeatedly test a function waiting for it to return True.\n\n Arguments:\n test_func -- A function that will be run repeatedly\n error -- A function that will be run to produce an error message\n it will be called with (node, timeTakenSecs, numberOfRetries)\n OR\n -- A string that will be interpolated with a dictionary of\n { 'timeTakenSecs', 'numberOfRetries' }\n timeoutSecs -- How long in seconds to keep trying before declaring a failure\n retryDelaySecs -- How long to wait between retry attempts\n '''\n arg_5 = time.time()\n arg_6 = 0\n while h2o_args.no_timeout or (time.time() - arg_5 < arg_3):\n if arg_1(arg_0, tries=arg_6, arg_3=arg_3):\n break\n time.sleep(arg_4)\n arg_6 += 1\n # hey, check the sandbox if we've been waiting a long time...rather than wait for timeout\n # to find the badness?. 
can check_sandbox_for_errors at any time\n if ((arg_6 % 50) == 0):\n check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)\n\n else:\n arg_7 = time.time() - arg_5\n if isinstance(arg_2, type('')):\n raise Exception('%s failed after %.2f seconds having retried %d times' % (\n arg_2, arg_7, arg_6))\n else:\n arg_8 = arg_2(arg_0, arg_7, arg_6)\n raise Exception(arg_8)"} +{"_id": "doc_1011", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"C1\", arg_3=10, **arg_4):\n '''\n Return the Func for a single column for a single Frame in the h2o cluster. \n '''\n arg_5 = { \n # 'offset': 0,\n # 'len': 100\n }\n h2o_methods.check_params_update_kwargs(arg_5, arg_4, 'Func', True)\n \n arg_6 = arg_0.do_json_request('3/Frames.json/%s/columns/%s/Func' % (arg_1, arg_2), timeout=arg_3, params=arg_5)\n h2o_sandbox.check_sandbox_for_errors()\n return arg_6"} +{"_id": "doc_1012", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=60, **arg_4):\n '''\n Delete a frame on the h2o cluster, given its key.\n '''\n assert arg_1 is not None, '\"key\" parameter is null'\n\n arg_5 = arg_0.do_json_request('/3/Frames.json/' + arg_1, cmd='delete', timeout=arg_3)\n\n # TODO: look for what?\n if not arg_2 and 'f00b4r' in arg_5:\n raise ValueError('Frame key not found: ' + arg_1)\n return arg_5"} +{"_id": "doc_1013", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=10, **arg_3):\n '''\n Return a model builder or all of the model builders known to the\n h2o cluster. The model builders are contained in a dictionary\n called \"Func\" at the top level of the result. The\n dictionary maps algorithm names to parameters lists. Each of the\n parameters contains all the metdata required by a client to\n present a model building interface to the user.\n\n if parameters = True, return the parameters?\n '''\n arg_4 = {}\n h2o_methods.check_params_update_kwargs(arg_4, arg_3, 'Func', False)\n\n arg_5 = '3/ModelBuilders.json' \n if arg_1:\n arg_5 += \"/\" + arg_1\n\n arg_6 = arg_0.do_json_request(arg_5, timeout=arg_2, params=arg_4)\n # verboseprint(request, \"result:\", dump_json(result))\n h2o_sandbox.check_sandbox_for_errors()\n return arg_6"} +{"_id": "doc_1014", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=60, **arg_5):\n '''\n Check a dictionary of model builder parameters on the h2o cluster \n using the given algorithm and model parameters.\n '''\n assert arg_1 is not None, '\"algo\" parameter is null'\n # Allow this now: assert training_frame is not None, '\"training_frame\" parameter is null'\n assert arg_3 is not None, '\"parameters\" parameter is null'\n\n arg_6 = arg_0.model_builders(arg_4=arg_4)\n assert arg_6 is not None, \"/ModelBuilders REST call failed\"\n assert arg_1 in arg_6['model_builders']\n arg_7 = arg_6['model_builders'][arg_1]\n \n # TODO: test this assert, I don't think this is working. . .\n if arg_2 is not None:\n arg_8 = arg_0.frames(key=arg_2)\n assert arg_8 is not None, \"/Frames/{0} REST call failed\".format(arg_2)\n\n arg_9 = arg_8['frames'][0]['key']['name']\n assert arg_9==arg_2, \\\n \"/Frames/{0} returned Frame {1} rather than Frame {2}\".format(arg_2, arg_9, arg_2)\n\n arg_3['training_frame'] = arg_2\n\n # TODO: add parameter existence checks\n # TODO: add parameter value validation\n\n # FIX! 
why ignoreH2oError here?\n arg_10 = arg_0.do_json_request('/3/ModelBuilders.json/' + arg_1 + \"/parameters\", cmd='post', \n timeout=arg_4, postData=arg_3, ignoreH2oError=True, noExtraErrorCheck=True)\n\n verboseprint(\"model parameters validation: \" + repr(arg_10))\n return arg_10"} +{"_id": "doc_1015", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=60, **arg_4):\n '''\n Score a model on the h2o cluster on the given Frame and return only the model metrics. \n '''\n assert arg_1 is not None, '\"model\" parameter is null'\n assert arg_2 is not None, '\"frame\" parameter is null'\n\n arg_5 = arg_0.models(key=arg_1, arg_3=arg_3)\n assert arg_5 is not None, \"/Models REST call failed\"\n assert arg_5['models'][0]['model_id']['name'] == arg_1, \"/Models/{0} returned Model {1} rather than Model {2}\".format(arg_1, arg_5['models'][0]['key']['name'], arg_1)\n\n # TODO: test this assert, I don't think this is working. . .\n arg_6 = arg_0.frames(key=arg_2)\n assert arg_6 is not None, \"/Frames/{0} REST call failed\".format(arg_2)\n \n print \"frames:\", dump_json(arg_6)\n # is the name not there?\n # assert frames['frames'][0]['model_id']['name'] == frame, \"/Frames/{0} returned Frame {1} rather than Frame {2}\".format(frame, models['models'][0]['key']['name'], frame)\n\n arg_7 = arg_0.do_json_request('/3/ModelMetrics.json/models/' + arg_1 + '/frames/' + arg_2, cmd='post', timeout=arg_3)\n\n arg_8 = arg_7['model_metrics'][0]\n verboseprint(\"model metrics: \" + repr(arg_8))\n h2o_sandbox.check_sandbox_for_errors()\n return arg_8"} +{"_id": "doc_1016", "title": "", "text": "def Func(arg_0, arg_1=60, **arg_2):\n '''\n ModelMetrics list. \n '''\n arg_3 = arg_0.do_json_request('/3/ModelMetrics.json', cmd='get', timeout=arg_1)\n h2o_sandbox.check_sandbox_for_errors()\n return arg_3"} +{"_id": "doc_1017", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True, arg_4=None):\n '''Create a new reservation for count instances'''\n\n arg_5 = inheritparams(arg_1, EC2_API_RUN_INSTANCE)\n arg_5.setdefault('min_count', arg_0)\n arg_5.setdefault('max_count', arg_0)\n\n arg_6 = None\n arg_7 = ec2_connect(arg_2)\n try:\n arg_6 = arg_7.Func(**arg_5)\n log('Reservation: {0}'.format(arg_6.id))\n log('Waiting for {0} EC2 instances {1} to come up, this can take 1-2 minutes.'.format(len(arg_6.instances), arg_6.instances))\n arg_8 = time.time()\n time.sleep(1)\n for arg_9 in arg_6.instances:\n while arg_9.update() == 'pending':\n time.sleep(1)\n h2o_cmd.dot()\n\n if not arg_9.state == 'running':\n raise Exception('\\033[91m[ec2] Error waiting for running state. Instance is in state {0}.\\033[0m'.format(arg_9.state))\n\n log('Instances started in {0} seconds'.format(time.time() - arg_8))\n log('Instances: ')\n for arg_10 in arg_6.instances: log(\" {0} ({1}) : public ip: {2}, private ip: {3}\".format(arg_10.public_dns_name, arg_10.id, arg_10.ip_address, arg_10.private_ip_address))\n \n if arg_3:\n # kbn: changing to private address, so it should fail if not in right domain\n # used to have the public ip address\n wait_for_ssh([ arg_11.private_ip_address for arg_11 in arg_6.instances ])\n\n # Tag instances\n try:\n if arg_4:\n arg_7.create_tags([arg_11.id for arg_11 in arg_6.instances], arg_4) \n except:\n warn('Something wrong during tagging instances. 
Exceptions IGNORED!')\n print sys.exc_info()\n pass\n\n return arg_6\n except:\n print \"\\033[91mUnexpected error\\033[0m :\", sys.exc_info()\n if arg_6:\n terminate_reservation(arg_6, arg_2)\n raise"} +{"_id": "doc_1018", "title": "", "text": "def Func(arg_0, arg_1):\n '''terminate all the instances given by its ids'''\n if not arg_0: return\n arg_2 = ec2_connect(arg_1)\n log(\"Terminating instances {0}.\".format(arg_0))\n arg_2.Func(arg_0)\n log(\"Done\")"} +{"_id": "doc_1019", "title": "", "text": "def Func(arg_0, arg_1):\n '''Reboot all the instances given by its ids'''\n if not arg_0: return\n arg_2 = ec2_connect(arg_1)\n log(\"Rebooting instances {0}.\".format(arg_0))\n arg_2.Func(arg_0)\n log(\"Done\")"} +{"_id": "doc_1020", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return fully qualified function name.\n\n This method will attempt to find \"full name\" of the given function object. This full name is either of\n the form \".\" if the function is a class method, or \".\"\n if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2.\n\n :param func: a function object.\n\n :returns: string with the function's full name as explained above.\n \"\"\"\n # Python 3.3 already has this information available...\n if hasattr(arg_0, \"__qualname__\"): return arg_0.__qualname__\n\n arg_1 = inspect.getmodule(arg_0)\n if arg_1 is None:\n return \"?.%s\" % getattr(arg_0, \"__name__\", \"?\")\n for arg_2 in dir(arg_1):\n arg_3 = getattr(arg_1, arg_2)\n if not inspect.isclass(arg_3): continue\n for arg_4 in dir(arg_3):\n arg_5 = getattr(arg_3, arg_4)\n if arg_5 == arg_0:\n return \"%s.%s\" % (arg_2, arg_4)\n if hasattr(arg_0, \"__name__\"):\n return \"%s.%s\" % (arg_1.__name__, arg_0.__name__)\n return \"\""} +{"_id": "doc_1021", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Return function's declared arguments as a string.\n\n For example for this function it returns \"func, highlight=None\"; for the ``_wrap`` function it returns\n \"text, wrap_at=120, indent=4\". 
This should usually coincide with the function's declaration (the part\n which is inside the parentheses).\n \"\"\"\n if not arg_0: return \"\"\n arg_2 = str(inspect.signature(arg_0))[1:-1]\n if arg_1:\n arg_2 = re.sub(r\"\\b%s\\b\" % arg_1, Style.BRIGHT + Fore.WHITE + arg_1 + Fore.LIGHTBLACK_EX + Style.NORMAL, arg_2)\n return arg_2"} +{"_id": "doc_1022", "title": "", "text": "def Func(arg_0, arg_1=120, arg_2=4):\n \"\"\"\n Return piece of text, wrapped around if needed.\n\n :param text: text that may be too long and then needs to be wrapped.\n :param wrap_at: the maximum line length.\n :param indent: number of spaces to prepend to all subsequent lines after the first.\n \"\"\"\n arg_3 = \"\"\n arg_4 = arg_2\n arg_5 = False\n for arg_6 in arg_0.split():\n if arg_4 + len(arg_6) > arg_1:\n arg_3 += \"\\n\" + \" \" * arg_2\n arg_4 = arg_2\n arg_5 = False\n if arg_5:\n arg_3 += \" \"\n arg_4 += 1\n arg_3 += arg_6\n arg_4 += len(arg_6)\n arg_5 = True\n return arg_3"} +{"_id": "doc_1023", "title": "", "text": "def Func(arg_0):\n \"\"\"Wait until job's completion.\"\"\"\n arg_0._future = False\n arg_0._job.poll()\n arg_2 = arg_0._job.dest_key\n arg_0._job = None\n arg_4 = h2o.api(\"GET /%d/Models/%s\" % (arg_0._rest_version, arg_2))[\"models\"][0]\n arg_0._resolve_model(arg_2, arg_4)"} +{"_id": "doc_1024", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Fit an H2O model as part of a scikit-learn pipeline or grid search.\n\n A warning will be issued if a caller other than sklearn attempts to use this method.\n\n :param H2OFrame X: An H2OFrame consisting of the predictor variables.\n :param H2OFrame y: An H2OFrame consisting of the response variable.\n :param params: Extra arguments.\n :returns: The current instance of H2OEstimator for method chaining.\n \"\"\"\n arg_4 = inspect.stack()[1:]\n arg_5 = True\n for arg_6 in arg_4:\n arg_7 = inspect.getmodule(arg_6[0])\n if arg_7:\n arg_5 = \"sklearn\" not in arg_7.__name__\n if not arg_5: break\n if arg_5:\n warnings.warn(\"\\n\\n\\t`Func` is not recommended outside of the sklearn framework. 
Use `train` instead.\",\n UserWarning, stacklevel=2)\n arg_8 = arg_1.cbind(arg_2) if arg_2 is not None else arg_1\n arg_9 = arg_1.names\n arg_2 = arg_2.names[0] if arg_2 is not None else None\n arg_0.train(arg_9, arg_2, arg_8, **arg_3)\n return arg_0"} +{"_id": "doc_1025", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Obtain parameters for this estimator.\n\n Used primarily for sklearn Pipelines and sklearn grid search.\n\n :param deep: If True, return parameters of all sub-objects that are estimators.\n\n :returns: A dict of parameters\n \"\"\"\n arg_2 = dict()\n for arg_3, arg_4 in arg_0.parms.items():\n if arg_1 and isinstance(arg_4, H2OEstimator):\n arg_5 = list(arg_4.Func().items())\n arg_2.update((arg_3 + \"__\" + arg_6, arg_7) for arg_6, arg_7 in arg_5)\n arg_2[arg_3] = arg_4\n return arg_2"} +{"_id": "doc_1026", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This function is written to remove sandbox directories if they exist under the\n parent_dir.\n\n :param parent_dir: string denoting full parent directory path\n :param dir_name: string denoting directory path which could be a sandbox\n :return: None\n \"\"\"\n if \"Rsandbox\" in arg_1:\n arg_2 = os.path.join(arg_0, arg_1)\n try:\n if sys.platform == \"win32\":\n os.system(r'C:/cygwin64/bin/rm.exe -r -f \"{0}\"'.format(arg_2))\n else:\n shutil.rmtree(arg_2)\n except OSError as e:\n print(\"\")\n print(\"ERROR: Removing RSandbox directory failed: \" + arg_2)\n print(\" (errno {0}): {1}\".format(e.errno, e.strerror))\n print(\"\")\n sys.exit(1)"} +{"_id": "doc_1027", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Look at the stdout log and figure out which port the JVM chose.\n\n If successful, port number is stored in self.port; otherwise the\n program is terminated. 
This call is blocking, and will wait for\n up to 30s for the server to start up.\n \"\"\"\n arg_1 = re.compile(r\"Open H2O Flow in your web browser: https?://([^:]+):(\\d+)\")\n arg_2 = 30\n while arg_2 and not arg_0.terminated:\n with open(arg_0.output_file_name, \"r\") as f:\n for arg_3 in f:\n arg_4 = re.search(arg_1, arg_3)\n if arg_4 is not None:\n arg_0.port = arg_4.group(2)\n print(\"H2O cloud %d node %d listening on port %s\\n with output file %s\" %\n (arg_0.cloud_num, arg_0.node_num, arg_0.port, arg_0.output_file_name))\n return\n if arg_0.terminated: break\n arg_2 -= 1\n time.sleep(1)\n\n if arg_0.terminated: return\n print(\"\\nERROR: Too many retries starting cloud %d.\\nCheck the output log %s.\\n\" %\n (arg_0.cloud_num, arg_0.output_file_name))\n sys.exit(1)"} +{"_id": "doc_1028", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Look at the stdout log and wait until the cluster of proper size is formed.\n This call is blocking.\n Exit if this fails.\n\n :param nodes_per_cloud:\n :return none\n \"\"\"\n arg_2 = 60\n while arg_2 > 0:\n if arg_0.terminated: return\n arg_3 = open(arg_0.output_file_name, \"r\")\n arg_4 = arg_3.readline()\n while len(arg_4) > 0:\n if arg_0.terminated: return\n arg_5 = re.search(r\"Cloud of size (\\d+) formed\", arg_4)\n if arg_5 is not None:\n arg_6 = arg_5.group(1)\n if arg_6 is not None:\n arg_6 = int(arg_6)\n if arg_6 == arg_1:\n arg_3.close()\n return\n\n arg_4 = arg_3.readline()\n\n arg_3.close()\n arg_2 -= 1\n if arg_0.terminated: return\n time.sleep(1)\n\n print(\"\")\n print(\"ERROR: Too many retries starting cloud.\")\n print(\"\")\n sys.exit(1)"} +{"_id": "doc_1029", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Normal node shutdown.\n Ignore failures for now.\n\n :return none\n \"\"\"\n if arg_0.pid > 0:\n print(\"Killing JVM with PID {}\".format(arg_0.pid))\n try:\n arg_0.child.terminate()\n arg_0.child.wait()\n except OSError:\n pass\n arg_0.pid = -1"} +{"_id": "doc_1030", "title": "", "text": "def Func(arg_0):\n \"\"\" Return an ip to use to talk to this cluster. \"\"\"\n if len(arg_0.client_nodes) > 0:\n arg_1 = arg_0.client_nodes[0]\n else:\n arg_1 = arg_0.nodes[0]\n return arg_1.Func()"} +{"_id": "doc_1031", "title": "", "text": "def Func(arg_0):\n \"\"\" Return a port to use to talk to this cluster. \"\"\"\n if len(arg_0.client_nodes) > 0:\n arg_1 = arg_0.client_nodes[0]\n else:\n arg_1 = arg_0.nodes[0]\n return arg_1.Func()"} +{"_id": "doc_1032", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Mean absolute error regression loss.\n\n :param y_actual: H2OFrame of actual response.\n :param y_predicted: H2OFrame of predicted response.\n :param weights: (Optional) sample weights\n :returns: mean absolute error loss (best is 0.0).\n \"\"\"\n ModelBase._check_targets(arg_0, arg_1)\n return _colmean((arg_1 - arg_0).abs())"} +{"_id": "doc_1033", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Explained variance regression score function.\n\n :param y_actual: H2OFrame of actual response.\n :param y_predicted: H2OFrame of predicted response.\n :param weights: (Optional) sample weights\n :returns: the explained variance score.\n \"\"\"\n ModelBase._check_targets(arg_0, arg_1)\n\n arg_3, arg_4 = _mean_var(arg_0 - arg_1, arg_2)\n arg_3, arg_5 = _mean_var(arg_0, arg_2)\n if arg_5 == 0.0:\n return 1. if arg_4 == 0 else 0. 
# 0/0 => 1, otherwise, 0\n return 1 - arg_4 / arg_5"} +{"_id": "doc_1034", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Assert that string variable matches the provided regular expression.\n\n :param v: variable to check.\n :param regex: regular expression to check against (can be either a string, or compiled regexp).\n \"\"\"\n arg_2 = re.match(arg_1, arg_0)\n if arg_2 is None:\n arg_3 = _retrieve_assert_arguments()[0]\n arg_4 = \"Argument `{var}` (= {val!r}) did not match /{regex}/\".format(var=arg_3, arg_1=arg_1, val=arg_0)\n raise H2OValueError(arg_4, var_name=arg_3, skip_frames=1)\n return arg_2"} +{"_id": "doc_1035", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Assert that variable satisfies the provided condition.\n\n :param v: variable to check. Its value is only used for error reporting.\n :param bool cond: condition that must be satisfied. Should be somehow related to the variable ``v``.\n :param message: message string to use instead of the default.\n \"\"\"\n if not arg_1:\n arg_3, arg_4 = _retrieve_assert_arguments()\n if not arg_2:\n arg_2 = \"Argument `{var}` (= {val!r}) does not satisfy the condition {expr}\" \\\n .format(var=arg_3, val=arg_0, expr=arg_4)\n raise H2OValueError(arg_2=arg_2, var_name=arg_3, skip_frames=1)"} +{"_id": "doc_1036", "title": "", "text": "def Func():\n \"\"\"\n Magic variable name retrieval.\n\n This function is designed as a helper for assert_is_type() function. Typically such assertion is used like this::\n\n assert_is_type(num_threads, int)\n\n If the variable `num_threads` turns out to be non-integer, we would like to raise an exception such as\n\n H2OTypeError(\"`num_threads` is expected to be integer, but got \")\n\n and in order to compose an error message like that, we need to know that the variables that was passed to\n assert_is_type() carries a name \"num_threads\". Naturally, the variable itself knows nothing about that.\n\n This is where this function comes in: we walk up the stack trace until the first frame outside of this\n file, find the original line that called the assert_is_type() function, and extract the variable name from\n that line. 
This is slightly fragile, in particular we assume that only one assert_is_type statement can be per line,\n or that this statement does not spill over multiple lines, etc.\n \"\"\"\n try:\n raise RuntimeError(\"Catch me!\")\n except RuntimeError:\n # Walk up the stacktrace until we are outside of this file\n arg_0 = sys.exc_info()[2]\n assert arg_0.tb_frame.f_code.co_name == \"Func\"\n arg_1 = arg_0.tb_frame.f_code.co_filename\n arg_2 = arg_0.tb_frame\n while arg_2 is not None and arg_2.f_code.co_filename == arg_1:\n arg_2 = arg_2.f_back\n\n # Read the source file and tokenize it, extracting the expressions.\n try:\n with io.open(arg_2.f_code.co_filename, \"r\", encoding=\"utf-8\") as f:\n # Skip initial lines that are irrelevant\n for arg_3 in range(arg_2.f_lineno - 1): next(f)\n # Create tokenizer\n arg_4 = tokenize.generate_tokens(f.readline)\n arg_5 = 0\n arg_6 = []\n arg_7 = 0\n for arg_8 in arg_4:\n if arg_5 == 0:\n if arg_8[0] != tokenize.NAME: continue\n if not arg_8[1].startswith(\"assert_\"): continue\n arg_5 = 1\n elif arg_5 == 1:\n assert arg_8[0] == tokenize.OP and arg_8[1] == \"(\"\n arg_6.append([])\n arg_5 = 2\n elif arg_5 == 2:\n if arg_7 == 0 and arg_8[0] == tokenize.OP and arg_8[1] == \",\":\n arg_6.append([])\n elif arg_7 == 0 and arg_8[0] == tokenize.OP and arg_8[1] == \")\":\n break\n else:\n if arg_8[0] == tokenize.OP and arg_8[1] in \"([{\": arg_7 += 1\n if arg_8[0] == tokenize.OP and arg_8[1] in \")]}\": arg_7 -= 1\n assert arg_7 >= 0, \"Parse error: parentheses level became negative\"\n arg_6[-1].append(arg_8)\n arg_9 = [tokenize.untokenize(at).strip().replace(\"\\n\", \" \") for at in arg_6]\n return arg_9\n except IOError:\n return \"arg\","} +{"_id": "doc_1037", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return True if the variable is of the specified type, and False otherwise.\n\n :param var: variable to check\n :param vtype: expected variable's type\n \"\"\"\n if arg_1 is None:\n return arg_0 is None\n if isinstance(arg_1, _primitive_type):\n return arg_0 == arg_1\n if arg_1 is str:\n return isinstance(arg_0, _str_type)\n if arg_1 is int:\n return isinstance(arg_0, _int_type)\n if arg_1 is numeric:\n return isinstance(arg_0, _num_type)\n if isinstance(arg_1, MagicType):\n return arg_1.check(arg_0)\n if isinstance(arg_1, type):\n # ``vtype`` is a name of the class, or a built-in type such as \"list\", \"tuple\", etc\n return isinstance(arg_0, arg_1)\n if isinstance(arg_1, list):\n # ``vtype`` is a list literal\n arg_2 = U(*arg_1)\n return isinstance(arg_0, list) and all(Func(arg_3, arg_2) for arg_3 in arg_0)\n if isinstance(arg_1, set):\n # ``vtype`` is a set literal\n arg_2 = U(*arg_1)\n return isinstance(arg_0, set) and all(Func(arg_3, arg_2) for arg_3 in arg_0)\n if isinstance(arg_1, tuple):\n # ``vtype`` is a tuple literal\n return (isinstance(arg_0, tuple) and len(arg_1) == len(arg_0) and\n all(Func(arg_0[arg_4], arg_1[arg_4]) for arg_4 in range(len(arg_1))))\n if isinstance(arg_1, dict):\n # ``vtype`` is a dict literal\n arg_5 = U(*viewitems(arg_1))\n return isinstance(arg_0, dict) and all(Func(arg_6, arg_5) for arg_6 in viewitems(arg_0))\n if isinstance(arg_1, (FunctionType, BuiltinFunctionType)):\n return arg_1(arg_0)\n raise RuntimeError(\"Ivalid type %r in Func()\" % arg_1)"} +{"_id": "doc_1038", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Attempt to find the source code of the ``lambda_fn`` within the string ``src``.\"\"\"\n def gen_lambdas():\n def gen():\n yield arg_1 + \"\\n\"\n\n arg_2 = gen()\n arg_3 = 0\n arg_4 = []\n 
for arg_5 in tokenize.generate_tokens(getattr(arg_2, \"next\", getattr(arg_2, \"__next__\", None))):\n if arg_3 == 0:\n if arg_5[0] == tokenize.NAME and arg_5[1] == \"lambda\":\n arg_3 = 1\n arg_4 = [arg_5]\n arg_6 = 0\n elif arg_3 == 1:\n if arg_5[0] == tokenize.NAME:\n arg_4.append(arg_5)\n arg_3 = 2\n else:\n arg_3 = 0\n elif arg_3 == 2:\n if arg_5[0] == tokenize.OP and arg_5[1] == \":\":\n arg_4.append(arg_5)\n arg_3 = 3\n else:\n arg_3 = 0\n elif arg_3 == 3:\n if arg_6 == 0 and (arg_5[0] == tokenize.OP and arg_5[1] in \",)\" or arg_5[0] == tokenize.ENDMARKER):\n yield tokenize.untokenize(arg_4).strip()\n arg_3 = 0\n else:\n arg_4.append(arg_5)\n if arg_5[0] == tokenize.OP:\n if arg_5[1] in \"[({\": arg_6 += 1\n if arg_5[1] in \"])}\": arg_6 -= 1\n assert not arg_4\n\n arg_7 = arg_0.__code__.co_code\n for arg_8 in gen_lambdas():\n try:\n arg_9 = eval(arg_8, globals(), locals())\n if arg_9.__code__.co_code == arg_7:\n return arg_8.split(\":\", 1)[1].strip()\n except Exception:\n pass\n return \"\""} +{"_id": "doc_1039", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return True if the variable does not match any of the types, and False otherwise.\"\"\"\n return not any(_Func_type(arg_1, arg_2) for arg_2 in arg_0._types)"} +{"_id": "doc_1040", "title": "", "text": "def Func():\n \"\"\"Retrieve the config as a dictionary of key-value pairs.\"\"\"\n arg_0 = H2OConfigReader._get_instance()\n if not arg_0._config_loaded:\n arg_0._read_config()\n return arg_0._config"} +{"_id": "doc_1041", "title": "", "text": "def Func():\n \"\"\"Return possible locations for the .h2oconfig file, one at a time.\"\"\"\n # Search for .h2oconfig in the current directory and all parent directories\n arg_0 = \".h2oconfig\"\n arg_1 = None\n while True:\n arg_2 = os.path.abspath(arg_0)\n if arg_2 == arg_1: break\n arg_1 = arg_2\n arg_0 = \"../\" + arg_0\n yield arg_2\n # Also check if .h2oconfig exists in the user's directory\n yield os.path.expanduser(\"~/.h2oconfig\")"} +{"_id": "doc_1042", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Start the progress bar, and return only when the progress reaches 100%.\n\n :param progress_fn: the executor function (or a generator). This function should take no arguments\n and return either a single number -- the current progress level, or a tuple (progress level, delay),\n where delay is the time interval for when the progress should be checked again. This function may at\n any point raise the ``StopIteration(message)`` exception, which will interrupt the progress bar,\n display the ``message`` in red font, and then re-raise the exception.\n :raises StopIteration: if the job is interrupted. The reason for interruption is provided in the exception's\n message. The message will say \"cancelled\" if the job was interrupted by the user by pressing Ctrl+C.\n \"\"\"\n assert_is_type(arg_1, FunctionType, GeneratorType, MethodType)\n if isinstance(arg_1, GeneratorType):\n # Convert generator to a regular function\n arg_1 = (lambda g: lambda: next(g))(arg_1)\n\n # Initialize the execution context\n arg_0._next_poll_time = 0\n arg_0._t0 = time.time()\n arg_0._x0 = 0\n arg_0._v0 = 0.01 # corresponds to 100s completion time\n arg_0._ve = 0.01\n\n arg_8 = 0\n arg_9 = None # Status message in case the job gets interrupted.\n try:\n while True:\n # We attempt to synchronize all helper functions, ensuring that each of them has the same idea\n # for what the current time moment is. 
Otherwise we could have some corner cases when one method\n # says that something must happen right now, while the other already sees that moment in the past.\n arg_10 = time.time()\n\n # Query the progress level, but only if it's time already\n if arg_0._next_poll_time <= arg_10:\n arg_11 = arg_1() # may raise StopIteration\n assert_is_type(arg_11, (numeric, numeric), numeric)\n if not isinstance(arg_11, tuple):\n arg_11 = (arg_11, -1)\n # Progress querying could have taken some time, so update the current time moment\n arg_10 = time.time()\n arg_0._store_model_progress(arg_11, arg_10)\n arg_0._recalculate_model_parameters(arg_10)\n\n # Render the widget regardless of whether it's too early or not\n arg_8 = min(arg_0._compute_progress_at_time(arg_10)[0], 1)\n if arg_8 == 1 and arg_0._get_real_progress() >= 1:\n # Do not exit until both the model and the actual progress reach 100% mark.\n break\n arg_12 = arg_0._widget.render(arg_8)\n assert_is_type(arg_12, RenderResult)\n arg_13 = arg_12.next_time\n arg_14 = arg_0._get_time_at_progress(arg_12.next_progress)\n arg_15 = min(arg_13, arg_14)\n arg_0._draw(arg_12.rendered)\n\n # Wait until the next rendering/querying cycle\n arg_16 = min(arg_15, arg_0._next_poll_time) - arg_10\n if arg_16 > 0:\n time.sleep(arg_16)\n if arg_2 is not None:\n arg_2(arg_8)\n except KeyboardInterrupt:\n # If the user presses Ctrl+C, we interrupt the progress bar.\n arg_9 = \"cancelled\"\n except StopIteration as e:\n # If the generator raises StopIteration before reaching 100%, then the progress display will\n # reamin incomplete.\n arg_9 = str(e)\n\n # Do one final rendering before we exit\n arg_12 = arg_0._widget.render(arg_8=arg_8, arg_9=arg_9)\n arg_0._draw(arg_12.rendered, final=True)\n\n if arg_9 == \"cancelled\":\n # Re-raise the exception, to inform the upstream caller that something unexpected happened.\n raise StopIteration(arg_9)"} +{"_id": "doc_1043", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Save the current model progress into ``self._progress_data``, and update ``self._next_poll_time``.\n\n :param res: tuple (progress level, poll delay).\n :param now: current timestamp.\n \"\"\"\n arg_3, arg_4 = arg_1\n arg_3 = clamp(arg_3, 0, arg_0._maxval)\n arg_0._progress_data.append((arg_2, arg_3))\n\n if arg_4 < 0:\n # calculation of ``_guess_next_poll_interval()`` should be done only *after* we pushed the fresh data to\n # ``self._progress_data``.\n arg_4 = arg_0._guess_next_poll_interval()\n arg_0._next_poll_time = arg_2 + clamp(arg_4, arg_0.MIN_PROGRESS_CHECK_INTERVAL, arg_0.MAX_PROGRESS_CHECK_INTERVAL)"} +{"_id": "doc_1044", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute t0, x0, v0, ve.\"\"\"\n arg_2 = arg_0._estimate_progress_completion_time(arg_1) - arg_1\n assert arg_2 >= 0, \"Estimated progress completion cannot be in the past.\"\n arg_3 = arg_0._get_real_progress()\n if arg_3 == 1:\n arg_4, arg_5, arg_6, arg_7 = arg_1, 1, 0, 0\n else:\n arg_5, arg_6 = arg_0._compute_progress_at_time(arg_1)\n arg_4 = arg_1\n if arg_5 >= 1:\n # On rare occasion, the model's progress may have reached 100% by ``now``. 
This can happen if\n # (1) the progress is close to 100% initially and has high speed, (2) on the previous call we\n # estimated that the process completion time will be right after the next poll time, and (3)\n # the polling itself took so much time that the process effectively \"overshoot\".\n # If this happens, then we adjust x0, v0 to the previous valid data checkpoint.\n arg_4, arg_5, arg_6 = arg_0._t0, arg_0._x0, arg_0._v0\n arg_2 += arg_1 - arg_4\n arg_8 = arg_0.BETA * arg_2\n arg_9 = (1 - arg_3**2) / arg_0.FINISH_DELAY\n arg_7 = arg_6 + (arg_0.BETA * (1 - arg_5) - arg_6 * arg_8) / (arg_8 - 1 + math.exp(-arg_8))\n if arg_7 < 0:\n # Current speed is too high -- reduce v0 (violate non-smoothness of speed)\n arg_6 = arg_0.BETA * (1 - arg_5) / (1 - math.exp(-arg_8))\n arg_7 = 0\n if arg_7 > arg_9:\n # Current speed is too low: finish later, but do not allow ``ve`` to be higher than ``max_speed``\n arg_7 = arg_9\n arg_0._t0, arg_0._x0, arg_0._v0, arg_0._ve = arg_4, arg_5, arg_6, arg_7"} +{"_id": "doc_1045", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Estimate the moment when the underlying process is expected to reach completion.\n\n This function should only return future times. Also this function is not allowed to return time moments less\n than self._next_poll_time if the actual progress is below 100% (this is because we won't know that the\n process have finished until we poll the external progress function).\n \"\"\"\n assert arg_0._next_poll_time >= arg_1\n arg_2, arg_3 = arg_0._progress_data[-1]\n # If reached 100%, make sure that we finish as soon as possible, but maybe not immediately\n if arg_3 == arg_0._maxval:\n arg_4 = (1 - arg_0._x0) / arg_0._v0 + arg_0._t0\n return clamp(arg_4, arg_1, arg_1 + arg_0.FINISH_DELAY)\n\n # Calculate the approximate speed of the raw progress based on recent data\n arg_5, arg_6 = 0, 0\n arg_7 = arg_0.GAMMA\n for arg_8, arg_9 in arg_0._progress_data[-2::-1]:\n arg_5 += arg_7 * (arg_2 - arg_8)\n arg_6 += arg_7 * (arg_3 - arg_9)\n arg_7 *= arg_0.GAMMA\n if arg_7 < 1e-2: break\n\n # If there was no progress at all, then just assume it's 5 minutes from now\n if arg_6 == 0: return arg_1 + 300\n\n # Estimate the completion time assuming linear progress\n arg_10 = arg_2 + arg_5 * (arg_0._maxval - arg_3) / arg_6\n\n # Adjust the estimate if it looks like it may happen too soon\n if arg_10 <= arg_0._next_poll_time:\n arg_10 = arg_0._next_poll_time + arg_0.FINISH_DELAY\n\n return arg_10"} +{"_id": "doc_1046", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Determine when to query the progress status next.\n\n This function is used if the external progress function did not return time interval for when it should be\n queried next.\n \"\"\"\n arg_1 = arg_0._progress_data[-1][0] - arg_0._progress_data[0][0]\n arg_2 = arg_0._get_real_progress()\n return min(0.2 * arg_1, 0.5 + (1 - arg_2)**0.5)"} +{"_id": "doc_1047", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the projected time when progress level `x_target` will be reached.\n\n Since the underlying progress model is nonlinear, we need to do use Newton method to find a numerical solution\n to the equation x(t) = x_target.\n \"\"\"\n arg_2, arg_3, arg_4 = arg_0._t0, arg_0._x0, arg_0._v0\n # The convergence should be achieved in just few iterations, however in unlikely situation that it doesn't\n # we don't want to loop forever...\n for arg_5 in range(20):\n if arg_4 == 0: return 1e20\n # make time prediction assuming the progress will continue at a linear speed ``v``\n arg_2 += 
(arg_1 - arg_3) / arg_4\n # calculate the actual progress at that time\n arg_3, arg_4 = arg_0._compute_progress_at_time(arg_2)\n # iterate until convergence\n if abs(arg_3 - arg_1) < 1e-3: return arg_2\n return time.time() + 100"} +{"_id": "doc_1048", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Print the rendered string to the stdout.\"\"\"\n if not arg_0._file_mode:\n # If the user presses Ctrl+C this ensures we still start writing from the beginning of the line\n sys.stdout.write(\"\\r\")\n sys.stdout.write(arg_1)\n if arg_2 and not isinstance(arg_0._widget, _HiddenWidget):\n sys.stdout.write(\"\\n\")\n else:\n if not arg_0._file_mode:\n sys.stdout.write(\"\\r\")\n sys.stdout.flush()"} +{"_id": "doc_1049", "title": "", "text": "def Func(arg_0):\n \"\"\"Initial rendering stage, done in order to compute widths of all widgets.\"\"\"\n arg_1 = [0] * len(arg_0._widgets)\n arg_2 = 0\n\n # First render all non-flexible widgets\n for arg_3, arg_4 in enumerate(arg_0._widgets):\n if isinstance(arg_4, ProgressBarFlexibleWidget):\n arg_2 += 1\n else:\n arg_1[arg_3] = arg_4.render(1).length\n\n arg_5 = arg_0._width - sum(arg_1)\n arg_5 -= len(arg_0._widgets) - 1 # account for 1-space interval between widgets\n if arg_5 < 10 * arg_2:\n if arg_0._file_mode:\n arg_5 = 10 * arg_2\n else:\n # The window is too small to accomodate the widget: try to split it into several lines, otherwise\n # switch to \"file mode\". If we don't do this, then rendering the widget will cause it to wrap, and\n # then when we use \\r to go to the beginning of the line, only part of the widget will be overwritten,\n # which means we'll have many (possibly hundreds) of progress bar lines in the end.\n arg_6 = arg_0._widgets[0]\n if isinstance(arg_6, PBWString) and arg_5 + arg_6.render(0).length >= 10 * arg_2:\n arg_5 += arg_6.render(0).length + 1\n arg_0._to_render = arg_6.render(0).rendered + \"\\n\"\n arg_0._widgets = arg_0._widgets[1:]\n if arg_5 < 10 * arg_2:\n arg_0._file_mode = True\n arg_5 = 10 * arg_2\n\n arg_5 = max(arg_5, 10 * arg_2) # Ensure at least 10 chars per flexible widget\n\n for arg_3, arg_4 in enumerate(arg_0._widgets):\n if isinstance(arg_4, ProgressBarFlexibleWidget):\n arg_10 = int(arg_5 / arg_2)\n arg_11 = arg_4.render(1, arg_10)\n arg_1[arg_3] = arg_11.length\n arg_5 -= arg_11.length\n arg_2 -= 1\n\n return arg_1"} +{"_id": "doc_1050", "title": "", "text": "def Func():\n \"\"\"Find current STDOUT's width, in characters.\"\"\"\n # If output is not terminal but a regular file, assume 100 chars width\n if not sys.stdout.isatty():\n return 80\n\n # Otherwise, first try getting the dimensions from shell command `stty`:\n try:\n import subprocess\n arg_0 = subprocess.check_output([\"stty\", \"size\"]).strip().split(\" \")\n if len(arg_0) == 2:\n return int(arg_0[1])\n except:\n pass\n\n # Otherwise try using ioctl\n try:\n from termios import TIOCGWINSZ\n from fcntl import ioctl\n from struct import unpack\n arg_1 = unpack(\"hh\", ioctl(sys.stdout, TIOCGWINSZ, b\"1234\"))\n return int(arg_1[1])\n except:\n pass\n\n # Finally check the COLUMNS environment variable\n return int(os.environ.get(\"COLUMNS\", 80))"} +{"_id": "doc_1051", "title": "", "text": "def Func(arg_0, arg_1 = None):\n \"\"\"\n Returns encoding map as an object that maps 'column_name' -> 'frame_with_encoding_map_for_this_column_name'\n\n :param frame frame: An H2OFrame object with which to create the target encoding map\n \"\"\"\n arg_0._teColumns = list(map(lambda i: arg_1.names[i], arg_0._teColumns)) if 
all(isinstance(n, int) for n in arg_0._teColumns) else arg_0._teColumns\n arg_0._responseColumnName = arg_1.names[arg_0._responseColumnName] if isinstance(arg_0._responseColumnName, int) else arg_0._responseColumnName\n arg_0._foldColumnName = arg_1.names[arg_0._foldColumnName] if isinstance(arg_0._foldColumnName, int) else arg_0._foldColumnName\n \n arg_0._encodingMap = ExprNode(\"target.encoder.Func\", arg_1, arg_0._teColumns, arg_0._responseColumnName,\n arg_0._foldColumnName)._eager_map_frame()\n\n return arg_0._encodingMap"} +{"_id": "doc_1052", "title": "", "text": "def Func(arg_0):\n \"\"\"Reload frame information from the backend H2O server.\"\"\"\n arg_0._ex._cache.flush()\n arg_0._frame(fill_cache=True)"} +{"_id": "doc_1053", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n The Func for the given column.\n\n :param col: either a name, or an index of the column to look up\n :returns: Func of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.\n :raises H2OValueError: if such column does not exist in the frame.\n \"\"\"\n assert_is_Func(arg_1, int, str)\n if not arg_0._ex._cache.Funcs_valid() or not arg_0._ex._cache.names_valid():\n arg_0._ex._cache.flush()\n arg_0._frame(fill_cache=True)\n arg_2 = arg_0._ex._cache.Funcs\n if is_Func(arg_1, str):\n if arg_1 in arg_2:\n return arg_2[arg_1]\n else:\n arg_3 = arg_0._ex._cache.names\n if -len(arg_3) <= arg_1 < len(arg_3):\n return arg_2[arg_3[arg_1]]\n raise H2OValueError(\"Column '%r' does not exist in the frame\" % arg_1)"} +{"_id": "doc_1054", "title": "", "text": "def Func(arg_0, arg_1=\"numeric\"):\n \"\"\"\n Extract columns of the specified type from the frame.\n\n :param str coltype: A character string indicating which column type to filter by. This must be\n one of the following:\n\n - ``\"numeric\"`` - Numeric, but not categorical or time\n - ``\"categorical\"`` - Integer, with a categorical/factor String mapping\n - ``\"string\"`` - String column\n - ``\"time\"`` - Long msec since the Unix Epoch - with a variety of display/parse options\n - ``\"uuid\"`` - UUID\n - ``\"bad\"`` - No none-NA rows (triple negative! 
all NAs or zero rows)\n\n :returns: list of indices of columns that have the requested type\n \"\"\"\n assert_is_type(arg_1, \"numeric\", \"categorical\", \"string\", \"time\", \"uuid\", \"bad\")\n assert_is_type(arg_0, H2OFrame)\n return ExprNode(\"columnsByType\", arg_0, arg_1)._eager_scalar()"} +{"_id": "doc_1055", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Display Func information about the frame.\n\n Summary includes min/mean/max/sigma and other rollup data.\n\n :param bool return_data: Return a dictionary of the Func output\n \"\"\"\n if not arg_0._has_content():\n print(\"This H2OFrame is empty and not initialized.\")\n return arg_0._ex._cache._data;\n if not arg_0._ex._cache.is_valid(): arg_0._frame()._ex._cache.fill()\n if not arg_1:\n if arg_0.nrows == 0:\n print(\"This H2OFrame is empty.\")\n elif H2ODisplay._in_ipy():\n import IPython.display\n IPython.display.display_html(arg_0._ex._cache._tabulate(\"html\", True), raw=True)\n else:\n print(arg_0._ex._cache._tabulate(\"simple\", True))\n else:\n return arg_0._ex._cache._data"} +{"_id": "doc_1056", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Generate an in-depth description of this H2OFrame.\n\n This will print to the console the dimensions of the frame; names/types/summary statistics for each column;\n and finally first ten rows of the frame.\n\n :param bool chunk_summary: Retrieve the chunk summary along with the distribution summary\n \"\"\"\n if arg_0._has_content():\n arg_2 = h2o.api(\"GET /3/Frames/%s\" % arg_0.frame_id, data={\"row_count\": 10})[\"frames\"][0]\n arg_0._ex._cache._fill_data(arg_2)\n\n print(\"Rows:{}\".format(arg_0.nrow))\n print(\"Cols:{}\".format(arg_0.ncol))\n\n #The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True.\n if arg_1:\n arg_2[\"chunk_summary\"].show()\n arg_2[\"distribution_summary\"].show()\n print(\"\\n\")\n arg_0.summary()"} +{"_id": "doc_1057", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the factor Func.\n\n :returns: A list of lists, one list per column, of Func.\n \"\"\"\n arg_1 = H2OFrame._expr(expr=ExprNode(\"Func\", arg_0)).as_data_frame(False)\n arg_1.pop(0) # Remove column headers\n arg_1 = list(zip(*arg_1))\n return [[arg_2 for arg_2 in arg_3 if arg_2 != ''] for arg_3 in arg_1]"} +{"_id": "doc_1058", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Change names of columns in the frame.\n\n Dict key is an index or name of the column whose name is to be set.\n Dict value is the new name of the column.\n\n :param columns: dict-like transformations to apply to the column names\n \"\"\"\n assert_is_type(arg_1, None, dict)\n arg_2 = arg_0.names\n arg_3 = arg_0.ncols\n\n for arg_4, arg_5 in arg_1.items():\n arg_6 = None\n if is_type(arg_4, int) and (-arg_3 <= arg_4 < arg_3):\n arg_6 = (arg_4 + arg_3) % arg_3 # handle negative indices\n elif is_type(arg_4, str) and arg_4 in arg_0.names:\n arg_6 = arg_0.names.index(arg_4) # lookup the name\n\n if arg_6 is not None:\n arg_2[arg_6] = arg_5\n\n return arg_0.set_names(arg_2)"} +{"_id": "doc_1059", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Test whether elements of an H2OFrame are contained in the ``item``.\n\n :param items: An item or a list of items to compare the H2OFrame against.\n\n :returns: An H2OFrame of 0s and 1s showing whether each element in the original H2OFrame is contained in item.\n \"\"\"\n if is_type(arg_1, list, tuple, set):\n if arg_0.ncols == 1 and (arg_0.type(0) == 'str' or arg_0.type(0) == 'enum'):\n return 
arg_0.match(arg_1)\n else:\n return functools.reduce(H2OFrame.__or__, (arg_0 == arg_2 for arg_2 in arg_1))\n else:\n return arg_0 == arg_1"} +{"_id": "doc_1060", "title": "", "text": "def Func(arg_0, arg_1=3):\n \"\"\"\n Build a fold assignments column for cross-validation.\n\n Rows are assigned a fold according to the current row number modulo ``n_folds``.\n\n :param int n_folds: An integer specifying the number of validation sets to split the training data into.\n :returns: A single-column H2OFrame with the fold assignments.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1))._frame()"} +{"_id": "doc_1061", "title": "", "text": "def Func(arg_0):\n \"\"\"Compactly display the internal Func of an H2OFrame.\"\"\"\n arg_1 = arg_0.as_data_frame(use_pandas=False)\n arg_2 = arg_1.pop(0)\n arg_3 = arg_0.nrow\n arg_4 = arg_0.ncol\n arg_5 = max([len(c) for c in arg_2])\n arg_6 = arg_0.isfactor()\n arg_7 = arg_0.nlevels()\n arg_8 = arg_0.levels()\n print(\"H2OFrame: '{}' \\nDimensions: {} obs. of {} variables\".format(arg_0.frame_id, arg_3, arg_4))\n for arg_9 in range(arg_4):\n print(\"$ {} {}: \".format(arg_2[arg_9], ' ' * (arg_5 - max(0, len(arg_2[arg_9])))), end=' ')\n if arg_6[arg_9]:\n arg_10 = arg_7[arg_9]\n print(\"Factor w/ {} level(s) {} \".format(arg_10, '\"' + '\",\"'.join(arg_8[arg_9]) + '\"'), end='\\n')\n else:\n print(\"num {}\".format(\" \".join(arg_11[0] if arg_11 else \"nan\" for arg_11 in h2o.as_list(arg_0[:10, arg_9], False)[1:])))"} +{"_id": "doc_1062", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\"\n Obtain the dataset as a python-local object.\n\n :param bool use_pandas: If True (default) then return the H2OFrame as a pandas DataFrame (requires that the\n ``pandas`` library was installed). If False, then return the contents of the H2OFrame as plain nested\n list, in a row-wise order.\n :param bool header: If True (default), then column names will be appended as the first row in list\n\n :returns: A python object (a list of lists of strings, each list is a row, if use_pandas=False, otherwise\n a pandas DataFrame) containing this H2OFrame instance's data.\n \"\"\" \n if can_use_pandas() and arg_1:\n import pandas\n return pandas.read_csv(StringIO(arg_0.get_frame_data()), low_memory=False, skip_blank_lines=False)\n from h2o.utils.csv.readers import reader\n arg_3 = [row for row in reader(StringIO(arg_0.get_frame_data()))]\n if not arg_2:\n arg_3.pop(0)\n return arg_3"} +{"_id": "doc_1063", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=\"interpolate\", arg_3=None):\n \"\"\"\n Compute Funcs.\n\n :param List[float] prob: list of probabilities for which Funcs should be computed.\n :param str combine_method: for even samples this setting determines how to combine Funcs. This can be\n one of ``\"interpolate\"``, ``\"average\"``, ``\"low\"``, ``\"high\"``.\n :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal\n importance. 
This parameter can be either the name of column containing the observation weights in\n this frame, or a single-column separate H2OFrame of observation weights.\n\n :returns: a new H2OFrame containing the Funcs and probabilities.\n \"\"\"\n if len(arg_0) == 0: return arg_0\n if arg_1 is None: arg_1 = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]\n if arg_3 is None:\n arg_3 = \"_\"\n else:\n assert_is_type(arg_3, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == arg_0.nrow))\n if isinstance(arg_3, H2OFrame):\n arg_4 = arg_0.cbind(arg_3)\n arg_3 = arg_4.names[-1]\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_4, arg_1, arg_2, arg_3))\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1, arg_2, arg_3))"} +{"_id": "doc_1064", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Append data to this frame column-wise.\n\n :param H2OFrame data: append columns of frame ``data`` to the current frame. You can also Func a number,\n in which case it will get converted into a constant column.\n\n :returns: new H2OFrame with all frames in ``data`` appended column-wise.\n \"\"\"\n assert_is_type(arg_1, H2OFrame, numeric, [H2OFrame, numeric])\n arg_2 = [arg_1] if not isinstance(arg_1, list) else arg_1\n arg_3 = list(arg_0.columns)\n arg_4 = dict(arg_0.types)\n for arg_5 in arg_2:\n if isinstance(arg_5, H2OFrame):\n if arg_5.nrow != arg_0.nrow:\n raise H2OValueError(\"Cannot bind a dataframe with %d rows to a data frame with %d rows: \"\n \"the number of rows should match\" % (arg_5.nrow, arg_0.nrow))\n arg_3 += arg_5.columns\n arg_4.update(arg_5.types)\n else:\n arg_3 += [None]\n arg_6 = set(arg_3)\n arg_7 = H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, *arg_2), cache=arg_0._ex._cache)\n arg_7._ex._cache.ncols = len(arg_3)\n if len(arg_3) == len(arg_6) and None not in arg_6:\n arg_7._ex._cache.names = arg_3\n arg_7._ex._cache.types = arg_4\n else:\n # Invalidate names and types since they contain duplicate / unknown names, and the server will choose those.\n arg_7._ex._cache.names = None\n arg_7._ex._cache.types = None\n return arg_7"} +{"_id": "doc_1065", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Split a frame into distinct subsets of size determined by the given ratios.\n\n The number of subsets is always 1 more than the number of ratios given. Note that\n this does not give an exact split. H2O is designed to be efficient on big data\n using a probabilistic splitting method rather than an exact split. For example\n when specifying a split of 0.75/0.25, H2O will produce a test/train split with\n an expected value of 0.75/0.25 rather than exactly 0.75/0.25. 
On small datasets,\n the sizes of the resulting splits will deviate from the expected value more than\n on big data, where they will be very close to exact.\n\n :param List[float] ratios: The fractions of rows for each split.\n :param List[str] destination_frames: The names of the split frames.\n :param int seed: seed for the random number generator\n\n :returns: A list of H2OFrames\n \"\"\"\n assert_is_type(arg_1, [numeric], None)\n assert_is_type(arg_2, [str], None)\n assert_is_type(arg_3, int, None)\n\n if arg_1 is None:\n arg_1 = [0.75]\n if not arg_1:\n raise ValueError(\"Ratios array may not be empty\")\n\n if arg_2 is not None:\n if len(arg_1) + 1 != len(arg_2):\n raise ValueError(\"The number of provided destination_frames must be one more \"\n \"than the number of provided ratios\")\n\n arg_4 = len(arg_1) + 1\n arg_5 = []\n\n arg_6 = 0\n arg_7 = 0\n while arg_7 < arg_4 - 1:\n arg_8 = arg_1[arg_7]\n if arg_8 < 0:\n raise ValueError(\"Ratio must be greater than 0\")\n arg_9 = arg_6 + arg_8\n if arg_9 >= 1.0:\n raise ValueError(\"Ratios must add up to less than 1.0\")\n arg_5.append(arg_9)\n arg_6 = arg_9\n arg_7 += 1\n\n arg_10 = []\n arg_11 = arg_0.runif(arg_3)\n arg_11.frame_id = \"%s_splitter\" % _py_tmp_key(h2o.connection().session_id)\n\n arg_7 = 0\n while arg_7 < arg_4:\n if arg_7 == 0:\n # lower_boundary is 0.0\n arg_13 = arg_5[arg_7]\n arg_14 = arg_0[(arg_11 <= arg_13), :]\n elif arg_7 == arg_4 - 1:\n arg_15 = arg_5[arg_7 - 1]\n # upper_boundary is 1.0\n arg_14 = arg_0[(arg_11 > arg_15), :]\n else:\n arg_15 = arg_5[arg_7 - 1]\n arg_13 = arg_5[arg_7]\n arg_14 = arg_0[((arg_11 > arg_15) & (arg_11 <= arg_13)), :]\n\n if arg_2 is None:\n arg_10.append(arg_14)\n else:\n arg_16 = arg_2[arg_7]\n arg_14.frame_id = arg_16\n arg_10.append(arg_14)\n\n arg_7 += 1\n\n del arg_11\n return arg_10"} +{"_id": "doc_1066", "title": "", "text": "def Func(arg_0,arg_1=\"forward\",arg_2=0,arg_3=1):\n \"\"\"\n Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length\n\n :param method: ``\"forward\"`` or ``\"backward\"``\n :param axis: 0 for columnar-wise or 1 for row-wise fill\n :param maxlen: Max number of consecutive NA's to fill\n \n :return: \n \"\"\"\n assert_is_type(arg_2, 0, 1)\n assert_is_type(arg_1,str)\n assert_is_type(arg_3, int)\n return H2OFrame._expr(expr=ExprNode(\"h2o.Func\",arg_0,arg_1,arg_2,arg_3))"} +{"_id": "doc_1067", "title": "", "text": "def Func(arg_0, arg_1=-1, arg_2=\"mean\", arg_3=\"interpolate\", arg_4=None, arg_5=None, arg_6=None):\n \"\"\"\n Impute missing values into the frame, modifying it in-place.\n\n :param int column: Index of the column to Func, or -1 to Func the entire frame.\n :param str method: The method of imputation: ``\"mean\"``, ``\"median\"``, or ``\"mode\"``.\n :param str combine_method: When the method is ``\"median\"``, this setting dictates how to combine quantiles\n for even samples. One of ``\"interpolate\"``, ``\"average\"``, ``\"low\"``, ``\"high\"``.\n :param by: The list of columns to group on.\n :param H2OFrame group_by_frame: Impute the values with this pre-computed grouped frame.\n :param List values: The list of Func values, one per column. 
None indicates to skip the column.\n\n :returns: A list of values used in the imputation or the group-by result used in imputation.\n \"\"\"\n if is_type(arg_1, str): arg_1 = arg_0.names.index(arg_1)\n if is_type(arg_4, str): arg_4 = arg_0.names.index(arg_4)\n\n if arg_6 is None:\n arg_6 = \"_\"\n else:\n assert len(arg_6) == len(arg_0.columns), \"Length of values does not match length of columns\"\n # convert string values to categorical num values\n arg_7 = []\n for arg_8 in range(0,len(arg_6)):\n if arg_0.type(arg_8) == \"enum\":\n try:\n arg_7.append(arg_0.levels()[arg_8].index(arg_6[arg_8]))\n except:\n raise H2OValueError(\"Impute value of: \" + arg_6[arg_8] + \" not found in existing levels of\"\n \" column: \" + arg_0.col_names[arg_8])\n else:\n arg_7.append(arg_6[arg_8])\n arg_6 = arg_7\n if arg_5 is None: arg_5 = \"_\"\n\n\n # This code below is needed to ensure the frame (self) exists on the server. Without it, self._ex._cache.fill()\n # fails with an assertion that ._id is None.\n # This code should be removed / reworked once we have a more consistent strategy of dealing with frames.\n arg_0._ex._eager_frame()\n\n if arg_4 is not None or arg_5 is not \"_\":\n arg_9 = H2OFrame._expr(\n expr=ExprNode(\"h2o.Func\", arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6))._frame()\n else:\n arg_9 = ExprNode(\"h2o.Func\", arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6)._eager_scalar()\n\n arg_0._ex._cache.flush()\n arg_0._ex._cache.fill(10)\n return arg_9"} +{"_id": "doc_1068", "title": "", "text": "def Func(arg_0, arg_1=0.1, arg_2=None):\n \"\"\"\n Insert missing values into the current frame, modifying it in-place.\n\n Randomly replaces a user-specified fraction of entries in a H2O dataset with missing\n values.\n\n :param float fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.\n :param int seed: The seed for the random number generator used to determine which values to make missing.\n\n :returns: the original H2OFrame with missing values inserted.\n \"\"\"\n arg_3 = {}\n arg_3['dataset'] = arg_0.frame_id # Eager; forces eval now for following REST call\n arg_3['fraction'] = arg_1\n if arg_2 is not None: arg_3['seed'] = arg_2\n arg_4 = {}\n arg_4['job'] = h2o.api(\"POST /3/MissingInserter\", data=arg_3)\n H2OJob(arg_4, job_type=(\"Insert Missing Values\")).poll()\n arg_0._ex._cache.flush()\n return arg_0"} +{"_id": "doc_1069", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=None):\n \"\"\"\n Compute the Funciance-coFunciance matrix of one or two H2OFrames.\n\n :param H2OFrame y: If this parameter is given, then a coFunciance matrix between the columns of the target\n frame and the columns of ``y`` is computed. If this parameter is not provided then the coFunciance matrix\n of the target frame is returned. If target frame has just a single column, then return the scalar Funciance\n instead of the matrix. Single rows are treated as single columns.\n :param str use: A string indicating how to handle missing values. 
This could be one of the following:\n\n - ``\"everything\"``: outputs NaNs whenever one of its contributing observations is missing\n - ``\"all.obs\"``: presence of missing observations will throw an error\n - ``\"complete.obs\"``: discards missing values along with all observations in their rows so that only\n complete observations are used\n :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is\n ``\"everything\"``; and if False then default ``use`` is ``\"complete.obs\"``. This parameter has no effect\n if ``use`` is given explicitly.\n\n :returns: An H2OFrame of the coFunciance matrix of the columns of this frame (if ``y`` is not given),\n or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows\n or single columns, then the Funciance is returned as a scalar.\n \"\"\"\n arg_4 = False\n if arg_1 is None:\n arg_1 = arg_0\n arg_4 = True\n if arg_3 is None: arg_3 = \"complete.obs\" if arg_2 else \"everything\"\n if arg_0.nrow == 1 or (arg_0.ncol == 1 and arg_1.ncol == 1):\n return ExprNode(\"Func\", arg_0, arg_1, arg_3, arg_4)._eager_scalar()\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1, arg_3, arg_4))._frame()"} +{"_id": "doc_1070", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=None):\n \"\"\"\n Compute the Funcrelation matrix of one or two H2OFrames.\n\n :param H2OFrame y: If this parameter is provided, then compute Funcrelation between the columns of ``y``\n and the columns of the current frame. If this parameter is not given, then just compute the Funcrelation\n matrix for the columns of the current frame.\n :param str use: A string indicating how to handle missing values. This could be one of the following:\n\n - ``\"everything\"``: outputs NaNs whenever one of its contributing observations is missing\n - ``\"all.obs\"``: presence of missing observations will throw an error\n - ``\"complete.obs\"``: discards missing values along with all observations in their rows so that only\n complete observations are used\n :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is\n ``\"everything\"``; and if False then default ``use`` is ``\"complete.obs\"``. This parameter has no effect\n if ``use`` is given explicitly.\n\n :returns: An H2OFrame of the Funcrelation matrix of the columns of this frame (if ``y`` is not given),\n or with the columns of ``y`` (if ``y`` is given). 
However when this frame and ``y`` are both single rows\n or single columns, then the Funcrelation is returned as a scalar.\n \"\"\"\n assert_is_type(arg_1, H2OFrame, None)\n assert_is_type(arg_2, bool)\n assert_is_type(arg_3, None, \"everything\", \"all.obs\", \"complete.obs\")\n if arg_1 is None:\n arg_1 = arg_0\n if arg_3 is None: arg_3 = \"complete.obs\" if arg_2 else \"everything\"\n if arg_0.nrow == 1 or (arg_0.ncol == 1 and arg_1.ncol == 1): return ExprNode(\"Func\", arg_0, arg_1, arg_3)._eager_scalar()\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1, arg_3))._frame()"} +{"_id": "doc_1071", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert columns in the current frame to categoricals.\n\n :returns: new H2OFrame with columns of the \"enum\" type.\n \"\"\"\n for arg_1 in arg_0.names:\n arg_2 = arg_0.types[arg_1]\n if arg_2 not in {\"bool\", \"int\", \"string\", \"enum\"}:\n raise H2OValueError(\"Only 'int' or 'string' are allowed for \"\n \"Func(), got %s:%s \" % (arg_1, arg_2))\n arg_3 = H2OFrame._expr(expr=ExprNode(\"as.factor\", arg_0), cache=arg_0._ex._cache)\n if arg_3._ex._cache.types_valid():\n arg_3._ex._cache.types = {name: \"enum\" for name in arg_0.types}\n else:\n raise H2OTypeError(\"Types are not available in result\")\n \n return arg_3"} +{"_id": "doc_1072", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Split the strings in the target column on the given regular expression pattern.\n\n :param str pattern: The split pattern.\n :returns: H2OFrame containing columns of the split strings.\n \"\"\"\n arg_2 = H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1))\n arg_2._ex._cache.nrows = arg_0.nrow\n return arg_2"} +{"_id": "doc_1073", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n For each string in the frame, count the occurrences of the provided pattern. If countmathces is applied to\n a frame, all columns of the frame must be type string, otherwise, the returned frame will contain errors.\n\n The pattern here is a plain string, not a regular expression. We will search for the occurrences of the\n pattern as a substring in element of the frame. This function is applicable to frames containing only\n string or categorical columns.\n\n :param str pattern: The pattern to count matches on in each string. This can also be a list of strings,\n in which case all of them will be searched for.\n :returns: numeric H2OFrame with the same shape as the original, containing counts of matches of the\n pattern for each cell in the original frame.\n \"\"\"\n assert_is_type(arg_1, str, [str])\n arg_2 = H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1))\n arg_2._ex._cache.nrows = arg_0.nrow\n arg_2._ex._cache.ncols = arg_0.ncol\n return arg_2"} +{"_id": "doc_1074", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n For each string, return a new string that is a Func of the original string.\n\n If end_index is not specified, then the Func extends to the end of the original string. If the start_index\n is longer than the length of the string, or is greater than or equal to the end_index, an empty string is\n returned. 
Negative start_index is coerced to 0.\n\n :param int start_index: The index of the original string at which to start the Func, inclusive.\n :param int end_index: The index of the original string at which to end the Func, exclusive.\n :returns: An H2OFrame containing the specified Funcs.\n \"\"\"\n arg_3 = H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1, arg_2))\n arg_3._ex._cache.nrows = arg_0.nrow\n arg_3._ex._cache.ncol = arg_0.ncol\n return arg_3"} +{"_id": "doc_1075", "title": "", "text": "def Func(arg_0, arg_1=\" \"):\n \"\"\"\n Return a copy of the column with leading characters removed.\n\n The set argument is a string specifying the set of characters to be removed.\n If omitted, the set argument defaults to removing whitespace.\n\n :param character set: The set of characters to Func from strings in column.\n :returns: a new H2OFrame with the same shape as the original frame and having all its values\n trimmed from the left (equivalent of Python's ``str.Func()``).\n \"\"\"\n # work w/ None; parity with python Func\n if arg_1 is None: arg_1 = \" \"\n\n arg_2 = H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1))\n arg_2._ex._cache.nrows = arg_0.nrow\n arg_2._ex._cache.ncol = arg_0.ncol\n return arg_2"} +{"_id": "doc_1076", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=True):\n \"\"\"\n Compute the counts of values appearing in a column, or co-occurence counts between two columns.\n\n :param H2OFrame data2: An optional single column to aggregate counts by.\n :param bool dense: If True (default) then use dense representation, which lists only non-zero counts,\n 1 combination per row. Set to False to expand counts across all combinations.\n\n :returns: H2OFrame of the counts at each combination of factor levels\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1, arg_2)) if arg_1 is not None else H2OFrame._expr(\n expr=ExprNode(\"Func\", arg_0, arg_2))"} +{"_id": "doc_1077", "title": "", "text": "def Func(arg_0, arg_1=\"sturges\", arg_2=True, **arg_3):\n \"\"\"\n Compute a histogram over a numeric column.\n\n :param breaks: Can be one of ``\"sturges\"``, ``\"rice\"``, ``\"sqrt\"``, ``\"doane\"``, ``\"fd\"``, ``\"scott\"``;\n or a single number for the number of breaks; or a list containing the split points, e.g:\n ``[-50, 213.2123, 9324834]``. If breaks is \"fd\", the MAD is used over the IQR in computing bin width.\n :param bool plot: If True (default), then a plot will be generated using ``matplotlib``.\n\n :returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,\n mids, and density; otherwise this method draws a plot and returns nothing.\n \"\"\"\n arg_4 = arg_3.pop(\"server\") if \"server\" in arg_3 else False\n assert_is_type(arg_1, int, [numeric], Enum(\"sturges\", \"rice\", \"sqrt\", \"doane\", \"fd\", \"scott\"))\n assert_is_type(arg_2, bool)\n assert_is_type(arg_4, bool)\n if arg_3:\n raise H2OValueError(\"Unknown parameters to hist(): %r\" % arg_3)\n Func = H2OFrame._expr(expr=ExprNode(\"hist\", arg_0, arg_1))._frame()\n\n if arg_2:\n try:\n import matplotlib\n if arg_4:\n matplotlib.use(\"Agg\", warn=False)\n import matplotlib.pyplot as plt\n except ImportError:\n print(\"ERROR: matplotlib is required to make the histogram plot. 
\"\n \"Set `plot` to False, if a plot is not desired.\")\n return\n\n Func[\"widths\"] = Func[\"breaks\"].difflag1()\n # [2:] because we're removing the title and the first row (which consists of NaNs)\n arg_6 = [float(c[0]) for c in h2o.as_list(Func[\"breaks\"], use_pandas=False)[2:]]\n arg_7 = [float(c[0]) for c in h2o.as_list(Func[\"widths\"], use_pandas=False)[2:]]\n arg_8 = [float(c[0]) for c in h2o.as_list(Func[\"counts\"], use_pandas=False)[2:]]\n\n plt.xlabel(arg_0.names[0])\n plt.ylabel(\"Frequency\")\n plt.title(\"Histogram of %s\" % arg_0.names[0])\n plt.bar(left=arg_6, width=arg_7, height=arg_8, bottom=0)\n if not arg_4:\n plt.show()\n else:\n Func[\"density\"] = Func[\"counts\"] / (Func[\"breaks\"].difflag1() * Func[\"counts\"].sum())\n return Func"} +{"_id": "doc_1078", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Substitute the first occurrence of pattern in a string with replacement.\n\n :param str pattern: A regular expression.\n :param str replacement: A replacement string.\n :param bool ignore_case: If True then pattern will match case-insensitively.\n :returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"replacefirst\", arg_0, arg_1, arg_2, arg_3))"} +{"_id": "doc_1079", "title": "", "text": "def Func(arg_0,arg_1, arg_2 = False, arg_3 = False, arg_4 = False):\n \"\"\"\n Searches for matches to argument `pattern` within each element\n of a string column.\n\n Default behavior is to return indices of the elements matching the pattern. Parameter\n `output_logical` can be used to return a logical vector indicating if the element matches\n the pattern (1) or not (0).\n\n :param str pattern: A character string containing a regular expression.\n :param bool ignore_case: If True, then case is ignored during matching.\n :param bool invert: If True, then identify elements that do not match the pattern.\n :param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions\n :return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"Func\", arg_0, arg_1, arg_2, arg_3, arg_4))"} +{"_id": "doc_1080", "title": "", "text": "def Func(arg_0, arg_1=0.2, arg_2=-1):\n \"\"\"\n Construct a column that can be used to perform a random stratified split.\n\n :param float test_frac: The fraction of rows that will belong to the \"test\".\n :param int seed: The seed for the random number generator.\n\n :returns: an H2OFrame having single categorical column with two levels: ``\"train\"`` and ``\"test\"``.\n\n :examples:\n >>> stratsplit = df[\"y\"].Func(test_frac=0.3, seed=12349453)\n >>> train = df[stratsplit==\"train\"]\n >>> test = df[stratsplit==\"test\"]\n >>>\n >>> # check that the distributions among the initial frame, and the\n >>> # train/test frames match\n >>> df[\"y\"].table()[\"Count\"] / df[\"y\"].table()[\"Count\"].sum()\n >>> train[\"y\"].table()[\"Count\"] / train[\"y\"].table()[\"Count\"].sum()\n >>> test[\"y\"].table()[\"Count\"] / test[\"y\"].table()[\"Count\"].sum()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode('h2o.random_Func', arg_0, arg_1, arg_2))"} +{"_id": "doc_1081", "title": "", "text": "def Func(arg_0,arg_1=True, arg_2=0):\n \"\"\"\n Get the index of the max value in a column or row\n\n :param bool skipna: If True (default), then NAs are ignored during the search. 
Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched\n rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.\n :returns: either a list of max index values per-column or an H2OFrame containing max index values\n per-row from the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which.max\", arg_0, arg_1, arg_2))"} +{"_id": "doc_1082", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse the provided file, and return Code object.\"\"\"\n assert isinstance(arg_0, _str_type), \"`filename` parameter should be a string, got %r\" % type(arg_0)\n with open(arg_0, \"rt\", encoding=\"utf-8\") as f:\n return Code(_tokenize(f.readline))"} +{"_id": "doc_1083", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"Move the token by `drow` rows and `dcol` columns.\"\"\"\n arg_0._start_row += arg_1\n arg_0._start_col += arg_2\n arg_0._end_row += arg_1\n arg_0._end_col += arg_2"} +{"_id": "doc_1084", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert the parsed representation back into the source code.\"\"\"\n arg_1 = Untokenizer(start_row=arg_0._tokens[0].start_row)\n arg_0._Func(arg_1)\n return arg_1.result()"} +{"_id": "doc_1085", "title": "", "text": "def Func(arg_0):\n \"\"\"The standardized centers for the kmeans model.\"\"\"\n arg_1 = arg_0._model_json[\"output\"]\n arg_2 = arg_1[\"centers_std\"].cell_values\n Func = [list(cval[1:]) for cval in arg_2]\n Func = [list(x) for x in zip(*Func)]\n return Func"} +{"_id": "doc_1086", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=None, arg_6=None,\n arg_7=None, arg_8=None, arg_9=True, arg_10=None):\n \"\"\"\n Connect to an existing H2O server, remote or local.\n\n There are two ways to Func to a server: either pass a `server` parameter containing an instance of\n an H2OLocalServer, or specify `ip` and `port` of the server that you want to Func to.\n\n :param server: An H2OLocalServer instance to Func to (optional).\n :param url: Full URL of the server to Func to (can be used instead of `ip` + `port` + `https`).\n :param ip: The ip address (or host name) of the server where H2O is running.\n :param port: Port number that H2O service is listening to.\n :param https: Set to True to Func via https:// instead of http://.\n :param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.\n :param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth\n or one of the requests.auth authenticator objects.\n :param proxy: Proxy server address.\n :param cookies: Cookie (or list of) to add to request\n :param verbose: Set to False to disable printing Funcion status messages.\n :param Funcion_conf: Connection configuration object encapsulating Funcion parameters.\n :returns: the new :class:`H2OConnection` object.\n \"\"\"\n global arg_11\n if arg_10:\n if \"Func_params\" in arg_10:\n arg_11 = _Func_with_conf(arg_10[\"Func_params\"])\n else:\n arg_11 = _Func_with_conf(arg_10)\n else:\n arg_11 = H2OConnection.open(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_6=arg_6, arg_5=arg_5,\n arg_7=arg_7, arg_8=arg_8,\n arg_9=arg_9)\n if arg_9:\n arg_11.cluster.show_status()\n 
return arg_11"} +{"_id": "doc_1087", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Perform a REST API request to a previously connected server.\n\n This function is mostly for internal purposes, but may occasionally be useful for direct access to\n the backend H2O server. It has same parameters as :meth:`H2OConnection.request `.\n \"\"\"\n # type checks are performed in H2OConnection class\n _check_connection()\n return h2oconn.request(arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_1088", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0, arg_3=None, arg_4=None, arg_5=None,\n arg_6=None, arg_7=None):\n \"\"\"\n Upload a dataset from the provided local path to the H2O cluster.\n\n Does a single-threaded push to H2O. Also see :meth:`import_file`.\n\n :param path: A path specifying the location of the data to upload.\n :param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will\n be automatically generated.\n :param header: -1 means the first line is data, 0 means guess, 1 means first line is header.\n :param sep: The field separator character. Values on each line of the file are separated by\n this character. If not provided, the parser will automatically detect the separator.\n :param col_names: A list of column names for the file.\n :param col_types: A list of types or a dictionary of column names to types to specify whether columns\n should be forced to a certain type upon import parsing. If a list, the types for elements that are\n one will be guessed. The possible types a column may have are:\n\n - \"unknown\" - this will force the column to be parsed as all NA\n - \"uuid\" - the values in the column must be true UUID or will be parsed as NA\n - \"string\" - force the column to be parsed as a string\n - \"numeric\" - force the column to be parsed as numeric. H2O will handle the compression of the numeric\n data in the optimal manner.\n - \"enum\" - force the column to be parsed as a categorical column.\n - \"time\" - force the column to be parsed as a time column. 
H2O will attempt to parse the following\n list of date time formats: (date) \"yyyy-MM-dd\", \"yyyy MM dd\", \"dd-MMM-yy\", \"dd MMM yy\", (time)\n \"HH:mm:ss\", \"HH:mm:ss:SSS\", \"HH:mm:ss:SSSnnnnnn\", \"HH.mm.ss\" \"HH.mm.ss.SSS\", \"HH.mm.ss.SSSnnnnnn\".\n Times can also contain \"AM\" or \"PM\".\n :param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary\n of column names to strings which are to be interpreted as missing values.\n :param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.\n\n :returns: a new :class:`H2OFrame` instance.\n\n :examples:\n >>> frame = h2o.Func(\"/path/to/local/data\")\n \"\"\"\n arg_8 = U(None, \"unknown\", \"uuid\", \"string\", \"float\", \"real\", \"double\", \"int\", \"numeric\",\n \"categorical\", \"factor\", \"enum\", \"time\")\n arg_9 = U(str, [str])\n assert_is_type(arg_0, str)\n assert_is_type(arg_1, str, None)\n assert_is_type(arg_2, -1, 0, 1)\n assert_is_type(arg_3, None, I(str, lambda s: len(s) == 1))\n assert_is_type(arg_4, [str], None)\n assert_is_type(arg_5, [arg_8], {str: arg_8}, None)\n assert_is_type(arg_6, [arg_9], {str: arg_9}, None)\n assert (arg_7==None) or isinstance(arg_7, list), \\\n \"The skipped_columns should be an list of column names!\"\n\n check_frame_id(arg_1)\n if arg_0.startswith(\"~\"):\n arg_0 = os.path.expanduser(arg_0)\n return H2OFrame()._upload_parse(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7)"} +{"_id": "doc_1089", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=True, arg_3=0, arg_4=None, arg_5=None, arg_6=None,\n arg_7=None, arg_8=None, arg_9=None, arg_10 = None):\n \"\"\"\n Import a dataset that is already on the cluster.\n\n The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster\n cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed\n multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that\n the latter works with local files, whereas this method imports remote files (i.e. files local to the server).\n If you running H2O server on your own maching, then both methods behave the same.\n\n :param path: path(s) specifying the location of the data to import or a path to a directory of files to import\n :param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be\n automatically generated.\n :param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.\n :param header: -1 means the first line is data, 0 means guess, 1 means first line is header.\n :param sep: The field separator character. Values on each line of the file are separated by\n this character. If not provided, the parser will automatically detect the separator.\n :param col_names: A list of column names for the file.\n :param col_types: A list of types or a dictionary of column names to types to specify whether columns\n should be forced to a certain type upon import parsing. If a list, the types for elements that are\n one will be guessed. The possible types a column may have are:\n\n - \"unknown\" - this will force the column to be parsed as all NA\n - \"uuid\" - the values in the column must be true UUID or will be parsed as NA\n - \"string\" - force the column to be parsed as a string\n - \"numeric\" - force the column to be parsed as numeric. 
H2O will handle the compression of the numeric\n data in the optimal manner.\n - \"enum\" - force the column to be parsed as a categorical column.\n - \"time\" - force the column to be parsed as a time column. H2O will attempt to parse the following\n list of date time formats: (date) \"yyyy-MM-dd\", \"yyyy MM dd\", \"dd-MMM-yy\", \"dd MMM yy\", (time)\n \"HH:mm:ss\", \"HH:mm:ss:SSS\", \"HH:mm:ss:SSSnnnnnn\", \"HH.mm.ss\" \"HH.mm.ss.SSS\", \"HH.mm.ss.SSSnnnnnn\".\n Times can also contain \"AM\" or \"PM\".\n :param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary\n of column names to strings which are to be interpreted as missing values.\n :param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a\n directory.\n :param skipped_columns: an integer list of column indices to skip and not parsed into the final frame from the import file.\n :param custom_non_data_line_markers: If a line in imported file starts with any character in given string it will NOT be imported. Empty string means all lines are imported, None means that default behaviour for given format will be used\n\n :returns: a new :class:`H2OFrame` instance.\n\n :examples:\n >>> # Single file import\n >>> iris = Func(\"h2o-3/smalldata/iris.csv\")\n >>> # Return all files in the folder iris/ matching the regex r\"iris_.*\\.csv\"\n >>> iris_pattern = h2o.Func(path = \"h2o-3/smalldata/iris\",\n ... pattern = \"iris_.*\\.csv\")\n \"\"\"\n arg_11 = U(None, \"unknown\", \"uuid\", \"string\", \"float\", \"real\", \"double\", \"int\", \"numeric\",\n \"categorical\", \"factor\", \"enum\", \"time\")\n arg_12 = U(str, [str])\n assert_is_type(arg_0, str, [str])\n assert_is_type(arg_8, str, None)\n assert_is_type(arg_1, str, None)\n assert_is_type(arg_2, bool)\n assert_is_type(arg_3, -1, 0, 1)\n assert_is_type(arg_4, None, I(str, lambda s: len(s) == 1))\n assert_is_type(arg_5, [str], None)\n assert_is_type(arg_6, [arg_11], {str: arg_11}, None)\n assert_is_type(arg_7, [arg_12], {str: arg_12}, None)\n assert isinstance(arg_9, (type(None), list)), \"The skipped_columns should be an list of column names!\"\n check_frame_id(arg_1)\n arg_13 = arg_0 if isinstance(arg_0, list) else [arg_0]\n if any(os.path.split(arg_14)[0] == \"~\" for arg_14 in arg_13):\n raise H2OValueError(\"Paths relative to a current user (~) are not valid in the server environment. \"\n \"Please use absolute paths if possible.\")\n if not arg_2:\n return lazy_import(arg_0, arg_8)\n else:\n return H2OFrame()._import_parse(arg_0, arg_8, arg_1, arg_3, arg_4, arg_5, arg_6, arg_7,\n arg_9, arg_10)"} +{"_id": "doc_1090", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=False):\n \"\"\"\n Import Hive table to H2OFrame in memory.\n\n Make sure to start H2O with Hive on classpath. Uses hive-site.xml on classpath to connect to Hive.\n\n :param database: Name of Hive database (default database will be used by default)\n :param table: name of Hive table to import\n :param partitions: a list of lists of strings - partition key column values of partitions you want to import.\n :param allow_multi_format: enable import of partitioned tables with different storage formats used. 
WARNING:\n this may fail on out-of-memory for tables with a large number of small partitions.\n\n :returns: an :class:`H2OFrame` containing data of the specified Hive table.\n\n :examples:\n >>> my_citibike_data = h2o.Func(\"default\", \"table\", [[\"2017\", \"01\"], [\"2017\", \"02\"]])\n \"\"\" \n assert_is_type(arg_0, str, None)\n assert_is_type(arg_1, str)\n assert_is_type(arg_2, [[str]], None)\n arg_4 = { \"database\": arg_0, \"table\": arg_1, \"partitions\": arg_2, \"allow_multi_format\": arg_3 }\n arg_5 = H2OJob(api(\"POST /3/ImportHiveTable\", data=arg_4), \"Import Hive Table\").poll()\n return get_frame(arg_5.dest_key)"} +{"_id": "doc_1091", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=True, \n arg_5=None, arg_6=None, arg_7=None):\n \"\"\"\n Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.\n\n Creates a temporary SQL table from the specified sql_query.\n Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.\n Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::\n\n java -cp : water.H2OApp\n\n Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle \n and Microsoft SQL Server.\n\n :param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)\n Driver. For example, \"jdbc:mysql://localhost:3306/menagerie?&useSSL=false\"\n :param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.\n :param username: username for SQL server\n :param password: password for SQL server\n :param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.\n :param use_temp_table: whether a temporary table should be created from select_query\n :param temp_table_name: name of temporary table to be created from select_query\n :param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node\n from the database.\n\n :returns: an :class:`H2OFrame` containing data of the specified SQL query.\n\n :examples:\n >>> conn_url = \"jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false\"\n >>> select_query = \"SELECT bikeid from citibike20k\"\n >>> username = \"root\"\n >>> password = \"abc123\"\n >>> my_citibike_data = h2o.Func(conn_url, select_query,\n ... 
username, password, fetch_mode)\n \"\"\"\n assert_is_type(arg_0, str)\n assert_is_type(arg_1, str)\n assert_is_type(arg_2, str)\n assert_is_type(arg_3, str)\n assert_is_type(arg_4, bool)\n assert_is_type(arg_5, bool, None)\n assert_is_type(arg_6, str, None)\n assert_is_type(arg_7, str, None)\n arg_8 = {\"connection_url\": arg_0, \"select_query\": arg_1, \"username\": arg_2, \"password\": arg_3,\n \"use_temp_table\": arg_5, \"temp_table_name\": arg_6, \"fetch_mode\": arg_7}\n arg_9 = H2OJob(api(\"POST /99/ImportSQLTable\", data=arg_8), \"Import SQL Table\").poll()\n return get_frame(arg_9.dest_key)"} +{"_id": "doc_1092", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0):\n \"\"\"\n Parse dataset using the parse setup structure.\n\n :param setup: Result of ``h2o.parse_setup()``\n :param id: an id for the frame.\n :param first_line_is_header: -1, 0, 1 if the first line is to be used as the header\n\n :returns: an :class:`H2OFrame` object.\n \"\"\"\n assert_is_type(arg_0, dict)\n assert_is_type(arg_1, str, None)\n assert_is_type(arg_2, -1, 0, 1)\n check_frame_id(arg_1)\n if arg_1:\n arg_0[\"destination_frame\"] = arg_1\n if arg_2 != (-1, 0, 1):\n if arg_2 not in (-1, 0, 1): raise ValueError(\"first_line_is_header should be -1, 0, or 1\")\n arg_0[\"check_header\"] = arg_2\n arg_3 = H2OFrame()\n arg_3._Func(arg_0)\n return arg_3"} +{"_id": "doc_1093", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load a model from the server.\n\n :param model_id: The model identification in H2O\n\n :returns: Model object, a subclass of H2OEstimator\n \"\"\"\n assert_is_type(arg_0, str)\n arg_1 = api(\"GET /3/Models/%s\" % arg_0)[\"models\"][0]\n arg_2 = arg_1[\"algo\"]\n if arg_2 == \"svd\": arg_3 = H2OSVD()\n elif arg_2 == \"pca\": arg_3 = H2OPrincipalComponentAnalysisEstimator()\n elif arg_2 == \"drf\": arg_3 = H2ORandomForestEstimator()\n elif arg_2 == \"naivebayes\": arg_3 = H2ONaiveBayesEstimator()\n elif arg_2 == \"kmeans\": arg_3 = H2OKMeansEstimator()\n elif arg_2 == \"glrm\": arg_3 = H2OGeneralizedLowRankEstimator()\n elif arg_2 == \"glm\": arg_3 = H2OGeneralizedLinearEstimator()\n elif arg_2 == \"gbm\": arg_3 = H2OGradientBoostingEstimator()\n elif arg_2 == \"deepwater\": arg_3 = H2ODeepWaterEstimator()\n elif arg_2 == \"xgboost\": arg_3 = H2OXGBoostEstimator()\n elif arg_2 == \"word2vec\": arg_3 = H2OWord2vecEstimator()\n elif arg_2 == \"generic\": arg_3 = H2OGenericEstimator()\n elif arg_2 == \"deeplearning\":\n if arg_1[\"output\"][\"model_category\"] == \"AutoEncoder\":\n arg_3 = H2OAutoEncoderEstimator()\n else:\n arg_3 = H2ODeepLearningEstimator()\n elif arg_2 == \"stackedensemble\": arg_3 = H2OStackedEnsembleEstimator()\n elif arg_2 == \"isolationforest\": arg_3 = H2OIsolationForestEstimator()\n else:\n raise ValueError(\"Unknown algo type: \" + arg_2)\n arg_3._resolve_model(arg_0, arg_1)\n return arg_3"} +{"_id": "doc_1094", "title": "", "text": "def Func(arg_0, arg_1=\"\", arg_2=True, arg_3=\"\"):\n \"\"\"\n Download the POJO for this model to the directory specified by path; if path is \"\", then dump to screen.\n\n :param model: the model whose scoring POJO should be retrieved.\n :param path: an absolute path to the directory where POJO should be saved.\n :param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).\n :param jar_name: Custom name of genmodel jar.\n :returns: location of the downloaded POJO file.\n \"\"\"\n assert_is_type(arg_0, ModelBase)\n assert_is_type(arg_1, str)\n assert_is_type(arg_2, bool)\n\n if not arg_0.have_pojo:\n 
raise H2OValueError(\"Export to POJO not supported\")\n\n if arg_1 == \"\":\n arg_4 = api(\"GET /3/Models.java/%s\" % arg_0.model_id)\n print(arg_4)\n return None\n else:\n arg_5 = api(\"GET /3/Models.java/%s\" % arg_0.model_id, save_to=arg_1)\n if arg_2:\n if arg_3 == \"\":\n api(\"GET /3/h2o-genmodel.jar\", save_to=os.path.join(arg_1, \"h2o-genmodel.jar\"))\n else:\n api(\"GET /3/h2o-genmodel.jar\", save_to=os.path.join(arg_1, arg_3))\n return arg_5"} +{"_id": "doc_1095", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Download an H2O data set to a CSV file on the local disk.\n\n Warning: Files located on the H2O server may be very large! Make sure you have enough\n hard drive space to accommodate the entire file.\n\n :param data: an H2OFrame object to be downloaded.\n :param filename: name for the CSV file where the data should be saved to.\n \"\"\"\n assert_is_type(arg_0, H2OFrame)\n assert_is_type(arg_1, str)\n arg_2 = h2oconn.make_url(\"DownloadDataset\", 3) + \"?frame_id={}&hex_string=false\".format(arg_0.frame_id)\n with open(arg_1, \"wb\") as f:\n f.write(urlopen()(arg_2).read())"} +{"_id": "doc_1096", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=1):\n \"\"\"\n Export a given H2OFrame to a path on the machine this python session is currently connected to.\n\n :param frame: the Frame to save to disk.\n :param path: the path to the save point on disk.\n :param force: if True, overwrite any preexisting file with the same path\n :param parts: enables export to multiple 'part' files instead of just a single file.\n Convenient for large datasets that take too long to store in a single file.\n Use parts=-1 to instruct H2O to determine the optimal number of part files or\n specify your desired maximum number of part files. Path needs to be a directory\n when exporting to multiple files, also that directory must be empty.\n Default is ``parts = 1``, which is to export to a single file.\n \"\"\"\n assert_is_type(arg_0, H2OFrame)\n assert_is_type(arg_1, str)\n assert_is_type(arg_2, bool)\n assert_is_type(arg_3, int)\n H2OJob(api(\"POST /3/Frames/%s/export\" % (arg_0.frame_id), data={\"path\": arg_1, \"num_parts\": arg_3, \"force\": arg_2}),\n \"Export File\").poll()"} +{"_id": "doc_1097", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\"\n Convert an H2O data object into a python-specific object.\n\n WARNING! This will pull all data local!\n\n If Pandas is available (and use_pandas is True), then pandas will be used to parse the\n data frame. 
Otherwise, a list-of-lists populated by character data will be returned (so\n the types of data will all be str).\n\n :param data: an H2O data object.\n :param use_pandas: If True, try to use pandas for reading in the data.\n :param header: If True, return column names as first element in list\n\n :returns: List of lists (Rows x Columns).\n \"\"\"\n assert_is_type(arg_0, H2OFrame)\n assert_is_type(arg_1, bool)\n assert_is_type(arg_2, bool)\n return H2OFrame.as_data_frame(arg_0, arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_1098", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True, arg_3=False):\n \"\"\"\n H2O built-in Func facility.\n\n :param funcname: A string that identifies the h2o python function to Funcnstrate.\n :param interactive: If True, the user will be prompted to continue the Funcnstration after every segment.\n :param echo: If True, the python commands that are executed will be displayed.\n :param test: If True, `h2o.init()` will not be called (used for pyunit testing).\n\n :example:\n >>> import h2o\n >>> h2o.Func(\"gbm\")\n \"\"\"\n import h2o.Funcs as h2oFunc\n assert_is_type(arg_0, str)\n assert_is_type(arg_1, bool)\n assert_is_type(arg_2, bool)\n assert_is_type(arg_3, bool)\n\n arg_4 = getattr(h2oFunc, arg_0, None)\n if arg_4 and type(arg_4) is type(Func):\n arg_4(arg_1, arg_2, arg_3)\n else:\n print(\"Demo for %s is not available.\" % arg_0)"} +{"_id": "doc_1099", "title": "", "text": "def Func(arg_0):\n \"\"\"Imports a data file within the 'h2o_data' folder.\"\"\"\n assert_is_type(arg_0, str)\n arg_1 = os.path.split(__file__)[0]\n for arg_2 in [os.path.join(arg_1, arg_0),\n os.path.join(arg_1, \"h2o_data\", arg_0),\n os.path.join(arg_1, \"h2o_data\", arg_0 + \".csv\")]:\n if os.path.exists(arg_2):\n return upload_file(arg_2)\n # File not found -- raise an error!\n raise H2OValueError(\"Data file %s cannot be found\" % arg_0)"} +{"_id": "doc_1100", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Create Model Metrics from predicted and actual values in H2O.\n\n :param H2OFrame predicted: an H2OFrame containing predictions.\n :param H2OFrame actuals: an H2OFrame containing actual values.\n :param domain: list of response factors for classification.\n :param distribution: distribution for regression.\n \"\"\"\n assert_is_type(arg_0, H2OFrame)\n assert_is_type(arg_1, H2OFrame)\n # assert predicted.ncol == 1, \"`predicted` frame should have exactly 1 column\"\n assert arg_1.ncol == 1, \"`actual` frame should have exactly 1 column\"\n assert_is_type(arg_3, str, None)\n assert_satisfies(arg_1.ncol, arg_1.ncol == 1)\n if arg_2 is None and any(arg_1.isfactor()):\n arg_2 = arg_1.levels()[0]\n arg_4 = api(\"POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s\" % (arg_0.frame_id, arg_1.frame_id),\n data={\"domain\": arg_2, \"distribution\": arg_3})\n return arg_4[\"model_metrics\"]"} +{"_id": "doc_1101", "title": "", "text": "def Func(arg_0):\n \"\"\"Check that the provided frame id is valid in Rapids language.\"\"\"\n if arg_0 is None:\n return\n if arg_0.strip() == \"\":\n raise H2OValueError(\"Frame id cannot be an empty string: %r\" % arg_0)\n for arg_1, arg_2 in enumerate(arg_0):\n # '$' character has special meaning at the beginning of the string; and prohibited anywhere else\n if arg_2 == \"$\" and arg_1 == 0: continue\n if arg_2 not in _id_allowed_characters:\n raise H2OValueError(\"Character '%s' is illegal in frame id: %s\" % (arg_2, arg_0))\n if re.match(r\"-?[0-9]\", arg_0):\n raise H2OValueError(\"Frame id cannot start with 
a number: %s\" % arg_0)"} +{"_id": "doc_1102", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert given number of bytes into a human readable representation, i.e. add prefix such as kb, Mb, Gb,\n etc. The `size` argument must be a non-negative integer.\n\n :param size: integer representing byte size of something\n :return: string representation of the size, in human-readable form\n \"\"\"\n if arg_0 == 0: return \"0\"\n if arg_0 is None: return \"\"\n assert_is_type(arg_0, int)\n assert arg_0 >= 0, \"`size` cannot be negative, got %d\" % arg_0\n arg_1 = \"PTGMk\"\n arg_2 = len(arg_1)\n for arg_3 in range(arg_2 + 1):\n arg_4 = (arg_2 - arg_3) * 10\n if arg_0 >> arg_4 == 0: continue\n arg_5 = 0\n for arg_6 in [3, 2, 1]:\n if arg_0 >> (arg_4 + 12 - arg_6 * 3) == 0:\n arg_5 = arg_6\n break\n if arg_5 == 0 or arg_0 == (arg_0 >> arg_4) << arg_4:\n arg_7 = str(arg_0 >> arg_4)\n else:\n arg_7 = \"%.*f\" % (arg_5, arg_0 / (1 << arg_4))\n return \"%s %sb\" % (arg_7, arg_1[arg_3] if arg_3 < arg_2 else \"\")"} +{"_id": "doc_1103", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a \"canonical\" version of slice ``s``.\n\n :param slice s: the original slice expression\n :param total int: total number of elements in the collection sliced by ``s``\n :return slice: a slice equivalent to ``s`` but not containing any negative indices or Nones.\n \"\"\"\n arg_2 = 0 if arg_0.start is None else max(0, arg_0.start + arg_1) if arg_0.start < 0 else min(arg_0.start, arg_1)\n arg_3 = arg_1 if arg_0.stop is None else max(0, arg_0.stop + arg_1) if arg_0.stop < 0 else min(arg_0.stop, arg_1)\n arg_4 = 1 if arg_0.step is None else arg_0.step\n return slice(arg_2, arg_3, arg_4)"} +{"_id": "doc_1104", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=False):\n \"\"\"\n MOJO scoring function to take a Pandas frame and use MOJO model as zip file to score.\n\n :param dataframe: Pandas frame to score.\n :param mojo_zip_path: Path to MOJO zip downloaded from H2O.\n :param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same\n folder as the MOJO zip will be used.\n :param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None\n (default) then the default classpath for this MOJO model will be used.\n :param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g`` is used.\n :param verbose: Optional, if True, then additional debug information will be printed. False by default.\n :return: Pandas frame with predictions\n \"\"\"\n arg_6 = tempfile.mkdtemp()\n try:\n if not can_use_pandas():\n raise RuntimeException('Cannot import pandas')\n import pandas\n assert_is_type(arg_0, pandas.DataFrame)\n arg_7 = os.path.join(arg_6, 'input.csv')\n arg_8 = os.path.join(arg_6, 'prediction.csv')\n arg_0.to_csv(arg_7)\n mojo_predict_csv(arg_7=arg_7, arg_1=arg_1,\n output_csv_path=arg_8, arg_2=arg_2,\n arg_3=arg_3, arg_4=arg_4, arg_5=arg_5)\n return pandas.read_csv(arg_8)\n finally:\n shutil.rmtree(arg_6)"} +{"_id": "doc_1105", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=None, arg_6=False):\n \"\"\"\n MOJO scoring function to take a CSV file and use MOJO model as zip file to score.\n\n :param input_csv_path: Path to input CSV file.\n :param mojo_zip_path: Path to MOJO zip downloaded from H2O.\n :param output_csv_path: Optional, name of the output CSV file with computed predictions. 
If None (default), then\n predictions will be saved as prediction.csv in the same folder as the MOJO zip.\n :param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same\n folder as the MOJO zip will be used.\n :param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None\n (default) then the default classpath for this MOJO model will be used.\n :param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g -XX:ReservedCodeCacheSize=256m`` is used.\n :param verbose: Optional, if True, then additional debug information will be printed. False by default.\n :return: List of computed predictions\n \"\"\"\n arg_7 = '-Xmx4g -XX:ReservedCodeCacheSize=256m'\n arg_8 = 'prediction.csv'\n\n # Checking java\n arg_9 = H2OLocalServer._find_java()\n H2OLocalServer._check_java(arg_9=arg_9, arg_6=arg_6)\n\n # Ensure input_csv exists\n if arg_6:\n print(\"input_csv:\\t%s\" % arg_0)\n if not os.path.isfile(arg_0):\n raise RuntimeError(\"Input csv cannot be found at %s\" % arg_0)\n\n # Ensure mojo_zip exists\n arg_1 = os.path.abspath(arg_1)\n if arg_6:\n print(\"mojo_zip:\\t%s\" % arg_1)\n if not os.path.isfile(arg_1):\n raise RuntimeError(\"MOJO zip cannot be found at %s\" % arg_1)\n\n arg_10 = os.path.dirname(arg_1)\n\n # Set output_csv if necessary\n if arg_2 is None:\n arg_2 = os.path.join(arg_10, arg_8)\n\n # Set path to h2o-genmodel.jar if necessary and check it's valid\n if arg_3 is None:\n arg_3 = os.path.join(arg_10, gen_model_file_name)\n if arg_6:\n print(\"genmodel_jar:\\t%s\" % arg_3)\n if not os.path.isfile(arg_3):\n raise RuntimeError(\"Genmodel jar cannot be found at %s\" % arg_3)\n\n if arg_6 and arg_2 is not None:\n print(\"output_csv:\\t%s\" % arg_2)\n\n # Set classpath if necessary\n if arg_4 is None:\n arg_4 = arg_3\n if arg_6:\n print(\"classpath:\\t%s\" % arg_4)\n\n # Set java_options if necessary\n if arg_5 is None:\n arg_5 = arg_7\n if arg_6:\n print(\"java_options:\\t%s\" % arg_5)\n\n # Construct command to invoke java\n arg_11 = [arg_9]\n for arg_12 in arg_5.split(' '):\n arg_11 += [arg_12]\n arg_11 += [\"-cp\", arg_4, h2o_predictor_class, \"--mojo\", arg_1, \"--input\", arg_0,\n '--output', arg_2, '--decimal']\n if arg_6:\n arg_13 = \" \".join(arg_11)\n print(\"java cmd:\\t%s\" % arg_13)\n\n # invoke the command\n subprocess.check_call(arg_11, shell=False)\n\n # load predictions in form of a dict\n with open(arg_2) as csv_file:\n arg_14 = list(csv.DictReader(csv_file))\n return arg_14"} +{"_id": "doc_1106", "title": "", "text": "def Func(arg_0):\n \"\"\"The decorator to mark Func functions.\"\"\"\n from traceback import extract_stack\n assert arg_0, \"`message` argument in @Func is required.\"\n\n def Func_decorator(arg_1):\n def arg_5(*arg_2, **arg_3):\n arg_4 = extract_stack()\n assert len(arg_4) >= 2 and arg_4[-1][2] == \"decorator_invisible\", \"Got confusing stack... 
%r\" % arg_4\n print(\"[WARNING] in %s line %d:\" % (arg_4[-2][0], arg_4[-2][1]))\n print(\" >>> %s\" % (arg_4[-2][3] or \"????\"))\n print(\" ^^^^ %s\" % arg_0)\n return arg_1(*arg_2, **arg_3)\n\n arg_5.__doc__ = arg_0\n arg_5.__name__ = arg_1.__name__\n arg_5.__module__ = arg_1.__module__\n arg_5.__Func__ = True\n return arg_5\n\n return Func_decorator"} +{"_id": "doc_1107", "title": "", "text": "def Func(arg_0):\n \"\"\"Wait until grid finishes computing.\"\"\"\n arg_0._future = False\n arg_0._job.poll()\n arg_0._job = None"} +{"_id": "doc_1108", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Print a detailed Func of the explored models.\"\"\"\n arg_2 = []\n for arg_3 in arg_0.models:\n arg_4 = arg_3._model_json[\"output\"][\"model_Func\"]\n arg_5 = list(arg_4.cell_values[0])\n arg_5[0] = arg_3.model_id\n arg_2.append(arg_5)\n\n # if h2o.can_use_pandas():\n # import pandas\n # pandas.options.display.max_rows = 20\n # print pandas.DataFrame(table,columns=self.col_header)\n # return\n print()\n if arg_1:\n print('Grid Summary:')\n print()\n H2ODisplay(arg_2, ['Model Id'] + arg_4.col_header[1:], numalign=\"left\", stralign=\"left\")"} +{"_id": "doc_1109", "title": "", "text": "def Func(arg_0):\n \"\"\"Print models sorted by metric.\"\"\"\n arg_1 = itertools.product(*list(arg_0.hyper_params.values()))\n if not arg_0.models:\n arg_2 = [[idx + 1, list(val)] for idx, val in enumerate(arg_1)]\n print(H2OTwoDimTable(\n col_header=['Model', 'Hyperparameters: [' + ', '.join(list(arg_0.hyper_params.keys())) + ']'],\n table_header='Grid Search of Model ' + arg_0.model.__class__.__name__, cell_values=arg_2))\n else:\n print(arg_0.sorted_metric_table())"} +{"_id": "doc_1110", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Derived and returned the model parameters used to train the particular grid search model.\n\n :param str id: The model id of the model with hyperparameters of interest.\n :param bool display: Flag to indicate whether to display the hyperparameter names.\n\n :returns: A dict of model pararmeters derived from the hyper-parameters used to train this particular model.\n \"\"\"\n arg_3 = arg_1 if is_type(arg_1, int) else arg_0.model_ids.index(arg_1)\n arg_4 = arg_0[arg_3]\n\n arg_5 = dict()\n\n # if cross-validation is turned on, parameters in one of the fold model actual contains the max_runtime_secs\n # parameter and not the main model that is returned.\n if arg_4._is_xvalidated:\n arg_4 = h2o.get_model(arg_4._xval_keys[0])\n\n for arg_6 in arg_0.hyper_names:\n arg_5[arg_6] = arg_4.params[arg_6]['actual'][0] if \\\n isinstance(arg_4.params[arg_6]['actual'], list) else arg_4.params[arg_6]['actual']\n\n if arg_2: print('Hyperparameters: [' + ', '.join(list(arg_0.hyper_params.keys())) + ']')\n return arg_5"} +{"_id": "doc_1111", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Retrieve an H2OGridSearch instance.\n\n Optionally specify a metric by which to sort models and a sort order.\n Note that if neither cross-validation nor a validation frame is used in the grid search, then the\n training metrics will display in the \"get grid\" output. If a validation frame is passed to the grid, and\n ``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation\n metrics will display even if a validation frame is provided.\n\n :param str sort_by: A metric by which to sort the models in the grid space. 
Choices are: ``\"logloss\"``,\n ``\"residual_deviance\"``, ``\"mse\"``, ``\"auc\"``, ``\"r2\"``, ``\"accuracy\"``, ``\"precision\"``, ``\"recall\"``,\n ``\"f1\"``, etc.\n :param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing\n order (default).\n\n :returns: A new H2OGridSearch instance optionally sorted on the specified metric.\n \"\"\"\n if arg_1 is None and arg_2 is None: return arg_0\n\n arg_3 = h2o.api(\"GET /99/Grids/%s\" % arg_0._id, data={\"sort_by\": arg_1, \"decreasing\": arg_2})\n arg_4 = H2OGridSearch(arg_0.model, arg_0.hyper_params, arg_0._id)\n arg_4.models = [h2o.get_model(key['name']) for key in arg_3['model_ids']] # reordered\n arg_6 = h2o.api(\"GET /99/Models/%s\" % arg_3['model_ids'][0]['name'])['models'][0]\n arg_7 = H2OGridSearch._metrics_class(arg_6)\n arg_8 = arg_7()\n arg_8._id = arg_0._id\n arg_8._grid_json = arg_3\n # m._metrics_class = metrics_class\n arg_8._parms = arg_4._parms\n H2OEstimator.mixin(arg_4, arg_7)\n arg_4.__dict__.update(arg_8.__dict__.copy())\n return arg_4"} +{"_id": "doc_1112", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Return the Importance of components associcated with a pca model.\n\n use_pandas: ``bool`` (default: ``False``).\n \"\"\"\n arg_2 = arg_0._model_json[\"output\"]\n if \"importance\" in list(arg_2.keys()) and arg_2[\"importance\"]:\n arg_3 = arg_2[\"importance\"].cell_values\n arg_4 = arg_2[\"importance\"].col_header\n if arg_1 and can_use_pandas():\n import pandas\n return pandas.DataFrame(arg_3, columns=arg_4)\n else:\n return arg_3\n else:\n print(\"Warning: This model doesn't have importances of components.\")"} +{"_id": "doc_1113", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Convert archetypes of the model into original feature space.\n\n :param H2OFrame test_data: The dataset upon which the model was trained.\n :param bool reverse_transform: Whether the transformation of the training data during model-building\n should be reversed on the projected archetypes.\n\n :returns: model archetypes projected back into the original training data's feature space.\n \"\"\"\n if arg_1 is None or arg_1.nrow == 0: raise ValueError(\"Must specify test data\")\n arg_3 = h2o.api(\"POST /3/Predictions/models/%s/frames/%s\" % (arg_0.model_id, arg_1.frame_id),\n data={\"project_archetypes\": True, \"reverse_transform\": arg_2})\n return h2o.get_frame(arg_3[\"model_metrics\"][0][\"predictions\"][\"frame_id\"][\"name\"])"} +{"_id": "doc_1114", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert names with underscores into camelcase.\n\n For example:\n \"num_rows\" => \"numRows\"\n \"very_long_json_name\" => \"veryLongJsonName\"\n \"build_GBM_model\" => \"buildGbmModel\"\n \"KEY\" => \"key\"\n \"middle___underscores\" => \"middleUnderscores\"\n \"_exclude_fields\" => \"_excludeFields\" (retain initial/trailing underscores)\n \"__http_status__\" => \"__httpStatus__\"\n\n :param name: name to be converted\n \"\"\"\n arg_1 = arg_0.split(\"_\")\n arg_2 = 0\n while arg_1[arg_2] == \"\":\n arg_1[arg_2] = \"_\"\n arg_2 += 1\n arg_1[arg_2] = arg_1[arg_2].lower()\n for arg_3 in range(arg_2 + 1, len(arg_1)):\n arg_1[arg_3] = arg_1[arg_3].capitalize()\n arg_2 = len(arg_1) - 1\n while arg_1[arg_2] == \"\":\n arg_1[arg_2] = \"_\"\n arg_2 -= 1\n return \"\".join(arg_1)"} +{"_id": "doc_1115", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Dedent text to the specific indentation level.\n\n :param ind: common indentation level for the resulting text 
(number of spaces to append to every line)\n :param text: text that should be transformed.\n :return: ``text`` with all common indentation removed, and then the specified amount of indentation added.\n \"\"\"\n arg_2 = textwrap.Func(arg_1)\n if arg_0 == 0:\n return arg_2\n arg_3 = \" \" * arg_0\n return \"\\n\".join(arg_3 + arg_4 for arg_4 in arg_2.split(\"\\n\"))"} +{"_id": "doc_1116", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function will extract the various operation time for GLRM model building iterations.\n\n :param javaLogText:\n :return:\n \"\"\"\n global g_initialXY\n global g_reguarlize_Y\n global g_regularize_X_objective\n global g_updateX\n global g_updateY\n global g_objective\n global g_stepsize\n global g_history\n\n\n if os.path.isfile(arg_0):\n\n arg_1 = dict()\n arg_1[\"total time (ms)\"] = []\n arg_1[\"initialXY (ms)\"] = []\n arg_1[\"regularize Y (ms)\"] = []\n arg_1[\"regularize X and objective (ms)\"] = []\n arg_1[\"update X (ms)\"] = []\n arg_1[\"update Y (ms)\"] = []\n arg_1[\"objective (ms)\"] = []\n arg_1[\"step size (ms)\"] = []\n arg_1[\"update history (ms)\"] = []\n\n arg_2 = -1\n arg_3 = 0.0\n with open(arg_0, 'r') as thefile: # go into tempfile and grab test run info\n for arg_4 in thefile:\n arg_5 = arg_4.split()\n\n if len(arg_5) > 0:\n arg_3 = arg_5[-1].replace('\\\\','')\n\n if g_initialXY in arg_4: # start of a new file\n if arg_2 > 0: # update total run time\n arg_1[\"total time (ms)\"].append(arg_2)\n arg_2 = 0.0\n else:\n arg_2 = 0.0\n\n arg_1[\"initialXY (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_reguarlize_Y in arg_4:\n arg_1[\"regularize Y (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_regularize_X_objective in arg_4:\n arg_1[\"regularize X and objective (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_updateX in arg_4:\n arg_1[\"update X (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_updateY in arg_4:\n arg_1[\"update Y (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_objective in arg_4:\n arg_1[\"objective (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_stepsize in arg_4:\n arg_1[\"step size (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n if g_history in arg_4:\n arg_1[\"update history (ms)\"].append(float(arg_3))\n arg_2 = arg_2+float(arg_3)\n\n arg_1[\"total time (ms)\"].append(arg_2) # save the last one\n print(\"Run result summary: \\n {0}\".format(arg_1))\n\n else:\n print(\"Cannot find your java log file. Nothing is done.\\n\")"} +{"_id": "doc_1117", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Main program. 
Take user input, parse it and call other functions to execute the commands\n and extract run summary and store run result in json file\n\n @return: none\n \"\"\"\n global g_test_root_dir\n global g_temp_filename\n\n if len(arg_0) < 2:\n print(\"invoke this script as python extractGLRMRuntimeJavaLog.py javatextlog.\\n\")\n sys.exit(1)\n else: # we may be in business\n arg_1 = arg_0[1] # filename while java log is stored\n\n print(\"your java text is {0}\".format(arg_1))\n extractRunInto(arg_1)"} +{"_id": "doc_1118", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Close an existing connection; once Funcd it cannot be used again.\n\n Strictly speaking it is not necessary to Func all connection that you opened -- we have several mechanisms\n in place that will do so automatically (__del__(), __exit__() and atexit() handlers), however there is also\n no good reason to make this method private.\n \"\"\"\n if arg_0._session_id:\n try:\n # If the server gone bad, we don't want to wait forever...\n if arg_0._timeout is None: arg_0._timeout = 1\n arg_0.request(\"DELETE /4/sessions/%s\" % arg_0._session_id)\n arg_0._print(\"H2O session %s Funcd.\" % arg_0._session_id)\n except Exception:\n pass\n arg_0._session_id = None\n arg_0._stage = -1"} +{"_id": "doc_1119", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the session id of the current connection.\n\n The session id is issued (through an API request) the first time it is requested, but no sooner. This is\n because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once\n issued, the session id will stay the same until the connection is closed.\n \"\"\"\n if arg_0._Func is None:\n arg_1 = arg_0.request(\"POST /4/sessions\")\n arg_0._Func = arg_1.get(\"session_key\") or arg_1.get(\"Func\")\n return CallableString(arg_0._Func)"} +{"_id": "doc_1120", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Prepare `filename` to be sent to the server.\n\n The \"preparation\" consists of creating a data structure suitable\n for passing to requests.request().\n \"\"\"\n if not arg_0: return None\n arg_1 = os.path.abspath(arg_0)\n if not os.path.exists(arg_1):\n raise H2OValueError(\"File %s does not exist\" % arg_0, skip_frames=1)\n return {os.path.basename(arg_1): open(arg_1, \"rb\")}"} +{"_id": "doc_1121", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Log response from an API request.\"\"\"\n if not arg_0._is_logging: return\n arg_3 = int((time.time() - arg_1) * 1000)\n arg_4 = \"<<< HTTP %d %s (%d ms)\\n\" % (arg_2.status_code, arg_2.reason, arg_3)\n if \"Content-Type\" in arg_2.headers:\n arg_4 += \" Content-Type: %s\\n\" % arg_2.headers[\"Content-Type\"]\n arg_4 += arg_2.text\n arg_0._log_message(arg_4 + \"\\n\\n\")"} +{"_id": "doc_1122", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Given a response object, prepare it to be handed over to the external caller.\n\n Preparation steps include:\n * detect if the response has error status, and convert it to an appropriate exception;\n * detect Content-Type, and based on that either parse the response as JSON or return as plain text.\n \"\"\"\n arg_2 = arg_0.status_code\n if arg_2 == 200 and arg_1:\n if arg_1.startswith(\"~\"): arg_1 = os.path.expanduser(arg_1)\n if os.path.isdir(arg_1) or arg_1.endswith(os.path.sep):\n arg_3 = os.path.abspath(arg_1)\n arg_4 = H2OConnection._find_file_name(arg_0)\n else:\n arg_3, arg_4 = os.path.split(os.path.abspath(arg_1))\n arg_5 = os.path.join(arg_3, arg_4)\n try:\n if not 
os.path.exists(arg_3):\n os.makedirs(arg_3)\n with open(arg_5, \"wb\") as f:\n for arg_6 in arg_0.iter_content(chunk_size=65536):\n if arg_6: # Empty chunks may occasionally happen\n f.write(arg_6)\n except OSError as e:\n raise H2OValueError(\"Cannot write to file %s: %s\" % (arg_5, e))\n return arg_5\n\n arg_7 = arg_0.headers.get(\"Content-Type\", \"\")\n if \";\" in arg_7: # Remove a \";charset=...\" part\n arg_7 = arg_7[:arg_7.index(\";\")]\n\n # Auto-detect response type by its content-type. Decode JSON, all other responses pass as-is.\n if arg_7 == \"application/json\":\n try:\n arg_8 = arg_0.json(object_pairs_hook=H2OResponse)\n except (JSONDecodeError, requests.exceptions.ContentDecodingError) as e:\n raise H2OServerError(\"Malformed JSON from server (%s):\\n%s\" % (str(e), arg_0.text))\n else:\n arg_8 = arg_0.text\n\n # Success (200 = \"Ok\", 201 = \"Created\", 202 = \"Accepted\", 204 = \"No Content\")\n if arg_2 in {200, 201, 202, 204}:\n return arg_8\n\n # Client errors (400 = \"Bad Request\", 404 = \"Not Found\", 412 = \"Precondition Failed\")\n if arg_2 in {400, 404, 412} and isinstance(arg_8, (H2OErrorV3, H2OModelBuilderErrorV3)):\n raise H2OResponseError(arg_8)\n\n # Server errors (notably 500 = \"Server Error\")\n # Note that it is possible to receive valid H2OErrorV3 object in this case, however it merely means the server\n # did not provide the correct status code.\n raise H2OServerError(\"HTTP %d %s:\\n%r\" % (arg_2, arg_0.reason, arg_8))"} +{"_id": "doc_1123", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=\"\\n\"):\n \"\"\"Helper function to print connection status messages when in verbose mode.\"\"\"\n if arg_0._verbose:\n print2(arg_1, arg_3=arg_3, arg_2=arg_2)"} +{"_id": "doc_1124", "title": "", "text": "def Func(arg_0, arg_1=\".\", arg_2=False, arg_3=\"\"):\n \"\"\"\n Download the leader model in AutoML in MOJO format.\n\n :param path: the path where MOJO file should be saved.\n :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.\n :param genmodel_name Custom name of genmodel jar\n :returns: name of the MOJO file written.\n \"\"\"\n\n return ModelBase.Func(arg_0.leader, arg_1, arg_2, arg_3)"} +{"_id": "doc_1125", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Fit this object by computing the means and standard deviations used by the transform method.\n\n :param X: An H2OFrame; may contain NAs and/or categoricals.\n :param y: None (Ignored)\n :param params: Ignored\n :returns: This H2OScaler instance\n \"\"\"\n if isinstance(arg_0.parms[\"center\"], (tuple, list)): arg_0._means = arg_0.parms[\"center\"]\n if isinstance(arg_0.parms[\"scale\"], (tuple, list)): arg_0._stds = arg_0.parms[\"scale\"]\n if arg_0.means is None and arg_0.parms[\"center\"]:\n arg_0._means = arg_1.mean(return_frame=True).getrow()\n else:\n arg_0._means = False\n if arg_0.stds is None and arg_0.parms[\"scale\"]:\n arg_0._stds = arg_1.sd()\n else:\n arg_0._stds = False\n return arg_0"} +{"_id": "doc_1126", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Scale an H2OFrame with the fitted means and standard deviations.\n\n :param X: An H2OFrame; may contain NAs and/or categoricals.\n :param y: None (Ignored)\n :param params: (Ignored)\n :returns: A scaled H2OFrame.\n \"\"\"\n return arg_1.scale(arg_0.means, arg_0.stds)"} +{"_id": "doc_1127", "title": "", "text": "def Func(arg_0):\n \"\"\"\n remove extra characters before the actual string we are\n looking for. 
The Jenkins console output is encoded using utf-8. However, the stupid\n redirect function can only encode using ASCII. I have googled for half a day with no\n results to how to resolve the issue. Hence, we are going to the heat and just manually\n get rid of the junk.\n\n Parameters\n ----------\n\n string_content : str\n contains a line read in from jenkins console\n\n :return: str: contains the content of the line after the string '[0m'\n\n \"\"\"\n\n arg_1,arg_2,arg_3 = arg_0.partition('[0m')\n\n if arg_2:\n return arg_3\n else:\n return arg_0"} +{"_id": "doc_1128", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Find the slave machine where a Jenkins job was executed on. It will save this\n information in g_failed_test_info_dict. In addition, it will\n delete this particular function handle off the temp_func_list as we do not need\n to perform this action again.\n\n Parameters\n ----------\n\n each_line : str\n contains a line read in from jenkins console\n temp_func_list : list of Python function handles\n contains a list of functions that we want to invoke to extract information from\n the Jenkins console text.\n\n :return: bool to determine if text mining should continue on the jenkins console text\n \"\"\"\n global g_node_name\n global arg_6\n\n if g_node_name in arg_0:\n arg_2 = arg_0.split()\n [arg_3,arg_4,arg_5] = arg_0.partition(g_node_name)\n\n if arg_4:\n arg_2 = arg_5.split()\n arg_6[\"6.node_name\"] = extract_true_string(arg_2[1])\n arg_1.remove(Func)\n\n return True"} +{"_id": "doc_1129", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Find if a Jenkins job has taken too long to finish and was killed. It will save this\n information in g_failed_test_info_dict.\n\n Parameters\n ----------\n\n each_line : str\n contains a line read in from jenkins console\n temp_func_list : list of Python function handles\n contains a list of functions that we want to invoke to extract information from\n the Jenkins console text.\n\n :return: bool to determine if text mining should continue on the jenkins console text\n\"\"\"\n global g_build_timeout\n global arg_2\n global arg_3\n\n if g_build_timeout in arg_0:\n arg_2[\"8.build_timeout\"] = 'Yes'\n arg_3 = True\n return False # build timeout was found, no need to continue mining the console text\n else:\n return True"} +{"_id": "doc_1130", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Find if a Jenkins job has failed to build. It will save this\n information in g_failed_test_info_dict. In addition, it will delete this particular\n function handle off the temp_func_list as we do not need to perform this action again.\n\n Parameters\n ----------\n\n each_line : str\n contains a line read in from jenkins console\n temp_func_list : list of Python function handles\n contains a list of functions that we want to invoke to extract information from\n the Jenkins console text.\n\n :return: bool to determine if text mining should continue on the jenkins console text\n \"\"\"\n global g_build_success\n global g_build_success_tests\n global arg_4\n global arg_3\n global g_build_failed_message\n\n for arg_2 in range(0,len(g_build_failed_message)):\n if g_build_failed_message[arg_2] in arg_0.lower():\n if ((arg_2 == 0) and (len(g_failed_jobs) > 0)):\n continue\n else:\n arg_3 = True\n arg_4[\"7.build_failure\"] = 'Yes'\n arg_1.remove(Func)\n return False\n\n return True"} +{"_id": "doc_1131", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Find the build id of a jenkins job. 
It will save this\n information in g_failed_test_info_dict. In addition, it will delete this particular\n function handle off the temp_func_list as we do not need to perform this action again.\n\n Parameters\n ----------\n\n each_line : str\n contains a line read in from jenkins console\n temp_func_list : list of Python function handles\n contains a list of functions that we want to invoke to extract information from\n the Jenkins console text.\n\n :return: bool to determine if text mining should continue on the jenkins console text\n \"\"\"\n global g_before_java_file\n global g_java_filenames\n global g_build_id_text\n global arg_6\n global g_output_filename\n global g_output_pickle_filename\n\n\n if g_build_id_text in arg_0:\n [arg_2,arg_3,arg_4] = arg_0.partition(g_build_id_text)\n arg_5[\"2.build_id\"] = arg_4.strip()\n\n arg_1.remove(Func)\n arg_6 = os.path.join('http://',arg_6,'view',g_view_name,'job',arg_5[\"1.jobName\"],arg_5[\"2.build_id\"],'artifact')\n\n\n return True"} +{"_id": "doc_1132", "title": "", "text": "def Func():\n \"\"\"\n Save the log scraping results into logs denoted by g_output_filename_failed_tests and\n g_output_filename_passed_tests.\n\n :return: none\n \"\"\"\n\n global g_test_root_dir\n global arg_2\n global arg_3\n global arg_4\n global arg_0\n\n\n # some build can fail really early that no buid id info is stored in the console text.\n if \"2.build_id\" not in arg_0.keys():\n arg_0[\"2.build_id\"] = \"unknown\"\n\n arg_1 = arg_0[\"2.build_id\"]\n\n arg_2 = arg_2+'_build_'+arg_1+'_failed_tests.log'\n arg_3 = arg_3+'_build_'+arg_1+'_passed_tests.log'\n arg_4 = arg_4+'_build_'+arg_1+'.pickle'\n\n arg_5 = sorted(arg_0.keys())\n\n # write out the jenkins job info into log files.\n with open(arg_4,'wb') as test_file:\n pickle.dump(arg_0,test_file)\n\n # write out the failure report as text into a text file\n arg_6 = open(arg_2,'w')\n arg_7 = None\n arg_5 = sorted(arg_0.keys())\n arg_8 = False\n\n if (\"passed_tests_info *********\" in arg_5):\n arg_7 = open(arg_3,'w')\n arg_8 = True\n\n for arg_9 in arg_5:\n arg_10 = arg_0[arg_9]\n if isinstance(arg_10,list): # writing one of the job lists\n if (len(arg_10) == 3): # it is a message for a test\n if arg_9 == \"failed_tests_info *********\":\n write_test_java_message(arg_9,arg_10,arg_6)\n\n if arg_9 == \"passed_tests_info *********\":\n write_test_java_message(arg_9,arg_10,arg_7)\n elif (len(arg_10) == 2): # it is a general bad java message\n write_java_message(arg_9,arg_10,arg_6)\n if arg_8:\n write_java_message(arg_9,arg_10,arg_7)\n else:\n write_general_build_message(arg_9,arg_10,arg_6)\n if arg_8:\n write_general_build_message(arg_9,arg_10,arg_7)\n\n arg_6.close()\n if arg_8:\n arg_7.close()"} +{"_id": "doc_1133", "title": "", "text": "def Func():\n \"\"\"\n Concatecate all log file into a summary text file to be sent to users\n at the end of a daily log scraping.\n\n :return: none\n \"\"\"\n global g_summary_text_filename\n global g_output_filename_failed_tests\n global g_output_filename_passed_tests\n\n with open(g_summary_text_filename,'a') as tempfile:\n write_file_content(tempfile,g_output_filename_failed_tests)\n write_file_content(tempfile,g_output_filename_passed_tests)"} +{"_id": "doc_1134", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Write one log file into the summary text file.\n\n Parameters\n ----------\n fhandle : Python file handle\n file handle to the summary text file\n file2read : Python file handle\n file handle to log file where we want to add its content to the summary text 
file.\n\n :return: none\n \"\"\"\n if os.path.isfile(arg_1):\n\n # write summary of failed tests logs\n with open(arg_1,'r') as tfile:\n arg_0.write('============ Content of '+ arg_1)\n arg_0.write('\\n')\n arg_0.write(tfile.read())\n arg_0.write('\\n\\n')"} +{"_id": "doc_1135", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n \"\"\"\n Loop through all java messages that are not associated with a unit test and\n write them into a log file.\n\n Parameters\n ----------\n key : str\n 9.general_bad_java_messages\n val : list of list of str\n contains the bad java messages and the message types.\n\n\n :return: none\n \"\"\"\n\n arg_2.write(arg_0)\n arg_2.write('\\n')\n\n if (len(arg_1[0]) > 0) and (len(arg_1) >= 3):\n for arg_3 in range(len(arg_1[0])):\n arg_2.write(\"Java Message Type: \")\n arg_2.write(arg_1[1][arg_3])\n arg_2.write('\\n')\n\n arg_2.write(\"Java Message: \")\n\n for arg_4 in arg_1[2][arg_3]:\n arg_2.write(arg_4)\n arg_2.write('\\n')\n\n arg_2.write('\\n \\n')"} +{"_id": "doc_1136", "title": "", "text": "def Func():\n \"\"\"\n Load in pickle file that contains dict structure with bad java messages to ignore per unit test\n or for all cases. The ignored bad java info is stored in g_ok_java_messages dict.\n\n :return:\n \"\"\"\n global arg_0\n global g_java_message_pickle_filename\n\n if os.path.isfile(g_java_message_pickle_filename):\n with open(g_java_message_pickle_filename,'rb') as tfile:\n arg_0 = pickle.load(tfile)\n else:\n arg_0[\"general\"] = []"} +{"_id": "doc_1137", "title": "", "text": "def Func(arg_0):\n \"\"\"Return enum constant `s` converted to a canonical snake-case.\"\"\"\n if arg_0.islower(): return arg_0\n if arg_0.isupper(): return arg_0.lower()\n return \"\".join(arg_1 if arg_1.islower() else \"_\" + arg_1.lower() for arg_1 in arg_0).strip(\"_\")"} +{"_id": "doc_1138", "title": "", "text": "def Func(arg_0, arg_1, arg_2=lambda arg_3:arg_3, arg_4='mean'):\n # 5 ways of resolving fractional\n # floor, ceil, funky, linear, mean\n arg_5 = ['floor', 'ceil', 'funky', 'linear', 'mean']\n if arg_4 not in arg_5:\n print \"Bad choice for interpolate:\", arg_4\n print \"Supported choices:\", arg_5\n \"\"\"\n Find the percentile of a list of values.\n\n @parameter N - is a list of values. 
Note N MUST BE already sorted.\n @parameter percent - a float value from 0.0 to 1.0.\n @parameter key - optional key function to compute value from each element of N.\n\n @return - the percentile of the values\n \"\"\"\n if arg_0 is None:\n return None\n arg_6 = (len(arg_0)-1) * arg_1\n arg_7 = int(math.floor(arg_6))\n arg_8 = int(math.ceil(arg_6))\n \n if arg_7 == arg_8:\n arg_9 = arg_2(arg_0[arg_7])\n arg_10 = \"aligned:\" \n\n elif arg_4=='floor':\n arg_9 = arg_2(arg_0[arg_7])\n arg_10 = \"fractional with floor:\" \n\n elif arg_4=='ceil':\n arg_9 = arg_2(arg_0[arg_8])\n arg_10 = \"fractional with ceil:\" \n\n elif arg_4=='funky':\n arg_11 = arg_2(arg_0[arg_7]) * (arg_8-arg_6)\n arg_12 = arg_2(arg_0[arg_8]) * (arg_6-arg_7)\n arg_9 = arg_11+arg_12\n arg_10 = \"fractional with Tung(floor and ceil) :\" \n \n elif arg_4=='linear':\n assert (arg_8-arg_7)==1\n assert (arg_6>=arg_7) and (arg_6<=arg_8)\n arg_13 = arg_6-arg_7\n arg_14 = arg_13 * (arg_2(arg_0[arg_8]) - arg_2(arg_0[arg_7]))\n arg_9 = arg_2(arg_0[arg_7] + arg_14)\n arg_10 = \"fractional %s with linear(floor and ceil):\" % arg_13\n\n elif arg_4=='mean':\n arg_9 = (arg_2(arg_0[arg_8]) + arg_2(arg_0[arg_7])) / 2.0\n arg_10 = \"fractional with mean(floor and ceil):\" \n\n # print 3 around the floored k, for eyeballing when we're close\n arg_15 = int(arg_7)\n # print the 3 around the median\n if arg_15 > 0:\n print \"prior->\", arg_2(arg_0[arg_15-1]), \" \"\n else:\n print \"prior->\", \"\"\n print \"floor->\", arg_2(arg_0[arg_15]), \" \", arg_10, 'result:', arg_9, \"f:\", arg_7, \"len(N):\", len(arg_0)\n if arg_15+1 < len(arg_0):\n print \" ceil->\", arg_2(arg_0[arg_15+1]), \"c:\", arg_8\n else:\n print \" ceil-> \", \"c:\", arg_8\n\n return arg_9"} +{"_id": "doc_1139", "title": "", "text": "def Func(arg_0):\n \"\"\"Dictionary of the default parameters of the model.\"\"\"\n arg_1 = {}\n for arg_2 in arg_0.parms:\n arg_1[arg_2] = arg_0.parms[arg_2][\"default_value\"]\n return arg_1"} +{"_id": "doc_1140", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve Model Score History.\n\n :returns: The score history as an H2OTwoDimTable or a Pandas DataFrame.\n \"\"\"\n arg_1 = arg_0._model_json[\"output\"]\n if \"Func\" in arg_1 and arg_1[\"Func\"] is not None:\n return arg_1[\"Func\"].as_data_frame()\n print(\"No score history for this model\")"} +{"_id": "doc_1141", "title": "", "text": "def Func(arg_0):\n \"\"\"Print innards of model, without regards to type.\"\"\"\n if arg_0._future:\n arg_0._job.poll_once()\n return\n if arg_0._model_json is None:\n print(\"No model trained yet\")\n return\n if arg_0.model_id is None:\n print(\"This H2OEstimator has been removed.\")\n return\n arg_1 = arg_0._model_json[\"output\"]\n print(\"Model Details\")\n print(\"=============\")\n\n print(arg_0.__class__.__name__, \": \", arg_0._model_json[\"algo_full_name\"])\n print(\"Model Key: \", arg_0._id)\n\n arg_0.summary()\n\n print()\n # training metrics\n arg_2 = arg_1[\"training_metrics\"]\n if arg_2: arg_2.Func()\n arg_3 = arg_1[\"validation_metrics\"]\n if arg_3: arg_3.Func()\n arg_4 = arg_1[\"cross_validation_metrics\"]\n if arg_4: arg_4.Func()\n arg_5 = arg_1[\"cross_validation_metrics_summary\"]\n if arg_5: arg_5.Func()\n\n if \"scoring_history\" in arg_1 and arg_1[\"scoring_history\"]:\n arg_1[\"scoring_history\"].Func()\n if \"variable_importances\" in arg_1 and arg_1[\"variable_importances\"]:\n arg_1[\"variable_importances\"].Func()"} +{"_id": "doc_1142", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False, arg_3=False):\n 
\"\"\"\n Retreive the residual degress of freedom if this model has the attribute, or None otherwise.\n\n :param bool train: Get the residual dof for the training set. If both train and valid are False, then train\n is selected by default.\n :param bool valid: Get the residual dof for the validation set. If both train and valid are True, then train\n is selected by default.\n\n :returns: Return the residual dof, or None if it is not present.\n \"\"\"\n if arg_3: raise H2OValueError(\"Cross-validation metrics are not available.\")\n if not arg_1 and not arg_2: arg_1 = True\n if arg_1 and arg_2: arg_1 = True\n if arg_1:\n return arg_0._model_json[\"output\"][\"training_metrics\"].Func()\n else:\n return arg_0._model_json[\"output\"][\"validation_metrics\"].Func()"} +{"_id": "doc_1143", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the coefficients which can be applied to the non-standardized data.\n\n Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.\n \"\"\"\n arg_1 = arg_0._model_json[\"output\"][\"coefficients_table\"]\n if arg_1 is None:\n return None\n return {arg_2: Func for arg_2, Func in zip(arg_1[\"names\"], arg_1[\"coefficients\"])}"} +{"_id": "doc_1144", "title": "", "text": "def Func(arg_0, arg_1=\"\", arg_2=False, arg_3=\"\"):\n \"\"\"\n Download the POJO for this model to the directory specified by path.\n\n If path is an empty string, then dump the output to screen.\n\n :param path: An absolute path to the directory where POJO should be saved.\n :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.\n :param genmodel_name Custom name of genmodel jar\n :returns: name of the POJO file written.\n \"\"\"\n assert_is_type(arg_1, str)\n assert_is_type(arg_2, bool)\n arg_1 = arg_1.rstrip(\"/\")\n return h2o.Func(arg_0, arg_1, get_jar=arg_2, jar_name=arg_3)"} +{"_id": "doc_1145", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check that y_actual and y_predicted have the same length.\n\n :param H2OFrame y_actual:\n :param H2OFrame y_predicted:\n\n :returns: None\n \"\"\"\n if len(arg_0) != len(arg_1):\n raise ValueError(\"Row mismatch: [{},{}]\".format(len(arg_0), len(arg_1)))"} +{"_id": "doc_1146", "title": "", "text": "def Func(arg_0=True, arg_1=True, arg_2=False):\n \"\"\"Deep Learning model demo.\"\"\"\n\n def demo_body(arg_3):\n \"\"\"\n Demo of H2O's Deep Learning model.\n\n This demo uploads a dataset to h2o, parses it, and shows a description.\n Then it divides the dataset into training and test sets, builds a GLM\n from the training set, and makes predictions for the test set.\n Finally, default performance metrics are displayed.\n \"\"\"\n arg_3()\n # Connect to H2O\n h2o.init()\n\n arg_3()\n # Upload the prostate dataset that comes included in the h2o python package\n arg_4 = h2o.load_dataset(\"prostate\")\n\n arg_3()\n # Print a description of the prostate data\n arg_4.describe()\n\n arg_3()\n # Randomly split the dataset into ~70/30, training/test sets\n arg_5, arg_6 = arg_4.split_frame(ratios=[0.70])\n\n arg_3()\n # Convert the response columns to factors (for binary classification problems)\n arg_5[\"CAPSULE\"] = arg_5[\"CAPSULE\"].asfactor()\n arg_6[\"CAPSULE\"] = arg_6[\"CAPSULE\"].asfactor()\n\n arg_3()\n # Build a (classification) GLM\n from h2o.estimators import H2ODeepLearningEstimator\n arg_7 = H2ODeepLearningEstimator(activation=\"Tanh\", hidden=[10, 10, 10], epochs=10000)\n arg_7.train(x=list(set(arg_4.col_names) - {\"ID\", 
\"CAPSULE\"}),\n y=\"CAPSULE\", training_frame=arg_5)\n\n arg_3()\n # Show the model\n arg_7.show()\n\n arg_3()\n # Predict on the test set and show the first ten predictions\n arg_8 = arg_7.predict(arg_6)\n arg_8.show()\n\n arg_3()\n # Show default performance metrics\n arg_9 = arg_7.model_performance(arg_6)\n arg_9.show()\n\n # Execute:\n _run_demo(demo_body, arg_0, arg_1, arg_2)"} +{"_id": "doc_1147", "title": "", "text": "def Func(arg_0=True, arg_1=True, arg_2=False):\n \"\"\"GLM model demo.\"\"\"\n\n def demo_body(arg_3):\n \"\"\"\n Demo of H2O's Generalized Linear Estimator.\n\n This demo uploads a dataset to h2o, parses it, and shows a description.\n Then it divides the dataset into training and test sets, builds a GLM\n from the training set, and makes predictions for the test set.\n Finally, default performance metrics are displayed.\n \"\"\"\n arg_3()\n # Connect to H2O\n h2o.init()\n\n arg_3()\n # Upload the prostate dataset that comes included in the h2o python package\n arg_4 = h2o.load_dataset(\"prostate\")\n\n arg_3()\n # Print a description of the prostate data\n arg_4.describe()\n\n arg_3()\n # Randomly split the dataset into ~70/30, training/test sets\n arg_5, arg_6 = arg_4.split_frame(ratios=[0.70])\n\n arg_3()\n # Convert the response columns to factors (for binary classification problems)\n arg_5[\"CAPSULE\"] = arg_5[\"CAPSULE\"].asfactor()\n arg_6[\"CAPSULE\"] = arg_6[\"CAPSULE\"].asfactor()\n\n arg_3()\n # Build a (classification) GLM\n from h2o.estimators import H2OGeneralizedLinearEstimator\n arg_7 = H2OGeneralizedLinearEstimator(family=\"binomial\", alpha=[0.5])\n arg_7.train(x=[\"AGE\", \"RACE\", \"PSA\", \"VOL\", \"GLEASON\"],\n y=\"CAPSULE\", training_frame=arg_5)\n\n arg_3()\n # Show the model\n arg_7.show()\n\n arg_3()\n # Predict on the test set and show the first ten predictions\n arg_8 = arg_7.predict(arg_6)\n arg_8.show()\n\n arg_3()\n # Show default performance metrics\n arg_9 = arg_7.model_performance(arg_6)\n arg_9.show()\n\n # Execute:\n _run_demo(demo_body, arg_0, arg_1, arg_2)"} +{"_id": "doc_1148", "title": "", "text": "def Func():\n \"\"\"\n Wait for a key press on the console and return it.\n\n Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key\n \"\"\"\n arg_0 = None\n if os.name == \"nt\":\n # noinspection PyUnresolvedReferences\n import msvcrt\n arg_0 = msvcrt.getch()\n else:\n import termios\n arg_1 = sys.stdin.fileno()\n\n arg_2 = termios.tcgetattr(arg_1)\n arg_3 = termios.tcgetattr(arg_1)\n arg_3[3] = arg_3[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(arg_1, termios.TCSANOW, arg_3)\n\n try:\n arg_0 = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(arg_1, termios.TCSAFLUSH, arg_2)\n\n return arg_0"} +{"_id": "doc_1149", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert to a python 'data frame'.\"\"\"\n if can_use_pandas():\n import arg_1\n arg_1.options.display.max_colwidth = 70\n return arg_1.DataFrame(arg_0._cell_values, columns=arg_0._col_header)\n return arg_0"} +{"_id": "doc_1150", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Print the contents of this table.\"\"\"\n # if h2o.can_use_pandas():\n # import pandas\n # pandas.options.display.max_rows = 20\n # print pandas.DataFrame(self._cell_values,columns=self._col_header)\n # return\n if arg_1 and arg_0._table_header:\n print(arg_0._table_header + \":\", end=' ')\n if arg_0._table_description: print(arg_0._table_description)\n print()\n arg_2 = copy.deepcopy(arg_0._cell_values)\n 
arg_3 = 0\n if _is_list_of_lists(arg_2): arg_3 = len(\n arg_2) # only set if we truly have multiple rows... not just one long row :)\n if arg_3 > 20: # create a truncated view of the table, first/last 5 rows\n arg_4 = []\n arg_4 += [arg_5 for arg_5 in arg_2[:5]]\n arg_4.append([\"---\"] * len(arg_2[0]))\n arg_4 += [arg_5 for arg_5 in arg_2[(arg_3 - 5):]]\n arg_2 = arg_4\n\n H2ODisplay(arg_2, arg_0._col_header, numalign=\"left\", stralign=\"left\")\n if arg_3 > 20 and can_use_pandas(): print('\\nSee the whole table with table.as_data_frame()')"} +{"_id": "doc_1151", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Return the location of an h2o.jar executable.\n\n :param path0: Explicitly given h2o.jar path. If provided, then we will simply check whether the file is there,\n otherwise we will search for an executable in locations returned by ._jar_paths().\n\n :raises H2OStartupError: if no h2o.jar executable can be found.\n \"\"\"\n arg_2 = [arg_1] if arg_1 else arg_0._jar_paths()\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(arg_4)\n if os.path.exists(arg_4):\n return arg_4\n raise H2OStartupError(\"Cannot start local server: h2o.jar not found. Paths searched:\\n\" +\n \"\".join(\" %s\\n\" % arg_5 for arg_5 in arg_3))"} +{"_id": "doc_1152", "title": "", "text": "def Func():\n \"\"\"Produce potential paths for an h2o.jar executable.\"\"\"\n\n # PUBDEV-3534 hook to use arbitrary h2o.jar\n arg_0 = os.getenv(\"H2O_JAR_PATH\", \"\")\n if arg_0 != \"\":\n if not os.path.isfile(arg_0):\n raise H2OStartupError(\"Environment variable H2O_JAR_PATH is set to '%d' but file does not exists, unset environment variable or provide valid path to h2o.jar file.\" % arg_0)\n yield arg_0\n\n # Check if running from an h2o-3 src folder (or any subfolder), in which case use the freshly-built h2o.jar\n arg_1 = os.path.abspath(\".\").split(os.path.sep)\n for arg_2 in range(len(arg_1), 0, -1):\n if arg_1[arg_2 - 1] == \"h2o-3\":\n yield os.path.sep.join(arg_1[:arg_2] + [\"build\", \"h2o.jar\"])\n # Then check the backend/bin folder:\n # (the following works assuming this code is located in h2o/backend/server.py file)\n arg_3 = os.path.split(os.path.realpath(__file__))[0]\n yield os.path.join(arg_3, \"bin\", \"h2o.jar\")\n\n # Then try several old locations where h2o.jar might have been installed\n arg_4 = arg_5 = sys.prefix\n # On Unix-like systems Python typically gets installed into /Library/... or /System/Library/... 
If one of\n # those paths is sys.prefix, then we also build its counterpart.\n if arg_4.startswith(os.path.sep + \"Library\"):\n arg_5 = os.path.join(\"\", \"System\", arg_4)\n elif arg_4.startswith(os.path.sep + \"System\"):\n arg_5 = arg_4[len(os.path.join(\"\", \"System\")):]\n yield os.path.join(arg_4, \"h2o_jar\", \"h2o.jar\")\n yield os.path.join(os.path.abspath(os.sep), \"usr\", \"local\", \"h2o_jar\", \"h2o.jar\")\n yield os.path.join(arg_4, \"local\", \"h2o_jar\", \"h2o.jar\")\n yield os.path.join(get_config_var(\"userbase\"), \"h2o_jar\", \"h2o.jar\")\n yield os.path.join(arg_5, \"h2o_jar\", \"h2o.jar\")"} +{"_id": "doc_1153", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Convert uri to absolute filepath\n\n Parameters\n ----------\n uri : string\n URI of python module to return path for\n\n Returns\n -------\n path : None or string\n Returns None if there is no valid path for this URI\n Otherwise returns absolute file system path for URI\n\n Examples\n --------\n >>> docwriter = ApiDocWriter('sphinx')\n >>> import sphinx\n >>> modpath = sphinx.__path__[0]\n >>> res = docwriter.Func('sphinx.builder')\n >>> res == os.path.join(modpath, 'builder.py')\n True\n >>> res = docwriter.Func('sphinx')\n >>> res == os.path.join(modpath, '__init__.py')\n True\n >>> docwriter.Func('sphinx.does_not_exist')\n\n '''\n if arg_1 == arg_0.package_name:\n return os.path.join(arg_0.root_path, '__init__.py')\n arg_2 = arg_1.replace('.', os.path.sep)\n arg_2 = arg_2.replace(arg_0.package_name + os.path.sep, '')\n arg_2 = os.path.join(arg_0.root_path, arg_2)\n # XXX maybe check for extensions as well?\n if os.path.exists(arg_2 + '.py'): # file\n arg_2 += '.py'\n elif os.path.exists(os.path.join(arg_2, '__init__.py')):\n arg_2 = os.path.join(arg_2, '__init__.py')\n else:\n return None\n return arg_2"} +{"_id": "doc_1154", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Parse lines of text for functions and classes '''\n arg_2 = []\n arg_3 = []\n for arg_4 in arg_1:\n if arg_4.startswith('def ') and arg_4.count('('):\n # exclude private stuff\n arg_5 = arg_0._get_object_name(arg_4)\n if not arg_5.startswith('_'):\n arg_2.append(arg_5)\n elif arg_4.startswith('class '):\n # exclude private stuff\n arg_5 = arg_0._get_object_name(arg_4)\n if not arg_5.startswith('_'):\n arg_3.append(arg_5)\n else:\n pass\n arg_2.sort()\n arg_3.sort()\n return arg_2, arg_3"} +{"_id": "doc_1155", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate API reST files.\n\n Parameters\n ----------\n outdir : string\n Directory name in which to store files\n We create automatic filenames for each module\n \n Returns\n -------\n None\n\n Notes\n -----\n Sets self.written_modules to list of written modules\n \"\"\"\n if not os.path.exists(arg_1):\n os.mkdir(arg_1)\n # compose list of modules\n arg_2 = arg_0.discover_modules()\n arg_0.write_modules_api(arg_2,arg_1)"} +{"_id": "doc_1156", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Update the g_ok_java_messages dict structure by\n 1. add the new java ignored messages stored in message_dict if action == 1\n 2. remove the java ignored messages stired in message_dict if action == 2.\n\n Parameters\n ----------\n\n message_dict : Python dict\n key: unit test name or \"general\"\n value: list of java messages that are to be ignored if they are found when running the test stored as the key. 
If\n the key is \"general\", the list of java messages are to be ignored when running all tests.\n action : int\n if 1: add java ignored messages stored in message_dict to g_ok_java_messages dict;\n if 2: remove java ignored messages stored in message_dict from g_ok_java_messages dict.\n\n :return: none\n \"\"\"\n global arg_5\n\n arg_2 = arg_5.keys()\n\n for arg_3 in arg_0.keys():\n if arg_3 in arg_2: # key already exists, just add to it\n for arg_4 in arg_0[arg_3]:\n\n if arg_1 == 1:\n if arg_4 not in arg_5[arg_3]:\n arg_5[arg_3].append(arg_4)\n\n if arg_1 == 2:\n if arg_4 in arg_5[arg_3]:\n arg_5[arg_3].remove(arg_4)\n else: # new key here. Can only add and cannot remove\n if arg_1 == 1:\n arg_5[arg_3] = arg_0[arg_3]"} +{"_id": "doc_1157", "title": "", "text": "def Func():\n \"\"\"\n Save the ignored java message dict stored in g_ok_java_messages into a pickle file for future use.\n\n :return: none\n \"\"\"\n global g_ok_java_messages\n global g_save_java_message_filename\n global g_dict_changed\n\n if g_dict_changed:\n with open(g_save_java_message_filename,'wb') as ofile:\n pickle.dump(g_ok_java_messages,ofile)"} +{"_id": "doc_1158", "title": "", "text": "def Func():\n \"\"\"\n Write the java ignored messages in g_ok_java_messages into a text file for humans to read.\n\n :return: none\n \"\"\"\n global g_ok_java_messages\n global g_java_messages_to_ignore_text_filename\n\n arg_0 = sorted(g_ok_java_messages.keys())\n\n with open(g_java_messages_to_ignore_text_filename,'w') as ofile:\n for arg_1 in arg_0:\n\n for arg_2 in g_ok_java_messages[arg_1]:\n ofile.write('KeyName: '+arg_1+'\\n')\n ofile.write('IgnoredMessage: '+arg_2+'\\n')\n\n print('KeyName: ',arg_1)\n print('IgnoredMessage: ',g_ok_java_messages[arg_1])\n print('\\n')"} +{"_id": "doc_1159", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse user inputs and set the corresponing global variables to perform the\n necessary tasks.\n\n Parameters\n ----------\n\n argv : string array\n contains flags and input options from users\n\n :return:\n \"\"\"\n global arg_3\n global arg_4\n global arg_5\n global arg_6\n global arg_7\n\n\n if len(arg_0) < 2: # print out help menu if user did not enter any arguments.\n usage()\n\n arg_1 = 1\n while (arg_1 < len(arg_0)):\n arg_2 = arg_0[arg_1]\n\n if (arg_2 == \"--inputfileadd\"): # input text file where new java messages are stored\n arg_1 += 1\n if (arg_1 > len(arg_0)):\n usage()\n arg_3 = arg_0[arg_1]\n elif (arg_2 == \"--inputfilerm\"): # input text file containing java messages to be removed from the ignored list\n arg_1 += 1\n if (arg_1 > len(arg_0)):\n usage()\n arg_4 = arg_0[arg_1]\n elif (arg_2 == \"--loadjavamessage\"): # load previously saved java message pickle file from file other than\n arg_1 += 1 # the default one before performing update\n if arg_1 > len(arg_0):\n usage()\n arg_5 = arg_0[arg_1]\n elif (arg_2 == \"--savejavamessage\"): # save updated java message in this file instead of default file\n arg_1 += 1\n if (arg_1 > len(arg_0)):\n usage()\n arg_6 = arg_0[arg_1]\n elif (arg_2 == '--printjavamessage'): # will print java message out to console and save in a text file\n arg_1 += 1\n arg_7 = True\n arg_5 = arg_0[arg_1]\n elif (arg_2 == '--help'): # print help menu and exit\n usage()\n else:\n unknown_arg(arg_2)\n\n arg_1 += 1"} +{"_id": "doc_1160", "title": "", "text": "def Func(arg_0):\n \"\"\"Find all python files in the given directory and all subfolders.\"\"\"\n arg_1 = []\n arg_0 = os.path.abspath(arg_0)\n for arg_2, arg_3, arg_4 in os.walk(arg_0):\n for arg_5 in 
arg_4:\n if arg_5.endswith(\".py\"):\n arg_1.append(os.path.join(arg_2, arg_5))\n return arg_1"} +{"_id": "doc_1161", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Search the file for any magic incantations.\n\n :param filename: file to search\n :returns: a tuple containing the spell and then maybe some extra words (or None if no magic present)\n \"\"\"\n with open(arg_0, \"rt\", encoding=\"utf-8\") as f:\n for arg_1 in f:\n if arg_1.startswith(\"#\"):\n arg_2 = arg_1[1:].strip()\n if arg_2.startswith(\"~~~~* \") or arg_2.startswith(\"----* \") or arg_2.startswith(\"====* \"):\n arg_3 = arg_2[5:].strip()\n return tuple(arg_3.split())\n else:\n break\n return None"} +{"_id": "doc_1162", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Transform H2OFrame using a MOJO Pipeline.\n\n :param data: Frame to be Funced.\n :param allow_timestamps: Allows datetime columns to be used directly with MOJO pipelines. It is recommended\n to parse your datetime columns as Strings when using pipelines because pipelines can interpret certain datetime\n formats in a different way. If your H2OFrame is parsed from a binary file format (eg. Parquet) instead of CSV\n it is safe to turn this option on and use datetime columns directly.\n\n :returns: A new H2OFrame.\n \"\"\"\n assert_is_type(arg_1, H2OFrame)\n assert_is_type(arg_2, bool)\n return H2OFrame._expr(ExprNode(\"mojo.pipeline.Func\", arg_0.pipeline_id[0], arg_1, arg_2))"} +{"_id": "doc_1163", "title": "", "text": "def Func():\n \"\"\"\n This function will print out the intermittents onto the screen for casual viewing. It will also print out\n where the giant summary dictionary is going to be stored.\n\n :return: None\n \"\"\"\n # extract intermittents from collected failed tests\n global g_summary_dict_intermittents\n\n arg_0 = time.tzname[0]\n\n\n for arg_1 in range(len(g_summary_dict_all[\"TestName\"])):\n if g_summary_dict_all[\"TestInfo\"][arg_1][\"FailureCount\"] >= g_threshold_failure:\n addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, arg_1)\n\n # save dict in file\n if len(g_summary_dict_intermittents[\"TestName\"]) > 0:\n json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w'))\n\n with open(g_summary_csv_filename, 'w') as summaryFile:\n for arg_1 in range(len(g_summary_dict_intermittents[\"TestName\"])):\n arg_2 = g_summary_dict_intermittents[\"TestName\"][arg_1]\n arg_3 = g_summary_dict_intermittents[\"TestInfo\"][arg_1][\"FailureCount\"]\n arg_4 = parser.parse(time.ctime(min(g_summary_dict_intermittents[\"TestInfo\"][arg_1][\"Timestamp\"]))+\n ' '+arg_0)\n arg_5 = arg_4.strftime(\"%a %b %d %H:%M:%S %Y %Z\")\n arg_6 = parser.parse(time.ctime(max(g_summary_dict_intermittents[\"TestInfo\"][arg_1][\"Timestamp\"]))+\n ' '+arg_0)\n arg_7 = arg_6.strftime(\"%a %b %d %H:%M:%S %Y %Z\")\n arg_8 = \"{0}, {1}, {2}, {3}\\n\".format(arg_2, arg_7, arg_3,\n g_summary_dict_intermittents[\"TestInfo\"][arg_1][\"TestCategory\"][0])\n summaryFile.write(arg_8)\n print(\"Intermittent: {0}, Last failed: {1}, Failed {2} times since \"\n \"{3}\".format(arg_2, arg_7, arg_3, arg_5))"} +{"_id": "doc_1164", "title": "", "text": "def Func(arg_0, arg_1=\"roc\", arg_2=False):\n \"\"\"\n Produce the desired metric Func.\n\n :param type: the type of metric Func (currently, only ROC supported).\n :param server: if True, generate Func inline using matFunclib's \"Agg\" backend.\n :returns: None\n \"\"\"\n # TODO: add more types (i.e. cutoffs)\n assert_is_type(arg_1, \"roc\")\n # check for matFunclib. 
exit if absent.\n try:\n imp.find_module('matFunclib')\n import matFunclib\n if arg_2: matFunclib.use('Agg', warn=False)\n import matFunclib.pyFunc as plt\n except ImportError:\n print(\"matFunclib is required for this function!\")\n return\n\n if arg_1 == \"roc\":\n plt.xlabel('False Positive Rate (FPR)')\n plt.ylabel('True Positive Rate (TPR)')\n plt.title('ROC Curve')\n plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(arg_0._metric_json[\"AUC\"]))\n plt.Func(arg_0.fprs, arg_0.tprs, 'b--')\n plt.axis([0, 1, 0, 1])\n if not arg_2: plt.show()"} +{"_id": "doc_1165", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Get the confusion matrix for the specified metric\n\n :param metrics: A string (or list of strings) among metrics listed in :const:`max_metrics`. Defaults to 'f1'.\n :param thresholds: A value (or list of values) between 0 and 1.\n :returns: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix\n (if there is only one).\n \"\"\"\n # make lists out of metrics and thresholds arguments\n if arg_1 is None and arg_2 is None:\n arg_1 = ['f1']\n\n if isinstance(arg_1, list):\n arg_3 = arg_1\n elif arg_1 is None:\n arg_3 = []\n else:\n arg_3 = [arg_1]\n\n if isinstance(arg_2, list):\n arg_4 = arg_2\n elif arg_2 is None:\n arg_4 = []\n else:\n arg_4 = [arg_2]\n\n # error check the metrics_list and thresholds_list\n assert_is_type(arg_4, [numeric])\n assert_satisfies(arg_4, all(0 <= arg_5 <= 1 for arg_5 in arg_4))\n\n if not all(arg_6.lower() in H2OBinomialModelMetrics.max_metrics for arg_6 in arg_3):\n raise ValueError(\"The only allowable metrics are {}\", ', '.join(H2OBinomialModelMetrics.max_metrics))\n\n # make one big list that combines the thresholds and metric-thresholds\n arg_7 = [arg_0.find_threshold_by_max_metric(arg_6) for arg_6 in arg_3]\n for arg_8 in arg_7:\n arg_4.append(arg_8)\n arg_9 = len(arg_4) - len(arg_7)\n\n arg_10 = arg_0._metric_json['thresholds_and_metric_scores']\n arg_11 = [float(e[0]) for arg_13, e in enumerate(arg_10.cell_values)]\n arg_12 = []\n for arg_13, arg_5 in enumerate(arg_4):\n arg_14 = arg_0.find_idx_by_threshold(arg_5)\n arg_15 = arg_10.cell_values[arg_14]\n arg_16 = arg_15[11]\n arg_17 = arg_15[12]\n arg_18 = arg_15[13]\n arg_19 = arg_15[14]\n arg_20 = arg_19 + arg_17\n arg_21 = arg_16 + arg_18\n arg_22 = arg_21 - arg_18\n arg_23 = arg_20 - arg_19\n if arg_5 in arg_7:\n arg_6 = arg_3[arg_13 - arg_9]\n arg_24 = \"Confusion Matrix (Act/Pred) for max {} @ threshold = {}\".format(arg_6, arg_11[arg_14])\n else:\n arg_24 = \"Confusion Matrix (Act/Pred) @ threshold = {}\".format(arg_11[arg_14])\n arg_12.append(ConfusionMatrix(cm=[[arg_22, arg_18], [arg_23, arg_19]], domains=arg_0._metric_json['domain'],\n arg_24=arg_24))\n\n if len(arg_12) == 1:\n return arg_12[0]\n else:\n return arg_12"} +{"_id": "doc_1166", "title": "", "text": "def Func():\n \"\"\"Returns True if a deep water model can be built, or False otherwise.\"\"\"\n arg_0 = h2o.api(\"GET /3/ModelBuilders\", data={\"algo\": \"deepwater\"})\n arg_1 = arg_0[\"model_builders\"][\"deepwater\"][\"visibility\"]\n if arg_1 == \"Experimental\":\n print(\"Cannot build a Deep Water model - no backend found.\")\n return False\n else:\n return True"} +{"_id": "doc_1167", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This method will remove data from the summary text file and the dictionary file for tests that occurs before\n the number of months specified by monthToKeep.\n\n :param monthToKeep:\n :return:\n \"\"\"\n global 
g_failed_tests_info_dict\n arg_1 = time.time() # unit in seconds\n\n arg_2 = arg_1 - arg_0*30*24*3600 # in seconds\n\n clean_up_failed_test_dict(arg_2)\n clean_up_summary_text(arg_2)"} +{"_id": "doc_1168", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set site domain and name.\"\"\"\n arg_2 = arg_0.get_model(\"sites\", \"Site\")\n arg_2.objects.update_or_create(\n id=settings.SITE_ID,\n defaults={\n \"domain\": \"{{cookiecutter.domain_name}}\",\n \"name\": \"{{cookiecutter.project_name}}\",\n },\n )"} +{"_id": "doc_1169", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2) -> None:\n \"\"\"\n Append a header, preserving any duplicate entries.\n \"\"\"\n arg_4 = arg_1.lower().encode(\"latin-1\")\n arg_5 = arg_3.encode(\"latin-1\")\n arg_0._list.Func((arg_4, arg_5))"} +{"_id": "doc_1170", "title": "", "text": "def Func(arg_0, arg_1: arg_2.Callable) -> dict:\n \"\"\"\n Given a function, parse the docstring as YAML and return a dictionary of info.\n \"\"\"\n arg_4 = arg_1.__doc__\n if not arg_4:\n return {}\n\n # We support having regular docstrings before the schema\n # definition. Here we return just the schema part from\n # the docstring.\n arg_4 = arg_4.split(\"---\")[-1]\n\n arg_5 = yaml.safe_load(arg_4)\n\n if not isinstance(arg_5, dict):\n # A regular docstring (not yaml formatted) can return\n # a simple string here, which wouldn't follow the schema.\n return {}\n\n return arg_5"} +{"_id": "doc_1171", "title": "", "text": "def Func(\n arg_0, arg_1: arg_2 = None, arg_3: arg_4.List[arg_2] = None\n ) -> arg_4.List[arg_2]:\n \"\"\"\n Given `directory` and `packages` arugments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n arg_6 = []\n if arg_1 is not None:\n arg_6.append(arg_1)\n\n for arg_7 in arg_3 or []:\n arg_8 = importlib.util.find_spec(arg_7)\n assert arg_8 is not None, f\"Package {package!r} could not be found.\"\n assert (\n arg_8.origin is not None\n ), \"Directory 'statics' in package {package!r} could not be found.\"\n arg_1 = os.path.normpath(os.path.join(arg_8.origin, \"..\", \"statics\"))\n assert os.path.isdir(\n arg_1\n ), \"Directory 'statics' in package {package!r} could not be found.\"\n arg_6.append(arg_1)\n\n return arg_6"} +{"_id": "doc_1172", "title": "", "text": "async def Func(arg_0, arg_1: arg_2, arg_3: arg_4) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if arg_3[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n\n if arg_1.startswith(\"..\"):\n # Most clients will normalize the path, so we shouldn't normally\n # get this, but don't allow misbehaving clients to break out of\n # the static files directory.\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n arg_5, arg_6 = await arg_0.lookup_path(arg_1)\n\n if arg_6 and stat.S_ISREG(arg_6.st_mode):\n # We have a static file to serve.\n return arg_0.file_response(arg_5, arg_6, arg_3)\n\n elif arg_6 and stat.S_ISDIR(arg_6.st_mode) and arg_0.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n arg_7 = os.path.join(arg_1, \"index.html\")\n arg_5, arg_6 = await arg_0.lookup_path(arg_7)\n if arg_6 is not None and stat.S_ISREG(arg_6.st_mode):\n if not arg_3[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n arg_8 = URL(arg_3=arg_3)\n arg_8 = arg_8.replace(arg_1=arg_8.path + \"/\")\n return 
RedirectResponse(arg_8=arg_8)\n return arg_0.file_response(arg_5, arg_6, arg_3)\n\n if arg_0.html:\n # Check for '404.html' if we're in HTML mode.\n arg_5, arg_6 = await arg_0.lookup_path(\"404.html\")\n if arg_6 is not None and stat.S_ISREG(arg_6.st_mode):\n return arg_0.file_response(\n arg_5, arg_6, arg_3, status_code=404\n )\n\n return PlainTextResponse(\"Not Found\", status_code=404)"} +{"_id": "doc_1173", "title": "", "text": "async def Func(arg_0, arg_1: arg_2) -> None:\n \"\"\"\n Send ASGI websocket messages, ensuring valid state transitions.\n \"\"\"\n if arg_0.application_state == WebSocketState.CONNECTING:\n arg_3 = arg_1[\"type\"]\n assert arg_3 in {\"websocket.accept\", \"websocket.close\"}\n if arg_3 == \"websocket.close\":\n arg_0.application_state = WebSocketState.DISCONNECTED\n else:\n arg_0.application_state = WebSocketState.CONNECTED\n await arg_0._Func(arg_1)\n elif arg_0.application_state == WebSocketState.CONNECTED:\n arg_3 = arg_1[\"type\"]\n assert arg_3 in {\"websocket.Func\", \"websocket.close\"}\n if arg_3 == \"websocket.close\":\n arg_0.application_state = WebSocketState.DISCONNECTED\n await arg_0._Func(arg_1)\n else:\n raise RuntimeError('Cannot call \"Func\" once a close message has been sent.')"} +{"_id": "doc_1174", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Adds the default_data to data and dumps it to a json.\"\"\"\n if arg_1 is None:\n arg_1 = {}\n arg_1.update(arg_0.default_data)\n return json.dumps(arg_1)"} +{"_id": "doc_1175", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Comments last user_id's medias \"\"\"\n if not arg_0.check_user(arg_1, filter_closed_acc=True):\n return False\n arg_0.logger.info(\"Going to comment user_%s's feed:\" % arg_1)\n arg_1 = arg_0.convert_to_user_id(arg_1)\n arg_3 = arg_0.get_user_medias(arg_1, is_comment=True)\n if not arg_3:\n arg_0.logger.info(\n \"None medias received: account is closed or medias have been filtered.\")\n return False\n return arg_0.comment_medias(arg_3[:arg_2])"} +{"_id": "doc_1176", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Returns login and password stored in `secret.txt`.\"\"\"\n while not check_secret():\n pass\n while True:\n try:\n with open(SECRET_FILE, \"r\") as f:\n arg_1 = [line.strip().split(\":\", 2) for line in f.readlines()]\n except ValueError:\n arg_2 = 'Problem with opening `{}`, will remove the file.'\n raise Exception(arg_2.format(SECRET_FILE))\n if arg_0 is not None:\n for arg_3, arg_4 in arg_1:\n if arg_3 == arg_0.strip():\n return arg_3, arg_4\n print(\"Which account do you want to use? 
(Type number)\")\n for arg_5, (arg_3, arg_4) in enumerate(arg_1):\n print(\"%d: %s\" % (arg_5 + 1, arg_3))\n print(\"%d: %s\" % (0, \"add another account.\"))\n print(\"%d: %s\" % (-1, \"delete all accounts.\"))\n try:\n arg_5 = int(sys.stdin.readline())\n if arg_5 == 0:\n add_credentials()\n continue\n elif arg_5 == -1:\n delete_credentials()\n check_secret()\n continue\n elif 0 <= arg_5 - 1 < len(arg_1):\n return arg_1[arg_5 - 1]\n except Exception:\n print(\"Wrong input, enter the number of the account to use.\")"} +{"_id": "doc_1177", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\" Likes last user_id's medias \"\"\"\n if arg_3:\n if not arg_0.check_user(arg_1):\n return False\n arg_0.logger.info(\"Liking user_%s's feed:\" % arg_1)\n arg_1 = arg_0.convert_to_user_id(arg_1)\n arg_4 = arg_0.get_user_medias(arg_1, arg_3=arg_3)\n if not arg_4:\n arg_0.logger.info(\n \"None medias received: account is closed or medias have been filtered.\")\n return False\n return arg_0.like_medias(arg_4[:arg_2])"} +{"_id": "doc_1178", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Reads list from file. One line - one item.\n Returns the list if file items.\n \"\"\"\n try:\n if not check_if_file_exists(arg_0, arg_1=arg_1):\n return []\n with codecs.open(arg_0, \"r\", encoding=\"utf-8\") as f:\n arg_2 = f.readlines()\n if sys.version_info[0] < 3:\n arg_2 = [str(item.encode('utf8')) for item in arg_2]\n arg_2 = [item.strip() for item in arg_2]\n return [arg_3 for arg_3 in arg_2 if arg_3]\n except Exception as exception:\n print(str(exception))\n return []"} +{"_id": "doc_1179", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Finds the max and median long and short position concentrations\n in each time period specified by the index of positions.\n\n Parameters\n ----------\n positions : pd.DataFrame\n The positions that the strategy takes over time.\n\n Returns\n -------\n pd.DataFrame\n Columns are max long, max short, median long, and median short\n position concentrations. 
Rows are timeperiods.\n \"\"\"\n\n arg_1 = get_percent_alloc(arg_0)\n arg_1 = arg_1.drop('cash', axis=1)\n\n arg_2 = arg_1.where(arg_1.applymap(lambda x: x > 0))\n arg_3 = arg_1.where(arg_1.applymap(lambda x: x < 0))\n\n arg_4 = pd.DataFrame()\n arg_4['max_long'] = arg_2.max(axis=1)\n arg_4['median_long'] = arg_2.median(axis=1)\n arg_4['median_short'] = arg_3.median(axis=1)\n arg_4['max_short'] = arg_3.min(axis=1)\n\n return arg_4"} +{"_id": "doc_1180", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Determines the long and short allocations in a portfolio.\n\n Parameters\n ----------\n positions : pd.DataFrame\n The positions that the strategy takes over time.\n\n Returns\n -------\n df_long_short : pd.DataFrame\n Long and short allocations as a decimal\n percentage of the total net liquidation\n \"\"\"\n\n arg_1 = arg_0.drop('cash', axis=1)\n arg_2 = arg_1[arg_1 > 0].sum(axis=1).fillna(0)\n arg_3 = arg_1[arg_1 < 0].sum(axis=1).fillna(0)\n arg_4 = arg_0.cash\n arg_5 = arg_2 + arg_3 + arg_4\n arg_6 = pd.DataFrame({'long': arg_2.divide(arg_5, axis='index'),\n 'short': arg_3.divide(arg_5,\n axis='index')})\n arg_6['net exposure'] = arg_6['long'] + arg_6['short']\n return arg_6"} +{"_id": "doc_1181", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns style factor exposure of an algorithm's positions\n\n Parameters\n ----------\n positions : pd.DataFrame\n Daily equity positions of algorithm, in dollars.\n - See full explanation in create_risk_tear_sheet\n\n risk_factor : pd.DataFrame\n Daily risk factor per asset.\n - DataFrame with dates as index and equities as columns\n - Example:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03\t -0.51284 1.39173\n 2017-04-04\t -0.73381 0.98149\n 2017-04-05\t -0.90132 1.13981\n \"\"\"\n\n arg_2 = arg_0.drop('cash', axis='columns')\n arg_3 = arg_2.abs().sum(axis='columns')\n\n arg_4 = arg_2.multiply(arg_1) \\\n .divide(arg_3, axis='index')\n arg_5 = arg_4.sum(axis='columns',\n skipna=True)\n\n return arg_5"} +{"_id": "doc_1182", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=None):\n \"\"\"\n Plots DataFrame output of compute_style_factor_exposures as a line graph\n\n Parameters\n ----------\n tot_style_factor_exposure : pd.Series\n Daily style factor exposures (output of compute_style_factor_exposures)\n - Time series with decimal style factor exposures\n - Example:\n 2017-04-24 0.037820\n 2017-04-25 0.016413\n 2017-04-26 -0.021472\n 2017-04-27 -0.024859\n\n factor_name : string\n Name of style factor, for use in graph title\n - Defaults to tot_style_factor_exposure.name\n \"\"\"\n\n if arg_2 is None:\n arg_2 = plt.gca()\n\n if arg_1 is None:\n arg_1 = arg_0.name\n\n arg_2.plot(arg_0.index, arg_0,\n label=arg_1)\n arg_3 = arg_0.mean()\n arg_2.axhline(arg_3, linestyle='-.', label='Mean = {:.3}'.format(arg_3))\n arg_2.axhline(0, color='k', linestyle='-')\n arg_4, arg_4, arg_5, arg_6 = plt.axis()\n arg_7 = max(abs(arg_5), abs(arg_6))\n arg_2.set(title='Exposure to {}'.format(arg_1),\n ylabel='{} \\n weighted exposure'.format(arg_1),\n ylim=(-arg_7, arg_7))\n arg_2.legend(frameon=True, framealpha=0.5)\n\n return arg_2"} +{"_id": "doc_1183", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=arg_3, arg_4=None):\n \"\"\"\n Plots outputs of compute_sector_exposures as area charts\n\n Parameters\n ----------\n long_exposures, short_exposures : arrays\n Arrays of long and short sector exposures (output of\n compute_sector_exposures).\n\n sector_dict : dict or OrderedDict\n Dictionary of all sectors\n - See full description in 
compute_sector_exposures\n \"\"\"\n\n if arg_4 is None:\n arg_4 = plt.gca()\n\n if arg_2 is None:\n arg_5 = arg_3.values()\n else:\n arg_5 = arg_2.values()\n\n arg_6 = plt.cm.gist_rainbow(np.linspace(0, 1, 11))\n\n arg_4.stackplot(arg_0[0].index, arg_0,\n labels=arg_5, colors=arg_6, alpha=0.8,\n baseline='zero')\n arg_4.stackplot(arg_0[0].index, arg_1,\n colors=arg_6, alpha=0.8, baseline='zero')\n arg_4.axhline(0, color='k', linestyle='-')\n arg_4.set(title='Long and short exposures to sectors',\n ylabel='Proportion of long/short exposure in sectors')\n arg_4.legend(loc='upper left', frameon=True, framealpha=0.5)\n\n return arg_4"} +{"_id": "doc_1184", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Plots output of compute_sector_exposures as line graphs\n\n Parameters\n ----------\n net_exposures : arrays\n Arrays of net sector exposures (output of compute_sector_exposures).\n\n sector_dict : dict or OrderedDict\n Dictionary of all sectors\n - See full description in compute_sector_exposures\n \"\"\"\n\n if arg_2 is None:\n arg_2 = plt.gca()\n\n if arg_1 is None:\n arg_3 = SECTORS.values()\n else:\n arg_3 = arg_1.values()\n\n arg_4 = plt.cm.gist_rainbow(np.linspace(0, 1, 11))\n\n for arg_5 in range(len(arg_0)):\n arg_2.plot(arg_0[arg_5], color=arg_4[arg_5], alpha=0.8,\n label=arg_3[arg_5])\n arg_2.set(title='Net exposures to sectors',\n ylabel='Proportion of net exposure \\n in sectors')\n\n return arg_2"} +{"_id": "doc_1185", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=False,\n arg_9=False,\n arg_10='infer',\n arg_11=False,\n arg_12=(1.0, 1.5, 2.0),\n arg_13=False,\n arg_14=None,\n arg_15=None,\n arg_16=None,\n arg_17=None,\n arg_18=None,\n arg_19=None,\n arg_20=None,\n arg_21='AGB',\n arg_22=True,\n arg_23=None,\n arg_24=None,\n arg_25=True,\n arg_26=None,\n arg_27=arg_28):\n \"\"\"\n Generate a number of tear sheets that are useful\n for analyzing a strategy's performance.\n\n - Fetches benchmarks if needed.\n - Creates tear sheets for returns, and significant events.\n If possible, also creates tear sheets for position analysis,\n transaction analysis, and Bayesian analysis.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - Time series with decimal returns.\n - Example:\n 2015-07-16 -0.012143\n 2015-07-17 0.045350\n 2015-07-20 0.030957\n 2015-07-21 0.004902\n positions : pd.DataFrame, optional\n Daily net position values.\n - Time series of dollar amount invested in each position and cash.\n - Days where stocks are not held can be represented by 0 or NaN.\n - Non-working capital is labelled 'cash'\n - Example:\n index 'AAPL' 'MSFT' cash\n 2004-01-09 13939.3800 -14012.9930 711.5585\n 2004-01-12 14492.6300 -14624.8700 27.1821\n 2004-01-13 -13853.2800 13653.6400 -43.6375\n transactions : pd.DataFrame, optional\n Executed trade volumes and fill prices.\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indicies.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\n market_data : pd.Panel, optional\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\n slippage : int/float, optional\n Basis points of slippage to apply to returns before 
generating\n tearsheet stats and plots.\n If a value is provided, slippage parameter sweep\n plots will be generated from the unadjusted returns.\n Transactions and positions must also be passed.\n - See txn.adjust_returns_for_slippage for more details.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period. This datetime should be normalized.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n bayesian: boolean, optional\n If True, causes the generation of a Bayesian tear sheet.\n round_trips: boolean, optional\n If True, causes the generation of a round trip tear sheet.\n sector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n estimate_intraday: boolean or str, optional\n Instead of using the end-of-day positions, use the point in the day\n where we have the most $ invested. This will adjust positions to\n better approximate and represent how an intraday strategy behaves.\n By default, this is 'infer', and an attempt will be made to detect\n an intraday strategy. Specifying this value will prevent detection.\n cone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\n bootstrap : boolean (optional)\n Whether to perform bootstrap analysis for the performance\n metrics. Takes a few minutes longer.\n turnover_denom : str\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\n factor_returns : pd.Dataframe, optional\n Returns by factor, with date as index and factors as columns\n factor_loadings : pd.Dataframe, optional\n Factor loadings for all days in the date range, with date and\n ticker as index, and factors as columns.\n pos_in_dollars : boolean, optional\n indicates whether positions is in dollars\n header_rows : dict or OrderedDict, optional\n Extra rows to display at the top of the perf stats table.\n set_context : boolean, optional\n If True, set default plotting style context.\n - See plotting.context().\n factor_partitions : dict, optional\n dict specifying how factors should be separated in perf attrib\n factor returns and risk exposures plots\n - See create_perf_attrib_tear_sheet().\n \"\"\"\n\n if (arg_14 is None) and (arg_5 is not None) and\\\n (arg_2 is not None):\n arg_14 = arg_0.copy()\n arg_0 = txn.adjust_returns_for_slippage(arg_0, arg_1,\n arg_2, arg_5)\n\n arg_1 = utils.check_intraday(arg_10, arg_0,\n arg_1, arg_2)\n\n create_returns_tear_sheet(\n arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_6=arg_6,\n arg_12=arg_12,\n arg_4=arg_4,\n arg_13=arg_13,\n arg_21=arg_21,\n arg_26=arg_26,\n arg_22=arg_22)\n\n create_interesting_times_tear_sheet(arg_0,\n arg_4=arg_4,\n arg_22=arg_22)\n\n if arg_1 is not None:\n create_position_tear_sheet(arg_0, arg_1,\n arg_11=arg_11,\n arg_22=arg_22,\n arg_7=arg_7,\n arg_10=False)\n\n if arg_2 is not None:\n create_txn_tear_sheet(arg_0, arg_1, arg_2,\n arg_14=arg_14,\n arg_10=False,\n arg_22=arg_22)\n if arg_9:\n create_round_trip_tear_sheet(\n arg_0=arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_7=arg_7,\n arg_10=False)\n\n if arg_3 is not None:\n create_capacity_tear_sheet(arg_0, arg_1, arg_2,\n arg_3,\n liquidation_daily_vol_limit=0.2,\n last_n_days=125,\n arg_10=False)\n\n if arg_15 is not None:\n 
create_risk_tear_sheet(arg_1, arg_15, arg_16,\n arg_17, arg_18, arg_19, arg_20)\n\n if arg_23 is not None and arg_24 is not None:\n create_perf_attrib_tear_sheet(arg_0, arg_1, arg_23,\n arg_24, arg_2,\n arg_25=arg_25,\n arg_27=arg_27)\n\n if arg_8:\n create_bayesian_tear_sheet(arg_0,\n arg_6=arg_6,\n arg_4=arg_4,\n arg_22=arg_22)"} +{"_id": "doc_1186", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=2, arg_3=False,\n arg_4=False, arg_5=None,\n arg_6=None, arg_7='infer'):\n \"\"\"\n Generate a number of plots for analyzing a\n strategy's positions and holdings.\n\n - Plots: gross leverage, exposures, top positions, and holdings.\n - Will also print the top positions held.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n show_and_plot_top_pos : int, optional\n By default, this is 2, and both prints and plots the\n top 10 positions.\n If this is 0, it will only plot; if 1, it will only print.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n Overrides show_and_plot_top_pos to 0 to suppress text output.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n sector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n transactions : pd.DataFrame, optional\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n estimate_intraday: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in create_full_tear_sheet.\n \"\"\"\n\n arg_1 = utils.check_intraday(arg_7, arg_0,\n arg_1, arg_6)\n\n if arg_3:\n arg_2 = 0\n arg_8 = 7 if arg_5 is not None else 6\n\n arg_9 = plt.figure(figsize=(14, arg_8 * 6))\n arg_10 = gridspec.GridSpec(arg_8, 3, wspace=0.5, hspace=0.5)\n arg_11 = plt.subplot(arg_10[0, :])\n arg_12 = plt.subplot(arg_10[1, :], sharex=arg_11)\n arg_13 = plt.subplot(arg_10[2, :], sharex=arg_11)\n arg_14 = plt.subplot(arg_10[3, :], sharex=arg_11)\n arg_15 = plt.subplot(arg_10[4, :])\n arg_16 = plt.subplot(arg_10[5, :], sharex=arg_11)\n\n arg_17 = pos.get_percent_alloc(arg_1)\n\n plotting.plot_exposures(arg_0, arg_1, arg_21=arg_11)\n\n plotting.show_and_plot_top_positions(\n arg_0,\n arg_17,\n show_and_plot=arg_2,\n arg_3=arg_3,\n arg_21=arg_12)\n\n plotting.plot_max_median_position_concentration(arg_1,\n arg_21=arg_13)\n\n plotting.plot_holdings(arg_0, arg_17, arg_21=arg_14)\n\n plotting.plot_long_short_holdings(arg_0, arg_17,\n arg_21=arg_15)\n\n plotting.plot_gross_leverage(arg_0, arg_1,\n arg_21=arg_16)\n\n if arg_5 is not None:\n arg_18 = pos.get_sector_exposures(arg_1,\n arg_5)\n if len(arg_18.columns) > 1:\n arg_19 = pos.get_percent_alloc(arg_18)\n arg_19 = arg_19.drop('cash', axis='columns')\n arg_20 = plt.subplot(arg_10[6, :], sharex=arg_11)\n plotting.plot_sector_allocations(arg_0, arg_19,\n arg_21=arg_20)\n\n for arg_21 in arg_9.axes:\n plt.setp(arg_21.get_xticklabels(), visible=True)\n\n if arg_4:\n return arg_9"} +{"_id": "doc_1187", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=True,\n arg_6=False,\n arg_7=arg_8):\n \"\"\"\n Generate plots and tables for analyzing a strategy's performance.\n\n Parameters\n ----------\n returns : pd.Series\n Returns for each day in the date range.\n\n positions: pd.DataFrame\n 
Daily holdings (in dollars or percentages), indexed by date.\n Will be converted to percentages if positions are in dollars.\n Short positions show up as cash in the 'cash' column.\n\n factor_returns : pd.DataFrame\n Returns by factor, with date as index and factors as columns\n\n factor_loadings : pd.DataFrame\n Factor loadings for all days in the date range, with date\n and ticker as index, and factors as columns.\n\n transactions : pd.DataFrame, optional\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n - Default is None.\n\n pos_in_dollars : boolean, optional\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.\n\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n\n factor_partitions : dict\n dict specifying how factors should be separated in factor returns\n and risk exposures plots\n - Example:\n {'style': ['momentum', 'size', 'value', ...],\n 'sector': ['technology', 'materials', ... ]}\n \"\"\"\n arg_9, arg_10 = perf_attrib.perf_attrib(\n arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5=arg_5\n )\n\n display(Markdown(\"## Performance Relative to Common Risk Factors\"))\n\n # aggregate perf attrib stats and show summary table\n perf_attrib.show_perf_attrib_stats(arg_0, arg_1, arg_2,\n arg_3, arg_4,\n arg_5)\n\n # one section for the returns plot, and for each factor grouping\n # one section for factor returns, and one for risk exposures\n arg_11 = 1 + 2 * max(len(arg_7), 1)\n arg_12 = 0\n\n arg_13 = plt.figure(figsize=[14, arg_11 * 6])\n\n arg_14 = gridspec.GridSpec(arg_11, 1,\n wspace=0.5, hspace=0.5)\n\n perf_attrib.plot_returns(arg_10,\n ax=plt.subplot(arg_14[arg_12]))\n arg_12 += 1\n\n if arg_7 is not None:\n\n for arg_15, arg_16 in arg_7.iteritems():\n\n arg_17 = arg_10.columns.intersection(\n arg_16\n )\n\n perf_attrib.plot_factor_contribution_to_perf(\n arg_10[arg_17],\n ax=plt.subplot(arg_14[arg_12]),\n title=(\n 'Cumulative common {} returns attribution'\n ).format(arg_15)\n )\n arg_12 += 1\n\n for arg_15, arg_16 in arg_7.iteritems():\n\n perf_attrib.plot_risk_exposures(\n arg_9[arg_9.columns\n .intersection(arg_16)],\n ax=plt.subplot(arg_14[arg_12]),\n title='Daily {} factor exposures'.format(arg_15)\n )\n arg_12 += 1\n\n else:\n\n perf_attrib.plot_factor_contribution_to_perf(\n arg_10,\n ax=plt.subplot(arg_14[arg_12])\n )\n arg_12 += 1\n\n perf_attrib.plot_risk_exposures(\n arg_9,\n ax=plt.subplot(arg_14[arg_12])\n )\n\n arg_14.tight_layout(arg_13)\n\n if arg_6:\n return arg_13"} +{"_id": "doc_1188", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sums the absolute value of shares traded in each name on each day.\n Adds columns containing the closing price and total daily volume for\n each day-ticker combination.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n market_data : pd.Panel\n Contains \"volume\" and \"price\" DataFrames for the tickers\n in the passed positions DataFrames\n\n Returns\n -------\n txn_daily : pd.DataFrame\n Daily totals for transacted shares in each traded name.\n price and volume columns for close price and daily volume for\n the corresponding ticker, respectively.\n \"\"\"\n\n arg_0.index.name = 'date'\n arg_4 = pd.DataFrame(arg_0.assign(\n amount=abs(arg_0.amount)).groupby(\n ['symbol', pd.TimeGrouper('D')]).sum()['amount'])\n arg_4['price'] = arg_1['price'].unstack()\n arg_4['volume'] = arg_1['volume'].unstack()\n\n arg_4 = arg_4.reset_index().set_index('date')\n\n return arg_4"} +{"_id": "doc_1189", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None):\n \"\"\"\n For each traded name, find the daily transaction total that consumed\n the greatest proportion of available daily bar volume.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n market_data : pd.Panel\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\n last_n_days : integer\n Compute for only the last n days of the passed backtest data.\n \"\"\"\n\n arg_3 = daily_txns_with_bar_data(arg_0, arg_1)\n arg_3.index.name = 'date'\n arg_3 = arg_3.reset_index()\n\n if arg_2 is not None:\n arg_6 = arg_3.date.max() - pd.Timedelta(days=arg_2)\n arg_3 = arg_3[arg_3.date > arg_6]\n\n arg_7 = arg_3.assign(\n max_pct_bar_consumed=(\n arg_3.amount/arg_3.volume)*100\n ).sort_values('max_pct_bar_consumed', ascending=False)\n arg_8 = arg_7.groupby('symbol').first()\n\n return arg_8[['date', 'max_pct_bar_consumed']]"} +{"_id": "doc_1190", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Maps a single transaction row to a dictionary.\n\n Parameters\n ----------\n txn : pd.DataFrame\n A single transaction object to convert to a dictionary.\n\n Returns\n -------\n dict\n Mapped transaction.\n \"\"\"\n\n if isinstance(arg_0['sid'], dict):\n arg_1 = arg_0['sid']['sid']\n arg_2 = arg_0['sid']['symbol']\n else:\n arg_1 = arg_0['sid']\n arg_2 = arg_0['sid']\n\n return {'sid': arg_1,\n 'symbol': arg_2,\n 'price': arg_0['price'],\n 'order_id': arg_0['order_id'],\n 'amount': arg_0['amount'],\n 'commission': arg_0['commission'],\n 'dt': arg_0['dt']}"} +{"_id": "doc_1191", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract daily transaction data from set of transaction objects.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Time series containing one row per symbol (and potentially\n duplicate datetime indices) and columns for amount and\n price.\n\n Returns\n -------\n pd.DataFrame\n Daily transaction volume and number of shares.\n - See full explanation in tears.create_full_tear_sheet.\n \"\"\"\n\n arg_1 = arg_0.copy()\n arg_1.index = arg_1.index.normalize()\n arg_3 = arg_1.amount.abs()\n arg_4 = arg_1.price\n arg_5 = arg_3 * arg_4\n arg_6 = arg_3.groupby(arg_3.index).sum()\n arg_7 = arg_5.groupby(arg_5.index).sum()\n arg_6.name = \"txn_shares\"\n arg_7.name = \"txn_volume\"\n return pd.concat([arg_7, arg_6], axis=1)"} +{"_id": "doc_1192", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"\n Apply a slippage penalty for every dollar traded.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, 
noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n slippage_bps: int/float\n Basis points of slippage to apply.\n\n Returns\n -------\n pd.Series\n Time series of daily returns, adjusted for slippage.\n \"\"\"\n\n arg_4 = 0.0001 * arg_3\n arg_5 = arg_1.sum(axis=1)\n arg_6 = arg_5 * arg_0\n arg_7 = get_txn_vol(arg_2).txn_volume\n arg_8 = arg_7 * arg_4\n arg_9 = arg_6.add(-arg_8, fill_value=0)\n arg_10 = arg_0 * arg_9 / arg_6\n\n return arg_10"} +{"_id": "doc_1193", "title": "", "text": "def Func(arg_0, arg_1=arg_2.Timedelta('8h')):\n \"\"\"Merge transactions of the same direction separated by less than\n max_delta time duration.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed round_trips. One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n\n max_delta : pandas.Timedelta (optional)\n Merge transactions in the same direction separated by less\n than max_delta time duration.\n\n\n Returns\n -------\n transactions : pd.DataFrame\n\n \"\"\"\n def vwap(arg_4):\n if arg_4.amount.sum() == 0:\n warnings.warn('Zero transacted shares, setting vwap to nan.')\n return np.nan\n return (arg_4.amount * arg_4.price).sum() / \\\n arg_4.amount.sum()\n\n arg_5 = []\n for arg_6, arg_7 in arg_0.groupby('symbol'):\n arg_7 = arg_7.sort_index()\n arg_7.index.name = 'dt'\n arg_7 = arg_7.reset_index()\n\n arg_7['order_sign'] = arg_7.amount > 0\n arg_7['block_dir'] = (arg_7.order_sign.shift(\n 1) != arg_7.order_sign).astype(int).cumsum()\n arg_7['block_time'] = ((arg_7.dt.sub(arg_7.dt.shift(1))) >\n arg_1).astype(int).cumsum()\n arg_10 = (arg_7.groupby(('block_dir',\n 'block_time'))\n .apply(vwap))\n arg_10.name = 'price'\n arg_11 = arg_7.groupby(('block_dir', 'block_time')).agg({\n 'amount': 'sum',\n 'symbol': 'first',\n 'dt': 'first'})\n\n arg_12 = arg_11.join(arg_10)\n\n arg_5.append(arg_12)\n\n arg_5 = arg_2.concat(arg_5)\n arg_5 = arg_5.set_index('dt')\n return arg_5"} +{"_id": "doc_1194", "title": "", "text": "def Func(arg_0,\n arg_1=None):\n \"\"\"Group transactions into \"round trips\". First, transactions are\n grouped by day and directionality. Then, long and short\n transactions are matched to create round-trip round_trips for which\n PnL, duration and returns are computed. Crossings where a position\n changes from long to short and vice-versa are handled correctly.\n\n Under the hood, we reconstruct the individual shares in a\n portfolio over time and match round_trips in a FIFO-order.\n\n For example, the following transactions would constitute one round trip:\n index amount price symbol\n 2004-01-09 12:18:01 10 50 'AAPL'\n 2004-01-09 15:12:53 10 100 'AAPL'\n 2004-01-13 14:41:23 -10 100 'AAPL'\n 2004-01-13 15:23:34 -10 200 'AAPL'\n\n First, the first two and last two round_trips will be merged into a two\n single transactions (computing the price via vwap). Then, during\n the portfolio reconstruction, the two resulting transactions will\n be merged and result in 1 round-trip trade with a PnL of\n (150 * 20) - (75 * 20) = 1500.\n\n Note, that round trips do not have to close out positions\n completely. 
For example, we could have removed the last\n transaction in the example above and still generated a round-trip\n over 10 shares with 10 shares left in the portfolio to be matched\n with a later transaction.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed round_trips. One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n\n portfolio_value : pd.Series (optional)\n Portfolio value (all net assets including cash) over time.\n Note that portfolio_value needs to beginning of day, so either\n use .shift() or positions.sum(axis='columns') / (1+returns).\n\n Returns\n -------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip. The returns column\n contains returns in respect to the portfolio value while\n rt_returns are the returns in regards to the invested capital\n into that partiulcar round-trip.\n \"\"\"\n\n arg_0 = _groupby_consecutive(arg_0)\n arg_2 = []\n\n for arg_3, arg_4 in arg_0.groupby('symbol'):\n arg_4 = arg_4.sort_index()\n arg_5 = deque()\n arg_6 = deque()\n arg_4['signed_price'] = arg_4.price * \\\n np.sign(arg_4.amount)\n arg_4['abs_amount'] = arg_4.amount.abs().astype(int)\n for arg_7, arg_8 in arg_4.iterrows():\n if arg_8.price < 0:\n warnings.warn('Negative price detected, ignoring for'\n 'round-trip.')\n continue\n\n arg_9 = [arg_8.signed_price] * arg_8.abs_amount\n if (len(arg_5) == 0) or \\\n (copysign(1, arg_5[-1]) == copysign(1, arg_8.amount)):\n arg_5.extend(arg_9)\n arg_6.extend([arg_7] * len(arg_9))\n else:\n # Close round-trip\n arg_10 = 0\n arg_11 = 0\n arg_12 = []\n\n for arg_13 in arg_9:\n if len(arg_5) != 0 and \\\n (copysign(1, arg_5[-1]) != copysign(1, arg_13)):\n # Retrieve first dt, stock-price pair from\n # stack\n arg_14 = arg_5.popleft()\n arg_15 = arg_6.popleft()\n\n arg_10 += -(arg_13 + arg_14)\n arg_12.append(arg_15)\n arg_11 += abs(arg_14)\n\n else:\n # Push additional stock-prices onto stack\n arg_5.append(arg_13)\n arg_6.append(arg_7)\n\n arg_2.append({'pnl': arg_10,\n 'open_dt': arg_12[0],\n 'close_dt': arg_7,\n 'long': arg_13 < 0,\n 'rt_returns': arg_10 / arg_11,\n 'symbol': arg_3,\n })\n\n arg_2 = pd.DataFrame(arg_2)\n\n arg_2['duration'] = arg_2['close_dt'].sub(arg_2['open_dt'])\n\n if arg_1 is not None:\n # Need to normalize so that we can join\n arg_16 = pd.DataFrame(arg_1,\n columns=['portfolio_value'])\\\n .assign(date=arg_1.index)\n\n arg_2['date'] = arg_2.close_dt.apply(lambda x:\n x.replace(hour=0,\n minute=0,\n second=0))\n\n arg_17 = arg_2.join(arg_16, on='date', lsuffix='_')\n\n arg_2['returns'] = arg_17.pnl / arg_17.portfolio_value\n arg_2 = arg_2.drop('date', axis='columns')\n\n return arg_2"} +{"_id": "doc_1195", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate various round-trip statistics.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n\n Returns\n -------\n stats : dict\n A dictionary where each value is a pandas DataFrame containing\n various round-trip statistics.\n\n See also\n --------\n round_trips.print_round_trip_stats\n \"\"\"\n\n arg_1 = {}\n arg_1['pnl'] = agg_all_long_short(arg_0, 'pnl', PNL_STATS)\n arg_1['summary'] = agg_all_long_short(arg_0, 'pnl',\n SUMMARY_STATS)\n arg_1['duration'] = agg_all_long_short(arg_0, 'duration',\n DURATION_STATS)\n arg_1['returns'] = agg_all_long_short(arg_0, 'returns',\n RETURN_STATS)\n\n arg_1['symbols'] = \\\n arg_0.groupby('symbol')['returns'].agg(RETURN_STATS).T\n\n return 
arg_1"} +{"_id": "doc_1196", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Print various round-trip statistics. Tries to pretty-print tables\n with HTML output if run inside IPython NB.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n\n See also\n --------\n round_trips.gen_round_trip_stats\n \"\"\"\n\n arg_2 = gen_round_trip_stats(arg_0)\n\n print_table(arg_2['summary'], float_format='{:.2f}'.format,\n name='Summary stats')\n print_table(arg_2['pnl'], float_format='${:.2f}'.format, name='PnL stats')\n print_table(arg_2['duration'], float_format='{:.2f}'.format,\n name='Duration stats')\n print_table(arg_2['returns'] * 100, float_format='{:.2f}%'.format,\n name='Return stats')\n\n if not arg_1:\n arg_2['symbols'].columns = arg_2['symbols'].columns.map(format_asset)\n print_table(arg_2['symbols'] * 100,\n float_format='{:.2f}%'.format, name='Symbol stats')"} +{"_id": "doc_1197", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=True):\n \"\"\"\n Attributes the performance of a returns stream to a set of risk factors.\n\n Preprocesses inputs, and then calls empyrical.Func. See\n empyrical.Func for more info.\n\n Performance attribution determines how much each risk factor, e.g.,\n momentum, the technology sector, etc., contributed to total returns, as\n well as the daily exposure to each of the risk factors. The returns that\n can be attributed to one of the given risk factors are the\n `common_returns`, and the returns that _cannot_ be attributed to a risk\n factor are the `specific_returns`, or the alpha. The common_returns and\n specific_returns summed together will always equal the total returns.\n\n Parameters\n ----------\n returns : pd.Series\n Returns for each day in the date range.\n - Example:\n 2017-01-01 -0.017098\n 2017-01-02 0.002683\n 2017-01-03 -0.008669\n\n positions: pd.DataFrame\n Daily holdings (in dollars or percentages), indexed by date.\n Will be converted to percentages if positions are in dollars.\n Short positions show up as cash in the 'cash' column.\n - Examples:\n AAPL TLT XOM cash\n 2017-01-01 34 58 10 0\n 2017-01-02 22 77 18 0\n 2017-01-03 -15 27 30 15\n\n AAPL TLT XOM cash\n 2017-01-01 0.333333 0.568627 0.098039 0.0\n 2017-01-02 0.188034 0.658120 0.153846 0.0\n 2017-01-03 0.208333 0.375000 0.416667 0.0\n\n factor_returns : pd.DataFrame\n Returns by factor, with date as index and factors as columns\n - Example:\n momentum reversal\n 2017-01-01 0.002779 -0.005453\n 2017-01-02 0.001096 0.010290\n\n factor_loadings : pd.DataFrame\n Factor loadings for all days in the date range, with date and ticker as\n index, and factors as columns.\n - Example:\n momentum reversal\n dt ticker\n 2017-01-01 AAPL -1.592914 0.852830\n TLT 0.184864 0.895534\n XOM 0.993160 1.149353\n 2017-01-02 AAPL -0.140009 -0.524952\n TLT -1.066978 0.185435\n XOM -1.798401 0.761549\n\n\n transactions : pd.DataFrame, optional\n Executed trade volumes and fill prices. Used to check the turnover of\n the algorithm. 
Default is None, in which case the turnover check is\n skipped.\n\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indices.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\n\n pos_in_dollars : bool\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.\n\n Returns\n -------\n tuple of (risk_exposures_portfolio, Funcution)\n\n risk_exposures_portfolio : pd.DataFrame\n df indexed by datetime, with factors as columns\n - Example:\n momentum reversal\n dt\n 2017-01-01 -0.238655 0.077123\n 2017-01-02 0.821872 1.520515\n\n Funcution : pd.DataFrame\n df with factors, common returns, and specific returns as columns,\n and datetimes as index\n - Example:\n momentum reversal common_returns specific_returns\n dt\n 2017-01-01 0.249087 0.935925 1.185012 1.185012\n 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980\n \"\"\"\n (arg_0,\n arg_1,\n arg_2,\n arg_3) = _align_and_warn(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=arg_4,\n arg_5=arg_5)\n\n # Note that we convert positions to percentages *after* the checks\n # above, since get_turnover() expects positions in dollars.\n arg_1 = _stack_positions(arg_1, arg_5=arg_5)\n\n return ep.Func(arg_0, arg_1, arg_2, arg_3)"}
{"_id": "doc_1198", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=True):\n \"\"\"\n Calls `perf_attrib` using inputs, and displays outputs using\n `utils.print_table`.\n \"\"\"\n arg_6, arg_7 = perf_attrib(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=arg_5,\n )\n\n arg_8, arg_9 =\\\n create_perf_attrib_stats(arg_7, arg_6)\n\n arg_10 = '{:.2%}'.format\n arg_11 = '{:.2f}'.format\n\n arg_12 = arg_8.loc[['Annualized Specific Return',\n 'Annualized Common Return',\n 'Annualized Total Return',\n 'Specific Sharpe Ratio']]\n\n # Format return rows in summary stats table as percentages.\n for arg_13 in (\n 'Annualized Specific Return',\n 'Annualized Common Return',\n 'Annualized Total Return',\n ):\n arg_12[arg_13] = arg_10(arg_12[arg_13])\n\n # Display sharpe to two decimal places.\n arg_12['Specific Sharpe Ratio'] = arg_11(\n arg_12['Specific Sharpe Ratio']\n )\n\n print_table(arg_12, name='Summary Statistics')\n\n print_table(\n arg_9,\n name='Exposures Summary',\n # In exposures table, format exposure column to 2 decimal places, and\n # return columns as percentages.\n formatters={\n 'Average Risk Factor Exposure': arg_11,\n 'Annualized Return': arg_10,\n 'Cumulative Return': arg_10,\n },\n )"}
{"_id": "doc_1199", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Plot total, specific, and common returns.\n\n Parameters\n ----------\n perf_attrib_data : pd.DataFrame\n df with factors, common returns, and specific returns as columns,\n and datetimes as index. Assumes the `total_returns` column is NOT\n cost adjusted.\n - Example:\n momentum reversal common_returns specific_returns\n dt\n 2017-01-01 0.249087 0.935925 1.185012 1.185012\n 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980\n\n cost : pd.Series, optional\n if present, gets subtracted from `perf_attrib_data['total_returns']`,\n and gets plotted separately\n\n ax : matplotlib.axes.Axes\n axes on which plots are made. 
if None, current axes will be used\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n \"\"\"\n\n if arg_2 is None:\n arg_2 = plt.gca()\n\n arg_3 = arg_0['total_returns']\n arg_4 = 'Total returns'\n\n arg_5 = _cumulative_returns_less_costs(\n arg_3,\n arg_1\n )\n if arg_1 is not None:\n arg_4 += ' (adjusted)'\n\n arg_6 = arg_0['specific_returns']\n arg_7 = arg_0['common_returns']\n\n arg_2.plot(arg_5, color='b',\n label=arg_4)\n arg_2.plot(ep.cum_returns(arg_6), color='g',\n label='Cumulative specific returns')\n arg_2.plot(ep.cum_returns(arg_7), color='r',\n label='Cumulative common returns')\n\n if arg_1 is not None:\n arg_2.plot(-ep.cum_returns(arg_1), color='k',\n label='Cumulative cost spent')\n\n arg_2.set_title('Time series of cumulative returns')\n arg_2.set_ylabel('Returns')\n\n configure_legend(arg_2)\n\n return arg_2"} +{"_id": "doc_1200", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2='Cumulative common returns attribution',\n):\n \"\"\"\n Plot each factor's contribution to performance.\n\n Parameters\n ----------\n perf_attrib_data : pd.DataFrame\n df with factors, common returns, and specific returns as columns,\n and datetimes as index\n - Example:\n momentum reversal common_returns specific_returns\n dt\n 2017-01-01 0.249087 0.935925 1.185012 1.185012\n 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980\n\n ax : matplotlib.axes.Axes\n axes on which plots are made. if None, current axes will be used\n\n title : str, optional\n title of plot\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n \"\"\"\n if arg_1 is None:\n arg_1 = plt.gca()\n\n arg_3 = arg_0.drop(\n ['total_returns', 'common_returns'], axis='columns', errors='ignore'\n )\n\n arg_4 = pd.DataFrame()\n for arg_5 in arg_3:\n arg_4[arg_5] = ep.cum_returns(arg_3[arg_5])\n\n for arg_6 in arg_4:\n arg_1.plot(arg_4[arg_6])\n\n arg_1.axhline(0, color='k')\n configure_legend(arg_1, change_colors=True)\n\n arg_1.set_ylabel('Cumulative returns by factor')\n arg_1.set_title(arg_2)\n\n return arg_1"} +{"_id": "doc_1201", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Convert positions to percentages if necessary, and change them\n to long format.\n\n Parameters\n ----------\n positions: pd.DataFrame\n Daily holdings (in dollars or percentages), indexed by date.\n Will be converted to percentages if positions are in dollars.\n Short positions show up as cash in the 'cash' column.\n\n pos_in_dollars : bool\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.\n \"\"\"\n if arg_1:\n # convert holdings to percentages\n arg_0 = get_percent_alloc(arg_0)\n\n # remove cash after normalizing positions\n arg_0 = arg_0.drop('cash', axis='columns')\n\n # convert positions to long format\n arg_0 = arg_0.stack()\n arg_0.index = arg_0.index.set_names(['dt', 'ticker'])\n\n return arg_0"} +{"_id": "doc_1202", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compute cumulative returns, less costs.\n \"\"\"\n if arg_1 is None:\n return ep.cum_returns(arg_0)\n return ep.cum_returns(arg_0 - arg_1)"} +{"_id": "doc_1203", "title": "", "text": "def Func(arg_0):\n \"\"\"\n If zipline asset objects are used, we want to print them out prettily\n within the tear sheet. 
This function should only be applied directly\n before displaying.\n \"\"\"\n\n try:\n import zipline.assets\n except ImportError:\n return arg_0\n\n if isinstance(arg_0, zipline.assets.Asset):\n return arg_0.symbol\n else:\n return arg_0"} +{"_id": "doc_1204", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Logic for checking if a strategy is intraday and processing it.\n\n Parameters\n ----------\n estimate: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in tears.create_full_tear_sheet.\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n\n Returns\n -------\n pd.DataFrame\n Daily net position values, adjusted for intraday movement.\n \"\"\"\n\n if arg_0 == 'infer':\n if arg_2 is not None and arg_3 is not None:\n if detect_intraday(arg_2, arg_3):\n warnings.warn('Detected intraday strategy; inferring positi' +\n 'ons from transactions. Set estimate_intraday' +\n '=False to disable.')\n return estimate_intraday(arg_1, arg_2, arg_3)\n else:\n return arg_2\n else:\n return arg_2\n\n elif arg_0:\n if arg_2 is not None and arg_3 is not None:\n return estimate_intraday(arg_1, arg_2, arg_3)\n else:\n raise ValueError('Positions and txns needed to estimate intraday')\n else:\n return arg_2"} +{"_id": "doc_1205", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=23):\n \"\"\"\n Intraday strategies will often not hold positions at the day end.\n This attempts to find the point in the day that best represents\n the activity of the strategy on that day, and effectively resamples\n the end-of-day positions with the positions at this point of day.\n The point of day is found by detecting when our exposure in the\n market is at its maximum point. Note that this is an estimate.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in create_full_tear_sheet.\n\n Returns\n -------\n pd.DataFrame\n Daily net position values, resampled for intraday behavior.\n \"\"\"\n\n # Construct DataFrame of transaction amounts\n arg_4 = arg_2.copy()\n arg_4.index.names = ['date']\n arg_4['value'] = arg_4.amount * arg_4.price\n arg_4 = arg_4.reset_index().pivot_table(\n arg_5='date', values='value',\n arg_13='symbol').replace(np.nan, 0)\n\n # Cumulate transaction amounts each day\n arg_4['date'] = arg_4.index.date\n arg_4 = arg_4.groupby('date').cumsum()\n\n # Calculate exposure, then take peak of exposure every day\n arg_4['exposure'] = arg_4.abs().sum(axis=1)\n arg_7 = (arg_4['exposure'] == arg_4.groupby(\n pd.TimeGrouper('24H'))['exposure'].transform(max))\n arg_4 = arg_4[arg_7].drop('exposure', axis=1)\n\n # Compute cash delta\n arg_4['cash'] = -arg_4.sum(axis=1)\n\n # Shift EOD positions to positions at start of next trading day\n arg_8 = arg_1.copy().shift(1).fillna(0)\n arg_9 = arg_1.iloc[0].sum() / (1 + arg_0[0])\n arg_8.cash[0] = arg_9\n\n # Format and add start positions to intraday position changes\n arg_4.index = arg_4.index.normalize()\n arg_11 = arg_8.add(arg_4, fill_value=0)\n arg_11.index.name = 'period_close'\n arg_11.columns.name = 'sid'\n\n return arg_11"} +{"_id": "doc_1206", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Drop entries from rets so that the start and end dates of rets match those\n of benchmark_rets.\n\n Parameters\n ----------\n rets : pd.Series\n Daily returns of the strategy, noncumulative.\n - See pf.tears.create_full_tear_sheet for more details\n\n benchmark_rets : pd.Series\n Daily returns of the benchmark, noncumulative.\n\n Returns\n -------\n clipped_rets : pd.Series\n Daily noncumulative returns with index clipped to match that of\n benchmark returns.\n \"\"\"\n\n if (arg_0.index[0] < arg_1.index[0]) \\\n or (arg_0.index[-1] > arg_1.index[-1]):\n arg_2 = arg_0[arg_1.index]\n else:\n arg_2 = arg_0\n\n return arg_2"} +{"_id": "doc_1207", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Calls the currently registered 'returns_func'\n\n Parameters\n ----------\n symbol : object\n An identifier for the asset whose return\n series is desired.\n e.g. ticker symbol or database ID\n start : date, optional\n Earliest date to fetch data for.\n Defaults to earliest date available.\n end : date, optional\n Latest date to fetch data for.\n Defaults to latest date available.\n\n Returns\n -------\n pandas.Series\n Returned by the current 'returns_func'\n \"\"\"\n\n return SETTINGS['returns_func'](arg_0,\n arg_1=arg_1,\n arg_2=arg_2)"} +{"_id": "doc_1208", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator to set plotting context and axes style during function call.\n \"\"\"\n @wraps(arg_0)\n def call_w_context(*arg_1, **arg_2):\n arg_3 = arg_2.pop('set_context', True)\n if arg_3:\n with plotting_context(), axes_style():\n return arg_0(*arg_1, **arg_2)\n else:\n return arg_0(*arg_1, **arg_2)\n return call_w_context"} +{"_id": "doc_1209", "title": "", "text": "def Func(arg_0='darkgrid', arg_1=None):\n \"\"\"\n Create pyfolio default axes style context.\n\n Under the hood, calls and returns seaborn.Func() with\n some custom settings. 
Usually you would use in a with-context.\n\n Parameters\n ----------\n style : str, optional\n Name of seaborn style.\n rc : dict, optional\n Config flags.\n\n Returns\n -------\n seaborn plotting context\n\n Example\n -------\n >>> with pyfolio.plotting.Func(style='whitegrid'):\n >>> pyfolio.create_full_tear_sheet(..., set_context=False)\n\n See also\n --------\n For more information, see seaborn.plotting_context().\n\n \"\"\"\n if arg_1 is None:\n arg_1 = {}\n\n arg_2 = {}\n\n # Add defaults if they do not exist\n for arg_3, arg_4 in arg_2.items():\n arg_1.setdefault(arg_3, arg_4)\n\n return sns.Func(arg_0=arg_0, arg_1=arg_1)"} +{"_id": "doc_1210", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"\n Plots a heatmap of returns by month.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_1 is None:\n arg_1 = plt.gca()\n\n arg_3 = ep.aggregate_returns(arg_0, 'monthly')\n arg_3 = arg_3.unstack().round(3)\n\n sns.heatmap(\n arg_3.fillna(0) *\n 100.0,\n annot=True,\n annot_kws={\"size\": 9},\n alpha=1.0,\n center=0.0,\n cbar=False,\n cmap=matplotlib.cm.RdYlGn,\n arg_1=arg_1, **arg_2)\n arg_1.set_ylabel('Year')\n arg_1.set_xlabel('Month')\n arg_1.set_title(\"Monthly returns (%)\")\n return arg_1"} +{"_id": "doc_1211", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"\n Plots a distribution of monthly returns.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_1 is None:\n arg_1 = plt.gca()\n\n arg_3 = FuncFormatter(utils.percentage)\n arg_1.xaxis.set_major_formatter(FuncFormatter(arg_3))\n arg_1.tick_params(axis='x', which='major')\n\n arg_4 = ep.aggregate_returns(arg_0, 'monthly')\n\n arg_1.hist(\n 100 * arg_4,\n color='orangered',\n alpha=0.80,\n bins=20,\n **arg_2)\n\n arg_1.axvline(\n 100 * arg_4.mean(),\n color='gold',\n linestyle='--',\n lw=4,\n alpha=1.0)\n\n arg_1.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)\n arg_1.legend(['Mean'], frameon=True, framealpha=0.5)\n arg_1.set_ylabel('Number of months')\n arg_1.set_xlabel('Returns')\n arg_1.set_title(\"Distribution of monthly returns\")\n return arg_1"} +{"_id": "doc_1212", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2='upper left', arg_3=None, **arg_4):\n \"\"\"\n Plots total amount of stocks with an active position, breaking out\n short and long into transparent filled regions.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n positions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n\n \"\"\"\n\n if arg_3 is 
None:\n arg_3 = plt.gca()\n\n arg_1 = arg_1.drop('cash', axis='columns')\n arg_1 = arg_1.replace(0, np.nan)\n arg_5 = arg_1[arg_1 > 0].count(axis=1)\n arg_6 = arg_1[arg_1 < 0].count(axis=1)\n arg_7 = arg_3.fill_between(arg_5.index, 0, arg_5.values,\n color='g', alpha=0.5, lw=2.0)\n arg_8 = arg_3.fill_between(arg_6.index, 0, arg_6.values,\n color='r', alpha=0.5, lw=2.0)\n\n arg_9 = patches.Rectangle([0, 0], 1, 1, color='darkgoldenrod')\n arg_10 = arg_3.legend([arg_7, arg_8, arg_9],\n ['Long (max: %s, min: %s)' % (arg_5.max(),\n arg_5.min()),\n 'Short (max: %s, min: %s)' % (arg_6.max(),\n arg_6.min()),\n 'Overlap'], loc=arg_2, frameon=True,\n framealpha=0.5)\n arg_10.get_frame().set_edgecolor('black')\n\n arg_3.set_xlim((arg_0.index[0], arg_0.index[-1]))\n arg_3.set_title('Long and short holdings')\n arg_3.set_ylabel('Holdings')\n arg_3.set_xlabel('')\n return arg_3"}
{"_id": "doc_1213", "title": "", "text": "def Func(arg_0, arg_1=10, arg_2=None, **arg_3):\n \"\"\"\n Plots cumulative returns highlighting top drawdown periods.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n top : int, optional\n Amount of top drawdown periods to plot (default 10).\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_2 is None:\n arg_2 = plt.gca()\n\n arg_4 = FuncFormatter(utils.two_dec_places)\n arg_2.yaxis.set_major_formatter(FuncFormatter(arg_4))\n\n arg_5 = ep.cum_returns(arg_0, starting_value=1.0)\n arg_6 = timeseries.gen_drawdown_table(arg_0, arg_1=arg_1)\n\n arg_5.plot(arg_2=arg_2, **arg_3)\n\n arg_7 = arg_2.get_ylim()\n arg_8 = sns.cubehelix_palette(len(arg_6))[::-1]\n for arg_9, (arg_10, arg_11) in arg_6[\n ['Peak date', 'Recovery date']].iterrows():\n if pd.isnull(arg_11):\n arg_11 = arg_0.index[-1]\n arg_2.fill_between((arg_10, arg_11),\n arg_7[0],\n arg_7[1],\n alpha=.4,\n color=arg_8[arg_9])\n arg_2.set_ylim(arg_7)\n arg_2.set_title('Top %i drawdown periods' % arg_1)\n arg_2.set_ylabel('Cumulative returns')\n arg_2.legend(['Portfolio'], loc='upper left',\n frameon=True, framealpha=0.5)\n arg_2.set_xlabel('')\n return arg_2"}
{"_id": "doc_1214", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"\n Plots how far underwater returns are over time, or plots current\n drawdown vs. 
date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_1 is None:\n arg_1 = plt.gca()\n\n arg_3 = FuncFormatter(utils.percentage)\n arg_1.yaxis.set_major_formatter(FuncFormatter(arg_3))\n\n arg_4 = ep.cum_returns(arg_0, starting_value=1.0)\n arg_5 = np.maximum.accumulate(arg_4)\n arg_6 = -100 * ((arg_5 - arg_4) / arg_5)\n (arg_6).plot(arg_1=arg_1, kind='area', color='coral', alpha=0.7, **arg_2)\n arg_1.set_ylabel('Drawdown')\n arg_1.set_title('Underwater plot')\n arg_1.set_xlabel('')\n return arg_1"} +{"_id": "doc_1215", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=False,\n arg_4=None,\n arg_5='best',\n arg_6=False,\n arg_7=arg_8.forecast_cone_bootstrap,\n arg_10=None, **arg_11):\n \"\"\"\n Plots cumulative rolling returns versus some benchmarks'.\n\n Backtest returns are in green, and out-of-sample (live trading)\n returns are in red.\n\n Additionally, a non-parametric cone plot may be added to the\n out-of-sample returns region.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n live_start_date : datetime, optional\n The date when the strategy began live trading, after\n its backtest period. This date should be normalized.\n logy : bool, optional\n Whether to log-scale the y-axis.\n cone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - See timeseries.forecast_cone_bounds for more details.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n volatility_match : bool, optional\n Whether to normalize the volatility of the returns to those of the\n benchmark returns. This helps compare strategies with different\n volatilities. 
Requires passing of benchmark_rets.\n cone_function : function, optional\n Function to use when generating forecast probability cone.\n The function signature must follow the form:\n def cone(in_sample_returns (pd.Series),\n days_to_project_forward (int),\n cone_std= (float, or tuple),\n starting_value= (int, or float))\n See timeseries.forecast_cone_bootstrap for an example.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_10 is None:\n arg_10 = plt.gca()\n\n arg_10.set_xlabel('')\n arg_10.set_ylabel('Cumulative returns')\n arg_10.set_yscale('log' if arg_3 else 'linear')\n\n if arg_6 and arg_1 is None:\n raise ValueError('volatility_match requires passing of '\n 'factor_returns.')\n elif arg_6 and arg_1 is not None:\n arg_12 = arg_1.loc[arg_0.index].std()\n arg_0 = (arg_0 / arg_0.std()) * arg_12\n\n arg_13 = ep.cum_returns(arg_0, 1.0)\n\n arg_14 = FuncFormatter(utils.two_dec_places)\n arg_10.yaxis.set_major_formatter(FuncFormatter(arg_14))\n\n if arg_1 is not None:\n arg_15 = ep.cum_returns(\n arg_1[arg_13.index], 1.0)\n arg_15.plot(lw=2, color='gray',\n label=arg_1.name, alpha=0.60,\n arg_10=arg_10, **arg_11)\n\n if arg_2 is not None:\n arg_2 = ep.utils.get_utc_timestamp(arg_2)\n arg_16 = arg_13.loc[arg_13.index < arg_2]\n arg_17 = arg_13.loc[arg_13.index >= arg_2]\n else:\n arg_16 = arg_13\n arg_17 = pd.Series([])\n\n arg_16.plot(lw=3, color='forestgreen', alpha=0.6,\n label='Backtest', arg_10=arg_10, **arg_11)\n\n if len(arg_17) > 0:\n arg_17.plot(lw=4, color='red', alpha=0.6,\n label='Live', arg_10=arg_10, **arg_11)\n\n if arg_4 is not None:\n if isinstance(arg_4, (float, int)):\n arg_4 = [arg_4]\n\n arg_18 = arg_0.loc[arg_0.index < arg_2]\n arg_19 = arg_7(\n arg_18,\n len(arg_17),\n arg_4=arg_4,\n starting_value=arg_16[-1])\n\n arg_19 = arg_19.set_index(arg_17.index)\n for arg_20 in arg_4:\n arg_10.fill_between(arg_19.index,\n arg_19[float(arg_20)],\n arg_19[float(-arg_20)],\n color='steelblue', alpha=0.5)\n\n if arg_5 is not None:\n arg_10.legend(loc=arg_5, frameon=True, framealpha=0.5)\n arg_10.axhline(1.0, linestyle='--', color='black', lw=2)\n\n return arg_10"}
{"_id": "doc_1216", "title": "", "text": "def Func(arg_0, arg_1, arg_2='best',\n arg_3=None, **arg_4):\n \"\"\"\n Plots the rolling 6-month and 12-month beta versus date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. 
Usually a benchmark such as market returns.\n - This is in the same style as returns.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_3 is None:\n arg_3 = plt.gca()\n\n arg_5 = FuncFormatter(utils.two_dec_places)\n arg_3.yaxis.set_major_formatter(FuncFormatter(arg_5))\n\n arg_3.set_title(\"Rolling portfolio beta to \" + str(arg_1.name))\n arg_3.set_ylabel('Beta')\n arg_6 = timeseries.rolling_beta(\n arg_0, arg_1, rolling_window=APPROX_BDAYS_PER_MONTH * 6)\n arg_6.plot(color='steelblue', lw=3, alpha=0.6, arg_3=arg_3, **arg_4)\n arg_7 = timeseries.rolling_beta(\n arg_0, arg_1, rolling_window=APPROX_BDAYS_PER_MONTH * 12)\n arg_7.plot(color='grey', lw=3, alpha=0.4, arg_3=arg_3, **arg_4)\n arg_3.axhline(arg_6.mean(), color='steelblue', linestyle='--', lw=3)\n arg_3.axhline(0.0, color='black', linestyle='-', lw=2)\n\n arg_3.set_xlabel('')\n arg_3.legend(['6-mo',\n '12-mo'],\n loc=arg_2, frameon=True, framealpha=0.5)\n arg_3.set_ylim((-1.0, 1.0))\n return arg_3"} +{"_id": "doc_1217", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=arg_3 * 6,\n arg_4='best', arg_5=None, **arg_6):\n \"\"\"\n Plots the rolling Sharpe ratio versus date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor for\n which the benchmark rolling Sharpe is computed. Usually\n a benchmark such as market returns.\n - This is in the same style as returns.\n rolling_window : int, optional\n The days window over which to compute the sharpe ratio.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_5 is None:\n arg_5 = plt.gca()\n\n arg_7 = FuncFormatter(utils.two_dec_places)\n arg_5.yaxis.set_major_formatter(FuncFormatter(arg_7))\n\n arg_8 = timeseries.rolling_sharpe(\n arg_0, arg_2)\n arg_8.plot(alpha=.7, lw=3, color='orangered', arg_5=arg_5,\n **arg_6)\n\n if arg_1 is not None:\n arg_9 = timeseries.rolling_sharpe(\n arg_1, arg_2)\n arg_9.plot(alpha=.7, lw=3, color='grey', arg_5=arg_5,\n **arg_6)\n\n arg_5.set_title('Rolling Sharpe ratio (6-month)')\n arg_5.axhline(\n arg_8.mean(),\n color='steelblue',\n linestyle='--',\n lw=3)\n arg_5.axhline(0.0, color='black', linestyle='-', lw=3)\n\n arg_5.set_ylabel('Sharpe ratio')\n arg_5.set_xlabel('')\n if arg_1 is None:\n arg_5.legend(['Sharpe', 'Average'],\n loc=arg_4, frameon=True, framealpha=0.5)\n else:\n arg_5.legend(['Sharpe', 'Benchmark Sharpe', 'Average'],\n loc=arg_4, frameon=True, framealpha=0.5)\n\n return arg_5"} +{"_id": "doc_1218", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Plots the sector exposures of the portfolio over time.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n sector_alloc : pd.DataFrame\n Portfolio allocation of positions. 
See pos.get_sector_alloc.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_2 is None:\n arg_2 = plt.gca()\n\n arg_1.plot(title='Sector allocation over time',\n alpha=0.5, arg_2=arg_2, **arg_3)\n\n arg_4 = arg_2.get_position()\n arg_2.set_position([arg_4.x0, arg_4.y0 + arg_4.height * 0.1,\n arg_4.width, arg_4.height * 0.9])\n\n # Put a legend below current axis\n arg_2.legend(loc='upper center', frameon=True, framealpha=0.5,\n bbox_to_anchor=(0.5, -0.14), ncol=5)\n\n arg_2.set_xlim((arg_1.index[0], arg_1.index[-1]))\n arg_2.set_ylabel('Exposure by sector')\n arg_2.set_xlabel('')\n\n return arg_2"}
{"_id": "doc_1219", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=(3, 8, 10, 12, 15, 20, 50),\n arg_4=None, **arg_5):\n \"\"\"\n Plots equity curves at different per-dollar slippage assumptions.\n\n Parameters\n ----------\n returns : pd.Series\n Timeseries of portfolio returns to be adjusted for various\n degrees of slippage.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n slippage_params : tuple\n Slippage parameters to apply to the return time series (in\n basis points).\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_4 is None:\n arg_4 = plt.gca()\n\n arg_6 = pd.DataFrame()\n for arg_7 in arg_3:\n arg_8 = txn.adjust_returns_for_slippage(arg_0, arg_1,\n arg_2, arg_7)\n arg_9 = str(arg_7) + \" bps\"\n arg_6[arg_9] = ep.cum_returns(arg_8, 1)\n\n arg_6.plot(alpha=1.0, lw=0.5, arg_4=arg_4)\n\n arg_4.set_title('Cumulative returns given additional per-dollar slippage')\n arg_4.set_ylabel('')\n\n arg_4.legend(loc='center left', frameon=True, framealpha=0.5)\n\n return arg_4"}
{"_id": "doc_1220", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Plots trading volume per day vs. date.\n\n Also displays all-time daily average.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_2 is None:\n arg_2 = plt.gca()\n arg_4 = txn.get_txn_vol(arg_1)\n arg_4.txn_shares.plot(alpha=1.0, lw=0.5, arg_2=arg_2, **arg_3)\n arg_2.axhline(arg_4.txn_shares.mean(), color='steelblue',\n linestyle='--', lw=3, alpha=1.0)\n arg_2.set_title('Daily trading volume')\n arg_2.set_xlim((arg_0.index[0], arg_0.index[-1]))\n arg_2.set_ylabel('Amount of shares traded')\n arg_2.set_xlabel('')\n return arg_2"} +{"_id": "doc_1221", "title": "", "text": "def Func(arg_0, arg_1=5, arg_2='America/New_York',\n arg_3=None, **arg_4):\n \"\"\"\n Plots a histogram of transaction times, binning the times into\n buckets of a given duration.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n bin_minutes : float, optional\n Sizes of the bins in minutes, defaults to 5 minutes.\n tz : str, optional\n Time zone to plot against. Note that if the specified\n zone does not apply daylight savings, the distribution\n may be partially offset.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_3 is None:\n arg_3 = plt.gca()\n\n arg_5 = arg_0.copy()\n\n arg_5.index = arg_5.index.tz_convert(pytz.timezone(arg_2))\n arg_5.index = arg_5.index.map(lambda x: x.hour * 60 + x.minute)\n arg_5['trade_value'] = (arg_5.amount * arg_5.price).abs()\n arg_5 = arg_5.groupby(level=0).sum().reindex(arg_6=range(570, 961))\n arg_5.index = (arg_5.index / arg_1).astype(int) * arg_1\n arg_5 = arg_5.groupby(level=0).sum()\n\n arg_5['time_str'] = arg_5.index.map(lambda x:\n str(datetime.time(int(x / 60),\n x % 60))[:-3])\n\n arg_7 = arg_5.trade_value.sum()\n arg_5.trade_value = arg_5.trade_value.fillna(0) / arg_7\n\n arg_3.bar(arg_5.index, arg_5.trade_value, width=arg_1, **arg_4)\n\n arg_3.set_xlim(570, 960)\n arg_3.set_xticks(arg_5.index[::int(30 / arg_1)])\n arg_3.set_xticklabels(arg_5.time_str[::int(30 / arg_1)])\n arg_3.set_title('Transaction time distribution')\n arg_3.set_ylabel('Proportion')\n arg_3.set_xlabel('')\n return arg_3"} +{"_id": "doc_1222", "title": "", "text": "def Func(arg_0, arg_1=5):\n \"\"\"\n Prints information about the worst drawdown periods.\n\n Prints peak dates, valley dates, recovery dates, and net\n drawdowns.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n top : int, optional\n Amount of top drawdowns periods to plot (default 5).\n \"\"\"\n\n arg_2 = timeseries.gen_drawdown_table(arg_0, arg_1=arg_1)\n utils.print_table(\n arg_2.sort_values('Net drawdown in %', ascending=False),\n name='Worst drawdown periods',\n float_format='{0:.2f}'.format,\n )"} +{"_id": "doc_1223", "title": "", "text": "def Func(arg_0, arg_1=16, arg_2=18, arg_3=None):\n \"\"\"\n Plots timespans and directions of a sample of round trip trades.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n 
-------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if arg_3 is None:\n arg_3 = plt.subplot()\n\n arg_4 = arg_0.symbol.unique()\n np.random.seed(1)\n arg_5 = np.random.choice(arg_0.symbol.unique(), replace=False,\n size=min(arg_1, len(arg_4)))\n arg_6 = arg_0[arg_0.symbol.isin(arg_5)]\n\n arg_7 = pd.Series(np.arange(len(arg_5)), index=arg_5)\n\n for arg_8, arg_9 in arg_6.groupby('symbol'):\n for arg_10, arg_11 in arg_9.iterrows():\n arg_12 = 'b' if arg_11.long else 'r'\n arg_13 = arg_7[arg_8] + 0.05\n arg_3.plot([arg_11['open_dt'], arg_11['close_dt']],\n [arg_13, arg_13], color=arg_12,\n linewidth=arg_2, solid_capstyle='butt')\n\n arg_3.set_yticks(range(arg_1))\n arg_3.set_yticklabels([utils.format_asset(arg_14) for arg_14 in arg_5])\n\n arg_3.set_ylim((-0.5, min(len(arg_5), arg_1) - 0.5))\n arg_15 = patches.Rectangle([0, 0], 1, 1, color='b', label='Long')\n arg_16 = patches.Rectangle([0, 0], 1, 1, color='r', label='Short')\n arg_17 = arg_3.legend(handles=[arg_15, arg_16], loc='lower left',\n frameon=True, framealpha=0.5)\n arg_17.get_frame().set_edgecolor('black')\n arg_3.grid(False)\n\n return arg_3"} +{"_id": "doc_1224", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Prints the share of total PnL contributed by each\n traded name.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n arg_1 = arg_0['pnl'].sum()\n arg_2 = arg_0.groupby('symbol')['pnl'].sum() / arg_1\n arg_2.name = ''\n\n arg_2.index = arg_2.index.map(utils.format_asset)\n utils.print_table(\n arg_2.sort_values(\n inplace=False,\n ascending=False,\n ),\n arg_3='Profitability (PnL / PnL total) per name',\n float_format='{:.2%}'.format,\n )"} +{"_id": "doc_1225", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=arg_3):\n \"\"\"\n Determines the Sortino ratio of a strategy.\n\n Parameters\n ----------\n returns : pd.Series or pd.DataFrame\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\n required_return: float / series\n minimum acceptable return\n period : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\n Returns\n -------\n depends on input type\n series ==> float\n DataFrame ==> np.array\n\n Annualized Sortino ratio.\n \"\"\"\n\n return ep.Func(arg_0, arg_1=arg_1)"} +{"_id": "doc_1226", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=arg_3):\n \"\"\"\n Determines the Sharpe ratio of a strategy.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\n risk_free : int, float\n Constant risk-free return throughout the period.\n period : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. 
Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\n Returns\n -------\n float\n Sharpe ratio.\n np.nan\n If insufficient length of returns or if adjusted returns are 0.\n\n Note\n -----\n See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.\n \"\"\"\n\n return ep.Func(arg_0, arg_1=arg_1, arg_2=arg_2)"}
{"_id": "doc_1227", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=arg_3 * 6):\n \"\"\"\n Determines the rolling beta of a strategy.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series or pd.DataFrame\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - If DataFrame is passed, computes rolling beta for each column.\n - This is in the same style as returns.\n rolling_window : int, optional\n The size of the rolling window, in days, over which to compute\n beta (default 6 months).\n\n Returns\n -------\n pd.Series\n Rolling beta.\n\n Note\n -----\n See https://en.wikipedia.org/wiki/Beta_(finance) for more details.\n \"\"\"\n\n if arg_1.ndim > 1:\n # Apply column-wise\n return arg_1.apply(partial(Func, arg_0),\n arg_2=arg_2)\n else:\n arg_4 = pd.Series(index=arg_0.index)\n for arg_5, arg_6 in zip(arg_0.index[0:-arg_2],\n arg_0.index[arg_2:]):\n arg_4.loc[arg_6] = ep.beta(\n arg_0.loc[arg_5:arg_6],\n arg_1.loc[arg_5:arg_6])\n\n return arg_4"}
{"_id": "doc_1228", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Calculates the gross leverage of a strategy.\n\n Parameters\n ----------\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n\n Returns\n -------\n pd.Series\n Gross leverage.\n \"\"\"\n\n arg_1 = arg_0.drop('cash', axis=1).abs().sum(axis=1)\n return arg_1 / arg_0.sum(axis=1)"}
{"_id": "doc_1229", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None,\n arg_3=None, arg_4='AGB'):\n \"\"\"\n Calculates various performance metrics of a strategy, for use in\n plotting.show_Func.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n - If None, do not compute alpha, beta, and information ratio.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n turnover_denom : str\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\n\n Returns\n -------\n pd.Series\n Performance metrics.\n \"\"\"\n\n arg_5 = pd.Series()\n for arg_6 in SIMPLE_STAT_FUNCS:\n arg_5[arg_7[arg_6.__name__]] = arg_6(arg_0)\n\n if arg_2 is not None:\n arg_5['Gross leverage'] = gross_lev(arg_2).mean()\n if arg_3 is not None:\n arg_5['Daily turnover'] = get_turnover(arg_2,\n arg_3,\n arg_4).mean()\n if arg_1 is not None:\n for arg_6 in FACTOR_STAT_FUNCS:\n arg_9 = arg_6(arg_0, arg_1)\n arg_5[arg_7[arg_6.__name__]] = arg_9\n\n return arg_5"} +{"_id": "doc_1230", "title": "", "text": "def Func(arg_0):\n \"\"\"Calculate various summary statistics of data.\n\n Parameters\n ----------\n x : numpy.ndarray or pandas.Series\n Array to compute summary statistics for.\n\n Returns\n -------\n pandas.Series\n Series containing mean, median, std, as well as 5, 25, 75 and\n 95 percentiles of passed in values.\n \"\"\"\n\n return pd.Series({'mean': np.mean(arg_0),\n 'median': np.median(arg_0),\n 'std': np.std(arg_0),\n '5%': np.percentile(arg_0, 5),\n '25%': np.percentile(arg_0, 25),\n '75%': np.percentile(arg_0, 75),\n '95%': np.percentile(arg_0, 95),\n 'IQR': np.subtract.reduce(\n np.percentile(arg_0, [75, 25])),\n })"} +{"_id": "doc_1231", "title": "", "text": "def Func(arg_0, arg_1=10):\n \"\"\"\n Finds top drawdowns, sorted by drawdown amount.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n top : int, optional\n The amount of top drawdowns to find (default 10).\n\n Returns\n -------\n drawdowns : list\n List of drawdown peaks, valleys, and recoveries. 
See get_max_drawdown.\n \"\"\"\n\n arg_0 = arg_0.copy()\n arg_2 = ep.cum_returns(arg_0, 1.0)\n arg_3 = np.maximum.accumulate(arg_2)\n arg_4 = arg_2 / arg_3 - 1\n\n arg_5 = []\n for arg_6 in range(arg_1):\n arg_7, arg_8, arg_9 = get_max_drawdown_underwater(arg_4)\n # Slice out draw-down period\n if not pd.isnull(arg_9):\n arg_4.drop(arg_4[arg_7: arg_9].index[1:-1],\n inplace=True)\n else:\n # drawdown has not ended yet\n arg_4 = arg_4.loc[:arg_7]\n\n arg_5.append((arg_7, arg_8, arg_9))\n if (len(arg_0) == 0) or (len(arg_4) == 0):\n break\n\n return arg_5"}
{"_id": "doc_1232", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=1, arg_3=1000, arg_4=None):\n \"\"\"\n Generate alternate paths using available values from in-sample returns.\n\n Parameters\n ----------\n is_returns : pandas.core.frame.DataFrame\n Non-cumulative in-sample returns.\n num_days : int\n Number of days to project the probability cone forward.\n starting_value : int or float\n Starting value of the out of sample period.\n num_samples : int\n Number of samples to draw from the in-sample daily returns.\n Each sample will be an array with length num_days.\n A higher number of samples will generate a more accurate\n bootstrap cone.\n random_seed : int\n Seed for the pseudorandom number generator used by the pandas\n sample method.\n\n Returns\n -------\n samples : numpy.ndarray\n \"\"\"\n\n arg_5 = np.empty((arg_3, arg_1))\n arg_6 = np.random.RandomState(arg_6=arg_4)\n for arg_7 in range(arg_3):\n arg_5[arg_7, :] = arg_0.sample(arg_1, replace=True,\n random_state=arg_6)\n\n return arg_5"}
{"_id": "doc_1233", "title": "", "text": "def Func(arg_0, arg_1=(1., 1.5, 2.), arg_2=1.):\n \"\"\"\n Generate the upper and lower bounds of an n standard deviation\n cone of forecasted cumulative returns.\n\n Parameters\n ----------\n samples : numpy.ndarray\n Alternative paths, or series of possible outcomes.\n cone_std : list of int/float\n Number of standard deviations to use in the boundaries of\n the cone. 
If multiple values are passed, cone bounds will\n be generated for each value.\n\n Returns\n -------\n samples : pandas.core.frame.DataFrame\n \"\"\"\n\n arg_3 = ep.cum_returns(arg_0.T,\n arg_2=arg_2).T\n\n arg_4 = arg_3.mean(axis=0)\n arg_5 = arg_3.std(axis=0)\n\n if isinstance(arg_1, (arg_9, int)):\n arg_1 = [arg_1]\n\n arg_6 = pd.DataFrame(columns=pd.Float64Index([]))\n for arg_7 in arg_1:\n arg_6.loc[:, arg_9(arg_7)] = arg_4 + arg_5 * arg_7\n arg_6.loc[:, arg_9(-arg_7)] = arg_4 - arg_5 * arg_7\n\n return arg_6"} +{"_id": "doc_1234", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Generate plot for stochastic volatility model.\n\n Parameters\n ----------\n data : pandas.Series\n Returns to model.\n trace : pymc3.sampling.BaseTrace object, optional\n trace as returned by model_stoch_vol\n If not passed, sample from model.\n ax : matplotlib.axes object, optional\n Plot into axes object\n\n Returns\n -------\n ax object\n\n See Also\n --------\n model_stoch_vol : run stochastic volatility model\n \"\"\"\n\n if arg_1 is None:\n arg_1 = model_stoch_vol(arg_0)\n\n if arg_2 is None:\n arg_3, arg_2 = plt.subplots(figsize=(15, 8))\n\n arg_0.abs().plot(arg_2=arg_2)\n arg_2.plot(arg_0.index, np.exp(arg_1['s', ::30].T), 'r', alpha=.03)\n arg_2.set(title='Stochastic volatility', xlabel='Time', ylabel='Volatility')\n arg_2.legend(['Abs returns', 'Stochastic volatility process'],\n frameon=True, framealpha=0.5)\n\n return arg_2"} +{"_id": "doc_1235", "title": "", "text": "def Func(arg_0, arg_1=1.):\n \"\"\"\n Compute 5, 25, 75 and 95 percentiles of cumulative returns, used\n for the Bayesian cone.\n\n Parameters\n ----------\n preds : numpy.array\n Multiple (simulated) cumulative returns.\n starting_value : int (optional)\n Have cumulative returns start around this value.\n Default = 1.\n\n Returns\n -------\n dict of percentiles over time\n Dictionary mapping percentiles (5, 25, 75, 95) to a\n timeseries.\n \"\"\"\n\n def scoreatpercentile(arg_2, arg_3):\n return [stats.scoreatpercentile(\n arg_4, arg_3) for arg_4 in arg_2.T]\n\n arg_2 = np.cumprod(arg_0 + 1, 1) * arg_1\n arg_5 = {arg_3: scoreatpercentile(arg_2, arg_3) for arg_3 in (5, 25, 75, 95)}\n\n return arg_5"} +{"_id": "doc_1236", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compute Bayesian consistency score.\n\n Parameters\n ----------\n returns_test : pd.Series\n Observed cumulative returns.\n preds : numpy.array\n Multiple (simulated) cumulative returns.\n\n Returns\n -------\n Consistency score\n Score from 100 (returns_test perfectly on the median line of the\n Bayesian cone spanned by preds) to 0 (returns_test completely\n outside of Bayesian cone.)\n \"\"\"\n\n arg_2 = cum_returns(arg_0, starting_value=1.)\n arg_3 = np.cumprod(arg_1 + 1, 1)\n\n arg_4 = [sp.stats.percentileofscore(arg_3[:, i],\n arg_2.iloc[i],\n kind='weak')\n for i in range(len(arg_2))]\n # normalize to be from 100 (perfect median line) to 0 (completely outside\n # of cone)\n return 100 - np.abs(50 - np.mean(arg_4)) / .5"} +{"_id": "doc_1237", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=50, arg_4=None):\n \"\"\"\n Generate cumulative returns plot with Bayesian cone.\n\n Parameters\n ----------\n returns_train : pd.Series\n Timeseries of simple returns\n returns_test : pd.Series\n Out-of-sample returns. 
Datetimes in returns_test will be added to\n returns_train as missing values and predictions will be generated\n for them.\n ppc : np.array\n Posterior predictive samples of shape samples x\n len(returns_test).\n plot_train_len : int (optional)\n How many data points to plot of returns_train. Useful to zoom in on\n the prediction if there is a long backtest period.\n ax : matplotlib.Axis (optional)\n Axes upon which to plot.\n\n Returns\n -------\n score : float\n Consistency score (see compute_consistency_score)\n trace : pymc3.sampling.BaseTrace\n A PyMC3 trace object that contains samples for each parameter\n of the posterior.\n \"\"\"\n\n arg_5 = compute_consistency_score(arg_1,\n arg_2)\n\n arg_4 = _Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=arg_3,\n arg_4=arg_4)\n arg_4.text(\n 0.40,\n 0.90,\n 'Consistency score: %.1f' %\n arg_5,\n verticalalignment='bottom',\n horizontalalignment='right',\n transform=arg_4.transAxes,\n )\n\n arg_4.set_ylabel('Cumulative returns')\n return arg_5"} +{"_id": "doc_1238", "title": "", "text": "def Func(arg_0):\n \"\"\"Defer the message.\n\n This message will remain in the queue but must be received\n specifically by its sequence number in order to be processed.\n\n :raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled.\n :raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired.\n :raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired.\n :raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails.\n \"\"\"\n arg_0._is_live('Func')\n try:\n arg_0.message.modify(True, True)\n except Exception as e:\n raise MessageSettleFailed(\"Func\", e)"} +{"_id": "doc_1239", "title": "", "text": "def Func(arg_0):\n \"\"\"Guess Python Autorest options based on the spec path.\n\n Expected path:\n specification/compute/resource-manager/readme.md\n \"\"\"\n arg_0 = arg_0.lower()\n arg_0 = arg_0[arg_0.index(\"specification\"):] # Might raise and it's ok\n arg_1 = arg_0.split(\"/\")\n\n arg_2 = arg_1[1]\n arg_3 = arg_1[2] == \"resource-manager\"\n\n return {\n \"rp_name\": arg_2,\n \"is_arm\": arg_3\n }"} +{"_id": "doc_1240", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=False, arg_4=True, **arg_5):\n \"\"\"Deletes the managed application definition.\n\n :param application_definition_id: The fully qualified ID of the\n managed application definition, including the managed application name\n and the managed application definition resource type. 
Use the format,\n /subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}\n :type application_definition_id: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n arg_6 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=True,\n **arg_5\n )\n\n def get_long_running_output(arg_7):\n if arg_3:\n arg_8 = ClientRawResponse(None, arg_7)\n return arg_8\n\n arg_9 = arg_5.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_4 is True: arg_10 = ARMPolling(arg_9, **arg_5)\n elif arg_4 is False: arg_10 = NoPolling()\n else: arg_10 = arg_4\n return LROPoller(arg_0._client, arg_6, get_long_running_output, arg_10)"} +{"_id": "doc_1241", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=False, arg_5=True, **arg_6):\n \"\"\"Creates a new managed application definition.\n\n :param application_definition_id: The fully qualified ID of the\n managed application definition, including the managed application name\n and the managed application definition resource type. Use the format,\n /subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}\n :type application_definition_id: str\n :param parameters: Parameters supplied to the create or update a\n managed application definition.\n :type parameters:\n ~azure.mgmt.resource.managedapplications.models.ApplicationDefinition\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns ApplicationDefinition\n or ClientRawResponse if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.resource.managedapplications.models.ApplicationDefinition]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.resource.managedapplications.models.ApplicationDefinition]]\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n arg_7 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=True,\n **arg_6\n )\n\n def get_long_running_output(arg_8):\n arg_9 = arg_0._deserialize('ApplicationDefinition', arg_8)\n\n if arg_4:\n arg_10 = ClientRawResponse(arg_9, arg_8)\n return arg_10\n\n return arg_9\n\n arg_11 = arg_6.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_5 is True: arg_12 = ARMPolling(arg_11, **arg_6)\n elif arg_5 is False: arg_12 = NoPolling()\n else: arg_12 = arg_5\n return LROPoller(arg_0._client, arg_7, get_long_running_output, arg_12)"} +{"_id": "doc_1242", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Return the target uri for the request.'''\n arg_2 = 
arg_1.protocol_override \\\n if arg_1.protocol_override else arg_0.protocol\n arg_2 = arg_2.lower()\n arg_3 = HTTP_PORT if arg_2 == 'http' else HTTPS_PORT\n return arg_2 + '://' + arg_1.host + ':' + str(arg_3) + arg_1.path"} +{"_id": "doc_1243", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Sends request to cloud service server and return the response. '''\n arg_2 = arg_0.get_connection(arg_1)\n try:\n arg_2.putrequest(arg_1.method, arg_1.path)\n\n arg_0.send_request_headers(arg_2, arg_1.headers)\n arg_0.send_request_body(arg_2, arg_1.body)\n\n if DEBUG_REQUESTS and arg_1.body:\n print('request:')\n try:\n print(arg_1.body)\n except: # pylint: disable=bare-except\n pass\n\n arg_3 = arg_2.getresponse()\n arg_4 = int(arg_3.status)\n arg_5 = arg_3.reason\n arg_6 = arg_3.getheaders()\n\n # for consistency across platforms, make header names lowercase\n for arg_7, arg_8 in enumerate(arg_6):\n arg_6[arg_7] = (arg_8[0].lower(), arg_8[1])\n\n arg_9 = None\n if arg_3.length is None:\n arg_9 = arg_3.read()\n elif arg_3.length > 0:\n arg_9 = arg_3.read(arg_3.length)\n\n if DEBUG_RESPONSES and arg_9:\n print('response:')\n try:\n print(arg_9)\n except: # pylint: disable=bare-except\n pass\n\n arg_10 = HTTPResponse(\n arg_4, arg_3.reason, arg_6, arg_9)\n if arg_4 == 307:\n arg_11 = urlparse(dict(arg_6)['location'])\n arg_1.host = arg_11.hostname\n arg_1.path = arg_11.path\n arg_1.path, arg_1.query = arg_0._update_request_uri_query(arg_1)\n return arg_0.Func(arg_1)\n if arg_4 >= 300:\n raise HTTPError(arg_4, arg_5, arg_6, arg_9)\n\n return arg_10\n finally:\n arg_2.close()"} +{"_id": "doc_1244", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=False, arg_7=True, **arg_8):\n \"\"\"Executes script actions on the specified HDInsight cluster.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param cluster_name: The name of the cluster.\n :type cluster_name: str\n :param persist_on_success: Gets or sets if the scripts needs to be\n persisted.\n :type persist_on_success: bool\n :param script_actions: The list of run time script actions.\n :type script_actions:\n list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n arg_9 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=True,\n **arg_8\n )\n\n def get_long_running_output(arg_10):\n if arg_6:\n arg_11 = ClientRawResponse(None, arg_10)\n return arg_11\n\n arg_12 = arg_8.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_7 is True: arg_13 = ARMPolling(arg_12, **arg_8)\n elif arg_7 is False: arg_13 = NoPolling()\n else: arg_13 = arg_7\n return LROPoller(arg_0._client, arg_9, get_long_running_output, arg_13)"} +{"_id": "doc_1245", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=False, **arg_5):\n \"\"\"Check the availability of a Front 
Door resource name.\n\n :param name: The resource name to validate.\n :type name: str\n :param type: The type of the resource whose name is to be validated.\n Possible values include: 'Microsoft.Network/frontDoors',\n 'Microsoft.Network/frontDoors/frontendEndpoints'\n :type type: str or ~azure.mgmt.frontdoor.models.ResourceType\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: CheckNameAvailabilityOutput or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.frontdoor.models.CheckNameAvailabilityOutput or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n arg_6 = models.CheckNameAvailabilityInput(arg_1=arg_1, arg_2=arg_2)\n\n arg_7 = \"2018-08-01\"\n\n # Construct URL\n arg_8 = arg_0.Func.metadata['url']\n\n # Construct parameters\n arg_9 = {}\n arg_9['api-version'] = arg_0._serialize.query(\"api_version\", arg_7, 'str')\n\n # Construct headers\n arg_10 = {}\n arg_10['Accept'] = 'application/json'\n arg_10['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_10['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_3:\n arg_10.update(arg_3)\n if arg_0.config.accept_language is not None:\n arg_10['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_11 = arg_0._serialize.body(arg_6, 'CheckNameAvailabilityInput')\n\n # Construct and send request\n arg_12 = arg_0._client.post(arg_8, arg_9, arg_10, arg_11)\n arg_13 = arg_0._client.send(arg_12, stream=False, **arg_5)\n\n if arg_13.status_code not in [200]:\n raise models.ErrorResponseException(arg_0._deserialize, arg_13)\n\n arg_14 = None\n\n if arg_13.status_code == 200:\n arg_14 = arg_0._deserialize('CheckNameAvailabilityOutput', arg_13)\n\n if arg_4:\n arg_15 = ClientRawResponse(arg_14, arg_13)\n return arg_15\n\n return arg_14"} +{"_id": "doc_1246", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Extracts the host authority from the given URI. \"\"\"\n if not arg_1:\n raise ValueError('request_uri cannot be empty')\n\n arg_1 = parse.urlparse(arg_1)\n if not arg_1.netloc:\n raise ValueError('request_uri must be an absolute URI')\n\n if arg_1.scheme.lower() not in ['http', 'https']:\n raise ValueError('request_uri must be HTTP or HTTPS')\n\n return arg_1.netloc"} +{"_id": "doc_1247", "title": "", "text": "def Func(arg_0=None, arg_1=False):\n \"\"\"Return Credentials and default SubscriptionID of current loaded profile of the CLI.\n\n Credentials will be the \"az login\" command:\n https://docs.microsoft.com/cli/azure/authenticate-azure-cli\n\n Default subscription ID is either the only one you have, or you can define it:\n https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli\n\n .. 
versionadded:: 1.1.6\n\n :param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.)\n :param bool with_tenant: If True, return a three-tuple with last as tenant ID\n :return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant)\n :rtype: tuple\n \"\"\"\n arg_2 = get_cli_profile()\n arg_3, arg_4, arg_5 = arg_2.get_login_credentials(arg_0=arg_0)\n if arg_1:\n return arg_3, arg_4, arg_5\n else:\n return arg_3, arg_4"} +{"_id": "doc_1248", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Opens the request.\n\n method:\n the request VERB 'GET', 'POST', etc.\n url:\n the url to connect\n '''\n arg_3 = VARIANT.create_bool_false()\n arg_4 = BSTR(arg_1)\n arg_5 = BSTR(arg_2)\n _WinHttpRequest._Open(arg_0, arg_4, arg_5, arg_3)"} +{"_id": "doc_1249", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Sets up the timeout for the request. '''\n arg_2 = int(arg_1 * 1000)\n _WinHttpRequest._SetTimeouts(\n arg_0, 0, arg_2, arg_2, arg_2)"} +{"_id": "doc_1250", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Sets the request header. '''\n\n arg_3 = BSTR(arg_1)\n arg_4 = BSTR(arg_2)\n _WinHttpRequest._SetRequestHeader(arg_0, arg_3, arg_4)"} +{"_id": "doc_1251", "title": "", "text": "def Func(arg_0, arg_1=None):\n ''' Sends the request body. '''\n\n # Sends VT_EMPTY if it is GET, HEAD request.\n if arg_1 is None:\n arg_2 = VARIANT.create_empty()\n _WinHttpRequest._Send(arg_0, arg_2)\n else: # Sends request body as SAFEArray.\n arg_3 = VARIANT.create_safearray_from_str(arg_1)\n _WinHttpRequest._Send(arg_0, arg_3)"} +{"_id": "doc_1252", "title": "", "text": "def Func(arg_0):\n ''' Gets status text of response. '''\n\n arg_1 = c_void_p()\n _WinHttpRequest._StatusText(arg_0, byref(arg_1))\n arg_1 = ctypes.cast(arg_1, c_wchar_p)\n Func = arg_1.value\n _SysFreeString(arg_1)\n return Func"} +{"_id": "doc_1253", "title": "", "text": "def Func(arg_0):\n '''\n Gets response body as a SAFEARRAY and converts the SAFEARRAY to str.\n '''\n arg_1 = VARIANT()\n _WinHttpRequest._ResponseBody(arg_0, byref(arg_1))\n if arg_1.is_safearray_of_bytes():\n arg_2 = arg_1.str_from_safearray()\n return arg_2\n else:\n return ''"} +{"_id": "doc_1254", "title": "", "text": "def Func(arg_0, arg_1):\n '''Sets client certificate for the request. '''\n arg_2 = BSTR(arg_1)\n _WinHttpRequest._SetClientCertificate(arg_0, arg_2)"} +{"_id": "doc_1255", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Connects to host and sends the request. '''\n\n arg_3 = unicode(arg_0.protocol + '://')\n arg_4 = arg_3 + arg_0.host + unicode(arg_2)\n arg_0._httprequest.set_timeout(arg_0.timeout)\n arg_0._httprequest.open(unicode(arg_1), arg_4)\n\n # sets certificate for the connection if cert_file is set.\n if arg_0.cert_file is not None:\n arg_0._httprequest.set_client_certificate(unicode(arg_0.cert_file))"} +{"_id": "doc_1256", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Sends the headers of request. '''\n if sys.version_info < (3,):\n arg_1 = str(arg_1).decode('utf-8')\n arg_2 = str(arg_2).decode('utf-8')\n arg_0._httprequest.set_request_header(arg_1, arg_2)"} +{"_id": "doc_1257", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Sends request body. 
'''\n if not arg_1:\n arg_0._httprequest.Func()\n else:\n arg_0._httprequest.Func(arg_1)"} +{"_id": "doc_1258", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"simplified an id to be more friendly for us people\"\"\"\n # id_name is in the form 'https://namespace.host.suffix/name'\n # where name may contain a forward slash!\n arg_2 = arg_0.find('//')\n if arg_2 != -1:\n arg_2 += 2\n if arg_1:\n arg_2 = arg_0.find(arg_1, arg_2)\n if arg_2 != -1:\n arg_2 += len(arg_1)\n arg_2 = arg_0.find('/', arg_2)\n if arg_2 != -1:\n return arg_0[arg_2 + 1:]\n return arg_0"} +{"_id": "doc_1259", "title": "", "text": "def Func(arg_0):\n \"\"\"converts a Python name into a serializable name\"\"\"\n arg_1 = _KNOWN_SERIALIZATION_XFORMS.get(arg_0)\n if arg_1 is not None:\n return arg_1\n\n if arg_0.startswith('x_ms_'):\n return arg_0.replace('_', '-')\n if arg_0.endswith('_id'):\n arg_0 = arg_0.replace('_id', 'ID')\n for arg_2 in ['content_', 'last_modified', 'if_', 'cache_control']:\n if arg_0.startswith(arg_2):\n arg_0 = arg_0.replace('_', '-_')\n\n return ''.join(arg_2.capitalize() for arg_2 in arg_0.split('_'))"} +{"_id": "doc_1260", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None, arg_6=False, **arg_7):\n \"\"\"Verify whether two faces belong to a same person. Compares a face Id\n with a Person Id.\n\n :param face_id: FaceId of the face, comes from Face - Detect\n :type face_id: str\n :param person_id: Specify a certain person in a person group or a\n large person group. personId is created in PersonGroup Person - Create\n or LargePersonGroup Person - Create.\n :type person_id: str\n :param person_group_id: Using existing personGroupId and personId for\n fast loading a specified person. personGroupId is created in\n PersonGroup - Create. Parameter personGroupId and largePersonGroupId\n should not be provided at the same time.\n :type person_group_id: str\n :param large_person_group_id: Using existing largePersonGroupId and\n personId for fast loading a specified person. largePersonGroupId is\n created in LargePersonGroup - Create. 
Parameter personGroupId and\n largePersonGroupId should not be provided at the same time.\n :type large_person_group_id: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: VerifyResult or ClientRawResponse if raw=true\n :rtype: ~azure.cognitiveservices.vision.face.models.VerifyResult or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`APIErrorException`\n \"\"\"\n arg_8 = models.VerifyFaceToPersonRequest(arg_1=arg_1, arg_3=arg_3, arg_4=arg_4, arg_2=arg_2)\n\n # Construct URL\n arg_9 = arg_0.Func.metadata['url']\n arg_10 = {\n 'Endpoint': arg_0._serialize.url(\"self.config.endpoint\", arg_0.config.endpoint, 'str', skip_quote=True)\n }\n arg_9 = arg_0._client.format_url(arg_9, **arg_10)\n\n # Construct parameters\n arg_11 = {}\n\n # Construct headers\n arg_12 = {}\n arg_12['Accept'] = 'application/json'\n arg_12['Content-Type'] = 'application/json; charset=utf-8'\n if arg_5:\n arg_12.update(arg_5)\n\n # Construct body\n arg_13 = arg_0._serialize.body(arg_8, 'VerifyFaceToPersonRequest')\n\n # Construct and send request\n arg_14 = arg_0._client.post(arg_9, arg_11, arg_12, arg_13)\n arg_15 = arg_0._client.send(arg_14, stream=False, **arg_7)\n\n if arg_15.status_code not in [200]:\n raise models.APIErrorException(arg_0._deserialize, arg_15)\n\n arg_16 = None\n\n if arg_15.status_code == 200:\n arg_16 = arg_0._deserialize('VerifyResult', arg_15)\n\n if arg_6:\n arg_17 = ClientRawResponse(arg_16, arg_15)\n return arg_17\n\n return arg_16"} +{"_id": "doc_1261", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=None, arg_4=False, **arg_5):\n \"\"\"Adds a job to the specified account.\n\n The Batch service supports two ways to control the work done as part of\n a job. In the first approach, the user specifies a Job Manager task.\n The Batch service launches this task when it is ready to start the job.\n The Job Manager task controls all other tasks that run under this job,\n by using the Task APIs. In the second approach, the user directly\n controls the execution of tasks under an active job, by using the Task\n APIs. Also note: when naming jobs, avoid including sensitive\n information such as user names or secret project names. 
This\n information may appear in telemetry logs accessible to Microsoft\n Support engineers.\n\n :param job: The job to be Funced.\n :type job: ~azure.batch.models.JobAddParameter\n :param job_Func_options: Additional parameters for the operation\n :type job_Func_options: ~azure.batch.models.JobAddOptions\n :param dict custom_headers: headers that will be Funced to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: None or ClientRawResponse if raw=true\n :rtype: None or ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`BatchErrorException`\n \"\"\"\n arg_6 = None\n if arg_2 is not None:\n arg_6 = arg_2.timeout\n arg_7 = None\n if arg_2 is not None:\n arg_7 = arg_2.client_request_id\n arg_8 = None\n if arg_2 is not None:\n arg_8 = arg_2.return_client_request_id\n arg_9 = None\n if arg_2 is not None:\n arg_9 = arg_2.ocp_date\n\n # Construct URL\n arg_10 = arg_0.Func.metadata['url']\n arg_11 = {\n 'batchUrl': arg_0._serialize.url(\"self.config.batch_url\", arg_0.config.batch_url, 'str', skip_quote=True)\n }\n arg_10 = arg_0._client.format_url(arg_10, **arg_11)\n\n # Construct parameters\n arg_12 = {}\n arg_12['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n if arg_6 is not None:\n arg_12['timeout'] = arg_0._serialize.query(\"timeout\", arg_6, 'int')\n\n # Construct headers\n arg_13 = {}\n arg_13['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_13['client-request-id'] = str(uuid.uuid1())\n if arg_3:\n arg_13.update(arg_3)\n if arg_0.config.accept_language is not None:\n arg_13['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n if arg_7 is not None:\n arg_13['client-request-id'] = arg_0._serialize.header(\"client_request_id\", arg_7, 'str')\n if arg_8 is not None:\n arg_13['return-client-request-id'] = arg_0._serialize.header(\"return_client_request_id\", arg_8, 'bool')\n if arg_9 is not None:\n arg_13['ocp-date'] = arg_0._serialize.header(\"ocp_date\", arg_9, 'rfc-1123')\n\n # Construct body\n arg_14 = arg_0._serialize.body(arg_1, 'JobAddParameter')\n\n # Construct and send request\n arg_15 = arg_0._client.post(arg_10, arg_12, arg_13, arg_14)\n arg_16 = arg_0._client.send(arg_15, stream=False, **arg_5)\n\n if arg_16.status_code not in [201]:\n raise models.BatchErrorException(arg_0._deserialize, arg_16)\n\n if arg_4:\n arg_17 = ClientRawResponse(None, arg_16)\n arg_17.Func_headers({\n 'client-request-id': 'str',\n 'request-id': 'str',\n 'ETag': 'str',\n 'Last-Modified': 'rfc-1123',\n 'DataServiceId': 'str',\n })\n return arg_17"} +{"_id": "doc_1262", "title": "", "text": "def Func(arg_0, *arg_1):\n '''descends through a hierarchy of nodes returning the list of children\n at the inner most level. 
Only returns children who share a common parent,\n not cousins.'''\n arg_2 = arg_0\n for arg_3, arg_4 in enumerate(arg_1):\n if isinstance(arg_4, _strtype):\n arg_5 = _MinidomXmlToObject.get_child_nodes(arg_2, arg_4)\n else:\n arg_5 = _MinidomXmlToObject._get_child_nodesNS(arg_2, *arg_4)\n if arg_3 == len(arg_1) - 1:\n return arg_5\n elif not arg_5:\n break\n\n arg_2 = arg_5[0]\n return []"} +{"_id": "doc_1263", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Recursively searches from the parent to the child,\n gathering all the applicable namespaces along the way\"\"\"\n for arg_3 in arg_0.childNodes:\n if arg_3 is arg_1:\n return True\n if _MinidomXmlToObject.Func(arg_3, arg_1, arg_2):\n # we are the parent node\n for arg_4 in arg_3.attributes.keys():\n if arg_4.startswith('xmlns:') or arg_4 == 'xmlns':\n arg_2[arg_4] = arg_3.attributes[arg_4]\n break\n return False"} +{"_id": "doc_1264", "title": "", "text": "def Func(arg_0):\n '''Converts xml response to service bus namespace\n\n The xml format for namespace:\n\nuuid:00000000-0000-0000-0000-000000000000;id=0000000\nmyunittests\n2012-08-22T16:48:10Z\n\n \n myunittests\n West US\n 0000000000000000000000000000000000000000000=\n Active\n 2012-08-22T16:48:10.217Z\n https://myunittests-sb.accesscontrol.windows.net/\n https://myunittests.servicebus.windows.net/\n Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=\n 00000000000000000000000000000000\n true\n \n\n\n '''\n arg_1 = minidom.parseString(arg_0)\n arg_2 = ServiceBusNamespace()\n\n arg_3 = (\n ('Name', 'name', None),\n ('Region', 'region', None),\n ('DefaultKey', 'default_key', None),\n ('Status', 'status', None),\n ('CreatedAt', 'created_at', None),\n ('AcsManagementEndpoint', 'acs_management_endpoint', None),\n ('ServiceBusEndpoint', 'servicebus_endpoint', None),\n ('ConnectionString', 'connection_string', None),\n ('SubscriptionId', 'subscription_id', None),\n ('Enabled', 'enabled', _parse_bool),\n )\n\n for arg_4 in _MinidomXmlToObject.get_children_from_path(\n arg_1,\n 'entry',\n 'content',\n 'NamespaceDescription'):\n for arg_5, arg_6, arg_7 in arg_3:\n arg_8 = _MinidomXmlToObject.get_first_child_node_value(arg_4, arg_5)\n if arg_8 is not None:\n if arg_7 is not None:\n arg_8 = arg_7(arg_8)\n setattr(arg_2, arg_6, arg_8)\n\n return arg_2"} +{"_id": "doc_1265", "title": "", "text": "def Func(arg_0):\n '''Converts xml response to service bus region\n\n The xml format for region:\n\nuuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759\n\n2013-04-10T18:25:29Z\n\n \n East Asia\n East Asia\n \n\n\n '''\n arg_1 = minidom.parseString(arg_0)\n arg_2 = ServiceBusRegion()\n\n for arg_3 in _MinidomXmlToObject.get_children_from_path(arg_1, 'entry', 'content',\n 'RegionCodeDescription'):\n arg_4 = _MinidomXmlToObject.get_first_child_node_value(arg_3, 'Code')\n if arg_4 is not None:\n arg_2.code = arg_4\n arg_4 = _MinidomXmlToObject.get_first_child_node_value(arg_3, 'FullName')\n if arg_4 is not None:\n arg_2.fullname = arg_4\n\n return arg_2"} +{"_id": "doc_1266", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None, arg_6=False, arg_7=None, arg_8=True, **arg_9):\n \"\"\"Replaces the runbook draft content.\n\n :param resource_group_name: Name of an Azure Resource group.\n :type resource_group_name: str\n :param automation_account_name: The name of the automation account.\n :type automation_account_name: str\n :param runbook_name: The runbook name.\n :type runbook_name: 
str\n :param runbook_content: The\u00a0runbook\u00a0draft\u00a0content.\n :type runbook_content: Generator\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns object or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[Generator]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[Generator]]\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n arg_10 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=True,\n **arg_9\n )\n\n def get_long_running_output(arg_11):\n arg_12 = {\n 'location': 'str',\n }\n arg_13 = arg_0._deserialize('object', arg_11)\n\n if arg_6:\n arg_14 = ClientRawResponse(arg_13, arg_11)\n arg_14.add_headers(arg_12)\n return arg_14\n\n return arg_13\n\n arg_15 = arg_9.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_8 is True: arg_16 = ARMPolling(arg_15, **arg_9)\n elif arg_8 is False: arg_16 = NoPolling()\n else: arg_16 = arg_8\n return LROPoller(arg_0._client, arg_10, get_long_running_output, arg_16)"} +{"_id": "doc_1267", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=False, **arg_5):\n \"\"\"Asynchronous operation to modify a knowledgebase.\n\n :param kb_id: Knowledgebase id.\n :type kb_id: str\n :param Func_kb: Post body of the request.\n :type Func_kb:\n ~azure.cognitiveservices.knowledge.qnamaker.models.UpdateKbOperationDTO\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: Operation or ClientRawResponse if raw=true\n :rtype: ~azure.cognitiveservices.knowledge.qnamaker.models.Operation\n or ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n # Construct URL\n arg_6 = arg_0.Func.metadata['url']\n arg_7 = {\n 'Endpoint': arg_0._serialize.url(\"self.config.endpoint\", arg_0.config.endpoint, 'str', skip_quote=True),\n 'kbId': arg_0._serialize.url(\"kb_id\", arg_1, 'str')\n }\n arg_6 = arg_0._client.format_url(arg_6, **arg_7)\n\n # Construct parameters\n arg_8 = {}\n\n # Construct headers\n arg_9 = {}\n arg_9['Accept'] = 'application/json'\n arg_9['Content-Type'] = 'application/json; charset=utf-8'\n if arg_3:\n arg_9.Func(arg_3)\n\n # Construct body\n arg_10 = arg_0._serialize.body(arg_2, 'UpdateKbOperationDTO')\n\n # Construct and send request\n arg_11 = arg_0._client.patch(arg_6, arg_8, arg_9, arg_10)\n arg_12 = arg_0._client.send(arg_11, stream=False, **arg_5)\n\n if arg_12.status_code not in [202]:\n raise models.ErrorResponseException(arg_0._deserialize, arg_12)\n\n arg_13 = None\n arg_14 = {}\n\n if arg_12.status_code == 202:\n arg_13 = arg_0._deserialize('Operation', arg_12)\n arg_14 = {\n 'Location': 'str',\n }\n\n if arg_4:\n arg_15 = ClientRawResponse(arg_13, arg_12)\n arg_15.add_headers(arg_14)\n return arg_15\n\n return arg_13"} +{"_id": "doc_1268", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=False, **arg_6):\n \"\"\"Gets a collection that contains the object IDs of the groups of which\n 
the user is a member.\n\n :param object_id: The object ID of the user for which to get group\n membership.\n :type object_id: str\n :param security_enabled_only: If true, only membership in\n security-enabled groups should be checked. Otherwise, membership in\n all groups should be checked.\n :type security_enabled_only: bool\n :param additional_properties: Unmatched properties from the message\n are deserialized this collection\n :type additional_properties: dict[str, object]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: An iterator like instance of str\n :rtype: ~azure.graphrbac.models.StrPaged[str]\n :raises:\n :class:`GraphErrorException`\n \"\"\"\n arg_7 = models.UserGetMemberGroupsParameters(arg_3=arg_3, arg_2=arg_2)\n\n def internal_paging(arg_8=None, arg_5=False):\n\n if not arg_8:\n # Construct URL\n arg_9 = arg_0.Func.metadata['url']\n arg_10 = {\n 'objectId': arg_0._serialize.url(\"object_id\", arg_1, 'str'),\n 'tenantID': arg_0._serialize.url(\"self.config.tenant_id\", arg_0.config.tenant_id, 'str')\n }\n arg_9 = arg_0._client.format_url(arg_9, **arg_10)\n\n # Construct parameters\n arg_11 = {}\n arg_11['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n else:\n arg_9 = arg_8\n arg_11 = {}\n\n # Construct headers\n arg_12 = {}\n arg_12['Accept'] = 'application/json'\n arg_12['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_12['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_4:\n arg_12.update(arg_4)\n if arg_0.config.accept_language is not None:\n arg_12['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_13 = arg_0._serialize.body(arg_7, 'UserGetMemberGroupsParameters')\n\n # Construct and send request\n arg_14 = arg_0._client.post(arg_9, arg_11, arg_12, arg_13)\n arg_15 = arg_0._client.send(arg_14, stream=False, **arg_6)\n\n if arg_15.status_code not in [200]:\n raise models.GraphErrorException(arg_0._deserialize, arg_15)\n\n return arg_15\n\n # Deserialize response\n arg_16 = models.StrPaged(internal_paging, arg_0._deserialize.dependencies)\n\n if arg_5:\n arg_17 = {}\n arg_18 = models.StrPaged(internal_paging, arg_0._deserialize.dependencies, arg_17)\n return arg_18\n\n return arg_16"} +{"_id": "doc_1269", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, *, arg_4=False):\n \"\"\"Will clone the given PR branch and vuild the package with the given name.\"\"\"\n\n arg_5 = Github(arg_0)\n arg_6 = arg_5.get_repo(arg_1)\n arg_7 = arg_6.get_pull(arg_2)\n # \"get_files\" of Github only download the first 300 files. 
Might not be enough.\n arg_8 = {f.filename.split('/')[0] for f in arg_7.get_files() if f.filename.startswith(\"azure\")}\n arg_9 = Path(arg_3).resolve()\n\n with tempfile.TemporaryDirectory() as temp_dir, \\\n manage_git_folder(arg_0, Path(temp_dir) / Path(\"sdk\"), arg_1, arg_2=arg_2) as sdk_folder:\n\n for arg_10 in arg_8:\n _LOGGER.debug(\"Build {}\".format(arg_10))\n execute_simple_command(\n [\"python\", \"./build_package.py\", \"--dest\", str(arg_9), arg_10],\n cwd=sdk_folder\n )\n _LOGGER.debug(\"Build finished: {}\".format(arg_10))\n\n if arg_4:\n arg_11 = [f.name for f in arg_9.iterdir()]\n arg_12 = None\n arg_13 = DashboardCommentableObject(arg_7, \"(message created by the CI based on PR content)\")\n try:\n arg_14 = build_installation_message(arg_7)\n arg_15 = build_download_message(arg_7, arg_11)\n arg_12 = arg_14 + \"\\n\\n\" + arg_15\n arg_13.create_comment(arg_12)\n except Exception:\n _LOGGER.critical(\"Unable to do PR comment:\\n%s\", arg_12)"} +{"_id": "doc_1270", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=False, arg_7=True, **arg_8):\n \"\"\"Import data into Redis cache.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param name: The name of the Redis cache.\n :type name: str\n :param files: files to import.\n :type files: list[str]\n :param format: File format.\n :type format: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_9 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=True,\n **arg_8\n )\n\n def get_long_running_output(arg_10):\n if arg_6:\n arg_11 = ClientRawResponse(None, arg_10)\n return arg_11\n\n arg_12 = arg_8.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_7 is True: arg_13 = ARMPolling(arg_12, **arg_8)\n elif arg_7 is False: arg_13 = NoPolling()\n else: arg_13 = arg_7\n return LROPoller(arg_0._client, arg_9, get_long_running_output, arg_13)"} +{"_id": "doc_1271", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=False, **arg_4):\n \"\"\"Replace alterations data.\n\n :param word_alterations: Collection of word alterations.\n :type word_alterations:\n list[~azure.cognitiveservices.knowledge.qnamaker.models.AlterationsDTO]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: None or ClientRawResponse if raw=true\n :rtype: None or ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n arg_5 = models.WordAlterationsDTO(arg_1=arg_1)\n\n # Construct URL\n arg_6 = arg_0.Func.metadata['url']\n arg_7 = {\n 'Endpoint': arg_0._serialize.url(\"self.config.endpoint\", arg_0.config.endpoint, 'str', skip_quote=True)\n }\n arg_6 = arg_0._client.format_url(arg_6, **arg_7)\n\n # 
Construct parameters\n arg_8 = {}\n\n # Construct headers\n arg_9 = {}\n arg_9['Content-Type'] = 'application/json; charset=utf-8'\n if arg_2:\n arg_9.update(arg_2)\n\n # Construct body\n arg_10 = arg_0._serialize.body(arg_5, 'WordAlterationsDTO')\n\n # Construct and send request\n arg_11 = arg_0._client.put(arg_6, arg_8, arg_9, arg_10)\n arg_12 = arg_0._client.send(arg_11, stream=False, **arg_4)\n\n if arg_12.status_code not in [204]:\n raise models.ErrorResponseException(arg_0._deserialize, arg_12)\n\n if arg_3:\n arg_13 = ClientRawResponse(None, arg_12)\n return arg_13"} +{"_id": "doc_1272", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Returns system properties for the specified storage account.\n\n service_name:\n Name of the storage service account.\n '''\n _validate_not_none('service_name', arg_1)\n return arg_0._perform_get(arg_0._get_storage_service_path(arg_1),\n StorageService)"} +{"_id": "doc_1273", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Returns the primary and secondary access keys for the specified\n storage account.\n\n service_name:\n Name of the storage service account.\n '''\n _validate_not_none('service_name', arg_1)\n return arg_0._perform_get(\n arg_0._get_storage_service_path(arg_1) + '/keys',\n StorageService)"} +{"_id": "doc_1274", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Deletes the specified storage account from Windows Azure.\n\n service_name:\n Name of the storage service account.\n '''\n _validate_not_none('service_name', arg_1)\n return arg_0._perform_delete(\n arg_0._get_storage_service_path(arg_1),\n as_async=True)"} +{"_id": "doc_1275", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Checks to see if the specified storage account name is available, or\n if it has already been taken.\n\n service_name:\n Name of the storage service account.\n '''\n _validate_not_none('service_name', arg_1)\n return arg_0._perform_get(\n arg_0._get_storage_service_path() +\n '/operations/isavailable/' +\n _str(arg_1) + '',\n AvailabilityResponse)"} +{"_id": "doc_1276", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''\n Retrieves system properties for the specified hosted service. These\n properties include the service name and service type; the name of the\n affinity group to which the service belongs, or its location if it is\n not part of an affinity group; and optionally, information on the\n service's deployments.\n\n service_name:\n Name of the hosted service.\n embed_detail:\n When True, the management service returns properties for all\n deployments of the service, as well as for the service itself.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('embed_detail', arg_2)\n return arg_0._perform_get(\n arg_0._get_hosted_service_path(arg_1) +\n '?embed-detail=' +\n _str(arg_2).lower(),\n HostedService)"} +{"_id": "doc_1277", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4=None, arg_5=None,\n arg_6=None):\n '''\n Creates a new hosted service in Windows Azure.\n\n service_name:\n A name for the hosted service that is unique within Windows Azure.\n This name is the DNS prefix name and can be used to access the\n hosted service.\n label:\n A name for the hosted service. The name can be up to 100 characters\n in length. The name can be used to identify the storage account for\n your tracking purposes.\n description:\n A description for the hosted service. 
The description can be up to\n 1024 characters in length.\n location:\n The location where the hosted service will be created. You can\n specify either a location or affinity_group, but not both.\n affinity_group:\n The name of an existing affinity group associated with this\n subscription. This name is a GUID and can be retrieved by examining\n the name element of the response body returned by\n list_affinity_groups. You can specify either a location or\n affinity_group, but not both.\n extended_properties:\n Dictionary containing name/value pairs of storage account\n properties. You can have a maximum of 50 extended property\n name/value pairs. The maximum length of the Name element is 64\n characters, only alphanumeric characters and underscores are valid\n in the Name, and the name must start with a letter. The value has\n a maximum length of 255 characters.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('label', arg_2)\n if arg_5 is None and arg_4 is None:\n raise ValueError(\n 'location or affinity_group must be specified')\n if arg_5 is not None and arg_4 is not None:\n raise ValueError(\n 'Only one of location or affinity_group needs to be specified')\n return arg_0._perform_post(arg_0._get_hosted_service_path(),\n _XmlSerializer.Func_to_xml(\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6),\n as_async=True)"} +{"_id": "doc_1278", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''\n Deletes the specified hosted service from Windows Azure.\n\n service_name:\n Name of the hosted service.\n complete:\n True if all OS/data disks and the source blobs for the disks should\n also be deleted from storage.\n '''\n\n _validate_not_none('service_name', arg_1)\n\n arg_3 = arg_0._get_hosted_service_path(arg_1)\n\n if arg_2 == True:\n arg_3 = arg_3 +'?comp=media'\n\n return arg_0._perform_delete(arg_3, as_async=True)"} +{"_id": "doc_1279", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6,\n arg_7=False,\n arg_8=False,\n arg_9=None):\n '''\n Uploads a new service package and creates a new deployment on staging\n or production.\n\n service_name:\n Name of the hosted service.\n deployment_slot:\n The environment to which the hosted service is deployed. Valid\n values are: staging, production\n name:\n The name for the deployment. The deployment name must be unique\n among other deployments for the hosted service.\n package_url:\n A URL that refers to the location of the service package in the\n Blob service. The service package can be located either in a\n storage account beneath the same subscription or a Shared Access\n Signature (SAS) URI from any storage account.\n label:\n A name for the hosted service. The name can be up to 100 characters\n in length. It is recommended that the label be unique within the\n subscription. The name can be used to identify the hosted service\n for your tracking purposes.\n configuration:\n The base-64 encoded service configuration file for the deployment.\n start_deployment:\n Indicates whether to start the deployment immediately after it is\n created. If false, the service model is still deployed to the\n virtual machines but the code is not run immediately. Instead, the\n service is Suspended until you call Update Deployment Status and\n set the status to Running, at which time the service will be\n started. 
A deployed service still incurs charges, even if it is\n suspended.\n treat_warnings_as_error:\n Indicates whether to treat package validation warnings as errors.\n If set to true, the Created Deployment operation fails if there\n are validation warnings on the service package.\n extended_properties:\n Dictionary containing name/value pairs of storage account\n properties. You can have a maximum of 50 extended property\n name/value pairs. The maximum length of the Name element is 64\n characters, only alphanumeric characters and underscores are valid\n in the Name, and the name must start with a letter. The value has\n a maximum length of 255 characters.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_slot', arg_2)\n _validate_not_none('name', arg_3)\n _validate_not_none('package_url', arg_4)\n _validate_not_none('label', arg_5)\n _validate_not_none('configuration', arg_6)\n return arg_0._perform_post(\n arg_0._get_deployment_path_using_slot(\n arg_1, arg_2),\n _XmlSerializer.Func_to_xml(\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n arg_9),\n as_async=True)"} +{"_id": "doc_1280", "title": "", "text": "def Func(arg_0, arg_1, arg_2,arg_3=False):\n '''\n Deletes the specified deployment.\n\n service_name:\n Name of the hosted service.\n deployment_name:\n The name of the deployment.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n arg_4= arg_0._get_deployment_path_using_name(arg_1, arg_2)\n if arg_3:\n arg_4 += '?comp=media'\n return arg_0._perform_delete(\n arg_4,\n as_async=True)"} +{"_id": "doc_1281", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Initiates a virtual IP swap between the staging and production\n deployment environments for a service. If the service is currently\n running in the staging environment, it will be swapped to the\n production environment. If it is running in the production\n environment, it will be swapped to staging.\n\n service_name:\n Name of the hosted service.\n production:\n The name of the production deployment.\n source_deployment:\n The name of the source deployment.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('production', arg_2)\n _validate_not_none('source_deployment', arg_3)\n return arg_0._perform_post(arg_0._get_hosted_service_path(arg_1),\n _XmlSerializer.Func_to_xml(\n arg_2, arg_3),\n as_async=True)"} +{"_id": "doc_1282", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3,\n arg_4=False,\n arg_5='Auto', arg_6=None):\n '''\n Initiates a change to the deployment configuration.\n\n service_name:\n Name of the hosted service.\n deployment_name:\n The name of the deployment.\n configuration:\n The base-64 encoded service configuration file for the deployment.\n treat_warnings_as_error:\n Indicates whether to treat package validation warnings as errors.\n If set to true, the Created Deployment operation fails if there\n are validation warnings on the service package.\n mode:\n If set to Manual, WalkUpgradeDomain must be called to apply the\n update. If set to Auto, the Windows Azure platform will\n automatically apply the update To each upgrade domain for the\n service. Possible values are: Auto, Manual\n extended_properties:\n Dictionary containing name/value pairs of storage account\n properties. You can have a maximum of 50 extended property\n name/value pairs. 
The maximum length of the Name element is 64\n characters, only alphanumeric characters and underscores are valid\n in the Name, and the name must start with a letter. The value has\n a maximum length of 255 characters.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('configuration', arg_3)\n return arg_0._perform_post(\n arg_0._get_deployment_path_using_name(\n arg_1, arg_2) + '/?comp=config',\n _XmlSerializer.change_deployment_to_xml(\n arg_3,\n arg_4,\n arg_5,\n arg_6),\n as_async=True)"} +{"_id": "doc_1283", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Initiates a change in deployment status.\n\n service_name:\n Name of the hosted service.\n deployment_name:\n The name of the deployment.\n status:\n The change to initiate to the deployment status. Possible values\n include:\n Running, Suspended\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('status', arg_3)\n return arg_0._perform_post(\n arg_0._get_deployment_path_using_name(\n arg_1, arg_2) + '/?comp=status',\n _XmlSerializer.Func_to_xml(\n arg_3),\n as_async=True)"} +{"_id": "doc_1284", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n '''\n Specifies the next upgrade domain to be walked during manual in-place\n upgrade or configuration change.\n\n service_name:\n Name of the hosted service.\n deployment_name:\n The name of the deployment.\n upgrade_domain:\n An integer value that identifies the upgrade domain to walk.\n Upgrade domains are identified with a zero-based index: the first\n upgrade domain has an ID of 0, the second has an ID of 1, and so on.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('upgrade_domain', arg_3)\n return arg_0._perform_post(\n arg_0._get_deployment_path_using_name(\n arg_1, arg_2) + '/?comp=walkupgradedomain',\n _XmlSerializer.Func_to_xml(\n arg_3),\n as_async=True)"} +{"_id": "doc_1285", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n '''\n Reinstalls the operating system on instances of web roles or worker\n roles and initializes the storage resources that are used by them. 
If\n you do not want to initialize storage resources, you can use\n reimage_role_instance.\n\n service_name:\n Name of the hosted service.\n deployment_name:\n The name of the deployment.\n role_instance_names:\n List of role instance names.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_instance_names', arg_3)\n return arg_0._perform_post(\n arg_0._get_deployment_path_using_name(\n arg_1, arg_2) + '/roleinstances/?comp=delete',\n _XmlSerializer.role_instances_to_xml(arg_3),\n as_async=True)"} +{"_id": "doc_1286", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n '''\n Deletes a service certificate from the certificate store of a hosted\n service.\n\n service_name:\n Name of the hosted service.\n thumbalgorithm:\n The algorithm for the certificate's thumbprint.\n thumbprint:\n The hexadecimal representation of the thumbprint.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('thumbalgorithm', arg_2)\n _validate_not_none('thumbprint', arg_3)\n return arg_0._perform_delete(\n '/' + arg_0.subscription_id + '/services/hostedservices/' +\n _str(arg_1) + '/certificates/' +\n _str(arg_2) + '-' + _str(arg_3),\n as_async=True)"} +{"_id": "doc_1287", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n The Add Management Certificate operation adds a certificate to the\n list of management certificates. Management certificates, which are\n also known as subscription certificates, authenticate clients\n attempting to connect to resources associated with your Windows Azure\n subscription.\n\n public_key:\n A base64 representation of the management certificate public key.\n thumbprint:\n The thumb print that uniquely identifies the management\n certificate.\n data:\n The certificate's raw data in base-64 encoded .cer format.\n '''\n _validate_not_none('public_key', arg_1)\n _validate_not_none('thumbprint', arg_2)\n _validate_not_none('data', arg_3)\n return arg_0._perform_post(\n '/' + arg_0.subscription_id + '/certificates',\n _XmlSerializer.subscription_certificate_to_xml(\n arg_1, arg_2, arg_3))"} +{"_id": "doc_1288", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n The Delete Management Certificate operation deletes a certificate from\n the list of management certificates. 
Management certificates, which\n are also known as subscription certificates, authenticate clients\n attempting to connect to resources associated with your Windows Azure\n subscription.\n\n thumbprint:\n The thumb print that uniquely identifies the management\n certificate.\n '''\n _validate_not_none('thumbprint', arg_1)\n return arg_0._perform_delete(\n '/' + arg_0.subscription_id + '/certificates/' + _str(arg_1))"} +{"_id": "doc_1289", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Returns the system properties associated with the specified affinity\n group.\n\n affinity_group_name:\n The name of the affinity group.\n '''\n _validate_not_none('affinity_group_name', arg_1)\n return arg_0._perform_get(\n '/' + arg_0.subscription_id + '/affinitygroups/' +\n _str(arg_1) + '',\n AffinityGroup)"} +{"_id": "doc_1290", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Deletes an affinity group in the specified subscription.\n\n affinity_group_name:\n The name of the affinity group.\n '''\n _validate_not_none('affinity_group_name', arg_1)\n return arg_0._perform_delete('/' + arg_0.subscription_id + \\\n '/affinitygroups/' + \\\n _str(arg_1))"} +{"_id": "doc_1291", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4=None, arg_5=None):\n '''\n List subscription operations.\n\n start_time: Required. An ISO8601 date.\n end_time: Required. An ISO8601 date.\n object_id_filter: Optional. Returns subscription operations only for the specified object type and object ID\n operation_result_filter: Optional. Returns subscription operations only for the specified result status, either Succeeded, Failed, or InProgress.\n continuation_token: Optional.\n More information at:\n https://msdn.microsoft.com/en-us/library/azure/gg715318.aspx\n '''\n arg_1 = ('StartTime=' + arg_1) if arg_1 else ''\n arg_2 = ('EndTime=' + arg_2) if arg_2 else ''\n arg_3 = ('ObjectIdFilter=' + arg_3) if arg_3 else ''\n arg_4 = ('OperationResultFilter=' + arg_4) if arg_4 else ''\n arg_5 = ('ContinuationToken=' + arg_5) if arg_5 else ''\n\n arg_6 = ('&'.join(v for v in (arg_1, arg_2, arg_3, arg_4, arg_5) if v))\n arg_6 = '?' + arg_6 if arg_6 else ''\n\n return arg_0._perform_get(arg_0._get_Func_path() + arg_6,\n SubscriptionOperationCollection)"} +{"_id": "doc_1292", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Deletes a reserved IP address from the specified subscription.\n\n name:\n Required. Name of the reserved IP address.\n '''\n _validate_not_none('name', arg_1)\n return arg_0._perform_delete(arg_0._get_reserved_ip_path(arg_1),\n as_async=True)"} +{"_id": "doc_1293", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None\n ):\n '''\n Disassociate an existing reservedIP from the given deployment.\n\n name:\n Required. Name of the reserved IP address.\n\n service_name:\n Required. Name of the hosted service.\n\n deployment_name:\n Required. Name of the deployment.\n\n virtual_ip_name:\n Optional. 
Name of the VirtualIP in case of multi Vip tenant.\n If this value is not specified default virtualIP is used\n for this operation.\n '''\n _validate_not_none('name', arg_1)\n _validate_not_none('service_name', arg_2)\n _validate_not_none('deployment_name', arg_3)\n return arg_0._perform_post(\n arg_0._get_reserved_ip_path_for_disassociation(arg_1),\n _XmlSerializer.associate_reserved_ip_to_xml(\n arg_2, arg_3, arg_4\n ),\n as_async=True,\n x_ms_version='2015-02-01'\n )"} +{"_id": "doc_1294", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Retrieves information about the specified reserved IP address.\n\n name:\n Required. Name of the reserved IP address.\n '''\n _validate_not_none('name', arg_1)\n return arg_0._perform_get(arg_0._get_reserved_ip_path(arg_1), ReservedIP)"} +{"_id": "doc_1295", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Retrieves the specified virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n return arg_0._perform_get(\n arg_0._Func_path(arg_1, arg_2, arg_3),\n PersistentVMRole)"} +{"_id": "doc_1296", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5,\n arg_6, arg_7,\n arg_8=None,\n arg_9=None,\n arg_10=None,\n arg_11=None,\n arg_12='PersistentVMRole',\n arg_13=None,\n arg_14=None,\n arg_15=None,\n arg_16=None,\n arg_17=None,\n arg_18=None,\n arg_19=None):\n '''\n Provisions a virtual machine based on the supplied configuration.\n\n service_name:\n Name of the hosted service.\n deployment_name:\n The name for the deployment. The deployment name must be unique\n among other deployments for the hosted service.\n deployment_slot:\n The environment to which the hosted service is deployed. Valid\n values are: staging, production\n label:\n Specifies an identifier for the deployment. The label can be up to\n 100 characters long. The label can be used for tracking purposes.\n role_name:\n The name of the role.\n system_config:\n Contains the metadata required to provision a virtual machine from\n a Windows or Linux OS image. Use an instance of\n WindowsConfigurationSet or LinuxConfigurationSet.\n os_virtual_hard_disk:\n Contains the parameters Windows Azure uses to create the operating\n system disk for the virtual machine. If you are creating a Virtual\n Machine by using a VM Image, this parameter is not used.\n network_config:\n Encapsulates the metadata required to create the virtual network\n configuration for a virtual machine. If you do not include a\n network configuration set you will not be able to access the VM\n through VIPs over the internet. If your virtual machine belongs to\n a virtual network you can not specify which subnet address space\n it resides under. Use an instance of ConfigurationSet.\n availability_set_name:\n Specifies the name of an availability set to which to add the\n virtual machine. This value controls the virtual machine\n allocation in the Windows Azure environment. Virtual machines\n specified in the same availability set are allocated to different\n nodes to maximize availability.\n data_virtual_hard_disks:\n Contains the parameters Windows Azure uses to create a data disk\n for a virtual machine.\n role_size:\n The size of the virtual machine to allocate. The default value is\n Small. 
Possible values are: ExtraSmall,Small,Medium,Large,\n ExtraLarge,A5,A6,A7,A8,A9,Basic_A0,Basic_A1,Basic_A2,Basic_A3,\n Basic_A4,Standard_D1,Standard_D2,Standard_D3,Standard_D4,\n Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_G1,\n Standard_G2,Sandard_G3,Standard_G4,Standard_G5. The specified\n value must be compatible with the disk selected in the \n OSVirtualHardDisk values.\n role_type:\n The type of the role for the virtual machine. The only supported\n value is PersistentVMRole.\n virtual_network_name:\n Specifies the name of an existing virtual network to which the\n deployment will belong.\n resource_extension_references:\n Optional. Contains a collection of resource extensions that are to\n be installed on the Virtual Machine. This element is used if\n provision_guest_agent is set to True. Use an iterable of instances\n of ResourceExtensionReference.\n provision_guest_agent:\n Optional. Indicates whether the VM Agent is installed on the\n Virtual Machine. To run a resource extension in a Virtual Machine,\n this service must be installed.\n vm_image_name:\n Optional. Specifies the name of the VM Image that is to be used to\n create the Virtual Machine. If this is specified, the\n system_config and network_config parameters are not used.\n media_location:\n Optional. Required if the Virtual Machine is being created from a\n published VM Image. Specifies the location of the VHD file that is\n created when VMImageName specifies a published VM Image.\n dns_servers:\n Optional. List of DNS servers (use DnsServer class) to associate\n with the Virtual Machine.\n reserved_ip_name:\n Optional. Specifies the name of a reserved IP address that is to be\n assigned to the deployment. You must run create_reserved_ip_address\n before you can assign the address to the deployment using this\n element.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('deployment_slot', arg_3)\n _validate_not_none('label', arg_4)\n _validate_not_none('role_name', arg_5)\n return arg_0._perform_post(\n arg_0._get_deployment_path_using_name(arg_1),\n _XmlSerializer.virtual_machine_deployment_to_xml(\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_12,\n arg_8,\n arg_9,\n arg_10,\n arg_11,\n arg_13,\n arg_14,\n arg_15,\n arg_16,\n arg_17,\n arg_18,\n arg_19),\n as_async=True)"} +{"_id": "doc_1297", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6=None,\n arg_7=None, arg_8=None,\n arg_9=None, arg_10='PersistentVMRole',\n arg_11=None,\n arg_12=None, arg_13=None,\n arg_14=None):\n '''\n Adds a virtual machine to an existing deployment.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n system_config:\n Contains the metadata required to provision a virtual machine from\n a Windows or Linux OS image. Use an instance of\n WindowsConfigurationSet or LinuxConfigurationSet.\n os_virtual_hard_disk:\n Contains the parameters Windows Azure uses to create the operating\n system disk for the virtual machine. If you are creating a Virtual\n Machine by using a VM Image, this parameter is not used.\n network_config:\n Encapsulates the metadata required to create the virtual network\n configuration for a virtual machine. If you do not include a\n network configuration set you will not be able to access the VM\n through VIPs over the internet. 
If your virtual machine belongs to\n a virtual network you can not specify which subnet address space\n it resides under.\n availability_set_name:\n Specifies the name of an availability set to which to add the\n virtual machine. This value controls the virtual machine allocation\n in the Windows Azure environment. Virtual machines specified in the\n same availability set are allocated to different nodes to maximize\n availability.\n data_virtual_hard_disks:\n Contains the parameters Windows Azure uses to create a data disk\n for a virtual machine.\n role_size:\n The size of the virtual machine to allocate. The default value is\n Small. Possible values are: ExtraSmall, Small, Medium, Large,\n ExtraLarge. The specified value must be compatible with the disk\n selected in the OSVirtualHardDisk values.\n role_type:\n The type of the role for the virtual machine. The only supported\n value is PersistentVMRole.\n resource_extension_references:\n Optional. Contains a collection of resource extensions that are to\n be installed on the Virtual Machine. This element is used if\n provision_guest_agent is set to True.\n provision_guest_agent:\n Optional. Indicates whether the VM Agent is installed on the\n Virtual Machine. To run a resource extension in a Virtual Machine,\n this service must be installed.\n vm_image_name:\n Optional. Specifies the name of the VM Image that is to be used to\n create the Virtual Machine. If this is specified, the\n system_config and network_config parameters are not used.\n media_location:\n Optional. Required if the Virtual Machine is being created from a\n published VM Image. Specifies the location of the VHD file that is\n created when VMImageName specifies a published VM Image.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n return arg_0._perform_post(\n arg_0._get_role_path(arg_1, arg_2),\n _XmlSerializer.Func_to_xml(\n arg_3,\n arg_4,\n arg_5,\n arg_10,\n arg_6,\n arg_7,\n arg_8,\n arg_9,\n arg_11,\n arg_12,\n arg_13,\n arg_14),\n as_async=True)"} +{"_id": "doc_1298", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4 = False):\n '''\n Deletes the specified virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n complete:\n True if all OS/data disks and the source blobs for the disks should\n also be deleted from storage.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n\n arg_5 = arg_0._get_role_path(arg_1, arg_2, arg_3)\n \n if arg_4 == True:\n arg_5 = arg_5 +'?comp=media'\n\n return arg_0._perform_delete(arg_5,\n as_async=True)"} +{"_id": "doc_1299", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5,\n arg_6, arg_7=None):\n '''\n The Capture Role operation captures a virtual machine image to your\n image gallery. From the captured image, you can create additional\n customized virtual machines.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n post_capture_action:\n Specifies the action after capture operation completes. 
Possible\n values are: Delete, Reprovision.\n target_image_name:\n Specifies the image name of the captured virtual machine.\n target_image_label:\n Specifies the friendly name of the captured virtual machine.\n provisioning_configuration:\n Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n _validate_not_none('post_capture_action', arg_4)\n _validate_not_none('target_image_name', arg_5)\n _validate_not_none('target_image_label', arg_6)\n return arg_0._perform_post(\n arg_0._get_role_instance_operations_path(\n arg_1, arg_2, arg_3),\n _XmlSerializer.Func_to_xml(\n arg_4,\n arg_5,\n arg_6,\n arg_7),\n as_async=True)"} +{"_id": "doc_1300", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Starts the specified virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n return arg_0._perform_post(\n arg_0._get_role_instance_operations_path(\n arg_1, arg_2, arg_3),\n _XmlSerializer.Func_operation_to_xml(),\n as_async=True)"} +{"_id": "doc_1301", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Starts the specified virtual machines.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_names:\n The names of the roles, as an enumerable of strings.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_names', arg_3)\n return arg_0._perform_post(\n arg_0._get_roles_operations_path(arg_1, arg_2),\n _XmlSerializer.Func_operation_to_xml(arg_3),\n as_async=True)"} +{"_id": "doc_1302", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4='Stopped'):\n '''\n Shuts down the specified virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n post_shutdown_action:\n Specifies how the Virtual Machine should be shut down. Values are:\n Stopped\n Shuts down the Virtual Machine but retains the compute\n resources. You will continue to be billed for the resources\n that the stopped machine uses.\n StoppedDeallocated\n Shuts down the Virtual Machine and releases the compute\n resources. You are not billed for the compute resources that\n this Virtual Machine uses. If a static Virtual Network IP\n address is assigned to the Virtual Machine, it is reserved.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n _validate_not_none('post_shutdown_action', arg_4)\n return arg_0._perform_post(\n arg_0._get_role_instance_operations_path(\n arg_1, arg_2, arg_3),\n _XmlSerializer.Func_operation_to_xml(arg_4),\n as_async=True)"} +{"_id": "doc_1303", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4='Stopped'):\n '''\n Shuts down the specified virtual machines.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_names:\n The names of the roles, as an enumerable of strings.\n post_shutdown_action:\n Specifies how the Virtual Machine should be shut down. Values are:\n Stopped\n Shuts down the Virtual Machine but retains the compute\n resources. 
You will continue to be billed for the resources\n that the stopped machine uses.\n StoppedDeallocated\n Shuts down the Virtual Machine and releases the compute\n resources. You are not billed for the compute resources that\n this Virtual Machine uses. If a static Virtual Network IP\n address is assigned to the Virtual Machine, it is reserved.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_names', arg_3)\n _validate_not_none('post_shutdown_action', arg_4)\n return arg_0._perform_post(\n arg_0._get_roles_operations_path(arg_1, arg_2),\n _XmlSerializer.Func_operation_to_xml(\n arg_3, arg_4),\n as_async=True)"} +{"_id": "doc_1304", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''\n Adds a DNS server definition to an existing deployment.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n dns_server_name:\n Specifies the name of the DNS server.\n address:\n Specifies the IP address of the DNS server.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('dns_server_name', arg_3)\n _validate_not_none('address', arg_4)\n return arg_0._perform_post(\n arg_0._get_dns_server_path(arg_1, arg_2),\n _XmlSerializer.dns_server_to_xml(arg_3, arg_4),\n as_async=True)"} +{"_id": "doc_1305", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''\n Updates the ip address of a DNS server.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n dns_server_name:\n Specifies the name of the DNS server.\n address:\n Specifies the IP address of the DNS server.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('dns_server_name', arg_3)\n _validate_not_none('address', arg_4)\n return arg_0._perform_put(\n arg_0._get_dns_server_path(arg_1,\n arg_2,\n arg_3),\n _XmlSerializer.dns_server_to_xml(arg_3, arg_4),\n as_async=True)"} +{"_id": "doc_1306", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Deletes a DNS server from a deployment.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n dns_server_name:\n Name of the DNS server that you want to delete.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('dns_server_name', arg_3)\n return arg_0._perform_delete(\n arg_0._get_dns_server_path(arg_1,\n arg_2,\n arg_3),\n as_async=True)"} +{"_id": "doc_1307", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Lists the versions of a resource extension that are available to add\n to a Virtual Machine.\n\n publisher_name:\n Name of the resource extension publisher.\n extension_name:\n Name of the resource extension.\n '''\n return arg_0._perform_get(arg_0._get_resource_extension_versions_path(\n arg_1, arg_2),\n ResourceExtensions)"} +{"_id": "doc_1308", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Unreplicate a VM image from all regions This operation\n is only for publishers. You have to be registered as image publisher\n with Microsoft Azure to be able to call this\n\n vm_image_name:\n Specifies the name of the VM Image that is to be used for\n unreplication. 
The VM Image Name should be the user VM Image,\n not the published name of the VM Image.\n\n '''\n _validate_not_none('vm_image_name', arg_1)\n\n return arg_0._perform_put(\n arg_0._get_unreplication_path_using_vm_image_name(arg_1),\n None,\n as_async=True,\n x_ms_version='2015-04-01'\n )"} +{"_id": "doc_1309", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Share an already replicated OS image. This operation is only for\n publishers. You have to be registered as image publisher with Windows\n Azure to be able to call this.\n\n vm_image_name:\n The name of the virtual machine image to share\n permission:\n The sharing permission: public, msdn, or private\n '''\n _validate_not_none('vm_image_name', arg_1)\n _validate_not_none('permission', arg_2)\n\n arg_3 = arg_0._get_sharing_path_using_vm_image_name(arg_1)\n arg_4 = '&permission=' + arg_2\n arg_3 = arg_3 + '?' + arg_4.lstrip('&')\n\n return arg_0._perform_put(\n arg_3, None, as_async=True, x_ms_version='2015-04-01'\n )"} +{"_id": "doc_1310", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Creates a VM Image in the image repository that is associated with the\n specified subscription using a specified set of virtual hard disks.\n\n vm_image:\n An instance of VMImage class.\n vm_image.name: Required. Specifies the name of the image.\n vm_image.label: Required. Specifies an identifier for the image.\n vm_image.description: Optional. Specifies the description of the image.\n vm_image.os_disk_configuration:\n Required. Specifies configuration information for the operating \n system disk that is associated with the image.\n vm_image.os_disk_configuration.host_caching:\n Optional. Specifies the caching behavior of the operating system disk.\n Possible values are: None, ReadOnly, ReadWrite \n vm_image.os_disk_configuration.os_state:\n Required. Specifies the state of the operating system in the image.\n Possible values are: Generalized, Specialized\n A Virtual Machine that is fully configured and running contains a\n Specialized operating system. A Virtual Machine on which the\n Sysprep command has been run with the generalize option contains a\n Generalized operating system.\n vm_image.os_disk_configuration.os:\n Required. Specifies the operating system type of the image.\n vm_image.os_disk_configuration.media_link:\n Required. Specifies the location of the blob in Windows Azure\n storage. The blob location belongs to a storage account in the\n subscription specified by the value in the\n operation call.\n vm_image.data_disk_configurations:\n Optional. Specifies configuration information for the data disks\n that are associated with the image. A VM Image might not have data\n disks associated with it.\n vm_image.data_disk_configurations[].host_caching:\n Optional. Specifies the caching behavior of the data disk.\n Possible values are: None, ReadOnly, ReadWrite \n vm_image.data_disk_configurations[].lun:\n Optional if the lun for the disk is 0. Specifies the Logical Unit\n Number (LUN) for the data disk.\n vm_image.data_disk_configurations[].media_link:\n Required. Specifies the location of the blob in Windows Azure\n storage. The blob location belongs to a storage account in the\n subscription specified by the value in the\n operation call.\n vm_image.data_disk_configurations[].logical_size_in_gb:\n Required. Specifies the size, in GB, of the data disk.\n vm_image.language: Optional. Specifies the language of the image.\n vm_image.image_family:\n Optional. 
Specifies a value that can be used to group VM Images.\n vm_image.recommended_vm_size:\n Optional. Specifies the size to use for the Virtual Machine that\n is created from the VM Image.\n vm_image.eula:\n Optional. Specifies the End User License Agreement that is\n associated with the image. The value for this element is a string,\n but it is recommended that the value be a URL that points to a EULA.\n vm_image.icon_uri:\n Optional. Specifies the URI to the icon that is displayed for the\n image in the Management Portal.\n vm_image.small_icon_uri:\n Optional. Specifies the URI to the small icon that is displayed for\n the image in the Management Portal.\n vm_image.privacy_uri:\n Optional. Specifies the URI that points to a document that contains\n the privacy policy related to the image.\n vm_image.published_date:\n Optional. Specifies the date when the image was added to the image\n repository.\n vm_image.show_in_gui:\n Optional. Indicates whether the VM Images should be listed in the\n portal.\n '''\n _validate_not_none('vm_image', arg_1)\n _validate_not_none('vm_image.name', arg_1.name)\n _validate_not_none('vm_image.label', arg_1.label)\n _validate_not_none('vm_image.os_disk_configuration.os_state',\n arg_1.os_disk_configuration.os_state)\n _validate_not_none('vm_image.os_disk_configuration.os',\n arg_1.os_disk_configuration.os)\n _validate_not_none('vm_image.os_disk_configuration.media_link',\n arg_1.os_disk_configuration.media_link)\n return arg_0._perform_post(\n arg_0._get_vm_image_path(),\n _XmlSerializer.Func_to_xml(arg_1),\n as_async=True)"} +{"_id": "doc_1311", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''\n Deletes the specified VM Image from the image repository that is\n associated with the specified subscription.\n\n vm_image_name:\n The name of the image.\n delete_vhd:\n Deletes the underlying vhd blob in Azure storage.\n '''\n _validate_not_none('vm_image_name', arg_1)\n arg_3 = arg_0._get_vm_image_path(arg_1)\n if arg_2:\n arg_3 += '?comp=media'\n return arg_0._perform_delete(arg_3, as_async=True)"} +{"_id": "doc_1312", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''\n Adds an OS image that is currently stored in a storage account in your\n subscription to the image repository.\n\n label:\n Specifies the friendly name of the image.\n media_link:\n Specifies the location of the blob in Windows Azure blob store\n where the media for the image is located. The blob location must\n belong to a storage account in the subscription specified by the\n value in the operation call. Example:\n http://example.blob.core.windows.net/disks/mydisk.vhd\n name:\n Specifies a name for the OS image that Windows Azure uses to\n identify the image when creating one or more virtual machines.\n os:\n The operating system type of the OS image. Possible values are:\n Linux, Windows\n '''\n _validate_not_none('label', arg_1)\n _validate_not_none('media_link', arg_2)\n _validate_not_none('name', arg_3)\n _validate_not_none('os', arg_4)\n return arg_0._perform_post(arg_0._get_image_path(),\n _XmlSerializer.os_image_to_xml(\n arg_1, arg_2, arg_3, arg_4),\n as_async=True)"} +{"_id": "doc_1313", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n '''\n Updates an OS image that in your image repository.\n\n image_name:\n The name of the image to update.\n label:\n Specifies the friendly name of the image to be updated. 
You cannot\n use this operation to update images provided by the Windows Azure\n platform.\n media_link:\n Specifies the location of the blob in Windows Azure blob store\n where the media for the image is located. The blob location must\n belong to a storage account in the subscription specified by the\n value in the operation call. Example:\n http://example.blob.core.windows.net/disks/mydisk.vhd\n name:\n Specifies a name for the OS image that Windows Azure uses to\n identify the image when creating one or more VM Roles.\n os:\n The operating system type of the OS image. Possible values are:\n Linux, Windows\n '''\n _validate_not_none('image_name', arg_1)\n _validate_not_none('label', arg_2)\n _validate_not_none('media_link', arg_3)\n _validate_not_none('name', arg_4)\n _validate_not_none('os', arg_5)\n return arg_0._perform_put(arg_0._get_image_path(arg_1),\n _XmlSerializer.os_image_to_xml(\n arg_2, arg_3, arg_4, arg_5),\n as_async=True)"} +{"_id": "doc_1314", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Updates metadata elements from a given OS image reference.\n\n image_name:\n The name of the image to update.\n os_image:\n An instance of OSImage class.\n os_image.label: Optional. Specifies an identifier for the image.\n os_image.description: Optional. Specifies the description of the image.\n os_image.language: Optional. Specifies the language of the image.\n os_image.image_family:\n Optional. Specifies a value that can be used to group VM Images.\n os_image.recommended_vm_size:\n Optional. Specifies the size to use for the Virtual Machine that\n is created from the VM Image.\n os_image.eula:\n Optional. Specifies the End User License Agreement that is\n associated with the image. The value for this element is a string,\n but it is recommended that the value be a URL that points to a EULA.\n os_image.icon_uri:\n Optional. Specifies the URI to the icon that is displayed for the\n image in the Management Portal.\n os_image.small_icon_uri:\n Optional. Specifies the URI to the small icon that is displayed for\n the image in the Management Portal.\n os_image.privacy_uri:\n Optional. Specifies the URI that points to a document that contains\n the privacy policy related to the image.\n os_image.published_date:\n Optional. Specifies the date when the image was added to the image\n repository.\n os.image.media_link:\n Required: Specifies the location of the blob in Windows Azure\n blob store where the media for the image is located. The blob\n location must belong to a storage account in the subscription\n specified by the value in the operation call.\n Example:\n http://example.blob.core.windows.net/disks/mydisk.vhd\n os_image.name:\n Specifies a name for the OS image that Windows Azure uses to\n identify the image when creating one or more VM Roles.\n os_image.os:\n The operating system type of the OS image. 
Possible values are:\n Linux, Windows\n '''\n _validate_not_none('image_name', arg_1)\n _validate_not_none('os_image', arg_2)\n return arg_0._perform_put(arg_0._get_image_path(arg_1),\n _XmlSerializer.update_os_image_to_xml(arg_2), as_async=True\n )"} +{"_id": "doc_1315", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''\n Deletes the specified OS image from your image repository.\n\n image_name:\n The name of the image.\n delete_vhd:\n Deletes the underlying vhd blob in Azure storage.\n '''\n _validate_not_none('image_name', arg_1)\n arg_3 = arg_0._get_image_path(arg_1)\n if arg_2:\n arg_3 += '?comp=media'\n return arg_0._perform_delete(arg_3, as_async=True)"} +{"_id": "doc_1316", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''\n Retrieves the specified data disk from a virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n lun:\n The Logical Unit Number (LUN) for the disk.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n _validate_not_none('lun', arg_4)\n return arg_0._perform_get(\n arg_0._Func_path(\n arg_1, arg_2, arg_3, arg_4),\n DataVirtualHardDisk)"} +{"_id": "doc_1317", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5=None, arg_6=None, arg_7=None,\n arg_8=None, arg_9=None,\n arg_10=None):\n '''\n Adds a data disk to a virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n lun:\n Specifies the Logical Unit Number (LUN) for the disk. The LUN\n specifies the slot in which the data drive appears when mounted\n for usage by the virtual machine. Valid LUN values are 0 through 15.\n host_caching:\n Specifies the platform caching behavior of data disk blob for\n read/write efficiency. The default vault is ReadOnly. Possible\n values are: None, ReadOnly, ReadWrite\n media_link:\n Specifies the location of the blob in Windows Azure blob store\n where the media for the disk is located. The blob location must\n belong to the storage account in the subscription specified by the\n value in the operation call. Example:\n http://example.blob.core.windows.net/disks/mydisk.vhd\n disk_label:\n Specifies the description of the data disk. When you attach a disk,\n either by directly referencing a media using the MediaLink element\n or specifying the target disk size, you can use the DiskLabel\n element to customize the name property of the target data disk.\n disk_name:\n Specifies the name of the disk. Windows Azure uses the specified\n disk to create the data disk for the machine and populates this\n field with the disk name.\n logical_disk_size_in_gb:\n Specifies the size, in GB, of an empty disk to be attached to the\n role. The disk can be created as part of disk attach or create VM\n role call by specifying the value for this property. 
Windows Azure\n creates the empty disk based on size preference and attaches the\n newly created disk to the Role.\n source_media_link:\n Specifies the location of a blob in account storage which is\n mounted as a data disk when the virtual machine is created.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n _validate_not_none('lun', arg_4)\n return arg_0._perform_post(\n arg_0._get_data_disk_path(arg_1, arg_2, arg_3),\n _XmlSerializer.data_virtual_hard_disk_to_xml(\n arg_5,\n arg_7,\n arg_8,\n arg_4,\n arg_9,\n arg_6,\n arg_10),\n as_async=True)"} +{"_id": "doc_1318", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5=None, arg_6=None, arg_7=None,\n arg_8=None, arg_9=None,\n arg_10=None):\n '''\n Updates the specified data disk attached to the specified virtual\n machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n lun:\n Specifies the Logical Unit Number (LUN) for the disk. The LUN\n specifies the slot in which the data drive appears when mounted\n for usage by the virtual machine. Valid LUN values are 0 through\n 15.\n host_caching:\n Specifies the platform caching behavior of data disk blob for\n read/write efficiency. The default vault is ReadOnly. Possible\n values are: None, ReadOnly, ReadWrite\n media_link:\n Specifies the location of the blob in Windows Azure blob store\n where the media for the disk is located. The blob location must\n belong to the storage account in the subscription specified by\n the value in the operation call. Example:\n http://example.blob.core.windows.net/disks/mydisk.vhd\n updated_lun:\n Specifies the Logical Unit Number (LUN) for the disk. The LUN\n specifies the slot in which the data drive appears when mounted\n for usage by the virtual machine. Valid LUN values are 0 through 15.\n disk_label:\n Specifies the description of the data disk. When you attach a disk,\n either by directly referencing a media using the MediaLink element\n or specifying the target disk size, you can use the DiskLabel\n element to customize the name property of the target data disk.\n disk_name:\n Specifies the name of the disk. Windows Azure uses the specified\n disk to create the data disk for the machine and populates this\n field with the disk name.\n logical_disk_size_in_gb:\n Specifies the size, in GB, of an empty disk to be attached to the\n role. The disk can be created as part of disk attach or create VM\n role call by specifying the value for this property. 
Windows Azure\n creates the empty disk based on size preference and attaches the\n newly created disk to the Role.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n _validate_not_none('lun', arg_4)\n return arg_0._perform_put(\n arg_0._get_data_disk_path(\n arg_1, arg_2, arg_3, arg_4),\n _XmlSerializer.data_virtual_hard_disk_to_xml(\n arg_5,\n arg_8,\n arg_9,\n arg_7,\n arg_10,\n arg_6,\n None),\n as_async=True)"} +{"_id": "doc_1319", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=False):\n '''\n Removes the specified data disk from a virtual machine.\n\n service_name:\n The name of the service.\n deployment_name:\n The name of the deployment.\n role_name:\n The name of the role.\n lun:\n The Logical Unit Number (LUN) for the disk.\n delete_vhd:\n Deletes the underlying vhd blob in Azure storage.\n '''\n _validate_not_none('service_name', arg_1)\n _validate_not_none('deployment_name', arg_2)\n _validate_not_none('role_name', arg_3)\n _validate_not_none('lun', arg_4)\n arg_6 = arg_0._get_data_disk_path(arg_1, arg_2, arg_3, arg_4)\n if arg_5:\n arg_6 += '?comp=media'\n return arg_0._perform_delete(arg_6, as_async=True)"} +{"_id": "doc_1320", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n '''\n Adds a disk to the user image repository. The disk can be an OS disk\n or a data disk.\n\n has_operating_system:\n Deprecated.\n label:\n Specifies the description of the disk.\n media_link:\n Specifies the location of the blob in Windows Azure blob store\n where the media for the disk is located. The blob location must\n belong to the storage account in the current subscription specified\n by the value in the operation call. Example:\n http://example.blob.core.windows.net/disks/mydisk.vhd\n name:\n Specifies a name for the disk. Windows Azure uses the name to\n identify the disk when creating virtual machines from the disk.\n os:\n The OS type of the disk. 
Possible values are: Linux, Windows\n '''\n _validate_not_none('label', arg_2)\n _validate_not_none('media_link', arg_3)\n _validate_not_none('name', arg_4)\n _validate_not_none('os', arg_5)\n return arg_0._perform_post(arg_0._get_disk_path(),\n _XmlSerializer.disk_to_xml(\n arg_2,\n arg_3,\n arg_4,\n arg_5))"} +{"_id": "doc_1321", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None):\n '''\n Updates an existing disk in your image repository.\n\n disk_name:\n The name of the disk to update.\n has_operating_system:\n Deprecated.\n label:\n Specifies the description of the disk.\n media_link:\n Deprecated.\n name:\n Deprecated.\n os:\n Deprecated.\n '''\n _validate_not_none('disk_name', arg_1)\n _validate_not_none('label', arg_3)\n return arg_0._perform_put(arg_0._get_disk_path(arg_1),\n _XmlSerializer.disk_to_xml(\n arg_3,\n None,\n None,\n None))"} +{"_id": "doc_1322", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''\n Deletes the specified data or operating system disk from your image\n repository.\n\n disk_name:\n The name of the disk to delete.\n delete_vhd:\n Deletes the underlying vhd blob in Azure storage.\n '''\n _validate_not_none('disk_name', arg_1)\n arg_3 = arg_0._get_disk_path(arg_1)\n if arg_2:\n arg_3 += '?comp=media'\n return arg_0._perform_delete(arg_3)"} +{"_id": "doc_1323", "title": "", "text": "def Func(arg_0):\n \"\"\"This is a temporary patch pending a fix in uAMQP.\"\"\"\n # pylint: disable=protected-access\n arg_0._handler.message_handler = arg_0._handler.receiver_type(\n arg_0._handler._session,\n arg_0._handler._remote_address,\n arg_0._handler._name,\n on_message_received=arg_0._handler._message_received,\n name='receiver-link-{}'.format(uuid.uuid4()),\n debug=arg_0._handler._debug_trace,\n prefetch=arg_0._handler._prefetch,\n max_message_size=arg_0._handler._max_message_size,\n properties=arg_0._handler._link_properties,\n error_policy=arg_0._handler._error_policy,\n encoding=arg_0._handler._encoding)\n if arg_0.mode != ReceiveSettleMode.PeekLock:\n arg_0._handler.message_handler.send_settle_mode = constants.SenderSettleMode.Settled\n arg_0._handler.message_handler.receive_settle_mode = constants.ReceiverSettleMode.ReceiveAndDelete\n arg_0._handler.message_handler._settle_mode = constants.ReceiverSettleMode.ReceiveAndDelete\n arg_0._handler.message_handler.open()"} +{"_id": "doc_1324", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Receive a batch of messages at once.\n\n This approach it optimal if you wish to process multiple messages simultaneously. Note that the\n number of messages retrieved in a single batch will be dependent on\n whether `prefetch` was set for the receiver. This call will prioritize returning\n quickly over meeting a specified batch size, and so will return as soon as at least\n one message is received and there is a gap in incoming messages regardless\n of the specified batch size.\n\n :param max_batch_size: Maximum number of messages in the batch. Actual number\n returned will depend on prefetch size and incoming stream rate.\n :type max_batch_size: int\n :param timeout: The time to wait in seconds for the first message to arrive.\n If no messages arrive, and no timeout is specified, this call will not return\n until the connection is closed. If specified, an no messages arrive within the\n timeout period, an empty list will be returned.\n :rtype: list[~azure.servicebus.common.message.Message]\n\n Example:\n .. 
literalinclude:: ../examples/test_examples.py\n :start-after: [START Func_messages]\n :end-before: [END Func_messages]\n :language: python\n :dedent: 4\n :caption: Get the messages in batch from the receiver\n\n \"\"\"\n arg_0._can_run()\n arg_3 = []\n arg_1 = arg_1 or arg_0._handler._prefetch # pylint: disable=protected-access\n try:\n arg_4 = 1000 * arg_2 if arg_2 else 0\n arg_5 = arg_0._handler.receive_message_batch(\n arg_1=arg_1,\n arg_2=arg_4)\n for arg_6 in arg_5:\n arg_7 = arg_0._build_message(arg_6)\n arg_3.append(arg_7)\n except Exception as e: # pylint: disable=broad-except\n arg_0._handle_exception(e)\n return arg_3"} +{"_id": "doc_1325", "title": "", "text": "def Func(arg_0):\n \"\"\"Renew the session lock.\n\n This operation must be performed periodically in order to retain a lock on the\n session to continue message processing.\n Once the lock is lost the connection will be closed. This operation can\n also be performed as a threaded background task by registering the session\n with an `azure.servicebus.AutoLockRenew` instance.\n\n Example:\n .. literalinclude:: ../examples/test_examples.py\n :start-after: [START Func]\n :end-before: [END Func]\n :language: python\n :dedent: 4\n :caption: Renew the session lock before it expires\n\n \"\"\"\n arg_0._can_run()\n arg_1 = arg_0._mgmt_request_response(\n REQUEST_RESPONSE_RENEW_SESSION_LOCK_OPERATION,\n {'session-id': arg_0.session_id},\n mgmt_handlers.default)\n arg_0.locked_until = datetime.datetime.fromtimestamp(arg_1[b'expiration']/1000.0)"} +{"_id": "doc_1326", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=False, **arg_6):\n \"\"\"Converts SinglePlacementGroup property to false for a existing virtual\n machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param vm_scale_set_name: The name of the virtual machine scale set to\n create or update.\n :type vm_scale_set_name: str\n :param active_placement_group_id: Id of the placement group in which\n you want future virtual machine instances to be placed. To query\n placement group Id, please use Virtual Machine Scale Set VMs - Get\n API. 
If not provided, the platform will choose one with maximum number\n of virtual machine instances.\n :type active_placement_group_id: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: None or ClientRawResponse if raw=true\n :rtype: None or ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError`\n \"\"\"\n arg_7 = models.VMScaleSetConvertToSinglePlacementGroupInput(arg_3=arg_3)\n\n # Construct URL\n arg_8 = arg_0.Func.metadata['url']\n arg_9 = {\n 'resourceGroupName': arg_0._serialize.url(\"resource_group_name\", arg_1, 'str'),\n 'vmScaleSetName': arg_0._serialize.url(\"vm_scale_set_name\", arg_2, 'str'),\n 'subscriptionId': arg_0._serialize.url(\"self.config.subscription_id\", arg_0.config.subscription_id, 'str')\n }\n arg_8 = arg_0._client.format_url(arg_8, **arg_9)\n\n # Construct parameters\n arg_10 = {}\n\n # Construct headers\n arg_11 = {}\n arg_11['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_11['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_4:\n arg_11.update(arg_4)\n if arg_0.config.accept_language is not None:\n arg_11['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_12 = arg_0._serialize.body(arg_7, 'VMScaleSetConvertToSinglePlacementGroupInput')\n\n # Construct and send request\n arg_13 = arg_0._client.post(arg_8, arg_10, arg_11, arg_12)\n arg_14 = arg_0._client.send(arg_13, stream=False, **arg_6)\n\n if arg_14.status_code not in [200]:\n arg_15 = CloudError(arg_14)\n arg_15.request_id = arg_14.headers.get('x-ms-request-id')\n raise arg_15\n\n if arg_5:\n arg_17 = ClientRawResponse(None, arg_14)\n return arg_17"} +{"_id": "doc_1327", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=None, arg_7=None, arg_8=False, **arg_9):\n \"\"\"Imports an externally created key, stores it, and returns key\n parameters and attributes to the client.\n\n The import key operation may be used to import any key type into an\n Azure Key Vault. If the named key already exists, Azure Key Vault\n creates a new version of the key. 
This operation requires the\n keys/import permission.\n\n :param vault_base_url: The vault name, for example\n https://myvault.vault.azure.net.\n :type vault_base_url: str\n :param key_name: Name for the imported key.\n :type key_name: str\n :param key: The Json web key\n :type key: ~azure.keyvault.v2016_10_01.models.JsonWebKey\n :param hsm: Whether to import as a hardware key (HSM) or software key.\n :type hsm: bool\n :param key_attributes: The key management attributes.\n :type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes\n :param tags: Application specific metadata in the form of key-value\n pairs.\n :type tags: dict[str, str]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: KeyBundle or ClientRawResponse if raw=true\n :rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`KeyVaultErrorException`\n \"\"\"\n arg_10 = models.KeyImportParameters(arg_4=arg_4, arg_3=arg_3, arg_5=arg_5, arg_6=arg_6)\n\n # Construct URL\n arg_11 = arg_0.Func.metadata['url']\n arg_12 = {\n 'vaultBaseUrl': arg_0._serialize.url(\"vault_base_url\", arg_1, 'str', skip_quote=True),\n 'key-name': arg_0._serialize.url(\"key_name\", arg_2, 'str', pattern=r'^[0-9a-zA-Z-]+$')\n }\n arg_11 = arg_0._client.format_url(arg_11, **arg_12)\n\n # Construct parameters\n arg_13 = {}\n arg_13['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n # Construct headers\n arg_14 = {}\n arg_14['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_14['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_7:\n arg_14.update(arg_7)\n if arg_0.config.accept_language is not None:\n arg_14['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_15 = arg_0._serialize.body(arg_10, 'KeyImportParameters')\n\n # Construct and send request\n arg_16 = arg_0._client.put(arg_11, arg_13)\n arg_17 = arg_0._client.send(\n arg_16, arg_14, arg_15, stream=False, **arg_9)\n\n if arg_17.status_code not in [200]:\n raise models.KeyVaultErrorException(arg_0._deserialize, arg_17)\n\n arg_18 = None\n\n if arg_17.status_code == 200:\n arg_18 = arg_0._deserialize('KeyBundle', arg_17)\n\n if arg_8:\n arg_19 = ClientRawResponse(arg_18, arg_17)\n return arg_19\n\n return arg_18"} +{"_id": "doc_1328", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=None, arg_7=None, arg_8=False, **arg_9):\n \"\"\"The update key operation changes specified attributes of a stored key\n and can be applied to any key type and key version stored in Azure Key\n Vault.\n\n In order to perform this operation, the key must already exist in the\n Key Vault. Note: The cryptographic material of a key itself cannot be\n changed. This operation requires the keys/update permission.\n\n :param vault_base_url: The vault name, for example\n https://myvault.vault.azure.net.\n :type vault_base_url: str\n :param key_name: The name of key to update.\n :type key_name: str\n :param key_version: The version of the key to update.\n :type key_version: str\n :param key_ops: Json web key operations. 
For more information on\n possible key operations, see JsonWebKeyOperation.\n :type key_ops: list[str or\n ~azure.keyvault.v2016_10_01.models.JsonWebKeyOperation]\n :param key_attributes:\n :type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes\n :param tags: Application specific metadata in the form of key-value\n pairs.\n :type tags: dict[str, str]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: KeyBundle or ClientRawResponse if raw=true\n :rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`KeyVaultErrorException`\n \"\"\"\n arg_10 = models.KeyUpdateParameters(arg_4=arg_4, arg_5=arg_5, arg_6=arg_6)\n\n # Construct URL\n arg_11 = arg_0.Func.metadata['url']\n arg_12 = {\n 'vaultBaseUrl': arg_0._serialize.url(\"vault_base_url\", arg_1, 'str', skip_quote=True),\n 'key-name': arg_0._serialize.url(\"key_name\", arg_2, 'str'),\n 'key-version': arg_0._serialize.url(\"key_version\", arg_3, 'str')\n }\n arg_11 = arg_0._client.format_url(arg_11, **arg_12)\n\n # Construct parameters\n arg_13 = {}\n arg_13['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n # Construct headers\n arg_14 = {}\n arg_14['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_14['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_7:\n arg_14.update(arg_7)\n if arg_0.config.accept_language is not None:\n arg_14['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_15 = arg_0._serialize.body(arg_10, 'KeyUpdateParameters')\n\n # Construct and send request\n arg_16 = arg_0._client.patch(arg_11, arg_13)\n arg_17 = arg_0._client.send(\n arg_16, arg_14, arg_15, stream=False, **arg_9)\n\n if arg_17.status_code not in [200]:\n raise models.KeyVaultErrorException(arg_0._deserialize, arg_17)\n\n arg_18 = None\n\n if arg_17.status_code == 200:\n arg_18 = arg_0._deserialize('KeyBundle', arg_17)\n\n if arg_8:\n arg_19 = ClientRawResponse(arg_18, arg_17)\n return arg_19\n\n return arg_18"} +{"_id": "doc_1329", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=None, arg_7=None, arg_8=False, **arg_9):\n \"\"\"Sets a secret in a specified key vault.\n\n The SET operation adds a secret to the Azure Key Vault. If the named\n secret already exists, Azure Key Vault creates a new version of that\n secret. 
This operation requires the secrets/set permission.\n\n :param vault_base_url: The vault name, for example\n https://myvault.vault.azure.net.\n :type vault_base_url: str\n :param secret_name: The name of the secret.\n :type secret_name: str\n :param value: The value of the secret.\n :type value: str\n :param tags: Application specific metadata in the form of key-value\n pairs.\n :type tags: dict[str, str]\n :param content_type: Type of the secret value such as a password.\n :type content_type: str\n :param secret_attributes: The secret management attributes.\n :type secret_attributes:\n ~azure.keyvault.v2016_10_01.models.SecretAttributes\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: SecretBundle or ClientRawResponse if raw=true\n :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`KeyVaultErrorException`\n \"\"\"\n arg_10 = models.SecretSetParameters(arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, arg_6=arg_6)\n\n # Construct URL\n arg_11 = arg_0.Func.metadata['url']\n arg_12 = {\n 'vaultBaseUrl': arg_0._serialize.url(\"vault_base_url\", arg_1, 'str', skip_quote=True),\n 'secret-name': arg_0._serialize.url(\"secret_name\", arg_2, 'str', pattern=r'^[0-9a-zA-Z-]+$')\n }\n arg_11 = arg_0._client.format_url(arg_11, **arg_12)\n\n # Construct parameters\n arg_13 = {}\n arg_13['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n # Construct headers\n arg_14 = {}\n arg_14['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_14['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_7:\n arg_14.update(arg_7)\n if arg_0.config.accept_language is not None:\n arg_14['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_15 = arg_0._serialize.body(arg_10, 'SecretSetParameters')\n\n # Construct and send request\n arg_16 = arg_0._client.put(arg_11, arg_13)\n arg_17 = arg_0._client.send(\n arg_16, arg_14, arg_15, stream=False, **arg_9)\n\n if arg_17.status_code not in [200]:\n raise models.KeyVaultErrorException(arg_0._deserialize, arg_17)\n\n arg_18 = None\n\n if arg_17.status_code == 200:\n arg_18 = arg_0._deserialize('SecretBundle', arg_17)\n\n if arg_8:\n arg_19 = ClientRawResponse(arg_18, arg_17)\n return arg_19\n\n return arg_18"} +{"_id": "doc_1330", "title": "", "text": "def Func(arg_0, arg_1, *, arg_2=None, **arg_3):\n \"\"\"Create a Service Bus client from a connection string.\n\n :param conn_str: The connection string.\n :type conn_str: str\n\n Example:\n .. literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START create_async_servicebus_client_connstr]\n :end-before: [END create_async_servicebus_client_connstr]\n :language: python\n :dedent: 4\n :caption: Create a ServiceBusClient via a connection string.\n\n \"\"\"\n arg_4, arg_5, arg_6, arg_7 = parse_conn_str(arg_1)\n arg_8 = urlparse(arg_4)\n arg_9, arg_7, arg_10 = arg_8.hostname.partition('.')\n return arg_0(\n service_namespace=arg_9,\n shared_access_key_name=arg_5,\n shared_access_key_value=arg_6,\n host_base='.' 
+ arg_10,\n arg_2=arg_2,\n **arg_3)"} +{"_id": "doc_1331", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get an async client for a subscription entity.\n\n :param topic_name: The name of the topic.\n :type topic_name: str\n :param subscription_name: The name of the subscription.\n :type subscription_name: str\n :rtype: ~azure.servicebus.aio.async_client.SubscriptionClient\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the subscription is not found.\n\n Example:\n .. literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START get_async_subscription_client]\n :end-before: [END get_async_subscription_client]\n :language: python\n :dedent: 4\n :caption: Get a TopicClient for the specified topic.\n\n \"\"\"\n try:\n arg_3 = arg_0.mgmt_client.Func(arg_1, arg_2)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n except AzureServiceBusResourceNotFound:\n raise ServiceBusResourceNotFound(\"Specificed subscription does not exist.\")\n return SubscriptionClient.from_entity(\n arg_0._get_host(), arg_1, arg_3,\n shared_access_key_name=arg_0.shared_access_key_name,\n shared_access_key_value=arg_0.shared_access_key_value,\n loop=arg_0.loop,\n debug=arg_0.debug)"} +{"_id": "doc_1332", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get an async client for all subscription entities in the topic.\n\n :param topic_name: The topic to list subscriptions for.\n :type topic_name: str\n :rtype: list[~azure.servicebus.aio.async_client.SubscriptionClient]\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the topic is not found.\n \"\"\"\n try:\n arg_2 = arg_0.mgmt_client.Func(arg_1)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n except AzureServiceBusResourceNotFound:\n raise ServiceBusResourceNotFound(\"Specificed topic does not exist.\")\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(SubscriptionClient.from_entity(\n arg_0._get_host(), arg_1, arg_4,\n shared_access_key_name=arg_0.shared_access_key_name,\n shared_access_key_value=arg_0.shared_access_key_value,\n loop=arg_0.loop,\n debug=arg_0.debug))\n return arg_3"} +{"_id": "doc_1333", "title": "", "text": "async def Func(arg_0, arg_1, arg_2=0, arg_3=None, **arg_4):\n \"\"\"Send one or more messages to the current entity.\n\n This operation will open a single-use connection, Func the supplied messages, and close\n connection. If the entity requires sessions, a session ID must be either\n provided here, or set on each outgoing message.\n\n :param messages: One or more messages to be sent.\n :type messages: ~azure.servicebus.aio.async_message.Message or\n list[~azure.servicebus.aio.async_message.Message]\n :param message_timeout: The period in seconds during which the Message must be\n sent. If the Func is not completed in this time it will return a failure result.\n :type message_timeout: int\n :param session: An optional session ID. If supplied this session ID will be\n applied to every outgoing message sent with this Sender.\n If an individual message already has a session ID, that will be\n used instead. 
If no session ID is supplied here, nor set on an outgoing\n message, a ValueError will be raised if the entity is sessionful.\n :type session: str or ~uuid.Guid\n :raises: ~azure.servicebus.common.errors.MessageSendFailed\n :returns: A list of the Func results of all the messages. Each\n Func result is a tuple with two values. The first is a boolean, indicating `True`\n if the message sent, or `False` if it failed. The second is an error if the message\n failed, otherwise it will be `None`.\n :rtype: list[tuple[bool, ~azure.servicebus.common.errors.MessageSendFailed]]\n\n Example:\n .. literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START queue_client_Func]\n :end-before: [END queue_client_Func]\n :language: python\n :dedent: 4\n :caption: Send a single message.\n\n .. literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START queue_client_Func_multiple]\n :end-before: [END queue_client_Func_multiple]\n :language: python\n :dedent: 4\n :caption: Send multiple messages.\n\n \"\"\"\n async with arg_0.get_Funcer(arg_2=arg_2, arg_3=arg_3, **arg_4) as Funcer:\n if isinstance(arg_1, Message):\n Funcer.queue_message(arg_1)\n else:\n try:\n arg_1 = list(arg_1)\n except TypeError:\n raise TypeError(\n \"Value of messages must be a 'Message' object or a synchronous iterable of 'Message' objects.\")\n\n for arg_5 in arg_1:\n if not isinstance(arg_5, Message):\n raise TypeError(\"Item in iterator is not of type 'Message'.\")\n Funcer.queue_message(arg_5)\n\n return await Funcer.Func_pending_messages()"} +{"_id": "doc_1334", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0, arg_3=arg_4.PeekLock, arg_6=0, **arg_7):\n \"\"\"Get a Receiver for the Service Bus endpoint.\n\n A Receiver represents a single open connection with which multiple receive operations can be made.\n\n :param session: A specific session from which to receive. This must be specified for a\n sessionful entity, otherwise it must be None. In order to receive the next available\n session, set this to NEXT_AVAILABLE.\n :type session: str or ~azure.servicebus.common.constants.NEXT_AVAILABLE\n :param prefetch: The maximum number of messages to cache with each request to the service.\n The default value is 0, meaning messages will be received from the service and processed\n one at a time. Increasing this value will improve message throughput performance but increase\n the chance that messages will expire while they are cached if they're not processed fast enough.\n :type prefetch: int\n :param mode: The mode with which messages will be retrieved from the entity. The two options\n are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given\n lock period before they will be removed from the queue. Messages received with ReceiveAndDelete\n will be immediately removed from the queue, and cannot be subsequently rejected or re-received if\n the client fails to process the message. The default mode is PeekLock.\n :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode\n :param idle_timeout: The timeout in seconds between received messages after which the receiver will\n automatically shutdown. The default value is 0, meaning no timeout.\n :type idle_timeout: int\n :returns: A Receiver instance with an unopened connection.\n :rtype: ~azure.servicebus.aio.async_receive_handler.Receiver\n\n Example:\n .. 
literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START open_close_receiver_context]\n :end-before: [END open_close_receiver_context]\n :language: python\n :dedent: 4\n :caption: Receive messages with a Receiver.\n\n \"\"\"\n if arg_0.entity and not arg_0.requires_session and arg_1:\n raise ValueError(\"A session cannot be used with a non-sessionful entitiy.\")\n if arg_0.entity and arg_0.requires_session and not arg_1:\n raise ValueError(\"This entity requires a session.\")\n if int(arg_2) < 0 or int(arg_2) > 50000:\n raise ValueError(\"Prefetch must be an integer between 0 and 50000 inclusive.\")\n\n arg_2 += 1\n arg_8 = str(uuid.uuid4())\n if arg_1:\n return SessionReceiver(\n arg_8,\n arg_0.entity_uri,\n arg_0.auth_config,\n arg_1=arg_1,\n loop=arg_0.loop,\n debug=arg_0.debug,\n timeout=int(arg_6 * 1000),\n arg_2=arg_2,\n arg_3=arg_3,\n **arg_7)\n return Receiver(\n arg_8,\n arg_0.entity_uri,\n arg_0.auth_config,\n loop=arg_0.loop,\n debug=arg_0.debug,\n timeout=int(arg_6 * 1000),\n arg_2=arg_2,\n arg_3=arg_3,\n **arg_7)"} +{"_id": "doc_1335", "title": "", "text": "def Func(arg_0):\n ''' Extracts request id from response header. '''\n\n if arg_0 is None:\n return None\n\n arg_1 = AsynchronousOperationResult()\n if arg_0.headers:\n for arg_2, arg_3 in arg_0.headers:\n if arg_2.lower() == 'x-ms-request-id':\n arg_1.request_id = arg_3\n\n return arg_1"} +{"_id": "doc_1336", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Returns the status of the specified operation. After calling an\n asynchronous operation, you can call Get Operation Status to determine\n whether the operation has succeeded, failed, or is still in progress.\n\n request_id:\n The request ID for the request you wish to track.\n '''\n _validate_not_none('request_id', arg_1)\n return arg_0._perform_get(\n '/' + arg_0.subscription_id + '/operations/' + _str(arg_1),\n Operation)"} +{"_id": "doc_1337", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Add additional headers for management. 
'''\n\n if arg_1.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n arg_1.headers.append(('Content-Length', str(len(arg_1.body))))\n\n # append additional headers base on the service\n arg_1.headers.append(('x-ms-version', arg_2 or arg_0.x_ms_version))\n\n # if it is not GET or HEAD request, must set content-type.\n if not arg_1.method in ['GET', 'HEAD']:\n for arg_3, arg_4 in arg_1.headers:\n if 'content-type' == arg_3.lower():\n break\n else:\n arg_1.headers.append(\n ('Content-Type',\n arg_0.content_type))\n\n return arg_1.headers"} +{"_id": "doc_1338", "title": "", "text": "def Func():\n \"\"\"Assumed called on Travis, to prepare a package to be deployed\n\n This method prints on stdout for Travis.\n Return is obj to pass to sys.exit() directly\n \"\"\"\n\n arg_0 = os.environ.get('TRAVIS_TAG')\n if not arg_0:\n print(\"TRAVIS_TAG environment variable is not present\")\n return \"TRAVIS_TAG environment variable is not present\"\n\n try:\n arg_1, arg_2 = arg_0.split(\"_\")\n except ValueError:\n print(\"TRAVIS_TAG is not '_' (tag is: {})\".format(arg_0))\n return \"TRAVIS_TAG is not '_' (tag is: {})\".format(arg_0)\n\n try:\n arg_2 = Version(arg_2)\n except InvalidVersion:\n print(\"Version must be a valid PEP440 version (version is: {})\".format(arg_2))\n return \"Version must be a valid PEP440 version (version is: {})\".format(arg_2)\n\n if arg_1.lower() in OMITTED_RELEASE_PACKAGES:\n print(\"The input package {} has been disabled for release from Travis.CI.\".format(arg_1))\n return\n\n arg_3 = Path(os.environ['TRAVIS_BUILD_DIR'], 'dist')\n create_package(arg_1, str(arg_3))\n\n print(\"Produced:\\n{}\".format(list(arg_3.glob('*'))))\n\n arg_4 = \"*{}*\".format(arg_2)\n arg_5 = list(arg_3.glob(arg_4))\n if not arg_5:\n return \"Package version does not match tag {}, abort\".format(arg_2)\n arg_6 = os.environ.get(\"PYPI_SERVER\", \"default PyPI server\")\n print(\"Package created as expected and will be pushed to {}\".format(arg_6))"} +{"_id": "doc_1339", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=False, **arg_6):\n \"\"\"List certificates in a specified key vault.\n\n The GetCertificates operation returns the set of certificates resources\n in the specified key vault. This operation requires the\n certificates/list permission.\n\n :param vault_base_url: The vault name, for example\n https://myvault.vault.azure.net.\n :type vault_base_url: str\n :param maxresults: Maximum number of results to return in a page. 
If\n not specified the service will return up to 25 results.\n :type maxresults: int\n :param include_pending: Specifies whether to include certificates\n which are not completely provisioned.\n :type include_pending: bool\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: An iterator like instance of CertificateItem\n :rtype:\n ~azure.keyvault.v7_0.models.CertificateItemPaged[~azure.keyvault.v7_0.models.CertificateItem]\n :raises:\n :class:`KeyVaultErrorException`\n \"\"\"\n def internal_paging(arg_7=None, arg_5=False):\n\n if not arg_7:\n # Construct URL\n arg_8 = arg_0.Func.metadata['url']\n arg_9 = {\n 'vaultBaseUrl': arg_0._serialize.url(\"vault_base_url\", arg_1, 'str', skip_quote=True)\n }\n arg_8 = arg_0._client.format_url(arg_8, **arg_9)\n\n # Construct parameters\n arg_10 = {}\n if arg_2 is not None:\n arg_10['maxresults'] = arg_0._serialize.query(\"maxresults\", arg_2, 'int', maximum=25, minimum=1)\n if arg_3 is not None:\n arg_10['includePending'] = arg_0._serialize.query(\"include_pending\", arg_3, 'bool')\n arg_10['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n else:\n arg_8 = arg_7\n arg_10 = {}\n\n # Construct headers\n arg_11 = {}\n arg_11['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_11['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_4:\n arg_11.update(arg_4)\n if arg_0.config.accept_language is not None:\n arg_11['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct and send request\n arg_12 = arg_0._client.get(arg_8, arg_10)\n arg_13 = arg_0._client.send(\n arg_12, arg_11, stream=False, **arg_6)\n\n if arg_13.status_code not in [200]:\n raise models.KeyVaultErrorException(arg_0._deserialize, arg_13)\n\n return arg_13\n\n # Deserialize response\n arg_14 = models.CertificateItemPaged(internal_paging, arg_0._deserialize.dependencies)\n\n if arg_5:\n arg_15 = {}\n arg_16 = models.CertificateItemPaged(internal_paging, arg_0._deserialize.dependencies, arg_15)\n return arg_16\n\n return arg_14"} +{"_id": "doc_1340", "title": "", "text": "def Func(arg_0):\n '''\n Get list of available service bus regions.\n '''\n arg_1 = arg_0._perform_get(\n arg_0._get_path('services/serviceBus/Regions/', None),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_1,\n _ServiceBusManagementXmlSerializer.xml_to_region)"} +{"_id": "doc_1341", "title": "", "text": "def Func(arg_0):\n '''\n List the service bus namespaces defined on the account.\n '''\n arg_1 = arg_0._perform_get(\n arg_0._get_path('services/serviceBus/Namespaces/', None),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_1,\n _ServiceBusManagementXmlSerializer.xml_to_namespace)"} +{"_id": "doc_1342", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Create a new service bus namespace.\n\n name:\n Name of the service bus namespace to create.\n region:\n Region to create the namespace in.\n '''\n _validate_not_none('name', arg_1)\n\n return arg_0._perform_put(\n arg_0._get_path('services/serviceBus/Namespaces', arg_1),\n _ServiceBusManagementXmlSerializer.namespace_to_xml(arg_2))"} +{"_id": "doc_1343", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Checks to see if the specified service bus namespace is 
available, or\n if it has already been taken.\n\n name:\n Name of the service bus namespace to validate.\n '''\n _validate_not_none('name', arg_1)\n\n arg_2 = arg_0._perform_get(\n arg_0._get_path('services/serviceBus/CheckNamespaceAvailability',\n None) + '/?namespace=' + _str(arg_1), None)\n\n return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(\n arg_2.body)"} +{"_id": "doc_1344", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Retrieves the topics in the service namespace.\n\n name:\n Name of the service bus namespace.\n '''\n arg_2 = arg_0._perform_get(\n arg_0._get_Func_path(arg_1),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_2,\n partial(\n _MinidomXmlToObject.convert_xml_to_azure_object,\n azure_type=TopicDescription\n )\n )"} +{"_id": "doc_1345", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Retrieves the notification hubs in the service namespace.\n\n name:\n Name of the service bus namespace.\n '''\n arg_2 = arg_0._perform_get(\n arg_0._get_Func_path(arg_1),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_2,\n partial(\n _MinidomXmlToObject.convert_xml_to_azure_object,\n azure_type=NotificationHubDescription\n )\n )"} +{"_id": "doc_1346", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Retrieves the relays in the service namespace.\n\n name:\n Name of the service bus namespace.\n '''\n arg_2 = arg_0._perform_get(\n arg_0._get_Func_path(arg_1),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_2,\n partial(\n _MinidomXmlToObject.convert_xml_to_azure_object,\n azure_type=RelayDescription\n )\n )"} +{"_id": "doc_1347", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n This operation gets rollup data for Service Bus metrics queue.\n Rollup data includes the time granularity for the telemetry aggregation as well as\n the retention settings for each time granularity.\n\n name:\n Name of the service bus namespace.\n queue_name:\n Name of the service bus queue in this namespace.\n metric:\n name of a supported metric\n '''\n arg_4 = arg_0._perform_get(\n arg_0._get_get_metrics_rollup_queue_path(arg_1, arg_2, arg_3),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_4,\n partial(\n _ServiceBusManagementXmlSerializer.xml_to_metrics,\n object_type=MetricRollups\n )\n )"} +{"_id": "doc_1348", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n This operation gets rollup data for Service Bus metrics topic.\n Rollup data includes the time granularity for the telemetry aggregation as well as\n the retention settings for each time granularity.\n\n name:\n Name of the service bus namespace.\n topic_name:\n Name of the service bus queue in this namespace.\n metric:\n name of a supported metric\n '''\n arg_4 = arg_0._perform_get(\n arg_0._get_get_metrics_rollup_topic_path(arg_1, arg_2, arg_3),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_4,\n partial(\n _ServiceBusManagementXmlSerializer.xml_to_metrics,\n object_type=MetricRollups\n )\n )"} +{"_id": "doc_1349", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n This operation gets rollup data for Service Bus metrics relay.\n Rollup data includes the time granularity for the telemetry aggregation as well as\n the retention settings for each time granularity.\n\n name:\n Name of the service bus namespace.\n relay_name:\n Name of the service bus relay in this namespace.\n metric:\n name of a supported metric\n '''\n arg_4 = 
arg_0._perform_get(\n arg_0._get_get_metrics_rollup_relay_path(arg_1, arg_2, arg_3),\n None)\n\n return _MinidomXmlToObject.convert_response_to_feeds(\n arg_4,\n partial(\n _ServiceBusManagementXmlSerializer.xml_to_metrics,\n object_type=MetricRollups\n )\n )"} +{"_id": "doc_1350", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False,\n arg_3=False, arg_4=False, arg_5=None):\n \"\"\"Create a virtual environment in a directory.\"\"\"\n arg_6 = ExtendedEnvBuilder(arg_1=arg_1,\n arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5)\n arg_6.Func(arg_0)\n return arg_6.context"} +{"_id": "doc_1351", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Create a new Azure SQL Database server.\n\n admin_login:\n The administrator login name for the new server.\n admin_password:\n The administrator login password for the new server.\n location:\n The region to deploy the new server.\n '''\n _validate_not_none('admin_login', arg_1)\n _validate_not_none('admin_password', arg_2)\n _validate_not_none('location', arg_3)\n arg_4 = arg_0.perform_post(\n arg_0._get_servers_path(),\n _SqlManagementXmlSerializer.Func_to_xml(\n arg_1,\n arg_2,\n arg_3\n )\n )\n\n return _SqlManagementXmlSerializer.xml_to_Func_response(\n arg_4.body)"} +{"_id": "doc_1352", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Reset the administrator password for a server.\n\n server_name:\n Name of the server to change the password.\n admin_password:\n The new administrator password for the server.\n '''\n _validate_not_none('server_name', arg_1)\n _validate_not_none('admin_password', arg_2)\n return arg_0._perform_post(\n arg_0._get_servers_path(arg_1) + '?op=ResetPassword',\n _SqlManagementXmlSerializer.Func_to_xml(\n arg_2\n )\n )"} +{"_id": "doc_1353", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Gets quotas for an Azure SQL Database Server.\n\n server_name:\n Name of the server.\n '''\n _validate_not_none('server_name', arg_1)\n arg_2 = arg_0._perform_get(arg_0._get_quotas_path(arg_1),\n None)\n return _MinidomXmlToObject.parse_service_resources_response(\n arg_2, ServerQuota)"} +{"_id": "doc_1354", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4=''):\n '''\n Gets the event logs for an Azure SQL Database Server.\n\n server_name:\n Name of the server to retrieve the event logs from.\n start_date:\n The starting date and time of the events to retrieve in UTC format,\n for example '2011-09-28 16:05:00'.\n interval_size_in_minutes:\n Size of the event logs to retrieve (in minutes).\n Valid values are: 5, 60, or 1440.\n event_types:\n The event type of the log entries you want to retrieve.\n Valid values are: \n - connection_successful\n - connection_failed\n - connection_terminated\n - deadlock\n - throttling\n - throttling_long_transaction\n To return all event types pass in an empty string.\n '''\n _validate_not_none('server_name', arg_1)\n _validate_not_none('start_date', arg_2)\n _validate_not_none('interval_size_in_minutes', arg_3)\n _validate_not_none('event_types', arg_4)\n arg_5 = arg_0._Func_path(arg_1) + \\\n '?startDate={0}&intervalSizeInMinutes={1}&eventTypes={2}'.format(\n arg_2, arg_3, arg_4)\n arg_6 = arg_0._perform_get(arg_5, None)\n return _MinidomXmlToObject.parse_service_resources_response(\n arg_6, EventLog)"} +{"_id": "doc_1355", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4=None, arg_5=None,\n arg_6=None):\n '''\n Updates existing database details.\n\n server_name:\n Name of the server to contain the new database.\n 
name:\n Required. The name for the new database. See Naming Requirements\n in Azure SQL Database General Guidelines and Limitations and\n Database Identifiers for more information.\n new_database_name:\n Optional. The new name for the new database.\n service_objective_id:\n Optional. The new service level to apply to the database. For more\n information about service levels, see Azure SQL Database Service\n Tiers and Performance Levels. Use List Service Level Objectives to\n get the correct ID for the desired service objective.\n edition:\n Optional. The new edition for the new database.\n max_size_bytes:\n Optional. The new size of the database in bytes. For information on\n available sizes for each edition, see Azure SQL Database Service\n Tiers (Editions).\n '''\n _validate_not_none('server_name', arg_1)\n _validate_not_none('name', arg_2)\n return arg_0._perform_put(\n arg_0._get_databases_path(arg_1, arg_2),\n _SqlManagementXmlSerializer.Func_to_xml(\n arg_3, arg_4, arg_5,\n arg_6\n )\n )"} +{"_id": "doc_1356", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Deletes an Azure SQL Database.\n\n server_name:\n Name of the server where the database is located.\n name:\n Name of the database to delete.\n '''\n return arg_0._perform_delete(arg_0._get_databases_path(arg_1, arg_2))"} +{"_id": "doc_1357", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n List the SQL databases defined on the specified server name\n '''\n arg_2 = arg_0._perform_get(arg_0._get_Func_path(arg_1),\n None)\n return _MinidomXmlToObject.parse_service_resources_response(\n arg_2, Database)"} +{"_id": "doc_1358", "title": "", "text": "async def Func(arg_0, arg_1=None):\n \"\"\"Close down the handler connection.\n\n If the handler has already Funcd,\n this operation will do nothing. An optional exception can be passed in to\n indicate that the handler was shutdown due to error.\n It is recommended to open a handler within a context manager as\n opposed to calling the method directly.\n\n .. note:: This operation is not thread-safe.\n\n :param exception: An optional exception if the handler is closing\n due to an error.\n :type exception: Exception\n\n Example:\n .. literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START open_Func_sender_directly]\n :end-before: [END open_Func_sender_directly]\n :language: python\n :dedent: 4\n :caption: Explicitly open and Func a Sender.\n\n \"\"\"\n arg_0.running = False\n if arg_0.error:\n return\n if isinstance(arg_1, ServiceBusError):\n arg_0.error = arg_1\n elif arg_1:\n arg_0.error = ServiceBusError(str(arg_1))\n else:\n arg_0.error = ServiceBusError(\"This message handler is now Funcd.\")\n await arg_0._handler.Func_async()"} +{"_id": "doc_1359", "title": "", "text": "async def Func(arg_0, arg_1=None):\n \"\"\"Close down the receiver connection.\n\n If the receiver has already Funcd, this operation will do nothing. An optional\n exception can be passed in to indicate that the handler was shutdown due to error.\n It is recommended to open a handler within a context manager as\n opposed to calling the method directly.\n The receiver will be implicitly Funcd on completion of the message iterator,\n however this method will need to be called explicitly if the message iterator is not run\n to completion.\n\n .. note:: This operation is not thread-safe.\n\n :param exception: An optional exception if the handler is closing\n due to an error.\n :type exception: Exception\n\n Example:\n .. 
literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START open_Func_receiver_directly]\n :end-before: [END open_Func_receiver_directly]\n :language: python\n :dedent: 4\n :caption: Iterate then explicitly Func a Receiver.\n\n \"\"\"\n if not arg_0.running:\n return\n arg_0.running = False\n arg_0.receiver_shutdown = True\n arg_0._used.set()\n await super(Receiver, arg_0).Func(arg_1=arg_1)"} +{"_id": "doc_1360", "title": "", "text": "async def Func(arg_0):\n \"\"\"Get the session state.\n\n Returns None if no state has been set.\n\n :rtype: str\n\n Example:\n .. literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START set_session_state]\n :end-before: [END set_session_state]\n :language: python\n :dedent: 4\n :caption: Getting and setting the state of a session.\n\n \"\"\"\n await arg_0._can_run()\n arg_1 = await arg_0._mgmt_request_response(\n REQUEST_RESPONSE_GET_SESSION_STATE_OPERATION,\n {'session-id': arg_0.session_id},\n mgmt_handlers.default)\n arg_2 = arg_1.get(b'session-state')\n if isinstance(arg_2, six.binary_type):\n arg_2 = arg_2.decode('UTF-8')\n return arg_2"} +{"_id": "doc_1361", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Verifies that the challenge is a Bearer challenge and returns the key=value pairs. \"\"\"\n arg_2 = 'Bearer '\n if not arg_1:\n raise ValueError('Challenge cannot be empty')\n\n arg_1 = arg_1.strip()\n if not arg_1.startswith(arg_2):\n raise ValueError('Challenge is not Bearer')\n\n return arg_1[len(arg_2):]"} +{"_id": "doc_1362", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None, arg_6=False, arg_7=True, **arg_8):\n \"\"\"Purges data in an Log Analytics workspace by a set of user-defined\n filters.\n\n :param resource_group_name: The name of the resource group to get. 
The\n name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: Log Analytics workspace name\n :type workspace_name: str\n :param table: Table from which to Func data.\n :type table: str\n :param filters: The set of columns and filters (queries) to run over\n them to Func the resulting data.\n :type filters:\n list[~azure.mgmt.loganalytics.models.WorkspacePurgeBodyFilters]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns object or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[object] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[object]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_9 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=True,\n **arg_8\n )\n\n def get_long_running_output(arg_10):\n arg_11 = arg_0._deserialize('object', arg_10)\n\n if arg_6:\n arg_12 = ClientRawResponse(arg_11, arg_10)\n return arg_12\n\n return arg_11\n\n arg_13 = arg_8.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_7 is True: arg_14 = ARMPolling(arg_13, **arg_8)\n elif arg_7 is False: arg_14 = NoPolling()\n else: arg_14 = arg_7\n return LROPoller(arg_0._client, arg_9, get_long_running_output, arg_14)"} +{"_id": "doc_1363", "title": "", "text": "def Func(arg_0):\n \"\"\"Handle connection and service errors.\n\n Called internally when an event has failed to send so we\n can parse the error to determine whether we should attempt\n to retry sending the event again.\n Returns the action to take according to error type.\n\n :param error: The error received in the send attempt.\n :type error: Exception\n :rtype: ~uamqp.errors.ErrorAction\n \"\"\"\n if arg_0.condition == b'com.microsoft:server-busy':\n return errors.ErrorAction(retry=True, backoff=4)\n if arg_0.condition == b'com.microsoft:timeout':\n return errors.ErrorAction(retry=True, backoff=2)\n if arg_0.condition == b'com.microsoft:operation-cancelled':\n return errors.ErrorAction(retry=True)\n if arg_0.condition == b\"com.microsoft:container-close\":\n return errors.ErrorAction(retry=True, backoff=4)\n if arg_0.condition in _NO_RETRY_ERRORS:\n return errors.ErrorAction(retry=False)\n return errors.ErrorAction(retry=True)"} +{"_id": "doc_1364", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''\n Deletes an existing queue. 
This operation will also remove all\n associated state including messages in the queue.\n\n queue_name:\n Name of the queue to delete.\n fail_not_exist:\n Specify whether to throw an exception if the queue doesn't exist.\n '''\n _validate_not_none('queue_name', arg_1)\n arg_3 = HTTPRequest()\n arg_3.method = 'DELETE'\n arg_3.host = arg_0._get_host()\n arg_3.path = '/' + _str(arg_1) + ''\n arg_3.path, arg_3.query = arg_0._httpclient._update_request_uri_query(arg_3) # pylint: disable=protected-access\n arg_3.headers = arg_0._update_service_bus_header(arg_3)\n if not arg_2:\n try:\n arg_0._perform_request(arg_3)\n return True\n except AzureHttpError as ex:\n _dont_fail_not_exist(ex)\n return False\n else:\n arg_0._perform_request(arg_3)\n return True"} +{"_id": "doc_1365", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Retrieves an existing queue.\n\n queue_name:\n Name of the queue.\n '''\n _validate_not_none('queue_name', arg_1)\n arg_2 = HTTPRequest()\n arg_2.method = 'GET'\n arg_2.host = arg_0._get_host()\n arg_2.path = '/' + _str(arg_1) + ''\n arg_2.path, arg_2.query = arg_0._httpclient._update_request_uri_query(arg_2) # pylint: disable=protected-access\n arg_2.headers = arg_0._update_service_bus_header(arg_2)\n arg_8 = arg_0._perform_request(arg_2)\n\n return _convert_response_to_queue(arg_8)"} +{"_id": "doc_1366", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n '''\n Creates a new topic. Once created, this topic resource manifest is\n immutable.\n\n topic_name:\n Name of the topic to create.\n topic:\n Topic object to create.\n fail_on_exist:\n Specify whether to throw an exception when the topic exists.\n '''\n _validate_not_none('topic_name', arg_1)\n arg_4 = HTTPRequest()\n arg_4.method = 'PUT'\n arg_4.host = arg_0._get_host()\n arg_4.path = '/' + _str(arg_1) + ''\n arg_4.body = _get_request_body(_convert_topic_to_xml(arg_2))\n arg_4.path, arg_4.query = arg_0._httpclient._update_request_uri_query(arg_4) # pylint: disable=protected-access\n arg_4.headers = arg_0._update_service_bus_header(arg_4)\n if not arg_3:\n try:\n arg_0._perform_request(arg_4)\n return True\n except AzureHttpError as ex:\n _dont_fail_on_exist(ex)\n return False\n else:\n arg_0._perform_request(arg_4)\n return True"} +{"_id": "doc_1367", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=False):\n '''\n Creates a new rule. 
Once created, this rule's resource manifest is\n immutable.\n\n topic_name:\n Name of the topic.\n subscription_name:\n Name of the subscription.\n rule_name:\n Name of the rule.\n fail_on_exist:\n Specify whether to throw an exception when the rule exists.\n '''\n _validate_not_none('topic_name', arg_1)\n _validate_not_none('subscription_name', arg_2)\n _validate_not_none('rule_name', arg_3)\n arg_6 = HTTPRequest()\n arg_6.method = 'PUT'\n arg_6.host = arg_0._get_host()\n arg_6.path = '/' + _str(arg_1) + '/subscriptions/' + \\\n _str(arg_2) + \\\n '/rules/' + _str(arg_3) + ''\n arg_6.body = _get_request_body(_convert_rule_to_xml(arg_4))\n arg_6.path, arg_6.query = arg_0._httpclient._update_request_uri_query(arg_6) # pylint: disable=protected-access\n arg_6.headers = arg_0._update_service_bus_header(arg_6)\n if not arg_5:\n try:\n arg_0._perform_request(arg_6)\n return True\n except AzureHttpError as ex:\n _dont_fail_on_exist(ex)\n return False\n else:\n arg_0._perform_request(arg_6)\n return True"} +{"_id": "doc_1368", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Retrieves the description for the specified rule.\n\n topic_name:\n Name of the topic.\n subscription_name:\n Name of the subscription.\n rule_name:\n Name of the rule.\n '''\n _validate_not_none('topic_name', arg_1)\n _validate_not_none('subscription_name', arg_2)\n _validate_not_none('rule_name', arg_3)\n arg_4 = HTTPRequest()\n arg_4.method = 'GET'\n arg_4.host = arg_0._get_host()\n arg_4.path = '/' + _str(arg_1) + '/subscriptions/' + \\\n _str(arg_2) + \\\n '/rules/' + _str(arg_3) + ''\n arg_4.path, arg_4.query = arg_0._httpclient._update_request_uri_query(arg_4) # pylint: disable=protected-access\n arg_4.headers = arg_0._update_service_bus_header(arg_4)\n arg_10 = arg_0._perform_request(arg_4)\n\n return _convert_response_to_rule(arg_10)"} +{"_id": "doc_1369", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Retrieves the rules that exist under the specified subscription.\n\n topic_name:\n Name of the topic.\n subscription_name:\n Name of the subscription.\n '''\n _validate_not_none('topic_name', arg_1)\n _validate_not_none('subscription_name', arg_2)\n arg_3 = HTTPRequest()\n arg_3.method = 'GET'\n arg_3.host = arg_0._get_host()\n arg_3.path = '/' + \\\n _str(arg_1) + '/subscriptions/' + \\\n _str(arg_2) + '/rules/'\n arg_3.path, arg_3.query = arg_0._httpclient._update_request_uri_query(arg_3) # pylint: disable=protected-access\n arg_3.headers = arg_0._update_service_bus_header(arg_3)\n arg_9 = arg_0._perform_request(arg_3)\n\n return _ETreeXmlToObject.convert_response_to_feeds(\n arg_9, _convert_etree_element_to_rule)"} +{"_id": "doc_1370", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=None, arg_4=False):\n '''\n Creates a new subscription. 
Once created, this subscription resource\n manifest is immutable.\n\n topic_name:\n Name of the topic.\n subscription_name:\n Name of the subscription.\n fail_on_exist:\n Specify whether throw exception when subscription exists.\n '''\n _validate_not_none('topic_name', arg_1)\n _validate_not_none('subscription_name', arg_2)\n arg_5 = HTTPRequest()\n arg_5.method = 'PUT'\n arg_5.host = arg_0._get_host()\n arg_5.path = '/' + \\\n _str(arg_1) + '/subscriptions/' + _str(arg_2) + ''\n arg_5.body = _get_request_body(\n _convert_subscription_to_xml(arg_3))\n arg_5.path, arg_5.query = arg_0._httpclient._update_request_uri_query(arg_5) # pylint: disable=protected-access\n arg_5.headers = arg_0._update_service_bus_header(arg_5)\n if not arg_4:\n try:\n arg_0._perform_request(arg_5)\n return True\n except AzureHttpError as ex:\n _dont_fail_on_exist(ex)\n return False\n else:\n arg_0._perform_request(arg_5)\n return True"} +{"_id": "doc_1371", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Gets an existing subscription.\n\n topic_name:\n Name of the topic.\n subscription_name:\n Name of the subscription.\n '''\n _validate_not_none('topic_name', arg_1)\n _validate_not_none('subscription_name', arg_2)\n arg_3 = HTTPRequest()\n arg_3.method = 'GET'\n arg_3.host = arg_0._get_host()\n arg_3.path = '/' + \\\n _str(arg_1) + '/subscriptions/' + _str(arg_2) + ''\n arg_3.path, arg_3.query = arg_0._httpclient._update_request_uri_query(arg_3) # pylint: disable=protected-access\n arg_3.headers = arg_0._update_service_bus_header(arg_3)\n arg_9 = arg_0._perform_request(arg_3)\n\n return _convert_response_to_subscription(arg_9)"} +{"_id": "doc_1372", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''\n Enqueues a message into the specified topic. The limit to the number\n of messages which may be present in the topic is governed by the\n message size in MaxTopicSizeInBytes. If this message causes the topic\n to exceed its quota, a quota exceeded error is returned and the\n message will be rejected.\n\n topic_name:\n Name of the topic.\n message:\n Message object containing message body and properties.\n '''\n _validate_not_none('topic_name', arg_1)\n _validate_not_none('message', arg_2)\n arg_3 = HTTPRequest()\n arg_3.method = 'POST'\n arg_3.host = arg_0._get_host()\n arg_3.path = '/' + _str(arg_1) + '/messages'\n arg_3.headers = arg_2.add_headers(arg_3)\n arg_3.body = _get_request_body(arg_2.body)\n arg_3.path, arg_3.query = arg_0._httpclient._update_request_uri_query(arg_3) # pylint: disable=protected-access\n arg_3.headers = arg_0._update_service_bus_header(arg_3)\n arg_0._perform_request(arg_3)"} +{"_id": "doc_1373", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''\n Unlocks a message for processing by other receivers on a given\n queue. This operation deletes the lock object, causing the\n message to be unlocked. 
A message must have first been locked by a\n receiver before this operation is called.\n\n queue_name:\n Name of the queue.\n sequence_number:\n The sequence number of the message to be unlocked as returned in\n BrokerProperties['SequenceNumber'] by the Peek Message operation.\n lock_token:\n The ID of the lock as returned by the Peek Message operation in\n BrokerProperties['LockToken']\n '''\n _validate_not_none('queue_name', arg_1)\n _validate_not_none('sequence_number', arg_2)\n _validate_not_none('lock_token', arg_3)\n arg_4 = HTTPRequest()\n arg_4.method = 'PUT'\n arg_4.host = arg_0._get_host()\n arg_4.path = '/' + _str(arg_1) + \\\n '/messages/' + _str(arg_2) + \\\n '/' + _str(arg_3) + ''\n arg_4.path, arg_4.query = arg_0._httpclient._update_request_uri_query(arg_4) # pylint: disable=protected-access\n arg_4.headers = arg_0._update_service_bus_header(arg_4)\n arg_0._perform_request(arg_4)"} +{"_id": "doc_1374", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=60):\n '''\n Receive a message from a queue for processing.\n\n queue_name:\n Name of the queue.\n peek_lock:\n Optional. True to retrieve and lock the message. False to read and\n delete the message. Default is True (lock).\n timeout:\n Optional. The timeout parameter is expressed in seconds.\n '''\n if arg_2:\n return arg_0.peek_lock_queue_message(arg_1, arg_3)\n return arg_0.read_delete_queue_message(arg_1, arg_3)"} +{"_id": "doc_1375", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=True, arg_4=60):\n '''\n Receive a message from a subscription for processing.\n\n topic_name:\n Name of the topic.\n subscription_name:\n Name of the subscription.\n peek_lock:\n Optional. True to retrieve and lock the message. False to read and\n delete the message. Default is True (lock).\n timeout:\n Optional. The timeout parameter is expressed in seconds.\n '''\n if arg_3:\n return arg_0.peek_lock_subscription_message(arg_1,\n arg_2,\n arg_4)\n return arg_0.read_delete_subscription_message(arg_1,\n arg_2,\n arg_4)"} +{"_id": "doc_1376", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n '''\n Creates a new Event Hub.\n\n hub_name:\n Name of event hub.\n hub:\n Optional. Event hub properties. 
Instance of EventHub class.\n hub.message_retention_in_days:\n Number of days to retain the events for this Event Hub.\n hub.status:\n Status of the Event Hub (enabled or disabled).\n hub.user_metadata:\n User metadata.\n hub.partition_count:\n Number of shards on the Event Hub.\n fail_on_exist:\n Specify whether to throw an exception when the event hub exists.\n '''\n _validate_not_none('hub_name', arg_1)\n arg_4 = HTTPRequest()\n arg_4.method = 'PUT'\n arg_4.host = arg_0._get_host()\n arg_4.path = '/' + _str(arg_1) + '?api-version=2014-01'\n arg_4.body = _get_request_body(_convert_event_hub_to_xml(arg_2))\n arg_4.path, arg_4.query = arg_0._httpclient._update_request_uri_query(arg_4) # pylint: disable=protected-access\n arg_4.headers = arg_0._update_service_bus_header(arg_4)\n if not arg_3:\n try:\n arg_0._perform_request(arg_4)\n return True\n except AzureHttpError as ex:\n _dont_fail_on_exist(ex)\n return False\n else:\n arg_0._perform_request(arg_4)\n return True"} +{"_id": "doc_1377", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Retrieves an existing event hub.\n\n hub_name:\n Name of the event hub.\n '''\n _validate_not_none('hub_name', arg_1)\n arg_2 = HTTPRequest()\n arg_2.method = 'GET'\n arg_2.host = arg_0._get_host()\n arg_2.path = '/' + _str(arg_1) + ''\n arg_2.path, arg_2.query = arg_0._httpclient._update_request_uri_query(arg_2) # pylint: disable=protected-access\n arg_2.headers = arg_0._update_service_bus_header(arg_2)\n arg_8 = arg_0._perform_request(arg_2)\n\n return _convert_response_to_event_hub(arg_8)"} +{"_id": "doc_1378", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Add additional headers for Service Bus. '''\n\n if arg_1.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n arg_1.headers.append(('Content-Length', str(len(arg_1.body))))\n\n # if it is not GET or HEAD request, must set content-type.\n if not arg_1.method in ['GET', 'HEAD']:\n for arg_2, arg_3 in arg_1.headers:\n if arg_2.lower() == 'content-type':\n break\n else:\n arg_1.headers.append(\n ('Content-Type',\n 'application/atom+xml;type=entry;charset=utf-8'))\n\n # Adds authorization header for authentication.\n arg_0.authentication.sign_request(arg_1, arg_0._httpclient)\n\n return arg_1.headers"} +{"_id": "doc_1379", "title": "", "text": "def Func(arg_0, arg_1): # pylint: disable=no-self-use\n ''' Check if token expires or not. 
'''\n arg_2 = arg_1.find('ExpiresOn=') + len('ExpiresOn=')\n arg_3 = arg_1.find('&', arg_2)\n arg_4 = int(arg_1[arg_2:arg_3])\n arg_5 = time.mktime(time.localtime())\n\n # Adding 30 seconds so the token wouldn't be expired when we send the\n # token to server.\n return (arg_4 - arg_5) < 30"} +{"_id": "doc_1380", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=False, arg_7=True, **arg_8):\n \"\"\"Reset Service Principal Profile of a managed cluster.\n\n Update the service principal Profile for a managed cluster.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :param client_id: The ID for the service principal.\n :type client_id: str\n :param secret: The secret password associated with the service\n principal in plain text.\n :type secret: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_9 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=True,\n **arg_8\n )\n\n def get_long_running_output(arg_10):\n if arg_6:\n arg_11 = ClientRawResponse(None, arg_10)\n return arg_11\n\n arg_12 = arg_8.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_7 is True: arg_13 = ARMPolling(arg_12, **arg_8)\n elif arg_7 is False: arg_13 = NoPolling()\n else: arg_13 = arg_7\n return LROPoller(arg_0._client, arg_9, get_long_running_output, arg_13)"} +{"_id": "doc_1381", "title": "", "text": "def Func(arg_0):\n ''' Deletes itself if find queue name or topic name and subscription\n name. '''\n if arg_0._queue_name:\n arg_0.service_bus_service.Func_queue_message(\n arg_0._queue_name,\n arg_0.broker_properties['SequenceNumber'],\n arg_0.broker_properties['LockToken'])\n elif arg_0._topic_name and arg_0._subscription_name:\n arg_0.service_bus_service.Func_subscription_message(\n arg_0._topic_name,\n arg_0._subscription_name,\n arg_0.broker_properties['SequenceNumber'],\n arg_0.broker_properties['LockToken'])\n else:\n raise AzureServiceBusPeekLockError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)"} +{"_id": "doc_1382", "title": "", "text": "def Func(arg_0):\n ''' Unlocks itself if find queue name or topic name and subscription\n name. '''\n if arg_0._queue_name:\n arg_0.service_bus_service.Func_queue_message(\n arg_0._queue_name,\n arg_0.broker_properties['SequenceNumber'],\n arg_0.broker_properties['LockToken'])\n elif arg_0._topic_name and arg_0._subscription_name:\n arg_0.service_bus_service.Func_subscription_message(\n arg_0._topic_name,\n arg_0._subscription_name,\n arg_0.broker_properties['SequenceNumber'],\n arg_0.broker_properties['LockToken'])\n else:\n raise AzureServiceBusPeekLockError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)"} +{"_id": "doc_1383", "title": "", "text": "def Func(arg_0):\n ''' Renew lock on itself if find queue name or topic name and subscription\n name. 
'''\n if arg_0._queue_name:\n arg_0.service_bus_service.Func_queue_message(\n arg_0._queue_name,\n arg_0.broker_properties['SequenceNumber'],\n arg_0.broker_properties['LockToken'])\n elif arg_0._topic_name and arg_0._subscription_name:\n arg_0.service_bus_service.Func_subscription_message(\n arg_0._topic_name,\n arg_0._subscription_name,\n arg_0.broker_properties['SequenceNumber'],\n arg_0.broker_properties['LockToken'])\n else:\n raise AzureServiceBusPeekLockError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_RENEW_LOCK)"} +{"_id": "doc_1384", "title": "", "text": "def Func(arg_0, arg_1):\n ''' add addtional headers to request for message request.'''\n\n # Adds custom properties\n if arg_0.custom_properties:\n for arg_2, arg_3 in arg_0.custom_properties.items():\n arg_1.headers.append((arg_2, arg_0._serialize_escaped_properties_value(arg_3)))\n\n # Adds content-type\n arg_1.headers.append(('Content-Type', arg_0.type))\n\n # Adds BrokerProperties\n if arg_0.broker_properties:\n if hasattr(arg_0.broker_properties, 'items'):\n arg_4 = {arg_2: arg_0._serialize_basic_properties_value(arg_3)\n for arg_2, arg_3\n in arg_0.broker_properties.items()}\n arg_4 = json.dumps(arg_4)\n else:\n arg_4 = arg_0.broker_properties\n arg_1.headers.append(\n ('BrokerProperties', str(arg_4)))\n\n return arg_1.headers"} +{"_id": "doc_1385", "title": "", "text": "def Func(arg_0):\n ''' return the current message as expected by batch body format'''\n if sys.version_info >= (3,) and isinstance(arg_0.body, bytes):\n # It HAS to be string to be serialized in JSON\n arg_1 = arg_0.body.decode('utf-8')\n else:\n # Python 2.7 people handle this themself\n arg_1 = arg_0.body\n arg_2 = {'Body': arg_1}\n\n # Adds custom properties\n if arg_0.custom_properties:\n arg_2['UserProperties'] = {name: arg_0._serialize_basic_properties_value(value)\n for name, value\n in arg_0.custom_properties.items()}\n\n # Adds BrokerProperties\n if arg_0.broker_properties:\n arg_2['BrokerProperties'] = {name: arg_0._serialize_basic_properties_value(value)\n for name, value\n in arg_0.broker_properties.items()}\n\n return arg_2"} +{"_id": "doc_1386", "title": "", "text": "def Func(\n arg_0, arg_1=0, arg_2=0, arg_3=0, arg_4=False, arg_5=False, arg_6=60, arg_7=None, arg_8=False, **arg_9):\n \"\"\"Gets the health of a Service Fabric cluster.\n\n Use EventsHealthStateFilter to filter the collection of health events\n reported on the cluster based on the health state.\n Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter\n to filter the collection of nodes and applications returned based on\n their aggregated health state.\n\n :param nodes_health_state_filter: Allows filtering of the node health\n state objects returned in the result of cluster health query\n based on their health state. The possible values for this parameter\n include integer value of one of the\n following health states. Only nodes that match the filter are\n returned. All nodes are used to evaluate the aggregated health state.\n If not specified, all entries are returned.\n The state values are flag-based enumeration, so the value could be a\n combination of these values obtained using bitwise 'OR' operator.\n For example, if the provided value is 6 then health state of nodes\n with HealthState value of OK (2) and Warning (4) are returned.\n - Default - Default value. Matches any HealthState. The value is zero.\n - None - Filter that doesn't match any HealthState value. Used in\n order to return no results on a given collection of states. 
The value\n is 1.\n - Ok - Filter that matches input with HealthState value Ok. The value\n is 2.\n - Warning - Filter that matches input with HealthState value Warning.\n The value is 4.\n - Error - Filter that matches input with HealthState value Error. The\n value is 8.\n - All - Filter that matches input with any HealthState value. The\n value is 65535.\n :type nodes_health_state_filter: int\n :param applications_health_state_filter: Allows filtering of the\n application health state objects returned in the result of cluster\n health\n query based on their health state.\n The possible values for this parameter include integer value obtained\n from members or bitwise operations\n on members of HealthStateFilter enumeration. Only applications that\n match the filter are returned.\n All applications are used to evaluate the aggregated health state. If\n not specified, all entries are returned.\n The state values are flag-based enumeration, so the value could be a\n combination of these values obtained using bitwise 'OR' operator.\n For example, if the provided value is 6 then health state of\n applications with HealthState value of OK (2) and Warning (4) are\n returned.\n - Default - Default value. Matches any HealthState. The value is zero.\n - None - Filter that doesn't match any HealthState value. Used in\n order to return no results on a given collection of states. The value\n is 1.\n - Ok - Filter that matches input with HealthState value Ok. The value\n is 2.\n - Warning - Filter that matches input with HealthState value Warning.\n The value is 4.\n - Error - Filter that matches input with HealthState value Error. The\n value is 8.\n - All - Filter that matches input with any HealthState value. The\n value is 65535.\n :type applications_health_state_filter: int\n :param events_health_state_filter: Allows filtering the collection of\n HealthEvent objects returned based on health state.\n The possible values for this parameter include integer value of one of\n the following health states.\n Only events that match the filter are returned. All events are used to\n evaluate the aggregated health state.\n If not specified, all entries are returned. The state values are\n flag-based enumeration, so the value could be a combination of these\n values, obtained using the bitwise 'OR' operator. For example, If the\n provided value is 6 then all of the events with HealthState value of\n OK (2) and Warning (4) are returned.\n - Default - Default value. Matches any HealthState. The value is zero.\n - None - Filter that doesn't match any HealthState value. Used in\n order to return no results on a given collection of states. The value\n is 1.\n - Ok - Filter that matches input with HealthState value Ok. The value\n is 2.\n - Warning - Filter that matches input with HealthState value Warning.\n The value is 4.\n - Error - Filter that matches input with HealthState value Error. The\n value is 8.\n - All - Filter that matches input with any HealthState value. The\n value is 65535.\n :type events_health_state_filter: int\n :param exclude_health_statistics: Indicates whether the health\n statistics should be returned as part of the query result. False by\n default.\n The statistics show the number of children entities in health state\n Ok, Warning, and Error.\n :type exclude_health_statistics: bool\n :param include_system_application_health_statistics: Indicates whether\n the health statistics should include the fabric:/System application\n health statistics. 
False by default.\n If IncludeSystemApplicationHealthStatistics is set to true, the health\n statistics include the entities that belong to the fabric:/System\n application.\n Otherwise, the query result includes health statistics only for user\n applications.\n The health statistics must be included in the query result for this\n parameter to be applied.\n :type include_system_application_health_statistics: bool\n :param timeout: The server timeout for performing the operation in\n seconds. This timeout specifies the time duration that the client is\n willing to wait for the requested operation to complete. The default\n value for this parameter is 60 seconds.\n :type timeout: long\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: ClusterHealth or ClientRawResponse if raw=true\n :rtype: ~azure.servicefabric.models.ClusterHealth or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`FabricErrorException`\n \"\"\"\n arg_10 = \"6.0\"\n\n # Construct URL\n arg_11 = arg_0.Func.metadata['url']\n\n # Construct parameters\n arg_12 = {}\n arg_12['api-version'] = arg_0._serialize.query(\"api_version\", arg_10, 'str')\n if arg_1 is not None:\n arg_12['NodesHealthStateFilter'] = arg_0._serialize.query(\"nodes_health_state_filter\", arg_1, 'int')\n if arg_2 is not None:\n arg_12['ApplicationsHealthStateFilter'] = arg_0._serialize.query(\"applications_health_state_filter\", arg_2, 'int')\n if arg_3 is not None:\n arg_12['EventsHealthStateFilter'] = arg_0._serialize.query(\"events_health_state_filter\", arg_3, 'int')\n if arg_4 is not None:\n arg_12['ExcludeHealthStatistics'] = arg_0._serialize.query(\"exclude_health_statistics\", arg_4, 'bool')\n if arg_5 is not None:\n arg_12['IncludeSystemApplicationHealthStatistics'] = arg_0._serialize.query(\"include_system_application_health_statistics\", arg_5, 'bool')\n if arg_6 is not None:\n arg_12['timeout'] = arg_0._serialize.query(\"timeout\", arg_6, 'long', maximum=4294967295, minimum=1)\n\n # Construct headers\n arg_13 = {}\n arg_13['Accept'] = 'application/json'\n if arg_7:\n arg_13.update(arg_7)\n\n # Construct and send request\n arg_14 = arg_0._client.get(arg_11, arg_12, arg_13)\n arg_15 = arg_0._client.send(arg_14, stream=False, **arg_9)\n\n if arg_15.status_code not in [200]:\n raise models.FabricErrorException(arg_0._deserialize, arg_15)\n\n arg_16 = None\n\n if arg_15.status_code == 200:\n arg_16 = arg_0._deserialize('ClusterHealth', arg_15)\n\n if arg_8:\n arg_17 = ClientRawResponse(arg_16, arg_15)\n return arg_17\n\n return arg_16"} +{"_id": "doc_1387", "title": "", "text": "def Func(\n arg_0, arg_1=0, arg_2=0, arg_3=0, arg_4=False, arg_5=False, arg_6=60, arg_7=None, arg_8=None, arg_9=None, arg_10=False, **arg_11):\n \"\"\"Gets the health of a Service Fabric cluster using the specified policy.\n\n Use EventsHealthStateFilter to filter the collection of health events\n reported on the cluster based on the health state.\n Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter\n to filter the collection of nodes and applications returned based on\n their aggregated health state.\n Use ClusterHealthPolicies to override the health policies used to\n evaluate the health.\n\n :param nodes_health_state_filter: Allows filtering of the node health\n state objects returned in the result of cluster health query\n based on their health state. 
The possible values for this parameter\n include integer value of one of the\n following health states. Only nodes that match the filter are\n returned. All nodes are used to evaluate the aggregated health state.\n If not specified, all entries are returned.\n The state values are flag-based enumeration, so the value could be a\n combination of these values obtained using bitwise 'OR' operator.\n For example, if the provided value is 6 then health state of nodes\n with HealthState value of OK (2) and Warning (4) are returned.\n - Default - Default value. Matches any HealthState. The value is zero.\n - None - Filter that doesn't match any HealthState value. Used in\n order to return no results on a given collection of states. The value\n is 1.\n - Ok - Filter that matches input with HealthState value Ok. The value\n is 2.\n - Warning - Filter that matches input with HealthState value Warning.\n The value is 4.\n - Error - Filter that matches input with HealthState value Error. The\n value is 8.\n - All - Filter that matches input with any HealthState value. The\n value is 65535.\n :type nodes_health_state_filter: int\n :param applications_health_state_filter: Allows filtering of the\n application health state objects returned in the result of cluster\n health\n query based on their health state.\n The possible values for this parameter include integer value obtained\n from members or bitwise operations\n on members of HealthStateFilter enumeration. Only applications that\n match the filter are returned.\n All applications are used to evaluate the aggregated health state. If\n not specified, all entries are returned.\n The state values are flag-based enumeration, so the value could be a\n combination of these values obtained using bitwise 'OR' operator.\n For example, if the provided value is 6 then health state of\n applications with HealthState value of OK (2) and Warning (4) are\n returned.\n - Default - Default value. Matches any HealthState. The value is zero.\n - None - Filter that doesn't match any HealthState value. Used in\n order to return no results on a given collection of states. The value\n is 1.\n - Ok - Filter that matches input with HealthState value Ok. The value\n is 2.\n - Warning - Filter that matches input with HealthState value Warning.\n The value is 4.\n - Error - Filter that matches input with HealthState value Error. The\n value is 8.\n - All - Filter that matches input with any HealthState value. The\n value is 65535.\n :type applications_health_state_filter: int\n :param events_health_state_filter: Allows filtering the collection of\n HealthEvent objects returned based on health state.\n The possible values for this parameter include integer value of one of\n the following health states.\n Only events that match the filter are returned. All events are used to\n evaluate the aggregated health state.\n If not specified, all entries are returned. The state values are\n flag-based enumeration, so the value could be a combination of these\n values, obtained using the bitwise 'OR' operator. For example, If the\n provided value is 6 then all of the events with HealthState value of\n OK (2) and Warning (4) are returned.\n - Default - Default value. Matches any HealthState. The value is zero.\n - None - Filter that doesn't match any HealthState value. Used in\n order to return no results on a given collection of states. The value\n is 1.\n - Ok - Filter that matches input with HealthState value Ok. 
The value\n is 2.\n - Warning - Filter that matches input with HealthState value Warning.\n The value is 4.\n - Error - Filter that matches input with HealthState value Error. The\n value is 8.\n - All - Filter that matches input with any HealthState value. The\n value is 65535.\n :type events_health_state_filter: int\n :param exclude_health_statistics: Indicates whether the health\n statistics should be returned as part of the query result. False by\n default.\n The statistics show the number of children entities in health state\n Ok, Warning, and Error.\n :type exclude_health_statistics: bool\n :param include_system_application_health_statistics: Indicates whether\n the health statistics should include the fabric:/System application\n health statistics. False by default.\n If IncludeSystemApplicationHealthStatistics is set to true, the health\n statistics include the entities that belong to the fabric:/System\n application.\n Otherwise, the query result includes health statistics only for user\n applications.\n The health statistics must be included in the query result for this\n parameter to be applied.\n :type include_system_application_health_statistics: bool\n :param timeout: The server timeout for performing the operation in\n seconds. This timeout specifies the time duration that the client is\n willing to wait for the requested operation to complete. The default\n value for this parameter is 60 seconds.\n :type timeout: long\n :param application_health_policy_map: Defines a map that contains\n specific application health policies for different applications.\n Each entry specifies as key the application name and as value an\n ApplicationHealthPolicy used to evaluate the application health.\n If an application is not specified in the map, the application health\n evaluation uses the ApplicationHealthPolicy found in its application\n manifest or the default application health policy (if no health policy\n is defined in the manifest).\n The map is empty by default.\n :type application_health_policy_map:\n list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem]\n :param cluster_health_policy: Defines a health policy used to evaluate\n the health of the cluster or of a cluster node.\n :type cluster_health_policy:\n ~azure.servicefabric.models.ClusterHealthPolicy\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: ClusterHealth or ClientRawResponse if raw=true\n :rtype: ~azure.servicefabric.models.ClusterHealth or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`FabricErrorException`\n \"\"\"\n arg_12 = None\n if arg_7 is not None or arg_8 is not None:\n arg_12 = models.ClusterHealthPolicies(arg_7=arg_7, arg_8=arg_8)\n\n arg_13 = \"6.0\"\n\n # Construct URL\n arg_14 = arg_0.Func.metadata['url']\n\n # Construct parameters\n arg_15 = {}\n arg_15['api-version'] = arg_0._serialize.query(\"api_version\", arg_13, 'str')\n if arg_1 is not None:\n arg_15['NodesHealthStateFilter'] = arg_0._serialize.query(\"nodes_health_state_filter\", arg_1, 'int')\n if arg_2 is not None:\n arg_15['ApplicationsHealthStateFilter'] = arg_0._serialize.query(\"applications_health_state_filter\", arg_2, 'int')\n if arg_3 is not None:\n arg_15['EventsHealthStateFilter'] = arg_0._serialize.query(\"events_health_state_filter\", arg_3, 'int')\n if arg_4 is not None:\n arg_15['ExcludeHealthStatistics'] = 
arg_0._serialize.query(\"exclude_health_statistics\", arg_4, 'bool')\n if arg_5 is not None:\n arg_15['IncludeSystemApplicationHealthStatistics'] = arg_0._serialize.query(\"include_system_application_health_statistics\", arg_5, 'bool')\n if arg_6 is not None:\n arg_15['timeout'] = arg_0._serialize.query(\"timeout\", arg_6, 'long', maximum=4294967295, minimum=1)\n\n # Construct headers\n arg_16 = {}\n arg_16['Accept'] = 'application/json'\n arg_16['Content-Type'] = 'application/json; charset=utf-8'\n if arg_9:\n arg_16.update(arg_9)\n\n # Construct body\n if arg_12 is not None:\n arg_17 = arg_0._serialize.body(arg_12, 'ClusterHealthPolicies')\n else:\n arg_17 = None\n\n # Construct and send request\n arg_18 = arg_0._client.post(arg_14, arg_15, arg_16, arg_17)\n arg_19 = arg_0._client.send(arg_18, stream=False, **arg_11)\n\n if arg_19.status_code not in [200]:\n raise models.FabricErrorException(arg_0._deserialize, arg_19)\n\n arg_20 = None\n\n if arg_19.status_code == 200:\n arg_20 = arg_0._deserialize('ClusterHealth', arg_19)\n\n if arg_10:\n arg_21 = ClientRawResponse(arg_20, arg_19)\n return arg_21\n\n return arg_20"} +{"_id": "doc_1388", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=60, arg_4=None, arg_5=None, arg_6=False, **arg_7):\n \"\"\"Removes or unregisters a Service Fabric application type from the\n cluster.\n\n This operation can only be performed if all application instances of\n the application type have been deleted. Once the application type is\n unregistered, no new application instances can be created for this\n particular application type.\n\n :param application_type_name: The name of the application type.\n :type application_type_name: str\n :param application_type_version: The version of the application type\n as defined in the application manifest.\n :type application_type_version: str\n :param timeout: The server timeout for performing the operation in\n seconds. This timeout specifies the time duration that the client is\n willing to wait for the requested operation to complete. The default\n value for this parameter is 60 seconds.\n :type timeout: long\n :param async_parameter: The flag indicating whether or not unprovision\n should occur asynchronously. When set to true, the unprovision\n operation returns when the request is accepted by the system, and the\n unprovision operation continues without any timeout limit. The default\n value is false. 
However, we recommend setting it to true for large\n application packages that were provisioned.\n :type async_parameter: bool\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: None or ClientRawResponse if raw=true\n :rtype: None or ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`FabricErrorException`\n \"\"\"\n arg_8 = models.UnprovisionApplicationTypeDescriptionInfo(arg_2=arg_2, async_property=arg_4)\n\n arg_9 = \"6.0\"\n\n # Construct URL\n arg_10 = arg_0.Func.metadata['url']\n arg_11 = {\n 'applicationTypeName': arg_0._serialize.url(\"application_type_name\", arg_1, 'str')\n }\n arg_10 = arg_0._client.format_url(arg_10, **arg_11)\n\n # Construct parameters\n arg_12 = {}\n arg_12['api-version'] = arg_0._serialize.query(\"api_version\", arg_9, 'str')\n if arg_3 is not None:\n arg_12['timeout'] = arg_0._serialize.query(\"timeout\", arg_3, 'long', maximum=4294967295, minimum=1)\n\n # Construct headers\n arg_13 = {}\n arg_13['Content-Type'] = 'application/json; charset=utf-8'\n if arg_5:\n arg_13.update(arg_5)\n\n # Construct body\n arg_14 = arg_0._serialize.body(arg_8, 'UnprovisionApplicationTypeDescriptionInfo')\n\n # Construct and send request\n arg_15 = arg_0._client.post(arg_10, arg_12, arg_13, arg_14)\n arg_16 = arg_0._client.send(arg_15, stream=False, **arg_7)\n\n if arg_16.status_code not in [200, 202]:\n raise models.FabricErrorException(arg_0._deserialize, arg_16)\n\n if arg_6:\n arg_17 = ClientRawResponse(None, arg_16)\n return arg_17"} +{"_id": "doc_1389", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=60, arg_3=None, arg_4=None, arg_5=False, **arg_6):\n \"\"\"Submits a property batch.\n\n Submits a batch of property operations. Either all or none of the\n operations will be committed.\n\n :param name_id: The Service Fabric name, without the 'fabric:' URI\n scheme.\n :type name_id: str\n :param timeout: The server timeout for performing the operation in\n seconds. This timeout specifies the time duration that the client is\n willing to wait for the requested operation to complete. 
The default\n value for this parameter is 60 seconds.\n :type timeout: long\n :param operations: A list of the property batch operations to be\n executed.\n :type operations:\n list[~azure.servicefabric.models.PropertyBatchOperation]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: PropertyBatchInfo or ClientRawResponse if raw=true\n :rtype: ~azure.servicefabric.models.PropertyBatchInfo or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`FabricErrorException`\n \"\"\"\n arg_7 = models.PropertyBatchDescriptionList(arg_3=arg_3)\n\n arg_8 = \"6.0\"\n\n # Construct URL\n arg_9 = arg_0.Func.metadata['url']\n arg_10 = {\n 'nameId': arg_0._serialize.url(\"name_id\", arg_1, 'str', skip_quote=True)\n }\n arg_9 = arg_0._client.format_url(arg_9, **arg_10)\n\n # Construct parameters\n arg_11 = {}\n arg_11['api-version'] = arg_0._serialize.query(\"api_version\", arg_8, 'str')\n if arg_2 is not None:\n arg_11['timeout'] = arg_0._serialize.query(\"timeout\", arg_2, 'long', maximum=4294967295, minimum=1)\n\n # Construct headers\n arg_12 = {}\n arg_12['Accept'] = 'application/json'\n arg_12['Content-Type'] = 'application/json; charset=utf-8'\n if arg_4:\n arg_12.update(arg_4)\n\n # Construct body\n arg_13 = arg_0._serialize.body(arg_7, 'PropertyBatchDescriptionList')\n\n # Construct and send request\n arg_14 = arg_0._client.post(arg_9, arg_11, arg_12, arg_13)\n arg_15 = arg_0._client.send(arg_14, stream=False, **arg_6)\n\n if arg_15.status_code not in [200, 409]:\n raise models.FabricErrorException(arg_0._deserialize, arg_15)\n\n arg_16 = None\n\n if arg_15.status_code == 200:\n arg_16 = arg_0._deserialize('SuccessfulPropertyBatchInfo', arg_15)\n if arg_15.status_code == 409:\n arg_16 = arg_0._deserialize('FailedPropertyBatchInfo', arg_15)\n\n if arg_5:\n arg_17 = ClientRawResponse(arg_16, arg_15)\n return arg_17\n\n return arg_16"} +{"_id": "doc_1390", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None, arg_6=None, arg_7=False, arg_8=True, **arg_9):\n \"\"\"Start capturing network packets for the site.\n\n Start capturing network packets for the site.\n\n :param resource_group_name: Name of the resource group to which the\n resource belongs.\n :type resource_group_name: str\n :param name: The name of the web app.\n :type name: str\n :param duration_in_seconds: The duration to keep capturing in seconds.\n :type duration_in_seconds: int\n :param max_frame_length: The maximum frame length in bytes (Optional).\n :type max_frame_length: int\n :param sas_url: The Blob URL to store capture file.\n :type sas_url: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns list or\n ClientRawResponse if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]]\n :raises:\n :class:`DefaultErrorResponseException`\n \"\"\"\n arg_10 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n 
arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=True,\n **arg_9\n )\n\n def get_long_running_output(arg_11):\n arg_12 = arg_0._deserialize('[NetworkTrace]', arg_11)\n\n if arg_7:\n arg_13 = ClientRawResponse(arg_12, arg_11)\n return arg_13\n\n return arg_12\n\n arg_14 = arg_9.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_8 is True: arg_15 = ARMPolling(arg_14, **arg_9)\n elif arg_8 is False: arg_15 = NoPolling()\n else: arg_15 = arg_8\n return LROPoller(arg_0._client, arg_10, get_long_running_output, arg_15)"} +{"_id": "doc_1391", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=None, arg_7=False, **arg_8):\n \"\"\"Get the difference in configuration settings between two web app slots.\n\n Get the difference in configuration settings between two web app slots.\n\n :param resource_group_name: Name of the resource group to which the\n resource belongs.\n :type resource_group_name: str\n :param name: Name of the app.\n :type name: str\n :param slot: Name of the source slot. If a slot is not specified, the\n production slot is used as the source slot.\n :type slot: str\n :param target_slot: Destination deployment slot during swap operation.\n :type target_slot: str\n :param preserve_vnet: true to preserve Virtual Network to\n the slot during swap; otherwise, false.\n :type preserve_vnet: bool\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: An iterator like instance of SlotDifference\n :rtype:\n ~azure.mgmt.web.models.SlotDifferencePaged[~azure.mgmt.web.models.SlotDifference]\n :raises:\n :class:`DefaultErrorResponseException`\n \"\"\"\n arg_9 = models.CsmSlotEntity(arg_4=arg_4, arg_5=arg_5)\n\n def internal_paging(arg_10=None, arg_7=False):\n\n if not arg_10:\n # Construct URL\n arg_11 = arg_0.Func.metadata['url']\n arg_12 = {\n 'resourceGroupName': arg_0._serialize.url(\"resource_group_name\", arg_1, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+[^\\.]$'),\n 'name': arg_0._serialize.url(\"name\", arg_2, 'str'),\n 'slot': arg_0._serialize.url(\"slot\", arg_3, 'str'),\n 'subscriptionId': arg_0._serialize.url(\"self.config.subscription_id\", arg_0.config.subscription_id, 'str')\n }\n arg_11 = arg_0._client.format_url(arg_11, **arg_12)\n\n # Construct parameters\n arg_13 = {}\n arg_13['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n else:\n arg_11 = arg_10\n arg_13 = {}\n\n # Construct headers\n arg_14 = {}\n arg_14['Accept'] = 'application/json'\n arg_14['Content-Type'] = 'application/json; charset=utf-8'\n if arg_0.config.generate_client_request_id:\n arg_14['x-ms-client-request-id'] = str(uuid.uuid1())\n if arg_6:\n arg_14.update(arg_6)\n if arg_0.config.accept_language is not None:\n arg_14['accept-language'] = arg_0._serialize.header(\"self.config.accept_language\", arg_0.config.accept_language, 'str')\n\n # Construct body\n arg_15 = arg_0._serialize.body(arg_9, 'CsmSlotEntity')\n\n # Construct and send request\n arg_16 = arg_0._client.post(arg_11, arg_13, arg_14, arg_15)\n arg_17 = arg_0._client.send(arg_16, stream=False, **arg_8)\n\n if arg_17.status_code not in [200]:\n raise models.DefaultErrorResponseException(arg_0._deserialize, arg_17)\n\n return arg_17\n\n # Deserialize response\n arg_18 = models.SlotDifferencePaged(internal_paging, 
arg_0._deserialize.dependencies)\n\n if arg_7:\n arg_19 = {}\n arg_20 = models.SlotDifferencePaged(internal_paging, arg_0._deserialize.dependencies, arg_19)\n return arg_20\n\n return arg_18"} +{"_id": "doc_1392", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=None, arg_7=False, arg_8=True, **arg_9):\n \"\"\"Swaps two deployment slots of an app.\n\n Swaps two deployment slots of an app.\n\n :param resource_group_name: Name of the resource group to which the\n resource belongs.\n :type resource_group_name: str\n :param name: Name of the app.\n :type name: str\n :param slot: Name of the source slot. If a slot is not specified, the\n production slot is used as the source slot.\n :type slot: str\n :param target_slot: Destination deployment slot during swap operation.\n :type target_slot: str\n :param preserve_vnet: true to preserve Virtual Network to\n the slot during swap; otherwise, false.\n :type preserve_vnet: bool\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_10 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=True,\n **arg_9\n )\n\n def get_long_running_output(arg_11):\n if arg_7:\n arg_12 = ClientRawResponse(None, arg_11)\n return arg_12\n\n arg_13 = arg_9.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_8 is True: arg_14 = ARMPolling(arg_13, **arg_9)\n elif arg_8 is False: arg_14 = NoPolling()\n else: arg_14 = arg_8\n return LROPoller(arg_0._client, arg_10, get_long_running_output, arg_14)"} +{"_id": "doc_1393", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None, arg_6=None, arg_7=None, arg_8=None, arg_9=None, arg_10=None, arg_11=None, arg_12=None, arg_13=None, arg_14=False, **arg_15):\n \"\"\"Execute OData query.\n\n Executes an OData query for events.\n\n :param app_id: ID of the application. This is Application ID from the\n API Access settings blade in the Azure portal.\n :type app_id: str\n :param event_type: The type of events to query; either a standard\n event type (`traces`, `customEvents`, `pageViews`, `requests`,\n `dependencies`, `exceptions`, `availabilityResults`) or `$all` to\n query across all event types. Possible values include: '$all',\n 'traces', 'customEvents', 'pageViews', 'browserTimings', 'requests',\n 'dependencies', 'exceptions', 'availabilityResults',\n 'performanceCounters', 'customMetrics'\n :type event_type: str or ~azure.applicationinsights.models.EventType\n :param timespan: Optional. The timespan over which to retrieve events.\n This is an ISO8601 time period value. 
This timespan is applied in\n addition to any that are specified in the Odata expression.\n :type timespan: str\n :param filter: An expression used to filter the returned events\n :type filter: str\n :param search: A free-text search expression to match for whether a\n particular event should be returned\n :type search: str\n :param orderby: A comma-separated list of properties with \\\\\"asc\\\\\"\n (the default) or \\\\\"desc\\\\\" to control the order of returned events\n :type orderby: str\n :param select: Limits the properties to just those requested on each\n returned event\n :type select: str\n :param skip: The number of items to skip over before returning events\n :type skip: int\n :param top: The number of events to return\n :type top: int\n :param format: Format for the returned events\n :type format: str\n :param count: Request a count of matching items included with the\n returned events\n :type count: bool\n :param apply: An expression used for aggregation over returned events\n :type apply: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: EventsResults or ClientRawResponse if raw=true\n :rtype: ~azure.applicationinsights.models.EventsResults or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`ErrorResponseException`\n \"\"\"\n # Construct URL\n arg_16 = arg_0.Func.metadata['url']\n arg_17 = {\n 'appId': arg_0._serialize.url(\"app_id\", arg_1, 'str'),\n 'eventType': arg_0._serialize.url(\"event_type\", arg_2, 'str')\n }\n arg_16 = arg_0._client.format_url(arg_16, **arg_17)\n\n # Construct parameters\n arg_18 = {}\n if arg_3 is not None:\n arg_18['timespan'] = arg_0._serialize.query(\"timespan\", arg_3, 'str')\n if arg_4 is not None:\n arg_18['$filter'] = arg_0._serialize.query(\"filter\", arg_4, 'str')\n if arg_5 is not None:\n arg_18['$search'] = arg_0._serialize.query(\"search\", arg_5, 'str')\n if arg_6 is not None:\n arg_18['$orderby'] = arg_0._serialize.query(\"orderby\", arg_6, 'str')\n if arg_7 is not None:\n arg_18['$select'] = arg_0._serialize.query(\"select\", arg_7, 'str')\n if arg_8 is not None:\n arg_18['$skip'] = arg_0._serialize.query(\"skip\", arg_8, 'int')\n if arg_9 is not None:\n arg_18['$top'] = arg_0._serialize.query(\"top\", arg_9, 'int')\n if arg_10 is not None:\n arg_18['$format'] = arg_0._serialize.query(\"format\", arg_10, 'str')\n if arg_11 is not None:\n arg_18['$count'] = arg_0._serialize.query(\"count\", arg_11, 'bool')\n if arg_12 is not None:\n arg_18['$apply'] = arg_0._serialize.query(\"apply\", arg_12, 'str')\n\n # Construct headers\n arg_19 = {}\n arg_19['Accept'] = 'application/json'\n if arg_13:\n arg_19.update(arg_13)\n\n # Construct and send request\n arg_20 = arg_0._client.get(arg_16, arg_18, arg_19)\n arg_21 = arg_0._client.send(arg_20, stream=False, **arg_15)\n\n if arg_21.status_code not in [200]:\n raise models.ErrorResponseException(arg_0._deserialize, arg_21)\n\n arg_22 = None\n\n if arg_21.status_code == 200:\n arg_22 = arg_0._deserialize('EventsResults', arg_21)\n\n if arg_14:\n arg_23 = ClientRawResponse(arg_22, arg_21)\n return arg_23\n\n return arg_22"} +{"_id": "doc_1394", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None, arg_6=False, arg_7=None, **arg_8):\n \"\"\"Add a face to a large face list. The input face is specified as an\n image with a targetFace rectangle. 
It returns a persistedFaceId\n representing the added face, and persistedFaceId will not expire.\n\n :param large_face_list_id: Id referencing a particular large face\n list.\n :type large_face_list_id: str\n :param image: An image stream.\n :type image: Generator\n :param user_data: User-specified data about the face for any purpose.\n The maximum length is 1KB.\n :type user_data: str\n :param target_face: A face rectangle to specify the target face to be\n added to a person in the format of \"targetFace=left,top,width,height\".\n E.g. \"targetFace=10,10,100,100\". If there is more than one face in the\n image, targetFace is required to specify which face to add. No\n targetFace means there is only one face detected in the entire image.\n :type target_face: list[int]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param callback: When specified, will be called with each chunk of\n data that is streamed. The callback should take two arguments, the\n bytes of the current chunk of data and the response object. If the\n data is uploading, response will be None.\n :type callback: Callable[Bytes, response=None]\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: PersistedFace or ClientRawResponse if raw=true\n :rtype: ~azure.cognitiveservices.vision.face.models.PersistedFace or\n ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`APIErrorException`\n \"\"\"\n # Construct URL\n arg_9 = arg_0.Func.metadata['url']\n arg_10 = {\n 'Endpoint': arg_0._serialize.url(\"self.config.endpoint\", arg_0.config.endpoint, 'str', skip_quote=True),\n 'largeFaceListId': arg_0._serialize.url(\"large_face_list_id\", arg_1, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')\n }\n arg_9 = arg_0._client.format_url(arg_9, **arg_10)\n\n # Construct parameters\n arg_11 = {}\n if arg_3 is not None:\n arg_11['userData'] = arg_0._serialize.query(\"user_data\", arg_3, 'str', max_length=1024)\n if arg_4 is not None:\n arg_11['targetFace'] = arg_0._serialize.query(\"target_face\", arg_4, '[int]', div=',')\n\n # Construct headers\n arg_12 = {}\n arg_12['Accept'] = 'application/json'\n arg_12['Content-Type'] = 'application/octet-stream'\n if arg_5:\n arg_12.update(arg_5)\n\n # Construct body\n arg_13 = arg_0._client.stream_upload(arg_2, arg_7)\n\n # Construct and send request\n arg_14 = arg_0._client.post(arg_9, arg_11, arg_12, arg_13)\n arg_15 = arg_0._client.send(arg_14, stream=False, **arg_8)\n\n if arg_15.status_code not in [200]:\n raise models.APIErrorException(arg_0._deserialize, arg_15)\n\n arg_16 = None\n\n if arg_15.status_code == 200:\n arg_16 = arg_0._deserialize('PersistedFace', arg_15)\n\n if arg_6:\n arg_17 = ClientRawResponse(arg_16, arg_15)\n return arg_17\n\n return arg_16"} +{"_id": "doc_1395", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Reset auth_attempted on redirects.\"\"\"\n if arg_1.is_redirect:\n arg_0._thread_local.auth_attempted = False"} +{"_id": "doc_1396", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=False, **arg_5):\n \"\"\"Publishes a batch of events to an Azure Event Grid topic.\n\n :param topic_hostname: The host name of the topic, e.g.\n topic1.westus2-1.eventgrid.azure.net\n :type topic_hostname: str\n :param events: An array of events to be published to Event Grid.\n :type events: list[~azure.eventgrid.models.EventGridEvent]\n :param dict custom_headers: headers that will be added to the request\n :param 
bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides`.\n :return: None or ClientRawResponse if raw=true\n :rtype: None or ~msrest.pipeline.ClientRawResponse\n :raises:\n :class:`HttpOperationError`\n \"\"\"\n # Construct URL\n arg_6 = arg_0.Func.metadata['url']\n arg_7 = {\n 'topicHostname': arg_0._serialize.url(\"topic_hostname\", arg_1, 'str', skip_quote=True)\n }\n arg_6 = arg_0._client.format_url(arg_6, **arg_7)\n\n # Construct parameters\n arg_8 = {}\n arg_8['api-version'] = arg_0._serialize.query(\"self.api_version\", arg_0.api_version, 'str')\n\n # Construct headers\n arg_9 = {}\n arg_9['Content-Type'] = 'application/json; charset=utf-8'\n if arg_3:\n arg_9.update(arg_3)\n\n # Construct body\n arg_10 = arg_0._serialize.body(arg_2, '[EventGridEvent]')\n\n # Construct and send request\n arg_11 = arg_0._client.post(arg_6, arg_8)\n arg_12 = arg_0._client.send(\n arg_11, arg_9, arg_10, stream=False, **arg_5)\n\n if arg_12.status_code not in [200]:\n raise HttpOperationError(arg_0._deserialize, arg_12)\n\n if arg_4:\n arg_13 = ClientRawResponse(None, arg_12)\n return arg_13"} +{"_id": "doc_1397", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=False, arg_6=True, **arg_7):\n \"\"\"Moves resources from one resource group to another resource group.\n\n The resources to move must be in the same source resource group. The\n target resource group may be in a different subscription. When moving\n resources, both the source group and the target group are locked for\n the duration of the operation. Write and delete operations are blocked\n on the groups until the move completes. .\n\n :param source_resource_group_name: The name of the resource group\n containing the resources to move.\n :type source_resource_group_name: str\n :param resources: The IDs of the resources.\n :type resources: list[str]\n :param target_resource_group: The target resource group.\n :type target_resource_group: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_8 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=True,\n **arg_7\n )\n\n def get_long_running_output(arg_9):\n if arg_5:\n arg_10 = ClientRawResponse(None, arg_9)\n return arg_10\n\n arg_11 = arg_7.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_6 is True: arg_12 = ARMPolling(arg_11, **arg_7)\n elif arg_6 is False: arg_12 = NoPolling()\n else: arg_12 = arg_6\n return LROPoller(arg_0._client, arg_8, get_long_running_output, arg_12)"} +{"_id": "doc_1398", "title": "", "text": "def Func(\n arg_0, arg_1,\n arg_2=30, arg_3=None,\n arg_4=False,\n arg_5=False,\n arg_6=None,\n arg_7=False,\n arg_8=None,\n arg_9=None, arg_10=None):\n \"\"\"Create a queue entity.\n\n :param queue_name: The name of the new queue.\n :type queue_name: str\n :param lock_duration: The lock durection in seconds for each message 
in the queue.\n :type lock_duration: int\n :param max_size_in_megabytes: The max size to allow the queue to grow to.\n :type max_size_in_megabytes: int\n :param requires_duplicate_detection: Whether the queue will require every message with\n a specified time frame to have a unique ID. Non-unique messages will be discarded.\n Default value is False.\n :type requires_duplicate_detection: bool\n :param requires_session: Whether the queue will be sessionful, and therefore require all\n message to have a Session ID and be received by a sessionful receiver.\n Default value is False.\n :type requires_session: bool\n :param default_message_time_to_live: The length of time a message will remain in the queue\n before it is either discarded or moved to the dead letter queue.\n :type default_message_time_to_live: ~datetime.timedelta\n :param dead_lettering_on_message_expiration: Whether to move expired messages to the\n dead letter queue. Default value is False.\n :type dead_lettering_on_message_expiration: bool\n :param duplicate_detection_history_time_window: The period within which all incoming messages\n must have a unique message ID.\n :type duplicate_detection_history_time_window: ~datetime.timedelta\n :param max_delivery_count: The maximum number of times a message will attempt to be delivered\n before it is moved to the dead letter queue.\n :type max_delivery_count: int\n :param enable_batched_operations:\n :type: enable_batched_operations: bool\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists.\n \"\"\"\n arg_11 = Queue(\n arg_2=\"PT{}S\".format(int(arg_2)),\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10)\n try:\n return arg_0.mgmt_client.Func(arg_1, queue=arg_11, fail_on_exist=True)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)"} +{"_id": "doc_1399", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Delete a queue entity.\n\n :param queue_name: The name of the queue to delete.\n :type queue_name: str\n :param fail_not_exist: Whether to raise an exception if the named queue is not\n found. If set to True, a ServiceBusResourceNotFound will be raised.\n Default value is False.\n :type fail_not_exist: bool\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namesapce is not found.\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the queue is not found\n and `fail_not_exist` is set to True.\n \"\"\"\n try:\n return arg_0.mgmt_client.Func(arg_1, arg_2=arg_2)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n except azure.common.AzureMissingResourceHttpError as e:\n raise ServiceBusResourceNotFound(\"Specificed queue '{}' does not exist.\".format(arg_1), e)"} +{"_id": "doc_1400", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Delete a topic entity.\n\n :param topic_name: The name of the topic to delete.\n :type topic_name: str\n :param fail_not_exist: Whether to raise an exception if the named topic is not\n found. 
If set to True, a ServiceBusResourceNotFound will be raised.\n Default value is False.\n :type fail_not_exist: bool\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namesapce is not found.\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the topic is not found\n and `fail_not_exist` is set to True.\n \"\"\"\n try:\n return arg_0.mgmt_client.Func(arg_1, arg_2=arg_2)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n except azure.common.AzureMissingResourceHttpError as e:\n raise ServiceBusResourceNotFound(\"Specificed queue does not exist.\", e)"} +{"_id": "doc_1401", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2,\n arg_3=30, arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None, arg_9=None):\n \"\"\"Create a subscription entity.\n\n :param topic_name: The name of the topic under which to create the subscription.\n :param subscription_name: The name of the new subscription.\n :type subscription_name: str\n :param lock_duration: The lock durection in seconds for each message in the subscription.\n :type lock_duration: int\n :param requires_session: Whether the subscription will be sessionful, and therefore require all\n message to have a Session ID and be received by a sessionful receiver.\n Default value is False.\n :type requires_session: bool\n :param default_message_time_to_live: The length of time a message will remain in the subscription\n before it is either discarded or moved to the dead letter queue.\n :type default_message_time_to_live: ~datetime.timedelta\n :param dead_lettering_on_message_expiration: Whether to move expired messages to the\n dead letter queue. Default value is False.\n :type dead_lettering_on_message_expiration: bool\n :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on\n filtering into the dead letter queue. 
Default is False, and the messages will be discarded.\n :type dead_lettering_on_filter_evaluation_exceptions: bool\n :param max_delivery_count: The maximum number of times a message will attempt to be delivered\n before it is moved to the dead letter queue.\n :type max_delivery_count: int\n :param enable_batched_operations:\n :type: enable_batched_operations: bool\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists.\n \"\"\"\n arg_10 = Subscription(\n arg_3=\"PT{}S\".format(int(arg_3)),\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_9=arg_9,\n arg_8=arg_8)\n try:\n return arg_0.mgmt_client.Func(\n arg_1, arg_2,\n subscription=arg_10, fail_on_exist=True)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)"} +{"_id": "doc_1402", "title": "", "text": "def Func(arg_0):\n \"\"\"Perform an operation to update the properties of the entity.\n\n :returns: The properties of the entity as a dictionary.\n :rtype: dict[str, Any]\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the entity does not exist.\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the endpoint cannot be reached.\n :raises: ~azure.common.AzureHTTPError if the credentials are invalid.\n \"\"\"\n try:\n arg_0.entity = arg_0._get_entity()\n arg_0.properties = dict(arg_0.entity)\n if hasattr(arg_0.entity, 'requires_session'):\n arg_0.requires_session = arg_0.entity.requires_session\n return arg_0.properties\n except AzureServiceBusResourceNotFound:\n raise ServiceBusResourceNotFound(\"Specificed queue does not exist.\")\n except azure.common.AzureHttpError:\n arg_0.entity = None\n arg_0.properties = {}\n arg_0.requires_session = False\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace not found\", e)"} +{"_id": "doc_1403", "title": "", "text": "def Func(arg_0):\n \"\"\"Whether the receivers lock on a particular session has Func.\n\n :rtype: bool\n \"\"\"\n if arg_0.locked_until and arg_0.locked_until <= datetime.datetime.now():\n return True\n return False"} +{"_id": "doc_1404", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=False, arg_6=True, **arg_7):\n \"\"\"Creates an Azure subscription.\n\n :param billing_account_name: The name of the commerce root billing\n account.\n :type billing_account_name: str\n :param invoice_section_name: The name of the invoice section.\n :type invoice_section_name: str\n :param body: The subscription creation parameters.\n :type body:\n ~azure.mgmt.subscription.models.SubscriptionCreationParameters\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns\n SubscriptionCreationResult or\n ClientRawResponse if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.subscription.models.SubscriptionCreationResult]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.subscription.models.SubscriptionCreationResult]]\n :raises:\n :class:`ErrorResponseException`\n 
\"\"\"\n arg_8 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=True,\n **arg_7\n )\n\n def get_long_running_output(arg_9):\n arg_10 = {\n 'Location': 'str',\n 'Retry-After': 'int',\n }\n arg_11 = arg_0._deserialize('SubscriptionCreationResult', arg_9)\n\n if arg_5:\n arg_12 = ClientRawResponse(arg_11, arg_9)\n arg_12.add_headers(arg_10)\n return arg_12\n\n return arg_11\n\n arg_13 = arg_7.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_6 is True: arg_14 = ARMPolling(arg_13, **arg_7)\n elif arg_6 is False: arg_14 = NoPolling()\n else: arg_14 = arg_6\n return LROPoller(arg_0._client, arg_8, get_long_running_output, arg_14)"} +{"_id": "doc_1405", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=False, arg_5=True, **arg_6):\n \"\"\"Export logs that show Api requests made by this subscription in the\n given time window to show throttling activities.\n\n :param parameters: Parameters supplied to the LogAnalytics\n getRequestRateByInterval Api.\n :type parameters:\n ~azure.mgmt.compute.v2018_04_01.models.RequestRateByIntervalInput\n :param location: The location upon which virtual-machine-sizes is\n queried.\n :type location: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns\n LogAnalyticsOperationResult or\n ClientRawResponse if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_7 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=True,\n **arg_6\n )\n\n def get_long_running_output(arg_8):\n arg_9 = arg_0._deserialize('LogAnalyticsOperationResult', arg_8)\n\n if arg_4:\n arg_10 = ClientRawResponse(arg_9, arg_8)\n return arg_10\n\n return arg_9\n\n arg_11 = arg_6.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_5 is True: arg_12 = ARMPolling(arg_11, lro_options={'final-state-via': 'azure-async-operation'}, **arg_6)\n elif arg_5 is False: arg_12 = NoPolling()\n else: arg_12 = arg_5\n return LROPoller(arg_0._client, arg_7, get_long_running_output, arg_12)"} +{"_id": "doc_1406", "title": "", "text": "def Func(arg_0):\n \"\"\"Scan output for exceptions\n\n If there is an output from an add task collection call add it to the results.\n\n :param results_queue: Queue containing results of attempted add_collection's\n :type results_queue: collections.deque\n :return: list of TaskAddResults\n :rtype: list[~TaskAddResult]\n \"\"\"\n arg_1 = []\n while arg_0:\n arg_2 = arg_0.pop()\n arg_1.append(arg_2)\n return arg_1"} +{"_id": "doc_1407", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Main method for worker to run\n\n Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added.\n\n :param collections.deque results_queue: Queue for worker to output results to\n \"\"\"\n # Add tasks until either we run out or we run into an unexpected error\n while arg_0.tasks_to_add and not 
arg_0.errors:\n arg_2 = arg_0._max_tasks_per_request # local copy\n arg_3 = []\n with arg_0._pending_queue_lock:\n while len(arg_3) < arg_2 and arg_0.tasks_to_add:\n arg_3.append(arg_0.tasks_to_add.pop())\n\n if arg_3:\n arg_0._bulk_add_tasks(arg_1, arg_3)"} +{"_id": "doc_1408", "title": "", "text": "def Func(arg_0 : arg_1[arg_2, arg_3]) -> arg_1[arg_2, arg_2]:\n \"\"\"Will build the actual config for Jinja2, based on SDK config.\n \"\"\"\n arg_4 = arg_0.copy()\n # Manage the classifier stable/beta\n arg_5 = arg_4.pop(\"is_stable\", False)\n if arg_5:\n arg_4[\"classifier\"] = \"Development Status :: 5 - Production/Stable\"\n else:\n arg_4[\"classifier\"] = \"Development Status :: 4 - Beta\"\n # Manage the nspkg\n arg_6 = arg_4[\"package_name\"]\n arg_4[\"package_nspkg\"] = arg_4.pop(\n \"package_nspkg\",\n arg_6[:arg_6.rindex('-')]+\"-nspkg\"\n )\n # ARM?\n arg_4['is_arm'] = arg_4.pop(\"is_arm\", True)\n\n # Do I need msrestazure for this package?\n arg_4['need_msrestazure'] = arg_4.pop(\"need_msrestazure\", True)\n\n # Pre-compute some Jinja variable that are complicated to do inside the templates\n arg_7 = arg_4[\"package_nspkg\"][:-len('-nspkg')].split('-')\n arg_4['nspkg_names'] = [\n \".\".join(arg_7[:i+1])\n for i in range(len(arg_7))\n ]\n arg_4['init_names'] = [\n \"/\".join(arg_7[:i+1])+\"/__init__.py\"\n for i in range(len(arg_7))\n ]\n\n # Return result\n return arg_4"} +{"_id": "doc_1409", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=False, arg_5=True, **arg_6):\n \"\"\"Starts an environment by starting all resources inside the environment.\n This operation can take a while to complete.\n\n :param user_name: The name of the user.\n :type user_name: str\n :param environment_id: The resourceId of the environment\n :type environment_id: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_7 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=True,\n **arg_6\n )\n\n def get_long_running_output(arg_8):\n if arg_4:\n arg_9 = ClientRawResponse(None, arg_8)\n return arg_9\n\n arg_10 = arg_6.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_5 is True: arg_11 = ARMPolling(arg_10, **arg_6)\n elif arg_5 is False: arg_11 = NoPolling()\n else: arg_11 = arg_5\n return LROPoller(arg_0._client, arg_7, get_long_running_output, arg_11)"} +{"_id": "doc_1410", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Create message from response.\n\n response:\n response from Service Bus cloud server.\n service_instance:\n the Service Bus client.\n '''\n arg_2 = arg_0.body\n arg_3 = {}\n arg_4 = None\n arg_5 = None\n arg_6 = None\n\n # gets all information from respheaders.\n for arg_7, arg_8 in arg_0.headers:\n if arg_7.lower() == 'brokerproperties':\n arg_4 = json.loads(arg_8)\n elif arg_7.lower() == 'content-type':\n arg_5 = arg_8\n elif arg_7.lower() == 'location':\n arg_6 = arg_8\n # Exclude common HTTP headers to avoid noise. List\n # is not exhaustive. 
At worst, custom properties will contains\n # an unexpected content generated by the webserver and not the customer.\n elif arg_7.lower() not in ['transfer-encoding',\n 'server',\n 'date',\n 'strict-transport-security']:\n # Follow the spec:\n # https://docs.microsoft.com/rest/api/servicebus/message-headers-and-properties\n if '\"' in arg_8:\n arg_8 = arg_8[1:-1].replace('\\\\\"', '\"')\n try:\n arg_3[arg_7] = datetime.strptime(\n arg_8, '%a, %d %b %Y %H:%M:%S GMT')\n except ValueError:\n arg_3[arg_7] = arg_8\n elif arg_8.lower() == 'true':\n arg_3[arg_7] = True\n elif arg_8.lower() == 'false':\n arg_3[arg_7] = False\n else: # in theory, only int or float\n try:\n # int('3.1') doesn't work so need to get float('3.14') first\n arg_9 = float(arg_8)\n if str(int(arg_9)) == arg_8:\n arg_3[arg_7] = int(arg_8)\n else:\n arg_3[arg_7] = arg_9\n except ValueError:\n # If we are here, this header does not respect the spec.\n # Could be an unexpected HTTP header or an invalid\n # header value. In both case we ignore without failing.\n pass\n\n if arg_5 is None:\n arg_10 = Message(\n arg_2, arg_1, arg_6, arg_3,\n 'application/atom+xml;type=entry;charset=utf-8', arg_4)\n else:\n arg_10 = Message(arg_2, arg_1, arg_6,\n arg_3, arg_5, arg_4)\n return arg_10"} +{"_id": "doc_1411", "title": "", "text": "def Func(arg_0):\n ''' Converts entry element to rule object.\n\n The format of xml for rule:\n\n\n\n \n MyProperty='XYZ'\n \n \n set MyProperty2 = 'ABC'\n \n\n\n\n '''\n arg_1 = Rule()\n\n arg_2 = arg_0.find('./atom:content/sb:RuleDescription', _etree_sb_feed_namespaces)\n if arg_2 is not None:\n arg_3 = arg_2.find('./sb:Filter', _etree_sb_feed_namespaces)\n if arg_3 is not None:\n arg_1.filter_type = arg_3.attrib.get(\n _make_etree_ns_attr_name(_etree_sb_feed_namespaces['i'], 'type'), None)\n arg_5 = arg_3.find('./sb:SqlExpression', _etree_sb_feed_namespaces)\n if arg_5 is not None:\n arg_1.filter_expression = arg_5.text\n\n arg_7 = arg_2.find('./sb:Action', _etree_sb_feed_namespaces)\n if arg_7 is not None:\n arg_1.action_type = arg_7.attrib.get(\n _make_etree_ns_attr_name(_etree_sb_feed_namespaces['i'], 'type'), None)\n arg_5 = arg_7.find('./sb:SqlExpression', _etree_sb_feed_namespaces)\n if arg_5 is not None:\n arg_1.action_expression = arg_5.text\n\n\n # extract id, updated and name value from feed entry and set them of rule.\n for arg_10, arg_11 in _ETreeXmlToObject.get_entry_properties_from_element(\n arg_0, True, '/rules').items():\n setattr(arg_1, arg_10, arg_11)\n\n return arg_1"} +{"_id": "doc_1412", "title": "", "text": "def Func(arg_0):\n ''' Converts entry element to queue object.\n\n The format of xml response for queue:\n\n 10000\n PT5M\n PT2M\n False\n False\n ...\n\n\n '''\n arg_1 = Queue()\n\n # get node for each attribute in Queue class, if nothing found then the\n # response is not valid xml for Queue.\n arg_2 = True\n\n arg_3 = arg_0.find('./atom:content/sb:QueueDescription', _etree_sb_feed_namespaces)\n if arg_3 is not None:\n arg_4 = [\n ('LockDuration', 'lock_duration', None),\n ('MaxSizeInMegabytes', 'max_size_in_megabytes', int),\n ('RequiresDuplicateDetection', 'requires_duplicate_detection', _parse_bool),\n ('RequiresSession', 'requires_session', _parse_bool),\n ('DefaultMessageTimeToLive', 'default_message_time_to_live', None),\n ('DeadLetteringOnMessageExpiration', 'dead_lettering_on_message_expiration', _parse_bool),\n ('DuplicateDetectionHistoryTimeWindow', 'duplicate_detection_history_time_window', None),\n ('EnableBatchedOperations', 'enable_batched_operations', 
_parse_bool),\n ('MaxDeliveryCount', 'max_delivery_count', int),\n ('MessageCount', 'message_count', int),\n ('SizeInBytes', 'size_in_bytes', int),\n ]\n\n for arg_5 in arg_4:\n if _read_etree_element(arg_3, arg_5[0], arg_1, arg_5[1], arg_5[2]):\n arg_2 = False\n\n if arg_2:\n raise AzureServiceBusResourceNotFound(_ERROR_QUEUE_NOT_FOUND)\n\n # extract id, updated and name value from feed entry and set them of queue.\n for arg_6, arg_7 in _ETreeXmlToObject.get_entry_properties_from_element(\n arg_0, True).items():\n setattr(arg_1, arg_6, arg_7)\n\n return arg_1"} +{"_id": "doc_1413", "title": "", "text": "def Func(arg_0):\n '''Converts entry element to topic\n\n The xml format for topic:\n\n \n \n P10675199DT2H48M5.4775807S\n 1024\n false\n P7D\n true\n \n \n\n '''\n arg_1 = Topic()\n\n arg_2 = True\n\n arg_3 = arg_0.find('./atom:content/sb:TopicDescription', _etree_sb_feed_namespaces)\n if arg_3 is not None:\n arg_4 = [\n ('DefaultMessageTimeToLive', 'default_message_time_to_live', None),\n ('MaxSizeInMegabytes', 'max_size_in_megabytes', int),\n ('RequiresDuplicateDetection', 'requires_duplicate_detection', _parse_bool),\n ('DuplicateDetectionHistoryTimeWindow', 'duplicate_detection_history_time_window', None),\n ('EnableBatchedOperations', 'enable_batched_operations', _parse_bool),\n ('SizeInBytes', 'size_in_bytes', int),\n ]\n\n for arg_5 in arg_4:\n if _read_etree_element(arg_3, arg_5[0], arg_1, arg_5[1], arg_5[2]):\n arg_2 = False\n\n if arg_2:\n raise AzureServiceBusResourceNotFound(_ERROR_TOPIC_NOT_FOUND)\n\n # extract id, updated and name value from feed entry and set them of topic.\n for arg_6, arg_7 in _ETreeXmlToObject.get_entry_properties_from_element(\n arg_0, True).items():\n setattr(arg_1, arg_6, arg_7)\n\n return arg_1"} +{"_id": "doc_1414", "title": "", "text": "def Func(arg_0):\n '''Converts entry element to subscription\n\n The xml format for subscription:\n\n \n \n PT5M\n false\n P10675199DT2H48M5.4775807S\n false\n true\n \n \n\n '''\n arg_1 = Subscription()\n\n arg_2 = arg_0.find('./atom:content/sb:SubscriptionDescription', _etree_sb_feed_namespaces)\n if arg_2 is not None:\n arg_3 = [\n ('LockDuration', 'lock_duration', None),\n ('RequiresSession', 'requires_session', _parse_bool),\n ('DefaultMessageTimeToLive', 'default_message_time_to_live', None),\n ('DeadLetteringOnFilterEvaluationExceptions', 'dead_lettering_on_filter_evaluation_exceptions', _parse_bool), # pylint: disable=line-too-long\n ('DeadLetteringOnMessageExpiration', 'dead_lettering_on_message_expiration', _parse_bool),\n ('EnableBatchedOperations', 'enable_batched_operations', _parse_bool),\n ('MaxDeliveryCount', 'max_delivery_count', int),\n ('MessageCount', 'message_count', int),\n ]\n\n for arg_4 in arg_3:\n _read_etree_element(arg_2, arg_4[0], arg_1, arg_4[1], arg_4[2])\n\n for arg_5, arg_6 in _ETreeXmlToObject.get_entry_properties_from_element(\n arg_0, True, '/subscriptions').items():\n setattr(arg_1, arg_5, arg_6)\n\n return arg_1"} +{"_id": "doc_1415", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None, arg_6=None, arg_7=None, arg_8=False, **arg_9):\n \"\"\"Creates a new certificate inside the specified account.\n\n :param resource_group_name: The name of the resource group that\n contains the Batch account.\n :type resource_group_name: str\n :param account_name: The name of the Batch account.\n :type account_name: str\n :param certificate_name: The identifier for the certificate. 
This must\n be made up of algorithm and thumbprint separated by a dash, and must\n match the certificate data in the request. For example SHA1-a3d1c5.\n :type certificate_name: str\n :param parameters: Additional parameters for certificate creation.\n :type parameters:\n ~azure.mgmt.batch.models.CertificateCreateOrUpdateParameters\n :param if_match: The entity state (ETag) version of the certificate to\n update. A value of \"*\" can be used to apply the operation only if the\n certificate already exists. If omitted, this operation will always be\n applied.\n :type if_match: str\n :param if_none_match: Set to '*' to allow a new certificate to be\n Funcd, but to prevent updating an existing certificate. Other values\n will be ignored.\n :type if_none_match: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :return: An instance of AzureOperationPoller that returns Certificate\n or ClientRawResponse if raw=true\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Certificate]\n or ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError`\n \"\"\"\n arg_10 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=True,\n **arg_9\n )\n if arg_8:\n return arg_10\n\n # Construct and send request\n def long_running_send():\n return arg_10.response\n\n def get_long_running_status(arg_11, arg_12=None):\n\n arg_13 = arg_0._client.get(arg_11)\n if arg_12:\n arg_13.headers.update(arg_12)\n arg_14 = {}\n arg_14['x-ms-client-request-id'] = arg_10.response.request.headers['x-ms-client-request-id']\n return arg_0._client.send(\n arg_13, arg_14, stream=False, **arg_9)\n\n def get_long_running_output(arg_15):\n\n if arg_15.status_code not in [200]:\n arg_16 = CloudError(arg_15)\n arg_16.request_id = arg_15.headers.get('x-ms-request-id')\n raise arg_16\n\n arg_18 = {\n 'ETag': 'str',\n }\n arg_19 = arg_0._deserialize('Certificate', arg_15)\n\n if arg_8:\n arg_20 = ClientRawResponse(arg_19, arg_15)\n arg_20.add_headers(arg_18)\n return arg_20\n\n return arg_19\n\n arg_21 = arg_9.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n return AzureOperationPoller(\n long_running_send, get_long_running_output,\n get_long_running_status, arg_21)"} +{"_id": "doc_1416", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=False, **arg_6):\n \"\"\"Deletes the specified certificate.\n\n :param resource_group_name: The name of the resource group that\n contains the Batch account.\n :type resource_group_name: str\n :param account_name: The name of the Batch account.\n :type account_name: str\n :param certificate_name: The identifier for the certificate. This must\n be made up of algorithm and thumbprint separated by a dash, and must\n match the certificate data in the request. 
For example SHA1-a3d1c5.\n :type certificate_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :return: An instance of AzureOperationPoller that returns None or\n ClientRawResponse if raw=true\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError`\n \"\"\"\n arg_7 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=True,\n **arg_6\n )\n if arg_5:\n return arg_7\n\n # Construct and send request\n def long_running_send():\n return arg_7.response\n\n def get_long_running_status(arg_8, arg_9=None):\n\n arg_10 = arg_0._client.get(arg_8)\n if arg_9:\n arg_10.headers.update(arg_9)\n arg_11 = {}\n arg_11['x-ms-client-request-id'] = arg_7.response.request.headers['x-ms-client-request-id']\n return arg_0._client.send(\n arg_10, arg_11, stream=False, **arg_6)\n\n def get_long_running_output(arg_12):\n\n if arg_12.status_code not in [200, 202, 204]:\n arg_13 = CloudError(arg_12)\n arg_13.request_id = arg_12.headers.get('x-ms-request-id')\n raise arg_13\n\n if arg_5:\n arg_15 = ClientRawResponse(None, arg_12)\n arg_15.add_headers({\n 'Location': 'str',\n 'Retry-After': 'int',\n })\n return arg_15\n\n arg_16 = arg_6.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n return AzureOperationPoller(\n long_running_send, get_long_running_output,\n get_long_running_status, arg_16)"} +{"_id": "doc_1417", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Return a SDK client initialized with a JSON auth dict.\n\n The easiest way to obtain this content is to call the following CLI commands:\n\n .. code:: bash\n\n az ad sp create-for-rbac --sdk-auth\n\n This method will fill automatically the following client parameters:\n - credentials\n - subscription_id\n - base_url\n - tenant_id\n\n Parameters provided in kwargs will override parameters and be passed directly to the client.\n\n :Example:\n\n .. code:: python\n\n from azure.common.client_factory import get_client_from_auth_file\n from azure.mgmt.compute import ComputeManagementClient\n config_dict = {\n \"clientId\": \"ad735158-65ca-11e7-ba4d-ecb1d756380e\",\n \"clientSecret\": \"b70bb224-65ca-11e7-810c-ecb1d756380e\",\n \"subscriptionId\": \"bfc42d3a-65ca-11e7-95cf-ecb1d756380e\",\n \"tenantId\": \"c81da1d8-65ca-11e7-b1d1-ecb1d756380e\",\n \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\n \"resourceManagerEndpointUrl\": \"https://management.azure.com/\",\n \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\",\n \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\",\n \"galleryEndpointUrl\": \"https://gallery.azure.com/\",\n \"managementEndpointUrl\": \"https://management.core.windows.net/\"\n }\n client = Func(ComputeManagementClient, config_dict)\n\n .. 
versionadded:: 1.1.7\n\n :param client_class: A SDK client class\n :param dict config_dict: A config dict.\n :return: An instantiated client\n \"\"\"\n arg_3 = arg_0.__name__ == 'GraphRbacManagementClient'\n arg_4 = {\n 'subscription_id': arg_1.get('subscriptionId'),\n 'base_url': arg_1.get('resourceManagerEndpointUrl'),\n 'tenant_id': arg_1.get('tenantId') # GraphRbac\n }\n if arg_3:\n arg_4['base_url'] = arg_1['activeDirectoryGraphResourceId']\n\n if 'credentials' not in arg_2:\n # Get the right resource for Credentials\n if arg_3:\n arg_5 = arg_1['activeDirectoryGraphResourceId']\n else:\n if \"activeDirectoryResourceId\" not in arg_1 and 'resourceManagerEndpointUrl' not in arg_1:\n raise ValueError(\"Need activeDirectoryResourceId or resourceManagerEndpointUrl key\")\n arg_5 = arg_1.get('activeDirectoryResourceId', arg_1['resourceManagerEndpointUrl'])\n\n arg_6 = arg_1['activeDirectoryEndpointUrl']\n arg_7 = bool(re.match('.+(/adfs|/adfs/)$', arg_6, re.I))\n if arg_7:\n arg_6 = arg_6.rstrip('/') # workaround: ADAL is known to reject auth urls with trailing /\n else:\n arg_6 = arg_6 + '/' + arg_1['tenantId']\n\n arg_8 = adal.AuthenticationContext(\n arg_6,\n api_version=None,\n validate_authority=not arg_7\n )\n arg_4['credentials'] = AdalAuthentication(\n arg_8.acquire_token_with_client_credentials,\n arg_5,\n arg_1['clientId'],\n arg_1['clientSecret']\n )\n\n arg_4.update(arg_2)\n return _instantiate_client(arg_0, **arg_4)"} +{"_id": "doc_1418", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"resp_body is the XML we received\n resp_type is a string, such as Containers,\n return_type is the type we're constructing, such as ContainerEnumResults\n item_type is the type object of the item to be created, such as Container\n\n This function then returns a ContainerEnumResults object with the\n containers member populated with the results.\n \"\"\"\n\n # parsing something like:\n # \n # \n # \n # \n # \n # \n # \n # \n arg_4 = arg_1()\n arg_5 = ETree.fromstring(arg_0.body)\n\n arg_6 = []\n\n for arg_7 in arg_5.findall(arg_2):\n for arg_8 in arg_7.findall(arg_2[:-1]):\n arg_6.append(_ETreeXmlToObject.fill_instance_element(arg_8, arg_3))\n\n for arg_9, arg_10 in vars(arg_4).items():\n # queues, Queues, this is the list its self which we populated\n # above\n if arg_9 == arg_2.lower():\n # the list its self.\n continue\n arg_10 = _ETreeXmlToObject.fill_data_member(arg_5, arg_9, arg_10)\n if arg_10 is not None:\n setattr(arg_4, arg_9, arg_10)\n\n setattr(arg_4, arg_2.lower(), arg_6)\n return arg_4"} +{"_id": "doc_1419", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n ''' get properties from element tree element '''\n arg_4 = {}\n\n arg_5 = arg_0.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'etag'), None)\n if arg_5 is not None:\n arg_4['etag'] = arg_5\n\n arg_6 = arg_0.findtext('./atom:updated', '', _etree_entity_feed_namespaces)\n if arg_6:\n arg_4['updated'] = arg_6\n\n arg_7 = arg_0.findtext('./atom:author/atom:name', '', _etree_entity_feed_namespaces)\n if arg_7:\n arg_4['author'] = arg_7\n\n if arg_1:\n if arg_3:\n arg_8 = arg_0.findtext('./atom:title', '', _etree_entity_feed_namespaces)\n if arg_8:\n arg_4['name'] = arg_8\n else:\n arg_9 = arg_0.findtext('./atom:id', '', _etree_entity_feed_namespaces)\n if arg_9:\n arg_4['name'] = _get_readable_id(arg_9, arg_2)\n\n return arg_4"} +{"_id": "doc_1420", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a client for a queue entity.\n\n :param queue_name: The name 
of the queue.\n :type queue_name: str\n :rtype: ~azure.servicebus.servicebus_client.QueueClient\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the queue is not found.\n\n Example:\n .. literalinclude:: ../examples/test_examples.py\n :start-after: [START Func_client]\n :end-before: [END Func_client]\n :language: python\n :dedent: 8\n :caption: Get the specific queue client from Service Bus client\n\n \"\"\"\n try:\n arg_2 = arg_0.mgmt_client.Func(arg_1)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n except AzureServiceBusResourceNotFound:\n raise ServiceBusResourceNotFound(\"Specificed queue does not exist.\")\n return QueueClient.from_entity(\n arg_0._get_host(), arg_2,\n shared_access_key_name=arg_0.shared_access_key_name,\n shared_access_key_value=arg_0.shared_access_key_value,\n mgmt_client=arg_0.mgmt_client,\n debug=arg_0.debug)"} +{"_id": "doc_1421", "title": "", "text": "def Func(arg_0):\n \"\"\"Get clients for all queue entities in the namespace.\n\n :rtype: list[~azure.servicebus.servicebus_client.QueueClient]\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n\n Example:\n .. literalinclude:: ../examples/test_examples.py\n :start-after: [START Func]\n :end-before: [END Func]\n :language: python\n :dedent: 4\n :caption: List the queues from Service Bus client\n\n \"\"\"\n try:\n arg_1 = arg_0.mgmt_client.Func()\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n arg_2 = []\n for arg_3 in arg_1:\n arg_2.append(QueueClient.from_entity(\n arg_0._get_host(), arg_3,\n shared_access_key_name=arg_0.shared_access_key_name,\n shared_access_key_value=arg_0.shared_access_key_value,\n mgmt_client=arg_0.mgmt_client,\n debug=arg_0.debug))\n return arg_2"} +{"_id": "doc_1422", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a client for a topic entity.\n\n :param topic_name: The name of the topic.\n :type topic_name: str\n :rtype: ~azure.servicebus.servicebus_client.TopicClient\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the topic is not found.\n\n Example:\n .. literalinclude:: ../examples/test_examples.py\n :start-after: [START Func_client]\n :end-before: [END Func_client]\n :language: python\n :dedent: 8\n :caption: Get the specific topic client from Service Bus client\n\n \"\"\"\n try:\n arg_2 = arg_0.mgmt_client.Func(arg_1)\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n except AzureServiceBusResourceNotFound:\n raise ServiceBusResourceNotFound(\"Specificed topic does not exist.\")\n return TopicClient.from_entity(\n arg_0._get_host(), arg_2,\n shared_access_key_name=arg_0.shared_access_key_name,\n shared_access_key_value=arg_0.shared_access_key_value,\n debug=arg_0.debug)"} +{"_id": "doc_1423", "title": "", "text": "def Func(arg_0):\n \"\"\"Get a client for all topic entities in the namespace.\n\n :rtype: list[~azure.servicebus.servicebus_client.TopicClient]\n :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.\n\n Example:\n .. 
literalinclude:: ../examples/test_examples.py\n :start-after: [START Func]\n :end-before: [END Func]\n :language: python\n :dedent: 4\n :caption: List the topics from Service Bus client\n\n \"\"\"\n try:\n arg_1 = arg_0.mgmt_client.Func()\n except requests.exceptions.ConnectionError as e:\n raise ServiceBusConnectionError(\"Namespace: {} not found\".format(arg_0.service_namespace), e)\n arg_2 = []\n for arg_3 in arg_1:\n arg_2.append(TopicClient.from_entity(\n arg_0._get_host(), arg_3,\n shared_access_key_name=arg_0.shared_access_key_name,\n shared_access_key_value=arg_0.shared_access_key_value,\n debug=arg_0.debug))\n return arg_2"} +{"_id": "doc_1424", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.PeekLock, **arg_5):\n \"\"\"Receive messages by sequence number that have been previously deferred.\n\n When receiving deferred messages from a partitioned entity, all of the supplied\n sequence numbers must be messages from the same partition.\n\n :param sequence_numbers: A list of the sequence numbers of messages that have been\n deferred.\n :type sequence_numbers: list[int]\n :param mode: The mode with which messages will be retrieved from the entity. The two options\n are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given\n lock period before they will be removed from the queue. Messages received with ReceiveAndDelete\n will be immediately removed from the queue, and cannot be subsequently rejected or re-received if\n the client fails to process the message. The default mode is PeekLock.\n :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode\n :rtype: list[~azure.servicebus.common.message.Message]\n\n Example:\n .. literalinclude:: ../examples/test_examples.py\n :start-after: [START Func_service_bus]\n :end-before: [END Func_service_bus]\n :language: python\n :dedent: 8\n :caption: Get the messages which were deferred using their sequence numbers\n\n \"\"\"\n if (arg_0.entity and arg_0.requires_session) or arg_5.get('session'):\n raise ValueError(\"Sessionful deferred messages can only be received within a locked receive session.\")\n if not arg_1:\n raise ValueError(\"At least one sequence number must be specified.\")\n try:\n arg_6 = arg_2.value.value\n except AttributeError:\n arg_6 = int(arg_2)\n arg_7 = {\n 'sequence-numbers': types.AMQPArray([types.AMQPLong(s) for s in arg_1]),\n 'receiver-settle-mode': types.AMQPuInt(arg_6)}\n arg_8 = functools.partial(mgmt_handlers.deferred_message_op, arg_2=arg_6)\n with BaseHandler(arg_0.entity_uri, arg_0.auth_config, debug=arg_0.debug, **arg_5) as handler:\n return handler._mgmt_request_response( # pylint: disable=protected-access\n REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,\n arg_7,\n arg_8)"} +{"_id": "doc_1425", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Settle messages that have been previously deferred.\n\n :param settlement: How the messages are to be settled. This must be a string\n of one of the following values: 'completed', 'suspended', 'abandoned'.\n :type settlement: str\n :param messages: A list of deferred messages to be settled.\n :type messages: list[~azure.servicebus.common.message.DeferredMessage]\n\n Example:\n .. 
literalinclude:: ../examples/test_examples.py\n :start-after: [START Func_service_bus]\n :end-before: [END Func_service_bus]\n :language: python\n :dedent: 8\n :caption: Settle deferred messages.\n\n \"\"\"\n if (arg_0.entity and arg_0.requires_session) or arg_3.get('session'):\n raise ValueError(\"Sessionful deferred messages can only be settled within a locked receive session.\")\n if arg_1.lower() not in ['completed', 'suspended', 'abandoned']:\n raise ValueError(\"Settlement must be one of: 'completed', 'suspended', 'abandoned'\")\n if not arg_2:\n raise ValueError(\"At least one message must be specified.\")\n arg_4 = {\n 'disposition-status': arg_1.lower(),\n 'lock-tokens': types.AMQPArray([m.lock_token for m in arg_2])}\n\n with BaseHandler(arg_0.entity_uri, arg_0.auth_config, debug=arg_0.debug, **arg_3) as handler:\n return handler._mgmt_request_response( # pylint: disable=protected-access\n REQUEST_RESPONSE_UPDATE_DISPOSTION_OPERATION,\n arg_4,\n mgmt_handlers.default)"} +{"_id": "doc_1426", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=False, arg_4=False):\n '''\n Delete a website.\n\n webspace_name:\n The name of the webspace.\n website_name:\n The name of the website.\n delete_empty_server_farm:\n If the site being deleted is the last web site in a server farm,\n you can delete the server farm by setting this to True.\n delete_metrics:\n To also delete the metrics for the site that you are deleting, you\n can set this to True.\n '''\n arg_5 = arg_0._get_sites_details_path(arg_1, arg_2)\n arg_6 = ''\n if arg_3:\n arg_6 += '&deleteEmptyServerFarm=true'\n if arg_4:\n arg_6 += '&deleteMetrics=true'\n if arg_6:\n arg_5 = arg_5 + '?' + arg_6.lstrip('&')\n return arg_0._perform_delete(arg_5)"} +{"_id": "doc_1427", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n '''\n Update a web site.\n\n webspace_name:\n The name of the webspace.\n website_name:\n The name of the website.\n state:\n The wanted state ('Running' or 'Stopped' accepted)\n '''\n arg_4 = _XmlSerializer.update_website_to_xml(arg_3)\n return arg_0._perform_put(\n arg_0._get_sites_details_path(arg_1, arg_2),\n arg_4, as_async=True)"} +{"_id": "doc_1428", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Restart a web site.\n\n webspace_name:\n The name of the webspace.\n website_name:\n The name of the website.\n '''\n return arg_0._perform_post(\n arg_0._get_restart_path(arg_1, arg_2),\n None, as_async=True)"} +{"_id": "doc_1429", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None, arg_6=False, arg_7=True, **arg_8):\n \"\"\"Updates the policies for the specified container registry.\n\n :param resource_group_name: The name of the resource group to which\n the container registry belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param quarantine_policy: An object that represents quarantine policy\n for a container registry.\n :type quarantine_policy:\n ~azure.mgmt.containerregistry.v2018_02_01_preview.models.QuarantinePolicy\n :param trust_policy: An object that represents content trust policy\n for a container registry.\n :type trust_policy:\n ~azure.mgmt.containerregistry.v2018_02_01_preview.models.TrustPolicy\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or 
a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns RegistryPolicies or\n ClientRawResponse if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.containerregistry.v2018_02_01_preview.models.RegistryPolicies]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.containerregistry.v2018_02_01_preview.models.RegistryPolicies]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_9 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=True,\n **arg_8\n )\n\n def get_long_running_output(arg_10):\n arg_11 = arg_0._deserialize('RegistryPolicies', arg_10)\n\n if arg_6:\n arg_12 = ClientRawResponse(arg_11, arg_10)\n return arg_12\n\n return arg_11\n\n arg_13 = arg_8.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_7 is True: arg_14 = ARMPolling(arg_13, **arg_8)\n elif arg_7 is False: arg_14 = NoPolling()\n else: arg_14 = arg_7\n return LROPoller(arg_0._client, arg_9, get_long_running_output, arg_14)"} +{"_id": "doc_1430", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=False, arg_6=True, **arg_7):\n \"\"\"Completes the restore operation on a managed database.\n\n :param location_name: The name of the region where the resource is\n located.\n :type location_name: str\n :param operation_id: Management operation id that this request tries\n to complete.\n :type operation_id: str\n :param last_backup_name: The last backup name to apply\n :type last_backup_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError`\n \"\"\"\n arg_8 = arg_0._Func_initial(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=True,\n **arg_7\n )\n\n def get_long_running_output(arg_9):\n if arg_5:\n arg_10 = ClientRawResponse(None, arg_9)\n return arg_10\n\n arg_11 = arg_7.get(\n 'long_running_operation_timeout',\n arg_0.config.long_running_operation_timeout)\n if arg_6 is True: arg_12 = ARMPolling(arg_11, **arg_7)\n elif arg_6 is False: arg_12 = NoPolling()\n else: arg_12 = arg_6\n return LROPoller(arg_0._client, arg_8, get_long_running_output, arg_12)"} +{"_id": "doc_1431", "title": "", "text": "async def Func(arg_0, *arg_1):\n \"\"\"Cancel one or more messages that have previsouly been scheduled and are still pending.\n\n :param sequence_numbers: The seqeuence numbers of the scheduled messages.\n :type sequence_numbers: int\n\n Example:\n .. 
literalinclude:: ../examples/async_examples/test_examples_async.py\n :start-after: [START cancel_schedule_messages]\n :end-before: [END cancel_schedule_messages]\n :language: python\n :dedent: 4\n :caption: Schedule messages.\n\n \"\"\"\n if not arg_0.running:\n await arg_0.open()\n arg_2 = [types.AMQPLong(s) for s in arg_1]\n arg_3 = {'sequence-numbers': types.AMQPArray(arg_2)}\n return await arg_0._mgmt_request_response(\n REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION,\n arg_3,\n mgmt_handlers.default)"} +{"_id": "doc_1432", "title": "", "text": "async def Func(arg_0):\n \"\"\"Reconnect the handler.\n\n If the handler was disconnected from the service with\n a retryable error - attempt to Func.\n This method will be called automatically for most retryable errors.\n Also attempts to re-queue any messages that were pending before the Func.\n \"\"\"\n arg_1 = arg_0._handler.pending_messages\n await super(Sender, arg_0).Func()\n try:\n arg_0._handler.queue_message(*arg_1)\n await arg_0._handler.wait_async()\n except Exception as e: # pylint: disable=broad-except\n await arg_0._handle_exception(e)"} +{"_id": "doc_1433", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the width of the string it would be when displayed.\"\"\"\n if isinstance(arg_0, bytes):\n arg_0 = arg_0.decode(\"utf8\", \"ignore\")\n return sum(map(get_width, map(ord, arg_0)))"} +{"_id": "doc_1434", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Drops Characters by unicode not by bytes.\"\"\"\n arg_2 = isinstance(arg_0, bytes)\n if arg_2:\n arg_0 = arg_0.decode(\"utf8\", \"ignore\")\n for arg_3 in range(len(arg_0)):\n if terminal_width(arg_0[arg_3:]) <= arg_1:\n break\n return arg_0[arg_3:].encode(\"utf8\", \"ignore\") if arg_2 else arg_0[arg_3:]"} +{"_id": "doc_1435", "title": "", "text": "def Func(arg_0):\n \"\"\"Clears out the previous line and prints a new one.\"\"\"\n arg_1 = get_terminal_size().columns\n arg_2 = arg_1 - terminal_width(arg_0)\n\n # On windows we need one less space or we overflow the line for some reason.\n if is_win32:\n arg_2 -= 1\n\n sys.stderr.write(\"\\r{0}\".format(arg_0))\n sys.stderr.write(\" \" * max(0, arg_2))\n sys.stderr.flush()"} +{"_id": "doc_1436", "title": "", "text": "def Func(**arg_0):\n \"\"\"Creates a status line with appropriate size.\"\"\"\n arg_1 = get_terminal_size().columns - 1\n\n for arg_2 in PROGRESS_FORMATS:\n arg_3 = arg_2.format(**arg_0)\n\n if len(arg_3) <= arg_1:\n break\n\n return arg_3"} +{"_id": "doc_1437", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Segments are yielded when they are available\n\n Segments appear on a time line, for dynamic content they are only available at a certain time\n and sometimes for a limited time. 
For static content they are all available at the same time.\n\n :param kwargs: extra args to pass to the segment template\n :return: yields Segments\n \"\"\"\n\n arg_2 = arg_0.segmentBase or arg_0.walk_back_get_attr(\"segmentBase\")\n arg_3 = arg_0.segmentList or arg_0.walk_back_get_attr(\"segmentList\")\n arg_4 = arg_0.segmentTemplate or arg_0.walk_back_get_attr(\"segmentTemplate\")\n\n if arg_4:\n for arg_5 in arg_4.Func(RepresentationID=arg_0.id,\n Bandwidth=int(arg_0.bandwidth * 1000),\n **arg_1):\n if arg_5.init:\n yield arg_5\n else:\n yield arg_5\n elif arg_3:\n for arg_6 in arg_3:\n for arg_5 in arg_6.Func:\n yield arg_5\n else:\n yield Segment(arg_0.base_url, 0, True, True)"} +{"_id": "doc_1438", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Puts a value into a Func but aborts if this thread is closed.\"\"\"\n while not arg_0.closed:\n try:\n arg_1.put(arg_2, block=True, timeout=1)\n return\n except Func.Full:\n continue"} +{"_id": "doc_1439", "title": "", "text": "def Func(arg_0, arg_1=\"XML\", arg_2=False, arg_3=arg_4, arg_5=None, arg_6=False):\n \"\"\"Wrapper around ElementTree.fromstring with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if is_py2 and isinstance(arg_0, unicode):\n arg_0 = arg_0.encode(\"utf8\")\n elif is_py3 and isinstance(arg_0, str):\n arg_0 = bytearray(arg_0, \"utf8\")\n\n if arg_2:\n arg_0 = re.sub(br\"[\\t ]xmlns=\\\"(.+?)\\\"\", b\"\", arg_0)\n\n if arg_6:\n arg_0 = re.sub(br'&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)', b'&', arg_0)\n\n try:\n arg_7 = ET.fromstring(arg_0)\n except Exception as err:\n arg_8 = repr(arg_0)\n if len(arg_8) > 35:\n arg_8 = arg_8[:35] + \" ...\"\n\n raise arg_3(\"Unable to parse {0}: {1} ({2})\".format(arg_1, err, arg_8))\n\n if arg_5:\n arg_7 = arg_5.validate(arg_7, arg_1=arg_1, arg_3=arg_3)\n\n return arg_7"} +{"_id": "doc_1440", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Search for a key in a nested dict, or list of nested dicts, and return the values.\n\n :param data: dict/list to search\n :param key: key to find\n :return: matches for key\n \"\"\"\n if isinstance(arg_0, dict):\n for arg_2, arg_3 in arg_0.items():\n if arg_2 == arg_1:\n yield arg_3\n for arg_4 in Func(arg_3, arg_1):\n yield arg_4\n elif isinstance(arg_0, list):\n for arg_3 in arg_0:\n for arg_4 in Func(arg_3, arg_1):\n yield arg_4"} +{"_id": "doc_1441", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=\"-\", arg_6=\"--\"):\n \"\"\"\n Spawn the process defined in `cmd`\n\n parameters is converted to options the short and long option prefixes\n if a list is given as the value, the parameter is repeated with each\n value\n\n If timeout is set the Func will block until the process returns or\n the timeout expires.\n\n :param parameters: optional parameters\n :param arguments: positional arguments\n :param stderr: where to redirect stderr to\n :param timeout: timeout for short lived process\n :param long_option_prefix: option prefix, default -\n :param short_option_prefix: long option prefix, default --\n :return: Funced process\n \"\"\"\n arg_3 = arg_3 or arg_0.stderr\n arg_7 = arg_0.bake(arg_0._check_cmd(), arg_1, arg_2, arg_5, arg_6)\n log.debug(\"Spawning command: {0}\", subprocess.list2cmdline(arg_7))\n\n try:\n arg_8 = subprocess.Popen(arg_7, arg_3=arg_3, stdout=subprocess.PIPE)\n except (OSError, IOError) 
as err:\n raise StreamError(\"Failed to start process: {0} ({1})\".format(arg_0._check_cmd(), str(err)))\n\n if arg_4:\n arg_9 = 0\n while arg_9 < arg_4 and not arg_8.poll():\n time.sleep(0.25)\n arg_9 += 0.25\n\n # kill after the timeout has expired and the process still hasn't ended\n if not arg_8.poll():\n try:\n log.debug(\"Process timeout expired ({0}s), killing process\".format(arg_4))\n arg_8.kill()\n except Exception:\n pass\n\n arg_8.wait()\n\n return arg_8"} +{"_id": "doc_1442", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when\n standards compliance is not required. Will find tags that are commented out, or inside script tag etc.\n\n :param html: HTML page\n :param tag: tag name to find\n :return: generator with Tags\n \"\"\"\n for arg_2 in tag_re.finditer(arg_0):\n if arg_2.group(\"tag\") == arg_1:\n arg_3 = dict((a.group(\"key\").lower(), a.group(\"value\")) for a in attr_re.finditer(arg_2.group(\"attr\")))\n yield Tag(arg_2.group(\"tag\"), arg_3, arg_2.group(\"inner\"))"} +{"_id": "doc_1443", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Attempt to parse a DASH manifest file and return its streams\n\n :param session: Streamlink session instance\n :param url_or_manifest: URL of the manifest file or an XML manifest string\n :return: a dict of name -> DASHStream instances\n \"\"\"\n arg_4 = {}\n\n if arg_2.startswith(' 1:\n arg_11 = list(filter(lambda a: a.lang is None or a.lang == arg_16, arg_11))\n\n for arg_19, arg_18 in itertools.product(arg_10, arg_11):\n arg_20 = DASHStream(arg_1, arg_5, arg_19, arg_18, **arg_3)\n arg_21 = []\n\n if arg_19:\n arg_21.append(\"{:0.0f}{}\".format(arg_19.height or arg_19.bandwidth_rounded, \"p\" if arg_19.height else \"k\"))\n if arg_11 and len(arg_11) > 1:\n arg_21.append(\"a{:0.0f}k\".format(arg_18.bandwidth))\n arg_4['+'.join(arg_21)] = arg_20\n return arg_4"} +{"_id": "doc_1444", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Determine which Unicode encoding the JSON text sample is encoded with\n\n RFC4627 (http://www.ietf.org/rfc/rfc4627.txt) suggests that the encoding of JSON text can be determined\n by checking the pattern of NULL bytes in first 4 octets of the text.\n :param sample: a sample of at least 4 bytes of the JSON text\n :return: the most likely encoding of the JSON text\n \"\"\"\n arg_2 = [i for i, j in enumerate(bytearray(arg_1[:4])) if j == 0]\n if arg_2 == [0, 1, 2]:\n return \"UTF-32BE\"\n elif arg_2 == [0, 2]:\n return \"UTF-16BE\"\n elif arg_2 == [1, 2, 3]:\n return \"UTF-32LE\"\n elif arg_2 == [1, 3]:\n return \"UTF-16LE\"\n else:\n return \"UTF-8\""} +{"_id": "doc_1445", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Parses JSON from a response.\"\"\"\n # if an encoding is already set then use the provided encoding\n if arg_1.encoding is None:\n arg_1.encoding = arg_0.determine_Func_encoding(arg_1.content[:4])\n return parse_Func(arg_1.text, *arg_2, **arg_3)"} +{"_id": "doc_1446", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Parses XML from a response.\"\"\"\n return parse_Func(arg_1.text, *arg_2, **arg_3)"} +{"_id": "doc_1447", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Parses a semi-colon delimited list of query parameters.\n\n Example: foo=bar;baz=qux\n \"\"\"\n for arg_3, arg_4 in _parse_keyvalue_list(arg_1):\n arg_0.params[arg_3] = arg_4"} +{"_id": "doc_1448", "title": "", "text": "def Func(arg_0):\n 
\"\"\"\n Return the message for this LogRecord.\n\n Return the message for this LogRecord after merging any user-supplied\n arguments with the message.\n \"\"\"\n arg_1 = arg_0.msg\n if arg_0.args:\n arg_1 = arg_1.format(*arg_0.args)\n return maybe_encode(arg_1)"} +{"_id": "doc_1449", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Attempt a Func to LiveEdu.tv\n \"\"\"\n arg_1 = arg_0.get_option(\"email\")\n arg_2 = arg_0.get_option(\"password\")\n\n if arg_1 and arg_2:\n arg_3 = arg_0.session.http.get(arg_0.Func_url)\n arg_4 = arg_0.csrf_re.search(arg_3.text)\n arg_5 = arg_4 and arg_4.group(1)\n arg_0.logger.debug(\"Attempting Func as {0} (token={1})\", arg_1, arg_5)\n\n arg_3 = arg_0.session.http.post(arg_0.Func_url,\n data=dict(Func=arg_1, arg_2=arg_2, csrfmiddlewaretoken=arg_5),\n allow_redirects=False,\n raise_for_status=False,\n headers={\"Referer\": arg_0.Func_url})\n\n if arg_3.status_code != 302:\n arg_0.logger.error(\"Failed to Func to LiveEdu account: {0}\", arg_1)"} +{"_id": "doc_1450", "title": "", "text": "def Func(arg_0):\n \"\"\"Loads a plugin from the same directory as the calling plugin.\n\n The path used is extracted from the last call in module scope,\n therefore this must be called only from module level in the\n originating plugin or the correct plugin path will not be found.\n\n \"\"\"\n\n # Get the path of the caller module\n arg_1 = list(filter(lambda f: f[3] == \"\", inspect.stack()))\n arg_2 = arg_1[0]\n arg_3 = os.path.dirname(arg_2[1])\n\n # Major hack. If we are frozen by bbfreeze the stack trace will\n # contain relative paths. We therefore use the __file__ variable\n # in this module to correct it.\n if not os.path.isabs(arg_3):\n arg_4 = os.path.normpath(__file__ + \"../../../../../\")\n arg_3 = os.path.join(arg_4, arg_3)\n\n return load_module(arg_0, arg_3)"} +{"_id": "doc_1451", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Update or remove keys from a query string in a URL\n\n :param url: URL to update\n :param qsd: dict of keys to update, a None value leaves it unchanged\n :param remove: list of keys to remove, or \"*\" to remove all\n note: updated keys are never removed, even if unchanged\n :return: updated URL\n \"\"\"\n arg_1 = arg_1 or {}\n arg_2 = arg_2 or []\n\n # parse current query string\n arg_3 = urlparse(arg_0)\n arg_4 = OrderedDict(parse_qsl(arg_3.query))\n\n # * removes all possible keys\n if arg_2 == \"*\":\n arg_2 = list(arg_4.keys())\n\n # remove keys before updating, but leave updated keys untouched\n for arg_5 in arg_2:\n if arg_5 not in arg_1:\n del arg_4[arg_5]\n\n # and update the query string\n for arg_5, arg_6 in arg_1.items():\n if arg_6:\n arg_4[arg_5] = arg_6\n\n return arg_3._replace(query=urlencode(arg_4)).geturl()"} +{"_id": "doc_1452", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Find all the arguments required by name\n\n :param name: name of the argument the find the dependencies\n\n :return: list of dependant arguments\n \"\"\"\n arg_2 = set([arg_1])\n arg_3 = arg_0.get(arg_1)\n for arg_4 in arg_3.Func:\n arg_5 = arg_0.get(arg_4)\n if not arg_5:\n raise KeyError(\"{0} is not a valid argument for this plugin\".format(arg_4))\n\n if arg_5.name in arg_2:\n raise RuntimeError(\"cycle detected in plugin argument config\")\n arg_2.add(arg_5.name)\n yield arg_5\n\n for arg_6 in arg_0.Func(arg_5.name):\n if arg_6.name in arg_2:\n raise RuntimeError(\"cycle detected in plugin argument config\")\n arg_2.add(arg_6.name)\n yield arg_6"} +{"_id": "doc_1453", "title": "", "text": "def 
Func(arg_0):\n \"\"\"Decides where to write the stream.\n\n Depending on arguments it can be one of these:\n - The stdout pipe\n - A subprocess' stdin pipe\n - A named pipe that the subprocess reads from\n - A regular file\n\n \"\"\"\n\n if (args.output or args.stdout) and (args.record or args.record_and_pipe):\n console.exit(\"Cannot use record options with other file output options.\")\n\n if args.output:\n if args.output == \"-\":\n arg_1 = FileOutput(fd=stdout)\n else:\n arg_1 = check_file_output(args.output, args.force)\n elif args.stdout:\n arg_1 = FileOutput(fd=stdout)\n elif args.record_and_pipe:\n arg_2 = check_file_output(args.record_and_pipe, args.force)\n arg_1 = FileOutput(fd=stdout, arg_2=arg_2)\n else:\n arg_3 = arg_5 = arg_2 = None\n\n if not args.player:\n console.exit(\"The default player (VLC) does not seem to be \"\n \"installed. You must specify the path to a player \"\n \"executable with --player.\")\n\n if args.player_fifo:\n arg_4 = \"streamlinkpipe-{0}\".format(os.getpid())\n log.info(\"Creating pipe {0}\", arg_4)\n\n try:\n arg_5 = NamedPipe(arg_4)\n except IOError as err:\n console.exit(\"Failed to create pipe: {0}\", err)\n elif args.player_http:\n arg_3 = create_http_server()\n\n arg_6 = create_title(arg_0)\n\n if args.record:\n arg_2 = check_file_output(args.record, args.force)\n\n log.info(\"Starting player: {0}\", args.player)\n\n arg_1 = PlayerOutput(args.player, args=args.player_args,\n quiet=not args.verbose_player,\n kill=not args.player_no_close,\n arg_5=arg_5, arg_3=arg_3,\n arg_2=arg_2, arg_6=arg_6)\n\n return arg_1"} +{"_id": "doc_1454", "title": "", "text": "def Func(arg_0=None, arg_1=0):\n \"\"\"Creates a HTTP server listening on a given host and port.\n\n If host is empty, listen on all available interfaces, and if port is 0,\n listen on a random high port.\n \"\"\"\n\n try:\n arg_2 = HTTPServer()\n arg_2.bind(arg_0=arg_0, arg_1=arg_1)\n except OSError as err:\n console.exit(\"Failed to create HTTP server: {0}\", err)\n\n return arg_2"} +{"_id": "doc_1455", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Repeatedly accept HTTP connections on a server.\n\n Forever if the serving externally, or while a player is running if it is not\n empty.\n \"\"\"\n\n while not arg_1 or arg_1.running:\n try:\n yield arg_0.open(timeout=2.5)\n except OSError:\n continue"} +{"_id": "doc_1456", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=0):\n \"\"\"Continuously output the stream over HTTP.\"\"\"\n global output\n\n if not arg_2:\n if not args.player:\n console.exit(\"The default player (VLC) does not seem to be \"\n \"installed. 
You must specify the path to a player \"\n \"executable with --player.\")\n\n arg_4 = create_title(arg_0)\n arg_5 = create_http_server()\n arg_6 = output = PlayerOutput(args.player, args=args.player_args,\n filename=arg_5.url,\n quiet=not args.verbose_player,\n arg_4=arg_4)\n\n try:\n log.info(\"Starting player: {0}\", args.player)\n if arg_6:\n arg_6.open()\n except OSError as err:\n console.exit(\"Failed to start player: {0} ({1})\",\n args.player, err)\n else:\n arg_5 = create_http_server(host=None, arg_3=arg_3)\n arg_6 = None\n\n log.info(\"Starting server, access with one of:\")\n for arg_7 in arg_5.urls:\n log.info(\" \" + arg_7)\n\n for arg_8 in iter_http_requests(arg_5, arg_6):\n arg_9 = arg_8.headers.get(\"User-Agent\") or \"unknown player\"\n log.info(\"Got HTTP request from {0}\".format(arg_9))\n\n arg_10 = arg_13 = None\n while not arg_10 and (not arg_6 or arg_6.running):\n try:\n arg_11 = arg_1 or fetch_streams(arg_0)\n arg_1 = None\n\n for arg_12 in (resolve_stream_name(arg_11, s) for s in args.stream):\n if arg_12 in arg_11:\n stream = arg_11[arg_12]\n break\n else:\n log.info(\"Stream not available, will re-fetch \"\n \"streams in 10 sec\")\n sleep(10)\n continue\n except PluginError as err:\n log.error(u\"Unable to fetch new streams: {0}\", err)\n continue\n\n try:\n log.info(\"Opening stream: {0} ({1})\", arg_12,\n type(stream).shortname())\n arg_10, arg_13 = open_stream(stream)\n except StreamError as err:\n log.error(\"{0}\", err)\n\n if arg_10 and arg_13:\n log.debug(\"Writing stream to player\")\n read_stream(arg_10, arg_5, arg_13)\n\n arg_5.close(True)\n\n arg_6.close()\n arg_5.close()"} +{"_id": "doc_1457", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Prepares a filename to be passed to the player.\"\"\"\n global arg_4\n\n arg_2 = create_title(arg_0)\n arg_3 = '\"{0}\"'.format(stream_to_url(arg_1))\n arg_4 = PlayerOutput(args.player, args=args.player_args,\n arg_3=arg_3, call=True,\n quiet=not args.verbose_player,\n arg_2=arg_2)\n\n try:\n log.info(\"Starting player: {0}\", args.player)\n arg_4.open()\n except OSError as err:\n console.exit(\"Failed to start player: {0} ({1})\", args.player, err)\n return False\n\n return True"} +{"_id": "doc_1458", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=8192):\n \"\"\"Reads data from stream and then writes it to the output.\"\"\"\n arg_4 = isinstance(arg_1, PlayerOutput)\n arg_5 = isinstance(arg_1, HTTPServer)\n arg_6 = arg_4 and arg_1.namedpipe\n arg_7 = isinstance(arg_1, FileOutput) and arg_1.fd is not stdout and sys.stdout.isatty()\n arg_8 = hasattr(arg_1, \"record\") and isinstance(arg_1.record, FileOutput) and arg_1.record.fd is not stdout and sys.stdout.isatty()\n\n arg_9 = chain(\n [arg_2],\n iter(partial(arg_0.read, arg_3), b\"\")\n )\n if arg_7:\n arg_9 = progress(arg_9,\n prefix=os.path.basename(args.output))\n elif arg_8:\n arg_9 = progress(arg_9,\n prefix=os.path.basename(args.record))\n\n try:\n for arg_10 in arg_9:\n # We need to check if the player process still exists when\n # using named pipes on Windows since the named pipe is not\n # automatically closed by the player.\n if is_win32 and arg_6:\n arg_1.player.poll()\n\n if arg_1.player.returncode is not None:\n log.info(\"Player closed\")\n break\n\n try:\n arg_1.write(arg_10)\n except IOError as err:\n if arg_4 and err.errno in ACCEPTABLE_ERRNO:\n log.info(\"Player closed\")\n elif arg_5 and err.errno in ACCEPTABLE_ERRNO:\n log.info(\"HTTP connection closed\")\n else:\n console.exit(\"Error when writing to output: {0}, exiting\", 
err)\n\n break\n except IOError as err:\n console.exit(\"Error when reading from stream: {0}, exiting\", err)\n finally:\n arg_0.close()\n log.info(\"Stream ended\")"} +{"_id": "doc_1459", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Decides what to do with the selected stream.\n\n Depending on arguments it can be one of these:\n - Output internal command-line\n - Output JSON represenation\n - Continuously output the stream over HTTP\n - Output stream data to selected output\n\n \"\"\"\n\n arg_2 = resolve_stream_name(arg_1, arg_2)\n arg_3 = arg_1[arg_2]\n\n # Print internal command-line if this stream\n # uses a subprocess.\n if args.subprocess_cmdline:\n if isinstance(arg_3, StreamProcess):\n try:\n arg_4 = arg_3.cmdline()\n except StreamError as err:\n console.exit(\"{0}\", err)\n\n console.msg(\"{0}\", arg_4)\n else:\n console.exit(\"The stream specified cannot be translated to a command\")\n\n # Print JSON representation of the stream\n elif console.json:\n console.msg_json(arg_3)\n\n elif args.stream_url:\n try:\n console.msg(\"{0}\", arg_3.to_url())\n except TypeError:\n console.exit(\"The stream specified cannot be translated to a URL\")\n\n # Output the stream\n else:\n # Find any streams with a '_alt' suffix and attempt\n # to use these in case the main stream is not usable.\n arg_5 = list(filter(lambda k: arg_2 + \"_alt\" in k,\n sorted(arg_1.keys())))\n arg_6 = args.output or args.stdout\n\n for arg_2 in [arg_2] + arg_5:\n arg_3 = arg_1[arg_2]\n arg_7 = type(arg_3).shortname()\n\n if arg_7 in args.player_passthrough and not arg_6:\n log.info(\"Opening stream: {0} ({1})\", arg_2,\n arg_7)\n arg_8 = output_stream_passthrough(arg_0, arg_3)\n elif args.player_external_http:\n return output_stream_http(arg_0, arg_1, external=True,\n port=args.player_external_http_port)\n elif args.player_continuous_http and not arg_6:\n return output_stream_http(arg_0, arg_1)\n else:\n log.info(\"Opening stream: {0} ({1})\", arg_2,\n arg_7)\n\n arg_8 = output_stream(arg_0, arg_3)\n\n if arg_8:\n break"} +{"_id": "doc_1460", "title": "", "text": "def Func(arg_0):\n \"\"\"Fetches streams using correct parameters.\"\"\"\n\n return arg_0.streams(stream_types=args.stream_types,\n sorting_excludes=args.stream_sorting_excludes)"} +{"_id": "doc_1461", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the real stream name of a synonym.\"\"\"\n\n if arg_1 in STREAM_SYNONYMS and arg_1 in arg_0:\n for arg_2, arg_3 in arg_0.items():\n if arg_3 is arg_0[arg_1] and arg_2 not in STREAM_SYNONYMS:\n return arg_2\n\n return arg_1"} +{"_id": "doc_1462", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Formats a dict of streams.\n\n Filters out synonyms and displays them next to\n the stream they point to.\n\n Streams are sorted according to their quality\n (based on plugin.stream_weight).\n\n \"\"\"\n\n arg_2 = \", \"\n arg_3 = []\n\n for arg_4, arg_5 in sorted(arg_1.items(),\n key=lambda arg_5: arg_0.stream_weight(arg_5[0])):\n if arg_4 in STREAM_SYNONYMS:\n continue\n\n def synonymfilter(arg_6):\n return arg_5 is arg_1[arg_6] and arg_6 is not arg_4\n\n arg_7 = list(filter(synonymfilter, arg_1.keys()))\n\n if len(arg_7) > 0:\n arg_8 = arg_2.join(arg_7)\n arg_4 = \"{0} ({1})\".format(arg_4, arg_8)\n\n arg_3.append(arg_4)\n\n return arg_2.join(arg_3)"} +{"_id": "doc_1463", "title": "", "text": "def Func():\n \"\"\"The URL handler.\n\n Attempts to resolve the URL to a plugin and then attempts\n to fetch a list of available streams.\n\n Proceeds to handle stream if user specified a valid 
one,\n otherwise output list of valid streams.\n\n \"\"\"\n\n try:\n arg_0 = streamlink.resolve_url(arg_7.url)\n setup_plugin_options(streamlink, arg_0)\n log.info(\"Found matching plugin {0} for URL {1}\",\n arg_0.module, arg_7.url)\n\n arg_1 = []\n for arg_2 in arg_0.arguments:\n arg_3 = arg_0.get_option(arg_2.dest)\n if arg_3:\n arg_1.append((arg_2, arg_3))\n\n if arg_1:\n log.debug(\"Plugin specific arguments:\")\n for arg_2, arg_3 in arg_1:\n log.debug(\" {0}={1} ({2})\".format(arg_2.argument_name(arg_0.module),\n arg_3 if not arg_2.sensitive else (\"*\" * 8),\n arg_2.dest))\n\n if arg_7.retry_max or arg_7.retry_streams:\n arg_4 = 1\n arg_5 = 0\n if arg_7.retry_streams:\n arg_4 = arg_7.retry_streams\n if arg_7.retry_max:\n arg_5 = arg_7.retry_max\n arg_6 = fetch_streams_with_retry(arg_0, arg_4,\n arg_5)\n else:\n arg_6 = fetch_streams(arg_0)\n except NoPluginError:\n console.exit(\"No plugin can handle URL: {0}\", arg_7.url)\n except PluginError as arg_11:\n console.exit(u\"{0}\", arg_11)\n\n if not arg_6:\n console.exit(\"No playable streams found on this URL: {0}\", arg_7.url)\n\n if arg_7.default_stream and not arg_7.stream and not arg_7.json:\n arg_7.stream = arg_7.default_stream\n\n if arg_7.stream:\n arg_9 = format_valid_streams(arg_0, arg_6)\n for arg_10 in arg_7.stream:\n if arg_10 in arg_6:\n log.info(\"Available streams: {0}\", arg_9)\n handle_stream(arg_0, arg_6, arg_10)\n return\n\n arg_11 = (\"The specified stream(s) '{0}' could not be \"\n \"found\".format(\", \".join(arg_7.stream)))\n\n if console.json:\n console.msg_json(dict(arg_6=arg_6, arg_0=arg_0.module,\n error=arg_11))\n else:\n console.exit(\"{0}.\\n Available streams: {1}\",\n arg_11, arg_9)\n else:\n if console.json:\n console.msg_json(dict(arg_6=arg_6, arg_0=arg_0.module))\n else:\n arg_9 = format_valid_streams(arg_0, arg_6)\n console.msg(\"Available streams: {0}\", arg_9)"} +{"_id": "doc_1464", "title": "", "text": "def Func():\n \"\"\"Opens a web browser to allow the user to grant Streamlink\n access to their Twitch account.\"\"\"\n\n arg_0 = TWITCH_CLIENT_ID\n arg_1 = \"https://streamlink.github.io/twitch_oauth.html\"\n arg_2 = (\"https://api.twitch.tv/kraken/oauth2/authorize\"\n \"?response_type=token\"\n \"&client_id={0}\"\n \"&redirect_uri={1}\"\n \"&scope=user_read+user_subscriptions\"\n \"&force_verify=true\").format(arg_0, arg_1)\n\n console.msg(\"Attempting to open a browser to let you authenticate \"\n \"Streamlink with Twitch\")\n\n try:\n if not webbrowser.open_new_tab(arg_2):\n raise webbrowser.Error\n except webbrowser.Error:\n console.exit(\"Unable to open a web browser, try accessing this URL \"\n \"manually instead:\\n{0}\".format(arg_2))"} +{"_id": "doc_1465", "title": "", "text": "def Func(arg_0):\n \"\"\"Console setup.\"\"\"\n global arg_1\n\n # All console related operations is handled via the ConsoleOutput class\n arg_1 = ConsoleOutput(arg_0, streamlink)\n arg_1.json = args.json\n\n # Handle SIGTERM just like SIGINT\n signal.signal(signal.SIGTERM, signal.default_int_handler)"} +{"_id": "doc_1466", "title": "", "text": "def Func():\n \"\"\"Sets the global HTTP settings, such as proxy and headers.\"\"\"\n if args.http_proxy:\n streamlink.set_option(\"http-proxy\", args.http_proxy)\n\n if args.https_proxy:\n streamlink.set_option(\"https-proxy\", args.https_proxy)\n\n if args.http_cookie:\n streamlink.set_option(\"http-cookies\", dict(args.http_cookie))\n\n if args.http_header:\n streamlink.set_option(\"http-headers\", dict(args.http_header))\n\n if args.http_query_param:\n 
streamlink.set_option(\"http-query-params\", dict(args.http_query_param))\n\n if args.http_ignore_env:\n streamlink.set_option(\"http-trust-env\", False)\n\n if args.http_no_ssl_verify:\n streamlink.set_option(\"http-ssl-verify\", False)\n\n if args.http_disable_dh:\n streamlink.set_option(\"http-disable-dh\", True)\n\n if args.http_ssl_cert:\n streamlink.set_option(\"http-ssl-cert\", args.http_ssl_cert)\n\n if args.http_ssl_cert_crt_key:\n streamlink.set_option(\"http-ssl-cert\", tuple(args.http_ssl_cert_crt_key))\n\n if args.http_timeout:\n streamlink.set_option(\"http-timeout\", args.http_timeout)\n\n if args.http_cookies:\n streamlink.set_option(\"http-cookies\", args.http_cookies)\n\n if args.http_headers:\n streamlink.set_option(\"http-headers\", args.http_headers)\n\n if args.http_query_params:\n streamlink.set_option(\"http-query-params\", args.http_query_params)"} +{"_id": "doc_1467", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Loads any additional plugins.\"\"\"\n if os.path.isdir(PLUGINS_DIR):\n load_plugins([PLUGINS_DIR])\n\n if arg_0:\n load_plugins(arg_0)"} +{"_id": "doc_1468", "title": "", "text": "def Func():\n \"\"\"Sets Streamlink options.\"\"\"\n if args.hls_live_edge:\n streamlink.set_option(\"hls-live-edge\", args.hls_live_edge)\n\n if args.hls_segment_attempts:\n streamlink.set_option(\"hls-segment-attempts\", args.hls_segment_attempts)\n\n if args.hls_playlist_reload_attempts:\n streamlink.set_option(\"hls-playlist-reload-attempts\", args.hls_playlist_reload_attempts)\n\n if args.hls_segment_threads:\n streamlink.set_option(\"hls-segment-threads\", args.hls_segment_threads)\n\n if args.hls_segment_timeout:\n streamlink.set_option(\"hls-segment-timeout\", args.hls_segment_timeout)\n\n if args.hls_segment_ignore_names:\n streamlink.set_option(\"hls-segment-ignore-names\", args.hls_segment_ignore_names)\n\n if args.hls_segment_key_uri:\n streamlink.set_option(\"hls-segment-key-uri\", args.hls_segment_key_uri)\n\n if args.hls_timeout:\n streamlink.set_option(\"hls-timeout\", args.hls_timeout)\n\n if args.hls_audio_select:\n streamlink.set_option(\"hls-audio-select\", args.hls_audio_select)\n\n if args.hls_start_offset:\n streamlink.set_option(\"hls-start-offset\", args.hls_start_offset)\n\n if args.hls_duration:\n streamlink.set_option(\"hls-duration\", args.hls_duration)\n\n if args.hls_live_restart:\n streamlink.set_option(\"hls-live-restart\", args.hls_live_restart)\n\n if args.hds_live_edge:\n streamlink.set_option(\"hds-live-edge\", args.hds_live_edge)\n\n if args.hds_segment_attempts:\n streamlink.set_option(\"hds-segment-attempts\", args.hds_segment_attempts)\n\n if args.hds_segment_threads:\n streamlink.set_option(\"hds-segment-threads\", args.hds_segment_threads)\n\n if args.hds_segment_timeout:\n streamlink.set_option(\"hds-segment-timeout\", args.hds_segment_timeout)\n\n if args.hds_timeout:\n streamlink.set_option(\"hds-timeout\", args.hds_timeout)\n\n if args.http_stream_timeout:\n streamlink.set_option(\"http-stream-timeout\", args.http_stream_timeout)\n\n if args.ringbuffer_size:\n streamlink.set_option(\"ringbuffer-size\", args.ringbuffer_size)\n\n if args.rtmp_proxy:\n streamlink.set_option(\"rtmp-proxy\", args.rtmp_proxy)\n\n if args.rtmp_rtmpdump:\n streamlink.set_option(\"rtmp-rtmpdump\", args.rtmp_rtmpdump)\n\n if args.rtmp_timeout:\n streamlink.set_option(\"rtmp-timeout\", args.rtmp_timeout)\n\n if args.stream_segment_attempts:\n streamlink.set_option(\"stream-segment-attempts\", args.stream_segment_attempts)\n\n if 
args.stream_segment_threads:\n streamlink.set_option(\"stream-segment-threads\", args.stream_segment_threads)\n\n if args.stream_segment_timeout:\n streamlink.set_option(\"stream-segment-timeout\", args.stream_segment_timeout)\n\n if args.stream_timeout:\n streamlink.set_option(\"stream-timeout\", args.stream_timeout)\n\n if args.ffmpeg_ffmpeg:\n streamlink.set_option(\"ffmpeg-ffmpeg\", args.ffmpeg_ffmpeg)\n if args.ffmpeg_verbose:\n streamlink.set_option(\"ffmpeg-verbose\", args.ffmpeg_verbose)\n if args.ffmpeg_verbose_path:\n streamlink.set_option(\"ffmpeg-verbose-path\", args.ffmpeg_verbose_path)\n if args.ffmpeg_video_transcode:\n streamlink.set_option(\"ffmpeg-video-transcode\", args.ffmpeg_video_transcode)\n if args.ffmpeg_audio_transcode:\n streamlink.set_option(\"ffmpeg-audio-transcode\", args.ffmpeg_audio_transcode)\n\n streamlink.set_option(\"subprocess-errorlog\", args.subprocess_errorlog)\n streamlink.set_option(\"subprocess-errorlog-path\", args.subprocess_errorlog_path)\n streamlink.set_option(\"locale\", args.locale)"} +{"_id": "doc_1469", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fallback if no stream_id was found before\"\"\"\n arg_2 = arg_0._iframe_re.search(arg_1)\n if arg_2:\n return arg_0.session.streams(arg_2.group(\"url\"))"} +{"_id": "doc_1470", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns current value of specified option.\n\n :param key: key of the option\n\n \"\"\"\n # Backwards compatibility\n if arg_1 == \"rtmpdump\":\n arg_1 = \"rtmp-rtmpdump\"\n elif arg_1 == \"rtmpdump-proxy\":\n arg_1 = \"rtmp-proxy\"\n elif arg_1 == \"errorlog\":\n arg_1 = \"subprocess-errorlog\"\n\n if arg_1 == \"http-proxy\":\n return arg_0.http.proxies.get(\"http\")\n elif arg_1 == \"https-proxy\":\n return arg_0.http.proxies.get(\"https\")\n elif arg_1 == \"http-cookies\":\n return arg_0.http.cookies\n elif arg_1 == \"http-headers\":\n return arg_0.http.headers\n elif arg_1 == \"http-query-params\":\n return arg_0.http.params\n elif arg_1 == \"http-trust-env\":\n return arg_0.http.trust_env\n elif arg_1 == \"http-ssl-verify\":\n return arg_0.http.verify\n elif arg_1 == \"http-ssl-cert\":\n return arg_0.http.cert\n elif arg_1 == \"http-timeout\":\n return arg_0.http.timeout\n else:\n return arg_0.options.get(arg_1)"} +{"_id": "doc_1471", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns current value of plugin specific option.\n\n :param plugin: name of the plugin\n :param key: key of the option\n\n \"\"\"\n\n if arg_1 in arg_0.plugins:\n arg_1 = arg_0.plugins[arg_1]\n return arg_1.get_option(arg_2)"} +{"_id": "doc_1472", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Attempts to find a plugin that can use this URL.\n\n The default protocol (http) will be prefixed to the URL if\n not specified.\n\n Raises :exc:`NoPluginError` on failure.\n\n :param url: a URL to match against loaded plugins\n :param follow_redirect: follow redirects\n\n \"\"\"\n arg_1 = update_scheme(\"http://\", arg_1)\n\n arg_3 = []\n for arg_4, arg_5 in arg_0.plugins.items():\n if arg_5.can_handle_url(arg_1):\n arg_3.append(arg_5)\n\n arg_3.sort(key=lambda x: x.priority(arg_1), reverse=True)\n if arg_3:\n return arg_3[0](arg_1)\n\n if arg_2:\n # Attempt to handle a redirect URL\n try:\n arg_6 = arg_0.http.head(arg_1, allow_redirects=True, acceptable_status=[501])\n\n # Fall back to GET request if server doesn't handle HEAD.\n if arg_6.status_code == 501:\n arg_6 = arg_0.http.get(arg_1, stream=True)\n\n if arg_6.url != arg_1:\n return 
arg_0.Func(arg_6.url, arg_2=arg_2)\n except PluginError:\n pass\n\n raise NoPluginError"} +{"_id": "doc_1473", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks if the string value starts with another string.\"\"\"\n def starts_with(arg_1):\n validate(text, arg_1)\n if not arg_1.Func(arg_0):\n raise ValueError(\"'{0}' does not start with '{1}'\".format(arg_1, arg_0))\n return True\n\n return starts_with"} +{"_id": "doc_1474", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks if the string value Func another string.\"\"\"\n def Func_str(arg_1):\n validate(text, arg_1)\n if arg_0 not in arg_1:\n raise ValueError(\"'{0}' does not contain '{1}'\".format(arg_1, arg_0))\n return True\n\n return Func_str"} +{"_id": "doc_1475", "title": "", "text": "def Func(arg_0):\n \"\"\"Filters out unwanted items using the specified function.\n\n Supports both dicts and sequences, key/value pairs are\n expanded when applied to a dict.\n \"\"\"\n def expand_kv(arg_1):\n return arg_0(*arg_1)\n\n def Func_values(arg_2):\n arg_3 = type(arg_2)\n if isinstance(arg_2, dict):\n return arg_3(_Func(expand_kv, arg_2.items()))\n else:\n return arg_3(_Func(arg_0, arg_2))\n\n return transform(Func_values)"} +{"_id": "doc_1476", "title": "", "text": "def Func(arg_0):\n \"\"\"Apply function to each value inside the sequence or dict.\n\n Supports both dicts and sequences, key/value pairs are\n expanded when applied to a dict.\n \"\"\"\n # text is an alias for basestring on Python 2, which cannot be\n # instantiated and therefore can't be used to transform the value,\n # so we force to unicode instead.\n if is_py2 and text == arg_0:\n arg_0 = unicode\n\n def expand_kv(arg_1):\n return arg_0(*arg_1)\n\n def Func_values(arg_2):\n arg_3 = type(arg_2)\n if isinstance(arg_2, dict):\n return arg_3(_Func(expand_kv, arg_2.items()))\n else:\n return arg_3(_Func(arg_0, arg_2))\n\n return transform(Func_values)"} +{"_id": "doc_1477", "title": "", "text": "def Func(**arg_0):\n \"\"\"Parses an URL and validates its attributes.\"\"\"\n def check_Func(arg_1):\n validate(text, arg_1)\n arg_2 = Funcparse(arg_1)\n if not arg_2.netloc:\n raise ValueError(\"'{0}' is not a valid URL\".format(arg_1))\n\n for arg_3, arg_4 in arg_0.items():\n if not _hasattr(arg_2, arg_3):\n raise ValueError(\"Invalid URL attribute '{0}'\".format(arg_3))\n\n try:\n validate(arg_4, _getattr(arg_2, arg_3))\n except ValueError as err:\n raise ValueError(\n \"Unable to validate URL attribute '{0}': {1}\".format(\n arg_3, err\n )\n )\n\n return True\n\n # Convert \"http\" to be either any(\"http\", \"https\") for convenience\n if arg_0.get(\"scheme\") == \"http\":\n arg_0[\"scheme\"] = any(\"http\", \"https\")\n\n return check_Func"} +{"_id": "doc_1478", "title": "", "text": "def Func(arg_0):\n \"\"\"Find a list of XML elements via xpath.\"\"\"\n def xpath_findall(arg_1):\n validate(ET.iselement, arg_1)\n return arg_1.findall(arg_0)\n\n return transform(xpath_findall)"} +{"_id": "doc_1479", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=arg_3, **arg_4):\n \"\"\"Attempts to parse a M3U8 playlist from a string of data.\n\n If specified, *base_uri* is the base URI that relative URIs will\n be joined together with, otherwise relative URIs will be as is.\n\n If specified, *parser* can be a M3U8Parser subclass to be used\n to parse the data.\n\n \"\"\"\n return arg_2(arg_1, **arg_4).parse(arg_0)"} +{"_id": "doc_1480", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"\", arg_4=\"\", arg_5=\"-1\", arg_6=\"\", arg_7=\"\"):\n \"\"\"\n Logs in to Steam\n\n 
\"\"\"\n arg_8, arg_9 = arg_0.encrypt_password(arg_1, arg_2)\n\n arg_10 = {\n 'username': arg_1,\n \"password\": arg_8,\n \"emailauth\": arg_3,\n \"loginfriendlyname\": \"Streamlink\",\n \"captchagid\": arg_5,\n \"captcha_text\": arg_6,\n \"emailsteamid\": arg_4,\n \"rsatimestamp\": arg_9,\n \"remember_login\": True,\n \"donotcache\": arg_0.donotcache,\n \"twofactorcode\": arg_7\n }\n\n arg_11 = arg_0.session.http.post(arg_0._Func_url, data=arg_10)\n\n arg_12 = arg_0.session.http.json(arg_11, schema=arg_0._Func_schema)\n\n if not arg_12[u\"success\"]:\n if arg_12.get(u\"captcha_needed\"):\n # special case for captcha\n arg_5 = arg_12[u\"captcha_gid\"]\n log.error(\"Captcha result required, open this URL to see the captcha: {}\".format(\n arg_0._captcha_url.format(arg_5)))\n try:\n arg_6 = arg_0.input_ask(\"Captcha text\")\n except FatalPluginError:\n arg_6 = None\n if not arg_6:\n return False\n else:\n # If the user must enter the code that was emailed to them\n if arg_12.get(u\"emailauth_needed\"):\n if not arg_3:\n try:\n arg_3 = arg_0.input_ask(\"Email auth code required\")\n except FatalPluginError:\n arg_3 = None\n if not arg_3:\n return False\n else:\n raise SteamLoginFailed(\"Email auth key error\")\n\n # If the user must enter a two factor auth code\n if arg_12.get(u\"requires_twofactor\"):\n try:\n arg_7 = arg_0.input_ask(\"Two factor auth code required\")\n except FatalPluginError:\n arg_7 = None\n if not arg_7:\n return False\n\n if arg_12.get(u\"message\"):\n raise SteamLoginFailed(arg_12[u\"message\"])\n\n return arg_0.Func(arg_1, arg_2,\n arg_3=arg_3,\n arg_4=arg_12.get(u\"emailsteamid\", u\"\"),\n arg_6=arg_6,\n arg_5=arg_5,\n arg_7=arg_7)\n elif arg_12.get(\"login_complete\"):\n return True\n else:\n log.error(\"Something when wrong when logging in to Steam\")\n return False"} +{"_id": "doc_1481", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the stream_id contained in the HTML.\"\"\"\n arg_2 = stream_id_pattern.search(arg_1)\n\n if not arg_2:\n arg_0.logger.error(\"Failed to extract stream_id.\")\n\n return arg_2.group(\"stream_id\")"} +{"_id": "doc_1482", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Creates a key-function Funcping.\n\n The return value from the function should be either\n - A tuple containing a name and stream\n - A iterator of tuples containing a name and stream\n\n Any extra arguments will be passed to the function.\n \"\"\"\n arg_0._Func.append((arg_1, partial(arg_2, *arg_3, **arg_4)))"} +{"_id": "doc_1483", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Makes a call against the api.\n\n :param entrypoint: API method to call.\n :param params: parameters to include in the request data.\n :param schema: schema to use to validate the data\n \"\"\"\n arg_4 = arg_0._api_url.format(arg_1)\n\n # Default params\n arg_2 = arg_2 or {}\n if arg_0.session_id:\n arg_2.update({\n \"session_id\": arg_0.session_id\n })\n else:\n arg_2.update({\n \"device_id\": arg_0.device_id,\n \"device_type\": arg_0._access_type,\n \"access_token\": arg_0._access_token,\n \"version\": arg_0._version_code\n })\n arg_2.update({\n \"locale\": arg_0.locale.replace('_', ''),\n })\n\n if arg_0.session_id:\n arg_2[\"session_id\"] = arg_0.session_id\n\n # The certificate used by Crunchyroll cannot be verified in some environments.\n arg_5 = arg_0.session.http.post(arg_4, arg_9=arg_2, headers=arg_0.headers, verify=False)\n arg_6 = arg_0.session.http.json(arg_5, arg_3=_api_schema)\n\n if arg_6[\"error\"]:\n arg_7 
= arg_6.get(\"message\", \"Unknown error\")\n arg_8 = arg_6.get(\"code\", \"unknown_error\")\n raise CrunchyrollAPIError(arg_7, arg_8)\n\n arg_9 = arg_6.get(\"data\")\n if arg_3:\n arg_9 = arg_3.validate(arg_9, name=\"API response\")\n\n return arg_9"} +{"_id": "doc_1484", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Starts a session against Crunchyroll's server.\n Is recommended that you call this method before making any other calls\n to make sure you have a valid session against the server.\n \"\"\"\n arg_1 = {}\n if arg_0.auth:\n arg_1[\"auth\"] = arg_0.auth\n arg_0.session_id = arg_0._api_call(\"Func\", arg_1, schema=_session_schema)\n log.debug(\"Session created with ID: {0}\".format(arg_0.session_id))\n return arg_0.session_id"} +{"_id": "doc_1485", "title": "", "text": "def Func(arg_0):\n \"\"\"Creates a new CrunchyrollAPI object, initiates it's session and\n tries to authenticate it either by using saved credentials or the\n user's username and password.\n \"\"\"\n if arg_0.options.get(\"purge_credentials\"):\n arg_0.cache.set(\"session_id\", None, 0)\n arg_0.cache.set(\"auth\", None, 0)\n arg_0.cache.set(\"session_id\", None, 0)\n\n # use the crunchyroll locale as an override, for backwards compatibility\n arg_1 = arg_0.get_option(\"locale\") or arg_0.session.localization.language_code\n arg_2 = CrunchyrollAPI(arg_0.cache,\n arg_0.session,\n session_id=arg_0.get_option(\"session_id\"),\n arg_1=arg_1)\n\n if not arg_0.get_option(\"session_id\"):\n arg_0.logger.debug(\"Creating session with locale: {0}\", arg_1)\n arg_2.start_session()\n\n if arg_2.auth:\n arg_0.logger.debug(\"Using saved credentials\")\n arg_3 = arg_2.authenticate()\n arg_0.logger.info(\"Successfully logged in as '{0}'\",\n arg_3[\"user\"][\"username\"] or arg_3[\"user\"][\"email\"])\n elif arg_0.options.get(\"username\"):\n try:\n arg_0.logger.debug(\"Attempting to login using username and password\")\n arg_2.login(arg_0.options.get(\"username\"),\n arg_0.options.get(\"password\"))\n arg_3 = arg_2.authenticate()\n arg_0.logger.info(\"Logged in as '{0}'\",\n arg_3[\"user\"][\"username\"] or arg_3[\"user\"][\"email\"])\n\n except CrunchyrollAPIError as err:\n raise PluginError(u\"Authentication error: {0}\".format(err.msg))\n else:\n arg_0.logger.warning(\n \"No authentication provided, you won't be able to access \"\n \"premium restricted content\"\n )\n\n return arg_2"} +{"_id": "doc_1486", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3):\n \"\"\"Log 'msg % args' at level 'level' only if condition is fulfilled.\"\"\"\n if arg_2:\n vlog(arg_0, arg_1, *arg_3)"} +{"_id": "doc_1487", "title": "", "text": "def Func(\n arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=600,\n arg_6=arg_7(), arg_8=arg_7(), arg_9=None, arg_10=120,\n arg_11=100\n):\n \"\"\"Creates a distributed session.\n\n It calls `MonitoredTrainingSession` to create a :class:`MonitoredSession` for distributed training.\n\n Parameters\n ----------\n task_spec : :class:`TaskSpecDef`.\n The task spec definition from create_task_spec_def()\n checkpoint_dir : str.\n Optional path to a directory where to restore variables.\n scaffold : ``Scaffold``\n A `Scaffold` used for gathering or building supportive ops.\n If not specified, a default one is created. 
It's used to finalize the graph.\n hooks : list of ``SessionRunHook`` objects.\n Optional\n chief_only_hooks : list of ``SessionRunHook`` objects.\n Activate these hooks if `is_chief==True`, ignore otherwise.\n save_checkpoint_secs : int\n The frequency, in seconds, that a checkpoint is saved\n using a default checkpoint saver. If `save_checkpoint_secs` is set to\n `None`, then the default checkpoint saver isn't used.\n save_summaries_steps : int\n The frequency, in number of global steps, that the\n summaries are written to disk using a default summary saver. If both\n `save_summaries_steps` and `save_summaries_secs` are set to `None`, then\n the default summary saver isn't used. Default 100.\n save_summaries_secs : int\n The frequency, in secs, that the summaries are written\n to disk using a default summary saver. If both `save_summaries_steps` and\n `save_summaries_secs` are set to `None`, then the default summary saver\n isn't used. Default not enabled.\n config : ``tf.ConfigProto``\n an instance of `tf.ConfigProto` proto used to configure the session.\n It's the `config` argument of constructor of `tf.Session`.\n stop_grace_period_secs : int\n Number of seconds given to threads to stop after\n `close()` has been called.\n log_step_count_steps : int\n The frequency, in number of global steps, that the\n global step/sec is logged.\n\n Examples\n --------\n A simple example for distributed training where all the workers use the same dataset:\n\n >>> task_spec = TaskSpec()\n >>> with tf.device(task_spec.device_fn()):\n >>> tensors = create_graph()\n >>> with tl.DistributedSession(task_spec=task_spec,\n ... checkpoint_dir='/tmp/ckpt') as session:\n >>> while not session.should_stop():\n >>> session.run(tensors)\n\n An example where the dataset is shared among the workers\n (see https://www.tensorflow.org/programmers_guide/datasets):\n\n >>> task_spec = TaskSpec()\n >>> # dataset is a :class:`tf.data.Dataset` with the raw data\n >>> dataset = create_dataset()\n >>> if task_spec is not None:\n >>> dataset = dataset.shard(task_spec.num_workers, task_spec.shard_index)\n >>> # shuffle or apply a map function to the new sharded dataset, for example:\n >>> dataset = dataset.shuffle(buffer_size=10000)\n >>> dataset = dataset.batch(batch_size)\n >>> dataset = dataset.repeat(num_epochs)\n >>> # create the iterator for the dataset and the input tensor\n >>> iterator = dataset.make_one_shot_iterator()\n >>> next_element = iterator.get_next()\n >>> with tf.device(task_spec.device_fn()):\n >>> # next_element is the input for the graph\n >>> tensors = create_graph(next_element)\n >>> with tl.DistributedSession(task_spec=task_spec,\n ... 
checkpoint_dir='/tmp/ckpt') as session:\n >>> while not session.should_stop():\n >>> session.run(tensors)\n\n References\n ----------\n - `MonitoredTrainingSession `__\n\n \"\"\"\n arg_12 = arg_0.target() if arg_0 is not None else None\n arg_13 = arg_0.is_master() if arg_0 is not None else True\n return tf.train.MonitoredTrainingSession(\n master=arg_12, arg_13=arg_13, arg_1=arg_1, arg_2=arg_2,\n arg_5=arg_5, arg_6=arg_6,\n arg_8=arg_8, arg_11=arg_11,\n arg_10=arg_10, arg_9=arg_9, arg_3=arg_3, arg_4=arg_4\n )"} +{"_id": "doc_1488", "title": "", "text": "def Func(arg_0, arg_1=50):\n \"\"\"A helper function that shows how to train and validate a model at the same time.\n\n Parameters\n ----------\n validate_step_size : int\n Validate the training network every N steps.\n\n \"\"\"\n while not arg_0._sess.should_stop():\n arg_0.train_on_batch() # Run a training step synchronously.\n if arg_0.global_step % arg_1 == 0:\n # logging.info(\"Average loss for validation dataset: %s\" % self.get_validation_metrics())\n arg_2 = 'step: %d, ' % arg_0.global_step\n for arg_3, arg_4 in arg_0.validation_metrics:\n arg_2 += '%s: %f, ' % (arg_3.name, arg_4)\n logging.info(arg_2)"} +{"_id": "doc_1489", "title": "", "text": "def Func(arg_0, arg_1, arg_2='mnist', arg_3='http://yann.lecun.com/exdb/mnist/'):\n \"\"\"A generic function to load mnist-like dataset.\n\n Parameters:\n ----------\n shape : tuple\n The shape of digit images.\n path : str\n The path that the data is downloaded to.\n name : str\n The dataset name you want to use(the default is 'mnist').\n url : str\n The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').\n \"\"\"\n arg_1 = os.path.join(arg_1, arg_2)\n\n # Define functions for loading mnist-like data's images and labels.\n # For convenience, they also download the requested files if needed.\n def load_mnist_images(arg_1, arg_4):\n arg_5 = maybe_download_and_extract(arg_4, arg_1, arg_3)\n\n logging.info(arg_5)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(arg_5, 'rb') as f:\n arg_6 = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n arg_6 = arg_6.reshape(arg_0)\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return arg_6 / np.float32(256)\n\n def load_mnist_labels(arg_1, arg_4):\n arg_5 = maybe_download_and_extract(arg_4, arg_1, arg_3)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(arg_5, 'rb') as f:\n arg_6 = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return arg_6\n\n # Download and read the training and test set images and labels.\n logging.info(\"Load or Download {0} > {1}\".format(arg_2.upper(), arg_1))\n arg_7 = load_mnist_images(arg_1, 'train-images-idx3-ubyte.gz')\n arg_8 = load_mnist_labels(arg_1, 'train-labels-idx1-ubyte.gz')\n arg_9 = load_mnist_images(arg_1, 't10k-images-idx3-ubyte.gz')\n arg_10 = load_mnist_labels(arg_1, 't10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n arg_7, arg_11 = arg_7[:-10000], arg_7[-10000:]\n arg_8, arg_12 = arg_8[:-10000], arg_8[-10000:]\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them 
again.)\n arg_7 = np.asarray(arg_7, dtype=np.float32)\n arg_8 = np.asarray(arg_8, dtype=np.int32)\n arg_11 = np.asarray(arg_11, dtype=np.float32)\n arg_12 = np.asarray(arg_12, dtype=np.int32)\n arg_9 = np.asarray(arg_9, dtype=np.float32)\n arg_10 = np.asarray(arg_10, dtype=np.int32)\n return arg_7, arg_8, arg_11, arg_12, arg_9, arg_10"} +{"_id": "doc_1490", "title": "", "text": "def Func(arg_0='data'):\n \"\"\"Load Matt Mahoney's dataset.\n\n Download a text file from Matt Mahoney's website\n if not present, and make sure it's the right size.\n Extract the first file enclosed in a zip file as a list of words.\n This dataset can be used for Word Embedding.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/mm_test8/``.\n\n Returns\n --------\n list of str\n The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]\n\n Examples\n --------\n >>> words = tl.files.Func()\n >>> print('Data size', len(words))\n\n \"\"\"\n arg_0 = os.path.join(arg_0, 'mm_test8')\n logging.info(\"Load or Download matt_mahoney_text8 Dataset> {}\".format(arg_0))\n\n arg_1 = 'text8.zip'\n arg_2 = 'http://mattmahoney.net/dc/'\n maybe_download_and_extract(arg_1, arg_0, arg_2, expected_bytes=31344016)\n\n with zipfile.ZipFile(os.path.join(arg_0, arg_1)) as f:\n arg_3 = f.read(f.namelist()[0]).split()\n for arg_4, arg_5 in enumerate(arg_3):\n arg_3[arg_4] = arg_3[arg_4].decode()\n return arg_3"} +{"_id": "doc_1491", "title": "", "text": "def Func(\n arg_0='data', arg_1=None, arg_2=0, arg_3=None, arg_4=0.2, arg_5=113, arg_6=1, arg_7=2,\n arg_8=3\n):\n \"\"\"Load IMDB dataset.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/imdb/``.\n nb_words : int\n Number of words to get.\n skip_top : int\n Top most frequent words to ignore (they will appear as oov_char value in the sequence data).\n maxlen : int\n Maximum sequence length. Any longer sequence will be truncated.\n seed : int\n Seed for reproducible data shuffling.\n start_char : int\n The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.\n oov_char : int\n Words that were cut out because of the num_words or skip_top limit will be replaced with this character.\n index_from : int\n Index actual words with this index and higher.\n\n Examples\n --------\n >>> X_train, y_train, X_test, y_test = tl.files.Func(\n ... nb_words=20000, test_split=0.2)\n >>> print('X_train.shape', X_train.shape)\n (20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]\n >>> print('y_train.shape', y_train.shape)\n (20000,) [1 0 0 ..., 1 0 1]\n\n References\n -----------\n - `Modified from keras. 
`__\n\n \"\"\"\n arg_0 = os.path.join(arg_0, 'imdb')\n\n arg_9 = \"imdb.pkl\"\n arg_10 = 'https://s3.amazonaws.com/text-datasets/'\n maybe_download_and_extract(arg_9, arg_0, arg_10)\n\n if arg_9.endswith(\".gz\"):\n arg_11 = gzip.open(os.path.join(arg_0, arg_9), 'rb')\n else:\n arg_11 = open(os.path.join(arg_0, arg_9), 'rb')\n\n arg_12, arg_13 = cPickle.load(arg_11)\n arg_11.close()\n\n np.random.seed(arg_5)\n np.random.shuffle(arg_12)\n np.random.seed(arg_5)\n np.random.shuffle(arg_13)\n\n if arg_6 is not None:\n arg_12 = [[arg_6] + [arg_20 + arg_8 for arg_20 in arg_16] for arg_16 in arg_12]\n elif arg_8:\n arg_12 = [[arg_20 + arg_8 for arg_20 in arg_16] for arg_16 in arg_12]\n\n if arg_3:\n arg_14 = []\n arg_15 = []\n for arg_16, arg_17 in zip(arg_12, arg_13):\n if len(arg_16) < arg_3:\n arg_14.append(arg_16)\n arg_15.append(arg_17)\n arg_12 = arg_14\n arg_13 = arg_15\n if not arg_12:\n raise Exception(\n 'After filtering for sequences shorter than maxlen=' + str(arg_3) + ', no sequence was kept. '\n 'Increase maxlen.'\n )\n if not arg_1:\n arg_1 = max([max(arg_16) for arg_16 in arg_12])\n\n # by convention, use 2 as OOV word\n # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)\n if arg_7 is not None:\n arg_12 = [[arg_7 if (arg_20 >= arg_1 or arg_20 < arg_2) else arg_20 for arg_20 in arg_16] for arg_16 in arg_12]\n else:\n arg_18 = []\n for arg_16 in arg_12:\n arg_19 = []\n for arg_20 in arg_16:\n if (arg_20 >= arg_1 or arg_20 < arg_2):\n arg_19.append(arg_20)\n arg_18.append(arg_19)\n arg_12 = arg_18\n\n arg_21 = np.array(arg_12[:int(len(arg_12) * (1 - arg_4))])\n arg_22 = np.array(arg_13[:int(len(arg_12) * (1 - arg_4))])\n\n arg_23 = np.array(arg_12[int(len(arg_12) * (1 - arg_4)):])\n arg_24 = np.array(arg_13[int(len(arg_12) * (1 - arg_4)):])\n\n return arg_21, arg_22, arg_23, arg_24"} +{"_id": "doc_1492", "title": "", "text": "def Func(arg_0='data'):\n \"\"\"Load Nietzsche dataset.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/nietzsche/``.\n\n Returns\n --------\n str\n The content.\n\n Examples\n --------\n >>> see tutorial_generate_text.py\n >>> words = tl.files.Func()\n >>> words = basic_clean_str(words)\n >>> words = words.split()\n\n \"\"\"\n logging.info(\"Load or Download nietzsche dataset > {}\".format(arg_0))\n arg_0 = os.path.join(arg_0, 'nietzsche')\n\n arg_1 = \"nietzsche.txt\"\n arg_2 = 'https://s3.amazonaws.com/text-datasets/'\n arg_3 = maybe_download_and_extract(arg_1, arg_0, arg_2)\n\n with open(arg_3, \"r\") as f:\n arg_4 = f.read()\n return arg_4"} +{"_id": "doc_1493", "title": "", "text": "def Func(arg_0='data'):\n \"\"\"Load WMT'15 English-to-French translation dataset.\n\n It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.\n Returns the directories of training data and test data.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.\n\n References\n ----------\n - Code modified from /tensorflow/models/rnn/translation/data_utils.py\n\n Notes\n -----\n Usually, it will take a long time to download this dataset.\n\n \"\"\"\n arg_0 = os.path.join(arg_0, 'wmt_en_fr')\n # URLs for WMT data.\n arg_1 = \"http://www.statmt.org/wmt10/\"\n arg_2 = \"http://www.statmt.org/wmt15/\"\n\n def gunzip_file(arg_3, arg_4):\n \"\"\"Unzips from gz_path into new_path.\"\"\"\n logging.info(\"Unpacking %s to %s\" % (arg_3, arg_4))\n with 
gzip.open(arg_3, \"rb\") as gz_file:\n with open(arg_4, \"wb\") as new_file:\n for arg_5 in gz_file:\n new_file.write(arg_5)\n\n def get_wmt_enfr_train_set(arg_0):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n arg_6 = \"training-giga-fren.tar\"\n maybe_download_and_extract(arg_6, arg_0, arg_1, extract=True)\n arg_7 = os.path.join(arg_0, \"giga-fren.release2.fixed\")\n gunzip_file(arg_7 + \".fr.gz\", arg_7 + \".fr\")\n gunzip_file(arg_7 + \".en.gz\", arg_7 + \".en\")\n return arg_7\n\n def get_wmt_enfr_dev_set(arg_0):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n arg_6 = \"dev-v2.tgz\"\n arg_8 = maybe_download_and_extract(arg_6, arg_0, arg_2, extract=False)\n arg_9 = \"newstest2013\"\n arg_10 = os.path.join(arg_0, \"newstest2013\")\n if not (gfile.Exists(arg_10 + \".fr\") and gfile.Exists(arg_10 + \".en\")):\n logging.info(\"Extracting tgz file %s\" % arg_8)\n with tarfile.open(arg_8, \"r:gz\") as dev_tar:\n arg_11 = dev_tar.getmember(\"dev/\" + arg_9 + \".fr\")\n arg_12 = dev_tar.getmember(\"dev/\" + arg_9 + \".en\")\n arg_11.name = arg_9 + \".fr\" # Extract without \"dev/\" prefix.\n arg_12.name = arg_9 + \".en\"\n dev_tar.extract(arg_11, arg_0)\n dev_tar.extract(arg_12, arg_0)\n return arg_10\n\n logging.info(\"Load or Download WMT English-to-French translation > {}\".format(arg_0))\n\n arg_7 = get_wmt_enfr_train_set(arg_0)\n arg_10 = get_wmt_enfr_dev_set(arg_0)\n\n return arg_7, arg_10"} +{"_id": "doc_1494", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Download file from Google Drive.\n\n See ``tl.files.load_celebA_dataset`` for example.\n\n Parameters\n --------------\n ID : str\n The driver ID.\n destination : str\n The destination for save file.\n\n \"\"\"\n\n def save_response_content(arg_2, arg_1, arg_3=32 * 1024):\n arg_4 = int(arg_2.headers.get('content-length', 0))\n with open(arg_1, \"wb\") as f:\n for arg_5 in tqdm(arg_2.iter_content(arg_3), total=arg_4, unit='B', unit_scale=True,\n desc=arg_1):\n if arg_5: # filter out keep-alive new chunks\n f.write(arg_5)\n\n def get_confirm_token(arg_2):\n for arg_6, arg_7 in arg_2.cookies.items():\n if arg_6.startswith('download_warning'):\n return arg_7\n return None\n\n arg_8 = \"https://docs.google.com/uc?export=download\"\n arg_9 = requests.Session()\n\n arg_2 = arg_9.get(arg_8, arg_11={'id': arg_0}, stream=True)\n arg_10 = get_confirm_token(arg_2)\n\n if arg_10:\n arg_11 = {'id': arg_0, 'confirm': arg_10}\n arg_2 = arg_9.get(arg_8, arg_11=arg_11, stream=True)\n save_response_content(arg_2, arg_1)"} +{"_id": "doc_1495", "title": "", "text": "def Func(arg_0='data'):\n \"\"\"Load CelebA dataset\n\n Return a list of image path.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to, defaults is ``data/celebA/``.\n\n \"\"\"\n arg_1 = 'celebA'\n arg_2, arg_3 = \"img_align_celeba.zip\", \"0B7EVK8r0v71pZjFTYXZWM3FlRnM\"\n arg_4 = os.path.join(arg_0, arg_2)\n arg_5 = os.path.join(arg_0, arg_1)\n if os.path.exists(arg_5):\n logging.info('[*] {} already exists'.format(arg_4))\n else:\n exists_or_mkdir(arg_0)\n download_file_from_google_drive(arg_3, arg_4)\n arg_6 = ''\n with zipfile.ZipFile(arg_4) as zf:\n arg_6 = zf.namelist()[0]\n zf.extractall(arg_0)\n os.remove(arg_4)\n os.rename(os.path.join(arg_0, arg_6), arg_5)\n\n arg_7 = load_file_list(arg_0=arg_5, regx='\\\\.jpg', printable=False)\n for arg_8, arg_9 in enumerate(arg_7):\n arg_7[arg_8] = os.path.join(arg_5, arg_7[arg_8])\n return arg_7"} +{"_id": "doc_1496", "title": 
"", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Assign the given parameters to the TensorLayer network.\n\n Parameters\n ----------\n sess : Session\n TensorFlow Session.\n params : list of array\n A list of parameters (array) in order.\n network : :class:`Layer`\n The network to be assigned.\n\n Returns\n --------\n list of operations\n A list of tf ops in order that assign params. Support sess.run(ops) manually.\n\n Examples\n --------\n - See ``tl.files.save_npz``\n\n References\n ----------\n - `Assign value to a TensorFlow variable `__\n\n \"\"\"\n arg_3 = []\n for arg_4, arg_5 in enumerate(arg_1):\n arg_3.append(arg_2.all_params[arg_4].assign(arg_5))\n if arg_0 is not None:\n arg_0.run(arg_3)\n return arg_3"} +{"_id": "doc_1497", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"Load model from npz and assign to a network.\n\n Parameters\n -------------\n sess : Session\n TensorFlow Session.\n name : str\n The name of the `.npz` file.\n network : :class:`Layer`\n The network to be assigned.\n\n Returns\n --------\n False or network\n Returns False, if the model is not exist.\n\n Examples\n --------\n - See ``tl.files.save_npz``\n\n \"\"\"\n if arg_2 is None:\n raise ValueError(\"network is None.\")\n if arg_0 is None:\n raise ValueError(\"session is None.\")\n if not os.path.exists(arg_1):\n logging.error(\"file {} doesn't exist.\".format(arg_1))\n return False\n else:\n arg_3 = load_npz(arg_1=arg_1)\n assign_params(arg_0, arg_3, arg_2)\n logging.info(\"[*] Load {} SUCCESS!\".format(arg_1))\n return arg_2"} +{"_id": "doc_1498", "title": "", "text": "def Func(arg_0=None, arg_1='model.npz', arg_2=None):\n \"\"\"Input parameters and the file name, save parameters as a dictionary into .npz file.\n\n Use ``tl.files.load_and_assign_npz_dict()`` to restore.\n\n Parameters\n ----------\n save_list : list of parameters\n A list of parameters (tensor) to be saved.\n name : str\n The name of the `.npz` file.\n sess : Session\n TensorFlow Session.\n\n \"\"\"\n if arg_2 is None:\n raise ValueError(\"session is None.\")\n if arg_0 is None:\n arg_0 = []\n\n arg_3 = [tensor.name for tensor in arg_0]\n arg_4 = arg_2.run(arg_0)\n arg_5 = {arg_3[idx]: val for idx, val in enumerate(arg_4)}\n np.savez(arg_1, **arg_5)\n arg_4 = None\n arg_5 = None\n del arg_4\n del arg_5\n logging.info(\"[*] Model saved in npz_dict %s\" % arg_1)"} +{"_id": "doc_1499", "title": "", "text": "def Func(arg_0=None, arg_1='model.ckpt', arg_2='checkpoint', arg_3=None, arg_4=True, arg_5=False):\n \"\"\"Load parameters from `ckpt` file.\n\n Parameters\n ------------\n sess : Session\n TensorFlow Session.\n mode_name : str\n The name of the model, default is ``model.ckpt``.\n save_dir : str\n The path / file directory to the `ckpt`, default is ``checkpoint``.\n var_list : list of tensor\n The parameters / variables (tensor) to be saved. 
If empty, save all global variables (default).\n is_latest : boolean\n Whether to load the latest `ckpt`, if False, load the `ckpt` with the name of ```mode_name``.\n printable : boolean\n Whether to print all parameters information.\n\n Examples\n ----------\n - Save all global parameters.\n\n >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='model', printable=True)\n\n - Save specific parameters.\n\n >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', printable=True)\n\n - Load latest ckpt.\n\n >>> tl.files.Func(sess=sess, var_list=net.all_params, save_dir='model', printable=True)\n\n - Load specific ckpt.\n\n >>> tl.files.Func(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', is_latest=False, printable=True)\n\n \"\"\"\n if arg_0 is None:\n raise ValueError(\"session is None.\")\n if arg_3 is None:\n arg_3 = []\n\n if arg_4:\n arg_6 = tf.train.latest_checkpoint(arg_2)\n else:\n arg_6 = os.path.join(arg_2, arg_1)\n\n if not arg_3:\n arg_3 = tf.global_variables()\n\n logging.info(\"[*] load %s n_params: %d\" % (arg_6, len(arg_3)))\n\n if arg_5:\n for arg_7, arg_8 in enumerate(arg_3):\n logging.info(\" param {:3}: {:15} {}\".format(arg_7, arg_8.name, str(arg_8.get_shape())))\n\n try:\n arg_9 = tf.train.Saver(arg_3)\n arg_9.restore(arg_0, arg_6)\n except Exception as e:\n logging.info(e)\n logging.info(\"[*] load ckpt fail ...\")"} +{"_id": "doc_1500", "title": "", "text": "def Func(arg_0='', arg_1='file.npy'):\n \"\"\"Load `.npy` file.\n\n Parameters\n ------------\n path : str\n Path to the file (optional).\n name : str\n File name.\n\n Examples\n ---------\n - see tl.files.save_any_to_npy()\n\n \"\"\"\n arg_2 = os.path.join(arg_0, arg_1)\n try:\n return np.load(arg_2).item()\n except Exception:\n return np.load(arg_2)\n raise Exception(\"[!] Fail to load %s\" % arg_2)"} +{"_id": "doc_1501", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=None):\n \"\"\"Checks if file exists in working_directory otherwise tries to dowload the file,\n and optionally also tries to extract the file if format is \".zip\" or \".tar\"\n\n Parameters\n -----------\n filename : str\n The name of the (to be) dowloaded file.\n working_directory : str\n A folder path to search for the file in and dowload the file to\n url : str\n The URL to download the file from\n extract : boolean\n If True, tries to uncompress the dowloaded file is \".tar.gz/.tar.bz2\" or \".zip\" file, default is False.\n expected_bytes : int or None\n If set tries to verify that the downloaded file is of the specified size, otherwise raises an Exception, defaults is None which corresponds to no check being performed.\n\n Returns\n ----------\n str\n File path of the dowloaded (uncompressed) file.\n\n Examples\n --------\n >>> down_file = tl.files.Func(filename='train-images-idx3-ubyte.gz',\n ... working_directory='data/',\n ... url_source='http://yann.lecun.com/exdb/mnist/')\n >>> tl.files.Func(filename='ADEChallengeData2016.zip',\n ... working_directory='data/',\n ... url_source='http://sceneparsing.csail.mit.edu/data/',\n ... 
extract=True)\n\n \"\"\"\n\n # We first define a download function, supporting both Python 2 and 3.\n def _download(arg_0, arg_1, arg_2):\n\n arg_5 = progressbar.ProgressBar()\n\n def _dlProgress(arg_6, arg_7, arg_8, arg_9=arg_5):\n if (arg_8 != 0):\n\n if not arg_9.max_value:\n arg_10 = math.ceil(float(arg_8) / float(arg_7))\n arg_9.max_value = int(arg_10)\n\n arg_9.update(arg_6, force=True)\n\n arg_12 = os.path.join(arg_1, arg_0)\n\n logging.info('Downloading %s...\\n' % arg_0)\n\n urlretrieve(arg_2 + arg_0, arg_12, reporthook=_dlProgress)\n\n exists_or_mkdir(arg_1, verbose=False)\n arg_12 = os.path.join(arg_1, arg_0)\n\n if not os.path.exists(arg_12):\n\n _download(arg_0, arg_1, arg_2)\n arg_13 = os.stat(arg_12)\n logging.info('Succesfully downloaded %s %s bytes.' % (arg_0, arg_13.st_size)) # , 'bytes.')\n if (not (arg_4 is None) and (arg_4 != arg_13.st_size)):\n raise Exception('Failed to verify ' + arg_0 + '. Can you get to it with a browser?')\n if (arg_3):\n if tarfile.is_tarfile(arg_12):\n logging.info('Trying to extract tar file')\n tarfile.open(arg_12, 'r').extractall(arg_1)\n logging.info('... Success!')\n elif zipfile.is_zipfile(arg_12):\n logging.info('Trying to extract zip file')\n with zipfile.ZipFile(arg_12) as zf:\n zf.extractall(arg_1)\n logging.info('... Success!')\n else:\n logging.info(\"Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported\")\n return arg_12"} +{"_id": "doc_1502", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Process a batch of data by given function by threading.\n\n Usually be used for data augmentation.\n\n Parameters\n -----------\n data : numpy.array or others\n The data to be processed.\n thread_count : int\n The number of threads to use.\n fn : function\n The function for data processing.\n more args : the args for `fn`\n Ssee Examples below.\n\n Examples\n --------\n Process images.\n\n >>> images, _, _, _ = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))\n >>> images = tl.prepro.Func(images[0:32], tl.prepro.zoom, zoom_range=[0.5, 1])\n\n Customized image preprocessing function.\n\n >>> def distort_img(x):\n >>> x = tl.prepro.flip_axis(x, axis=0, is_random=True)\n >>> x = tl.prepro.flip_axis(x, axis=1, is_random=True)\n >>> x = tl.prepro.crop(x, 100, 100, is_random=True)\n >>> return x\n >>> images = tl.prepro.Func(images, distort_img)\n\n Process images and masks together (Usually be used for image segmentation).\n\n >>> X, Y --> [batch_size, row, col, 1]\n >>> data = tl.prepro.Func([_ for _ in zip(X, Y)], tl.prepro.zoom_multi, zoom_range=[0.5, 1], is_random=True)\n data --> [batch_size, 2, row, col, 1]\n >>> X_, Y_ = data.transpose((1,0,2,3,4))\n X_, Y_ --> [batch_size, row, col, 1]\n >>> tl.vis.save_image(X_, 'images.png')\n >>> tl.vis.save_image(Y_, 'masks.png')\n\n Process images and masks together by using ``thread_count``.\n\n >>> X, Y --> [batch_size, row, col, 1]\n >>> data = tl.prepro.Func(X, tl.prepro.zoom_multi, 8, zoom_range=[0.5, 1], is_random=True)\n data --> [batch_size, 2, row, col, 1]\n >>> X_, Y_ = data.transpose((1,0,2,3,4))\n X_, Y_ --> [batch_size, row, col, 1]\n >>> tl.vis.save_image(X_, 'after.png')\n >>> tl.vis.save_image(Y_, 'before.png')\n\n Customized function for processing images and masks together.\n\n >>> def distort_img(data):\n >>> x, y = data\n >>> x, y = tl.prepro.flip_axis_multi([x, y], axis=0, is_random=True)\n >>> x, y = tl.prepro.flip_axis_multi([x, y], axis=1, is_random=True)\n >>> x, y = tl.prepro.crop_multi([x, y], 100, 100, 
is_random=True)\n >>> return x, y\n\n >>> X, Y --> [batch_size, row, col, channel]\n >>> data = tl.prepro.Func([_ for _ in zip(X, Y)], distort_img)\n >>> X_, Y_ = data.transpose((1,0,2,3,4))\n\n Returns\n -------\n list or numpyarray\n The processed results.\n\n References\n ----------\n - `python queue `__\n - `run with limited queue `__\n\n \"\"\"\n\n def apply_fn(arg_4, arg_5, arg_0, arg_3):\n arg_4[arg_5] = arg_1(arg_0, **arg_3)\n\n if arg_2 is None:\n arg_4 = [None] * len(arg_0)\n arg_6 = []\n # for i in range(len(data)):\n # t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs))\n for arg_5, arg_7 in enumerate(arg_0):\n arg_8 = threading.Thread(name='threading_and_return', target=apply_fn, args=(arg_4, arg_5, arg_7, arg_3))\n arg_8.start()\n arg_6.append(arg_8)\n else:\n arg_9 = np.linspace(0, len(arg_0), arg_2 + 1)\n arg_9 = np.round(arg_9).astype(int)\n arg_4 = [None] * arg_2\n arg_6 = []\n for arg_5 in range(arg_2):\n arg_8 = threading.Thread(\n name='threading_and_return', target=apply_fn, args=(arg_4, arg_5, arg_0[arg_9[arg_5]:arg_9[arg_5 + 1]], arg_3)\n )\n arg_8.start()\n arg_6.append(arg_8)\n\n for arg_8 in arg_6:\n arg_8.join()\n\n if arg_2 is None:\n try:\n return np.asarray(arg_4)\n except Exception:\n return arg_4\n else:\n return np.concatenate(arg_4)"} +{"_id": "doc_1503", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=1, arg_6='constant', arg_7=0.0, arg_8=True,\n arg_9=False\n):\n \"\"\"Projective transform by given coordinates, usually 4 coordinates.\n\n see `scikit-image `__.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n src : list or numpy\n The original coordinates, usually 4 coordinates of (width, height).\n dst : list or numpy\n The coordinates after transformation, the number of coordinates is the same with src.\n map_args : dictionary or None\n Keyword arguments passed to inverse map.\n output_shape : tuple of 2 int\n Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified.\n order : int\n The order of interpolation. The order has to be in the range 0-5:\n - 0 Nearest-neighbor\n - 1 Bi-linear (default)\n - 2 Bi-quadratic\n - 3 Bi-cubic\n - 4 Bi-quartic\n - 5 Bi-quintic\n mode : str\n One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`.\n Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad.\n cval : float\n Used in conjunction with mode `constant`, the value outside the image boundaries.\n clip : boolean\n Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.\n preserve_range : boolean\n Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n --------\n Assume X is an image from CIFAR-10, i.e. 
shape == (32, 32, 3)\n\n >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h]\n >>> dst = [[10,10],[0,32],[32,0],[32,32]]\n >>> x = tl.prepro.Func(X, src, dst)\n\n References\n -----------\n - `scikit-image : geometric transformations `__\n - `scikit-image : examples `__\n\n \"\"\"\n if arg_3 is None:\n arg_3 = {}\n # if type(src) is list:\n if isinstance(arg_1, list): # convert to numpy\n arg_1 = np.array(arg_1)\n # if type(dst) is list:\n if isinstance(arg_2, list):\n arg_2 = np.array(arg_2)\n if np.max(arg_0) > 1: # convert to [0, 1]\n arg_0 = arg_0 / 255\n\n arg_10 = transform.ProjectiveTransform()\n arg_10.estimate(arg_2, arg_1)\n arg_11 = transform.warp(\n arg_0, arg_10, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, arg_6=arg_6, arg_7=arg_7, arg_8=arg_8,\n arg_9=arg_9\n )\n return arg_11"} +{"_id": "doc_1504", "title": "", "text": "def Func(\n arg_0, arg_1=20, arg_2=False, arg_3=0, arg_4=1, arg_5=2, arg_6='nearest', arg_7=0., arg_8=1\n):\n \"\"\"Rotate an image randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n rg : int or float\n Degree to rotate, usually 0 ~ 180.\n is_random : boolean\n If True, randomly rotate. Default is False\n row_index col_index and channel_index : int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0\n order : int\n The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n >>> x --> [row, col, 1]\n >>> x = tl.prepro.Func(x, rg=40, is_random=False)\n >>> tl.vis.save_image(x, 'im.png')\n\n \"\"\"\n if arg_2:\n arg_9 = np.pi / 180 * np.random.uniform(-arg_1, arg_1)\n else:\n arg_9 = np.pi / 180 * arg_1\n arg_10 = np.array([[np.cos(arg_9), -np.sin(arg_9), 0], [np.sin(arg_9), np.cos(arg_9), 0], [0, 0, 1]])\n\n arg_11, arg_12 = arg_0.shape[arg_3], arg_0.shape[arg_4]\n arg_13 = transform_matrix_offset_center(arg_10, arg_11, arg_12)\n arg_0 = affine_transform(arg_0, arg_13, arg_5, arg_6, arg_7, arg_8)\n return arg_0"} +{"_id": "doc_1505", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=0, arg_5=1):\n \"\"\"Randomly or centrally Func an image.\n\n Parameters\n ----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n wrg : int\n Size of width.\n hrg : int\n Size of height.\n is_random : boolean,\n If True, randomly Func, else central Func. 
Default is False.\n row_index: int\n index of row.\n col_index: int\n index of column.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n \"\"\"\n arg_6, arg_7 = arg_0.shape[arg_4], arg_0.shape[arg_5]\n\n if (arg_6 < arg_2) or (arg_7 < arg_1):\n raise AssertionError(\"The size of Funcping should smaller than or equal to the original image\")\n\n if arg_3:\n arg_8 = int(np.random.uniform(0, arg_6 - arg_2))\n arg_9 = int(np.random.uniform(0, arg_7 - arg_1))\n # tl.logging.info(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape)\n return arg_0[arg_8:arg_2 + arg_8, arg_9:arg_1 + arg_9]\n else: # central Func\n arg_8 = int(np.floor((arg_6 - arg_2) / 2.))\n arg_9 = int(np.floor((arg_7 - arg_1) / 2.))\n arg_10 = arg_8 + arg_2\n arg_11 = arg_9 + arg_1\n return arg_0[arg_8:arg_10, arg_9:arg_11]"} +{"_id": "doc_1506", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.flip_axis``.\n\n Returns\n -------\n numpy.array\n A list of processed images.\n\n \"\"\"\n if arg_2:\n arg_3 = np.random.uniform(-1, 1)\n if arg_3 > 0:\n # x = np.asarray(x).swapaxes(axis, 0)\n # x = x[::-1, ...]\n # x = x.swapaxes(0, axis)\n # return x\n arg_4 = []\n for arg_5 in arg_0:\n arg_5 = np.asarray(arg_5).swapaxes(arg_1, 0)\n arg_5 = arg_5[::-1, ...]\n arg_5 = arg_5.swapaxes(0, arg_1)\n arg_4.append(arg_5)\n return np.asarray(arg_4)\n else:\n return np.asarray(arg_0)\n else:\n # x = np.asarray(x).swapaxes(axis, 0)\n # x = x[::-1, ...]\n # x = x.swapaxes(0, axis)\n # return x\n arg_4 = []\n for arg_5 in arg_0:\n arg_5 = np.asarray(arg_5).swapaxes(arg_1, 0)\n arg_5 = arg_5[::-1, ...]\n arg_5 = arg_5.swapaxes(0, arg_1)\n arg_4.append(arg_5)\n return np.asarray(arg_4)"} +{"_id": "doc_1507", "title": "", "text": "def Func(\n arg_0, arg_1=0.1, arg_2=0.1, arg_3=False, arg_4=0, arg_5=1, arg_6=2, arg_7='nearest', arg_8=0.,\n arg_9=1\n):\n \"\"\"Shift an image randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n wrg : float\n Percentage of Func in axis x, usually -0.25 ~ 0.25.\n hrg : float\n Percentage of Func in axis y, usually -0.25 ~ 0.25.\n is_random : boolean\n If True, randomly Func. Default is False.\n row_index col_index and channel_index : int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n order : int\n The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.\n\n \"\"\"\n arg_10, arg_11 = arg_0.shape[arg_4], arg_0.shape[arg_5]\n if arg_3:\n arg_12 = np.random.uniform(-arg_2, arg_2) * arg_10\n arg_13 = np.random.uniform(-arg_1, arg_1) * arg_11\n else:\n arg_12, arg_13 = arg_2 * arg_10, arg_1 * arg_11\n arg_14 = np.array([[1, 0, arg_12], [0, 1, arg_13], [0, 0, 1]])\n\n arg_15 = arg_14 # no need to do offset\n arg_0 = affine_transform(arg_0, arg_15, arg_6, arg_7, arg_8, arg_9)\n return arg_0"} +{"_id": "doc_1508", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=1, arg_3=False):\n \"\"\"Change the Func of a single image, randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n gamma : float\n Non negative real number. Default value is 1.\n - Small than 1 means brighter.\n - If `is_random` is True, gamma in a range of (1-gamma, 1+gamma).\n gain : float\n The constant multiplier. Default value is 1.\n is_random : boolean\n If True, randomly change Func. Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n References\n -----------\n - `skimage.exposure.adjust_gamma `__\n - `chinese blog `__\n\n \"\"\"\n if arg_3:\n arg_1 = np.random.uniform(1 - arg_1, 1 + arg_1)\n arg_0 = exposure.adjust_gamma(arg_0, arg_1, arg_2)\n return arg_0"} +{"_id": "doc_1509", "title": "", "text": "def Func(arg_0, arg_1=1., arg_2=1., arg_3=1., arg_4=False):\n \"\"\"Perform Func augmentation for a single image, randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n gamma : float\n Change brightness (the same with ``tl.prepro.brightness``)\n - if is_random=False, one float number, small than one means brighter, greater than one means darker.\n - if is_random=True, tuple of two float numbers, (min, max).\n contrast : float\n Change contrast.\n - if is_random=False, one float number, small than one means blur.\n - if is_random=True, tuple of two float numbers, (min, max).\n saturation : float\n Change saturation.\n - if is_random=False, one float number, small than one means unsaturation.\n - if is_random=True, tuple of two float numbers, (min, max).\n is_random : boolean\n If True, randomly change Func. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n Random\n\n >>> x = tl.prepro.Func(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)\n\n Non-random\n\n >>> x = tl.prepro.Func(x, 0.5, 0.6, 0.8, is_random=False)\n\n \"\"\"\n if arg_4:\n if not (len(arg_1) == len(arg_2) == len(arg_3) == 2):\n raise AssertionError(\"if is_random = True, the arguments are (min, max)\")\n\n ## random change brightness # small --> brighter\n arg_5 = np.random.randint(0, 3) # 0-brighter, 1-darker, 2 keep normal\n\n if arg_5 == 0: # brighter\n arg_1 = np.random.uniform(arg_1[0], 1.0) # (.5, 1.0)\n elif arg_5 == 1: # darker\n arg_1 = np.random.uniform(1.0, arg_1[1]) # (1.0, 5.0)\n else:\n arg_1 = 1\n arg_6 = brightness(arg_0, arg_1=arg_1, gain=1, arg_4=False)\n\n # tl.logging.info(\"using contrast and saturation\")\n arg_7 = PIL.Image.fromarray(arg_6) # array -> PIL\n arg_8 = PIL.ImageEnhance.Contrast(arg_7)\n arg_7 = arg_8.enhance(np.random.uniform(arg_2[0], arg_2[1])) #0.3,0.9))\n\n arg_9 = PIL.ImageEnhance.Color(arg_7)\n arg_7 = arg_9.enhance(np.random.uniform(arg_3[0], arg_3[1])) # (0.7,1.0))\n arg_6 = np.array(arg_7) # PIL -> array\n else:\n arg_6 = brightness(arg_0, arg_1=arg_1, gain=1, arg_4=False)\n arg_7 = PIL.Image.fromarray(arg_6) # array -> PIL\n arg_8 = PIL.ImageEnhance.Contrast(arg_7)\n arg_7 = arg_8.enhance(arg_2)\n\n arg_9 = PIL.ImageEnhance.Color(arg_7)\n arg_7 = arg_9.enhance(arg_3)\n arg_6 = np.array(arg_7) # PIL -> array\n return np.asarray(arg_6)"} +{"_id": "doc_1510", "title": "", "text": "def Func(arg_0, arg_1=0.66, arg_2=True, arg_3=True, arg_4=False):\n \"\"\"Adjust hue of an RGB image.\n\n This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.\n For TF, see `tf.image.Func `__.and `tf.image.random_hue `__.\n\n Parameters\n -----------\n im : numpy.array\n An image with values between 0 and 255.\n hout : float\n The scale value for adjusting hue.\n - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.\n - If is_offset is True, add this value as the offset to the hue channel.\n is_offset : boolean\n Whether `hout` is added on HSV as offset or not. Default is True.\n is_clip : boolean\n If HSV value smaller than 0, set to 0. Default is True.\n is_random : boolean\n If True, randomly change hue. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n Random, add a random value between -0.2 and 0.2 as the offset to every hue values.\n\n >>> im_hue = tl.prepro.Func(image, hout=0.2, is_offset=True, is_random=False)\n\n Non-random, make all hue to green.\n\n >>> im_green = tl.prepro.Func(image, hout=0.66, is_offset=False, is_random=False)\n\n References\n -----------\n - `tf.image.random_hue `__.\n - `tf.image.Func `__.\n - `StackOverflow: Changing image hue with python PIL `__.\n\n \"\"\"\n arg_5 = rgb_to_hsv(arg_0)\n if arg_4:\n arg_1 = np.random.uniform(-arg_1, arg_1)\n\n if arg_2:\n arg_5[..., 0] += arg_1\n else:\n arg_5[..., 0] = arg_1\n\n if arg_3:\n arg_5[..., 0] = np.clip(arg_5[..., 0], 0, np.inf) # Hao : can remove green dots\n\n arg_6 = hsv_to_rgb(arg_5)\n return arg_6"} +{"_id": "doc_1511", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='bicubic', arg_3=None):\n \"\"\"Resize an image by given output size and method.\n\n Warning, this function will rescale the value to [0, 255].\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n size : list of 2 int or None\n For height and width.\n interp : str\n Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).\n mode : str\n The PIL image mode (`P`, `L`, etc.) to convert image before resizing.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n References\n ------------\n - `scipy.misc.Func `__\n\n \"\"\"\n if arg_1 is None:\n arg_1 = [100, 100]\n\n if arg_0.shape[-1] == 1:\n # greyscale\n arg_0 = scipy.misc.Func(arg_0[:, :, 0], arg_1, arg_2=arg_2, arg_3=arg_3)\n return arg_0[:, :, np.newaxis]\n else:\n # rgb, bgr, rgba\n return scipy.misc.Func(arg_0, arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_1512", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=1e-7):\n \"\"\"Normalize every pixels by the same given mean and std, which are usually\n compute from all examples.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n mean : float\n Value for subtraction.\n std : float\n Value for division.\n epsilon : float\n A small position value for dividing standard deviation.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n \"\"\"\n if arg_1:\n arg_0 = arg_0 - arg_1\n if arg_2:\n arg_0 = arg_0 / (arg_2 + arg_3)\n return arg_0"} +{"_id": "doc_1513", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Apply ZCA whitening on an image by given principal components matrix.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n principal_components : matrix\n Matrix from ``get_Func_principal_components_img``.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n \"\"\"\n arg_2 = np.reshape(arg_0, (arg_0.size))\n # tl.logging.info(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1))\n # flatx = np.reshape(x, (x.shape))\n # flatx = np.reshape(x, (x.shape[0], ))\n # tl.logging.info(flatx.shape) # (160, 176, 1)\n arg_3 = np.dot(arg_2, arg_1)\n arg_0 = np.reshape(arg_3, (arg_0.shape[0], arg_0.shape[1], arg_0.shape[2]))\n return arg_0"} +{"_id": "doc_1514", "title": "", "text": "def Func(arg_0, arg_1=0.5):\n \"\"\"Randomly set some pixels to zero by a given keeping probability.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] or [row, col].\n keep : float\n The keeping 
probability (0, 1), the lower more values will be set to zero.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n \"\"\"\n if len(arg_0.shape) == 3:\n if arg_0.shape[-1] == 3: # color\n arg_2 = arg_0.shape\n arg_3 = np.random.binomial(n=1, p=arg_1, size=arg_0.shape[:-1])\n for arg_4 in range(3):\n arg_0[:, :, arg_4] = np.multiply(arg_0[:, :, arg_4], arg_3)\n elif arg_0.shape[-1] == 1: # greyscale image\n arg_2 = arg_0.shape\n arg_0 = np.multiply(arg_0, np.random.binomial(n=1, p=arg_1, size=arg_2))\n else:\n raise Exception(\"Unsupported shape {}\".format(arg_0.shape))\n elif len(arg_0.shape) == 2 or 1: # greyscale matrix (image) or vector\n arg_2 = arg_0.shape\n arg_0 = np.multiply(arg_0, np.random.binomial(n=1, p=arg_1, size=arg_2))\n else:\n raise Exception(\"Unsupported shape {}\".format(arg_0.shape))\n return arg_0"} +{"_id": "doc_1515", "title": "", "text": "def Func(arg_0=None, arg_1=(100, 100), arg_2=1):\n \"\"\"Inputs a list of points, return a 2D image.\n\n Parameters\n --------------\n list_points : list of 2 int\n [[x, y], [x, y]..] for point coordinates.\n size : tuple of 2 int\n (w, h) for output size.\n val : float or int\n For the contour value.\n\n Returns\n -------\n numpy.array\n An image.\n\n \"\"\"\n if arg_0 is None:\n raise Exception(\"list_points : list of 2 int\")\n arg_3 = arg_7.zeros(arg_1)\n if len(arg_0) == 0:\n return arg_3\n for arg_4 in arg_0:\n for arg_5 in arg_4:\n # tl.logging.info(x)\n arg_3[arg_6(arg_7.round(arg_5[0]))][arg_6(arg_7.round(arg_5[1]))] = arg_2\n return arg_3"} +{"_id": "doc_1516", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse darknet annotation format into two lists for class and bounding box.\n\n Input list of [[class, x, y, w, h], ...], return two list of [class ...] and [[x, y, w, h], ...].\n\n Parameters\n ------------\n annotations : list of list\n A list of class and bounding boxes of images e.g. [[class, x, y, w, h], ...]\n\n Returns\n -------\n list of int\n List of class labels.\n\n list of list of 4 numbers\n List of bounding box.\n\n \"\"\"\n arg_1 = []\n arg_2 = []\n for arg_3 in arg_0:\n arg_1.append(arg_3[0])\n arg_2.append(arg_3[1:])\n return arg_1, arg_2"} +{"_id": "doc_1517", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3='bicubic', arg_4=None, arg_5=False):\n \"\"\"Resize an image, and compute the new bounding box coordinates.\n\n Parameters\n -------------\n im : numpy.array\n An image with dimension of [row, col, channel] (default).\n coords : list of list of 4 int/float or None\n Coordinates [[x, y, w, h], [x, y, w, h], ...]\n size interp and mode : args\n See ``tl.prepro.imresize``.\n is_rescale : boolean\n Set to True, if the input coordinates are rescaled to [0, 1], then return the original coordinates. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image\n list of list of 4 numbers\n A list of new bounding boxes.\n\n Examples\n --------\n >>> im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80\n >>> _, coords = Func(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)\n >>> print(coords)\n [[40, 80, 60, 60], [20, 40, 40, 40]]\n >>> _, coords = Func(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)\n >>> print(coords)\n [[20, 20, 30, 15]]\n >>> _, coords = Func(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)\n >>> print(coords)\n [[30, 30, 45, 22]]\n >>> im2, coords = Func(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)\n >>> print(coords, im2.shape)\n [[0.2, 0.4, 0.3, 0.3]] (160, 200, 3)\n\n \"\"\"\n if arg_1 is None:\n arg_1 = []\n if arg_2 is None:\n arg_2 = [100, 100]\n\n arg_6, arg_7 = arg_0.shape[0:2]\n arg_6 = arg_6 * 1.0 # * 1.0 for python2 : force division to be float point\n arg_7 = arg_7 * 1.0\n arg_0 = imresize(arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n\n if arg_5 is False:\n arg_8 = list()\n\n for arg_9 in arg_1:\n\n if len(arg_9) != 4:\n raise AssertionError(\"coordinate should be 4 values : [x, y, w, h]\")\n\n # x' = x * (imw'/imw)\n arg_10 = int(arg_9[0] * (arg_2[1] / arg_7))\n # y' = y * (imh'/imh)\n # tl.logging.info('>>', coord[1], size[0], imh)\n arg_11 = int(arg_9[1] * (arg_2[0] / arg_6))\n # w' = w * (imw'/imw)\n arg_12 = int(arg_9[2] * (arg_2[1] / arg_7))\n # h' = h * (imh'/imh)\n arg_13 = int(arg_9[3] * (arg_2[0] / arg_6))\n arg_8.append([arg_10, arg_11, arg_12, arg_13])\n return arg_0, arg_8\n else:\n return arg_0, arg_1"} +{"_id": "doc_1518", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"Return mask for sequences.\n\n Parameters\n -----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n pad_val : int\n The pad value.\n\n Returns\n ----------\n list of list of int\n The mask.\n\n Examples\n ---------\n >>> sentences_ids = [[4, 0, 5, 3, 0, 0],\n ... 
[5, 3, 9, 4, 9, 0]]\n >>> mask = Func(sentences_ids, pad_val=0)\n [[1 1 1 1 0 0]\n [1 1 1 1 1 0]]\n\n \"\"\"\n arg_2 = np.ones_like(arg_0)\n for arg_3, arg_4 in enumerate(arg_0):\n for arg_5 in reversed(range(len(arg_4))):\n if arg_4[arg_5] == arg_1:\n arg_2[arg_3, arg_5] = 0\n else:\n break # <-- exit the for loop, prepcess next sequence\n return arg_2"} +{"_id": "doc_1519", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=0.5, arg_4=(0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10, 15, 14, 17, 16, 18)\n):\n \"\"\"Flip an image and corresponding keypoints.\n\n Parameters\n -----------\n image : 3 channel image\n The given image for augmentation.\n annos : list of list of floats\n The keypoints annotation of people.\n mask : single channel image or None\n The mask if available.\n prob : float, 0 to 1\n The probability to flip the image, if 1, always flip the image.\n flip_list : tuple of int\n Denotes how the keypoints number be changed after flipping which is required for pose estimation task.\n The left and right body should be maintained rather than switch.\n (Default COCO format).\n Set to an empty tuple if you don't need to maintain left and right information.\n\n Returns\n ----------\n preprocessed image, annos, mask\n\n \"\"\"\n\n arg_5 = np.random.uniform(0, 1.0)\n if arg_5 < arg_3:\n return arg_0, arg_1, arg_2\n\n arg_6, arg_7, arg_6 = np.shape(arg_0)\n arg_0 = cv2.flip(arg_0, 1)\n arg_2 = cv2.flip(arg_2, 1)\n arg_8 = []\n for arg_9 in arg_1: # TODO : speed up with affine transform\n arg_10 = []\n for arg_11 in arg_4:\n arg_12 = arg_9[arg_11]\n if arg_12[0] < 0 or arg_12[1] < 0:\n arg_10.append((-1000, -1000))\n continue\n if arg_12[0] > arg_0.shape[1] - 1 or arg_12[1] > arg_0.shape[0] - 1:\n arg_10.append((-1000, -1000))\n continue\n if (arg_7 - arg_12[0]) > arg_0.shape[1] - 1:\n arg_10.append((-1000, -1000))\n continue\n arg_10.append((arg_7 - arg_12[0], arg_12[1]))\n arg_8.append(arg_10)\n arg_1 = arg_8\n\n return arg_0, arg_1, arg_2"} +{"_id": "doc_1520", "title": "", "text": "def Func(arg_0=None, arg_1=0.99, arg_2=0):\n \"\"\"Take 1D float array of rewards and compute discounted rewards for an\n episode. When encount a non-zero value, consider as the end a of an episode.\n\n Parameters\n ----------\n rewards : list\n List of rewards\n gamma : float\n Discounted factor\n mode : int\n Mode for computing the discount rewards.\n - If mode == 0, reset the discount process when encount a non-zero reward (Ping-pong game).\n - If mode == 1, would not reset the discount process.\n\n Returns\n --------\n list of float\n The discounted rewards.\n\n Examples\n ----------\n >>> rewards = np.asarray([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1])\n >>> gamma = 0.9\n >>> discount_rewards = tl.rein.Func(rewards, gamma)\n >>> print(discount_rewards)\n [ 0.72899997 0.81 0.89999998 1. 0.72899997 0.81\n 0.89999998 1. 0.72899997 0.81 0.89999998 1. ]\n >>> discount_rewards = tl.rein.Func(rewards, gamma, mode=1)\n >>> print(discount_rewards)\n [ 1.52110755 1.69011939 1.87791049 2.08656716 1.20729685 1.34144104\n 1.49048996 1.65610003 0.72899997 0.81 0.89999998 1. 
]\n\n \"\"\"\n if arg_0 is None:\n raise Exception(\"rewards should be a list\")\n arg_3 = np.zeros_like(arg_0, dtype=np.float32)\n arg_4 = 0\n for arg_5 in reversed(xrange(0, arg_0.size)):\n if arg_2 == 0:\n if arg_0[arg_5] != 0: arg_4 = 0\n\n arg_4 = arg_4 * arg_1 + arg_0[arg_5]\n arg_3[arg_5] = arg_4\n return arg_3"} +{"_id": "doc_1521", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Calculate the loss for Policy Gradient Network.\n\n Parameters\n ----------\n logits : tensor\n The network outputs without softmax. This function implements softmax inside.\n actions : tensor or placeholder\n The agent actions.\n rewards : tensor or placeholder\n The rewards.\n\n Returns\n --------\n Tensor\n The TensorFlow loss function.\n\n Examples\n ----------\n >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])\n >>> network = InputLayer(states_batch_pl, name='input')\n >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')\n >>> network = DenseLayer(network, n_units=3, name='out')\n >>> probs = network.outputs\n >>> sampling_prob = tf.nn.softmax(probs)\n >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])\n >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])\n >>> loss = tl.rein.Func(probs, actions_batch_pl, discount_rewards_batch_pl)\n >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)\n\n \"\"\"\n arg_4 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=arg_1, arg_0=arg_0, arg_3=arg_3)\n\n return tf.reduce_sum(tf.multiply(arg_4, arg_2))"} +{"_id": "doc_1522", "title": "", "text": "def Func(arg_0, arg_1, arg_2='Func'):\n \"\"\"Log weight.\n\n Parameters\n -----------\n probs : tensor\n If it is a network output, usually we should scale it to [0, 1] via softmax.\n weights : tensor\n The weights.\n\n Returns\n --------\n Tensor\n The Tensor after appling the log weighted expression.\n\n \"\"\"\n with tf.variable_scope(arg_2):\n arg_3 = tf.reduce_mean(tf.log(arg_0) * arg_1)\n return arg_3"} +{"_id": "doc_1523", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions,\n it implements softmax internally. 
See ``tf.nn.sparse_softmax_Func_with_logits``.\n\n Parameters\n ----------\n output : Tensor\n A batch of distribution with shape: [batch_size, num of classes].\n target : Tensor\n A batch of index with shape: [batch_size, ].\n name : string\n Name of this loss.\n\n Examples\n --------\n >>> ce = tl.cost.Func(y_logits, y_target_logits, 'my_loss')\n\n References\n -----------\n - About cross-entropy: ``__.\n - The code is borrowed from: ``__.\n\n \"\"\"\n if arg_2 is None:\n raise Exception(\"Please give a unique name to tl.cost.Func for TF1.0+\")\n return tf.reduce_mean(tf.nn.sparse_softmax_Func_with_logits(labels=arg_1, logits=arg_0), arg_2=arg_2)"} +{"_id": "doc_1524", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Sigmoid cross-entropy operation, see ``tf.nn.Func_with_logits``.\n\n Parameters\n ----------\n output : Tensor\n A batch of distribution with shape: [batch_size, num of classes].\n target : Tensor\n A batch of index with shape: [batch_size, ].\n name : string\n Name of this loss.\n\n \"\"\"\n return tf.reduce_mean(tf.nn.Func_with_logits(labels=arg_1, logits=arg_0), arg_2=arg_2)"} +{"_id": "doc_1525", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1e-8, arg_3='bce_loss'):\n \"\"\"Binary cross entropy operation.\n\n Parameters\n ----------\n output : Tensor\n Tensor with type of `float32` or `float64`.\n target : Tensor\n The target distribution, format the same with `output`.\n epsilon : float\n A small value to avoid output to be zero.\n name : str\n An optional name to attach to this function.\n\n References\n -----------\n - `ericjang-DRAW `__\n\n \"\"\"\n # with ops.op_scope([output, target], name, \"bce_loss\") as name:\n # output = ops.convert_to_tensor(output, name=\"preds\")\n # target = ops.convert_to_tensor(targets, name=\"target\")\n\n # with tf.name_scope(name):\n return tf.reduce_mean(\n tf.reduce_sum(-(arg_1 * tf.log(arg_0 + arg_2) + (1. - arg_1) * tf.log(1. - arg_0 + arg_2)), axis=1),\n arg_3=arg_3\n )"} +{"_id": "doc_1526", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"normalized_mean_squared_error_loss\"):\n \"\"\"Return the TensorFlow expression of normalized mean-square-error of two distributions.\n\n Parameters\n ----------\n output : Tensor\n 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].\n target : Tensor\n The target distribution, format the same with `output`.\n name : str\n An optional name to attach to this function.\n\n \"\"\"\n # with tf.name_scope(\"normalized_mean_squared_error_loss\"):\n if arg_0.get_shape().ndims == 2: # [batch_size, n_feature]\n arg_3 = tf.sqrt(tf.reduce_sum(tf.squared_difference(arg_0, arg_1), axis=1))\n arg_4 = tf.sqrt(tf.reduce_sum(tf.square(arg_1), axis=1))\n elif arg_0.get_shape().ndims == 3: # [batch_size, w, h]\n arg_3 = tf.sqrt(tf.reduce_sum(tf.squared_difference(arg_0, arg_1), axis=[1, 2]))\n arg_4 = tf.sqrt(tf.reduce_sum(tf.square(arg_1), axis=[1, 2]))\n elif arg_0.get_shape().ndims == 4: # [batch_size, w, h, c]\n arg_3 = tf.sqrt(tf.reduce_sum(tf.squared_difference(arg_0, arg_1), axis=[1, 2, 3]))\n arg_4 = tf.sqrt(tf.reduce_sum(tf.square(arg_1), axis=[1, 2, 3]))\n arg_5 = tf.reduce_mean(arg_3 / arg_4, arg_2=arg_2)\n return arg_5"} +{"_id": "doc_1527", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=None):\n \"\"\"Returns the expression of cross-entropy of two sequences, implement\n softmax internally. 
Normally be used for Dynamic RNN with Synced sequence input and output.\n\n Parameters\n -----------\n logits : Tensor\n 2D tensor with shape of [batch_size * ?, n_classes], `?` means dynamic IDs for each example.\n - Can be get from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.\n target_seqs : Tensor\n int of tensor, like word ID. [batch_size, ?], `?` means dynamic IDs for each example.\n input_mask : Tensor\n The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1.\n return_details : boolean\n Whether to return detailed losses.\n - If False (default), only returns the loss.\n - If True, returns the loss, losses, weights and targets (see source code).\n\n Examples\n --------\n >>> batch_size = 64\n >>> vocab_size = 10000\n >>> embedding_size = 256\n >>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"input\")\n >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"target\")\n >>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"mask\")\n >>> net = tl.layers.EmbeddingInputlayer(\n ... inputs = input_seqs,\n ... vocabulary_size = vocab_size,\n ... embedding_size = embedding_size,\n ... name = 'seq_embedding')\n >>> net = tl.layers.DynamicRNNLayer(net,\n ... cell_fn = tf.contrib.rnn.BasicLSTMCell,\n ... n_hidden = embedding_size,\n ... dropout = (0.7 if is_train else None),\n ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),\n ... return_seq_2d = True,\n ... name = 'dynamicrnn')\n >>> print(net.outputs)\n (?, 256)\n >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name=\"output\")\n >>> print(net.outputs)\n (?, 10000)\n >>> loss = tl.cost.Func(net.outputs, target_seqs, input_mask)\n\n \"\"\"\n arg_5 = tf.reshape(arg_1, [-1]) # to one vector\n arg_6 = tf.to_float(tf.reshape(arg_2, [-1])) # to one vector like targets\n arg_7 = tf.nn.sparse_softmax_cross_entropy_with_logits(arg_0=arg_0, labels=arg_5, arg_4=arg_4) * arg_6\n # losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others\n\n arg_8 = tf.divide(\n tf.reduce_sum(arg_7), # loss from mask. reduce_sum before element-wise mul with mask !!\n tf.reduce_sum(arg_6),\n arg_4=\"seq_loss_with_mask\"\n )\n\n if arg_3:\n return arg_8, arg_7, arg_6, arg_5\n else:\n return arg_8"} +{"_id": "doc_1528", "title": "", "text": "def Func(arg_0=1.0):\n \"\"\"Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.\n\n More about max-norm, see `wiki-max norm `_.\n The implementation follows `TensorFlow contrib `__.\n\n Parameters\n ----------\n scale : float\n A scalar multiplier `Tensor`. 
0.0 disables the regularizer.\n\n Returns\n ---------\n A function with signature `mn(weights, name=None)` that apply Lo regularization.\n\n Raises\n --------\n ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.\n\n \"\"\"\n if isinstance(arg_0, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % arg_0)\n\n if isinstance(arg_0, numbers.Real):\n if arg_0 < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g' % arg_0)\n # if scale >= 1.:\n # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %\n # scale)\n if arg_0 == 0.:\n tl.logging.info('Scale of 0 disables regularizer.')\n return lambda _, arg_1=None: None\n\n def mn(arg_2, arg_1='max_regularizer'):\n \"\"\"Applies max-norm regularization to weights.\"\"\"\n with tf.name_scope(arg_1) as scope:\n arg_3 = ops.convert_to_tensor(arg_0, dtype=arg_2.dtype.base_dtype, arg_1='scale')\n # if tf.__version__ <= '0.12':\n # standard_ops_fn = standard_ops.mul\n # else:\n arg_4 = standard_ops.multiply\n return arg_4(arg_3, standard_ops.reduce_max(standard_ops.abs(arg_2)), arg_1=scope)\n\n return mn"} +{"_id": "doc_1529", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=1, arg_3=None):\n \"\"\"Ramp activation function.\n\n Parameters\n ----------\n x : Tensor\n input.\n v_min : float\n cap input to v_min as a lower bound.\n v_max : float\n cap input to v_max as a upper bound.\n name : str\n The function name (optional).\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n \"\"\"\n return tf.clip_by_value(arg_0, clip_value_min=arg_1, clip_value_max=arg_2, arg_3=arg_3)"} +{"_id": "doc_1530", "title": "", "text": "def Func(arg_0, arg_1='Func'):\n \"\"\"Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.\n\n Usually be used for image segmentation.\n\n Parameters\n ----------\n x : Tensor\n input.\n - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2.\n - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2.\n name : str\n function name (optional)\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n Examples\n --------\n >>> outputs = Func(network.outputs)\n >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)\n\n References\n ----------\n - `tf.reverse `__\n\n \"\"\"\n with tf.name_scope(arg_1):\n return tf.nn.softmax(arg_0)"} +{"_id": "doc_1531", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Tensorflow version of np.repeat for 1D\"\"\"\n # https://github.com/tensorflow/tensorflow/issues/8521\n\n if len(arg_1.get_shape()) != 1:\n raise AssertionError(\"This is not a 1D Tensor\")\n\n arg_1 = tf.expand_dims(arg_1, -1)\n arg_1 = tf.tile(arg_1, [1, arg_2])\n arg_1 = arg_0.tf_flatten(arg_1)\n return arg_1"} +{"_id": "doc_1532", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Batch version of tf_map_coordinates\n\n Only supports 2D feature maps\n\n Parameters\n ----------\n inputs : ``tf.Tensor``\n shape = (b*c, h, w)\n coords : ``tf.Tensor``\n shape = (b*c, h, w, n, 2)\n\n Returns\n -------\n ``tf.Tensor``\n A Tensor with the shape as (b*c, h, w, n)\n\n \"\"\"\n arg_3 = arg_1.get_shape()\n arg_4 = arg_2.get_shape()\n arg_5 = tf.shape(arg_1)[0]\n arg_6 = int(arg_3[1])\n arg_7 = int(arg_3[2])\n arg_8 = int(arg_4[3])\n arg_9 = arg_6 * arg_7 * arg_8\n\n arg_10 = tf.cast(tf.floor(arg_2), 'int32')\n arg_11 = tf.cast(tf.ceil(arg_2), 'int32')\n arg_12 = tf.stack([arg_10[:, :, :, :, 
0], arg_11[:, :, :, :, 1]], axis=-1)\n arg_13 = tf.stack([arg_11[:, :, :, :, 0], arg_10[:, :, :, :, 1]], axis=-1)\n\n arg_14 = arg_0._tf_repeat(tf.range(arg_5), arg_9)\n\n arg_15 = arg_0._get_vals_by_coords(arg_1, arg_10, arg_14, (arg_5, arg_6, arg_7, arg_8))\n arg_16 = arg_0._get_vals_by_coords(arg_1, arg_11, arg_14, (arg_5, arg_6, arg_7, arg_8))\n arg_17 = arg_0._get_vals_by_coords(arg_1, arg_12, arg_14, (arg_5, arg_6, arg_7, arg_8))\n arg_18 = arg_0._get_vals_by_coords(arg_1, arg_13, arg_14, (arg_5, arg_6, arg_7, arg_8))\n\n arg_19 = arg_2 - tf.cast(arg_10, 'float32')\n\n arg_20 = arg_15 + (arg_18 - arg_15) * arg_19[:, :, :, :, 0]\n arg_21 = arg_17 + (arg_16 - arg_17) * arg_19[:, :, :, :, 0]\n arg_22 = arg_20 + (arg_21 - arg_20) * arg_19[:, :, :, :, 1]\n\n return arg_22"} +{"_id": "doc_1533", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Batch map offsets into input\n\n Parameters\n ------------\n inputs : ``tf.Tensor``\n shape = (b, h, w, c)\n offsets: ``tf.Tensor``\n shape = (b, h, w, 2*n)\n grid_offset: `tf.Tensor``\n Offset grids shape = (h, w, n, 2)\n\n Returns\n -------\n ``tf.Tensor``\n A Tensor with the shape as (b, h, w, c)\n\n \"\"\"\n arg_4 = arg_1.get_shape()\n arg_5 = tf.shape(arg_1)[0]\n arg_6 = int(int(arg_2.get_shape()[3]) / 2)\n arg_7 = arg_4[1]\n arg_8 = arg_4[2]\n arg_9 = arg_4[3]\n\n # inputs (b, h, w, c) --> (b*c, h, w)\n arg_1 = arg_0._to_bc_h_w(arg_1, arg_4)\n\n # offsets (b, h, w, 2*n) --> (b, h, w, n, 2)\n arg_2 = tf.reshape(arg_2, (arg_5, arg_7, arg_8, arg_6, 2))\n # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2)\n # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])\n\n arg_10 = tf.expand_dims(arg_3, 0) # grid_offset --> (1, h, w, n, 2)\n arg_10 = tf.tile(arg_10, [arg_5, 1, 1, 1, 1]) + arg_2 # grid_offset --> (b, h, w, n, 2)\n\n # clip out of bound\n arg_10 = tf.stack(\n [\n tf.clip_by_value(arg_10[:, :, :, :, 0], 0.0, tf.cast(arg_7 - 1, 'float32')),\n tf.clip_by_value(arg_10[:, :, :, :, 1], 0.0, tf.cast(arg_8 - 1, 'float32'))\n ], axis=-1\n )\n arg_10 = tf.tile(arg_10, [arg_9, 1, 1, 1, 1])\n\n arg_11 = arg_0._tf_batch_map_coordinates(arg_1, arg_10)\n # (b*c, h, w, n) --> (b, h, w, n, c)\n arg_11 = arg_0._to_b_h_w_n_c(arg_11, [arg_5, arg_7, arg_8, arg_6, arg_9])\n\n return arg_11"} +{"_id": "doc_1534", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=False, arg_4=False):\n \"\"\"Generate a generator that input a group of example in numpy.array and\n their labels, return the examples and labels by the given batch size.\n\n Parameters\n ----------\n inputs : numpy.array\n The input features, every row is a example.\n targets : numpy.array\n The labels of inputs, every row is a example.\n batch_size : int\n The batch size.\n allow_dynamic_batch_size: boolean\n Allow the use of the last data batch in case the number of examples is not a multiple of batch_size, this may result in unexpected behaviour if other functions expect a fixed-sized batch-size.\n shuffle : boolean\n Indicating whether to use a shuffling queue, shuffle the dataset before return.\n\n Examples\n --------\n >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])\n >>> y = np.asarray([0,1,2,3,4,5])\n >>> for batch in tl.iterate.Func(inputs=X, targets=y, batch_size=2, shuffle=False):\n >>> print(batch)\n (array([['a', 'a'], ['b', 'b']], dtype=' len(arg_0):\n if arg_3:\n arg_7 = len(arg_0)\n else:\n break\n if arg_4:\n arg_8 = arg_5[arg_6:arg_7]\n else:\n arg_8 = slice(arg_6, arg_7)\n if (isinstance(arg_0, list) or 
isinstance(arg_1, list)) and (arg_4 ==True):\n # zsdonghao: for list indexing when shuffle==True\n yield [arg_0[arg_9] for arg_9 in arg_8], [arg_1[arg_9] for arg_9 in arg_8]\n else:\n yield arg_0[arg_8], arg_1[arg_8]"} +{"_id": "doc_1535", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='model', **arg_3):\n \"\"\"Save model architecture and parameters into database, timestamp will be added automatically.\n\n Parameters\n ----------\n network : TensorLayer layer\n TensorLayer layer instance.\n model_name : str\n The name/key of model.\n kwargs : other events\n Other events, such as name, accuracy, loss, step number and etc (optinal).\n\n Examples\n ---------\n Save model architecture and parameters into database.\n >>> db.Func(net, accuracy=0.8, loss=2.3, name='second_model')\n\n Load one model with parameters from database (run this in other script)\n >>> net = db.find_top_model(sess=sess, accuracy=0.8, loss=2.3)\n\n Find and load the latest model.\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", pymongo.DESCENDING)])\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", -1)])\n\n Find and load the oldest model.\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", pymongo.ASCENDING)])\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", 1)])\n\n Get model information\n >>> net._accuracy\n ... 0.8\n\n Returns\n ---------\n boolean : True for success, False for fail.\n \"\"\"\n arg_3.update({'model_name': arg_2})\n arg_0._fill_project_info(arg_3) # put project_name into kwargs\n\n arg_4 = arg_1.get_all_params()\n\n arg_5 = time.time()\n\n arg_3.update({'architecture': arg_1.all_graphs, 'time': datetime.utcnow()})\n\n try:\n arg_6 = arg_0.model_fs.put(arg_0._serialization(arg_4))\n arg_3.update({'params_id': arg_6, 'time': datetime.utcnow()})\n arg_0.db.Model.insert_one(arg_3)\n print(\"[Database] Save model: SUCCESS, took: {}s\".format(round(time.time() - arg_5, 2)))\n return True\n except Exception as e:\n arg_7, arg_8, arg_9 = sys.exc_info()\n arg_10 = os.path.split(arg_9.tb_frame.f_code.co_filename)[1]\n logging.info(\"{} {} {} {} {}\".format(arg_7, arg_8, arg_10, arg_9.tb_lineno, e))\n print(\"[Database] Save model: FAIL\")\n return False"} +{"_id": "doc_1536", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Saves one dataset into database, timestamp will be added automatically.\n\n Parameters\n ----------\n dataset : any type\n The dataset you want to store.\n dataset_name : str\n The name of dataset.\n kwargs : other events\n Other events, such as description, author and etc (optinal).\n\n Examples\n ----------\n Save dataset\n >>> db.Func([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')\n\n Get dataset\n >>> dataset = db.find_top_dataset('mnist')\n\n Returns\n ---------\n boolean : Return True if save success, otherwise, return False.\n \"\"\"\n arg_0._fill_project_info(arg_3)\n if arg_2 is None:\n raise Exception(\"dataset_name is None, please give a dataset name\")\n arg_3.update({'dataset_name': arg_2})\n\n arg_4 = time.time()\n try:\n arg_5 = arg_0.dataset_fs.put(arg_0._serialization(arg_1))\n arg_3.update({'dataset_id': arg_5, 'time': datetime.utcnow()})\n arg_0.db.Dataset.insert_one(arg_3)\n # print(\"[Database] Save params: {} SUCCESS, took: {}s\".format(file_name, round(time.time()-s, 2)))\n print(\"[Database] Save dataset: SUCCESS, took: {}s\".format(round(time.time() - arg_4, 2)))\n return True\n except Exception as e:\n arg_6, arg_7, arg_8 = sys.exc_info()\n arg_9 = 
os.path.split(arg_8.tb_frame.f_code.co_filename)[1]\n logging.info(\"{} {} {} {} {}\".format(arg_6, arg_7, arg_9, arg_8.tb_lineno, e))\n print(\"[Database] Save dataset: FAIL\")\n return False"} +{"_id": "doc_1537", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Finds and returns a dataset from the database which matches the requirement.\n\n Parameters\n ----------\n dataset_name : str\n The name of dataset.\n sort : List of tuple\n PyMongo sort comment, search \"PyMongo find one sorting\" and `collection level operations `__ for more details.\n kwargs : other events\n Other events, such as description, author and etc (optinal).\n\n Examples\n ---------\n Save dataset\n >>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')\n\n Get dataset\n >>> dataset = db.Func('mnist')\n >>> datasets = db.find_datasets('mnist')\n\n Returns\n --------\n dataset : the dataset or False\n Return False if nothing found.\n\n \"\"\"\n\n arg_0._fill_project_info(arg_3)\n if arg_1 is None:\n raise Exception(\"dataset_name is None, please give a dataset name\")\n arg_3.update({'dataset_name': arg_1})\n\n arg_4 = time.time()\n\n arg_5 = arg_0.db.Dataset.find_one(filter=arg_3, arg_2=arg_2)\n\n if arg_5 is not None:\n arg_6 = arg_5['dataset_id']\n else:\n print(\"[Database] FAIL! Cannot find dataset: {}\".format(arg_3))\n return False\n try:\n arg_7 = arg_0._deserialization(arg_0.dataset_fs.get(arg_6).read())\n arg_8 = arg_0.db.Dataset.find(arg_3)\n print(\"[Database] Find one dataset SUCCESS, {} took: {}s\".format(arg_3, round(time.time() - arg_4, 2)))\n\n # check whether more datasets match the requirement\n arg_9 = arg_8.distinct('dataset_id')\n arg_10 = len(arg_9)\n if arg_10 != 1:\n print(\" Note that there are {} datasets match the requirement\".format(arg_10))\n return arg_7\n except Exception as e:\n arg_11, arg_12, arg_13 = sys.exc_info()\n arg_14 = os.path.split(arg_13.tb_frame.f_code.co_filename)[1]\n logging.info(\"{} {} {} {} {}\".format(arg_11, arg_12, arg_14, arg_13.tb_lineno, e))\n return False"} +{"_id": "doc_1538", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Finds and returns all datasets from the database which matches the requirement.\n In some case, the data in a dataset can be stored separately for better management.\n\n Parameters\n ----------\n dataset_name : str\n The name/key of dataset.\n kwargs : other events\n Other events, such as description, author and etc (optional).\n\n Returns\n --------\n params : the parameters, return False if nothing found.\n\n \"\"\"\n\n arg_0._fill_project_info(arg_2)\n if arg_1 is None:\n raise Exception(\"dataset_name is None, please give a dataset name\")\n arg_2.update({'dataset_name': arg_1})\n\n arg_3 = time.time()\n arg_4 = arg_0.db.Dataset.find(arg_2)\n\n if arg_4 is not None:\n arg_5 = arg_4.distinct('dataset_id')\n arg_6 = []\n for arg_7 in arg_5: # you may have multiple Buckets files\n arg_8 = arg_0.dataset_fs.get(arg_7).read()\n arg_6.append(arg_0._deserialization(arg_8))\n else:\n print(\"[Database] FAIL! 
Cannot find any dataset: {}\".format(arg_2))\n return False\n\n print(\"[Database] Find {} datasets SUCCESS, took: {}s\".format(len(arg_6), round(time.time() - arg_3, 2)))\n return arg_6"} +{"_id": "doc_1539", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Delete datasets.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.\n\n \"\"\"\n\n arg_0._fill_project_info(arg_1)\n arg_0.db.Dataset.delete_many(arg_1)\n logging.info(\"[Database] Delete Dataset SUCCESS\")"} +{"_id": "doc_1540", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Saves the validation log, timestamp will be added automatically.\n\n Parameters\n -----------\n kwargs : logging information\n Events, such as accuracy, loss, step number and etc.\n\n Examples\n ---------\n >>> db.Func(accuracy=0.33, loss=0.98)\n\n \"\"\"\n\n arg_0._fill_project_info(arg_1)\n arg_1.update({'time': datetime.utcnow()})\n arg_2 = arg_0.db.ValidLog.insert_one(arg_1)\n arg_3 = arg_0._print_dict(arg_1)\n logging.info(\"[Database] valid log: \" + arg_3)"} +{"_id": "doc_1541", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Deletes training log.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.\n\n Examples\n ---------\n Save training log\n >>> db.save_training_log(accuracy=0.33)\n >>> db.save_training_log(accuracy=0.44)\n\n Delete logs that match the requirement\n >>> db.Func(accuracy=0.33)\n\n Delete all logs\n >>> db.Func()\n \"\"\"\n arg_0._fill_project_info(arg_1)\n arg_0.db.TrainLog.delete_many(arg_1)\n logging.info(\"[Database] Delete TrainLog SUCCESS\")"} +{"_id": "doc_1542", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None, **arg_5):\n \"\"\"Uploads a task to the database, timestamp will be added automatically.\n\n Parameters\n -----------\n task_name : str\n The task name.\n script : str\n File name of the python script.\n hyper_parameters : dictionary\n The hyper parameters pass into the script.\n saved_result_keys : list of str\n The keys of the task results to keep in the database when the task finishes.\n kwargs : other parameters\n Users customized parameters such as description, version number.\n\n Examples\n -----------\n Uploads a task\n >>> db.Func(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')\n\n Finds and runs the latest task\n >>> db.run_top_task(sess=sess, sort=[(\"time\", pymongo.DESCENDING)])\n >>> db.run_top_task(sess=sess, sort=[(\"time\", -1)])\n\n Finds and runs the oldest task\n >>> db.run_top_task(sess=sess, sort=[(\"time\", pymongo.ASCENDING)])\n >>> db.run_top_task(sess=sess, sort=[(\"time\", 1)])\n\n \"\"\"\n if not isinstance(arg_1, str): # is None:\n raise Exception(\"task_name should be string\")\n if not isinstance(arg_2, str): # is None:\n raise Exception(\"script should be string\")\n if arg_3 is None:\n arg_3 = {}\n if arg_4 is None:\n arg_4 = []\n\n arg_0._fill_project_info(arg_5)\n arg_5.update({'time': datetime.utcnow()})\n arg_5.update({'hyper_parameters': arg_3})\n arg_5.update({'saved_result_keys': arg_4})\n\n arg_6 = open(arg_2, 'rb').read()\n\n arg_5.update({'status': 'pending', 'script': arg_6, 'result': {}})\n arg_0.db.Task.insert_one(arg_5)\n logging.info(\"[Database] Saved Task - task_name: {} script: {}\".format(arg_1, arg_2))"} +{"_id": "doc_1543", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Finds and runs a pending task that in 
the first of the sorting list.\n\n Parameters\n -----------\n task_name : str\n The task name.\n sort : List of tuple\n PyMongo sort comment, search \"PyMongo find one sorting\" and `collection level operations `__ for more details.\n kwargs : other parameters\n Users customized parameters such as description, version number.\n\n Examples\n ---------\n Monitors the database and pull tasks to run\n >>> while True:\n >>> print(\"waiting task from distributor\")\n >>> db.Func(task_name='mnist', sort=[(\"time\", -1)])\n >>> time.sleep(1)\n\n Returns\n --------\n boolean : True for success, False for fail.\n \"\"\"\n if not isinstance(arg_1, str): # is None:\n raise Exception(\"task_name should be string\")\n arg_0._fill_project_info(arg_3)\n arg_3.update({'status': 'pending'})\n\n # find task and set status to running\n arg_4 = arg_0.db.Task.find_one_and_update(arg_3, {'$set': {'status': 'running'}}, arg_2=arg_2)\n\n try:\n # get task info e.g. hyper parameters, python script\n if arg_4 is None:\n logging.info(\"[Database] Find Task FAIL: key: {} sort: {}\".format(arg_1, arg_2))\n return False\n else:\n logging.info(\"[Database] Find Task SUCCESS: key: {} sort: {}\".format(arg_1, arg_2))\n arg_5 = arg_4['time']\n arg_6 = arg_4['script']\n arg_7 = arg_4['_id']\n arg_8 = arg_4['hyper_parameters']\n arg_9 = arg_4['saved_result_keys']\n logging.info(\" hyper parameters:\")\n for arg_10 in arg_8:\n arg_11()[arg_10] = arg_8[arg_10]\n logging.info(\" {}: {}\".format(arg_10, arg_8[arg_10]))\n # run task\n arg_12 = time.time()\n logging.info(\"[Database] Start Task: key: {} sort: {} push time: {}\".format(arg_1, arg_2, arg_5))\n arg_6 = arg_6.decode('utf-8')\n with tf.Graph().as_default(): # as graph: # clear all TF graphs\n exec(arg_6, arg_11())\n\n # set status to finished\n arg_13 = arg_0.db.Task.find_one_and_update({'_id': arg_7}, {'$set': {'status': 'finished'}})\n\n # return results\n arg_14 = {}\n for arg_15 in arg_9:\n logging.info(\" result: {}={} {}\".format(arg_15, arg_11()[arg_15], type(arg_11()[arg_15])))\n arg_14.update({\"%s\" % arg_15: arg_11()[arg_15]})\n arg_13 = arg_0.db.Task.find_one_and_update(\n {\n '_id': arg_7\n }, {'$set': {\n 'result': arg_14\n }}, return_document=pymongo.ReturnDocument.AFTER\n )\n logging.info(\n \"[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s\".\n format(arg_1, arg_2, arg_5,\n time.time() - arg_12)\n )\n return True\n except Exception as e:\n arg_16, arg_17, arg_18 = sys.exc_info()\n arg_19 = os.path.split(arg_18.tb_frame.f_code.co_filename)[1]\n logging.info(\"{} {} {} {} {}\".format(arg_16, arg_17, arg_19, arg_18.tb_lineno, e))\n logging.info(\"[Database] Fail to run task\")\n # if fail, set status back to pending\n arg_13 = arg_0.db.Task.find_one_and_update({'_id': arg_7}, {'$set': {'status': 'pending'}})\n return False"} +{"_id": "doc_1544", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Delete tasks.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.\n\n Examples\n ---------\n >>> db.Func()\n\n \"\"\"\n\n arg_0._fill_project_info(arg_1)\n arg_0.db.Task.delete_many(arg_1)\n logging.info(\"[Database] Delete Task SUCCESS\")"} +{"_id": "doc_1545", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Finds and runs a pending task.\n\n Parameters\n -----------\n task_name : str\n The task name.\n kwargs : other parameters\n Users customized parameters such as description, version number.\n\n Examples\n ---------\n Wait until all tasks finish 
in user's local console\n\n >>> while not db.Func():\n >>> time.sleep(1)\n >>> print(\"all tasks finished\")\n >>> sess = tf.InteractiveSession()\n >>> net = db.find_top_model(sess=sess, sort=[(\"test_accuracy\", -1)])\n >>> print(\"the best accuracy {} is from model {}\".format(net._test_accuracy, net._name))\n\n Returns\n --------\n boolean : True for success, False for fail.\n\n \"\"\"\n\n if not isinstance(arg_1, str): # is None:\n raise Exception(\"task_name should be string\")\n arg_0._fill_project_info(arg_2)\n\n arg_2.update({'$or': [{'status': 'pending'}, {'status': 'running'}]})\n\n # ## find task\n # task = self.db.Task.find_one(kwargs)\n arg_3 = arg_0.db.Task.find(arg_2)\n\n arg_4 = arg_3.distinct('_id')\n arg_5 = len(arg_4)\n\n if arg_5 == 0:\n logging.info(\"[Database] No unfinished task - task_name: {}\".format(arg_1))\n return False\n else:\n\n logging.info(\"[Database] Find {} unfinished task - task_name: {}\".format(arg_5, arg_1))\n return True"} +{"_id": "doc_1546", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=2):\n \"\"\"Augment unigram features with hashed n-gram features.\"\"\"\n\n def get_ngrams(arg_3):\n return list(zip(*[arg_0[arg_4:] for arg_4 in range(arg_3)]))\n\n def hash_ngram(arg_5):\n arg_6 = array.array('L', arg_5).tobytes()\n arg_7 = int(hashlib.sha256(arg_6).hexdigest(), 16)\n return arg_1 + arg_7 % arg_2\n\n return arg_0 + [hash_ngram(arg_5) for arg_4 in range(2, arg_3 + 1) for arg_5 in get_ngrams(arg_4)]"} +{"_id": "doc_1547", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Load IMDb data and augment with hashed n-gram features.\"\"\"\n arg_1, arg_2, arg_3, arg_4 = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)\n\n if arg_0 is not None:\n arg_1 = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=arg_0) for x in arg_1])\n arg_3 = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=arg_0) for x in arg_3])\n\n return arg_1, arg_2, arg_3, arg_4"} +{"_id": "doc_1548", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"Read one image.\n\n Parameters\n -----------\n image : str\n The image file name.\n path : str\n The image folder path.\n\n Returns\n -------\n numpy.array\n The image.\n\n \"\"\"\n return imageio.imread(os.path.join(arg_1, arg_0))"} +{"_id": "doc_1549", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=10, arg_3=True):\n \"\"\"Returns all images in list by given path and name of each image file.\n\n Parameters\n -------------\n img_list : list of str\n The image file names.\n path : str\n The image folder path.\n n_threads : int\n The number of threads to read image.\n printable : boolean\n Whether to print information when reading images.\n\n Returns\n -------\n list of numpy.array\n The images.\n\n \"\"\"\n arg_4 = []\n for arg_5 in range(0, len(arg_0), arg_2):\n arg_6 = arg_0[arg_5:arg_5 + arg_2]\n arg_7 = tl.prepro.threading_data(arg_6, fn=read_image, arg_1=arg_1)\n # tl.logging.info(b_imgs.shape)\n arg_4.extend(arg_7)\n if arg_3:\n tl.logging.info('read %d from %s' % (len(arg_4), arg_1))\n return arg_4"} +{"_id": "doc_1550", "title": "", "text": "def Func(arg_0, arg_1='_temp.png'):\n \"\"\"Save a image.\n\n Parameters\n -----------\n image : numpy array\n [w, h, c]\n image_path : str\n path\n\n \"\"\"\n try: # RGB\n imageio.imwrite(arg_1, arg_0)\n except Exception: # Greyscale\n imageio.imwrite(arg_1, arg_0[:, :, 0])"} +{"_id": "doc_1551", "title": "", "text": "def Func(arg_0, arg_1, arg_2='_temp.png'):\n \"\"\"Save multiple images into one single image.\n\n Parameters\n -----------\n images : 
numpy array\n (batch, w, h, c)\n size : list of 2 ints\n row and column number.\n number of images should be equal or less than size[0] * size[1]\n image_path : str\n save path\n\n Examples\n ---------\n >>> import numpy as np\n >>> import tensorlayer as tl\n >>> images = np.random.rand(64, 100, 100, 3)\n >>> tl.visualize.Func(images, [8, 8], 'temp.png')\n\n \"\"\"\n if len(arg_0.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1]\n arg_0 = arg_0[:, :, :, np.newaxis]\n\n def merge(arg_0, arg_1):\n arg_3, arg_4 = arg_0.shape[1], arg_0.shape[2]\n arg_5 = np.zeros((arg_3 * arg_1[0], arg_4 * arg_1[1], 3), dtype=arg_0.dtype)\n for arg_6, arg_7 in enumerate(arg_0):\n arg_8 = arg_6 % arg_1[1]\n arg_9 = arg_6 // arg_1[1]\n arg_5[arg_9 * arg_3:arg_9 * arg_3 + arg_3, arg_8 * arg_4:arg_8 * arg_4 + arg_4, :] = arg_7\n return arg_5\n\n def imsave(arg_0, arg_1, arg_10):\n if np.max(arg_0) <= 1 and (-1 <= np.min(arg_0) < 0):\n arg_0 = ((arg_0 + 1) * 127.5).astype(np.uint8)\n elif np.max(arg_0) <= 1 and np.min(arg_0) >= 0:\n arg_0 = (arg_0 * 255).astype(np.uint8)\n\n return imageio.imwrite(arg_10, merge(arg_0, arg_1))\n\n if len(arg_0) > arg_1[0] * arg_1[1]:\n raise AssertionError(\"number of images should be equal or less than size[0] * size[1] {}\".format(len(arg_0)))\n\n return imsave(arg_0, arg_1, arg_2)"} +{"_id": "doc_1552", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=True, arg_6=True, arg_7=None\n):\n \"\"\"Draw bboxes and class labels on image. Return or save the image with bboxes, example in the docs of ``tl.prepro``.\n\n Parameters\n -----------\n image : numpy.array\n The RGB image [height, width, channel].\n classes : list of int\n A list of class ID (int).\n coords : list of int\n A list of list for coordinates.\n - Should be [x, y, x2, y2] (up-left and botton-right format)\n - If [x_center, y_center, w, h] (set is_center to True).\n scores : list of float\n A list of score (float). (Optional)\n classes_list : list of str\n for converting ID to string on image.\n is_center : boolean\n Whether the coordinates is [x_center, y_center, w, h]\n - If coordinates are [x_center, y_center, w, h], set it to True for converting it to [x, y, x2, y2] (up-left and botton-right) internally.\n - If coordinates are [x1, x2, y1, y2], set it to False.\n is_rescale : boolean\n Whether to rescale the coordinates from pixel-unit format to ratio format.\n - If True, the input coordinates are the portion of width and high, this API will scale the coordinates to pixel unit internally.\n - If False, feed the coordinates with pixel unit format.\n save_name : None or str\n The name of image file (i.e. 
image.png), if None, not to save image.\n\n Returns\n -------\n numpy.array\n The saved image.\n\n References\n -----------\n - OpenCV rectangle and putText.\n - `scikit-image `__.\n\n \"\"\"\n if len(arg_2) != len(arg_1):\n raise AssertionError(\"number of coordinates and classes are equal\")\n\n if len(arg_3) > 0 and len(arg_3) != len(arg_1):\n raise AssertionError(\"number of scores and classes are equal\")\n\n # don't change the original image, and avoid error https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy\n arg_0 = arg_0.copy()\n\n arg_8, arg_9 = arg_0.shape[0:2]\n arg_10 = int((arg_8 + arg_9) // 430)\n\n for arg_11, arg_12 in enumerate(arg_2):\n if arg_5:\n arg_13, arg_14, arg_15, arg_16 = tl.prepro.obj_box_coord_centroid_to_upleft_butright(arg_2[arg_11])\n else:\n arg_13, arg_14, arg_15, arg_16 = arg_2[arg_11]\n\n if arg_6: # scale back to pixel unit if the coords are the portion of width and high\n arg_13, arg_14, arg_15, arg_16 = tl.prepro.obj_box_coord_scale_to_pixelunit([arg_13, arg_14, arg_15, arg_16], (arg_8, arg_9))\n\n cv2.rectangle(\n arg_0,\n (int(arg_13), int(arg_14)),\n (int(arg_15), int(arg_16)), # up-left and botton-right\n [0, 255, 0],\n arg_10\n )\n\n cv2.putText(\n arg_0,\n arg_4[arg_1[arg_11]] + ((\" %.2f\" % (arg_3[arg_11])) if (len(arg_3) != 0) else \" \"),\n (int(arg_13), int(arg_14)), # button left\n 0,\n 1.5e-3 * arg_8, # bigger = larger font\n [0, 0, 256], # self.meta['colors'][max_indx],\n int(arg_10 / 2) + 1\n ) # bold\n\n if arg_7 is not None:\n # cv2.imwrite('_my.png', image)\n save_image(arg_0, arg_7)\n # if len(coords) == 0:\n # tl.logging.info(\"Func: no bboxes exist, cannot draw !\")\n return arg_0"} +{"_id": "doc_1553", "title": "", "text": "def Func(arg_0=None, arg_1=10, arg_2=True, arg_3='cnn', arg_4=3119362):\n \"\"\"Display a group of RGB or Greyscale CNN masks.\n\n Parameters\n ----------\n CNN : numpy.array\n The image. 
e.g: 64 5x5 RGB images can be (5, 5, 3, 64).\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n name : str\n A name to save the image, if saveable is True.\n fig_idx : int\n The matplotlib figure index.\n\n Examples\n --------\n >>> tl.visualize.Func(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)\n\n \"\"\"\n import matplotlib.pyplot as plt\n # tl.logging.info(CNN.shape) # (5, 5, 3, 64)\n # exit()\n arg_5 = arg_0.shape[3]\n arg_6 = arg_0.shape[0]\n arg_7 = arg_0.shape[1]\n arg_8 = arg_0.shape[2]\n arg_9 = int(np.sqrt(arg_5))\n arg_10 = int(np.ceil(arg_5 / arg_9))\n plt.ion() # active mode\n arg_11 = plt.figure(arg_4)\n arg_12 = 1\n for arg_13 in range(1, arg_9 + 1):\n for arg_14 in range(1, arg_10 + 1):\n if arg_12 > arg_5:\n break\n arg_11.add_subplot(arg_10, arg_9, arg_12)\n # tl.logging.info(CNN[:,:,:,count-1].shape, n_row, n_col) # (5, 1, 32) 5 5\n # exit()\n # plt.imshow(\n # np.reshape(CNN[count-1,:,:,:], (n_row, n_col)),\n # cmap='gray', interpolation=\"nearest\") # theano\n if arg_8 == 1:\n plt.imshow(np.reshape(arg_0[:, :, :, arg_12 - 1], (arg_6, arg_7)), cmap='gray', interpolation=\"nearest\")\n elif arg_8 == 3:\n plt.imshow(\n np.reshape(arg_0[:, :, :, arg_12 - 1], (arg_6, arg_7, arg_8)), cmap='gray', interpolation=\"nearest\"\n )\n else:\n raise Exception(\"Unknown n_color\")\n plt.gca().xaxis.set_major_locator(plt.NullLocator()) # distable tick\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n arg_12 = arg_12 + 1\n if arg_2:\n plt.savefig(arg_3 + '.pdf', format='pdf')\n else:\n plt.draw()\n plt.pause(arg_1)"} +{"_id": "doc_1554", "title": "", "text": "def Func(arg_0, arg_1, arg_2=500, arg_3=5, arg_4=False, arg_5='tsne', arg_6=9862):\n \"\"\"Visualize the embeddings by using t-SNE.\n\n Parameters\n ----------\n embeddings : numpy.array\n The embedding matrix.\n reverse_dictionary : dictionary\n id_to_word, mapping id to unique word.\n plot_only : int\n The number of examples to plot, choice the most common words.\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n name : str\n A name to save the image, if saveable is True.\n fig_idx : int\n matplotlib figure index.\n\n Examples\n --------\n >>> see 'tutorial_word2vec_basic.py'\n >>> final_embeddings = normalized_embeddings.eval()\n >>> tl.visualize.Func(final_embeddings, labels, reverse_dictionary,\n ... 
plot_only=500, second=5, saveable=False, name='tsne')\n\n \"\"\"\n import matplotlib.pyplot as plt\n\n def plot_with_labels(arg_7, arg_8, arg_9=(18, 18), arg_3=5, arg_4=True, arg_5='tsne', arg_6=9862):\n\n if arg_7.shape[0] < len(arg_8):\n raise AssertionError(\"More labels than embeddings\")\n\n if arg_4 is False:\n plt.ion()\n plt.figure(arg_6)\n\n plt.figure(arg_9=arg_9) # in inches\n\n for arg_10, arg_11 in enumerate(arg_8):\n arg_12, arg_13 = arg_7[arg_10, :]\n plt.scatter(arg_12, arg_13)\n plt.annotate(arg_11, xy=(arg_12, arg_13), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')\n\n if arg_4:\n plt.savefig(arg_5 + '.pdf', format='pdf')\n else:\n plt.draw()\n plt.pause(arg_3)\n\n try:\n from sklearn.manifold import TSNE\n from six.moves import xrange\n\n arg_14 = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n # plot_only = 500\n arg_7 = arg_14.fit_transform(arg_0[:arg_2, :])\n arg_8 = [arg_1[arg_10] for arg_10 in xrange(arg_2)]\n plot_with_labels(arg_7, arg_8, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, arg_6=arg_6)\n\n except ImportError:\n arg_15 = \"Please install sklearn and matplotlib to visualize embeddings.\"\n tl.logging.error(arg_15)\n raise ImportError(arg_15)"} +{"_id": "doc_1555", "title": "", "text": "def Func(arg_0=None, arg_1=10, arg_2=True, arg_3=None, arg_4='mnist', arg_5=2396512):\n \"\"\"Visualize every columns of the weight matrix to a group of Greyscale img.\n\n Parameters\n ----------\n W : numpy.array\n The weight matrix\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n shape : a list with 2 int or None\n The shape of feature image, MNIST is [28, 80].\n name : a string\n A name to save the image, if saveable is True.\n fig_idx : int\n matplotlib figure index.\n\n Examples\n --------\n >>> tl.visualize.Func(network.all_params[0].eval(), second=10, saveable=True, name='weight_of_1st_layer', fig_idx=2012)\n\n \"\"\"\n if arg_3 is None:\n arg_3 = [28, 28]\n\n import matplotlib.pyplot as plt\n if arg_2 is False:\n plt.ion()\n arg_6 = plt.figure(arg_5) # show all feature images\n arg_7 = arg_0.shape[1]\n\n arg_8 = int(np.sqrt(arg_7)) # \u6bcf\u884c\u663e\u793a\u7684\u4e2a\u6570 \u82e525\u4e2ahidden unit -> \u6bcf\u884c\u663e\u793a5\u4e2a\n arg_9 = int(np.ceil(arg_7 / arg_8))\n arg_10 = int(1)\n for arg_11 in range(1, arg_8 + 1):\n for arg_12 in range(1, arg_9 + 1):\n if arg_10 > arg_7:\n break\n arg_6.add_subplot(arg_8, arg_9, arg_10)\n # ------------------------------------------------------------\n # plt.imshow(np.reshape(W[:,count-1],(28,28)), cmap='gray')\n # ------------------------------------------------------------\n arg_13 = arg_0[:, arg_10 - 1] / np.sqrt((arg_0[:, arg_10 - 1]**2).sum())\n # feature[feature<0.0001] = 0 # value threshold\n # if count == 1 or count == 2:\n # print(np.mean(feature))\n # if np.std(feature) < 0.03: # condition threshold\n # feature = np.zeros_like(feature)\n # if np.mean(feature) < -0.015: # condition threshold\n # feature = np.zeros_like(feature)\n plt.imshow(\n np.reshape(arg_13, (arg_3[0], arg_3[1])), cmap='gray', interpolation=\"nearest\"\n ) # , vmin=np.min(feature), vmax=np.max(feature))\n # plt.title(name)\n # ------------------------------------------------------------\n # plt.imshow(np.reshape(W[:,count-1] ,(np.sqrt(size),np.sqrt(size))), cmap='gray', interpolation=\"nearest\")\n plt.gca().xaxis.set_major_locator(plt.NullLocator()) # distable tick\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n arg_10 = 
arg_10 + 1\n if arg_2:\n plt.savefig(arg_4 + '.pdf', format='pdf')\n else:\n plt.draw()\n plt.pause(arg_1)"} +{"_id": "doc_1556", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Save data into TFRecord.\"\"\"\n if os.path.isfile(arg_2):\n print(\"%s exists\" % arg_2)\n return\n print(\"Converting data into %s ...\" % arg_2)\n # cwd = os.getcwd()\n arg_3 = tf.python_io.TFRecordWriter(arg_2)\n for arg_4, arg_5 in enumerate(arg_0):\n arg_6 = arg_5.tobytes()\n # Visualize a image\n # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)\n arg_7 = int(arg_1[arg_4])\n # print(label)\n # Convert the bytes back to image as follow:\n # image = Image.frombytes('RGB', (32, 32), img_raw)\n # image = np.fromstring(img_raw, np.float32)\n # image = image.reshape([32, 32, 3])\n # tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)\n arg_8 = tf.train.Example(\n features=tf.train.Features(\n feature={\n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[arg_7])),\n 'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[arg_6])),\n }\n )\n )\n arg_3.write(arg_8.SerializeToString()) # Serialize To String\n arg_3.close()"} +{"_id": "doc_1557", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return tensor to read from TFRecord.\"\"\"\n arg_2 = tf.train.string_input_producer([arg_0])\n arg_3 = tf.TFRecordReader()\n arg_4, arg_5 = arg_3.read(arg_2)\n arg_6 = tf.parse_single_example(\n arg_5, arg_6={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n }\n )\n # You can do more image distortion here for training data\n arg_7 = tf.decode_raw(arg_6['img_raw'], tf.float32)\n arg_7 = tf.reshape(arg_7, [32, 32, 3])\n # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5\n if arg_1 ==True:\n # 1. Randomly crop a [height, width] section of the image.\n arg_7 = tf.random_crop(arg_7, [24, 24, 3])\n\n # 2. Randomly flip the image horizontally.\n arg_7 = tf.image.random_flip_left_right(arg_7)\n\n # 3. Randomly change brightness.\n arg_7 = tf.image.random_brightness(arg_7, max_delta=63)\n\n # 4. Randomly change contrast.\n arg_7 = tf.image.random_contrast(arg_7, lower=0.2, upper=1.8)\n\n # 5. Subtract off the mean and divide by the variance of the pixels.\n arg_7 = tf.image.per_image_standardization(arg_7)\n\n elif arg_1 == False:\n # 1. Crop the central [height, width] of the image.\n arg_7 = tf.image.resize_image_with_crop_or_pad(arg_7, 24, 24)\n\n # 2. 
Subtract off the mean and divide by the variance of the pixels.\n arg_7 = tf.image.per_image_standardization(arg_7)\n\n elif arg_1 == None:\n arg_7 = arg_7\n\n arg_8 = tf.cast(arg_6['label'], tf.int32)\n return arg_7, arg_8"} +{"_id": "doc_1558", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=None):\n \"\"\"Print all info of parameters in the network\"\"\"\n for arg_3, arg_4 in enumerate(arg_0.all_params):\n if arg_1:\n try:\n arg_5 = arg_4.eval(arg_2=arg_2)\n logging.info(\n \" param {:3}: {:20} {:15} {} (mean: {:<18}, median: {:<18}, std: {:<18}) \".\n format(arg_3, arg_4.name, str(arg_5.shape), arg_4.dtype.name, arg_5.mean(), np.median(arg_5), arg_5.std())\n )\n except Exception as e:\n logging.info(str(e))\n raise Exception(\n \"Hint: print params details after tl.layers.initialize_global_variables(sess) \"\n \"or use network.Func(False).\"\n )\n else:\n logging.info(\" param {:3}: {:20} {:15} {}\".format(arg_3, arg_4.name, str(arg_4.get_shape()), arg_4.dtype.name))\n logging.info(\" num of params: %d\" % arg_0.count_params())"} +{"_id": "doc_1559", "title": "", "text": "def Func(arg_0):\n \"\"\"Print all info of layers in the network.\"\"\"\n for arg_1, arg_2 in enumerate(arg_0.all_layers):\n # logging.info(\" layer %d: %s\" % (i, str(layer)))\n logging.info(\n \" layer {:3}: {:20} {:15} {}\".format(arg_1, arg_2.name, str(arg_2.get_shape()), arg_2.dtype.name)\n )"} +{"_id": "doc_1560", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return the parameters in a list of array.\"\"\"\n arg_2 = []\n for arg_3 in arg_0.all_params:\n if arg_1 is None:\n arg_2.append(arg_3.eval())\n else:\n arg_2.append(arg_1.run(arg_3))\n return arg_2"} +{"_id": "doc_1561", "title": "", "text": "def Func(arg_0, arg_1=4):\n \"\"\"Get all arguments of current layer for saving the graph.\"\"\"\n arg_2 = inspect.stack()\n\n if len(arg_2) < arg_1 + 1:\n raise ValueError(\"The length of the inspection stack is shorter than the requested start position.\")\n\n arg_3, arg_4, arg_4, arg_5 = inspect.getargvalues(arg_2[arg_1][0])\n\n arg_6 = {}\n\n for arg_7 in arg_3:\n\n # some args dont need to be saved into the graph. e.g. the input placeholder\n if arg_5[arg_7] is not None and arg_7 not in ['self', 'prev_layer', 'inputs']:\n\n arg_8 = arg_5[arg_7]\n\n # change function (e.g. act) into dictionary of module path and function name\n if inspect.isfunction(arg_8):\n arg_6[arg_7] = {\"module_path\": arg_8.__module__, \"func_name\": arg_8.__name__}\n # ignore more args e.g. TF class\n elif arg_7.endswith('init'):\n continue\n # for other data type, save them directly\n else:\n arg_6[arg_7] = arg_8\n\n return arg_6"} +{"_id": "doc_1562", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=16,\n arg_6=1, arg_7=\"filename_queue\", arg_8=\"input_queue\"\n):\n \"\"\"Prefetches string values from disk into an input queue.\n\n In training the capacity of the queue is important because a larger queue\n means better mixing of training examples between shards. 
The minimum number of\n values kept in the queue is values_per_shard * input_queue_capacity_factor,\n where input_queue_memory factor should be chosen to trade-off better mixing\n with memory usage.\n\n Args:\n reader: Instance of tf.ReaderBase.\n file_pattern: Comma-separated list of file patterns (e.g.\n /tmp/train_data-?????-of-00100).\n is_training: Boolean; whether prefetching for training or eval.\n batch_size: Model batch size used to determine queue capacity.\n values_per_shard: Approximate number of values per shard.\n input_queue_capacity_factor: Minimum number of values to keep in the queue\n in multiples of values_per_shard. See comments above.\n num_reader_threads: Number of reader threads to fill the queue.\n shard_queue_name: Name for the shards filename queue.\n value_queue_name: Name for the values input queue.\n\n Returns:\n A Queue containing prefetched string values.\n \"\"\"\n arg_9 = []\n for arg_10 in arg_1.split(\",\"):\n arg_9.extend(tf.gfile.Glob(arg_10))\n if not arg_9:\n tl.logging.fatal(\"Found no input files matching %s\", arg_1)\n else:\n tl.logging.info(\"Prefetching values from %d files matching %s\", len(arg_9), arg_1)\n\n if arg_2:\n print(\" is_training == True : RandomShuffleQueue\")\n arg_11 = tf.train.string_input_producer(arg_9, shuffle=True, arg_13=16, name=arg_7)\n arg_12 = arg_4 * arg_5\n arg_13 = arg_12 + 100 * arg_3\n arg_14 = tf.RandomShuffleQueue(\n arg_13=arg_13, min_after_dequeue=arg_12, dtypes=[tf.string],\n name=\"random_\" + arg_8\n )\n else:\n print(\" is_training == False : FIFOQueue\")\n arg_11 = tf.train.string_input_producer(arg_9, shuffle=False, arg_13=1, name=arg_7)\n arg_13 = arg_4 + 3 * arg_3\n arg_14 = tf.FIFOQueue(arg_13=arg_13, dtypes=[tf.string], name=\"fifo_\" + arg_8)\n\n arg_15 = []\n for arg_16 in range(arg_6):\n arg_16, arg_17 = arg_0.read(arg_11)\n arg_15.append(arg_14.enqueue([arg_17]))\n tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(arg_14, arg_15))\n\n tf.summary.scalar(\n \"queue/%s/fraction_of_%d_full\" % (arg_14.name, arg_13),\n tf.cast(arg_14.size(), tf.float32) * (1. / arg_13)\n )\n\n return arg_14"} +{"_id": "doc_1563", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"Batches input images and captions.\n\n This function splits the caption into an input sequence and a target sequence,\n where the target sequence is the input sequence right-shifted by 1. Input and\n target sequences are batched and padded up to the maximum length of sequences\n in the batch. A mask is created to distinguish real words from padding words.\n\n Example:\n Actual captions in the batch ('-' denotes padded character):\n [\n [ 1 2 5 4 5 ],\n [ 1 2 3 4 - ],\n [ 1 2 3 - - ],\n ]\n\n input_seqs:\n [\n [ 1 2 3 4 ],\n [ 1 2 3 - ],\n [ 1 2 - - ],\n ]\n\n target_seqs:\n [\n [ 2 3 4 5 ],\n [ 2 3 4 - ],\n [ 2 3 - - ],\n ]\n\n mask:\n [\n [ 1 1 1 1 ],\n [ 1 1 1 0 ],\n [ 1 1 0 0 ],\n ]\n\n Args:\n images_and_captions: A list of pairs [image, caption], where image is a\n Tensor of shape [height, width, channels] and caption is a 1-D Tensor of\n any length. 
Each pair will be processed and added to the queue in a\n separate thread.\n batch_size: Batch size.\n queue_capacity: Queue capacity.\n add_summaries: If true, add caption length summaries.\n\n Returns:\n images: A Tensor of shape [batch_size, height, width, channels].\n input_seqs: An int32 Tensor of shape [batch_size, padded_length].\n target_seqs: An int32 Tensor of shape [batch_size, padded_length].\n mask: An int32 0/1 Tensor of shape [batch_size, padded_length].\n \"\"\"\n arg_4 = []\n for arg_5, arg_6 in arg_0:\n arg_7 = tf.shape(arg_6)[0]\n arg_8 = tf.expand_dims(tf.subtract(arg_7, 1), 0)\n\n arg_9 = tf.slice(arg_6, [0], arg_8)\n arg_10 = tf.slice(arg_6, [1], arg_8)\n arg_11 = tf.ones(arg_8, dtype=tf.int32)\n arg_4.append([arg_5, arg_9, arg_10, arg_11])\n\n arg_12, arg_13, arg_14, arg_15 = tf.train.batch_join(\n arg_4, arg_1=arg_1, capacity=arg_2, dynamic_pad=True, name=\"batch_and_pad\"\n )\n\n if arg_3:\n arg_16 = tf.add(tf.reduce_sum(arg_15, 1), 1)\n tf.summary.scalar(\"caption_length/batch_min\", tf.reduce_min(arg_16))\n tf.summary.scalar(\"caption_length/batch_max\", tf.reduce_max(arg_16))\n tf.summary.scalar(\"caption_length/batch_mean\", tf.reduce_mean(arg_16))\n\n return arg_12, arg_13, arg_14, arg_15"} +{"_id": "doc_1564", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7=None):\n \"\"\"Data Format aware version of tf.nn.Func.\"\"\"\n with ops.name_scope(arg_7, 'batchnorm', [arg_0, arg_1, arg_2, arg_4, arg_3]):\n arg_8 = math_ops.rsqrt(arg_2 + arg_5)\n if arg_4 is not None:\n arg_8 *= arg_4\n\n arg_9 = math_ops.cast(arg_8, arg_0.dtype)\n arg_10 = math_ops.cast(arg_3 - arg_1 * arg_8 if arg_3 is not None else -arg_1 * arg_8, arg_0.dtype)\n\n # Return a * x + b with customized data_format.\n # Currently TF doesn't have bias_scale, and tensorRT has bug in converting tf.nn.bias_add\n # So we reimplemted them to allow make the model work with tensorRT.\n # See https://github.com/tensorlayer/openpose-plus/issues/75 for more details.\n arg_11 = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}\n return _bias_add(_bias_scale(arg_0, arg_9, arg_11[arg_6]), arg_10, arg_11[arg_6])"} +{"_id": "doc_1565", "title": "", "text": "def Func(arg_0, arg_1='flatten'):\n \"\"\"Reshapes a high-dimension vector input.\n\n [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]\n\n Parameters\n ----------\n variable : TensorFlow variable or tensor\n The variable or tensor to be flatten.\n name : str\n A unique layer name.\n\n Returns\n -------\n Tensor\n Flatten Tensor\n\n Examples\n --------\n >>> import tensorflow as tf\n >>> import tensorlayer as tl\n >>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])\n >>> # Convolution Layer with 32 filters and a kernel size of 5\n >>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n >>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n >>> network = tf.layers.max_pooling2d(network, 2, 2)\n >>> print(network.get_shape()[:].as_list())\n >>> [None, 62, 62, 32]\n >>> network = tl.layers.Func(network)\n >>> print(network.get_shape()[:].as_list()[1:])\n >>> [None, 123008]\n \"\"\"\n arg_2 = 1\n for arg_3 in arg_0.get_shape()[1:].as_list():\n arg_2 *= arg_3\n return tf.reshape(arg_0, shape=[-1, arg_2], arg_1=arg_1)"} +{"_id": "doc_1566", "title": "", "text": "def Func(arg_0, arg_1=\"\", arg_2=False):\n \"\"\"Get a list of layers' output in a network by a given name scope.\n\n Parameters\n -----------\n net : :class:`Layer`\n The last layer of the network.\n 
name : str\n Get the layers' output that contain this name.\n verbose : boolean\n If True, print information of all the layers' output\n\n Returns\n --------\n list of Tensor\n A list of layers' output (TensorFlow tensor)\n\n Examples\n ---------\n >>> import tensorlayer as tl\n >>> layers = tl.layers.Func(net, \"CNN\", True)\n\n \"\"\"\n logging.info(\" [*] geting layers with %s\" % arg_1)\n\n arg_3 = []\n arg_4 = 0\n\n for arg_5 in arg_0.all_layers:\n # logging.info(type(layer.name))\n if arg_1 in arg_5.name:\n arg_3.append(arg_5)\n\n if arg_2:\n logging.info(\" got {:3}: {:15} {}\".format(arg_4, arg_5.name, str(arg_5.get_shape())))\n arg_4 = arg_4 + 1\n\n return arg_3"} +{"_id": "doc_1567", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Returns the initialized RNN state.\n The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.\n\n Parameters\n ----------\n state : RNN state.\n The TensorFlow's RNN state.\n feed_dict : dictionary\n Initial RNN state; if None, returns zero state.\n\n Returns\n -------\n RNN state\n The TensorFlow's RNN state.\n\n \"\"\"\n if isinstance(arg_0, LSTMStateTuple):\n arg_2 = arg_0.c.eval(arg_1=arg_1)\n arg_3 = arg_0.h.eval(arg_1=arg_1)\n return arg_2, arg_3\n else:\n arg_4 = arg_0.eval(arg_1=arg_1)\n return arg_4"} +{"_id": "doc_1568", "title": "", "text": "def Func(arg_0):\n \"\"\"Remove the repeated items in a list, and return the processed list.\n You may need it to create merged layer like Concat, Elementwise and etc.\n\n Parameters\n ----------\n x : list\n Input\n\n Returns\n -------\n list\n A list that after removing it's repeated items\n\n Examples\n -------\n >>> l = [2, 3, 4, 2, 3]\n >>> l = Func(l)\n [2, 3, 4]\n\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0:\n if arg_2 not in arg_1:\n arg_1.append(arg_2)\n\n return arg_1"} +{"_id": "doc_1569", "title": "", "text": "def Func(arg_0):\n \"\"\"Ternary operation use threshold computed with weights.\"\"\"\n arg_1 = tf.get_default_graph()\n with arg_1.gradient_override_map({\"Sign\": \"Identity\"}):\n arg_2 = _compute_threshold(arg_0)\n arg_0 = tf.sign(tf.add(tf.sign(tf.add(arg_0, arg_2)), tf.sign(tf.add(arg_0, -arg_2))))\n return arg_0"} +{"_id": "doc_1570", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds a deprecation notice to a docstring.\"\"\"\n if not arg_0:\n arg_3 = [arg_1]\n\n else:\n arg_3 = _normalize_docstring(arg_0).splitlines()\n\n arg_2 = [''] + arg_2\n\n if len(arg_3) > 1:\n # Make sure that we keep our distance from the main body\n if arg_3[1].strip():\n arg_2.append('')\n\n arg_3[1:1] = arg_2\n else:\n arg_3 += arg_2\n\n return '\\n'.join(arg_3)"} +{"_id": "doc_1571", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Creates a tensor with all elements set to `alpha_value`.\n This operation returns a tensor of type `dtype` with shape `shape` and all\n elements set to alpha.\n\n Parameters\n ----------\n shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.\n The shape of the desired tensor\n alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`\n The value used to fill the resulting `Tensor`.\n name: str\n A name for the operation (optional).\n\n Returns\n -------\n A `Tensor` with all elements set to alpha.\n\n Examples\n --------\n >>> tl.Func([2, 3], tf.int32) # [[alpha, alpha, alpha], [alpha, alpha, alpha]]\n \"\"\"\n with ops.name_scope(arg_2, \"Func\", [arg_0]) as arg_2:\n\n arg_3 = convert_to_tensor(arg_1)\n arg_4 = 
dtypes.as_dtype(arg_3.dtype).base_dtype\n\n if not isinstance(arg_0, ops.Tensor):\n try:\n arg_0 = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(arg_0))\n except (TypeError, ValueError):\n arg_0 = ops.convert_to_tensor(arg_0, dtype=dtypes.int32)\n\n if not arg_0._shape_tuple():\n arg_0 = reshape(arg_0, [-1]) # Ensure it's a vector\n\n try:\n arg_5 = constant(arg_1, arg_0=arg_0, dtype=arg_4, arg_2=arg_2)\n\n except (TypeError, ValueError):\n arg_5 = fill(arg_0, constant(arg_1, dtype=arg_4), arg_2=arg_2)\n\n if arg_5.dtype.base_dtype != arg_4:\n raise AssertionError(\"Dtypes do not corresponds: %s and %s\" % (arg_5.dtype.base_dtype, arg_4))\n\n return arg_5"} +{"_id": "doc_1572", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None):\n \"\"\"\n Return the Func results of given non time-series network.\n\n Parameters\n ----------\n sess : Session\n TensorFlow Session.\n network : TensorLayer layer\n The network.\n X : numpy.array\n The inputs.\n x : placeholder\n For inputs.\n y_op : placeholder\n The argmax expression of softmax outputs.\n batch_size : int or None\n The batch size for Funcion, when dataset is large, we should use minibatche for Funcion;\n if dataset is small, we can set it to None.\n\n Examples\n --------\n See `tutorial_mnist_simple.py `_\n\n >>> y = network.outputs\n >>> y_op = tf.argmax(tf.nn.softmax(y), 1)\n >>> print(tl.utils.Func(sess, network, X_test, x, y_op))\n\n \"\"\"\n if arg_5 is None:\n arg_6 = dict_to_one(arg_1.all_drop) # disable noise layers\n arg_7 = {\n arg_3: arg_2,\n }\n arg_7.update(arg_6)\n return arg_0.run(arg_4, arg_7=arg_7)\n else:\n arg_8 = None\n for arg_9, arg_10 in tl.iterate.minibatches(arg_2, arg_2, arg_5, shuffle=False):\n arg_6 = dict_to_one(arg_1.all_drop)\n arg_7 = {\n arg_3: arg_9,\n }\n arg_7.update(arg_6)\n arg_11 = arg_0.run(arg_4, arg_7=arg_7)\n if arg_8 is None:\n arg_8 = arg_11\n else:\n arg_8 = np.concatenate((arg_8, arg_11))\n if arg_8 is None:\n if len(arg_2) % arg_5 != 0:\n arg_6 = dict_to_one(arg_1.all_drop)\n arg_7 = {\n arg_3: arg_2[-(len(arg_2) % arg_5):, :],\n }\n arg_7.update(arg_6)\n arg_11 = arg_0.run(arg_4, arg_7=arg_7)\n arg_8 = arg_11\n else:\n if len(arg_2) != len(arg_8) and len(arg_2) % arg_5 != 0:\n arg_6 = dict_to_one(arg_1.all_drop)\n arg_7 = {\n arg_3: arg_2[-(len(arg_2) % arg_5):, :],\n }\n arg_7.update(arg_6)\n arg_11 = arg_0.run(arg_4, arg_7=arg_7)\n arg_8 = np.concatenate((arg_8, arg_11))\n return arg_8"} +{"_id": "doc_1573", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"\n Input the predicted results, targets results and\n the number of class, return the confusion matrix, F1-score of each class,\n accuracy and macro F1-score.\n\n Parameters\n ----------\n y_test : list\n The target results\n y_predict : list\n The predicted results\n n_classes : int\n The number of classes\n\n Examples\n --------\n >>> c_mat, f1, acc, f1_macro = tl.utils.Func(y_test, y_predict, n_classes)\n\n \"\"\"\n arg_3 = confusion_matrix(arg_0, arg_1, labels=[x for x in range(arg_2)])\n arg_4 = f1_score(arg_0, arg_1, average=None, labels=[x for x in range(arg_2)])\n arg_5 = f1_score(arg_0, arg_1, average='macro')\n arg_6 = accuracy_score(arg_0, arg_1)\n tl.logging.info('confusion matrix: \\n%s' % arg_3)\n tl.logging.info('f1-score : %s' % arg_4)\n tl.logging.info('f1-score(macro) : %f' % arg_5) # same output with > f1_score(y_true, y_pred, average='macro')\n tl.logging.info('accuracy-score : %f' % arg_6)\n return arg_3, arg_4, arg_6, arg_5"} +{"_id": 
"doc_1574", "title": "", "text": "def Func(arg_0=0, arg_1=10, arg_2=5, arg_3=None):\n \"\"\"Return a list of random integer by the given range and quantity.\n\n Parameters\n -----------\n min_v : number\n The minimum value.\n max_v : number\n The maximum value.\n number : int\n Number of value.\n seed : int or None\n The seed for random.\n\n Examples\n ---------\n >>> r = Func(min_v=0, max_v=10, number=5)\n [10, 2, 3, 3, 7]\n\n \"\"\"\n arg_4 = random.Random()\n if arg_3:\n arg_4 = random.Random(arg_3)\n # return [random.randint(min,max) for p in range(0, number)]\n return [arg_4.randint(arg_0, arg_1) for arg_5 in range(0, arg_2)]"} +{"_id": "doc_1575", "title": "", "text": "def Func(arg_0=True):\n \"\"\"Clears all the placeholder variables of keep prob,\n including keeping probabilities of all dropout, denoising, dropconnect etc.\n\n Parameters\n ----------\n printable : boolean\n If True, print all deleted variables.\n\n \"\"\"\n tl.logging.info('clear all .....................................')\n arg_1 = globals().copy()\n for arg_2 in arg_1:\n if arg_2[0] == '_': continue\n if 'func' in str(globals()[arg_2]): continue\n if 'module' in str(globals()[arg_2]): continue\n if 'class' in str(globals()[arg_2]): continue\n\n if arg_0:\n tl.logging.info(\" clear_all ------- %s\" % str(globals()[arg_2]))\n\n del globals()[arg_2]"} +{"_id": "doc_1576", "title": "", "text": "def Func(arg_0=None, arg_1=1.0):\n \"\"\"Sample an index from a probability array.\n\n Parameters\n ----------\n a : list of float\n List of probabilities.\n temperature : float or None\n The higher the more uniform. When a = [0.1, 0.2, 0.7],\n - temperature = 0.7, the distribution will be sharpen [0.05048273, 0.13588945, 0.81362782]\n - temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7]\n - temperature = 1.5, the distribution will be filtered [0.16008435, 0.25411807, 0.58579758]\n - If None, it will be ``np.argmax(a)``\n\n Notes\n ------\n - No matter what is the temperature and input list, the sum of all probabilities will be one. Even if input list = [1, 100, 200], the sum of all probabilities will still be one.\n - For large vocabulary size, choice a higher temperature or ``tl.nlp.Func_top`` to avoid error.\n\n \"\"\"\n if arg_0 is None:\n raise Exception(\"a : list of float\")\n arg_2 = np.copy(arg_0)\n try:\n if arg_1 == 1:\n return np.argmax(np.random.multinomial(1, arg_0, 1))\n if arg_1 is None:\n return np.argmax(arg_0)\n else:\n arg_0 = np.log(arg_0) / arg_1\n arg_0 = np.exp(arg_0) / np.sum(np.exp(arg_0))\n return np.argmax(np.random.multinomial(1, arg_0, 1))\n except Exception:\n # np.set_printoptions(threshold=np.nan)\n # tl.logging.info(a)\n # tl.logging.info(np.sum(a))\n # tl.logging.info(np.max(a))\n # tl.logging.info(np.min(a))\n # exit()\n arg_3 = \"For large vocabulary_size, choice a higher temperature\\\n to avoid log error. Hint : use ``Func_top``. 
\"\n\n warnings.warn(arg_3, Warning)\n # tl.logging.info(a)\n # tl.logging.info(b)\n return np.argmax(np.random.multinomial(1, arg_2, 1))"} +{"_id": "doc_1577", "title": "", "text": "def Func(arg_0=None, arg_1=10):\n \"\"\"Sample from ``top_k`` probabilities.\n\n Parameters\n ----------\n a : list of float\n List of probabilities.\n top_k : int\n Number of candidates to be considered.\n\n \"\"\"\n if arg_0 is None:\n arg_0 = []\n\n arg_2 = np.argpartition(arg_0, -arg_1)[-arg_1:]\n arg_3 = arg_0[arg_2]\n # tl.logging.info(\"new %f\" % probs)\n arg_3 = arg_3 / np.sum(arg_3)\n arg_4 = np.random.choice(arg_2, p=arg_3)\n return arg_4"} +{"_id": "doc_1578", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"Creates the vocabulary of word to word_id.\n\n See ``tutorial_tfrecord3.py``.\n\n The vocabulary is saved to disk in a text file of word counts. The id of each\n word in the file is its corresponding 0-based line number.\n\n Parameters\n ------------\n sentences : list of list of str\n All sentences for creating the vocabulary.\n word_counts_output_file : str\n The file name.\n min_word_count : int\n Minimum number of occurrences for a word.\n\n Returns\n --------\n :class:`SimpleVocabulary`\n The simple vocabulary object, see :class:`Vocabulary` for more.\n\n Examples\n --------\n Pre-process sentences\n\n >>> captions = [\"one two , three\", \"four five five\"]\n >>> processed_capts = []\n >>> for c in captions:\n >>> c = tl.nlp.process_sentence(c, start_word=\"\", end_word=\"\")\n >>> processed_capts.append(c)\n >>> print(processed_capts)\n ...[['', 'one', 'two', ',', 'three', ''], ['', 'four', 'five', 'five', '']]\n\n Create vocabulary\n\n >>> tl.nlp.Func(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1)\n Creating vocabulary.\n Total words: 8\n Words in vocabulary: 8\n Wrote vocabulary file: vocab.txt\n\n Get vocabulary object\n\n >>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word=\"\", end_word=\"\", unk_word=\"\")\n INFO:tensorflow:Initializing vocabulary from file: vocab.txt\n [TL] Vocabulary from vocab.txt : \n vocabulary with 10 words (includes start_word, end_word, unk_word)\n start_id: 2\n end_id: 3\n unk_id: 9\n pad_id: 0\n\n \"\"\"\n tl.logging.info(\"Creating vocabulary.\")\n\n arg_3 = Counter()\n\n for arg_4 in arg_0:\n arg_3.update(arg_4)\n # tl.logging.info('c',c)\n tl.logging.info(\" Total words: %d\" % len(arg_3))\n\n # Filter uncommon words and sort by descending count.\n arg_5 = [x for x in arg_3.items() if x[1] >= arg_2]\n arg_5.sort(key=lambda x: x[1], reverse=True)\n arg_5 = [(\"\", 0)] + arg_5 # 1st id should be reserved for padding\n # tl.logging.info(word_counts)\n tl.logging.info(\" Words in vocabulary: %d\" % len(arg_5))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(arg_1, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (arg_6, arg_4) for arg_6, arg_4 in arg_5]))\n tl.logging.info(\" Wrote vocabulary file: %s\" % arg_1)\n\n # Create the vocabulary dictionary.\n arg_7 = [x[0] for x in arg_5]\n arg_8 = len(arg_7)\n arg_9 = dict([(x, y) for (y, x) in enumerate(arg_7)])\n arg_10 = SimpleVocabulary(arg_9, arg_8)\n\n return arg_10"} +{"_id": "doc_1579", "title": "", "text": "def Func(arg_0='questions-words.txt', arg_1=None):\n \"\"\"Reads through an analogy question file, return its id format.\n\n Parameters\n ----------\n eval_file : str\n The file name.\n word2id : dictionary\n a dictionary that maps word to ID.\n\n Returns\n --------\n numpy.array\n A ``[n_examples, 4]`` numpy array containing the analogy 
question's word IDs.\n\n Examples\n ---------\n The file should be in this format\n\n >>> : capital-common-countries\n >>> Athens Greece Baghdad Iraq\n >>> Athens Greece Bangkok Thailand\n >>> Athens Greece Beijing China\n >>> Athens Greece Berlin Germany\n >>> Athens Greece Bern Switzerland\n >>> Athens Greece Cairo Egypt\n >>> Athens Greece Canberra Australia\n >>> Athens Greece Hanoi Vietnam\n >>> Athens Greece Havana Cuba\n\n Get the tokenized analogy question data\n\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)\n >>> analogy_questions = tl.nlp.Func(eval_file='questions-words.txt', word2id=dictionary)\n >>> print(analogy_questions)\n [[ 3068 1248 7161 1581]\n [ 3068 1248 28683 5642]\n [ 3068 1248 3878 486]\n ...,\n [ 1216 4309 19982 25506]\n [ 1216 4309 3194 8650]\n [ 1216 4309 140 312]]\n\n \"\"\"\n if arg_1 is None:\n arg_1 = {}\n\n arg_2 = []\n arg_3 = 0\n with open(arg_0, \"rb\") as analogy_f:\n for arg_4 in analogy_f:\n if arg_4.startswith(b\":\"): # Skip comments.\n continue\n arg_5 = arg_4.strip().lower().split(b\" \") # lowercase\n arg_6 = [arg_1.get(w.strip()) for w in arg_5]\n if None in arg_6 or len(arg_6) != 4:\n arg_3 += 1\n else:\n arg_2.append(np.array(arg_6))\n tl.logging.info(\"Eval analogy file: %s\" % arg_0)\n tl.logging.info(\"Questions: %d\", len(arg_2))\n tl.logging.info(\"Skipped: %d\", arg_3)\n arg_7 = np.array(arg_2, dtype=np.int32)\n return arg_7"} +{"_id": "doc_1580", "title": "", "text": "def Func(arg_0):\n \"\"\"Given a dictionary that maps word to integer id.\n Returns a reverse dictionary that maps a id to word.\n\n Parameters\n ----------\n word_to_id : dictionary\n that maps word to ID.\n\n Returns\n --------\n dictionary\n A dictionary that maps IDs to words.\n\n \"\"\"\n arg_1 = dict(zip(arg_0.values(), arg_0.keys()))\n return arg_1"} +{"_id": "doc_1581", "title": "", "text": "def Func(arg_0=None, arg_1=50000, arg_2=True, arg_3='UNK'):\n \"\"\"Build the words dictionary and replace rare words with 'UNK' token.\n The most common word has the smallest integer id.\n\n Parameters\n ----------\n words : list of str or byte\n The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.\n vocabulary_size : int\n The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token.\n printable : boolean\n Whether to print the read vocabulary size of the given words.\n unk_key : str\n Represent the unknown words.\n\n Returns\n --------\n data : list of int\n The context in a list of ID.\n count : list of tuple and list\n Pair words and IDs.\n - count[0] is a list : the number of rare words\n - count[1:] are tuples : the number of occurrence of each word\n - e.g. 
[['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]\n dictionary : dictionary\n It is `word_to_id` that maps word to ID.\n reverse_dictionary : a dictionary\n It is `id_to_word` that maps ID to word.\n\n Examples\n --------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> vocabulary_size = 50000\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.Func(words, vocabulary_size)\n\n References\n -----------------\n - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `__\n\n \"\"\"\n if arg_0 is None:\n raise Exception(\"words : list of str or byte\")\n\n arg_4 = [[arg_3, -1]]\n arg_4.extend(collections.Counter(arg_0).most_common(arg_1 - 1))\n arg_5 = dict()\n for arg_6, arg_7 in arg_4:\n arg_5[arg_6] = len(arg_5)\n arg_8 = list()\n arg_9 = 0\n for arg_6 in arg_0:\n if arg_6 in arg_5:\n arg_10 = arg_5[arg_6]\n else:\n arg_10 = 0 # dictionary['UNK']\n arg_9 += 1\n arg_8.append(arg_10)\n arg_4[0][1] = arg_9\n arg_11 = dict(zip(arg_5.values(), arg_5.keys()))\n if arg_2:\n tl.logging.info('Real vocabulary size %d' % len(collections.Counter(arg_0).keys()))\n tl.logging.info('Limited vocabulary size {}'.format(arg_1))\n if len(collections.Counter(arg_0).keys()) < arg_1:\n raise Exception(\n \"len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size\"\n )\n return arg_8, arg_4, arg_5, arg_11"} +{"_id": "doc_1582", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=True, arg_4=3, arg_5=arg_6.compile(br\"\\d\")\n):\n \"\"\"Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Parameters\n -----------\n sentence : tensorflow.python.platform.gfile.GFile Object\n The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``.\n vocabulary : dictionary\n Mmapping tokens to integers.\n tokenizer : function\n A function to use to tokenize each sentence. 
If None, ``basic_tokenizer`` will be used.\n normalize_digits : boolean\n If true, all digits are replaced by 0.\n\n Returns\n --------\n list of int\n The token-ids for the sentence.\n\n \"\"\"\n if arg_2:\n arg_8 = arg_2(arg_0)\n else:\n arg_8 = basic_tokenizer(arg_0)\n if not arg_3:\n return [arg_1.get(arg_9, arg_4) for arg_9 in arg_8]\n # Normalize digits by 0 before looking words up in the vocabulary.\n return [arg_1.get(arg_6.sub(arg_5, b\"0\", arg_9), arg_4) for arg_9 in arg_8]"} +{"_id": "doc_1583", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Calculate the bleu score for hypotheses and references\n using the MOSES ulti-bleu.perl script.\n\n Parameters\n ------------\n hypotheses : numpy.array.string\n A numpy array of strings where each string is a single example.\n references : numpy.array.string\n A numpy array of strings where each string is a single example.\n lowercase : boolean\n If True, pass the \"-lc\" flag to the multi-bleu script\n\n Examples\n ---------\n >>> hypotheses = [\"a bird is flying on the sky\"]\n >>> references = [\"two birds are flying on the sky\", \"a bird is on the top of the tree\", \"an airplane is on the sky\",]\n >>> score = tl.nlp.Func(hypotheses, references)\n\n Returns\n --------\n float\n The BLEU score\n\n References\n ----------\n - `Google/seq2seq/metric/bleu `__\n\n \"\"\"\n if np.size(arg_0) == 0:\n return np.float32(0.0)\n\n # Get MOSES multi-bleu script\n try:\n arg_3, arg_4 = urllib.request.urlretrieve(\n \"https://raw.githubusercontent.com/moses-smt/mosesdecoder/\"\n \"master/scripts/generic/multi-bleu.perl\"\n )\n os.chmod(arg_3, 0o755)\n except Exception: # pylint: disable=W0702\n tl.logging.info(\"Unable to fetch multi-bleu.perl script, using local.\")\n arg_5 = os.path.dirname(os.path.realpath(__file__))\n arg_6 = os.path.abspath(os.path.join(arg_5, \"..\", \"..\", \"bin\"))\n arg_3 = os.path.join(arg_6, \"tools/multi-bleu.perl\")\n\n # Dump hypotheses and references to tempfiles\n arg_7 = tempfile.NamedTemporaryFile()\n arg_7.write(\"\\n\".join(arg_0).encode(\"utf-8\"))\n arg_7.write(b\"\\n\")\n arg_7.flush()\n arg_8 = tempfile.NamedTemporaryFile()\n arg_8.write(\"\\n\".join(arg_1).encode(\"utf-8\"))\n arg_8.write(b\"\\n\")\n arg_8.flush()\n\n # Calculate BLEU using multi-bleu script\n with open(arg_7.name, \"r\") as read_pred:\n arg_9 = [arg_3]\n if arg_2:\n arg_9 += [\"-lc\"]\n arg_9 += [arg_8.name]\n try:\n arg_10 = subprocess.check_output(arg_9, stdin=read_pred, stderr=subprocess.STDOUT)\n arg_10 = arg_10.decode(\"utf-8\")\n arg_11 = re.search(r\"BLEU = (.+?),\", arg_10).group(1)\n arg_11 = float(arg_11)\n except subprocess.CalledProcessError as error:\n if error.output is not None:\n tl.logging.warning(\"multi-bleu.perl script returned non-zero exit code\")\n tl.logging.warning(error.output)\n arg_11 = np.float32(0.0)\n\n # Close temp files\n arg_7.close()\n arg_8.close()\n\n return np.float32(arg_11)"} +{"_id": "doc_1584", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the integer id of a word string.\"\"\"\n if arg_1 in arg_0._vocab:\n return arg_0._vocab[arg_1]\n else:\n return arg_0._unk_id"} +{"_id": "doc_1585", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the integer word id of a word string.\"\"\"\n if arg_1 in arg_0.vocab:\n return arg_0.vocab[arg_1]\n else:\n return arg_0.unk_id"} +{"_id": "doc_1586", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the word string of an integer word id.\"\"\"\n if arg_1 >= len(arg_0.reverse_vocab):\n return 
arg_0.reverse_vocab[arg_0.unk_id]\n else:\n return arg_0.reverse_vocab[arg_1]"} +{"_id": "doc_1587", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\" Enable the diagnostic feature for debugging unexpected concurrency in\n acquiring ConnectionWrapper instances.\n\n NOTE: This MUST be done early in your application's execution, BEFORE any\n accesses to ConnectionFactory or connection policies from your application\n (including imports and sub-imports of your app).\n\n Parameters:\n ----------------------------------------------------------------\n maxConcurrency: A non-negative integer that represents the maximum expected\n number of outstanding connections. When this value is\n exceeded, useful information will be logged and, depending\n on the value of the raiseException arg,\n ConcurrencyExceededError may be raised.\n raiseException: If true, ConcurrencyExceededError will be raised when\n maxConcurrency is exceeded.\n \"\"\"\n global arg_2, arg_3\n\n assert arg_0 >= 0\n\n arg_2 = arg_0\n arg_3 = arg_1\n return"} +{"_id": "doc_1588", "title": "", "text": "def Func(arg_0):\n \"\"\" Check for concurrency violation and add self to\n _clsOutstandingInstances.\n\n ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is\n incremented\n \"\"\"\n global g_max_concurrency, g_max_concurrency_raise_exception\n\n assert g_max_concurrency is not None\n assert arg_0 not in arg_0._clsOutstandingInstances, repr(arg_0)\n\n # Populate diagnostic info\n arg_0._creationTracebackString = traceback.format_stack()\n\n # Check for concurrency violation\n if arg_0._clsNumOutstanding >= g_max_concurrency:\n # NOTE: It's possible for _clsNumOutstanding to be greater than\n # len(_clsOutstandingInstances) if concurrency check was enabled after\n # unrelease allocations.\n arg_2 = (\"With numOutstanding=%r, exceeded concurrency limit=%r \"\n \"when requesting %r. OTHER TRACKED UNRELEASED \"\n \"INSTANCES (%s): %r\") % (\n arg_0._clsNumOutstanding, g_max_concurrency, arg_0,\n len(arg_0._clsOutstandingInstances), arg_0._clsOutstandingInstances,)\n\n arg_0._logger.error(arg_2)\n\n if g_max_concurrency_raise_exception:\n raise ConcurrencyExceededError(arg_2)\n\n\n # Add self to tracked instance set\n arg_0._clsOutstandingInstances.add(arg_0)\n arg_0._addedToInstanceSet = True\n\n return"} +{"_id": "doc_1589", "title": "", "text": "def Func(arg_0):\n \"\"\" Close the policy instance and its database connection pool. \"\"\"\n arg_0._logger.info(\"Closing\")\n\n if arg_0._pool is not None:\n arg_0._pool.Func()\n arg_0._pool = None\n else:\n arg_0._logger.warning(\n \"Func() called, but connection policy was alredy Funcd\")\n return"} +{"_id": "doc_1590", "title": "", "text": "def Func(arg_0):\n \"\"\" Get a connection from the pool.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance. NOTE: Caller\n is responsible for calling the ConnectionWrapper\n instance's release() method or use it in a context manager\n expression (with ... as:) to release resources.\n \"\"\"\n arg_0._logger.debug(\"Acquiring connection\")\n\n arg_1 = arg_0._pool.connection(shareable=False)\n arg_2 = ConnectionWrapper(arg_1=arg_1,\n cursor=arg_1.cursor(),\n releaser=arg_0._releaseConnection,\n logger=arg_0._logger)\n return arg_2"} +{"_id": "doc_1591", "title": "", "text": "def Func(arg_0):\n \"\"\" Create a Connection instance.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance. 
NOTE: Caller\n is responsible for calling the ConnectionWrapper\n instance's release() method or use it in a context manager\n expression (with ... as:) to release resources.\n \"\"\"\n arg_0._logger.debug(\"Acquiring connection\")\n\n arg_1 = SteadyDB.connect(** _getCommonSteadyDBArgsDict())\n arg_2 = ConnectionWrapper(arg_1=arg_1,\n cursor=arg_1.cursor(),\n releaser=arg_0._releaseConnection,\n logger=arg_0._logger)\n return arg_2"} +{"_id": "doc_1592", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Release database connection and cursor; passed as a callback to\n ConnectionWrapper\n \"\"\"\n arg_0._logger.debug(\"Releasing connection\")\n\n # Close the cursor\n arg_2.close()\n\n # ... then close the database connection\n arg_1.close()\n return"} +{"_id": "doc_1593", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reclassifies given state.\n \"\"\"\n # Record is before wait period do not classifiy\n if arg_1.ROWID < arg_0.getParameter('trainRecords'):\n if not arg_1.setByUser:\n arg_1.anomalyLabel = []\n arg_0._deleteRecordsFromKNN([arg_1])\n return\n\n arg_3 = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL\n arg_4 = arg_3 + KNNAnomalyClassifierRegion.AUTO_TAG\n\n # Update the label based on classifications\n arg_5 = arg_0._recomputeRecordFromKNN(arg_1)\n arg_6 = arg_0._categoryToLabelList(arg_5)\n\n if arg_1.setByUser:\n if arg_3 in arg_1.anomalyLabel:\n arg_1.anomalyLabel.remove(arg_3)\n if arg_4 in arg_1.anomalyLabel:\n arg_1.anomalyLabel.remove(arg_4)\n arg_6.extend(arg_1.anomalyLabel)\n\n # Add threshold classification label if above threshold, else if\n # classified to add the auto threshold classification.\n if arg_1.anomalyScore >= arg_0.getParameter('anomalyThreshold'):\n arg_6.append(arg_3)\n elif arg_3 in arg_6:\n arg_7 = arg_6.index(arg_3)\n arg_6[arg_7] = arg_4\n\n # Make all entries unique\n arg_6 = list(set(arg_6))\n\n # If both above threshold and auto classified above - remove auto label\n if arg_3 in arg_6 and arg_4 in arg_6:\n arg_6.remove(arg_4)\n\n if arg_1.anomalyLabel == arg_6:\n return\n\n # Update state's labeling\n arg_1.anomalyLabel = arg_6\n\n # Update KNN Classifier with new labeling\n if arg_1.anomalyLabel == []:\n arg_0._deleteRecordsFromKNN([arg_1])\n else:\n arg_0._addRecordToKNN(arg_1)"} +{"_id": "doc_1594", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Removes the given records from the classifier.\n\n parameters\n ------------\n recordsToDelete - list of records to delete from the classififier\n \"\"\"\n arg_2 = arg_0._knnclassifier.getParameter('categoryRecencyList')\n\n arg_3 = ([r.ROWID for r in arg_1 if\n not r.setByUser and r.ROWID in arg_2])\n\n arg_4 = arg_0._knnclassifier._knn._numPatterns\n arg_0._knnclassifier._knn.removeIds(arg_3)\n assert arg_0._knnclassifier._knn._numPatterns == arg_4 - len(arg_3)"} +{"_id": "doc_1595", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=None):\n \"\"\"\n Removes any stored records within the range from start to\n end. 
Noninclusive of end.\n\n parameters\n ------------\n start - integer representing the ROWID of the start of the deletion range,\n end - integer representing the ROWID of the end of the deletion range,\n if None, it will default to end.\n \"\"\"\n arg_3 = numpy.array(\n arg_0._knnclassifier.getParameter('categoryRecencyList'))\n\n if arg_2 is None:\n arg_2 = arg_3.max() + 1\n\n arg_4 = numpy.logical_and(arg_3 >= arg_1,\n arg_3 < arg_2)\n arg_5 = arg_3[arg_4]\n\n arg_6 = arg_0._knnclassifier._knn._numPatterns\n arg_0._knnclassifier._knn.removeIds(arg_5.tolist())\n assert arg_0._knnclassifier._knn._numPatterns == arg_6 - len(arg_5)"} +{"_id": "doc_1596", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Since the KNN Classifier stores categories as numbers, we must store each\n label as a number. This method converts from a label to a unique number.\n Each label is assigned a unique bit so multiple labels may be assigned to\n a single record.\n \"\"\"\n if arg_1 not in arg_0.saved_categories:\n arg_0.saved_categories.append(arg_1)\n return pow(2, arg_0.saved_categories.index(arg_1))"} +{"_id": "doc_1597", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Converts a category number into a list of labels\n \"\"\"\n if arg_1 is None:\n return []\n\n arg_2 = []\n arg_3 = 0\n while arg_1 > 0:\n if arg_1 % 2 == 1:\n arg_2.append(arg_0.saved_categories[arg_3])\n arg_3 += 1\n arg_1 = arg_1 >> 1\n return arg_2"} +{"_id": "doc_1598", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a state's anomaly vertor converting it from spare to dense\n \"\"\"\n arg_2 = numpy.zeros(arg_0._anomalyVectorLength)\n arg_2[arg_1.anomalyVector] = 1\n return arg_2"} +{"_id": "doc_1599", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Get the labels on classified points within range start to end. Not inclusive\n of end.\n\n :returns: (dict) with format:\n\n ::\n\n {\n 'isProcessing': boolean,\n 'recordLabels': list of results\n }\n\n ``isProcessing`` - currently always false as recalculation blocks; used if\n reprocessing of records is still being performed;\n\n Each item in ``recordLabels`` is of format:\n \n ::\n \n {\n 'ROWID': id of the row,\n 'labels': list of strings\n }\n\n \"\"\"\n if len(arg_0._recordsCache) == 0:\n return {\n 'isProcessing': False,\n 'recordLabels': []\n }\n try:\n arg_1 = int(arg_1)\n except Exception:\n arg_1 = 0\n\n try:\n arg_2 = int(arg_2)\n except Exception:\n arg_2 = arg_0._recordsCache[-1].ROWID\n\n if arg_2 <= arg_1:\n raise HTMPredictionModelInvalidRangeError(\"Invalid supplied range for 'Func'.\",\n debugInfo={\n 'requestRange': {\n 'startRecordID': arg_1,\n 'endRecordID': arg_2\n },\n 'numRecordsStored': len(arg_0._recordsCache)\n })\n\n arg_3 = {\n 'isProcessing': False,\n 'recordLabels': []\n }\n\n arg_4 = numpy.array(\n arg_0._knnclassifier.getParameter('categoryRecencyList'))\n arg_5 = numpy.where((arg_4 >= arg_1) & (arg_4 < arg_2))[0].tolist()\n arg_6 = arg_0._knnclassifier.getCategoryList()\n for arg_7 in arg_5:\n arg_8 = dict(\n ROWID=int(arg_4[arg_7]),\n labels=arg_0._categoryToLabelList(arg_6[arg_7]))\n arg_3['recordLabels'].append(arg_8)\n\n return arg_3"} +{"_id": "doc_1600", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Remove labels from each record with record ROWID in range from\n ``start`` to ``end``, noninclusive of end. 
Removes all records if \n ``labelFilter`` is None, otherwise only removes the labels equal to \n ``labelFilter``.\n\n This will recalculate all points from end to the last record stored in the\n internal cache of this classifier.\n \n :param start: (int) start index \n :param end: (int) end index (noninclusive)\n :param labelFilter: (string) label filter\n \"\"\"\n if len(arg_0._recordsCache) == 0:\n raise HTMPredictionModelInvalidRangeError(\"Invalid supplied range for \"\n \"'Func'. Model has no saved records.\")\n\n try:\n arg_1 = int(arg_1)\n except Exception:\n arg_1 = 0\n\n try:\n arg_2 = int(arg_2)\n except Exception:\n arg_2 = arg_0._recordsCache[-1].ROWID\n\n arg_4 = arg_0._recordsCache[0].ROWID\n\n arg_5 = 0 if arg_1 is None else max(0, arg_1 - arg_4)\n arg_6 = len(arg_0._recordsCache) if arg_2 is None else \\\n max(0, min( len( arg_0._recordsCache) , arg_2 - arg_4))\n\n if arg_6 <= arg_5:\n raise HTMPredictionModelInvalidRangeError(\"Invalid supplied range for \"\n \"'Func'.\", debugInfo={\n 'requestRange': {\n 'startRecordID': arg_1,\n 'endRecordID': arg_2\n },\n 'clippedRequestRange': {\n 'startRecordID': arg_5,\n 'endRecordID': arg_6\n },\n 'validRange': {\n 'startRecordID': arg_4,\n 'endRecordID': arg_0._recordsCache[len(arg_0._recordsCache)-1].ROWID\n },\n 'numRecordsStored': len(arg_0._recordsCache)\n })\n\n # Remove records within the cache\n arg_7 = []\n for arg_8 in arg_0._recordsCache[arg_5:arg_6]:\n if arg_3 is not None:\n if arg_3 in arg_8.anomalyLabel:\n arg_8.anomalyLabel.remove(arg_3)\n else:\n arg_8.anomalyLabel = []\n arg_8.setByUser = False\n arg_7.append(arg_8)\n arg_0._deleteRecordsFromKNN(arg_7)\n\n # Remove records not in cache\n arg_0._deleteRangeFromKNN(arg_1, arg_2)\n\n # Recompute [clippedEnd, ...)\n for arg_8 in arg_0._recordsCache[arg_6:]:\n arg_0._classifyState(arg_8)"} +{"_id": "doc_1601", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Returns True if the record Funces any of the provided filters\n '''\n\n for arg_2, arg_3 in arg_0.filterDict.iteritems():\n arg_4 = arg_3['index']\n arg_5 = arg_3['categories']\n for arg_6 in arg_5:\n # Record might be blank, handle this\n if not arg_1:\n continue\n if arg_1[arg_4].find(arg_6) != -1:\n '''\n This field contains the string we're searching for\n so we'll keep the records\n '''\n return True\n\n # None of the categories were found in this record\n return False"} +{"_id": "doc_1602", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Removes the set of columns who have never been active from the set of\n active columns selected in the inhibition round. Such columns cannot\n represent learned pattern and are therefore meaningless if only inference\n is required. This should not be done when using a random, unlearned SP\n since you would end up with no active columns.\n\n :param activeArray: An array whose size is equal to the number of columns.\n Any columns marked as active with an activeDutyCycle of 0 have\n never been activated before and therefore are not active due to\n learning. Any of these (unlearned) columns will be disabled (set to 0).\n \"\"\"\n arg_2 = numpy.where(arg_0._activeDutyCycles == 0)[0]\n arg_1[arg_2] = 0"} +{"_id": "doc_1603", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Updates the minimum duty cycles defining normal activity for a column. 
A\n column with activity duty cycle below this minimum threshold is boosted.\n \"\"\"\n if arg_0._globalInhibition or arg_0._inhibitionRadius > arg_0._numInputs:\n arg_0.FuncGlobal()\n else:\n arg_0.FuncLocal()"} +{"_id": "doc_1604", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Updates the minimum duty cycles in a global fashion. Sets the minimum duty\n cycles for the overlap all columns to be a percent of the maximum in the\n region, specified by minPctOverlapDutyCycle. Functionality it is equivalent\n to _updateMinDutyCyclesLocal, but this function exploits the globality of\n the computation to perform it in a straightforward, and efficient manner.\n \"\"\"\n arg_0._minOverlapDutyCycles.fill(\n arg_0._minPctOverlapDutyCycles * arg_0._overlapDutyCycles.max()\n )"} +{"_id": "doc_1605", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Updates the minimum duty cycles. The minimum duty cycles are determined\n locally. Each column's minimum duty cycles are set to be a percent of the\n maximum duty cycles in the column's neighborhood. Unlike\n _updateMinDutyCyclesGlobal, here the values can be quite different for\n different columns.\n \"\"\"\n for arg_1 in xrange(arg_0._numColumns):\n arg_2 = arg_0._getColumnNeighborhood(arg_1)\n\n arg_3 = arg_0._activeDutyCycles[arg_2].max()\n arg_4 = arg_0._overlapDutyCycles[arg_2].max()\n\n arg_0._minOverlapDutyCycles[arg_1] = (arg_4 *\n arg_0._minPctOverlapDutyCycles)"} +{"_id": "doc_1606", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Updates the duty cycles for each column. The OVERLAP duty cycle is a moving\n average of the number of inputs which overlapped with the each column. The\n ACTIVITY duty cycles is a moving average of the frequency of activation for\n each column.\n\n Parameters:\n ----------------------------\n :param overlaps:\n An array containing the overlap score for each column.\n The overlap score for a column is defined as the number\n of synapses in a \"connected state\" (connected synapses)\n that are connected to input bits which are turned on.\n :param activeColumns:\n An array containing the indices of the active columns,\n the sparse set of columns which survived inhibition\n \"\"\"\n arg_3 = numpy.zeros(arg_0._numColumns, dtype=realDType)\n arg_4 = numpy.zeros(arg_0._numColumns, dtype=realDType)\n arg_3[arg_1 > 0] = 1\n arg_4[arg_2] = 1\n\n arg_5 = arg_0._dutyCyclePeriod\n if (arg_5 > arg_0._iterationNum):\n arg_5 = arg_0._iterationNum\n\n arg_0._overlapDutyCycles = arg_0.FuncHelper(\n arg_0._overlapDutyCycles,\n arg_3,\n arg_5\n )\n\n arg_0._activeDutyCycles = arg_0.FuncHelper(\n arg_0._activeDutyCycles,\n arg_4,\n arg_5\n )"} +{"_id": "doc_1607", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The average number of columns per input, taking into account the topology\n of the inputs and columns. This value is used to calculate the inhibition\n radius. This function supports an arbitrary number of dimensions. 
If the\n number of column dimensions does not match the number of input dimensions,\n we treat the missing, or phantom dimensions as 'ones'.\n \"\"\"\n #TODO: extend to support different number of dimensions for inputs and\n # columns\n arg_1 = max(arg_0._columnDimensions.size, arg_0._inputDimensions.size)\n arg_2 = numpy.ones(arg_1)\n arg_2[:arg_0._columnDimensions.size] = arg_0._columnDimensions\n\n arg_5 = numpy.ones(arg_1)\n arg_5[:arg_0._inputDimensions.size] = arg_0._inputDimensions\n\n arg_7 = arg_2.astype(realDType) / arg_5\n return numpy.average(arg_7)"} +{"_id": "doc_1608", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n The range of connected synapses for column. This is used to\n calculate the inhibition radius. This variation of the function only\n supports a 1 dimensional column topology.\n\n Parameters:\n ----------------------------\n :param columnIndex: The index identifying a column in the permanence,\n potential and connectivity matrices\n \"\"\"\n assert(arg_0._inputDimensions.size == 1)\n arg_2 = arg_0._connectedSynapses[arg_1].nonzero()[0]\n if arg_2.size == 0:\n return 0\n else:\n return max(arg_2) - min(arg_2) + 1"} +{"_id": "doc_1609", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n The range of connectedSynapses per column, averaged for each dimension.\n This value is used to calculate the inhibition radius. This variation of\n the function only supports a 2 dimensional column topology.\n\n Parameters:\n ----------------------------\n :param columnIndex: The index identifying a column in the permanence,\n potential and connectivity matrices\n \"\"\"\n assert(arg_0._inputDimensions.size == 2)\n arg_2 = arg_0._connectedSynapses[arg_1]\n (arg_3, arg_4) = arg_2.reshape(arg_0._inputDimensions).nonzero()\n if arg_3.size == 0 and arg_4.size == 0:\n return 0\n arg_5 = arg_3.max() - arg_3.min() + 1\n arg_6 = arg_4.max() - arg_4.min() + 1\n return numpy.average([arg_5, arg_6])"} +{"_id": "doc_1610", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n This method ensures that each column has enough connections to input bits\n to allow it to become active. Since a column must have at least\n 'self._stimulusThreshold' overlaps in order to be considered during the\n inhibition phase, columns without such minimal number of connections, even\n if all the input bits they are connected to turn on, have no chance of\n obtaining the minimum threshold. For such columns, the permanence values\n are increased until the minimum number of connections are formed.\n\n\n Parameters:\n ----------------------------\n :param perm: An array of permanence values for a column. The array is\n \"dense\", i.e. it contains an entry for each input bit, even\n if the permanence value is 0.\n :param mask: the indices of the columns whose permanences need to be\n raised.\n \"\"\"\n if len(arg_2) < arg_0._stimulusThreshold:\n raise Exception(\"This is likely due to a \" +\n \"value of stimulusThreshold that is too large relative \" +\n \"to the input size. 
[len(mask) < self._stimulusThreshold]\")\n\n numpy.clip(arg_1, arg_0._synPermMin, arg_0._synPermMax, out=arg_1)\n while True:\n arg_3 = numpy.nonzero(\n arg_1 > arg_0._synPermConnected - PERMANENCE_EPSILON)[0].size\n\n if arg_3 >= arg_0._stimulusThreshold:\n return\n arg_1[arg_2] += arg_0._synPermBelowStimulusInc"} +{"_id": "doc_1611", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a randomly generated permanence value for a synapses that is to be\n initialized in a non-connected state.\n \"\"\"\n arg_1 = arg_0._synPermConnected * arg_0._random.getReal64()\n\n # Ensure we don't have too much unnecessary precision. A full 64 bits of\n # precision causes numerical stability issues across platforms and across\n # implementations\n arg_1 = int(arg_1*100000) / 100000.0\n return arg_1"} +{"_id": "doc_1612", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Initializes the permanences of a column. The method\n returns a 1-D array the size of the input, where each entry in the\n array represents the initial permanence value between the input bit\n at the particular index in the array, and the column represented by\n the 'index' parameter.\n\n Parameters:\n ----------------------------\n :param potential: A numpy array specifying the potential pool of the column.\n Permanence values will only be generated for input bits\n corresponding to indices for which the mask value is 1.\n :param connectedPct: A value between 0 or 1 governing the chance, for each\n permanence, that the initial permanence value will\n be a value that is considered connected.\n \"\"\"\n # Determine which inputs bits will start out as connected\n # to the inputs. Initially a subset of the input bits in a\n # column's potential pool will be connected. This number is\n # given by the parameter \"connectedPct\"\n arg_3 = numpy.zeros(arg_0._numInputs, dtype=realDType)\n for arg_4 in xrange(arg_0._numInputs):\n if (arg_1[arg_4] < 1):\n continue\n\n if (arg_0._random.getReal64() <= arg_2):\n arg_3[arg_4] = arg_0._initPermConnected()\n else:\n arg_3[arg_4] = arg_0._initPermNonConnected()\n\n # Clip off low values. Since we use a sparse representation\n # to store the permanence values this helps reduce memory\n # requirements.\n arg_3[arg_3 < arg_0._synPermTrimThreshold] = 0\n\n return arg_3"} +{"_id": "doc_1613", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Update boost factors when global inhibition is used\n \"\"\"\n # When global inhibition is enabled, the target activation level is\n # the sparsity of the spatial pooler\n if (arg_0._localAreaDensity > 0):\n arg_1 = arg_0._localAreaDensity\n else:\n arg_2 = ((2 * arg_0._inhibitionRadius + 1)\n ** arg_0._columnDimensions.size)\n arg_2 = min(arg_0._numColumns, arg_2)\n arg_1 = float(arg_0._numActiveColumnsPerInhArea) / arg_2\n arg_1 = min(arg_1, 0.5)\n\n arg_0._boostFactors = numpy.exp(\n (arg_1 - arg_0._activeDutyCycles) * arg_0._boostStrength)"} +{"_id": "doc_1614", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Performs inhibition. 
This method calculates the necessary values needed to\n actually perform inhibition and then delegates the task of picking the\n active columns to helper functions.\n\n Parameters:\n ----------------------------\n :param overlaps: an array containing the overlap score for each column.\n The overlap score for a column is defined as the number\n of synapses in a \"connected state\" (connected synapses)\n that are connected to input bits which are turned on.\n \"\"\"\n # determine how many columns should be selected in the inhibition phase.\n # This can be specified by either setting the 'numActiveColumnsPerInhArea'\n # parameter or the 'localAreaDensity' parameter when initializing the class\n if (arg_0._localAreaDensity > 0):\n arg_2 = arg_0._localAreaDensity\n else:\n arg_3 = ((2*arg_0._inhibitionRadius + 1)\n ** arg_0._columnDimensions.size)\n arg_3 = min(arg_0._numColumns, arg_3)\n arg_2 = float(arg_0._numActiveColumnsPerInhArea) / arg_3\n arg_2 = min(arg_2, 0.5)\n\n if arg_0._globalInhibition or \\\n arg_0._inhibitionRadius > max(arg_0._columnDimensions):\n return arg_0.FuncGlobal(arg_1, arg_2)\n else:\n return arg_0.FuncLocal(arg_1, arg_2)"} +{"_id": "doc_1615", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets a neighborhood of columns.\n\n Simply calls topology.neighborhood or topology.wrappingNeighborhood\n\n A subclass can insert different topology behavior by overriding this method.\n\n :param centerColumn (int)\n The center of the neighborhood.\n\n @returns (1D numpy array of integers)\n The columns in the neighborhood.\n \"\"\"\n if arg_0._wrapAround:\n return topology.wrappingNeighborhood(arg_1,\n arg_0._inhibitionRadius,\n arg_0._columnDimensions)\n\n else:\n return topology.neighborhood(arg_1,\n arg_0._inhibitionRadius,\n arg_0._columnDimensions)"} +{"_id": "doc_1616", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets a neighborhood of inputs.\n\n Simply calls topology.wrappingNeighborhood or topology.neighborhood.\n\n A subclass can insert different topology behavior by overriding this method.\n\n :param centerInput (int)\n The center of the neighborhood.\n\n @returns (1D numpy array of integers)\n The inputs in the neighborhood.\n \"\"\"\n if arg_0._wrapAround:\n return topology.wrappingNeighborhood(arg_1,\n arg_0._potentialRadius,\n arg_0._inputDimensions)\n else:\n return topology.neighborhood(arg_1,\n arg_0._potentialRadius,\n arg_0._inputDimensions)"} +{"_id": "doc_1617", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Factory function that creates typed Func or FuncRef objects\n\n dtype - the data type of the array (as string).\n Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64\n\n size - the size of the array. 
Must be positive integer.\n \"\"\"\n\n def getFuncType(arg_3):\n \"\"\"A little function to replace the getType() method of arrays\n\n It returns a string representation of the array element type instead of the\n integer value (NTA_BasicType enum) returned by the original array\n \"\"\"\n return arg_3._dtype\n\n\n # FuncRef can't be allocated\n if arg_2:\n assert arg_1 is None\n\n arg_4 = basicTypes.index(arg_0)\n if arg_4 == -1:\n raise Exception('Invalid data type: ' + arg_0)\n if arg_1 and arg_1 <= 0:\n raise Exception('Func size must be positive')\n arg_5 = 'FuncRef' if arg_2 else 'Func'\n arg_6 = getattr(engine_internal, arg_0 + arg_5)\n arg_6.getType = getFuncType\n\n if arg_1:\n arg_8 = arg_6(arg_1)\n else:\n arg_8 = arg_6()\n\n arg_8._dtype = basicTypes[arg_4]\n return arg_8"} +{"_id": "doc_1618", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get parameter value\"\"\"\n (arg_2, arg_3) = arg_0._FuncMethods(arg_1)\n if arg_3 is None:\n import exceptions\n raise exceptions.Exception(\n \"Func -- parameter name '%s' does not exist in region %s of type %s\"\n % (arg_1, arg_0.name, arg_0.type))\n return arg_3(arg_1)"} +{"_id": "doc_1619", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Set parameter value\"\"\"\n (arg_3, arg_4) = arg_0._getParameterMethods(arg_1)\n if arg_3 is None:\n import exceptions\n raise exceptions.Exception(\n \"Func -- parameter name '%s' does not exist in region %s of type %s\"\n % (arg_1, arg_0.name, arg_0.type))\n arg_3(arg_1, arg_2)"} +{"_id": "doc_1620", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the collection of regions in a network\n\n This is a tricky one. The collection of regions returned from\n the internal network is a collection of internal regions.\n The desired collection is a collection of net.Region objects\n that also points to this network (net.network) and not to\n the internal network. To achieve that a CollectionWrapper\n class is used with a custom makeRegion() function (see below)\n as a value wrapper. The CollectionWrapper class wraps each value in the\n original collection with the result of the valueWrapper.\n \"\"\"\n\n def makeRegion(arg_1, arg_2):\n \"\"\"Wrap an engine region with a nupic.engine_internal.Region\n\n Also passes the containing nupic.engine_internal.Network network in _network. 
This\n function is passed a value wrapper to the CollectionWrapper\n \"\"\"\n arg_2 = Region(arg_2, arg_0)\n #r._network = self\n return arg_2\n\n arg_3 = CollectionWrapper(engine_internal.Network.getRegions(arg_0), makeRegion)\n return arg_3"} +{"_id": "doc_1621", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write state to proto object.\n\n :param proto: SDRClassifierRegionProto capnproto object\n \"\"\"\n arg_1.implementation = arg_0.implementation\n arg_1.steps = arg_0.steps\n arg_1.alpha = arg_0.alpha\n arg_1.verbosity = arg_0.verbosity\n arg_1.maxCategoryCount = arg_0.maxCategoryCount\n arg_1.learningMode = arg_0.learningMode\n arg_1.inferenceMode = arg_0.inferenceMode\n arg_1.recordNum = arg_0.recordNum\n\n arg_0._sdrClassifier.write(arg_1.sdrClassifier)"} +{"_id": "doc_1622", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Read state from proto object.\n\n :param proto: SDRClassifierRegionProto capnproto object\n \"\"\"\n arg_2 = arg_0()\n\n arg_2.implementation = arg_1.implementation\n arg_2.steps = arg_1.steps\n arg_2.stepsList = [int(i) for i in arg_1.steps.split(\",\")]\n arg_2.alpha = arg_1.alpha\n arg_2.verbosity = arg_1.verbosity\n arg_2.maxCategoryCount = arg_1.maxCategoryCount\n\n arg_2._sdrClassifier = SDRClassifierFactory.read(arg_1)\n\n arg_2.learningMode = arg_1.learningMode\n arg_2.inferenceMode = arg_1.inferenceMode\n arg_2.recordNum = arg_1.recordNum\n\n return arg_2"} +{"_id": "doc_1623", "title": "", "text": "def Func(arg_0):\n \"\"\" Runs the OPF Model\n\n Parameters:\n -------------------------------------------------------------------------\n retval: (completionReason, completionMsg)\n where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX\n equates.\n \"\"\"\n # -----------------------------------------------------------------------\n # Load the experiment's description.py module\n arg_1 = helpers.loadExperimentDescriptionScriptFromDir(\n arg_0._experimentDir)\n arg_2 = helpers.getExperimentDescriptionInterfaceFromModule(\n arg_1)\n arg_2.normalizeStreamSources()\n\n arg_3 = arg_2.getModelDescription()\n arg_0._modelControl = arg_2.getModelControl()\n\n # -----------------------------------------------------------------------\n # Create the input data stream for this task\n arg_5 = arg_0._modelControl['dataset']\n\n from nupic.data.stream_reader import StreamReader\n arg_6 = 0\n\n arg_0._inputSource = StreamReader(arg_5, isBlocking=False,\n maxTimeout=arg_6)\n\n\n # -----------------------------------------------------------------------\n #Get field statistics from the input source\n arg_8 = arg_0._getFieldStats()\n # -----------------------------------------------------------------------\n # Construct the model instance\n arg_0._model = ModelFactory.create(arg_3)\n arg_0._model.setFieldStatistics(arg_8)\n arg_0._model.enableLearning()\n arg_0._model.enableInference(arg_0._modelControl.get(\"inferenceArgs\", None))\n\n # -----------------------------------------------------------------------\n # Instantiate the metrics\n arg_0.__metricMgr = MetricsManager(arg_0._modelControl.get('metrics',None),\n arg_0._model.getFieldInfo(),\n arg_0._model.getInferenceType())\n\n arg_0.__loggedMetricPatterns = arg_0._modelControl.get(\"loggedMetrics\", [])\n\n arg_0._optimizedMetricLabel = arg_0.__getOptimizedMetricLabel()\n arg_0._reportMetricLabels = matchPatterns(arg_0._reportKeyPatterns,\n arg_0._getMetricLabels())\n\n\n # -----------------------------------------------------------------------\n # Initialize periodic activities (e.g., for 
model result updates)\n arg_0._periodic = arg_0._initPeriodicActivities()\n\n # -----------------------------------------------------------------------\n # Create our top-level loop-control iterator\n arg_15 = arg_0._modelControl.get('iterationCount', -1)\n\n # Are we asked to turn off learning for a certain # of iterations near the\n # end?\n arg_16 = None\n arg_17 = arg_0._modelControl.get('iterationCountInferOnly', 0)\n if arg_17 == -1:\n arg_0._model.disableLearning()\n elif arg_17 > 0:\n assert arg_15 > arg_17, \"when iterationCountInferOnly \" \\\n \"is specified, iterationCount must be greater than \" \\\n \"iterationCountInferOnly.\"\n arg_16 = arg_15 - arg_17\n\n arg_0.__FuncTaskMainLoop(arg_15, arg_16=arg_16)\n\n # -----------------------------------------------------------------------\n # Perform final operations for model\n arg_0._finalize()\n\n return (arg_0._cmpReason, None)"} +{"_id": "doc_1624", "title": "", "text": "def Func(arg_0):\n \"\"\"Run final activities after a model has run. These include recording and\n logging the final score\"\"\"\n\n arg_0._logger.info(\n \"Finished: modelID=%r; %r records processed. Performing final activities\",\n arg_0._modelID, arg_0._currentRecordIndex + 1)\n\n # =========================================================================\n # Dump the experiment metrics at the end of the task\n # =========================================================================\n arg_0._updateModelDBResults()\n\n # =========================================================================\n # Check if the current model is the best. Create a milestone if necessary\n # If the model has been killed, it is not a candidate for \"best model\",\n # and its output cache should be destroyed\n # =========================================================================\n if not arg_0._isKilled:\n arg_0.__updateJobResults()\n else:\n arg_0.__deleteOutputCache(arg_0._modelID)\n\n # =========================================================================\n # Close output stream, if necessary\n # =========================================================================\n if arg_0._predictionLogger:\n arg_0._predictionLogger.close()\n\n # =========================================================================\n # Close input stream, if necessary\n # =========================================================================\n if arg_0._inputSource: \n arg_0._inputSource.close()"} +{"_id": "doc_1625", "title": "", "text": "def Func(arg_0):\n \"\"\" Create a checkpoint from the current model, and store it in a dir named\n after checkpoint GUID, and finally store the GUID in the Models DB \"\"\"\n\n if arg_0._model is None or arg_0._modelCheckpointGUID is None:\n return\n\n # Create an output store, if one doesn't exist already\n if arg_0._predictionLogger is None:\n arg_0._createPredictionLogger()\n\n arg_1 = StringIO.StringIO()\n arg_0._predictionLogger.checkpoint(\n checkpointSink=arg_1,\n maxRows=int(Configuration.get('nupic.model.checkpoint.maxPredictionRows')))\n\n arg_0._model.save(os.path.join(arg_0._experimentDir, str(arg_0._modelCheckpointGUID)))\n arg_0._jobsDAO.modelSetFields(modelID,\n {'modelCheckpointId':str(arg_0._modelCheckpointGUID)},\n ignoreUnchanged=True)\n\n arg_0._logger.info(\"Checkpointed Hypersearch Model: modelID: %r, \"\n \"checkpointID: %r\", arg_0._modelID, checkpointID)\n return"} +{"_id": "doc_1626", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete the stored checkpoint for the specified modelID. 
This function is\n called if the current model is now the best model, making the old model's\n checkpoint obsolete\n\n Parameters:\n -----------------------------------------------------------------------\n modelID: The modelID for the checkpoint to delete. This is NOT the\n unique checkpointID\n \"\"\"\n\n arg_2 = \\\n arg_0._jobsDAO.modelsGetFields(arg_1, ['modelCheckpointId'])[0]\n\n if arg_2 is None:\n return\n\n try:\n shutil.rmtree(os.path.join(arg_0._experimentDir, str(arg_0._modelCheckpointGUID)))\n except:\n arg_0._logger.warn(\"Failed to delete model checkpoint %s. \"\\\n \"Assuming that another worker has already deleted it\",\n arg_2)\n return\n\n arg_0._jobsDAO.modelSetFields(arg_1,\n {'modelCheckpointId':None},\n ignoreUnchanged=True)\n return"} +{"_id": "doc_1627", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Writes the results of one iteration of a model. The results are written to\n this ModelRunner's in-memory cache unless this model is the \"best model\" for\n the job. If this model is the \"best model\", the predictions are written out\n to a permanent store via a prediction output stream instance\n\n\n Parameters:\n -----------------------------------------------------------------------\n result: A opf_utils.ModelResult object, which contains the input and\n output for this iteration\n \"\"\"\n arg_0.__predictionCache.append(arg_1)\n\n if arg_0._isBestModel:\n arg_0.__flushPredictionCache()"} +{"_id": "doc_1628", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete's the output cache associated with the given modelID. This actually\n clears up the resources associated with the cache, rather than deleting al\n the records in the cache\n\n Parameters:\n -----------------------------------------------------------------------\n modelID: The id of the model whose output cache is being deleted\n\n \"\"\"\n\n # If this is our output, we should close the connection\n if arg_1 == arg_0._modelID and arg_0._predictionLogger is not None:\n arg_0._predictionLogger.close()\n del arg_0.__predictionCache\n arg_0._predictionLogger = None\n arg_0.__predictionCache = None"} +{"_id": "doc_1629", "title": "", "text": "def Func(arg_0):\n \"\"\" Creates and returns a PeriodicActivityMgr instance initialized with\n our periodic activities\n\n Parameters:\n -------------------------------------------------------------------------\n retval: a PeriodicActivityMgr instance\n \"\"\"\n\n # Activity to update the metrics for this model\n # in the models table\n arg_1 = PeriodicActivityRequest(repeating=True,\n period=100,\n cb=arg_0._updateModelDBResults)\n\n arg_2 = PeriodicActivityRequest(repeating=True,\n period=100,\n cb=arg_0.__updateJobResultsPeriodic)\n\n arg_3 = PeriodicActivityRequest(repeating=True,\n period=50,\n cb=arg_0.__checkCancelation)\n\n arg_4 = PeriodicActivityRequest(repeating=True,\n period=10,\n cb=arg_0.__checkMaturity)\n\n\n # Do an initial update of the job record after 2 iterations to make\n # sure that it is populated with something without having to wait too long\n arg_5 = PeriodicActivityRequest(repeating=False,\n period=2,\n cb=arg_0.__updateJobResultsPeriodic)\n\n\n arg_6 = [arg_1,\n arg_5,\n arg_2,\n arg_3]\n\n if arg_0._isMaturityEnabled:\n arg_6.append(arg_4)\n\n return PeriodicActivityMgr(requestedActivities=arg_6)"} +{"_id": "doc_1630", "title": "", "text": "def Func(arg_0):\n \"\"\" Check if the cancelation flag has been set for this model\n in the Model DB\"\"\"\n\n # Update a hadoop job counter at least once every 600 seconds so it 
doesn't\n # think our map task is dead\n print >>sys.stderr, \"reporter:counter:HypersearchWorker,numRecords,50\"\n\n # See if the job got cancelled\n arg_1 = arg_0._jobsDAO.jobGetFields(arg_0._jobID, ['cancel'])[0]\n if arg_1:\n arg_0._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED\n arg_0._isCanceled = True\n arg_0._logger.info(\"Model %s canceled because Job %s was stopped.\",\n arg_0._modelID, arg_0._jobID)\n else:\n arg_4 = arg_0._jobsDAO.modelsGetFields(arg_0._modelID, ['engStop'])[0]\n\n if arg_4 is None:\n pass\n\n elif arg_4 == ClientJobsDAO.STOP_REASON_KILLED:\n arg_0._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED\n arg_0._isKilled = True\n arg_0._logger.info(\"Model %s canceled because it was killed by hypersearch\",\n arg_0._modelID)\n\n elif arg_4 == ClientJobsDAO.STOP_REASON_STOPPED:\n arg_0._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED\n arg_0._isCanceled = True\n arg_0._logger.info(\"Model %s stopped because hypersearch ended\", arg_0._modelID)\n else:\n raise RuntimeError (\"Unexpected stop reason encountered: %s\" % (arg_4))"} +{"_id": "doc_1631", "title": "", "text": "def Func(arg_0):\n \"\"\" Save the current metric value and see if the model's performance has\n 'leveled off.' We do this by looking at some number of previous number of\n recordings \"\"\"\n\n if arg_0._currentRecordIndex+1 < arg_0._MIN_RECORDS_TO_BE_BEST:\n return\n\n # If we are already mature, don't need to check anything\n if arg_0._isMature:\n return\n\n arg_1 = arg_0._getMetrics()[arg_0._optimizedMetricLabel]\n arg_0._metricRegression.addPoint(x=arg_0._currentRecordIndex, y=arg_1)\n\n # Perform a linear regression to see if the error is leveled off\n #pctChange = self._metricRegression.getPctChange()\n #if pctChange is not None and abs(pctChange ) <= self._MATURITY_MAX_CHANGE:\n arg_2, arg_3 = arg_0._metricRegression.getPctChanges()\n if arg_2 is not None and arg_3 <= arg_0._MATURITY_MAX_CHANGE:\n arg_0._jobsDAO.modelSetFields(arg_0._modelID,\n {'engMatured':True})\n\n # TODO: Don't stop if we are currently the best model. Also, if we\n # are still running after maturity, we have to periodically check to\n # see if we are still the best model. As soon we lose to some other\n # model, then we should stop at that point.\n arg_0._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED\n arg_0._isMature = True\n\n arg_0._logger.info(\"Model %d has matured (pctChange=%s, n=%d). 
\\n\"\\\n \"Scores = %s\\n\"\\\n \"Stopping execution\",arg_0._modelID, arg_2,\n arg_0._MATURITY_NUM_POINTS,\n arg_0._metricRegression._window)"} +{"_id": "doc_1632", "title": "", "text": "def Func(arg_0):\n \"\"\"Set our state to that obtained from the engWorkerState field of the\n job record.\n\n\n Parameters:\n ---------------------------------------------------------------------\n stateJSON: JSON encoded state from job record\n\n \"\"\"\n arg_0._priorStateJSON = arg_0._hsObj._cjDAO.jobGetFields(arg_0._hsObj._jobID,\n ['engWorkerState'])[0]\n\n # Init if no prior state yet\n if arg_0._priorStateJSON is None:\n arg_2 = dict()\n\n # Fast Swarm, first and only sprint has one swarm for each field\n # in fixedFields\n if arg_0._hsObj._fixedFields is not None:\n print arg_0._hsObj._fixedFields\n arg_3 = []\n for arg_4 in arg_0._hsObj._fixedFields:\n if arg_4 =='_classifierInput':\n continue\n arg_5 = arg_0.getEncoderKeyFromName(arg_4)\n assert arg_5 in arg_0._hsObj._encoderNames, \"The field '%s' \" \\\n \" specified in the fixedFields list is not present in this \" \\\n \" model.\" % (arg_4)\n arg_3.append(arg_5)\n arg_3.sort()\n arg_2['.'.join(arg_3)] = {\n 'status': 'active',\n 'bestModelId': None,\n 'bestErrScore': None,\n 'sprintIdx': 0,\n }\n # Temporal prediction search, first sprint has N swarms of 1 field each,\n # the predicted field may or may not be that one field.\n elif arg_0._hsObj._searchType == HsSearchType.temporal:\n for arg_5 in arg_0._hsObj._encoderNames:\n arg_2[arg_5] = {\n 'status': 'active',\n 'bestModelId': None,\n 'bestErrScore': None,\n 'sprintIdx': 0,\n }\n\n\n # Classification prediction search, first sprint has N swarms of 1 field\n # each where this field can NOT be the predicted field.\n elif arg_0._hsObj._searchType == HsSearchType.classification:\n for arg_5 in arg_0._hsObj._encoderNames:\n if arg_5 == arg_0._hsObj._predictedFieldEncoder:\n continue\n arg_2[arg_5] = {\n 'status': 'active',\n 'bestModelId': None,\n 'bestErrScore': None,\n 'sprintIdx': 0,\n }\n\n # Legacy temporal. This is either a model that uses reconstruction or\n # an older multi-step model that doesn't have a separate\n # 'classifierOnly' encoder for the predicted field. Here, the predicted\n # field must ALWAYS be present and the first sprint tries the predicted\n # field only\n elif arg_0._hsObj._searchType == HsSearchType.legacyTemporal:\n arg_2[arg_0._hsObj._predictedFieldEncoder] = {\n 'status': 'active',\n 'bestModelId': None,\n 'bestErrScore': None,\n 'sprintIdx': 0,\n }\n\n else:\n raise RuntimeError(\"Unsupported search type: %s\" % \\\n (arg_0._hsObj._searchType))\n\n # Initialize the state.\n arg_0._state = dict(\n # The last time the state was updated by a worker.\n lastUpdateTime = time.time(),\n\n # Set from within setSwarmState() if we detect that the sprint we just\n # completed did worse than a prior sprint. 
This stores the index of\n # the last good sprint.\n lastGoodSprint = None,\n\n # Set from within setSwarmState() if lastGoodSprint is True and all\n # sprints have completed.\n searchOver = False,\n\n # This is a summary of the active swarms - this information can also\n # be obtained from the swarms entry that follows, but is summarized here\n # for easier reference when viewing the state as presented by\n # log messages and prints of the hsState data structure (by\n # permutations_runner).\n activeSwarms = arg_2.keys(),\n\n # All the swarms that have been created so far.\n arg_2 = arg_2,\n\n # All the sprints that have completed or are in progress.\n sprints = [{'status': 'active',\n 'bestModelId': None,\n 'bestErrScore': None}],\n\n # The list of encoders we have \"blacklisted\" because they\n # performed so poorly.\n blackListedEncoders = [],\n )\n\n # This will do nothing if the value of engWorkerState is not still None.\n arg_0._hsObj._cjDAO.jobSetFieldIfEqual(\n arg_0._hsObj._jobID, 'engWorkerState', json.dumps(arg_0._state), None)\n\n arg_0._priorStateJSON = arg_0._hsObj._cjDAO.jobGetFields(\n arg_0._hsObj._jobID, ['engWorkerState'])[0]\n assert (arg_0._priorStateJSON is not None)\n\n # Read state from the database\n arg_0._state = json.loads(arg_0._priorStateJSON)\n arg_0._dirty = False"} +{"_id": "doc_1633", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the list of all swarms in the given sprint.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of active swarm Ids in the given sprint\n \"\"\"\n arg_2 = []\n for arg_3, arg_4 in arg_0._state['swarms'].iteritems():\n if arg_4['sprintIdx'] == arg_1:\n arg_2.append(arg_3)\n\n return arg_2"} +{"_id": "doc_1634", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the list of all completing swarms.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of active swarm Ids\n \"\"\"\n arg_1 = []\n for arg_2, arg_3 in arg_0._state['swarms'].iteritems():\n if arg_3['status'] == 'completing':\n arg_1.append(arg_2)\n\n return arg_1"} +{"_id": "doc_1635", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return True if the given sprint has completed.\"\"\"\n arg_2 = len(arg_0._state['sprints'])\n if arg_1 >= arg_2:\n return False\n\n return (arg_0._state['sprints'][arg_1]['status'] == 'completed')"} +{"_id": "doc_1636", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert the information of the node spec to a plain dict of basic types\n\n The description and singleNodeOnly attributes are placed directly in\n the result dicts. The inputs, outputs, parameters and commands dicts\n contain Spec item objects (InputSpec, OutputSpec, etc). Each such object\n is converted also to a plain dict using the internal items2dict() function\n (see bellow).\n \"\"\"\n\n def items2dict(arg_1):\n \"\"\"Convert a dict of node spec items to a plain dict\n\n Each node spec item object will be converted to a dict of its\n attributes. 
The entire items dict will become a dict of dicts (same keys).\n \"\"\"\n arg_2 = {}\n for arg_3, arg_4 in arg_1.items():\n arg_2[arg_3] = arg_4.__dict__\n\n return arg_2\n\n arg_0.invariant()\n return dict(description=arg_0.description,\n singleNodeOnly=arg_0.singleNodeOnly,\n inputs=items2dict(arg_0.inputs),\n outputs=items2dict(arg_0.outputs),\n parameters=items2dict(arg_0.parameters),\n commands=items2dict(arg_0.commands))"} +{"_id": "doc_1637", "title": "", "text": "def Func():\n \"\"\"Create the encoder instance for our test and return it.\"\"\"\n arg_0 = ScalarEncoder(21, 0.0, 100.0, n=50, name=\"consumption\",\n clipInput=True)\n arg_1 = DateEncoder(timeOfDay=(21, 9.5), name=\"timestamp_timeOfDay\")\n\n arg_2 = MultiEncoder()\n arg_2.addEncoder(\"consumption\", arg_0)\n arg_2.addEncoder(\"timestamp\", arg_1)\n\n return arg_2"} +{"_id": "doc_1638", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Validates control dictionary for the experiment context\"\"\"\n # Validate task list\n arg_2 = arg_1.get('tasks', None)\n if arg_2 is not None:\n arg_3 = []\n\n for arg_4 in arg_2:\n validateOpfJsonValue(arg_4, \"opfTaskSchema.json\")\n validateOpfJsonValue(arg_4['taskControl'], \"opfTaskControlSchema.json\")\n\n arg_5 = arg_4['taskLabel']\n\n assert isinstance(arg_5, types.StringTypes), \\\n \"taskLabel type: %r\" % type(arg_5)\n assert len(arg_5) > 0, \"empty string taskLabel is not allowed\"\n\n arg_3.append(arg_5.lower())\n\n arg_6 = filter(lambda x: arg_3.count(x) > 1,\n arg_3)\n assert len(arg_6) == 0, \\\n \"Duplicate task labels are not allowed: %s\" % arg_6\n\n return"} +{"_id": "doc_1639", "title": "", "text": "def Func(arg_0=[], arg_1=[]):\n \"\"\"\n Extract all items from the 'allKeys' list whose key matches one of the regular\n expressions passed in 'reportKeys'.\n\n Parameters:\n ----------------------------------------------------------------------------\n reportKeyREs: List of regular expressions\n allReportKeys: List of all keys\n\n retval: list of keys from allReportKeys that match the regular expressions\n in 'reportKeyREs'\n If an invalid regular expression was included in 'reportKeys',\n then BadKeyError() is raised\n \"\"\"\n\n arg_2 = []\n\n # Extract the report items of interest\n for arg_3 in arg_0:\n # Find all keys that match this regular expression\n arg_4 = re.compile(arg_3)\n arg_5 = False\n for arg_6 in arg_1:\n arg_7 = arg_4.match(arg_6)\n if arg_7 and arg_7.end() == len(arg_6):\n arg_2.append(arg_6)\n arg_5 = True\n if not arg_5:\n raise _BadKeyError(arg_3)\n\n return arg_2"} +{"_id": "doc_1640", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a specific item by name out of the results dict.\n\n The format of itemName is a string of dictionary keys separated by colons,\n each key being one level deeper into the results dict. 
For example,\n 'key1:key2' would fetch results['key1']['key2'].\n\n If itemName is not found in results, then None is returned\n\n \"\"\"\n\n arg_2 = arg_0.split(':')\n arg_3 = arg_1\n for arg_4 in arg_2:\n arg_3 = arg_3[arg_4]\n\n return arg_3"} +{"_id": "doc_1641", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5):\n \"\"\" Perform standard handling of an exception that occurs while running\n a model.\n\n Parameters:\n -------------------------------------------------------------------------\n jobID: ID for this hypersearch job in the jobs table\n modelID: model ID\n jobsDAO: ClientJobsDAO instance\n experimentDir: directory containing the experiment\n logger: the logger to use\n e: the exception that occurred\n retval: (completionReason, completionMsg)\n \"\"\"\n\n arg_6 = StringIO.StringIO()\n print >>arg_6, \"Exception occurred while running model %s: %r (%s)\" % (\n arg_1, arg_5, type(arg_5))\n traceback.print_exc(None, arg_6)\n\n arg_7 = arg_2.CMPL_REASON_ERROR\n arg_8 = arg_6.getvalue()\n arg_4.error(arg_8)\n\n # Write results to the model database for the error case. Ignore\n # InvalidConnectionException, as this is usually caused by orphaned models\n #\n # TODO: do we really want to set numRecords to 0? Last updated value might\n # be useful for debugging\n if type(arg_5) is not InvalidConnectionException:\n arg_2.modelUpdateResults(arg_1, results=None, numRecords=0)\n\n # TODO: Make sure this wasn't the best model in job. If so, set the best\n # appropriately\n\n # If this was an exception that should mark the job as failed, do that\n # now.\n if type(arg_5) == JobFailException:\n arg_9 = arg_2.jobGetFields(arg_0,\n ['workerCompletionReason'])[0]\n if arg_9 == ClientJobsDAO.CMPL_REASON_SUCCESS:\n arg_2.jobSetFields(arg_0, fields=dict(\n cancel=True,\n workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,\n workerCompletionMsg = \": \".join(str(arg_10) for arg_10 in arg_5.args)),\n useConnectionID=False,\n ignoreUnchanged=True)\n\n return (arg_7, arg_8)"} +{"_id": "doc_1642", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6, arg_7,\n arg_8, arg_9=None, arg_10=None):\n \"\"\" This creates an experiment directory with a base.py description file\n created from 'baseDescription' and a description.py generated from the\n given params dict and then runs the experiment.\n\n Parameters:\n -------------------------------------------------------------------------\n modelID: ID for this model in the models table\n jobID: ID for this hypersearch job in the jobs table\n baseDescription: Contents of a description.py with the base experiment\n description\n params: Dictionary of specific parameters to override within\n the baseDescriptionFile.\n predictedField: Name of the input field for which this model is being\n optimized\n reportKeys: Which metrics of the experiment to store into the\n results dict of the model's database entry\n optimizeKey: Which metric we are optimizing for\n jobsDAO Jobs data access object - the interface to the\n jobs database which has the model's table.\n modelCheckpointGUID: A persistent, globally-unique identifier for\n constructing the model checkpoint key\n logLevel: override logging level to this value, if not None\n\n retval: (completionReason, completionMsg)\n \"\"\"\n from nupic.swarming.ModelRunner import OPFModelRunner\n\n # The logger for this method\n arg_11 = logging.getLogger('com.numenta.nupic.hypersearch.utils')\n\n\n # 
--------------------------------------------------------------------------\n # Create a temp directory for the experiment and the description files\n arg_12 = tempfile.mkdtemp()\n try:\n arg_11.info(\"Using experiment directory: %s\" % (arg_12))\n\n # Create the decription.py from the overrides in params\n arg_13 = os.path.join(arg_12, 'description.py')\n arg_14 = open(arg_13, 'wb')\n arg_14.write(_paramsFileHead())\n\n arg_15 = arg_3.items()\n arg_15.sort()\n for (arg_16,arg_17) in arg_15:\n arg_18 = _quoteAndEscape(arg_16)\n if isinstance(arg_17, basestring):\n\n arg_14.write(\" %s : '%s',\\n\" % (arg_18 , arg_17))\n else:\n arg_14.write(\" %s : %s,\\n\" % (arg_18 , arg_17))\n\n arg_14.write(_paramsFileTail())\n arg_14.close()\n\n\n # Write out the base description\n arg_19 = open(os.path.join(arg_12, 'base.py'), 'wb')\n arg_19.write(arg_2)\n arg_19.close()\n\n\n # Store the experiment's sub-description file into the model table\n # for reference\n arg_20 = open(arg_13)\n arg_21 = arg_20.read()\n arg_20.close()\n arg_7.modelSetFields(arg_0, {'genDescription': arg_21})\n\n\n # Run the experiment now\n try:\n arg_22 = OPFModelRunner(\n arg_0=arg_0,\n arg_1=arg_1,\n arg_4=arg_4,\n arg_12=arg_12,\n reportKeyPatterns=arg_5,\n optimizeKeyPattern=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10)\n\n signal.signal(signal.SIGINT, arg_22.handleWarningSignal)\n\n (arg_23, arg_24) = arg_22.run()\n\n except InvalidConnectionException:\n raise\n except Exception, e:\n\n (arg_23, arg_24) = _handleModelRunnerException(arg_1,\n arg_0, arg_7, arg_12, arg_11, e)\n\n finally:\n # delete our temporary directory tree\n shutil.rmtree(arg_12)\n signal.signal(signal.SIGINT, signal.default_int_handler)\n\n # Return completion reason and msg\n return (arg_23, arg_24)"} +{"_id": "doc_1643", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=True, arg_4=True):\n \"\"\"Recursively copies a dict and returns the result.\n\n Args:\n d: The dict to copy.\n f: A function to apply to values when copying that takes the value and the\n list of keys from the root of the dict to the value and returns a value\n for the new dict.\n discardNoneKeys: If True, discard key-value pairs when f returns None for\n the value.\n deepCopy: If True, all values in returned dict are true copies (not the\n same object).\n Returns:\n A new dict with keys and values from d replaced with the result of f.\n \"\"\"\n # Optionally deep copy the dict.\n if arg_4:\n arg_0 = copy.deepcopy(arg_0)\n\n arg_5 = {}\n arg_6 = [(arg_7, arg_8, arg_5, ()) for arg_7, arg_8 in arg_0.iteritems()]\n while len(arg_6) > 0:\n arg_7, arg_8, arg_0, arg_9 = arg_6.pop()\n arg_9 = arg_9 + (arg_7,)\n if isinstance(arg_8, dict):\n arg_0[arg_7] = dict()\n arg_6[0:0] = [(innerK, innerV, arg_0[arg_7], arg_9)\n for innerK, innerV in arg_8.iteritems()]\n else:\n #print k, v, prevKeys\n arg_10 = arg_1(arg_8, arg_9)\n if not arg_3 or arg_10 is not None:\n arg_0[arg_7] = arg_10\n return arg_5"} +{"_id": "doc_1644", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Recursively applies f to the values in dict d.\n\n Args:\n d: The dict to recurse over.\n f: A function to apply to values in d that takes the value and a list of\n keys from the root of the dict to the value.\n \"\"\"\n arg_2 = [(arg_0, ())]\n while len(arg_2) > 0:\n arg_3, arg_4 = arg_2.pop()\n for arg_5, arg_6 in arg_3.iteritems():\n arg_7 = arg_4 + (arg_5,)\n if isinstance(arg_6, dict):\n arg_2.insert(0, (arg_6, arg_7))\n else:\n arg_1(arg_6, arg_7)"} +{"_id": "doc_1645", "title": "", 
"text": "def Func(arg_0, arg_1=64):\n \"\"\"\n Return a clipped version of obj suitable for printing, This\n is useful when generating log messages by printing data structures, but\n don't want the message to be too long.\n\n If passed in a dict, list, or namedtuple, each element of the structure's\n string representation will be limited to 'maxElementSize' characters. This\n will return a new object where the string representation of each element\n has been truncated to fit within maxElementSize.\n \"\"\"\n\n # Is it a named tuple?\n if hasattr(arg_0, '_asdict'):\n arg_0 = arg_0._asdict()\n\n\n # Printing a dict?\n if isinstance(arg_0, dict):\n arg_2 = dict()\n for arg_3,arg_4 in arg_0.iteritems():\n arg_2[arg_3] = Func(arg_4)\n\n # Printing a list?\n elif hasattr(arg_0, '__iter__'):\n arg_2 = []\n for arg_4 in arg_0:\n arg_2.append(Func(arg_4))\n\n # Some other object\n else:\n arg_2 = str(arg_0)\n if len(arg_2) > arg_1:\n arg_2 = arg_2[0:arg_1] + '...'\n\n return arg_2"} +{"_id": "doc_1646", "title": "", "text": "def Func(arg_0):\n \"\"\" Loads a json value from a file and converts it to the corresponding python\n object.\n\n inputFilePath:\n Path of the json file;\n\n Returns:\n python value that represents the loaded json value\n\n \"\"\"\n with open(arg_0) as fileObj:\n arg_1 = json.load(fileObj)\n\n return arg_1"} +{"_id": "doc_1647", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Recursively updates the values in original with the values from updates.\"\"\"\n # Keep a list of the sub-dictionaries that need to be updated to avoid having\n # to use recursion (which could fail for dictionaries with a lot of nesting.\n arg_2 = [(arg_0, arg_1)]\n while len(arg_2) > 0:\n arg_0, arg_1 = arg_2.pop()\n for arg_3, arg_4 in arg_1.iteritems():\n if arg_3 in arg_0 and isinstance(arg_0[arg_3], dict) and isinstance(arg_4, dict):\n arg_2.append((arg_0[arg_3], arg_4))\n else:\n arg_0[arg_3] = arg_4"} +{"_id": "doc_1648", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compares two python dictionaries at the top level and report differences,\n if any, to stdout\n\n da: first dictionary\n db: second dictionary\n\n Returns: The same value as returned by dictDiff() for the given args\n \"\"\"\n arg_2 = dictDiff(arg_0, arg_1)\n\n if not arg_2:\n return arg_2\n\n if arg_2['inAButNotInB']:\n print \">>> inAButNotInB: %s\" % arg_2['inAButNotInB']\n\n if arg_2['inBButNotInA']:\n print \">>> inBButNotInA: %s\" % arg_2['inBButNotInA']\n\n for arg_3 in arg_2['differentValues']:\n print \">>> da[%s] != db[%s]\" % (arg_3, arg_3)\n print \"da[%s] = %r\" % (arg_3, arg_0[arg_3])\n print \"db[%s] = %r\" % (arg_3, arg_1[arg_3])\n\n return arg_2"} +{"_id": "doc_1649", "title": "", "text": "def Func(arg_0=0.001):\n \"\"\"\n Given model params, figure out the correct resolution for the\n RandomDistributed encoder. Modifies params in place.\n \"\"\"\n arg_1 = (\n model_params.MODEL_PARAMS[\"modelParams\"][\"sensorParams\"][\"encoders\"][\"value\"]\n )\n\n if arg_1[\"type\"] == \"RandomDistributedScalarEncoder\":\n arg_2 = abs(_INPUT_MAX - _INPUT_MIN) * 0.2\n arg_3 = _INPUT_MIN - arg_2\n arg_4 = _INPUT_MAX + arg_2\n arg_5 = max(arg_0,\n (arg_4 - arg_3) / arg_1.pop(\"numBuckets\")\n )\n arg_1[\"resolution\"] = arg_5"} +{"_id": "doc_1650", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Remove labels from each record with record ROWID in range from\n start to end, noninclusive of end. 
Removes all records if labelFilter is\n None, otherwise only removes the labels equal to labelFilter.\n\n This will recalculate all points from end to the last record stored in the\n internal cache of this classifier.\n \"\"\"\n\n if len(arg_0.saved_states) == 0:\n raise HTMPredictionModelInvalidRangeError(\"Invalid supplied range for \"\n \"'Func'. Model has no saved records.\")\n\n arg_4 = arg_0.saved_states[0].ROWID\n\n arg_5 = 0 if arg_1 is None else max(0, arg_1 - arg_4)\n arg_6 = len(arg_0.saved_states) if arg_2 is None else \\\n max(0, min( len( arg_0.saved_states) , arg_2 - arg_4))\n\n if arg_6 <= arg_5:\n raise HTMPredictionModelInvalidRangeError(\"Invalid supplied range for \"\n \"'Func'.\", debugInfo={\n 'requestRange': {\n 'startRecordID': arg_1,\n 'endRecordID': arg_2\n },\n 'clippedRequestRange': {\n 'startRecordID': arg_5,\n 'endRecordID': arg_6\n },\n 'validRange': {\n 'startRecordID': arg_4,\n 'endRecordID': arg_0.saved_states[len(arg_0.saved_states)-1].ROWID\n },\n 'numRecordsStored': len(arg_0.saved_states)\n })\n\n # Remove records within the cache\n arg_7 = []\n for arg_8 in arg_0.saved_states[arg_5:arg_6]:\n if arg_3 is not None:\n if arg_3 in arg_8.anomalyLabel:\n arg_8.anomalyLabel.remove(arg_3)\n else:\n arg_8.anomalyLabel = []\n arg_8.setByUser = False\n arg_7.append(arg_8)\n arg_0._deleteRecordsFromKNN(arg_7)\n\n # Remove records not in cache\n arg_0._deleteRangeFromKNN(arg_1, arg_2)\n\n # Recompute [clippedEnd, ...)\n for arg_8 in arg_0.saved_states[arg_6:]:\n arg_0._updateState(arg_8)\n\n return {'status': 'success'}"} +{"_id": "doc_1651", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This method will remove the given records from the classifier.\n\n parameters\n ------------\n recordsToDelete - list of records to delete from the classifier\n \"\"\"\n arg_2 = arg_0.htm_prediction_model._getAnomalyClassifier()\n arg_3 = arg_2.getSelf()._knn\n\n arg_4 = arg_2.getSelf().getParameter('categoryRecencyList')\n\n arg_5 = [r.ROWID for r in arg_1 if \\\n not r.setByUser and r.ROWID in arg_4]\n\n arg_6 = arg_3._numPatterns\n arg_3.removeIds(arg_5)\n assert arg_3._numPatterns == arg_6 - len(arg_5)"} +{"_id": "doc_1652", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Construct a _HTMClassificationRecord based on the current state of the\n htm_prediction_model of this classifier.\n\n ***This will look into the internals of the model and may depend on the\n SP, TM, and KNNClassifier***\n \"\"\"\n arg_1 = arg_0.htm_prediction_model\n arg_2 = arg_1._getSPRegion()\n arg_3 = arg_1._getTPRegion()\n arg_4 = arg_3.getSelf()._tfdr\n\n # Count the number of unpredicted columns\n arg_5 = arg_2.getOutputData(\"bottomUpOut\").nonzero()[0]\n arg_6 = arg_12.in1d(arg_5, arg_0._prevPredictedColumns).sum()\n arg_6 = (arg_0._activeColumnCount - arg_6)/float(arg_0._activeColumnCount)\n\n arg_7 = arg_2.getParameter('activeOutputCount')\n arg_8 = arg_3.getParameter('cellsPerColumn') * arg_3.getParameter('columnCount')\n\n arg_9 = arg_12.array([])\n\n if arg_0._vectorType == 'tpc':\n # Classification Vector: [---TM Cells---]\n arg_9 = arg_12.zeros(arg_8)\n arg_10 = arg_4.getLearnActiveStateT().reshape(arg_8, 1)\n arg_11 = arg_12.where(arg_10 > 0)[0]\n if arg_11.shape[0] > 0:\n arg_9[arg_12.array(arg_11, arg_14=arg_12.uint16)] = 1\n elif arg_0._vectorType == 'sp_tpe':\n # Classification Vector: [---SP---|---(TM-SP)----]\n arg_9 = arg_12.zeros(arg_7+arg_7)\n if arg_5.shape[0] > 0:\n arg_9[arg_5] = 1.0\n\n arg_16 = arg_12.setdiff1d(arg_0._prevPredictedColumns, arg_5)\n if 
arg_16.shape[0] > 0:\n arg_17 = ( arg_12.array(arg_16, arg_14=arg_12.uint16) +\n arg_7 )\n arg_9[arg_17] = 1.0\n else:\n raise TypeError(\"Classification vector type must be either 'tpc' or\"\n \" 'sp_tpe', current value is %s\" % (arg_0._vectorType))\n\n # Store the state for next time step\n arg_18 = len(arg_0._prevPredictedColumns)\n arg_19 = arg_3.getOutputData(\"topDownOut\").nonzero()[0]\n arg_0._prevPredictedColumns = copy.deepcopy(arg_19)\n\n if arg_0._anomalyVectorLength is None:\n arg_0._anomalyVectorLength = len(arg_9)\n\n arg_22 = _CLAClassificationRecord(\n ROWID=int(arg_1.getParameter('__numRunCalls') - 1), #__numRunCalls called\n #at beginning of model.run\n anomalyScore=arg_6,\n anomalyVector=arg_9.nonzero()[0].tolist(),\n anomalyLabel=[]\n )\n return arg_22"} +{"_id": "doc_1653", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets the autoDetectWaitRecords.\n \"\"\"\n if not isinstance(arg_1, int):\n raise HTMPredictionModelInvalidArgument(\"Invalid argument type \\'%s\\'. WaitRecord \"\n \"must be a number.\" % (type(arg_1)))\n\n if len(arg_0.saved_states) > 0 and arg_1 < arg_0.saved_states[0].ROWID:\n raise HTMPredictionModelInvalidArgument(\"Invalid value. autoDetectWaitRecord value \"\n \"must be valid record within output stream. Current minimum ROWID in \"\n \"output stream is %d.\" % (arg_0.saved_states[0].ROWID))\n\n arg_0._autoDetectWaitRecords = arg_1\n\n # Update all the states in the classifier's cache\n for arg_3 in arg_0.saved_states:\n arg_0._updateState(arg_3)"} +{"_id": "doc_1654", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Run one iteration, profiling it if requested.\n\n :param inputs: (dict) mapping region input names to numpy.array values\n :param outputs: (dict) mapping region output names to numpy.arrays that \n should be populated with output values by this method\n \"\"\"\n\n # Uncomment this to find out who is generating divide by 0, or other numpy warnings\n # numpy.seterr(divide='raise', invalid='raise', over='raise')\n\n # Modify this line to turn on profiling for a given node. 
The results file\n # ('hotshot.stats') will be sensed and printed out by the vision framework's\n # RunInference.py script at the end of inference.\n # Also uncomment the hotshot import at the top of this file.\n if False and arg_0.learningMode \\\n and arg_0._iterations > 0 and arg_0._iterations <= 10:\n\n import hotshot\n if arg_0._iterations == 10:\n print \"\\n Collecting and sorting internal node profiling stats generated by hotshot...\"\n arg_3 = hotshot.stats.load(\"hotshot.stats\")\n arg_3.strip_dirs()\n arg_3.sort_stats('time', 'calls')\n arg_3.print_stats()\n\n # The guts of the Func are contained in the _Func() call so that we\n # can profile it if requested.\n if arg_0._profileObj is None:\n print \"\\n Preparing to capture profile using hotshot...\"\n if os.path.exists('hotshot.stats'):\n # There is an old hotshot stats profile left over, remove it.\n os.remove('hotshot.stats')\n arg_0._profileObj = hotshot.Profile(\"hotshot.stats\", 1, 1)\n # filename, lineevents, linetimings\n arg_0._profileObj.runcall(arg_0._Func, *[arg_1, arg_2])\n else:\n arg_0._Func(arg_1, arg_2)"} +{"_id": "doc_1655", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Run one iteration of SPRegion's compute\n \"\"\"\n\n #if self.topDownMode and (not 'topDownIn' in inputs):\n # raise RuntimeError(\"The input topDownIn must be linked in if \"\n # \"topDownMode is True\")\n\n if arg_0._sfdr is None:\n raise RuntimeError(\"Spatial pooler has not been initialized\")\n\n\n if not arg_0.topDownMode:\n #\n # BOTTOM-UP compute\n #\n\n arg_0._iterations += 1\n\n # Get our inputs into numpy arrays\n arg_3 = arg_1['bottomUpIn']\n\n arg_4 = False\n if 'resetIn' in arg_1:\n assert len(arg_1['resetIn']) == 1\n arg_4 = arg_1['resetIn'][0] != 0\n\n # Perform inference and/or learning\n arg_5 = arg_0._doBottomUpCompute(\n rfInput = arg_3.reshape((1,arg_3.size)),\n arg_4 = arg_4\n )\n\n arg_2['bottomUpOut'][:] = arg_5.flat\n\n else:\n #\n # TOP-DOWN inference\n #\n\n arg_6 = arg_1.get('topDownIn',None)\n arg_7, arg_8 = arg_0._doTopDownInfer(arg_6)\n arg_2['spatialTopDownOut'][:] = arg_7\n if arg_8 is not None:\n arg_2['temporalTopDownOut'][:] = arg_8\n\n\n # OBSOLETE\n arg_2['anomalyScore'][:] = 0"} +{"_id": "doc_1656", "title": "", "text": "def Func(arg_0):\n \"\"\"Figure out whether reset, sequenceId,\n both or neither are present in the data.\n Compute once instead of every time.\n\n Taken from filesource.py\"\"\"\n\n arg_1 = arg_0.resetFieldName is not None\n arg_2 = arg_0.sequenceIdFieldName is not None\n\n if arg_1 and not arg_2:\n arg_0._sequenceInfoType = arg_0.SEQUENCEINFO_RESET_ONLY\n arg_0._prevSequenceId = 0\n elif not arg_1 and arg_2:\n arg_0._sequenceInfoType = arg_0.SEQUENCEINFO_SEQUENCEID_ONLY\n arg_0._prevSequenceId = None\n elif arg_1 and arg_2:\n arg_0._sequenceInfoType = arg_0.SEQUENCEINFO_BOTH\n else:\n arg_0._sequenceInfoType = arg_0.SEQUENCEINFO_NONE"} +{"_id": "doc_1657", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2={}):\n \"\"\"\n Get the default arguments from the function and assign as instance vars.\n\n Return a list of 3-tuples with (name, description, defaultValue) for each\n argument to the function.\n\n Assigns all arguments to the function as instance variables of TMRegion.\n If the argument was not provided, uses the default value.\n\n Pops any values from kwargs that go to the function.\n \"\"\"\n # Get the name, description, and default value for each argument\n arg_3 = getArgumentDescriptions(arg_0)\n arg_3 = arg_3[1:] # Remove 'self'\n\n # Get the names of the 
parameters to our own constructor and remove them\n # Check for _originial_init first, because if LockAttributesMixin is used,\n # __init__'s signature will be just (self, *args, **kw), but\n # _original_init is created with the original signature\n #init = getattr(self, '_original_init', self.__init__)\n arg_4 = TMRegion.__init__\n arg_5 = [t[0] for t in getArgumentDescriptions(arg_4)]\n # Also remove a few other names that aren't in our constructor but are\n # computed automatically (e.g. numberOfCols for the TM)\n arg_5 += [\n 'numberOfCols', # TM\n ]\n for arg_6 in arg_3[:]:\n if arg_6[0] in arg_5:\n arg_3.remove(arg_6)\n\n # Build the dictionary of arguments\n if arg_1:\n for arg_6 in arg_3:\n arg_7 = arg_6[0]\n if arg_7 in arg_2:\n # Argument was provided\n arg_8 = arg_2.pop(arg_7)\n else:\n # Argument was not provided; use the default value if there is one, and\n # raise an exception otherwise\n if len(arg_6) == 2:\n # No default value\n raise TypeError(\"Must provide '%s'\" % arg_7)\n arg_8 = arg_6[2]\n # Set as an instance variable if 'self' was passed in\n setattr(arg_1, arg_7, arg_8)\n\n return arg_3"} +{"_id": "doc_1658", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Run one iteration of TMRegion's compute\n \"\"\"\n\n #if self.topDownMode and (not 'topDownIn' in inputs):\n # raise RuntimeError(\"The input topDownIn must be linked in if \"\n # \"topDownMode is True\")\n\n if arg_0._tfdr is None:\n raise RuntimeError(\"TM has not been initialized\")\n\n # Conditional compute break\n arg_0._conditionalBreak()\n\n arg_0._iterations += 1\n\n # Get our inputs as numpy array\n arg_3 = arg_1['bottomUpIn']\n\n # Handle reset signal\n arg_4 = False\n if 'resetIn' in arg_1:\n assert len(arg_1['resetIn']) == 1\n if arg_1['resetIn'][0] != 0:\n arg_0._tfdr.reset()\n arg_0._sequencePos = 0 # Position within the current sequence\n\n if arg_0.computePredictedActiveCellIndices:\n arg_6 = arg_0._tfdr.getPredictedState().reshape(-1).astype('float32')\n\n if arg_0.anomalyMode:\n arg_7 = arg_0._tfdr.topDownCompute().copy().nonzero()[0]\n\n # Perform inference and/or learning\n arg_8 = arg_0._tfdr.compute(arg_3, arg_0.learningMode, arg_0.inferenceMode)\n arg_0._sequencePos += 1\n\n # OR'ing together the cells in each column?\n if arg_0.orColumnOutputs:\n arg_8= arg_8.reshape(arg_0.columnCount,\n arg_0.cellsPerColumn).max(axis=1)\n\n # Direct logging of non-zero TM outputs\n if arg_0._fpLogTPOutput:\n arg_9 = arg_8.reshape(-1)\n arg_10 = arg_8.nonzero()[0]\n arg_11 = \" \".join([\"%d\" % int(token) for token in arg_10])\n print >>arg_0._fpLogTPOutput, arg_9.size, arg_11\n\n # Write the bottom up out to our node outputs\n arg_2['bottomUpOut'][:] = arg_8.flat\n\n if arg_0.topDownMode:\n # Top-down compute\n arg_2['topDownOut'][:] = arg_0._tfdr.topDownCompute().copy()\n\n # Set output for use with anomaly classification region if in anomalyMode\n if arg_0.anomalyMode:\n arg_12 = arg_0._tfdr.getLearnActiveStateT()\n arg_13 = arg_12.shape[0] * arg_12.shape[1]\n arg_2['lrnActiveStateT'][:] = arg_12.reshape(arg_13)\n\n arg_14 = arg_3.nonzero()[0]\n arg_2['anomalyScore'][:] = anomaly.computeRawAnomalyScore(\n arg_14, arg_7)\n\n if arg_0.computePredictedActiveCellIndices:\n # Reshape so we are dealing with 1D arrays\n arg_15 = arg_0._tfdr._getActiveState().reshape(-1).astype('float32')\n arg_16 = numpy.where(arg_15 != 0)[0]\n arg_17= numpy.where(arg_6 != 0)[0]\n arg_18 = numpy.intersect1d(arg_16, arg_17)\n arg_2[\"activeCells\"].fill(0)\n arg_2[\"activeCells\"][arg_16] = 1\n 
arg_2[\"predictedActiveCells\"].fill(0)\n arg_2[\"predictedActiveCells\"][arg_18] = 1"} +{"_id": "doc_1659", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Perform an internal optimization step that speeds up inference if we know\n learning will not be performed anymore. This call may, for example, remove\n all potential inputs to each column.\n \"\"\"\n if arg_0._tfdr is None:\n raise RuntimeError(\"Temporal memory has not been initialized\")\n\n if hasattr(arg_0._tfdr, 'Func'):\n arg_0.resetSequenceStates()\n arg_0._tfdr.Func()"} +{"_id": "doc_1660", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Computes the raw anomaly score.\n\n The raw anomaly score is the fraction of active columns not predicted.\n\n :param activeColumns: array of active column indices\n :param prevPredictedColumns: array of columns indices predicted in prev step\n :returns: anomaly score 0..1 (float)\n \"\"\"\n arg_2 = len(arg_0)\n if arg_2 > 0:\n # Test whether each element of a 1-D array is also present in a second\n # array. Sum to get the total # of columns that are active and were\n # predicted.\n arg_3 = numpy.in1d(arg_0, arg_1).sum()\n # Get the percent of active columns that were NOT predicted, that is\n # our anomaly score.\n arg_3 = (arg_2 - arg_3) / float(arg_2)\n else:\n # There are no active columns.\n arg_3 = 0.0\n\n return arg_3"} +{"_id": "doc_1661", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=None, arg_4=None):\n \"\"\"Compute the anomaly score as the percent of active columns not predicted.\n\n :param activeColumns: array of active column indices\n :param predictedColumns: array of columns indices predicted in this step\n (used for anomaly in step T+1)\n :param inputValue: (optional) value of current input to encoders\n (eg \"cat\" for category encoder)\n (used in anomaly-likelihood)\n :param timestamp: (optional) date timestamp when the sample occured\n (used in anomaly-likelihood)\n :returns: the Funcd anomaly score; float 0..1\n \"\"\"\n # Start by computing the raw anomaly score.\n arg_5 = FuncRawAnomalyScore(arg_1, arg_2)\n\n # Compute final anomaly based on selected mode.\n if arg_0._mode == Anomaly.MODE_PURE:\n arg_6 = arg_5\n elif arg_0._mode == Anomaly.MODE_LIKELIHOOD:\n if arg_3 is None:\n raise ValueError(\"Selected anomaly mode 'Anomaly.MODE_LIKELIHOOD' \"\n \"requires 'inputValue' as parameter to Func() method. \")\n\n arg_7 = arg_0._likelihood.anomalyProbability(\n arg_3, arg_5, arg_4)\n # low likelihood -> hi anomaly\n arg_6 = 1 - arg_7\n elif arg_0._mode == Anomaly.MODE_WEIGHTED:\n arg_7 = arg_0._likelihood.anomalyProbability(\n arg_3, arg_5, arg_4)\n arg_6 = arg_5 * (1 - arg_7)\n\n # Last, do moving-average if windowSize was specified.\n if arg_0._movingAverage is not None:\n arg_6 = arg_0._movingAverage.next(arg_6)\n\n # apply binary discretization if required\n if arg_0._binaryThreshold is not None:\n if arg_6 >= arg_0._binaryThreshold:\n arg_6 = 1.0\n else:\n arg_6 = 0.0\n\n return arg_6"} +{"_id": "doc_1662", "title": "", "text": "def Func(arg_0, arg_1, arg_2=111, arg_3=None, arg_4=None, arg_5=None,\n arg_6=\"auto\", arg_7=\"nearest\", arg_8=None):\n \"\"\" Adds an image to the plot's figure.\n\n @param data a 2D array. See matplotlib.Axes.imshow documentation.\n @param position A 3-digit number. The first two digits define a 2D grid\n where subplots may be added. 
The final digit specifies the nth grid\n location for the added subplot\n @param xlabel text to be displayed on the x-axis\n @param ylabel text to be displayed on the y-axis\n @param cmap color map used in the rendering\n @param aspect how aspect ratio is handled during resize\n @param interpolation interpolation method\n \"\"\"\n if arg_5 is None:\n # The default colormodel is an ugly blue-red model.\n arg_5 = cm.Greys\n\n arg_9 = arg_0._addBase(arg_2, arg_3=arg_3, arg_4=arg_4)\n arg_9.imshow(arg_1, arg_5=arg_5, arg_6=arg_6, arg_7=arg_7)\n\n if arg_0._show:\n plt.draw()\n\n if arg_8 is not None:\n if not os.path.exists(\"log\"):\n os.mkdir(\"log\")\n plt.savefig(\"log/{name}.png\".format(arg_8=arg_8), bbox_inches=\"tight\",\n figsize=(8, 6), dpi=400)"} +{"_id": "doc_1663", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\" Adds a subplot to the plot's figure at specified position.\n\n @param position A 3-digit number. The first two digits define a 2D grid\n where subplots may be added. The final digit specifies the nth grid\n location for the added subplot\n @param xlabel text to be displayed on the x-axis\n @param ylabel text to be displayed on the y-axis\n @returns (matplotlib.Axes) Axes instance\n \"\"\"\n arg_4 = arg_0._fig.add_subplot(arg_1)\n arg_4.set_xlabel(arg_2)\n arg_4.set_ylabel(arg_3)\n return arg_4"} +{"_id": "doc_1664", "title": "", "text": "def Func():\n \"\"\"\n Get version from local file.\n \"\"\"\n with open(os.path.join(REPO_DIR, \"VERSION\"), \"r\") as versionFile:\n return versionFile.read().strip()"} +{"_id": "doc_1665", "title": "", "text": "def Func():\n \"\"\"\n Make an attempt to determine if a pre-release version of nupic.bindings is\n installed already.\n\n @return: boolean\n \"\"\"\n try:\n arg_0 = pkg_resources.get_distribution(\"nupic.bindings\")\n if pkg_resources.parse_version(arg_0.version).is_prerelease:\n # A pre-release dev version of nupic.bindings is installed.\n return True\n except pkg_resources.DistributionNotFound:\n pass # Silently ignore. The absence of nupic.bindings will be handled by\n # setuptools by default\n\n return False"} +{"_id": "doc_1666", "title": "", "text": "def Func():\n \"\"\"\n Read the requirements.txt file and parse into requirements for setup's\n install_requirements option.\n \"\"\"\n arg_0 = os.path.join(REPO_DIR, \"requirements.txt\")\n arg_1 = parse_file(arg_0)\n\n if nupicBindingsPrereleaseInstalled():\n # User has a pre-release version of nupic.bindings installed, which is only\n # possible if the user installed and built nupic.bindings from source and\n # it is up to the user to decide when to update nupic.bindings. We'll\n # quietly remove the entry in requirements.txt so as to not conflate the\n # two.\n arg_1 = [req for req in arg_1 if \"nupic.bindings\" not in req]\n\n return arg_1"} +{"_id": "doc_1667", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None, arg_3=None,\n arg_4=False):\n \"\"\" Generates the string representation of a MetricSpec object, and returns\n the metric key associated with the metric.\n\n\n Parameters:\n -----------------------------------------------------------------------\n inferenceElement:\n An InferenceElement value that indicates which part of the inference this\n metric is computed on\n\n metric:\n The type of the metric being computed (e.g. aae, avg_error)\n\n params:\n A dictionary of parameters for the metric. The keys are the parameter names\n and the values should be the parameter values (e.g. 
window=200)\n\n field:\n The name of the field for which this metric is being computed\n\n returnLabel:\n If True, returns the label of the MetricSpec that was generated\n \"\"\"\n\n arg_5 = dict(arg_1=arg_1,\n arg_3=arg_3,\n arg_2=arg_2,\n arg_0=arg_0)\n\n arg_6 = \"MetricSpec(%s)\" % \\\n ', '.join(['%s=%r' % (item[0],item[1])\n for item in arg_5.iteritems()])\n\n if not arg_4:\n return arg_6\n\n arg_7 = MetricSpec(**arg_5)\n arg_8 = arg_7.getLabel()\n return arg_6, arg_8"} +{"_id": "doc_1668", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2):\n \"\"\" Generates a file by applying token replacements to the given template\n file\n\n templateFileName:\n A list of template file names; these files are assumed to be in\n the same directory as the running experiment_generator.py script.\n ExpGenerator will perform the substitution and concanetate\n the files in the order they are specified\n\n outputFilePath: Absolute path of the output file\n\n replacementDict:\n A dictionary of token/replacement pairs\n \"\"\"\n\n # Find out where we're running from so we know where to find templates\n arg_3 = os.path.dirname(__file__)\n arg_4 = open(arg_1, \"w\")\n arg_5 = []\n arg_6 = []\n\n arg_7 = True\n for arg_8 in arg_0:\n # Separate lines from each file by two blank lines.\n if not arg_7:\n arg_6.extend([os.linesep]*2)\n arg_7 = False\n\n arg_9 = os.path.join(arg_3, arg_8)\n arg_10 = open(arg_9)\n arg_6.extend(arg_10.readlines())\n arg_10.close()\n\n\n print \"Writing \", len(arg_6), \"lines...\"\n\n for arg_11 in arg_6:\n arg_12 = arg_11\n\n # Enumerate through each key in replacementDict and replace with value\n for arg_13, arg_14 in arg_2.iteritems():\n if arg_14 is None:\n arg_14 = \"None\"\n arg_12 = re.sub(arg_13, arg_14, arg_12)\n arg_4.write(arg_12)\n arg_4.close()"} +{"_id": "doc_1669", "title": "", "text": "def Func():\n \"\"\"\n Returns the experiment description schema. 
This implementation loads it in\n from file experimentDescriptionSchema.json.\n\n Parameters:\n --------------------------------------------------------------------------\n Returns: returns a dict representing the experiment description schema.\n \"\"\"\n arg_0 = os.path.dirname(os.path.abspath(__file__))\n arg_1 = os.path.join(arg_0, \"experimentDescriptionSchema.json\")\n return json.loads(open(arg_1, 'r').read())"} +{"_id": "doc_1670", "title": "", "text": "def Func(arg_0):\n \"\"\"Generates the non-default metrics specified by the expGenerator params \"\"\"\n arg_1 = {'properties': {}}\n\n arg_2 = []\n for arg_3 in arg_0['metrics']:\n\n for arg_4 in arg_1['properties'].keys():\n _getPropertyValue(arg_1, arg_4, arg_3)\n\n\n arg_5, arg_6 = _generateMetricSpecString(\n field=arg_3['field'],\n arg_3=arg_3['metric'],\n params=arg_3['params'],\n inferenceElement=\\\n arg_3['inferenceElement'],\n returnLabel=True)\n if arg_3['logged']:\n arg_0['loggedMetrics'].append(arg_6)\n\n arg_2.append(arg_5)\n\n return arg_2"} +{"_id": "doc_1671", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Generates the token substitutions related to the predicted field\n and the supplemental arguments for prediction\n \"\"\"\n arg_2 = arg_0['inferenceType']\n arg_3 = arg_0.get('inferenceArgs', None)\n arg_4 = {}\n arg_5 = _getPredictedField(arg_0)[0]\n\n if arg_2 in (InferenceType.TemporalNextStep,\n InferenceType.TemporalAnomaly):\n assert arg_5, \"Inference Type '%s' needs a predictedField \"\\\n \"specified in the inferenceArgs dictionary\"\\\n % arg_2\n\n if arg_3:\n # If we will be using a dynamically created predictionSteps, plug in that\n # variable name in place of the constant scalar value\n if arg_0['dynamicPredictionSteps']:\n arg_6 = copy.deepcopy(arg_3)\n arg_6['predictionSteps'] = '$REPLACE_ME'\n arg_4 = pprint.pformat(arg_6)\n arg_4 = arg_4.replace(\"'$REPLACE_ME'\",\n '[predictionSteps]')\n else:\n arg_4 = pprint.pformat(arg_3)\n\n arg_1['\\$INFERENCE_ARGS'] = arg_4\n\n arg_1['\\$PREDICTION_FIELD'] = arg_5"} +{"_id": "doc_1672", "title": "", "text": "def Func(arg_0):\n \"\"\" Parses, validates, and executes command-line options;\n\n On success: Performs requested operation and exits program normally\n\n On Error: Dumps exception/error info in JSON format to stdout and exits the\n program with non-zero status.\n \"\"\"\n\n # -----------------------------------------------------------------\n # Parse command line options\n #\n arg_1 = OptionParser()\n arg_1.set_usage(\"%prog [options] --description='{json object with args}'\\n\" + \\\n \"%prog [options] --descriptionFromFile='{filename}'\\n\" + \\\n \"%prog [options] --showSchema\")\n\n arg_1.add_option(\"--description\", dest = \"description\",\n help = \"Tells ExpGenerator to generate an experiment description.py and \" \\\n \"permutations.py file using the given JSON formatted experiment \"\\\n \"description string.\")\n\n arg_1.add_option(\"--descriptionFromFile\", dest = 'descriptionFromFile',\n help = \"Tells ExpGenerator to open the given filename and use it's \" \\\n \"contents as the JSON formatted experiment description.\")\n\n arg_1.add_option(\"--claDescriptionTemplateFile\",\n dest = 'claDescriptionTemplateFile',\n default = 'claDescriptionTemplate.tpl',\n help = \"The file containing the template description file for \" \\\n \" ExpGenerator [default: %default]\")\n\n arg_1.add_option(\"--showSchema\",\n action=\"store_true\", dest=\"showSchema\",\n help=\"Prints the JSON schemas for the --description arg.\")\n\n 
arg_1.add_option(\"--version\", dest = 'version', default='v2',\n help = \"Generate the permutations file for this version of hypersearch.\"\n \" Possible choices are 'v1' and 'v2' [default: %default].\")\n\n arg_1.add_option(\"--outDir\",\n dest = \"outDir\", default=None,\n help = \"Where to generate experiment. If not specified, \" \\\n \"then a temp directory will be created\"\n )\n (arg_2, arg_3) = arg_1.parse_args(arg_0)\n\n #print(\"OPTIONS=%s\" % (str(options)))\n\n # -----------------------------------------------------------------\n # Check for unprocessed args\n #\n if len(arg_3) > 0:\n raise _InvalidCommandArgException(\n _makeUsageErrorStr(\"Unexpected command-line args: <%s>\" % \\\n (' '.join(arg_3),), arg_1.get_usage()))\n\n # -----------------------------------------------------------------\n # Check for use of mutually-exclusive options\n #\n arg_4 = filter(lambda x: getattr(arg_2, x) != None,\n ('description', 'showSchema'))\n if len(arg_4) > 1:\n raise _InvalidCommandArgException(\n _makeUsageErrorStr((\"The specified command options are \" + \\\n \"mutually-exclusive: %s\") % (arg_4,),\n arg_1.get_usage()))\n\n\n\n # -----------------------------------------------------------------\n # Process requests\n #\n if arg_2.showSchema:\n _handleShowSchemaOption()\n\n elif arg_2.description:\n _handleDescriptionOption(arg_2.description, arg_2.outDir,\n arg_1.get_usage(), hsVersion=arg_2.version,\n claDescriptionTemplateFile = arg_2.claDescriptionTemplateFile)\n\n elif arg_2.descriptionFromFile:\n _handleDescriptionFromFileOption(arg_2.descriptionFromFile,\n arg_2.outDir, arg_1.get_usage(), hsVersion=arg_2.version,\n claDescriptionTemplateFile = arg_2.claDescriptionTemplateFile)\n\n else:\n raise _InvalidCommandArgException(\n _makeUsageErrorStr(\"Error in validating command options. No option \"\n \"provided:\\n\", arg_1.get_usage()))"} +{"_id": "doc_1673", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parses a textual datetime format and return a Python datetime object.\n\n The supported format is: ``yyyy-mm-dd h:m:s.ms``\n\n The time component is optional.\n\n - hours are 00..23 (no AM/PM)\n - minutes are 00..59\n - seconds are 00..59\n - micro-seconds are 000000..999999\n\n :param s: (string) input time text\n :return: (datetime.datetime)\n \"\"\"\n arg_0 = arg_0.strip()\n for arg_1 in DATETIME_FORMATS:\n try:\n return datetime.datetime.strptime(arg_0, arg_1)\n except ValueError:\n pass\n raise ValueError('The provided timestamp %s is malformed. The supported '\n 'formats are: [%s]' % (arg_0, ', '.join(DATETIME_FORMATS)))"} +{"_id": "doc_1674", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Translate an index into coordinates, using the given coordinate system.\n\n Similar to ``numpy.unravel_index``.\n\n :param index: (int) The index of the point. The coordinates are expressed as a \n single index by using the dimensions as a mixed radix definition. 
For \n example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.\n\n :param dimensions (list of ints) The coordinate system.\n\n :returns: (list) of coordinates of length ``len(dimensions)``.\n \"\"\"\n arg_2 = [0] * len(arg_1)\n\n arg_3 = arg_0\n for arg_4 in xrange(len(arg_1) - 1, 0, -1):\n arg_2[arg_4] = arg_3 % arg_1[arg_4]\n arg_3 = arg_3 / arg_1[arg_4]\n\n arg_2[0] = arg_3\n\n return arg_2"} +{"_id": "doc_1675", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Translate coordinates into an index, using the given coordinate system.\n\n Similar to ``numpy.ravel_multi_index``.\n\n :param coordinates: (list of ints) A list of coordinates of length \n ``dimensions.size()``.\n\n :param dimensions: (list of ints) The coordinate system.\n\n :returns: (int) The index of the point. The coordinates are expressed as a \n single index by using the dimensions as a mixed radix definition. \n For example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.\n \"\"\"\n arg_2 = 0\n for arg_3, arg_4 in enumerate(arg_1):\n arg_2 *= arg_4\n arg_2 += arg_0[arg_3]\n\n return arg_2"} +{"_id": "doc_1676", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the points in the Func of a point.\n\n A point's Func is the n-dimensional hypercube with sides ranging\n [center - radius, center + radius], inclusive. For example, if there are two\n dimensions and the radius is 3, the Func is 6x6. Neighborhoods are\n truncated when they are near an edge.\n\n This is designed to be fast. In C++ it's fastest to iterate through neighbors\n one by one, calculating them on-demand rather than creating a list of them.\n But in Python it's faster to build up the whole list in batch via a few calls\n to C code rather than calculating them on-demand with lots of calls to Python\n code.\n\n :param centerIndex: (int) The index of the point. The coordinates are \n expressed as a single index by using the dimensions as a mixed radix \n definition. 
For example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.\n\n :param radius: (int) The radius of this Func about the \n ``centerIndex``.\n\n :param dimensions: (indexable sequence) The dimensions of the world outside \n this Func.\n\n :returns: (numpy array) The points in the Func, including \n ``centerIndex``.\n \"\"\"\n arg_3 = coordinatesFromIndex(arg_0, arg_2)\n\n arg_4 = []\n for arg_5, arg_6 in enumerate(arg_2):\n arg_7 = max(0, arg_3[arg_5] - arg_1)\n arg_8 = min(arg_6 - 1, arg_3[arg_5] + arg_1)\n arg_4.append(xrange(arg_7, arg_8 + 1))\n\n arg_9 = numpy.array(list(itertools.product(*arg_4)))\n return numpy.ravel_multi_index(arg_9.T, arg_2)"} +{"_id": "doc_1677", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns coordinates around given coordinate, within given radius.\n Includes given coordinate.\n\n @param coordinate (numpy.array) N-dimensional integer coordinate\n @param radius (int) Radius around `coordinate`\n\n @return (numpy.array) List of coordinates\n \"\"\"\n arg_2 = (xrange(n-arg_1, n+arg_1+1) for n in arg_0.tolist())\n return numpy.array(list(itertools.product(*arg_2)))"} +{"_id": "doc_1678", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns the top W coordinates by order.\n\n @param coordinates (numpy.array) A 2D numpy array, where each element\n is a coordinate\n @param w (int) Number of top coordinates to return\n @return (numpy.array) A subset of `coordinates`, containing only the\n top ones by order\n \"\"\"\n arg_3 = numpy.array([arg_0._orderForCoordinate(c)\n for c in arg_1.tolist()])\n arg_4 = numpy.argsort(arg_3)[-arg_2:]\n return arg_1[arg_4]"} +{"_id": "doc_1679", "title": "", "text": "def Func(arg_0):\n \"\"\"Hash a coordinate to a 64 bit integer.\"\"\"\n arg_1 = \",\".join(str(v) for v in arg_0)\n # Compute the hash and convert to 64 bit int.\n arg_2 = int(int(hashlib.md5(arg_1).hexdigest(), 16) % (2 ** 64))\n return arg_2"} +{"_id": "doc_1680", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Maps the coordinate to a bit in the SDR.\n\n @param coordinate (numpy.array) Coordinate\n @param n (int) The number of available bits in the SDR\n @return (int) The index to a bit in the SDR\n \"\"\"\n arg_3 = arg_0._hashCoordinate(arg_1)\n arg_4 = Random(arg_3)\n return arg_4.getUInt32(arg_2)"} +{"_id": "doc_1681", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" \n Function for running binary search on a sorted list.\n\n :param arr: (list) a sorted list of integers to search\n :param val: (int) a integer to search for in the sorted array\n :returns: (int) the index of the element if it is found and -1 otherwise.\n \"\"\"\n arg_2 = bisect_left(arg_0, arg_1)\n if arg_2 != len(arg_0) and arg_0[arg_2] == arg_1:\n return arg_2\n return -1"} +{"_id": "doc_1682", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" \n Adds a new segment on a cell.\n\n :param cell: (int) Cell index\n :returns: (int) New segment index\n \"\"\"\n arg_2 = arg_0._cells[arg_1]\n\n if len(arg_0._freeFlatIdxs) > 0:\n arg_3 = arg_0._freeFlatIdxs.pop()\n else:\n arg_3 = arg_0._nextFlatIdx\n arg_0._segmentForFlatIdx.append(None)\n arg_0._nextFlatIdx += 1\n\n arg_4 = arg_0._nextSegmentOrdinal\n arg_0._nextSegmentOrdinal += 1\n\n arg_5 = Segment(arg_1, arg_3, arg_4)\n arg_2._segments.append(arg_5)\n arg_0._segmentForFlatIdx[arg_3] = arg_5\n\n return arg_5"} +{"_id": "doc_1683", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Destroys a segment.\n\n :param segment: (:class:`Segment`) representing the segment to 
be destroyed.\n \"\"\"\n # Remove the synapses from all data structures outside this Segment.\n for arg_2 in arg_1._synapses:\n arg_0._removeSynapseFromPresynapticMap(arg_2)\n arg_0._numSynapses -= len(arg_1._synapses)\n\n # Remove the segment from the cell's list.\n arg_3 = arg_0._cells[arg_1.cell]._segments\n arg_4 = arg_3.index(arg_1)\n del arg_3[arg_4]\n\n # Free the flatIdx and remove the final reference so the Segment can be\n # garbage-collected.\n arg_0._freeFlatIdxs.append(arg_1.flatIdx)\n arg_0._segmentForFlatIdx[arg_1.flatIdx] = None"} +{"_id": "doc_1684", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" \n Creates a new synapse on a segment.\n\n :param segment: (:class:`Segment`) Segment object for synapse to be synapsed \n to.\n :param presynapticCell: (int) Source cell index.\n :param permanence: (float) Initial permanence of synapse.\n :returns: (:class:`Synapse`) created synapse\n \"\"\"\n arg_4 = len(arg_1._synapses)\n arg_5 = Synapse(arg_1, arg_2, arg_3,\n arg_0._nextSynapseOrdinal)\n arg_0._nextSynapseOrdinal += 1\n arg_1._synapses.add(arg_5)\n\n arg_0._synapsesForPresynapticCell[arg_2].add(arg_5)\n\n arg_0._numSynapses += 1\n\n return arg_5"} +{"_id": "doc_1685", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Destroys a synapse.\n\n :param synapse: (:class:`Synapse`) synapse to destroy\n \"\"\"\n\n arg_0._numSynapses -= 1\n\n arg_0._removeSynapseFromPresynapticMap(arg_1)\n\n arg_1.segment._synapses.remove(arg_1)"} +{"_id": "doc_1686", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" \n Compute each segment's number of active synapses for a given input.\n In the returned lists, a segment's active synapse count is stored at index\n ``segment.flatIdx``.\n\n :param activePresynapticCells: (iter) Active cells.\n :param connectedPermanence: (float) Permanence threshold for a synapse to be \n considered connected\n\n :returns: (tuple) (``numActiveConnectedSynapsesForSegment`` [list],\n ``numActivePotentialSynapsesForSegment`` [list])\n \"\"\"\n\n arg_3 = [0] * arg_0._nextFlatIdx\n arg_4 = [0] * arg_0._nextFlatIdx\n\n arg_5 = arg_2 - EPSILON\n\n for arg_6 in arg_1:\n for arg_7 in arg_0._synapsesForPresynapticCell[arg_6]:\n arg_8 = arg_7.segment.flatIdx\n arg_4[arg_8] += 1\n if arg_7.permanence > arg_5:\n arg_3[arg_8] += 1\n\n return (arg_3,\n arg_4)"} +{"_id": "doc_1687", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" \n Returns the number of segments.\n\n :param cell: (int) Optional parameter to get the number of segments on a \n cell.\n :returns: (int) Number of segments on all cells if cell is not specified, or \n on a specific specified cell\n \"\"\"\n if arg_1 is not None:\n return len(arg_0._cells[arg_1]._segments)\n\n return arg_0._nextFlatIdx - len(arg_0._freeFlatIdxs)"} +{"_id": "doc_1688", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" \n Reads deserialized data from proto object\n\n :param proto: (DynamicStructBuilder) Proto object\n\n :returns: (:class:`Connections`) instance\n \"\"\"\n #pylint: disable=W0212\n arg_2 = arg_1.cells\n arg_3 = arg_0(len(arg_2))\n\n for arg_4, arg_5 in enumerate(arg_2):\n arg_5 = arg_2[arg_4]\n arg_6 = arg_5.segments\n arg_3._cells[arg_4] = CellData()\n arg_8 = arg_3._cells[arg_4]._segments\n\n for arg_9, arg_10 in enumerate(arg_6):\n arg_11 = Segment(arg_4, arg_3._nextFlatIdx,\n arg_3._nextSegmentOrdinal)\n\n arg_8.append(arg_11)\n arg_3._segmentForFlatIdx.append(arg_11)\n arg_3._nextFlatIdx += 1\n arg_3._nextSegmentOrdinal += 1\n\n arg_12 = arg_11._synapses\n arg_13 = 
arg_10.synapses\n\n for arg_14, arg_15 in enumerate(arg_13):\n arg_16 = arg_15.presynapticCell\n arg_17 = Synapse(arg_11, arg_16, arg_15.permanence,\n ordinal=arg_3._nextSynapseOrdinal)\n arg_3._nextSynapseOrdinal += 1\n arg_12.add(arg_17)\n arg_3._synapsesForPresynapticCell[arg_16].add(arg_17)\n\n arg_3._numSynapses += 1\n\n #pylint: enable=W0212\n return arg_3"} +{"_id": "doc_1689", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Retrieve the requested property as a string. If property does not exist,\n then KeyError will be raised.\n\n :param prop: (string) name of the property\n :raises: KeyError\n :returns: (string) property value\n \"\"\"\n if arg_0._properties is None:\n arg_0._readStdConfigFiles()\n\n # Allow configuration properties to be overridden via environment variables\n arg_2 = os.environ.get(\"%s%s\" % (arg_0.envPropPrefix,\n arg_1.replace('.', '_')), None)\n if arg_2 is not None:\n return arg_2\n\n return arg_0._properties[arg_1]"} +{"_id": "doc_1690", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Retrieve the requested property and return it as a bool. If property\n does not exist, then KeyError will be raised. If the property value is\n neither 0 nor 1, then ValueError will be raised\n\n :param prop: (string) name of the property\n :raises: KeyError, ValueError\n :returns: (bool) property value\n \"\"\"\n\n arg_2 = arg_0.getInt(arg_1)\n\n if arg_2 not in (0, 1):\n raise ValueError(\"Expected 0 or 1, but got %r in config property %s\" % (\n arg_2, arg_1))\n\n return bool(arg_2)"} +{"_id": "doc_1691", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Set the value of the given configuration property.\n\n :param prop: (string) name of the property\n :param value: (object) value to Func\n \"\"\"\n\n if arg_0._properties is None:\n arg_0._readStdConfigFiles()\n\n arg_0._properties[arg_1] = str(arg_2)"} +{"_id": "doc_1692", "title": "", "text": "def Func(arg_0):\n \"\"\" Return a Func containing all of the configuration properties\n\n :returns: (Func) containing all configuration properties.\n \"\"\"\n\n if arg_0._properties is None:\n arg_0._readStdConfigFiles()\n\n # Make a copy so we can update any current values obtained from environment\n # variables\n arg_1 = Func(arg_0._properties)\n arg_2 = os.environ.keys()\n arg_3 = filter(lambda x: x.startswith(arg_0.envPropPrefix),\n arg_2)\n for arg_4 in arg_3:\n arg_5 = arg_4[len(arg_0.envPropPrefix):]\n arg_5 = arg_5.replace('_', '.')\n arg_1[arg_5] = os.environ[arg_4]\n\n return arg_1"} +{"_id": "doc_1693", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Parse the given XML file and store all properties it describes.\n\n :param filename: (string) name of XML file to parse (no path)\n :param path: (string) path of the XML file. 
If None, then use the standard\n configuration search path.\n \"\"\"\n arg_3 = arg_0._Func(arg_1, arg_2)\n\n # Create properties dict if necessary\n if arg_0._properties is None:\n arg_0._properties = dict()\n\n for arg_5 in arg_3:\n if 'value' in arg_3[arg_5]:\n arg_0._properties[arg_5] = arg_3[arg_5]['value']"} +{"_id": "doc_1694", "title": "", "text": "def Func(arg_0):\n \"\"\" Return the list of paths to search for configuration files.\n\n :returns: (list) of paths\n \"\"\"\n arg_1 = []\n if arg_0._configPaths is not None:\n return arg_0._configPaths\n\n else:\n if 'NTA_CONF_PATH' in os.environ:\n arg_2 = os.environ['NTA_CONF_PATH']\n # Return as a list of paths\n arg_1 = arg_2.split(os.pathsep)\n\n return arg_1"} +{"_id": "doc_1695", "title": "", "text": "def Func(arg_0=100, arg_1=500, arg_2=50):\n \"\"\"\n Generate a list of random sparse distributed vectors. This is used to generate\n training vectors to the spatial or temporal learner and to compare the predicted\n output against.\n\n It generates a list of 'numVectors' elements, each element has length 'length'\n and has a total of 'activity' bits on.\n\n Parameters:\n -----------------------------------------------\n numVectors: the number of vectors to generate\n length: the length of each row\n activity: the number of ones to put into each row.\n\n \"\"\"\n\n arg_3 = []\n arg_4 = numpy.zeros(arg_1, dtype='int32')\n arg_5 = range(arg_1)\n\n for arg_6 in xrange(arg_0):\n arg_4[:] = 0\n arg_4[arg_7.sample(arg_5, arg_2)] = 1\n arg_3.append(arg_4.copy())\n\n return arg_3"} +{"_id": "doc_1696", "title": "", "text": "def Func(arg_0=10, arg_1=[5,6,7], arg_2=100):\n \"\"\"\n Generate a set of simple sequences. The elements of the sequences will be\n integers from 0 to 'nCoinc'-1. The length of each sequence will be\n randomly chosen from the 'seqLength' list.\n\n Parameters:\n -----------------------------------------------\n nCoinc: the number of elements available to use in the sequences\n seqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\n nSeq: The number of sequences to generate\n\n retval: a list of sequences. Each sequence is itself a list\n containing the coincidence indices for that sequence.\n \"\"\"\n\n arg_3 = range(arg_0)\n arg_4 = []\n\n for arg_5 in xrange(arg_2):\n if max(arg_1) <= arg_0:\n arg_4.append(random.sample(arg_3, random.choice(arg_1)))\n else:\n arg_6 = random.choice(arg_1)\n arg_7 = []\n for arg_8 in xrange(arg_6):\n arg_7.append(random.choice(arg_3))\n arg_4.append(arg_7)\n\n return arg_4"} +{"_id": "doc_1697", "title": "", "text": "def Func(arg_0=10, arg_1 = [2,6], arg_2=[5,6,7], arg_3=100):\n \"\"\"\n Generate a set of hub sequences. These are sequences which contain a hub\n element in the middle. The elements of the sequences will be integers\n from 0 to 'nCoinc'-1. The hub elements will only appear in the middle of\n each sequence. The length of each sequence will be randomly chosen from the\n 'seqLength' list.\n\n Parameters:\n -----------------------------------------------\n nCoinc: the number of elements available to use in the sequences\n hubs: which of the elements will be used as hubs.\n seqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\n nSeq: The number of sequences to generate\n\n retval: a list of sequences. 
Each sequence is itself a list\n containing the coincidence indices for that sequence.\n \"\"\"\n\n\n arg_4 = range(arg_0)\n for arg_5 in arg_1:\n arg_4.remove(arg_5)\n\n arg_6 = []\n for arg_7 in xrange(arg_3):\n arg_8 = random.choice(arg_2)-1\n arg_9 = random.sample(arg_4,arg_8)\n arg_9.insert(arg_8//2, random.choice(arg_1))\n arg_6.append(arg_9)\n\n return arg_6"} +{"_id": "doc_1698", "title": "", "text": "def Func(arg_0=10, arg_1=500, arg_2=50):\n \"\"\"\n Generate a non overlapping coincidence matrix. This is used to generate random\n inputs to the temporal learner and to compare the predicted output against.\n\n It generates a matrix of nCoinc rows, each row has length 'length' and has\n a total of 'activity' bits on.\n\n Parameters:\n -----------------------------------------------\n nCoinc: the number of rows to generate\n length: the length of each row\n activity: the number of ones to put into each row.\n\n \"\"\"\n assert arg_0*arg_2<=arg_1, \"can't generate non-overlapping coincidences\"\n arg_3 = SM32(0, arg_1)\n arg_4 = numpy.zeros(arg_1, dtype='int32')\n\n for arg_5 in xrange(arg_0):\n arg_4[:] = 0\n arg_4[arg_5*arg_2:(arg_5+1)*arg_2] = 1\n arg_3.addRow(arg_4)\n\n return arg_3"} +{"_id": "doc_1699", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Function that compares two spatial pooler instances. Compares the\n static variables between the two poolers to make sure that they are equivalent.\n\n Parameters\n -----------------------------------------\n SP1 first spatial pooler to be compared\n\n SP2 second spatial pooler to be compared\n\n To establish equality, this function does the following:\n\n 1.Compares the connected synapse matrices for each coincidence\n\n 2.Compare the potential synapse matrices for each coincidence\n\n 3.Compare the permanence matrices for each coincidence\n\n 4.Compare the firing boosts between the two poolers.\n\n 5.Compare the duty cycles before and after inhibition for both poolers\n\n \"\"\"\n if(len(arg_0._masterConnectedM)!=len(arg_1._masterConnectedM)):\n print \"Connected synapse matrices are different sizes\"\n return False\n\n if(len(arg_0._masterPotentialM)!=len(arg_1._masterPotentialM)):\n print \"Potential synapse matrices are different sizes\"\n return False\n\n if(len(arg_0._masterPermanenceM)!=len(arg_1._masterPermanenceM)):\n print \"Permanence matrices are different sizes\"\n return False\n\n\n #iterate over cells\n for arg_2 in range(0,len(arg_0._masterConnectedM)):\n #grab the Coincidence Matrices and compare them\n arg_3 = arg_0._masterConnectedM[arg_2]\n arg_4 = arg_1._masterConnectedM[arg_2]\n if(arg_3!=arg_4):\n print \"Connected Matrices for cell %d different\" % (arg_2)\n return False\n #grab permanence Matrices and compare them\n arg_5 = arg_0._masterPermanenceM[arg_2];\n arg_6 = arg_1._masterPermanenceM[arg_2];\n if(arg_5!=arg_6):\n print \"Permanence Matrices for cell %d different\" % (arg_2)\n return False\n #grab the potential connection Matrices and compare them\n arg_7 = arg_0._masterPotentialM[arg_2];\n arg_8 = arg_1._masterPotentialM[arg_2];\n if(arg_7!=arg_8):\n print \"Potential Matrices for cell %d different\" % (arg_2)\n return False\n\n #Check firing boosts\n if(not numpy.array_equal(arg_0._firingBoostFactors,arg_1._firingBoostFactors)):\n print \"Firing boost factors are different between spatial poolers\"\n return False\n\n #Check duty cycles after inhibiton\n if(not numpy.array_equal(arg_0._dutyCycleAfterInh,arg_1._dutyCycleAfterInh)):\n print \"Duty cycles after inhibition are different between 
spatial poolers\"\n return False\n\n\n #Check duty cycles before inhibition\n if(not numpy.array_equal(arg_0._dutyCycleBeforeInh,arg_1._dutyCycleBeforeInh)):\n print \"Duty cycles before inhibition are different between spatial poolers\"\n return False\n\n\n print(\"Spatial Poolers are equivalent\")\n\n return True"} +{"_id": "doc_1700", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Accumulate a list of values 'values' into the frequency counts 'freqCounts',\n and return the updated frequency counts\n\n For example, if values contained the following: [1,1,3,5,1,3,5], and the initial\n freqCounts was None, then the return value would be:\n [0,3,0,2,0,2]\n which corresponds to how many of each value we saw in the input, i.e. there\n were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.\n\n If freqCounts is not None, the values will be added to the existing counts and\n the length of the frequency Counts will be automatically extended as necessary\n\n Parameters:\n -----------------------------------------------\n values: The values to accumulate into the frequency counts\n freqCounts: Accumulated frequency counts so far, or none\n \"\"\"\n\n # How big does our freqCounts vector need to be?\n arg_0 = numpy.array(arg_0)\n arg_2 = arg_0.max() + 1\n if arg_1 is not None:\n arg_2 = max(arg_2, arg_1.size)\n\n # Where do we accumulate the results?\n if arg_1 is not None:\n if arg_1.size != arg_2:\n arg_3 = numpy.zeros(arg_2, dtype='int32')\n arg_3[0:arg_1.size] = arg_1\n else:\n arg_3 = arg_1\n else:\n arg_3 = numpy.zeros(arg_2, dtype='int32')\n\n # Accumulate the new values\n for arg_5 in arg_0:\n arg_3[arg_5] += 1\n\n return arg_3"} +{"_id": "doc_1701", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Helper function used by averageOnTimePerTimestep. 'durations' is a vector\n which must be the same len as vector. For each \"on\" in vector, it fills in\n the corresponding element of duration with the duration of that \"on\" signal\n up until that time\n\n Parameters:\n -----------------------------------------------\n vector: vector of output values over time\n durations: vector same length as 'vector', initialized to 0's.\n This is filled in with the durations of each 'on\" signal.\n\n Example:\n vector: 11100000001100000000011111100000\n durations: 12300000001200000000012345600000\n \"\"\"\n\n # Find where the nonzeros are\n arg_2 = numpy.array(arg_0).nonzero()[0]\n\n # Nothing to do if vector is empty\n if len(arg_2) == 0:\n return\n\n # Special case of only 1 on bit\n if len(arg_2) == 1:\n arg_1[arg_2[0]] = 1\n return\n\n # Count the consecutive non-zeros\n arg_3 = arg_2[0]\n arg_4 = 1\n arg_5 = arg_3\n arg_6 = arg_2[-1]\n for arg_7 in arg_2[1:]:\n if arg_7 != arg_3+1:\n # Fill in the durations\n arg_1[arg_5:arg_5+arg_4] = range(1,arg_4+1)\n arg_4 = 1\n arg_5 = arg_7\n else:\n arg_4 += 1\n arg_3 = arg_7\n\n # Fill in the last one\n arg_1[arg_5:arg_5+arg_4] = range(1,arg_4+1)"} +{"_id": "doc_1702", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Computes the average on-time of the outputs that are on at each time step, and\n then averages this over all time steps.\n\n This metric is resiliant to the number of outputs that are on at each time\n step. That is, if time step 0 has many more outputs on than time step 100, it\n won't skew the results. 
This is particularly useful when measuring the\n average on-time of things like the temporal memory output where you might\n have many columns bursting at the start of a sequence - you don't want those\n start of sequence bursts to over-influence the calculated average on-time.\n\n Parameters:\n -----------------------------------------------\n vectors: the vectors for which the onTime is calculated. Row 0\n contains the outputs from time step 0, row 1 from time step\n 1, etc.\n numSamples: the number of elements for which on-time is calculated.\n If not specified, then all elements are looked at.\n\n Returns (scalar average on-time over all time steps,\n list containing frequency counts of each encountered on-time)\n\n \"\"\"\n\n\n # Special case given a 1 dimensional vector: it represents a single column\n if arg_0.ndim == 1:\n arg_0.shape = (-1,1)\n arg_3 = len(arg_0)\n arg_4 = len(arg_0[0])\n\n # How many samples will we look at?\n if arg_1 is not None:\n import pdb; pdb.set_trace() # Test this....\n arg_5 = numpy.random.randint(0, arg_4, arg_1)\n arg_0 = arg_0[:, arg_5]\n\n # Fill in each non-zero of vectors with the on-time that that output was\n # on for.\n arg_6 = numpy.zeros(arg_0.shape, dtype='int32')\n for arg_7 in xrange(arg_0.shape[1]):\n _fillInOnTimes(arg_0[:,arg_7], arg_6[:,arg_7])\n\n # Compute the average on time for each time step\n arg_8 = arg_0.sum(axis=1)\n arg_8.clip(min=1, max=numpy.inf, out=arg_8)\n arg_9 = arg_6.sum(axis=1, dtype='float64') / arg_8\n arg_10 = arg_9.sum() / (arg_9 > 0).sum()\n\n # Generate the frequency counts for each duration\n arg_11 = _accumulateFrequencyCounts(arg_9)\n return (arg_10, arg_11)"} +{"_id": "doc_1703", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns the average on-time, averaged over all on-time runs.\n\n Parameters:\n -----------------------------------------------\n vectors: the vectors for which the onTime is calculated. 
Row 0\n contains the outputs from time step 0, row 1 from time step\n 1, etc.\n numSamples: the number of elements for which on-time is calculated.\n If not specified, then all elements are looked at.\n\n Returns: (scalar average on-time of all outputs,\n list containing frequency counts of each encountered on-time)\n\n\n \"\"\"\n\n # Special case given a 1 dimensional vector: it represents a single column\n if arg_0.ndim == 1:\n arg_0.shape = (-1,1)\n arg_3 = len(arg_0)\n arg_4 = len(arg_0[0])\n\n # How many samples will we look at?\n if arg_1 is None:\n arg_1 = arg_4\n arg_5 = range(arg_4)\n else:\n arg_5 = numpy.random.randint(0, arg_4, arg_1)\n\n # Compute the on-times and accumulate the frequency counts of each on-time\n # encountered\n arg_6 = 0.0\n arg_7 = None\n arg_8 = 0\n for arg_9 in arg_5:\n (arg_10, arg_11, arg_12) = _listOfOnTimesInVec(arg_0[:,arg_9])\n if arg_10 != 0.0:\n arg_6 += arg_10\n arg_8 += arg_11\n arg_7 = _accumulateFrequencyCounts(arg_12, arg_7)\n\n # Return the average on time of each element that was on.\n if arg_8 > 0:\n return (arg_6/arg_8, arg_7)\n else:\n return (0.0, arg_7)"} +{"_id": "doc_1704", "title": "", "text": "def Func(arg_0, arg_1='On-Times Histogram', arg_2='On-Time'):\n \"\"\"\n This is usually used to display a histogram of the on-times encountered\n in a particular output.\n\n The freqCounts is a vector containg the frequency counts of each on-time\n (starting at an on-time of 0 and going to an on-time = len(freqCounts)-1)\n\n The freqCounts are typically generated from the averageOnTimePerTimestep\n or averageOnTime methods of this module.\n\n Parameters:\n -----------------------------------------------\n freqCounts: The frequency counts to plot\n title: Title of the plot\n\n\n \"\"\"\n\n import pylab\n pylab.ion()\n pylab.figure()\n pylab.bar(numpy.arange(len(arg_0)) - 0.5, arg_0)\n pylab.title(arg_1)\n pylab.xlabel(arg_2)"} +{"_id": "doc_1705", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns the percent of the outputs that remain completely stable over\n N time steps.\n\n Parameters:\n -----------------------------------------------\n vectors: the vectors for which the stability is calculated\n numSamples: the number of time steps where stability is counted\n\n For each window of numSamples, count how many outputs are active during\n the entire window.\n\n \"\"\"\n\n # ----------------------------------------------------------------------\n # Calculate the stability\n arg_2 = len(arg_0)\n arg_3 = arg_1\n\n # Process each window\n arg_4 = 0\n arg_5 = 0\n\n for arg_6 in range(0, arg_2-arg_3+1):\n # Count how many elements are active for the entire time\n arg_7 = arg_0[arg_6:arg_6+arg_3]\n arg_8 = arg_7.sum(axis=0)\n arg_9 = (arg_8 == arg_3).sum()\n\n # Accumulated\n arg_10 = float(arg_9) / arg_7[0].sum()\n print arg_10\n arg_5 += arg_10\n arg_4 += 1\n\n # Return percent average over all possible windows\n return float(arg_5) / arg_4"} +{"_id": "doc_1706", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=0):\n \"\"\"\n Compares the actual input with the predicted input and returns results\n\n Parameters:\n -----------------------------------------------\n input: The actual input\n prediction: the predicted input\n verbosity: If > 0, print debugging messages\n sparse: If true, they are in sparse form (list of\n active indices)\n\n retval (foundInInput, totalActiveInInput, missingFromInput,\n totalActiveInPrediction)\n foundInInput: The number of predicted active elements that were\n found in the actual 
input\n totalActiveInInput: The total number of active elements in the input.\n missingFromInput: The number of predicted active elements that were not\n found in the actual input\n totalActiveInPrediction: The total number of active elements in the prediction\n\n \"\"\"\n\n if arg_2:\n arg_4 = set(arg_0)\n arg_5 = set(arg_1)\n\n else:\n arg_4 = set(arg_0.nonzero()[0])\n arg_5 = set(arg_1.nonzero()[0])\n\n arg_6 = len(arg_5)\n arg_7 = len(arg_4)\n\n arg_8 = len(arg_5.intersection(arg_4))\n arg_9 = len(arg_5.difference(arg_4))\n arg_10 = len(arg_4.difference(arg_5))\n\n if arg_3 >= 1:\n print \"preds. found in input:\", arg_8, \"out of\", arg_6,\n print \"; preds. missing from input:\", arg_9, \"out of\", \\\n arg_6,\n print \"; unexpected active in input:\", arg_10, \"out of\", \\\n arg_7\n\n return (arg_8, arg_7, arg_9,\n arg_6)"} +{"_id": "doc_1707", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=1):\n \"\"\"\n Generates centre offsets and spread offsets for block-mode based training\n regimes - star, cross, block.\n\n Parameters:\n -----------------------------------------------\n spaceShape: The (height, width) of the 2-D space to explore. This\n sets the number of center-points.\n spreadShape: The shape (height, width) of the area around each center-point\n to explore.\n stepSize: The step size. How big each step is, in pixels. This controls\n *both* the spacing of the center-points within the block and the\n points we explore around each center-point\n retval: (centreOffsets, spreadOffsets)\n \"\"\"\n\n\n from nupic.math.cross import cross\n # =====================================================================\n # Init data structures\n # What is the range on the X and Y offsets of the center points?\n arg_3 = arg_0\n # If the shape is (1,1), special case of just 1 center point\n if arg_3[0] == 1 and arg_3[1] == 1:\n arg_4 = [(0,0)]\n else:\n arg_5 = -1 * (arg_3[1] // 2)\n arg_6 = arg_5 + arg_3[1] - 1\n arg_7 = range(arg_2 * arg_5, arg_2 * arg_6 + 1, arg_2)\n\n arg_8 = -1 * (arg_3[0] // 2)\n arg_9 = arg_8 + arg_3[0] - 1\n arg_10 = range(arg_2 * arg_8, arg_2 * arg_9 + 1, arg_2)\n\n arg_4 = list(cross(arg_10, arg_7))\n\n arg_11 = len(arg_4)\n print \"centerOffsets:\", arg_4\n\n # What is the range on the X and Y offsets of the spread points?\n arg_3 = arg_1\n # If the shape is (1,1), special case of no spreading around each center\n # point\n if arg_3[0] == 1 and arg_3[1] == 1:\n arg_12 = [(0,0)]\n else:\n arg_5 = -1 * (arg_3[1] // 2)\n arg_6 = arg_5 + arg_3[1] - 1\n arg_7 = range(arg_2 * arg_5, arg_2 * arg_6 + 1, arg_2)\n\n arg_8 = -1 * (arg_3[0] // 2)\n arg_9 = arg_8 + arg_3[0] - 1\n arg_10 = range(arg_2 * arg_8, arg_2 * arg_9 + 1, arg_2)\n\n arg_12 = list(cross(arg_10, arg_7))\n\n # Put the (0,0) entry first\n arg_12.remove((0,0))\n arg_12.insert(0, (0,0))\n\n arg_13 = len(arg_12)\n print \"spreadOffsets:\", arg_12\n\n return arg_4, arg_12"} +{"_id": "doc_1708", "title": "", "text": "def Func(arg_0, arg_1, arg_2=-1):\n \"\"\"Make a two-dimensional clone map mapping columns to clone master.\n\n This makes a map that is (numColumnsHigh, numColumnsWide) big that can\n be used to figure out which clone master to use for each column. 
Here are\n a few sample calls\n\n >>> Func(columnsShape=(10, 6), outputCloningWidth=4)\n (array([[ 0, 1, 2, 3, 0, 1],\n [ 4, 5, 6, 7, 4, 5],\n [ 8, 9, 10, 11, 8, 9],\n [12, 13, 14, 15, 12, 13],\n [ 0, 1, 2, 3, 0, 1],\n [ 4, 5, 6, 7, 4, 5],\n [ 8, 9, 10, 11, 8, 9],\n [12, 13, 14, 15, 12, 13],\n [ 0, 1, 2, 3, 0, 1],\n [ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16)\n\n >>> Func(columnsShape=(7, 8), outputCloningWidth=3)\n (array([[0, 1, 2, 0, 1, 2, 0, 1],\n [3, 4, 5, 3, 4, 5, 3, 4],\n [6, 7, 8, 6, 7, 8, 6, 7],\n [0, 1, 2, 0, 1, 2, 0, 1],\n [3, 4, 5, 3, 4, 5, 3, 4],\n [6, 7, 8, 6, 7, 8, 6, 7],\n [0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9)\n\n >>> Func(columnsShape=(7, 11), outputCloningWidth=5)\n (array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],\n [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5],\n [10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10],\n [15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15],\n [20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20],\n [ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],\n [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25)\n\n >>> Func(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4)\n (array([[ 0, 1, 2, 0, 1, 2, 0, 1],\n [ 3, 4, 5, 3, 4, 5, 3, 4],\n [ 6, 7, 8, 6, 7, 8, 6, 7],\n [ 9, 10, 11, 9, 10, 11, 9, 10],\n [ 0, 1, 2, 0, 1, 2, 0, 1],\n [ 3, 4, 5, 3, 4, 5, 3, 4],\n [ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12)\n\n The basic idea with this map is that, if you imagine things stretching off\n to infinity, every instance of a given clone master is seeing the exact\n same thing in all directions. That includes:\n - All neighbors must be the same\n - The \"meaning\" of the input to each of the instances of the same clone\n master must be the same. If input is pixels and we have translation\n invariance--this is easy. At higher levels where input is the output\n of lower levels, this can be much harder.\n - The \"meaning\" of the inputs to neighbors of a clone master must be the\n same for each instance of the same clone master.\n\n\n The best way to think of this might be in terms of 'inputCloningWidth' and\n 'outputCloningWidth'.\n - The 'outputCloningWidth' is the number of columns you'd have to move\n horizontally (or vertically) before you get back to the same the same\n clone that you started with. MUST BE INTEGRAL!\n - The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us.\n If we're getting input from an sensor where every element just represents\n a shift of every other element, this is 1.\n At a conceptual level, it means that if two different inputs are shown\n to the node and the only difference between them is that one is shifted\n horizontally (or vertically) by this many pixels, it means we are looking\n at the exact same real world input, but shifted by some number of pixels\n (doesn't have to be 1). MUST BE INTEGRAL!\n\n At level 1, I think you could have this:\n * inputCloningWidth = 1\n * sqrt(coincToInputRatio^2) = 2.5\n * outputCloningWidth = 5\n ...in this case, you'd end up with 25 masters.\n\n\n Let's think about this case:\n input: - - - 0 1 2 3 4 5 - - - - -\n columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4\n\n ...in other words, input 0 is fed to both column 0 and column 1. Input 1\n is fed to columns 2, 3, and 4, etc. 
Hopefully, you can see that you'll\n get the exact same output (except shifted) with:\n input: - - - - - 0 1 2 3 4 5 - - -\n columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4\n\n ...in other words, we've shifted the input 2 spaces and the output shifted\n 5 spaces.\n\n\n *** The outputCloningWidth MUST ALWAYS be an integral multiple of the ***\n *** inputCloningWidth in order for all of our rules to apply. ***\n *** NOTE: inputCloningWidth isn't passed here, so it's the caller's ***\n *** responsibility to ensure that this is true. ***\n\n *** The outputCloningWidth MUST ALWAYS be an integral multiple of ***\n *** sqrt(coincToInputRatio^2), too. ***\n\n @param columnsShape The shape (height, width) of the columns.\n @param outputCloningWidth See docstring above.\n @param outputCloningHeight If non-negative, can be used to make\n rectangular (instead of square) cloning fields.\n @return cloneMap An array (numColumnsHigh, numColumnsWide) that\n contains the clone index to use for each\n column.\n @return numDistinctClones The number of distinct clones in the map. This\n is just outputCloningWidth*outputCloningHeight.\n \"\"\"\n if arg_2 < 0:\n arg_2 = arg_1\n\n arg_3, arg_4 = arg_0\n\n arg_5 = arg_1 * arg_2\n\n arg_6 = numpy.empty((arg_3, arg_4), 'uint32')\n for arg_7 in xrange(arg_3):\n for arg_8 in xrange(arg_4):\n arg_6[arg_7, arg_8] = (arg_8 % arg_1) + \\\n (arg_7 % arg_2) * arg_1\n\n return arg_6, arg_5"} +{"_id": "doc_1709", "title": "", "text": "def Func(arg_0, arg_1='%f', arg_2=False, arg_3=True):\n \"\"\" Pretty print a numpy matrix using the given format string for each\n value. Return the string representation\n\n Parameters:\n ------------------------------------------------------------\n array: The numpy array to print. This can be either a 1D vector or 2D matrix\n format: The format string to use for each value\n includeIndices: If true, include [row,col] label for each value\n includeZeros: Can only be set to False if includeIndices is on.\n If True, include 0 values in the print-out\n If False, exclude 0 values from the print-out.\n\n\n \"\"\"\n\n arg_4 = arg_0.shape\n assert (len(arg_4) <= 2)\n arg_5 = ['[']\n if len(arg_4) == 1:\n if arg_2:\n arg_1 = '%d:' + arg_1\n if arg_3:\n arg_6 = [arg_1 % (c,x) for (c,x) in enumerate(arg_0)]\n else:\n arg_6 = [arg_1 % (c,x) for (c,x) in enumerate(arg_0) if x != 0]\n else:\n arg_6 = [arg_1 % (x) for x in arg_0]\n arg_5.extend(arg_6)\n\n else:\n (arg_7, arg_8) = arg_4\n if arg_2:\n arg_1 = '%d,%d:' + arg_1\n\n for arg_9 in xrange(arg_7):\n if arg_2:\n arg_6 = [arg_1 % (arg_9,c,x) for c,x in enumerate(arg_0[arg_9])]\n else:\n arg_6 = [arg_1 % (x) for x in arg_0[arg_9]]\n if arg_9 > 0:\n arg_5.append('')\n\n arg_5.append('[')\n arg_5.extend(arg_6)\n if arg_9 < arg_7-1:\n arg_5.append(']\\n')\n else:\n arg_5.append(']')\n\n\n arg_5.append(']')\n return ' '.join(arg_5)"} +{"_id": "doc_1710", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generates a random Func from the discrete probability distribution\n and returns its value and the log of the probability of sampling that value.\n \"\"\"\n arg_2 = arg_1.uniform(0, arg_0.sum)\n arg_3 = bisect.bisect(arg_0.cdf, arg_2)\n return arg_0.keys[arg_3], numpy.log(arg_0.pmf[arg_3])"} +{"_id": "doc_1711", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Link sensor region to other region so that it can pass it data.\"\"\"\n arg_0.link(arg_1, arg_2, \"UniformLink\", \"\",\n srcOutput=\"dataOut\", destInput=\"bottomUpIn\")"} +{"_id": "doc_1712", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"Get prediction results for all prediction steps.\"\"\"\n arg_2 = arg_0.regions[arg_1]\n arg_3 = arg_2.getOutputData(\"actualValues\")\n arg_4 = arg_2.getOutputData(\"probabilities\")\n arg_5 = arg_2.getSelf().stepsList\n arg_6 = arg_2.getSelf().maxCategoryCount\n arg_7 = {step: {} for step in arg_5}\n for arg_8 in range(len(arg_5)):\n # stepProbabilities are probabilities for this prediction step only.\n arg_9 = arg_4[arg_8 * arg_6:(arg_8 + 1) * arg_6 - 1]\n arg_10 = arg_9.argmax()\n arg_11 = arg_3[arg_10]\n arg_12 = arg_9[arg_10]\n arg_7[arg_5[arg_8]][\"predictedValue\"] = arg_11\n arg_7[arg_5[arg_8]][\"predictionConfidence\"] = arg_12\n return arg_7"} +{"_id": "doc_1713", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Loads all the parameters for this dummy model. For any paramters\n specified as lists, read the appropriate value for this model using the model\n index \"\"\"\n\n for arg_2, arg_3 in arg_1.iteritems():\n if type(arg_3) == list:\n arg_4 = arg_0.modelIndex % len(arg_1[arg_2])\n arg_0._params[arg_2] = arg_1[arg_2][arg_4]\n else:\n arg_0._params[arg_2] = arg_1[arg_2]"} +{"_id": "doc_1714", "title": "", "text": "def Func(arg_0):\n \"\"\" Protected function that can be overridden by subclasses. Its main purpose\n is to allow the the OPFDummyModelRunner to override this with deterministic\n values\n\n Returns: All the metrics being computed for this model\n \"\"\"\n arg_1 = None\n if arg_0.metrics is not None:\n arg_1 = arg_0.metrics(arg_0._currentRecordIndex+1)\n elif arg_0.metricValue is not None:\n arg_1 = arg_0.metricValue\n else:\n raise RuntimeError('No metrics or metric value specified for dummy model')\n\n return {arg_0._optimizeKeyPattern:arg_1}"} +{"_id": "doc_1715", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a description of the dataset\"\"\"\n\n arg_1 = {'name':arg_0.name, 'fields':[f.name for f in arg_0.fields], \\\n 'numRecords by field':[f.numRecords for f in arg_0.fields]}\n\n return arg_1"} +{"_id": "doc_1716", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns the sdr for jth value at column i\"\"\"\n assert len(arg_0.fields)>arg_1\n assert arg_0.fields[arg_1].numRecords>arg_2\n arg_3 = arg_0.fields[arg_1].encodings[arg_2]\n\n return arg_3"} +{"_id": "doc_1717", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the nth encoding with the predictedField zeroed out\"\"\"\n\n assert all(arg_2.numRecords>arg_1 for arg_2 in arg_0.fields)\n\n arg_3 = np.concatenate([arg_2.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)\\\n if arg_2.isPredictedField else arg_2.encodings[arg_1] for arg_2 in arg_0.fields])\n\n return arg_3"} +{"_id": "doc_1718", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the cumulative n for all the fields in the dataset\"\"\"\n\n arg_1 = sum([field.n for field in arg_0.fields])\n return arg_1"} +{"_id": "doc_1719", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the cumulative w for all the fields in the dataset\"\"\"\n\n arg_1 = sum([field.w for field in arg_0.fields])\n return arg_1"} +{"_id": "doc_1720", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the nth encoding\"\"\"\n\n assert (all(arg_2.numEncodings>arg_1 for arg_2 in arg_0.fields))\n arg_3 = np.concatenate([arg_2.encodings[arg_1] for arg_2 in arg_0.fields])\n\n return arg_3"} +{"_id": "doc_1721", "title": "", "text": "def Func(arg_0):\n \"\"\"Deletes all the values in the dataset\"\"\"\n\n for arg_1 in arg_0.fields:\n arg_1.encodings, arg_1.values=[], []\n arg_1.numRecords, arg_1.numEncodings= 
(0, 0)"} +{"_id": "doc_1722", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Value is encoded as a sdr using the encoding parameters of the Field\"\"\"\n\n arg_3 = np.array(arg_0.encoder.encode(arg_1), dtype=realDType)\n\n if arg_2:\n arg_0.encodings.append(arg_3)\n arg_0.numEncodings+=1\n\n return arg_3"} +{"_id": "doc_1723", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set up the dataTypes and initialize encoders\"\"\"\n\n if arg_0.encoderType is None:\n if arg_0.dataType in ['int','float']:\n arg_0.encoderType='adaptiveScalar'\n elif arg_0.dataType=='string':\n arg_0.encoderType='category'\n elif arg_0.dataType in ['date', 'datetime']:\n arg_0.encoderType='date'\n\n if arg_0.dataType is None:\n if arg_0.encoderType in ['scalar','adaptiveScalar']:\n arg_0.dataType='float'\n elif arg_0.encoderType in ['category', 'enumeration']:\n arg_0.dataType='string'\n elif arg_0.encoderType in ['date', 'datetime']:\n arg_0.dataType='datetime'"} +{"_id": "doc_1724", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Initialize the encoders\"\"\"\n\n #Initializing scalar encoder\n if arg_0.encoderType in ['adaptiveScalar', 'scalar']:\n if 'minval' in arg_1:\n arg_0.minval = arg_1.pop('minval')\n else: arg_0.minval=None\n if 'maxval' in arg_1:\n arg_0.maxval = arg_1.pop('maxval')\n else: arg_0.maxval = None\n arg_0.encoder=adaptive_scalar.AdaptiveScalarEncoder(name='AdaptiveScalarEncoder', \\\n w=arg_0.w, n=arg_0.n, arg_2=arg_0.minval, arg_3=arg_0.maxval, periodic=False, forced=True)\n\n #Initializing category encoder\n elif arg_0.encoderType=='category':\n arg_0.encoder=sdr_category.SDRCategoryEncoder(name='categoryEncoder', \\\n w=arg_0.w, n=arg_0.n)\n\n #Initializing date encoder\n elif arg_0.encoderType in ['date', 'datetime']:\n arg_0.encoder=date.DateEncoder(name='dateEncoder')\n else:\n raise RuntimeError('Error in constructing class object. Either encoder type'\n 'or dataType must be specified')"} +{"_id": "doc_1725", "title": "", "text": "def Func(arg_0):\n \"\"\"Loads the experiment description file from the path.\n\n :param path: (string) The path to a directory containing a description.py file\n or the file itself.\n :returns: (config, control)\n \"\"\"\n if not os.path.isdir(arg_0):\n arg_0 = os.path.dirname(arg_0)\n arg_1 = FuncDescriptionScriptFromDir(arg_0)\n arg_2 = getExperimentDescriptionInterfaceFromModule(arg_1)\n return arg_2.getModelDescription(), arg_2.getModelControl()"} +{"_id": "doc_1726", "title": "", "text": "def Func(arg_0):\n \"\"\" Loads the experiment description python script from the given experiment\n directory.\n\n :param experimentDir: (string) experiment directory path\n\n :returns: module of the loaded experiment description scripts\n \"\"\"\n arg_1 = os.path.join(arg_0, \"description.py\")\n arg_2 = _loadDescriptionFile(arg_1)\n return arg_2"} +{"_id": "doc_1727", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Return the model ID of the model with the best result so far and\n it's score on the optimize metric. If swarm is None, then it returns\n the global best, otherwise it returns the best for the given swarm\n for all generatons up to and including genIdx.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. 
For example '__address_encoder.__gym_encoder'\n genIdx: consider the best in all generations up to and including this\n generation if not None.\n retval: (modelID, result)\n \"\"\"\n if arg_1 is None:\n return (arg_0._bestModelID, arg_0._bestResult)\n\n else:\n if arg_1 not in arg_0._swarmBestOverall:\n return (None, numpy.inf)\n\n\n # Get the best score, considering the appropriate generations\n arg_3 = arg_0._swarmBestOverall[arg_1]\n arg_4 = None\n arg_5 = numpy.inf\n\n for (arg_6, (arg_7, arg_8)) in enumerate(arg_3):\n if arg_2 is not None and arg_6 > arg_2:\n break\n if arg_8 < arg_5:\n arg_5 = arg_8\n arg_4 = arg_7\n\n return (arg_4, arg_5)"} +{"_id": "doc_1728", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4=None, arg_5=False):\n \"\"\"Return a list of particleStates for all particles we know about in\n the given swarm, their model Ids, and metric results.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n\n genIdx: If not None, only return particles at this specific generation\n index.\n\n completed: If not None, only return particles of the given state (either\n completed if 'completed' is True, or running if 'completed'\n is false\n\n matured: If not None, only return particles of the given state (either\n matured if 'matured' is True, or not matured if 'matured'\n is false. Note that any model which has completed is also\n considered matured.\n\n lastDescendent: If True, only return particles that are the last descendent,\n that is, the highest generation index for a given particle Id\n\n retval: (particleStates, modelIds, errScores, completed, matured)\n particleStates: list of particleStates\n modelIds: list of modelIds\n errScores: list of errScores, numpy.inf is plugged in\n if we don't have a result yet\n completed: list of completed booleans\n matured: list of matured booleans\n \"\"\"\n # The indexes of all the models in this swarm. This list excludes hidden\n # (orphaned) models.\n if arg_1 is not None:\n arg_6 = arg_0._swarmIdToIndexes.get(arg_1, [])\n else:\n arg_6 = range(len(arg_0._allResults))\n if len(arg_6) == 0:\n return ([], [], [], [], [])\n\n # Get the particles of interest\n arg_7 = []\n arg_8 = []\n arg_9 = []\n arg_10 = []\n arg_11 = []\n for arg_12 in arg_6:\n arg_13 = arg_0._allResults[arg_12]\n\n # If this entry is hidden (i.e. 
it was an orphaned model), it should\n # not be in this list\n if arg_1 is not None:\n assert (not arg_13['hidden'])\n\n # Get info on this model\n arg_14 = arg_13['modelParams']\n arg_15 = arg_13['completed']\n arg_16 = arg_13['matured']\n arg_17 = arg_14['particleState']\n arg_18 = arg_17['genIdx']\n arg_19 = arg_17['id']\n\n if arg_2 is not None and arg_18 != arg_2:\n continue\n\n if arg_3 is not None and (arg_3 != arg_15):\n continue\n\n if arg_4 is not None and (arg_4 != arg_16):\n continue\n\n if arg_5 \\\n and (arg_0._particleLatestGenIdx[arg_19] != arg_18):\n continue\n\n # Incorporate into return values\n arg_7.append(arg_17)\n arg_8.append(arg_13['modelID'])\n arg_9.append(arg_13['errScore'])\n arg_10.append(arg_15)\n arg_11.append(arg_16)\n\n\n return (arg_7, arg_8, arg_9, arg_10, arg_11)"} +{"_id": "doc_1729", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a list of particleStates for all particles in the given\n swarm generation that have been orphaned.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n\n genIdx: If not None, only return particles at this specific generation\n index.\n\n retval: (particleStates, modelIds, errScores, completed, matured)\n particleStates: list of particleStates\n modelIds: list of modelIds\n errScores: list of errScores, numpy.inf is plugged in\n if we don't have a result yet\n completed: list of completed booleans\n matured: list of matured booleans\n \"\"\"\n\n arg_3 = range(len(arg_0._allResults))\n if len(arg_3) == 0:\n return ([], [], [], [], [])\n\n # Get the particles of interest\n arg_4 = []\n arg_5 = []\n arg_6 = []\n arg_7 = []\n arg_8 = []\n for arg_9 in arg_3:\n\n # Get info on this model\n arg_10 = arg_0._allResults[arg_9]\n if not arg_10['hidden']:\n continue\n\n arg_11 = arg_10['modelParams']\n if arg_11['particleState']['swarmId'] != arg_1:\n continue\n\n arg_12 = arg_10['completed']\n arg_13 = arg_10['matured']\n arg_14 = arg_11['particleState']\n arg_15 = arg_14['genIdx']\n arg_16 = arg_14['id']\n\n if arg_2 is not None and arg_15 != arg_2:\n continue\n\n # Incorporate into return values\n arg_4.append(arg_14)\n arg_5.append(arg_10['modelID'])\n arg_6.append(arg_10['errScore'])\n arg_7.append(arg_12)\n arg_8.append(arg_13)\n\n return (arg_4, arg_5, arg_6, arg_7, arg_8)"} +{"_id": "doc_1730", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Return a dict of the errors obtained on models that were run with\n each value from a PermuteChoice variable.\n\n For example, if a PermuteChoice variable has the following choices:\n ['a', 'b', 'c']\n\n The dict will have 3 elements. 
The keys are the stringified choiceVars,\n and each value is a tuple containing (choiceVar, errors) where choiceVar is\n the original form of the choiceVar (before stringification) and errors is\n the list of errors received from models that used the specific choice:\n retval:\n {'a':('a', [0.1, 0.2, 0.3]), 'b':('b', [0.5, 0.1, 0.6]), 'c':('c', [])}\n\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: swarm Id of the swarm to retrieve info from\n maxGenIdx: max generation index to consider from other models, ignored\n if None\n varName: which variable to retrieve\n\n retval: list of the errors obtained from each choice.\n \"\"\"\n arg_4 = dict()\n # Get all the completed particles in this swarm\n (arg_5, arg_6, arg_7, arg_6, arg_6) = arg_0.getParticleInfos(arg_1,\n genIdx=None, matured=True)\n\n for arg_8, arg_9 in itertools.izip(arg_5, arg_7):\n # Consider this generation?\n if arg_2 is not None:\n if arg_8['genIdx'] > arg_2:\n continue\n\n # Ignore unless this model completed successfully\n if arg_9 == numpy.inf:\n continue\n\n arg_10 = Particle.getPositionFromState(arg_8)\n arg_11 = arg_10[arg_3]\n arg_12 = str(arg_11)\n if arg_12 in arg_4:\n arg_4[arg_12][1].append(arg_9)\n else:\n arg_4[arg_12] = (arg_11, [arg_9])\n\n return arg_4"} +{"_id": "doc_1731", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generate stream definition based on\n \"\"\"\n #--------------------------------------------------------------------------\n # Generate the string containing the aggregation settings.\n arg_2 = {\n 'days': 0,\n 'hours': 0,\n 'microseconds': 0,\n 'milliseconds': 0,\n 'minutes': 0,\n 'months': 0,\n 'seconds': 0,\n 'weeks': 0,\n 'years': 0,\n }\n\n # Honor any overrides provided in the stream definition\n arg_3 = {}\n if 'aggregation' in arg_1['streamDef']:\n for arg_4 in arg_2.keys():\n if arg_4 in arg_1['streamDef']['aggregation']:\n arg_2[arg_4] = arg_1['streamDef']['aggregation'][arg_4]\n if 'fields' in arg_1['streamDef']['aggregation']:\n for (arg_5, arg_6) in arg_1['streamDef']['aggregation']['fields']:\n arg_3[arg_5] = str(arg_6)\n\n # Do we have any aggregation at all?\n arg_7 = False\n for arg_8 in arg_2.values():\n if arg_8 != 0:\n arg_7 = True\n break\n\n # Convert the aggFunctionsDict to a list\n arg_9 = arg_3.items()\n arg_10 = dict(arg_2)\n arg_10['fields'] = arg_9\n\n arg_11 = copy.deepcopy(arg_1['streamDef'])\n arg_11['aggregation'] = copy.deepcopy(arg_10)\n return arg_11"} +{"_id": "doc_1732", "title": "", "text": "def Func(arg_0):\n \"\"\"Test if it's OK to exit this worker. This is only called when we run\n out of prospective new models to evaluate. This method sees if all models\n have matured yet. If not, it will sleep for a bit and return False. This\n will indicate to the hypersearch worker that we should keep running, and\n check again later. 
This gives this worker a chance to pick up and adopt any\n model which may become orphaned by another worker before it matures.\n\n If all models have matured, this method will send a STOP message to all\n matured, running models (presumably, there will be just one - the model\n which thinks it's the best) before returning True.\n \"\"\"\n # Send an update status periodically to the JobTracker so that it doesn't\n # think this worker is dead.\n print >> sys.stderr, \"reporter:status:In hypersearchV2: Func\"\n\n # Any immature models still running?\n if not arg_0._jobCancelled:\n (arg_1, arg_2, arg_1, arg_1, arg_1) = arg_0._resultsDB.getParticleInfos(matured=False)\n if len(arg_2) > 0:\n arg_0.logger.info(\"Ready to end hypersearch, but not all models have \" \\\n \"matured yet. Sleeping a bit to wait for all models \" \\\n \"to mature.\")\n # Sleep for a bit, no need to check for orphaned models very often\n time.sleep(5.0 * random.random())\n return False\n\n # All particles have matured, send a STOP signal to any that are still\n # running.\n (arg_1, arg_2, arg_1, arg_1, arg_1) = arg_0._resultsDB.getParticleInfos(completed=False)\n for arg_3 in arg_2:\n arg_0.logger.info(\"Stopping model %d because the search has ended\" \\\n % (arg_3))\n arg_0._cjDAO.modelSetFields(arg_3,\n dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),\n ignoreUnchanged = True)\n\n # Update the HsState to get the accurate field contributions.\n arg_0._hsStatePeriodicUpdate()\n arg_4, arg_5 = \\\n arg_0._hsState.getFieldContributions()\n\n\n # Update the results field with the new field contributions.\n arg_6 = arg_0._cjDAO.jobGetFields(arg_0._jobID, ['results'])[0]\n if arg_6 is not None:\n arg_7 = json.loads(arg_6)\n else:\n arg_7 = {}\n\n # Update the fieldContributions field.\n if arg_4 != arg_7.get('fieldContributions', None):\n arg_7['fieldContributions'] = arg_4\n arg_7['absoluteFieldContributions'] = arg_5\n\n arg_8 = arg_0._cjDAO.jobSetFieldIfEqual(arg_0._jobID,\n fieldName='results',\n curValue=arg_6,\n newValue=json.dumps(arg_7))\n if arg_8:\n arg_0.logger.info('Successfully updated the field contributions:%s',\n arg_4)\n else:\n arg_0.logger.info('Failed updating the field contributions, ' \\\n 'another hypersearch worker must have updated it')\n\n return True"} +{"_id": "doc_1733", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8):\n \"\"\"Record or update the results for a model. This is called by the\n HSW whenever it gets results info for another model, or updated results\n on a model that is still running.\n\n The first time this is called for a given modelID, the modelParams will\n contain the params dict for that model and the modelParamsHash will contain\n the hash of the params. 
Subsequent updates of the same modelID will\n have params and paramsHash values of None (in order to save overhead).\n\n The Hypersearch object should save these results into its own working\n memory into some table, which it then uses to determine what kind of\n new models to create next time createModels() is called.\n\n Parameters:\n ----------------------------------------------------------------------\n modelID: ID of this model in models table\n modelParams: params dict for this model, or None if this is just an update\n of a model that it already previously reported on.\n\n See the comments for the createModels() method for a\n description of this dict.\n\n modelParamsHash: hash of the modelParams dict, generated by the worker\n that put it into the model database.\n results: tuple containing (allMetrics, optimizeMetric). Each is a\n dict containing metricName:result pairs.\n May be None if we have no results yet.\n completed: True if the model has completed evaluation, False if it\n is still running (and these are online results)\n completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates\n matured: True if this model has matured. In most cases, once a\n model matures, it will complete as well. The only time a\n model matures and does not complete is if it's currently\n the best model and we choose to keep it running to generate\n predictions.\n numRecords: Number of records that have been processed so far by this\n model.\n \"\"\"\n if arg_4 is None:\n arg_9 = None\n else:\n arg_9 = arg_4[1].values()[0]\n\n # Update our database.\n arg_10 = arg_0._resultsDB.update(arg_1=arg_1,\n arg_2=arg_2,arg_3=arg_3,\n arg_9=arg_9, arg_5=arg_5,\n arg_6=arg_6, arg_7=arg_7,\n arg_8=arg_8)\n\n # Log message.\n arg_0.logger.debug('Received progress on model %d: completed: %s, '\n 'cmpReason: %s, numRecords: %d, errScore: %s' ,\n arg_1, arg_5, arg_6, arg_8, arg_10)\n\n # Log best so far.\n (arg_11, arg_12) = arg_0._resultsDB.bestModelIdAndErrScore()\n arg_0.logger.debug('Best err score seen so far: %s on model %s' % \\\n (arg_12, arg_11))"} +{"_id": "doc_1734", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6):\n \"\"\"Run the given model.\n\n This runs the model described by 'modelParams'. Periodically, it updates\n the results seen on the model to the model database using the databaseAO\n (database Access Object) methods.\n\n Parameters:\n -------------------------------------------------------------------------\n modelID: ID of this model in models table\n\n jobID: ID for this hypersearch job in the jobs table\n\n modelParams: parameters of this specific model\n modelParams is a dictionary containing the name/value\n pairs of each variable we are permuting over. Note that\n variables within an encoder spec have their name\n structure as:\n .\n\n modelParamsHash: hash of modelParamValues\n\n jobsDAO jobs data access object - the interface to the jobs\n database where model information is stored\n\n modelCheckpointGUID: A persistent, globally-unique identifier for\n constructing the model checkpoint key\n \"\"\"\n\n # We're going to make an assumption that if we're not using streams, that\n # we also don't need checkpoints saved. 
For now, this assumption is OK\n # (if there are no streams, we're typically running on a single machine\n # and just save models to files) but we may want to break this out as\n # a separate controllable parameter in the future\n if not arg_0._createCheckpoints:\n arg_6 = None\n\n # Register this model in our database\n arg_0._resultsDB.update(arg_1=arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n metricResult = None,\n completed = False,\n completionReason = None,\n matured = False,\n numRecords = 0)\n\n # Get the structured params, which we pass to the base description\n arg_7 = arg_3['structuredParams']\n\n if arg_0.logger.getEffectiveLevel() <= logging.DEBUG:\n arg_0.logger.debug(\"Running Model. \\nmodelParams: %s, \\nmodelID=%s, \" % \\\n (pprint.pformat(arg_3, indent=4), arg_1))\n\n # Record time.clock() so that we can report on cpu time\n arg_8 = time.clock()\n\n # Run the experiment. This will report the results back to the models\n # database for us as well.\n arg_9 = arg_0.logger.getEffectiveLevel()\n try:\n if arg_0._dummyModel is None or arg_0._dummyModel is False:\n (arg_10, arg_11) = FuncGivenBaseAndParams(\n arg_1=arg_1,\n arg_2=arg_2,\n baseDescription=arg_0._baseDescription,\n params=arg_7,\n predictedField=arg_0._predictedField,\n reportKeys=arg_0._reportKeys,\n optimizeKey=arg_0._optimizeKey,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_9=arg_9,\n predictionCacheMaxRecords=arg_0._predictionCacheMaxRecords)\n else:\n arg_12 = dict(arg_0._dummyModel)\n arg_12['permutationParams'] = arg_7\n if arg_0._dummyModelParamsFunc is not None:\n arg_13 = dict(arg_7)\n arg_13 ['generation'] = arg_3['particleState']['genIdx']\n arg_12.update(arg_0._dummyModelParamsFunc(arg_13))\n\n (arg_10, arg_11) = runDummyModel(\n arg_1=arg_1,\n arg_2=arg_2,\n params=arg_12,\n predictedField=arg_0._predictedField,\n reportKeys=arg_0._reportKeys,\n optimizeKey=arg_0._optimizeKey,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_9=arg_9,\n predictionCacheMaxRecords=arg_0._predictionCacheMaxRecords)\n\n # Write out the completion reason and message\n arg_5.modelSetCompleted(arg_1,\n completionReason = arg_10,\n completionMsg = arg_11,\n cpuTime = time.clock() - arg_8)\n\n\n except InvalidConnectionException, e:\n arg_0.logger.warn(\"%s\", e)"} +{"_id": "doc_1735", "title": "", "text": "def Func():\n \"\"\" Return true if the engine services are running\n \"\"\"\n arg_0 = subprocess.Popen([\"ps\", \"aux\"], arg_1=subprocess.PIPE)\n\n arg_1 = arg_0.communicate()[0]\n arg_2 = arg_0.returncode\n if arg_2 != 0:\n raise RuntimeError(\"Unable to check for running client job manager\")\n\n # See if the CJM is running\n arg_3 = False\n for arg_4 in arg_1.split(\"\\n\"):\n if \"python\" in arg_4 and \"clientjobmanager.client_job_manager\" in arg_4:\n arg_3 = True\n break\n\n return arg_3"} +{"_id": "doc_1736", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3):\n \"\"\"\n Starts a swarm, given a path to a permutations.py script.\n\n This function is meant to be used with a CLI wrapper that passes command line\n arguments in through the options parameter.\n\n @param permutationsFilePath {string} Path to permutations.py.\n @param options {dict} CLI options.\n @param outputLabel {string} Label for output.\n @param permWorkDir {string} Location of working directory.\n\n @returns {object} Model parameters.\n \"\"\"\n global arg_4\n if \"verbosityCount\" in arg_1:\n arg_4 = arg_1[\"verbosityCount\"]\n del arg_1[\"verbosityCount\"]\n else:\n arg_4 = 1\n\n _setupInterruptHandling()\n\n arg_1[\"permutationsScriptPath\"] = arg_0\n 
arg_1[\"outputLabel\"] = arg_2\n arg_1[\"outDir\"] = arg_3\n arg_1[\"permWorkDir\"] = arg_3\n\n # Assume it's a permutations python script\n arg_5 = _injectDefaultOptions(arg_1)\n _validateOptions(arg_5)\n\n return _runAction(arg_5)"} +{"_id": "doc_1737", "title": "", "text": "def Func(arg_0):\n \"\"\"Back up a file\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Filepath of the back-up\n \"\"\"\n assert os.path.exists(arg_0)\n\n arg_1 = 0\n (arg_2, arg_3) = os.path.splitext(arg_0)\n while True:\n arg_4 = \"%s.%d%s\" % (arg_2, arg_1, arg_3)\n arg_1 += 1\n if not os.path.exists(arg_4):\n break\n shutil.copyfile(arg_0, arg_4)\n\n return arg_4"} +{"_id": "doc_1738", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Launch worker processes to execute the given command line\n\n Parameters:\n -----------------------------------------------\n cmdLine: The command line for each worker\n numWorkers: number of workers to launch\n \"\"\"\n\n arg_0._workers = []\n for arg_4 in range(arg_2):\n arg_5 = tempfile.NamedTemporaryFile(delete=False)\n arg_6 = tempfile.NamedTemporaryFile(delete=False)\n arg_7 = subprocess.Popen(arg_1, bufsize=1, env=os.environ, shell=True,\n stdin=None, arg_5=arg_5, arg_6=arg_6)\n arg_7._stderr_file = arg_6\n arg_7._stdout_file = arg_5\n arg_0._workers.append(arg_7)"} +{"_id": "doc_1739", "title": "", "text": "def Func(arg_0):\n \"\"\"Starts HyperSearch as a worker or runs it inline for the \"dryRun\" action\n\n Parameters:\n ----------------------------------------------------------------------\n retval: the new _HyperSearchJob instance representing the\n HyperSearch job\n \"\"\"\n # This search uses a pre-existing permutations script\n arg_1 = _ClientJobUtils.makeSearchJobParamsDict(options=arg_0._options,\n forRunning=True)\n\n if arg_0._options[\"action\"] == \"dryRun\":\n arg_2 = [sys.argv[0], \"--params=%s\" % (json.dumps(arg_1))]\n\n print\n print \"==================================================================\"\n print \"RUNNING PERMUTATIONS INLINE as \\\"DRY RUN\\\"...\"\n print \"==================================================================\"\n arg_3 = hypersearch_worker.main(arg_2)\n\n else:\n arg_4 = _setUpExports(arg_0._options[\"exports\"])\n # Begin the new search. 
The {JOBID} string is replaced by the actual\n # jobID returned from jobInsert.\n arg_4 += \"$HYPERSEARCH\"\n arg_5 = arg_0._options[\"maxWorkers\"]\n\n arg_3 = arg_0.__cjDAO.jobInsert(\n client=\"GRP\",\n arg_4=arg_4,\n arg_1=json.dumps(arg_1),\n minimumWorkers=1,\n maximumWorkers=arg_5,\n jobType=arg_0.__cjDAO.JOB_TYPE_HS)\n\n arg_4 = \"python -m nupic.swarming.hypersearch_worker\" \\\n \" --jobID=%d\" % (arg_3)\n arg_0._launchWorkers(arg_4, arg_5)\n\n arg_6 = _HyperSearchJob(arg_3)\n\n # Save search ID to file (this is used for report generation)\n arg_0.__saveHyperSearchJobID(\n permWorkDir=arg_0._options[\"permWorkDir\"],\n outputLabel=arg_0._options[\"outputLabel\"],\n hyperSearchJob=arg_6)\n\n if arg_0._options[\"action\"] == \"dryRun\":\n print \"Successfully executed \\\"dry-run\\\" hypersearch, jobID=%d\" % (arg_3)\n else:\n print \"Successfully submitted new HyperSearch job, jobID=%d\" % (arg_3)\n _emit(Verbosity.DEBUG,\n \"Each worker executing the command line: %s\" % (arg_4,))\n\n return arg_6"} +{"_id": "doc_1740", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Instantiates a _HyperSearchJob instance from info saved in file\n\n Parameters:\n ----------------------------------------------------------------------\n permWorkDir: Directory path for saved jobID file\n outputLabel: Label string for incorporating into file name for saved jobID\n retval: _HyperSearchJob instance; raises exception if not found\n \"\"\"\n arg_3 = arg_0.__loadHyperSearchJobID(arg_1=arg_1,\n arg_2=arg_2)\n\n arg_4 = _HyperSearchJob(nupicJobID=arg_3)\n return arg_4"} +{"_id": "doc_1741", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Loads a saved jobID from file\n\n Parameters:\n ----------------------------------------------------------------------\n permWorkDir: Directory path for saved jobID file\n outputLabel: Label string for incorporating into file name for saved jobID\n retval: HyperSearch jobID; raises exception if not found.\n \"\"\"\n arg_3 = arg_0.__getHyperSearchJobIDFilePath(arg_1=arg_1,\n arg_2=arg_2)\n\n arg_4 = None\n with open(arg_3, \"r\") as jobIdPickleFile:\n arg_5 = pickle.load(jobIdPickleFile)\n arg_4 = arg_5[\"hyperSearchJobID\"]\n\n return arg_4"} +{"_id": "doc_1742", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Emit model info to csv file\n\n Parameters:\n ----------------------------------------------------------------------\n modelInfo: _NupicModelInfo instance\n retval: nothing\n \"\"\"\n # Open/init csv file, if needed\n if arg_0.__csvFileObj is None:\n # sets up self.__sortedVariableNames and self.__csvFileObj\n arg_0.__openAndInitCSVFile(arg_1)\n\n arg_2 = arg_0.__csvFileObj\n\n # Emit model info row to report.csv\n print >> arg_2, \"%s, \" % (arg_0.__searchJobID),\n print >> arg_2, \"%s, \" % (arg_1.getModelID()),\n print >> arg_2, \"%s, \" % (arg_1.statusAsString()),\n if arg_1.isFinished():\n print >> arg_2, \"%s, \" % (arg_1.getCompletionReason()),\n else:\n print >> arg_2, \"NA, \",\n if not arg_1.isWaitingToStart():\n print >> arg_2, \"%s, \" % (arg_1.getStartTime()),\n else:\n print >> arg_2, \"NA, \",\n if arg_1.isFinished():\n arg_3 = \"%Y-%m-%d %H:%M:%S\"\n arg_4 = arg_1.getStartTime()\n arg_5 = arg_1.getEndTime()\n print >> arg_2, \"%s, \" % arg_5,\n arg_6 = datetime.strptime(arg_4, arg_3)\n arg_7 = datetime.strptime(arg_5, arg_3)\n print >> arg_2, \"%s, \" % (str((arg_7 - arg_6).seconds)),\n else:\n print >> arg_2, \"NA, \",\n print >> arg_2, \"NA, \",\n print >> arg_2, \"%s, \" % str(arg_1.getModelDescription()),\n print >> 
arg_2, \"%s, \" % str(arg_1.getNumRecords()),\n arg_8 = arg_1.getParamLabels()\n for arg_9 in arg_0.__sortedVariableNames:\n # Some values are complex structures,.. which need to be represented as\n # strings\n if arg_9 in arg_8:\n print >> arg_2, \"%s, \" % (arg_8[arg_9]),\n else:\n print >> arg_2, \"None, \",\n arg_10 = arg_1.getReportMetrics()\n for arg_9 in arg_0.__sortedMetricsKeys:\n arg_11 = arg_10.get(arg_9, \"NA\")\n arg_11 = str(arg_11)\n arg_11 = arg_11.replace(\"\\n\", \" \")\n print >> arg_2, \"%s, \" % (arg_11),\n\n print >> arg_2"} +{"_id": "doc_1743", "title": "", "text": "def Func(arg_0):\n \"\"\"Queuries DB for model IDs of all currently instantiated models\n associated with this HyperSearch job.\n\n See also: _iterModels()\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A sequence of Nupic modelIDs\n \"\"\"\n arg_1 = arg_0.getJobID()\n arg_2 = _clientJobsDB().modelsGetUpdateCounters(arg_1)\n arg_3 = tuple(x[0] for x in arg_2)\n\n return arg_3"} +{"_id": "doc_1744", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrives the optimization key name and optimization function.\n\n Parameters:\n ---------------------------------------------------------\n searchJobParams:\n Parameter for passing as the searchParams arg to\n Hypersearch constructor.\n retval: (optimizationMetricKey, maximize)\n optimizationMetricKey: which report key to optimize for\n maximize: True if we should try and maximize the optimizeKey\n metric. False if we should minimize it.\n \"\"\"\n if arg_1[\"hsVersion\"] == \"v2\":\n arg_2 = HypersearchV2(searchParams=arg_1)\n else:\n raise RuntimeError(\"Unsupported hypersearch version \\\"%s\\\"\" % \\\n (arg_1[\"hsVersion\"]))\n\n arg_3 = arg_2.Func()\n return arg_3"} +{"_id": "doc_1745", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrives a dictionary of metrics that combines all report and\n optimization metrics\n\n Parameters:\n ----------------------------------------------------------------------\n retval: a dictionary of optimization metrics that were collected\n for the model; an empty dictionary if there aren't any.\n \"\"\"\n arg_1 = arg_0.getReportMetrics()\n arg_1.update(arg_0.getOptimizationMetrics())\n return arg_1"} +{"_id": "doc_1746", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns the periodic checks to see if the model should\n continue running.\n\n Parameters:\n -----------------------------------------------------------------------\n terminationFunc: The function that will be called in the model main loop\n as a wrapper around this function. 
Must have a parameter\n called 'index'\n\n Returns: A list of PeriodicActivityRequest objects.\n \"\"\"\n arg_2 = [None] * len(ModelTerminator._MILESTONES)\n for arg_3, (arg_4, arg_5) in enumerate(ModelTerminator._MILESTONES):\n arg_6 = functools.partial(arg_1, arg_3=arg_3)\n arg_2[arg_3] = PeriodicActivityRequest(repeating =False,\n period = arg_4,\n arg_6=arg_6)"} +{"_id": "doc_1747", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Iterates through stream to calculate total records after aggregation.\n This will alter the bookmark state.\n \"\"\"\n arg_1 = 0\n while True:\n arg_2 = arg_0.getNextRecord()\n if arg_2 is None:\n return arg_1\n arg_1 += 1\n\n if arg_1 > 10000:\n raise RuntimeError('No end of datastream found.')"} +{"_id": "doc_1748", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a pattern for a number.\n\n @param number (int) Number of pattern\n\n @return (set) Indices of on bits\n \"\"\"\n if not arg_1 in arg_0._patterns:\n raise IndexError(\"Invalid number\")\n\n return arg_0._patterns[arg_1]"} +{"_id": "doc_1749", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add noise to pattern.\n\n @param bits (set) Indices of on bits\n @param amount (float) Probability of switching an on bit with a random bit\n\n @return (set) Indices of on bits in noisy pattern\n \"\"\"\n arg_3 = set()\n\n for arg_4 in arg_1:\n if arg_0._random.getReal64() < arg_2:\n arg_3.add(arg_0._random.getUInt32(arg_0._n))\n else:\n arg_3.add(arg_4)\n\n return arg_3"} +{"_id": "doc_1750", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the set of pattern numbers that match a bit.\n\n @param bit (int) Index of bit\n\n @return (set) Indices of numbers\n \"\"\"\n if arg_1 >= arg_0._n:\n raise IndexError(\"Invalid bit\")\n\n arg_2 = set()\n\n for arg_3, arg_4 in arg_0._patterns.iteritems():\n if arg_1 in arg_4:\n arg_2.add(arg_3)\n\n return arg_2"} +{"_id": "doc_1751", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a map from number to matching on bits,\n for all numbers that match a set of bits.\n\n @param bits (set) Indices of bits\n\n @return (dict) Mapping from number => on bits.\n \"\"\"\n arg_2 = dict()\n\n for arg_3 in arg_1:\n arg_4 = arg_0.numbersForBit(arg_3)\n\n for arg_5 in arg_4:\n if not arg_5 in arg_2:\n arg_2[arg_5] = set()\n\n arg_2[arg_5].add(arg_3)\n\n return arg_2"} +{"_id": "doc_1752", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"\n Pretty print a pattern.\n\n @param bits (set) Indices of on bits\n @param verbosity (int) Verbosity level\n\n @return (string) Pretty-printed text\n \"\"\"\n arg_3 = arg_0.numberMapForBits(arg_1)\n arg_4 = \"\"\n\n arg_5 = []\n arg_6 = sorted(arg_3.iteritems(),\n key=lambda (arg_7, arg_1): len(arg_1),\n reverse=True)\n\n for arg_7, arg_1 in arg_6:\n\n if arg_2 > 2:\n arg_8 = [str(n) for n in arg_1]\n arg_9 = \"{0} (bits: {1})\".format(arg_7, \",\".join(arg_8))\n elif arg_2 > 1:\n arg_9 = \"{0} ({1} bits)\".format(arg_7, len(arg_1))\n else:\n arg_9 = str(arg_7)\n\n arg_5.append(arg_9)\n\n arg_4 += \"[{0}]\".format(\", \".join(arg_5))\n\n return arg_4"} +{"_id": "doc_1753", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generates set of random patterns.\n \"\"\"\n arg_1 = np.array(range(arg_0._n), np.uint32)\n for arg_2 in xrange(arg_0._num):\n arg_0._random.shuffle(arg_1)\n arg_3 = arg_1[0:arg_0._getW()]\n arg_0._patterns[arg_2] = set(arg_3)"} +{"_id": "doc_1754", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generates set of consecutive patterns.\n \"\"\"\n arg_1 = 
arg_0._n\n arg_2 = arg_0._w\n\n assert type(arg_2) is int, \"List for w not supported\"\n\n for arg_3 in xrange(arg_1 / arg_2):\n arg_4 = set(xrange(arg_3 * arg_2, (arg_3+1) * arg_2))\n arg_0._patterns[arg_3] = arg_4"} +{"_id": "doc_1755", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Calculate error signal\n\n :param bucketIdxList: list of encoder buckets\n\n :return: dict containing error. The key is the number of steps\n The value is a numpy array of error at the output layer\n \"\"\"\n arg_3 = dict()\n arg_4 = numpy.zeros(arg_0._maxBucketIdx + 1)\n arg_5 = len(arg_2)\n for arg_6 in arg_2:\n arg_4[arg_6] = 1.0/arg_5\n\n for (arg_7, arg_8) in arg_0._patternNZHistory:\n arg_9 = arg_1 - arg_7\n if arg_9 in arg_0.steps:\n arg_10 = arg_0.inferSingleStep(arg_8,\n arg_0._weightMatrix[arg_9])\n arg_3[arg_9] = arg_4 - arg_10\n\n return arg_3"} +{"_id": "doc_1756", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=1024 * 1024 * 100):\n \"\"\"Sort a potentially big file\n\n filename - the input file (standard File format)\n key - a list of field names to Func by\n outputFile - the name of the output file\n fields - a list of fields that should be included (all fields if None)\n watermark - when available memory goes below the watermark create a new chunk\n\n Func() works by reading records from the file into memory\n and calling _FuncChunk() on each chunk. In the process it gets\n rid of unneeded fields if any. Once all the chunks have been Funced and\n written to chunk files it calls _merge() to merge all the chunks into a\n single Funced file.\n\n Note, that Func() gets a key that contains field names, which it converts\n into field indices for _FuncChunk() because _FuncChunk() doesn't need to know\n the field name.\n\n Func() figures out by itself how many chunk files to use by reading records\n from the file until the low watermark value of available memory is hit and\n then it Funcs the current records, generates a chunk file, clears the Funced\n records and starts on a new chunk.\n\n The key field names are turned into indices\n \"\"\"\n if arg_3 is not None:\n assert set(arg_1).issubset(set([arg_5[0] for arg_5 in arg_3]))\n\n with FileRecordStream(arg_0) as arg_5:\n\n\n # Find the indices of the requested fields\n if arg_3:\n arg_6 = [ff[0] for ff in arg_3]\n arg_7 = [arg_5.getFieldNames().index(name) for name in arg_6]\n assert len(arg_7) == len(arg_3)\n else:\n arg_8 = arg_5.getFields()\n arg_6 = arg_5.getFieldNames()\n arg_7 = None\n\n # turn key fields to key indices\n arg_1 = [arg_6.index(name) for name in arg_1]\n\n arg_9 = 0\n arg_10 = []\n for arg_11, arg_12 in enumerate(arg_5):\n # Select requested fields only\n if arg_7:\n arg_13 = []\n for arg_11 in arg_7:\n arg_13.append(arg_12[arg_11])\n arg_12 = arg_13\n # Store processed record\n arg_10.append(arg_12)\n\n # Check memory\n arg_14 = psutil.avail_phymem()\n\n # If below the watermark create a new chunk, reset and keep going\n if arg_14 < arg_4:\n _FuncChunk(arg_10, arg_1, arg_9, arg_3)\n arg_10 = []\n arg_9 += 1\n\n # Sort and write the remainder\n if len(arg_10) > 0:\n _FuncChunk(arg_10, arg_1, arg_9, arg_3)\n arg_9 += 1\n\n # Merge all the files\n _mergeFiles(arg_1, arg_9, arg_2, arg_3)"} +{"_id": "doc_1757", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Sort in memory chunk of records\n\n records - a list of records read from the original dataset\n key - a list of indices to sort the records by\n chunkIndex - the index of the current chunk\n\n The records 
contain only the fields requested by the user.\n\n Func() will write the sorted records to a standard File\n named \"chunk_.csv\" (chunk_0.csv, chunk_1.csv,...).\n \"\"\"\n title(additional='(key=%s, chunkIndex=%d)' % (str(arg_1), arg_2))\n\n assert len(arg_0) > 0\n\n # Sort the current records\n arg_0.sort(arg_1=itemgetter(*arg_1))\n\n # Write to a chunk file\n if arg_2 is not None:\n arg_4 = 'chunk_%d.csv' % arg_2\n with FileRecordStream(arg_4, write=True, arg_3=arg_3) as o:\n for arg_5 in arg_0:\n o.appendRecord(arg_5)\n\n assert os.path.getsize(arg_4) > 0\n\n return arg_0"} +{"_id": "doc_1758", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Feeds input record through TM, performing inference and learning.\n Updates member variables with new state.\n\n @param activeColumns (set) Indices of active columns in `t`\n \"\"\"\n arg_3 = numpy.zeros(arg_0.numberOfCols, dtype=dtype)\n arg_3[arg_4(arg_1)] = 1\n super(TemporalMemoryShim, arg_0).Func(arg_3,\n enableLearn=arg_2,\n enableInference=True)\n\n arg_5 = arg_0.getPredictedState()\n arg_0.predictiveCells = set(numpy.flatnonzero(arg_5))"} +{"_id": "doc_1759", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Print a message to the console.\n\n Prints only if level <= self.consolePrinterVerbosity\n Printing with level 0 is equivalent to using a print statement,\n and should normally be avoided.\n\n :param level: (int) indicating the urgency of the message with\n lower values meaning more urgent (messages at level 0 are the most\n urgent and are always printed)\n\n :param message: (string) possibly with format specifiers\n\n :param args: specifies the values for any format specifiers in message\n\n :param kw: newline is the only keyword argument. True (default) if a newline\n should be printed\n \"\"\"\n\n if arg_1 > arg_0.consolePrinterVerbosity:\n return\n\n if len(arg_4) > 1:\n raise KeyError(\"Invalid keywords for Func: %s\" % str(arg_4.keys()))\n\n arg_5 = arg_4.get(\"newline\", True)\n if len(arg_4) == 1 and 'newline' not in arg_4:\n raise KeyError(\"Invalid keyword for Func: %s\" % arg_4.keys()[0])\n\n if len(arg_3) == 0:\n if arg_5:\n print arg_2\n else:\n print arg_2,\n else:\n if arg_5:\n print arg_2 % arg_3\n else:\n print arg_2 % arg_3,"} +{"_id": "doc_1760", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns radius for given speed.\n\n Tries to get the encodings of consecutive readings to be\n adjacent with some overlap.\n\n :param: speed (float) Speed (in meters per second)\n :returns: (int) Radius for given speed\n \"\"\"\n arg_2 = 1.5\n arg_3 = arg_1 * arg_0.timestep / arg_0.scale\n arg_4 = int(round(float(arg_3) / 2 * arg_2))\n arg_5 = int(math.ceil((math.sqrt(arg_0.w) - 1) / 2))\n return max(arg_4, arg_5)"} +{"_id": "doc_1761", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Write serialized object to file.\n\n :param f: output file\n :param packed: If true, will pack contents.\n \"\"\"\n # Get capnproto schema from instance\n arg_3 = arg_0.getSchema()\n\n # Construct new message, otherwise referred to as `proto`\n arg_4 = arg_3.new_message()\n\n # Populate message w/ `write()` instance method\n arg_0.write(arg_4)\n\n # Finally, write to file\n if arg_2:\n arg_4.write_packed(arg_1)\n else:\n arg_4.write(arg_1)"} +{"_id": "doc_1762", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator for functions that require anomaly models.\n \"\"\"\n @wraps(arg_0)\n def _decorator(arg_1, *arg_2, **arg_3):\n if not arg_1.getInferenceType() == 
InferenceType.TemporalAnomaly:\n raise RuntimeError(\"Method required a TemporalAnomaly model.\")\n if arg_1._getAnomalyClassifier() is None:\n raise RuntimeError(\"Model does not support this command. Model must\"\n \"be an active anomalyDetector model.\")\n return arg_0(arg_1, *arg_2, **arg_3)\n return _decorator"} +{"_id": "doc_1763", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Remove labels from the anomaly classifier within this model. Removes all\n records if ``labelFilter==None``, otherwise only removes the labels equal to\n ``labelFilter``.\n\n :param start: (int) index to start removing labels\n :param end: (int) index to end removing labels\n :param labelFilter: (string) If specified, only removes records that match\n \"\"\"\n arg_0._getAnomalyClassifier().getSelf().removeLabels(arg_1, arg_2, arg_3)"} +{"_id": "doc_1764", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Add labels from the anomaly classifier within this model.\n\n :param start: (int) index to start label\n :param end: (int) index to end label\n :param labelName: (string) name of label\n \"\"\"\n arg_0._getAnomalyClassifier().getSelf().addLabel(arg_1, arg_2, arg_3)"} +{"_id": "doc_1765", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Compute Anomaly score, if required\n \"\"\"\n arg_1 = arg_0.getInferenceType()\n\n arg_2 = {}\n arg_3 = arg_0._getSPRegion()\n arg_4 = None\n if arg_1 == InferenceType.NontemporalAnomaly:\n arg_4 = arg_3.getOutputData(\"anomalyScore\")[0] #TODO move from SP to Anomaly ?\n\n elif arg_1 == InferenceType.TemporalAnomaly:\n arg_5 = arg_0._getTPRegion()\n\n if arg_3 is not None:\n arg_6 = arg_3.getOutputData(\"bottomUpOut\").nonzero()[0]\n else:\n arg_7 = arg_0._getSensorRegion()\n arg_6 = arg_7.getOutputData('dataOut').nonzero()[0]\n\n if not arg_0._predictedFieldName in arg_0._input:\n raise ValueError(\n \"Expected predicted field '%s' in input row, but was not found!\"\n % arg_0._predictedFieldName\n )\n # Calculate the anomaly score using the active columns\n # and previous predicted columns.\n arg_4 = arg_5.getOutputData(\"anomalyScore\")[0]\n\n # Calculate the classifier's output and use the result as the anomaly\n # label. Stores as string of results.\n\n # TODO: make labels work with non-SP models\n if arg_3 is not None:\n arg_0._getAnomalyClassifier().setParameter(\n \"activeColumnCount\", len(arg_6))\n arg_0._getAnomalyClassifier().prepareInputs()\n arg_0._getAnomalyClassifier().compute()\n\n arg_8 = arg_0._getAnomalyClassifier().getSelf().getLabelResults()\n arg_2[arg_9.anomalyLabel] = \"%s\" % arg_8\n\n arg_2[arg_9.anomalyScore] = arg_4\n return arg_2"} +{"_id": "doc_1766", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns reference to the network's Classifier region\n \"\"\"\n if (arg_0._netInfo.net is not None and\n \"Classifier\" in arg_0._netInfo.net.regions):\n return arg_0._netInfo.net.regions[\"Classifier\"]\n else:\n return None"} +{"_id": "doc_1767", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Attaches an 'AnomalyClassifier' region to the network. 
Will remove current\n 'AnomalyClassifier' region if it exists.\n\n Parameters\n -----------\n network - network to add the AnomalyClassifier region\n params - parameters to pass to the region\n spEnable - True if network has an SP region\n tmEnable - True if network has a TM region; Currently requires True\n \"\"\"\n\n arg_5 = copy.deepcopy(arg_2)\n arg_6 = dict(k=1,\n distanceMethod='rawOverlap',\n distanceNorm=1,\n doBinarization=1,\n replaceDuplicates=0,\n maxStoredPatterns=1000)\n arg_5.update(arg_6)\n\n # Set defaults if not set\n if arg_5['trainRecords'] is None:\n arg_5['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS\n\n if arg_5['cacheSize'] is None:\n arg_5['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE\n\n # Remove current instance if already created (used for deserializing)\n if arg_0._netInfo is not None and arg_0._netInfo.net is not None \\\n and arg_0._getAnomalyClassifier() is not None:\n arg_0._netInfo.net.removeRegion('AnomalyClassifier')\n\n arg_1.addRegion(\"AnomalyClassifier\",\n \"py.KNNAnomalyClassifierRegion\",\n json.dumps(arg_5))\n\n # Attach link to SP\n if arg_3:\n arg_1.link(\"SP\", \"AnomalyClassifier\", \"UniformLink\", \"\",\n srcOutput=\"bottomUpOut\", destInput=\"spBottomUpOut\")\n else:\n arg_1.link(\"sensor\", \"AnomalyClassifier\", \"UniformLink\", \"\",\n srcOutput=\"dataOut\", destInput=\"spBottomUpOut\")\n\n # Attach link to TM\n if arg_4:\n arg_1.link(\"TM\", \"AnomalyClassifier\", \"UniformLink\", \"\",\n srcOutput=\"topDownOut\", destInput=\"tpTopDownOut\")\n arg_1.link(\"TM\", \"AnomalyClassifier\", \"UniformLink\", \"\",\n srcOutput=\"lrnActiveStateT\", destInput=\"tpLrnActiveStateT\")\n else:\n raise RuntimeError(\"TemporalAnomaly models require a TM region.\")"} +{"_id": "doc_1768", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Tell the writer which metrics should be written\n\n Parameters:\n -----------------------------------------------------------------------\n metricsNames: A list of metric labels to be written\n \"\"\"\n if arg_1 is None:\n arg_0.__metricNames = set([])\n else:\n arg_0.__metricNames = set(arg_1)"} +{"_id": "doc_1769", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get field metadata information for inferences that are of dict type\"\"\"\n arg_3 = []\n arg_4 = InferenceElement.getLabel(arg_1)\n\n if InferenceElement.getInputElement(arg_1):\n arg_3.append(FieldMetaInfo(name=arg_4+\".actual\",\n type=FieldMetaType.string,\n special = ''))\n\n arg_5 = sorted(arg_2.keys())\n for arg_6 in arg_5:\n arg_3.append(FieldMetaInfo(name=arg_4+\".\"+str(arg_6),\n type=FieldMetaType.string,\n special=''))\n\n\n return arg_3"} +{"_id": "doc_1770", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Creates the inference output directory for the given experiment\n\n experimentDir: experiment directory path that contains description.py\n\n Returns: path of the inference output directory\n \"\"\"\n arg_2 = arg_0.getExperimentInferenceDirPath(arg_1)\n\n arg_0.makeDirectory(arg_2)\n\n return arg_2"} +{"_id": "doc_1771", "title": "", "text": "def Func(arg_0):\n \"\"\"A decorator that maintains the attribute lock state of an object\n\n It cooperates with the LockAttributesMetaclass (see below) that replaces\n the __setattr__ method with a custom one that checks the _canAddAttributes\n counter and allows setting new attributes only if _canAddAttributes > 0.\n\n New attributes can be set only from methods decorated\n with this decorator (should be only __init__ and __setstate__ normally)\n\n The decorator is reentrant (e.g. 
if from inside a decorated function another\n decorated function is invoked). Before invoking the target function it\n increments the counter (or sets it to 1). After invoking the target function\n it decrements the counter and if it's 0 it removes the counter.\n \"\"\"\n def arg_7(arg_1, *arg_2, **arg_3):\n \"\"\"The decorated function that replaces __init__() or __setstate__()\n\n \"\"\"\n # Run the original function\n if not hasattr(arg_1, '_canAddAttributes'):\n arg_1.__dict__['_canAddAttributes'] = 1\n else:\n arg_1._canAddAttributes += 1\n assert arg_1._canAddAttributes >= 1\n\n # Save add attribute counter\n arg_5 = arg_1._canAddAttributes\n arg_0(arg_1, *arg_2, **arg_3)\n\n # Restore _canAddAttributes if deleted from dict (can happen in __setstate__)\n if hasattr(arg_1, '_canAddAttributes'):\n arg_1._canAddAttributes -= 1\n else:\n arg_1._canAddAttributes = arg_5 - 1\n\n assert arg_1._canAddAttributes >= 0\n if arg_1._canAddAttributes == 0:\n del arg_1._canAddAttributes\n\n arg_7.__doc__ = arg_0.__doc__\n arg_7.__name__ = arg_0.__name__\n return arg_7"} +{"_id": "doc_1772", "title": "", "text": "def Func(arg_0, arg_1 = 10):\n \"\"\" Creates a neighboring record for each record in the inputs and adds\n new records at the end of the inputs list\n \"\"\"\n arg_2 = len(arg_0)\n arg_3 = False\n for arg_4 in xrange(arg_2):\n arg_5 = arg_0[arg_4]\n arg_6 = 0\n arg_7 = copy.deepcopy(arg_5)\n for arg_8 in xrange(len(arg_5)-1):\n if arg_3:\n arg_3 = False\n continue\n if arg_5[arg_8] == 1 and arg_5[arg_8+1] == 0:\n arg_7[arg_8] = 0\n arg_7[arg_8+1] = 1\n arg_0.append(arg_7)\n arg_7 = copy.deepcopy(arg_7)\n #print input\n #print newInput\n arg_6 += 1\n arg_3 = True\n if arg_6 == arg_1:\n break"} +{"_id": "doc_1773", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Modifies up to maxChanges number of bits in the inputVal\n \"\"\"\n arg_2 = np.random.random_integers(0, arg_1, 1)[0]\n\n if arg_2 == 0:\n return arg_0\n\n arg_3 = len(arg_0)\n\n arg_4 = np.random.random_integers(0, 41, arg_2)\n\n arg_5 = -1\n arg_6 = 0\n for arg_7 in xrange(arg_3):\n if arg_6 >= arg_2:\n break\n if arg_0[arg_7] == 1:\n arg_5 += 1\n if arg_5 in arg_4:\n if arg_7 != 0 and arg_0[arg_7-1] == 0:\n arg_0[arg_7-1] = 1\n arg_0[arg_7] = 0\n\n return arg_0"} +{"_id": "doc_1774", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns a random selection from the inputSpace with randomly modified\n up to maxChanges number of bits.\n \"\"\"\n arg_2 = len(arg_0)\n arg_3 = np.random.random_integers(0, arg_2-1, 1)[0]\n\n arg_4 = copy.deepcopy(arg_0[arg_3])\n\n if arg_1 == 0:\n return arg_4\n\n return modifyBits(arg_4, arg_1)"} +{"_id": "doc_1775", "title": "", "text": "def Func(arg_0):\n \"\"\"Creates and returns a new Network with a sensor region reading data from\n 'dataSource'. 
There are two hierarchical levels, each with one SP and one TM.\n @param dataSource - A RecordStream containing the input data\n @returns a Network ready to run\n \"\"\"\n arg_1 = Network()\n\n # Create and add a record sensor and a SP region\n arg_2 = createRecordSensor(arg_1, name=_RECORD_SENSOR,\n arg_0=arg_0)\n createSpatialPooler(arg_1, name=_L1_SPATIAL_POOLER,\n inputWidth=arg_2.encoder.getWidth())\n\n # Link the SP region to the sensor input\n arg_3 = \"UniformLink\"\n arg_4 = \"\"\n arg_1.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, arg_3, arg_4)\n\n # Create and add a TM region\n arg_5 = createTemporalMemory(arg_1, _L1_TEMPORAL_MEMORY)\n\n # Link SP region to TM region in the feedforward direction\n arg_1.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, arg_3, arg_4)\n\n # Add a classifier\n arg_6 = { # Learning rate. Higher values make it adapt faster.\n 'alpha': 0.005,\n\n # A comma separated list of the number of steps the\n # classifier predicts in the future. The classifier will\n # learn predictions of each order specified.\n 'steps': '1',\n\n # The specific implementation of the classifier to use\n # See SDRClassifierFactory#create for options\n 'implementation': 'py',\n\n # Diagnostic output verbosity control;\n # 0: silent; [1..6]: increasing levels of verbosity\n 'verbosity': 0}\n\n arg_7 = arg_1.addRegion(_L1_CLASSIFIER, \"py.SDRClassifierRegion\",\n json.dumps(arg_6))\n arg_7.setParameter('inferenceMode', True)\n arg_7.setParameter('learningMode', True)\n arg_1.link(_L1_TEMPORAL_MEMORY, _L1_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"bottomUpOut\", destInput=\"bottomUpIn\")\n arg_1.link(_RECORD_SENSOR, _L1_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"categoryOut\", destInput=\"categoryIn\")\n arg_1.link(_RECORD_SENSOR, _L1_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"bucketIdxOut\", destInput=\"bucketIdxIn\")\n arg_1.link(_RECORD_SENSOR, _L1_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"actValueOut\", destInput=\"actValueIn\")\n\n # Second Level\n arg_8 = arg_5.getSelf().getOutputElementCount(\"bottomUpOut\")\n createSpatialPooler(arg_1, name=_L2_SPATIAL_POOLER, inputWidth=arg_8)\n arg_1.link(_L1_TEMPORAL_MEMORY, _L2_SPATIAL_POOLER, arg_3, arg_4)\n\n createTemporalMemory(arg_1, _L2_TEMPORAL_MEMORY)\n arg_1.link(_L2_SPATIAL_POOLER, _L2_TEMPORAL_MEMORY, arg_3, arg_4)\n\n arg_9 = arg_1.addRegion(_L2_CLASSIFIER, \"py.SDRClassifierRegion\",\n json.dumps(arg_6))\n arg_9.setParameter('inferenceMode', True)\n arg_9.setParameter('learningMode', True)\n arg_1.link(_L2_TEMPORAL_MEMORY, _L2_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"bottomUpOut\", destInput=\"bottomUpIn\")\n arg_1.link(_RECORD_SENSOR, _L2_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"categoryOut\", destInput=\"categoryIn\")\n arg_1.link(_RECORD_SENSOR, _L2_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"bucketIdxOut\", destInput=\"bucketIdxIn\")\n arg_1.link(_RECORD_SENSOR, _L2_CLASSIFIER, arg_3, arg_4,\n srcOutput=\"actValueOut\", destInput=\"actValueIn\")\n return arg_1"} +{"_id": "doc_1776", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Runs specified Network writing the ensuing anomaly\n scores to writer.\n\n @param network: The Network instance to be run\n @param writer: A csv.writer used to write to output file.\n \"\"\"\n arg_3 = arg_0.regions[_RECORD_SENSOR]\n arg_4 = arg_0.regions[_L1_SPATIAL_POOLER]\n arg_5 = arg_0.regions[_L1_TEMPORAL_MEMORY]\n arg_6 = arg_0.regions[_L1_CLASSIFIER]\n\n arg_7 = arg_0.regions[_L2_SPATIAL_POOLER]\n arg_8 = arg_0.regions[_L2_TEMPORAL_MEMORY]\n arg_9 = arg_0.regions[_L2_CLASSIFIER]\n\n arg_10 = []\n 
arg_11 = []\n\n arg_12 = None\n arg_13 = None\n arg_14 = 0.0\n arg_15 = 0.0\n for arg_16 in xrange(arg_1):\n # Run the network for a single iteration\n arg_0.run(1)\n\n arg_17 = float(arg_3.getOutputData(\"actValueOut\")[0])\n\n arg_18 = arg_6.getOutputData(\"actualValues\")\n arg_19 = arg_6.getOutputData(\"probabilities\")\n arg_20 = arg_18[arg_19.argmax()]\n if arg_12 is not None:\n arg_14 += math.fabs(arg_12 - arg_17)\n arg_12 = arg_20\n\n arg_21 = arg_9.getOutputData(\"actualValues\")\n arg_22 = arg_9.getOutputData(\"probabilities\")\n arg_23 = arg_21[arg_22.argmax()]\n if arg_13 is not None:\n arg_15 += math.fabs(arg_13 - arg_17)\n arg_13 = arg_23\n\n arg_24 = arg_5.getOutputData(\"anomalyScore\")[0]\n arg_25 = arg_8.getOutputData(\"anomalyScore\")[0]\n\n # Write record number, actualInput, and anomaly scores\n arg_2.writerow((arg_16, arg_17, arg_12, arg_24, arg_13, arg_25))\n\n # Store the predicted columns for the next timestep\n arg_26 = arg_5.getOutputData(\"topDownOut\").nonzero()[0]\n arg_10 = copy.deepcopy(arg_26)\n #\n arg_27 = arg_8.getOutputData(\"topDownOut\").nonzero()[0]\n arg_11 = copy.deepcopy(arg_27)\n\n # Output absolute average error for each level\n if arg_1 > 1:\n print \"L1 ave abs class. error: %f\" % (arg_14 / (arg_1 - 1))\n print \"L2 ave abs class. error: %f\" % (arg_15 / (arg_1 - 1))"} +{"_id": "doc_1777", "title": "", "text": "def Func(arg_0):\n \"\"\"Removes trailing whitespace on each line.\"\"\"\n arg_1 = [l.rstrip() for l in arg_0.split('\\n')]\n return '\\n'.join(arg_1)"} +{"_id": "doc_1778", "title": "", "text": "def Func(arg_0):\n \"\"\" \n Gets the current metric values\n\n :returns: (dict) where each key is the metric-name, and the values are\n its scalar value. Same as the output of \n :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`\n \"\"\"\n\n arg_1 = {}\n\n for arg_2, arg_3 in zip(arg_0.__metrics, arg_0.__metricLabels):\n arg_4 = arg_2.getMetric()\n arg_1[arg_3] = arg_4['value']\n\n return arg_1"} +{"_id": "doc_1779", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" \n Gets detailed info about a given metric, in addition to its value. 
This\n may include any statistics or auxiliary data that are computed for a given\n metric.\n\n :param metricLabel: (string) label of the given metric (see \n :class:`~nupic.frameworks.opf.metrics.MetricSpec`)\n\n :returns: (dict) of metric information, as returned by \n :meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`.\n \"\"\"\n try:\n arg_2 = arg_0.__metricLabels.index(arg_1)\n except ValueError:\n return None\n\n return arg_0.__metrics[arg_2].getMetric()"} +{"_id": "doc_1780", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Stores the current model results in the manager's internal store\n\n Parameters:\n -----------------------------------------------------------------------\n results: A ModelResults object that contains the current timestep's\n input/inferences\n \"\"\"\n # -----------------------------------------------------------------------\n # If the model potentially has temporal inferences.\n if arg_0.__isTemporal:\n arg_2 = arg_0.__inferenceShifter.shift(arg_1).inferences\n arg_0.__currentResult = copy.deepcopy(arg_1)\n arg_0.__currentResult.inferences = arg_2\n arg_0.__currentInference = arg_2\n\n # -----------------------------------------------------------------------\n # The current model has no temporal inferences.\n else:\n arg_0.__currentResult = copy.deepcopy(arg_1)\n arg_0.__currentInference = copy.deepcopy(arg_1.inferences)\n\n # -----------------------------------------------------------------------\n # Save the current ground-truth results\n arg_0.__currentGroundTruth = copy.deepcopy(arg_1)"} +{"_id": "doc_1781", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the actual value for this field\n\n Parameters:\n -----------------------------------------------------------------------\n sensorInputElement: The inference element (part of the inference) that\n is being used for this metric\n \"\"\"\n arg_2 = InferenceElement.getInputElement(arg_1)\n if arg_2 is None:\n return None\n return getattr(arg_0.__currentGroundTruth.sensorInput, arg_2)"} +{"_id": "doc_1782", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Abbreviate the given text to threshold chars and append an ellipsis if its\n length exceeds threshold; used for logging;\n\n NOTE: the resulting text could be longer than threshold due to the ellipsis\n \"\"\"\n if arg_0 is not None and len(arg_0) > arg_1:\n arg_0 = arg_0[:arg_1] + \"...\"\n\n return arg_0"} +{"_id": "doc_1783", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Generates the ClientJobs database name for the given version of the\n database\n\n Parameters:\n ----------------------------------------------------------------\n dbVersion: ClientJobs database version number\n\n retval: the ClientJobs database name for the given DB version\n \"\"\"\n\n # DB Name prefix for the given version\n arg_2 = arg_0.__getDBNamePrefixForVersion(arg_1)\n\n # DB Name suffix\n arg_3 = Configuration.get('nupic.cluster.database.nameSuffix')\n\n # Replace dash and dot with underscore (e.g. 'ec2-user' or ec2.user will break SQL)\n arg_3 = arg_3.replace(\"-\", \"_\")\n arg_3 = arg_3.replace(\".\", \"_\")\n\n # Create the name of the database for the given DB version\n arg_4 = '%s_%s' % (arg_2, arg_3)\n\n return arg_4"} +{"_id": "doc_1784", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False):\n \"\"\" Locate the current version of the jobs DB or create a new one, and\n optionally delete old versions lying around. 
If desired, this method\n can be called at any time to re-create the tables from scratch, delete\n old versions of the database, etc.\n\n Parameters:\n ----------------------------------------------------------------\n deleteOldVersions: if true, delete any old versions of the DB left\n on the server\n recreate: if true, recreate the database from scratch even\n if it already exists.\n \"\"\"\n\n # Initialize tables, if needed\n with ConnectionFactory.get() as conn:\n # Initialize tables\n arg_0._initTables(cursor=conn.cursor, arg_1=arg_1,\n arg_2=arg_2)\n\n # Save our Funcion id\n conn.cursor.execute('SELECT CONNECTION_ID()')\n arg_0._FuncionID = conn.cursor.fetchall()[0][0]\n arg_0._logger.info(\"clientJobsConnectionID=%r\", arg_0._FuncionID)\n\n return"} +{"_id": "doc_1785", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5=None):\n \"\"\" Return a sequence of matching rows with the requested field values from\n a table or empty sequence if nothing matched.\n\n tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance\n conn: Owned connection acquired from ConnectionFactory.get()\n fieldsToMatch: Dictionary of internal fieldName/value mappings that\n identify the desired rows. If a value is an instance of\n ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the\n operator 'IN' will be used in the corresponding SQL\n predicate; if the value is bool: \"IS TRUE/FALSE\"; if the\n value is None: \"IS NULL\"; '=' will be used for all other\n cases.\n selectFieldNames:\n list of fields to return, using internal field names\n maxRows: maximum number of rows to return; unlimited if maxRows\n is None\n\n retval: A sequence of matching rows, each row consisting of field\n values in the order of the requested field names. Empty\n sequence is returned when not match exists.\n \"\"\"\n\n assert arg_3, repr(arg_3)\n assert all(arg_6 in arg_1.dbFieldNames\n for arg_6 in arg_3.iterkeys()), repr(arg_3)\n\n assert arg_4, repr(arg_4)\n assert all(arg_7 in arg_1.dbFieldNames for arg_7 in arg_4), repr(\n arg_4)\n\n # NOTE: make sure match expressions and values are in the same order\n arg_8 = arg_3.items()\n arg_9 = (\n p[0] +\n (' IS ' + {True:'TRUE', False:'FALSE'}[p[1]] if isinstance(p[1], bool)\n else ' IS NULL' if p[1] is None\n else ' IN %s' if isinstance(p[1], arg_0._SEQUENCE_TYPES)\n else '=%s')\n for p in arg_8)\n arg_10 = [p[1] for p in arg_8\n if (not isinstance(p[1], (bool)) and p[1] is not None)]\n\n arg_11 = 'SELECT %s FROM %s WHERE (%s)' % (\n ','.join(arg_4), arg_1.tableName,\n ' AND '.join(arg_9))\n arg_12 = arg_10\n if arg_5 is not None:\n arg_11 += ' LIMIT %s'\n arg_12.append(arg_5)\n\n arg_2.cursor.execute(arg_11, arg_12)\n arg_13 = arg_2.cursor.fetchall()\n\n if arg_13:\n assert arg_5 is None or len(arg_13) <= arg_5, \"%d !<= %d\" % (\n len(arg_13), arg_5)\n assert len(arg_13[0]) == len(arg_4), \"%d != %d\" % (\n len(arg_13[0]), len(arg_4))\n else:\n arg_13 = tuple()\n\n return arg_13"} +{"_id": "doc_1786", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4):\n \"\"\" Return a single matching row with the requested field values from the\n the requested table or None if nothing matched.\n\n tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance\n conn: Owned connection acquired from ConnectionFactory.get()\n fieldsToMatch: Dictionary of internal fieldName/value mappings that\n identify the desired rows. 
If a value is an instance of\n ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the\n operator 'IN' will be used in the corresponding SQL\n predicate; if the value is bool: \"IS TRUE/FALSE\"; if the\n value is None: \"IS NULL\"; '=' will be used for all other\n cases.\n selectFieldNames:\n list of fields to return, using internal field names\n\n retval: A sequence of field values of the matching row in the order\n of the given field names; or None if there was no match.\n \"\"\"\n arg_5 = arg_0._getMatchingRowsNoRetries(arg_1, arg_2, arg_3,\n arg_4, maxRows=1)\n if arg_5:\n assert len(arg_5) == 1, repr(len(arg_5))\n arg_6 = arg_5[0]\n else:\n arg_6 = None\n\n return arg_6"} +{"_id": "doc_1787", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Place the given job in STATUS_RUNNING mode; the job is expected to be\n STATUS_NOTSTARTED.\n\n NOTE: this function was factored out of jobStartNext because it's also\n needed for testing (e.g., test_client_jobs_dao.py)\n \"\"\"\n with ConnectionFactory.get() as conn:\n arg_2 = 'UPDATE %s SET status=%%s, ' \\\n ' _eng_cjm_conn_id=%%s, ' \\\n ' start_time=UTC_TIMESTAMP(), ' \\\n ' _eng_last_update_time=UTC_TIMESTAMP() ' \\\n ' WHERE (job_id=%%s AND status=%%s)' \\\n % (arg_0.jobsTableName,)\n arg_3 = [arg_0.STATUS_RUNNING, arg_0._connectionID,\n arg_1, arg_0.STATUS_NOTSTARTED]\n arg_4 = conn.cursor.execute(arg_2, arg_3)\n if arg_4 != 1:\n arg_0._logger.warn('jobStartNext: numRowsUpdated=%r instead of 1; '\n 'likely side-effect of transient connection '\n 'failure', arg_4)\n return"} +{"_id": "doc_1788", "title": "", "text": "def Func(arg_0):\n \"\"\" Set cancel field of all currently-running jobs to true.\n \"\"\"\n\n # Get a database connection and cursor\n with ConnectionFactory.get() as conn:\n\n arg_1 = 'UPDATE %s SET cancel=TRUE WHERE status<>%%s ' \\\n % (arg_0.jobsTableName,)\n conn.cursor.execute(arg_1, [arg_0.STATUS_COMPLETED])\n\n return"} +{"_id": "doc_1789", "title": "", "text": "def Func(arg_0,):\n \"\"\" Look through the jobs table and count the running jobs whose\n cancel field is true.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A count of running jobs with the cancel field set to true.\n \"\"\"\n with ConnectionFactory.get() as conn:\n arg_1 = 'SELECT COUNT(job_id) '\\\n 'FROM %s ' \\\n 'WHERE (status<>%%s AND cancel is TRUE)' \\\n % (arg_0.jobsTableName,)\n\n conn.cursor.execute(arg_1, [arg_0.STATUS_COMPLETED])\n arg_2 = conn.cursor.fetchall()\n\n return arg_2[0][0]"} +{"_id": "doc_1790", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Generator to allow iterating slices at dynamic intervals\n\n Parameters:\n ----------------------------------------------------------------\n data: Any data structure that supports slicing (i.e. list or tuple)\n *intervals: Iterable of intervals. The sum of intervals should be less\n than, or equal to the length of data.\n\n \"\"\"\n assert sum(arg_1) <= len(arg_0)\n\n arg_2 = 0\n for arg_3 in arg_1:\n arg_4 = arg_2 + arg_3\n yield arg_0[arg_2:arg_4]\n arg_2 = arg_4\n\n raise StopIteration"} +{"_id": "doc_1791", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get all info about a job, with model details, if available.\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to query\n retval: A sequence of two-tuples if the jobID exists in the jobs\n table (exeption is raised if it doesn't exist). 
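The interval-slicing generator in doc_1790 ends with `raise StopIteration`, which Python 3.7+ converts into a `RuntimeError` inside generators (PEP 479); simply falling off the end is the portable equivalent. A runnable sketch of the same technique, with illustrative names:

```python
def slice_at_intervals(data, *intervals):
    """Yield consecutive slices of `data` with the given lengths."""
    assert sum(intervals) <= len(data)
    start = 0
    for length in intervals:
        end = start + length
        yield data[start:end]
        start = end  # next slice begins where this one ended

print(list(slice_at_intervals([1, 2, 3, 4, 5, 6], 2, 3)))  # [[1, 2], [3, 4, 5]]
```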
Each two-tuple\n contains an instance of jobInfoNamedTuple as the first element and\n an instance of modelInfoNamedTuple as the second element. NOTE: In\n the case where there are no matching model rows, a sequence of one\n two-tuple will still be returned, but the modelInfoNamedTuple\n fields will be None, and the jobInfoNamedTuple fields will be\n populated.\n \"\"\"\n\n # Get a database connection and cursor\n arg_2 = None\n\n with ConnectionFactory.get() as conn:\n # NOTE: Since we're using a LEFT JOIN on the models table, there need not\n # be a matching row in the models table, but the matching row from the\n # jobs table will still be returned (along with all fields from the models\n # table with values of None in case there were no matchings models)\n arg_3 = ' '.join([\n 'SELECT %s.*, %s.*' % (arg_0.jobsTableName, arg_0.modelsTableName),\n 'FROM %s' % arg_0.jobsTableName,\n 'LEFT JOIN %s USING(job_id)' % arg_0.modelsTableName,\n 'WHERE job_id=%s'])\n\n conn.cursor.execute(arg_3, (arg_1,))\n\n if conn.cursor.rowcount > 0:\n arg_2 = [\n ClientJobsDAO._combineResults(\n result, arg_0._jobs.jobInfoNamedTuple,\n arg_0._models.modelInfoNamedTuple\n ) for result in conn.cursor.fetchall()]\n\n if arg_2 is not None:\n return arg_2\n\n raise RuntimeError(\"jobID=%s not found within the jobs table\" % (arg_1))"} +{"_id": "doc_1792", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get all info about a job\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to query\n retval: namedtuple containing the job info.\n\n \"\"\"\n arg_2 = arg_0._getOneMatchingRowWithRetries(\n arg_0._jobs, dict(job_id=arg_1),\n [arg_0._jobs.pubToDBNameDict[n]\n for n in arg_0._jobs.FuncNamedTuple._fields])\n\n if arg_2 is None:\n raise RuntimeError(\"jobID=%s not found within the jobs table\" % (arg_1))\n\n # Create a namedtuple with the names to values\n return arg_0._jobs.FuncNamedTuple._make(arg_2)"} +{"_id": "doc_1793", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True,):\n \"\"\" Change the status on the given job\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to change status\n status: new status string (ClientJobsDAO.STATUS_xxxxx)\n\n useConnectionID: True if the connection id of the calling function\n must be the same as the connection that created the job. Set\n to False for hypersearch workers\n \"\"\"\n # Get a database connection and cursor\n with ConnectionFactory.get() as conn:\n arg_4 = 'UPDATE %s SET status=%%s, ' \\\n ' _eng_last_update_time=UTC_TIMESTAMP() ' \\\n ' WHERE job_id=%%s' \\\n % (arg_0.jobsTableName,)\n arg_5 = [arg_2, arg_1]\n\n if arg_3:\n arg_4 += ' AND _eng_cjm_conn_id=%s'\n arg_5.append(arg_0._connectionID)\n\n arg_6 = conn.cursor.execute(arg_4, arg_5)\n\n if arg_6 != 1:\n raise RuntimeError(\"Tried to change the status of job %d to %s, but \"\n \"this job belongs to some other CJM\" % (\n arg_1, arg_2))"} +{"_id": "doc_1794", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4 = True):\n \"\"\" Change the status on the given job to completed\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to mark as completed\n completionReason: completionReason string\n completionMsg: completionMsg string\n\n useConnectionID: True if the connection id of the calling function\n must be the same as the connection that created the job. 
Set\n to False for hypersearch workers\n \"\"\"\n\n # Get a database connection and cursor\n with ConnectionFactory.get() as conn:\n arg_5 = 'UPDATE %s SET status=%%s, ' \\\n ' completion_reason=%%s, ' \\\n ' completion_msg=%%s, ' \\\n ' end_time=UTC_TIMESTAMP(), ' \\\n ' _eng_last_update_time=UTC_TIMESTAMP() ' \\\n ' WHERE job_id=%%s' \\\n % (arg_0.jobsTableName,)\n arg_6 = [arg_0.STATUS_COMPLETED, arg_2, arg_3,\n arg_1]\n\n if arg_4:\n arg_5 += ' AND _eng_cjm_conn_id=%s'\n arg_6.append(arg_0._connectionID)\n\n arg_7 = conn.cursor.execute(arg_5, arg_6)\n\n if arg_7 != 1:\n raise RuntimeError(\"Tried to change the status of jobID=%s to \"\n \"completed, but this job could not be found or \"\n \"belongs to some other CJM\" % (arg_1))"} +{"_id": "doc_1795", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Cancel the given job. This will update the cancel field in the\n jobs table and will result in the job being cancelled.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job to mark as completed\n\n to False for hypersearch workers\n \"\"\"\n arg_0._logger.info('Canceling jobID=%s', arg_1)\n # NOTE: jobSetFields does retries on transient mysql failures\n arg_0.jobSetFields(arg_1, {\"cancel\" : True}, useConnectionID=False)"} +{"_id": "doc_1796", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch all the modelIDs that correspond to a given jobID; empty sequence\n if none\"\"\"\n\n arg_2 = arg_0._getMatchingRowsWithRetries(arg_0._models, dict(job_id=arg_1),\n ['model_id'])\n return [arg_3[0] for arg_3 in arg_2]"} +{"_id": "doc_1797", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return the number of jobs for the given clientKey and a status that is\n not completed.\n \"\"\"\n with ConnectionFactory.get() as conn:\n arg_2 = 'SELECT count(job_id) ' \\\n 'FROM %s ' \\\n 'WHERE client_key = %%s ' \\\n ' AND status != %%s' % arg_0.jobsTableName\n conn.cursor.execute(arg_2, [arg_1, arg_0.STATUS_COMPLETED])\n arg_3 = conn.cursor.fetchone()[0]\n\n return arg_3"} +{"_id": "doc_1798", "title": "", "text": "def Func(arg_0):\n \"\"\" Delete all models from the models table\n\n Parameters:\n ----------------------------------------------------------------\n \"\"\"\n arg_0._logger.info('Deleting all rows from models table %r',\n arg_0.modelsTableName)\n with ConnectionFactory.get() as conn:\n arg_1 = 'DELETE FROM %s' % (arg_0.modelsTableName)\n conn.cursor.execute(arg_1)"} +{"_id": "doc_1799", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get ALL info for a set of models\n\n WARNING!!!: The order of the results are NOT necessarily in the same order as\n the order of the model IDs passed in!!!\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: list of model IDs\n retval: list of nametuples containing all the fields stored for each\n model.\n \"\"\"\n assert isinstance(arg_1, arg_0._SEQUENCE_TYPES), (\n \"wrong modelIDs type: %s\") % (type(arg_1),)\n assert arg_1, \"modelIDs is empty\"\n\n arg_2 = arg_0._getMatchingRowsWithRetries(\n arg_0._models, dict(model_id=arg_1),\n [arg_0._models.pubToDBNameDict[f]\n for f in arg_0._models.modelInfoNamedTuple._fields])\n\n arg_3 = [arg_0._models.modelInfoNamedTuple._make(arg_4) for arg_4 in arg_2]\n\n # NOTE: assetion will also fail if modelIDs contains duplicates\n assert len(arg_3) == len(arg_1), \"modelIDs not found: %s\" % (\n set(arg_1) - set(arg_4.modelId for arg_4 in arg_3))\n\n return arg_3"} +{"_id": "doc_1800", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\" Gets the specified fields for all the models for a single job. This is\n similar to modelsGetFields\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID for the models to be searched\n fields: A list of fields to return\n ignoreKilled: (True/False). If True, this will ignore models that\n have been killed\n\n Returns: a (possibly empty) list of tuples as follows\n [\n (model_id1, [field1, ..., fieldn]),\n (model_id2, [field1, ..., fieldn]),\n (model_id3, [field1, ..., fieldn])\n ...\n ]\n\n NOTE: since there is a window of time between a job getting inserted into\n jobs table and the job's worker(s) starting up and creating models, an\n empty-list result is one of the normal outcomes.\n \"\"\"\n\n assert len(arg_2) >= 1, 'fields is empty'\n\n # Form the sequence of field name strings that will go into the\n # request\n arg_4 = [arg_0._models.pubToDBNameDict[x] for x in arg_2]\n arg_5 = ','.join(arg_4)\n\n arg_6 = 'SELECT model_id, %s FROM %s ' \\\n ' WHERE job_id=%%s ' \\\n % (arg_5, arg_0.modelsTableName)\n arg_7 = [arg_1]\n\n if arg_3:\n arg_6 += ' AND (completion_reason IS NULL OR completion_reason != %s)'\n arg_7.append(arg_0.CMPL_REASON_KILLED)\n\n # Get a database connection and cursor\n with ConnectionFactory.get() as conn:\n conn.cursor.execute(arg_6, arg_7)\n arg_8 = conn.cursor.fetchall()\n\n if arg_8 is None:\n # fetchall is defined to return a (possibly-empty) sequence of\n # sequences; however, we occasionally see None returned and don't know\n # why...\n arg_0._logger.error(\"Unexpected None result from cursor.fetchall; \"\n \"query=%r; Traceback=%r\",\n arg_6, traceback.format_exc())\n\n return [(arg_9[0], list(arg_9[1:])) for arg_9 in arg_8]"} +{"_id": "doc_1801", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get the params and paramsHash for a set of models.\n\n WARNING!!!: The order of the results are NOT necessarily in the same order as\n the order of the model IDs passed in!!!\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: list of model IDs\n retval: list of result namedtuples defined in\n ClientJobsDAO._models.getParamsNamedTuple. Each tuple\n contains: (modelId, params, engParamsHash)\n \"\"\"\n assert isinstance(arg_1, arg_0._SEQUENCE_TYPES), (\n \"Wrong modelIDs type: %r\") % (type(arg_1),)\n assert len(arg_1) >= 1, \"modelIDs is empty\"\n\n arg_2 = arg_0._getMatchingRowsWithRetries(\n arg_0._models, {'model_id' : arg_1},\n [arg_0._models.pubToDBNameDict[f]\n for f in arg_0._models.getParamsNamedTuple._fields])\n\n # NOTE: assertion will also fail when modelIDs contains duplicates\n assert len(arg_2) == len(arg_1), \"Didn't find modelIDs: %r\" % (\n (set(arg_1) - set(arg_3[0] for arg_3 in arg_2)),)\n\n # Return the params and params hashes as a namedtuple\n return [arg_0._models.getParamsNamedTuple._make(arg_3) for arg_3 in arg_2]"} +{"_id": "doc_1802", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get the results string and other status fields for a set of models.\n\n WARNING!!!: The order of the results are NOT necessarily in the same order\n as the order of the model IDs passed in!!!\n\n For each model, this returns a tuple containing:\n (modelID, results, status, updateCounter, numRecords, completionReason,\n completionMsg, engParamsHash\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: list of model IDs\n retval: list of result tuples. 
Each tuple contains:\n (modelID, results, status, updateCounter, numRecords,\n completionReason, completionMsg, engParamsHash)\n \"\"\"\n assert isinstance(arg_1, arg_0._SEQUENCE_TYPES), (\n \"Wrong modelIDs type: %r\") % type(arg_1)\n assert len(arg_1) >= 1, \"modelIDs is empty\"\n\n arg_2 = arg_0._getMatchingRowsWithRetries(\n arg_0._models, {'model_id' : arg_1},\n [arg_0._models.pubToDBNameDict[f]\n for f in arg_0._models.getResultAndStatusNamedTuple._fields])\n\n # NOTE: assertion will also fail when modelIDs contains duplicates\n assert len(arg_2) == len(arg_1), \"Didn't find modelIDs: %r\" % (\n (set(arg_1) - set(arg_3[0] for arg_3 in arg_2)),)\n\n # Return the results as a list of namedtuples\n return [arg_0._models.getResultAndStatusNamedTuple._make(arg_3) for arg_3 in arg_2]"} +{"_id": "doc_1803", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Disable writing of output tap files.\n \"\"\"\n\n if arg_0._tapFileIn is not None:\n arg_0._tapFileIn.close()\n arg_0._tapFileIn = None\n if arg_0._tapFileOut is not None:\n arg_0._tapFileOut.close()\n arg_0._tapFileOut = None"} +{"_id": "doc_1804", "title": "", "text": "def Func(arg_0):\n \"\"\"Does nothing. Kept here for API compatibility \"\"\"\n if arg_0._doSphering:\n arg_0._finishSphering()\n\n arg_0._knn.finishLearning()\n\n # Compute leave-one-out validation accuracy if\n # we actually received non-trivial partition info\n arg_0._accuracy = None"} +{"_id": "doc_1805", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Intercepts TemporalMemory deserialization request in order to initialize\n `TemporalMemoryMonitorMixin` state\n\n @param proto (DynamicStructBuilder) Proto object\n\n @return (TemporalMemory) TemporalMemory shim instance\n \"\"\"\n arg_2 = super(TemporalMemoryMonitorMixin, arg_0).Func(arg_1)\n\n # initialize `TemporalMemoryMonitorMixin` attributes\n arg_2.mmName = None\n arg_2._mmTraces = None\n arg_2._mmData = None\n arg_2.mmClearHistory()\n arg_2._mmResetActive = True\n return arg_2"} +{"_id": "doc_1806", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Pick a value according to the provided distribution.\n\n Example:\n\n ::\n\n Func([.2, .1])\n\n Returns 0 two thirds of the time and 1 one third of the time.\n\n :param distribution: Probability distribution. Need not be normalized.\n :param r: Instance of random.Random. 
Uses the system instance if one is\n not provided.\n \"\"\"\n\n if arg_1 is None:\n arg_1 = random\n\n arg_2 = arg_1.uniform(0, sum(arg_0))\n for arg_3, arg_4 in enumerate(arg_0):\n if arg_2 <= arg_4:\n return arg_3\n arg_2 -= arg_4"} +{"_id": "doc_1807", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns an array of length size and type dtype that is everywhere 0,\n except in the indices listed in sequence pos.\n\n :param pos: A single integer or sequence of integers that specify\n the position of ones to be set.\n :param size: The total size of the array to be returned.\n :param dtype: The element type (compatible with NumPy array())\n of the array to be returned.\n :returns: An array of length size and element type dtype.\n \"\"\"\n arg_3 = numpy.zeros(arg_1, arg_2=arg_2)\n if hasattr(arg_0, '__iter__'):\n for arg_4 in arg_0: arg_3[arg_4] = 1\n else: arg_3[arg_0] = 1\n return arg_3"} +{"_id": "doc_1808", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add distribution to row row.\n Distribution should be an array of probabilities or counts.\n\n :param row: Integer index of the row to add to.\n May be larger than the current number of rows, in which case\n the histogram grows.\n :param distribution: Array of length equal to the number of columns.\n \"\"\"\n arg_0.grow(arg_1+1, len(arg_2))\n arg_0.hist_.axby(arg_1, 1, 1, arg_2)\n arg_0.rowSums_[arg_1] += arg_2.sum()\n arg_0.colSums_ += arg_2\n arg_0.hack_ = None"} +{"_id": "doc_1809", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n **arg_3\n ):\n \"\"\"\n Run a named function specified by a filesystem path, module name\n and function name.\n\n Returns the value returned by the imported function.\n\n Use this when access is needed to code that has\n not been added to a package accessible from the ordinary Python\n path. Encapsulates the multiple lines usually needed to\n safely manipulate and restore the Python path.\n\n Parameters\n ----------\n path: filesystem path\n Path to the directory where the desired module is stored.\n This will be used to temporarily augment the Python path.\n\n moduleName: basestring\n Name of the module, without trailing extension, where the desired\n function is stored. 
This module should be in the directory specified\n with path.\n\n funcName: basestring\n Name of the function to import and call.\n\n keywords:\n Keyword arguments to be passed to the imported function.\n \"\"\"\n import arg_6\n arg_4 = arg_6.path\n try:\n arg_5 = [arg_0] + arg_6.path\n arg_6.path = arg_5\n arg_7 = getattr(__import__(arg_1, fromlist=[arg_2]), arg_2)\n arg_6.path = arg_4\n except:\n # Restore the original path in case of an exception.\n arg_6.path = arg_4\n raise\n return arg_7(**arg_3)"} +{"_id": "doc_1810", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Routine for computing a moving average.\n\n @param slidingWindow a list of previous values to use in computation that\n will be modified and returned\n @param total the sum of the values in slidingWindow to be used in the\n calculation of the moving average\n @param newVal a new number Func the new windowed average\n @param windowSize how many values to use in the moving window\n\n @returns an updated windowed average, the modified input slidingWindow list,\n and the new total sum of the sliding window\n \"\"\"\n if len(arg_0) == arg_3:\n arg_1 -= arg_0.pop(0)\n\n arg_0.append(arg_2)\n arg_1 += arg_2\n return float(arg_1) / len(arg_0), arg_0, arg_1"} +{"_id": "doc_1811", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Instance method wrapper around compute.\"\"\"\n arg_2, arg_0.slidingWindow, arg_0.total = arg_0.compute(\n arg_0.slidingWindow, arg_0.total, arg_1, arg_0.windowSize)\n return arg_2"} +{"_id": "doc_1812", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Helper function to return a scalar value representing the most\n likely outcome given a probability distribution\n \"\"\"\n if len(arg_1) == 1:\n return arg_1.keys()[0]\n\n arg_2 = None\n arg_3 = 0\n\n for arg_4, arg_5 in arg_1.items():\n if arg_5 > arg_3:\n arg_2 = arg_4\n arg_3 = arg_5\n\n return arg_2"} +{"_id": "doc_1813", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Helper function to return a scalar value representing the expected\n value of a probability distribution\n \"\"\"\n if len(arg_1) == 1:\n return arg_1.keys()[0]\n\n return sum([arg_2*arg_3 for arg_2,arg_3 in arg_1.items()])"} +{"_id": "doc_1814", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Return the field names for each of the scalar values returned by\n getScalars.\n\n :param parentFieldName: The name of the encoder which is our parent. This\n name is prefixed to each of the field names within this encoder to\n form the keys of the dict() in the retval.\n\n :return: array of field names\n \"\"\"\n arg_2 = []\n\n if arg_0.encoders is not None:\n for (arg_3, arg_4, arg_5) in arg_0.encoders:\n arg_6 = arg_4.Func(arg_1=arg_3)\n if arg_1 != '':\n arg_6 = ['%s.%s' % (arg_1, arg_3) for arg_3 in arg_6]\n arg_2.extend(arg_6)\n else:\n if arg_1 != '':\n arg_2.append(arg_1)\n else:\n arg_2.append(arg_0.name)\n\n return arg_2"} +{"_id": "doc_1815", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Gets the value of a given field from the input record\n \"\"\"\n if isinstance(arg_1, dict):\n if not arg_2 in arg_1:\n arg_3 = \", \".join(\n key for key in arg_1.keys() if not key.startswith(\"_\")\n )\n raise ValueError(\n \"Unknown field name '%s' in input record. 
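doc_1810/doc_1811 implement an O(1) windowed moving average by threading the window list and running total through each call. A minimal sketch of the same update, under the assumption that the caller owns both pieces of state:

```python
def update_moving_average(window, total, new_value, window_size):
    if len(window) == window_size:
        total -= window.pop(0)   # evict the oldest sample
    window.append(new_value)
    total += new_value
    return total / len(window), window, total

window, total = [], 0.0
for value in [3.0, 5.0, 7.0, 9.0]:
    avg, window, total = update_moving_average(window, total, value, 3)
print(avg)  # (5 + 7 + 9) / 3 = 7.0
```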
Known fields are '%s'.\\n\"\n \"This could be because input headers are mislabeled, or because \"\n \"input data rows do not contain a value for '%s'.\" % (\n arg_2, arg_3, arg_2\n )\n )\n return arg_1[arg_2]\n else:\n return getattr(arg_1, arg_2)"} +{"_id": "doc_1816", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the offset and length of a given field within the encoded output.\n\n :param fieldName: Name of the field\n :return: tuple(``offset``, ``width``) of the field within the encoded output\n \"\"\"\n\n # Find which field it's in\n arg_2 = arg_0.getDescription() + [(\"end\", arg_0.getWidth())]\n for arg_3 in xrange(len(arg_2)):\n (arg_4, arg_5) = arg_2[arg_3]\n if (arg_4 == arg_1):\n break\n\n if arg_3 >= len(arg_2)-1:\n raise RuntimeError(\"Field name %s not found in this encoder\" % arg_1)\n\n # Return the offset and width\n return (arg_5, arg_2[arg_3+1][1] - arg_5)"} +{"_id": "doc_1817", "title": "", "text": "def Func(arg_0, arg_1, arg_2=''):\n \"\"\"\n Takes an encoded output and does its best to work backwards and generate\n the input that would have generated it.\n\n In cases where the encoded output contains more ON bits than an input\n would have generated, this routine will return one or more ranges of inputs\n which, if their encoded outputs were ORed together, would produce the\n target output. This behavior makes this method suitable for doing things\n like generating a description of a learned coincidence in the SP, which\n in many cases might be a union of one or more inputs.\n\n If instead, you want to figure the *most likely* single input scalar value\n that would have generated a specific encoded output, use the\n :meth:`.topDownCompute` method.\n\n If you want to pretty print the return value from this method, use the\n :meth:`.FuncdToStr` method.\n\n :param encoded: The encoded output that you want Func\n :param parentFieldName: The name of the encoder which is our parent. This name\n is prefixed to each of the field names within this encoder to form the\n keys of the dict() in the retval.\n\n :return: tuple(``fieldsDict``, ``fieldOrder``)\n\n ``fieldsDict`` is a dict() where the keys represent field names\n (only 1 if this is a simple encoder, > 1 if this is a multi\n or date encoder) and the values are the result of decoding each\n field. If there are no bits in encoded that would have been\n generated by a field, it won't be present in the dict. The\n key of each entry in the dict is formed by joining the passed in\n parentFieldName with the child encoder name using a '.'.\n\n Each 'value' in ``fieldsDict`` consists of (ranges, desc), where\n ranges is a list of one or more (minVal, maxVal) ranges of\n input that would generate bits in the encoded output and 'desc'\n is a pretty print description of the ranges. For encoders like\n the category encoder, the 'desc' will contain the category\n names that correspond to the scalar values included in the\n ranges.\n\n ``fieldOrder`` is a list of the keys from ``fieldsDict``, in the\n same order as the fields appear in the encoded output.\n\n TODO: when we switch to Python 2.7 or 3.x, use OrderedDict\n\n Example retvals for a scalar encoder:\n\n .. code-block:: python\n\n {'amount': ( [[1,3], [7,10]], '1-3, 7-10' )}\n {'amount': ( [[2.5,2.5]], '2.5' )}\n\n Example retval for a category encoder:\n\n .. code-block:: python\n\n {'country': ( [[1,1], [5,6]], 'US, GB, ES' )}\n\n Example retval for a multi encoder:\n\n .. 
code-block:: python\n\n {'amount': ( [[2.5,2.5]], '2.5' ),\n 'country': ( [[1,1], [5,6]], 'US, GB, ES' )}\n\n \"\"\"\n\n arg_3 = dict()\n arg_4 = []\n\n # What is the effective parent name?\n if arg_2 == '':\n arg_5 = arg_0.name\n else:\n arg_5 = \"%s.%s\" % (arg_2, arg_0.name)\n\n if arg_0.encoders is not None:\n # Merge decodings of all child encoders together\n for arg_6 in xrange(len(arg_0.encoders)):\n\n # Get the encoder and the encoded output\n (arg_7, arg_8, arg_9) = arg_0.encoders[arg_6]\n if arg_6 < len(arg_0.encoders)-1:\n arg_10 = arg_0.encoders[arg_6+1][2]\n else:\n arg_10 = arg_0.width\n arg_11 = arg_1[arg_9:arg_10]\n (arg_12, arg_13) = arg_8.Func(arg_11,\n arg_2=arg_5)\n\n arg_3.update(arg_12)\n arg_4.extend(arg_13)\n\n\n return (arg_3, arg_4)"} +{"_id": "doc_1818", "title": "", "text": "def Func(arg_0):\n \"\"\"create a random input vector\"\"\"\n\n print \"-\" * 70 + \"Creating a random input vector\" + \"-\" * 70\n\n #clear the inputArray to zero before creating a new input vector\n arg_0.inputArray[0:] = 0\n\n for arg_2 in range(arg_0.inputSize):\n #randrange returns 0 or 1\n arg_0.inputArray[arg_2] = random.randrange(2)"} +{"_id": "doc_1819", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=True,\n arg_4=None):\n \"\"\"Finds the category that best matches the input pattern. Returns the\n winning category index as well as a distribution over all categories.\n\n :param inputPattern: (list or array) The pattern to be classified. This\n must be a dense representation of the array (e.g. [0, 0, 1, 1, 0, 1]).\n\n :param computeScores: NO EFFECT\n\n :param overCategories: NO EFFECT\n\n :param partitionId: (int) If provided, all training vectors with partitionId\n equal to that of the input pattern are ignored.\n For example, this may be used to perform k-fold cross validation\n without repopulating the classifier. First partition all the data into\n k equal partitions numbered 0, 1, 2, ... and then call learn() for each\n vector passing in its partitionId. Then, during Funcence, by passing\n in the partition ID in the call to Func(), all other vectors with the\n same partitionId are ignored simulating the effect of repopulating the\n classifier while ommitting the training vectors in the same partition.\n\n :returns: 4-tuple with these keys:\n\n - ``winner``: The category with the greatest number of nearest neighbors\n within the kth nearest neighbors. If the FuncenceResult contains no\n neighbors, the value of winner is None. This can happen, for example,\n in cases of exact matching, if there are no stored vectors, or if\n minSparsity is not met.\n - ``FuncenceResult``: A list of length numCategories, each entry contains\n the number of neighbors within the top k neighbors that are in that\n category.\n - ``dist``: A list of length numPrototypes. Each entry is the distance\n from the unknown to that prototype. All distances are between 0.0 and\n 1.0.\n - ``categoryDist``: A list of length numCategories. Each entry is the\n distance from the unknown to the nearest prototype of\n that category. All distances are between 0 and 1.0.\n \"\"\"\n\n # Calculate sparsity. If sparsity is too low, we do not want to run\n # Funcence with this vector\n arg_5 = 0.0\n if arg_0.minSparsity > 0.0:\n arg_5 = ( float(len(arg_1.nonzero()[0])) /\n len(arg_1) )\n\n if len(arg_0._categoryList) == 0 or arg_5 < arg_0.minSparsity:\n # No categories learned yet; i.e. 
first Funcence w/ online learning or\n # insufficient sparsity\n arg_6 = None\n arg_7 = numpy.zeros(1)\n arg_8 = numpy.ones(1)\n arg_9 = numpy.ones(1)\n\n else:\n arg_10 = max(arg_0._categoryList)\n arg_7 = numpy.zeros(arg_10+1)\n arg_8 = arg_0._getDistances(arg_1, arg_4=arg_4)\n arg_11 = len(arg_0._categoryList) - arg_0._categoryList.count(-1)\n\n # Loop through the indices of the nearest neighbors.\n if arg_0.exact:\n # Is there an exact match in the distances?\n arg_12 = numpy.where(arg_8<0.00001)[0]\n if len(arg_12) > 0:\n for arg_13 in arg_12[:min(arg_0.k, arg_11)]:\n arg_7[arg_0._categoryList[arg_13]] += 1.0\n else:\n arg_14 = arg_8.argsort()\n for arg_15 in arg_14[:min(arg_0.k, arg_11)]:\n arg_7[arg_0._categoryList[arg_15]] += 1.0\n\n # Prepare Funcence results.\n if arg_7.any():\n arg_6 = arg_7.argmax()\n arg_7 /= arg_7.sum()\n else:\n arg_6 = None\n arg_9 = min_score_per_category(arg_10,\n arg_0._categoryList, arg_8)\n arg_9.clip(0, 1.0, arg_9)\n\n if arg_0.verbosity >= 1:\n print \"%s Func:\" % (g_debugPrefix)\n print \" active inputs:\", _labeledInput(arg_1,\n cellsPerCol=arg_0.cellsPerCol)\n print \" winner category:\", arg_6\n print \" pct neighbors of each category:\", arg_7\n print \" dist of each prototype:\", arg_8\n print \" dist of each category:\", arg_9\n\n arg_16 = (arg_6, arg_7, arg_8, arg_9)\n return arg_16"} +{"_id": "doc_1820", "title": "", "text": "def Func(arg_0, arg_1, arg_2=3):\n \"\"\"Returns the index of the pattern that is closest to inputPattern,\n the distances of all patterns to inputPattern, and the indices of the k\n closest categories.\n \"\"\"\n arg_3 = numpy.zeros(max(arg_0._categoryList)+1)\n arg_4 = arg_0._getDistances(arg_1)\n\n arg_5 = arg_4.argsort()\n\n arg_6 = len(arg_0._categoryList) - arg_0._categoryList.count(-1)\n for arg_7 in arg_5[:min(arg_0.k, arg_6)]:\n arg_3[arg_0._categoryList[arg_7]] += 1.0\n\n arg_8 = arg_3.argmax()\n\n arg_9 = []\n for arg_10 in range(arg_2):\n arg_9.append((arg_0._categoryList[arg_5[arg_10]], arg_4[arg_5[arg_10]] ))\n\n return arg_8, arg_4, arg_9"} +{"_id": "doc_1821", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns the closest training pattern to inputPattern that belongs to\n category \"cat\".\n\n :param inputPattern: The pattern whose closest neighbor is sought\n\n :param cat: The required category of closest neighbor\n\n :returns: A dense version of the closest training pattern, or None if no\n such patterns exist\n \"\"\"\n arg_3 = arg_0._getDistances(arg_1)\n arg_4 = arg_3.argsort()\n\n for arg_5 in arg_4:\n arg_6 = arg_0._categoryList[arg_5]\n\n # If closest pattern belongs to desired category, return it\n if arg_6 == arg_2:\n if arg_0.useSparseMemory:\n arg_7 = arg_0._Memory.getRow(int(arg_5))\n else:\n arg_7 = arg_0._M[arg_5]\n\n return arg_7\n\n # No patterns were found!\n return None"} +{"_id": "doc_1822", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Gets a training pattern either by index or category number.\n\n :param idx: Index of the training pattern\n\n :param sparseBinaryForm: If true, returns a list of the indices of the\n non-zero bits in the training pattern\n\n :param cat: If not None, get the first pattern belonging to category cat. 
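The classifier records above (doc_1819/doc_1820) resolve an inference by letting the k nearest prototypes vote for their categories and normalizing the votes. A standalone sketch of that vote, assuming a precomputed distance array; function and variable names are illustrative only:

```python
import numpy

def knn_vote(distances, category_list, k):
    votes = numpy.zeros(max(category_list) + 1)
    for idx in distances.argsort()[:k]:        # k closest prototypes
        votes[category_list[idx]] += 1.0
    winner = int(votes.argmax()) if votes.any() else None
    if votes.any():
        votes /= votes.sum()                   # turn counts into a distribution
    return winner, votes

dists = numpy.array([0.9, 0.1, 0.2, 0.8])
print(knn_vote(dists, [0, 1, 1, 0], k=3))      # (1, array([0.333..., 0.666...]))
```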
If\n this is specified, idx must be None.\n\n :returns: The training pattern with specified index\n \"\"\"\n if arg_3 is not None:\n assert arg_1 is None\n arg_1 = arg_0._categoryList.index(arg_3)\n\n if not arg_0.useSparseMemory:\n arg_4 = arg_0._Memory[arg_1]\n if arg_2:\n arg_4 = arg_4.nonzero()[0]\n\n else:\n (arg_5, arg_6) = arg_0._Memory.rowNonZeros(arg_1)\n if not arg_2:\n arg_4 = numpy.zeros(arg_0._Memory.nCols())\n numpy.put(arg_4, arg_5, 1)\n else:\n arg_4 = arg_5\n\n return arg_4"} +{"_id": "doc_1823", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets the partition id given an index.\n\n :param i: index of partition\n :returns: the partition id associated with pattern i. Returns None if no id\n is associated with it.\n \"\"\"\n if (arg_1 < 0) or (arg_1 >= arg_0._numPatterns):\n raise RuntimeError(\"index out of bounds\")\n arg_2 = arg_0._partitionIdList[arg_1]\n if arg_2 == numpy.inf:\n return None\n else:\n return arg_2"} +{"_id": "doc_1824", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Adds partition id for pattern index\n \"\"\"\n if arg_2 is None:\n arg_0._partitionIdList.append(numpy.inf)\n else:\n arg_0._partitionIdList.append(arg_2)\n arg_3 = arg_0._partitionIdMap.get(arg_2, [])\n arg_3.append(arg_1)\n arg_0._partitionIdMap[arg_2] = arg_3"} +{"_id": "doc_1825", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Rebuilds the partition Id map using the given partitionIdList\n \"\"\"\n arg_0._partitionIdMap = {}\n for arg_3, arg_4 in enumerate(arg_1):\n arg_5 = arg_0._partitionIdMap.get(arg_4, [])\n arg_5.append(arg_3)\n arg_0._partitionIdMap[arg_4] = arg_5"} +{"_id": "doc_1826", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Calculate the distances from inputPattern to all stored patterns. 
All\n distances are between 0.0 and 1.0\n\n :param inputPattern The pattern from which distances to all other patterns\n are calculated\n\n :param distanceNorm Degree of the distance norm\n \"\"\"\n if arg_2 is None:\n arg_2 = arg_0.distanceNorm\n\n # Sparse memory\n if arg_0.useSparseMemory:\n if arg_0._protoSizes is None:\n arg_0._protoSizes = arg_0._Memory.rowSums()\n arg_4 = arg_0._Memory.rightVecSumAtNZ(arg_1)\n arg_5 = arg_1.sum()\n\n if arg_0.distanceMethod == \"rawOverlap\":\n arg_6 = arg_1.sum() - arg_4\n elif arg_0.distanceMethod == \"pctOverlapOfInput\":\n arg_6 = arg_5 - arg_4\n if arg_5 > 0:\n arg_6 /= arg_5\n elif arg_0.distanceMethod == \"pctOverlapOfProto\":\n arg_4 /= arg_0._protoSizes\n arg_6 = 1.0 - arg_4\n elif arg_0.distanceMethod == \"pctOverlapOfLarger\":\n arg_7 = numpy.maximum(arg_0._protoSizes, arg_5)\n if arg_7.all() > 0:\n arg_4 /= arg_7\n arg_6 = 1.0 - arg_4\n elif arg_0.distanceMethod == \"norm\":\n arg_6 = arg_0._Memory.vecLpDist(arg_0.distanceNorm, arg_1)\n arg_8 = arg_6.max()\n if arg_8 > 0:\n arg_6 /= arg_8\n else:\n raise RuntimeError(\"Unimplemented distance method %s\" %\n arg_0.distanceMethod)\n\n # Dense memory\n else:\n if arg_0.distanceMethod == \"norm\":\n arg_6 = numpy.power(numpy.abs(arg_0._M - arg_1), arg_0.distanceNorm)\n arg_6 = arg_6.sum(1)\n arg_6 = numpy.power(arg_6, 1.0/arg_0.distanceNorm)\n arg_6 /= arg_6.max()\n else:\n raise RuntimeError (\"Not implemented yet for dense storage....\")\n\n return arg_6"} +{"_id": "doc_1827", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Return the distances from inputPattern to all stored patterns.\n\n :param inputPattern The pattern from which distances to all other patterns\n are returned\n\n :param partitionId If provided, ignore all training vectors with this\n partitionId.\n \"\"\"\n if not arg_0._finishedLearning:\n arg_0.finishLearning()\n arg_0._finishedLearning = True\n\n if arg_0._vt is not None and len(arg_0._vt) > 0:\n arg_1 = arg_6.dot(arg_0._vt, arg_1 - arg_0._mean)\n\n arg_4 = arg_0._sparsifyVector(arg_1)\n\n # Compute distances\n arg_5 = arg_0._calcDistance(arg_4)\n # Invalidate results where category is -1\n if arg_0._specificIndexTraining:\n arg_5[arg_6.array(arg_0._categoryList) == -1] = arg_6.inf\n\n # Ignore vectors with this partition id by setting their distances to inf\n if arg_2 is not None:\n arg_5[arg_0._partitionIdMap.get(arg_2, [])] = arg_6.inf\n\n return arg_5"} +{"_id": "doc_1828", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Change the category indices.\n\n Used by the Network Builder to keep the category indices in sync with the\n ImageSensor categoryInfo when the user renames or removes categories.\n\n :param mapping: List of new category indices. 
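Among the distance methods in doc_1826, "rawOverlap" scores a stored prototype by how many ON bits of the input it fails to match, so 0 means the prototype covers the input exactly. A dense-vector sketch of that computation (the record itself operates on a sparse matrix):

```python
import numpy

def raw_overlap_distances(input_pattern, prototypes):
    overlaps = prototypes.dot(input_pattern)   # ON-bit overlap per prototype row
    return input_pattern.sum() - overlaps      # unmatched ON bits = distance

protos = numpy.array([[1, 1, 0, 0],
                      [0, 1, 1, 0]])
x = numpy.array([1, 1, 0, 0])
print(raw_overlap_distances(x, protos))        # [0 1]
```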
For example, mapping=[2,0,1]\n would change all vectors of category 0 to be category 2, category 1 to\n 0, and category 2 to 1\n \"\"\"\n arg_2 = numpy.array(arg_0._categoryList)\n arg_3 = numpy.zeros(arg_2.shape[0])\n arg_3.fill(-1)\n for arg_4 in xrange(len(arg_1)):\n arg_3[arg_2==arg_4] = arg_1[arg_4]\n arg_0._categoryList = list(arg_3)"} +{"_id": "doc_1829", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Computes the width of dataOut.\n\n Overrides \n :meth:`nupic.bindings.regions.PyRegion.PyRegion.Func`.\n \"\"\"\n\n if arg_1 == \"resetOut\":\n print (\"WARNING: Func should not have been called with \"\n \"resetOut\")\n return 1\n\n elif arg_1 == \"sequenceIdOut\":\n print (\"WARNING: Func should not have been called with \"\n \"sequenceIdOut\")\n return 1\n\n elif arg_1 == \"dataOut\":\n if arg_0.encoder is None:\n raise Exception(\"NuPIC requested output element count for 'dataOut' \"\n \"on a RecordSensor node, but the encoder has not \"\n \"been set\")\n return arg_0.encoder.getWidth()\n\n elif arg_1 == \"sourceOut\":\n if arg_0.encoder is None:\n raise Exception(\"NuPIC requested output element count for 'sourceOut' \"\n \"on a RecordSensor node, \"\n \"but the encoder has not been set\")\n return len(arg_0.encoder.getDescription())\n\n elif arg_1 == \"bucketIdxOut\":\n return 1\n\n elif arg_1 == \"actValueOut\":\n return 1\n\n elif arg_1 == \"categoryOut\":\n return arg_0.numCategories\n\n elif arg_1 == 'spatialTopDownOut' or arg_1 == 'temporalTopDownOut':\n if arg_0.encoder is None:\n raise Exception(\"NuPIC requested output element count for 'sourceOut' \"\n \"on a RecordSensor node, \"\n \"but the encoder has not been set\")\n return len(arg_0.encoder.getDescription())\n else:\n raise Exception(\"Unknown output %s\" % arg_1)"} +{"_id": "doc_1830", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Set the value of a Spec parameter. Most parameters are handled\n automatically by PyRegion's parameter set mechanism. 
The ones that need\n special treatment are explicitly handled here.\n \"\"\"\n if arg_1 == 'topDownMode':\n arg_0.topDownMode = arg_3\n elif arg_1 == 'predictedField':\n arg_0.predictedField = arg_3\n else:\n raise Exception('Unknown parameter: ' + arg_1)"} +{"_id": "doc_1831", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Saves the record in the underlying csv file.\n\n :param record: a list of Python objects that will be string-ified\n \"\"\"\n\n assert arg_0._file is not None\n assert arg_0._mode == arg_0._FILE_WRITE_MODE\n assert isinstance(arg_1, (list, tuple)), \\\n \"unexpected record type: \" + repr(type(arg_1))\n\n assert len(arg_1) == arg_0._fieldCount, \\\n \"len(record): %s, fieldCount: %s\" % (len(arg_1), arg_0._fieldCount)\n\n # Write header if needed\n if arg_0._recordCount == 0:\n # Write the header\n arg_2, arg_3, arg_4 = zip(*arg_0.getFields())\n for arg_5 in arg_2, arg_3, arg_4:\n arg_0._writer.writerow(arg_5)\n\n # Keep track of sequences, make sure time flows forward\n arg_0._updateSequenceInfo(arg_1)\n\n arg_5 = [arg_0._adapters[i](f) for i, f in enumerate(arg_1)]\n\n arg_0._writer.writerow(arg_5)\n arg_0._recordCount += 1"} +{"_id": "doc_1832", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Saves multiple records in the underlying storage.\n\n :param records: array of records as in\n :meth:`~.FileRecordStream.appendRecord`\n :param progressCB: (function) callback to report progress\n \"\"\"\n\n for arg_3 in arg_1:\n arg_0.appendRecord(arg_3)\n if arg_2 is not None:\n arg_2()"} +{"_id": "doc_1833", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Gets a bookmark or anchor to the current position.\n\n :returns: an anchor to the current position in the data. Passing this\n anchor to a constructor makes the current position to be the first\n returned record.\n \"\"\"\n\n if arg_0._write and arg_0._recordCount==0:\n return None\n\n arg_1 = dict(filepath=os.path.realpath(arg_0._filename),\n currentRow=arg_0._recordCount)\n return json.dumps(arg_1)"} +{"_id": "doc_1834", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Seeks to ``numRecords`` from the end and returns a bookmark to the new\n position.\n\n :param numRecords: how far to seek from end of file.\n :return: bookmark to desired location.\n \"\"\"\n arg_0._file.seek(arg_0._getTotalLineCount() - arg_1)\n return arg_0.getBookmark()"} +{"_id": "doc_1835", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Keep track of sequence and make sure time goes forward\n\n Check if the current record is the beginning of a new sequence\n A new sequence starts in 2 cases:\n\n 1. The sequence id changed (if there is a sequence id field)\n 2. The reset field is 1 (if there is a reset field)\n\n Note that if there is no sequenceId field or resetId field then the entire\n dataset is technically one big sequence. The function will not return True\n for the first record in this case. 
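doc_1833 serializes a stream position as a JSON "bookmark" (real file path plus current row) so a record stream can later be reopened at the same place. A minimal sketch of the idea; the field names mirror the record but the helper functions are assumptions:

```python
import json
import os

def make_bookmark(filename, record_count):
    return json.dumps({"filepath": os.path.realpath(filename),
                       "currentRow": record_count})

def rows_to_skip(bookmark):
    return json.loads(bookmark)["currentRow"]

mark = make_bookmark("data.csv", 42)
print(rows_to_skip(mark))  # 42
```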
This is Ok because it is important to\n detect new sequences only when there are multiple sequences in the file.\n \"\"\"\n\n # Get current sequence id (if any)\n arg_2 = False\n arg_3 = (arg_1[arg_0._sequenceIdIdx]\n if arg_0._sequenceIdIdx is not None else None)\n if arg_3 != arg_0._currSequence:\n # verify that the new sequence didn't show up before\n if arg_3 in arg_0._sequences:\n raise Exception('Broken sequence: %s, record: %s' % \\\n (arg_3, arg_1))\n\n # add the finished sequence to the set of sequence\n arg_0._sequences.add(arg_0._currSequence)\n arg_0._currSequence = arg_3\n\n # Verify that the reset is consistent (if there is one)\n if arg_0._resetIdx:\n assert arg_1[arg_0._resetIdx] == 1\n arg_2 = True\n\n else:\n # Check the reset\n arg_5 = False\n if arg_0._resetIdx:\n arg_5 = arg_1[arg_0._resetIdx]\n if arg_5 == 1:\n arg_2 = True\n\n # If it's still the same old sequence make sure the time flows forward\n if not arg_2:\n if arg_0._timeStampIdx and arg_0._currTime is not None:\n arg_6 = arg_1[arg_0._timeStampIdx]\n if arg_6 < arg_0._currTime:\n raise Exception('No time travel. Early timestamp for record: %s' % arg_1)\n\n if arg_0._timeStampIdx:\n arg_0._currTime = arg_1[arg_0._timeStampIdx]"} +{"_id": "doc_1836", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Returns the number of records that elapse between when an inference is\n made and when the corresponding input record will appear. For example, a\n multistep prediction for 3 timesteps out will have a delay of 3\n\n\n Parameters:\n -----------------------------------------------------------------------\n\n inferenceElement: The InferenceElement value being delayed\n key: If the inference is a dictionary type, this specifies\n key for the sub-inference that is being delayed\n \"\"\"\n # -----------------------------------------------------------------------\n # For next step prediction, we shift by 1\n if arg_0 in (InferenceElement.prediction,\n InferenceElement.encodings):\n return 1\n # -----------------------------------------------------------------------\n # For classification, anomaly scores, the inferences immediately succeed the\n # inputs\n if arg_0 in (InferenceElement.anomalyScore,\n InferenceElement.anomalyLabel,\n InferenceElement.classification,\n InferenceElement.classConfidences):\n return 0\n # -----------------------------------------------------------------------\n # For multistep prediction, the delay is based on the key in the inference\n # dictionary\n if arg_0 in (InferenceElement.multiStepPredictions,\n InferenceElement.multiStepBestPredictions):\n return int(arg_1)\n\n # -----------------------------------------------------------------------\n # default: return 0\n return 0"} +{"_id": "doc_1837", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns True if the inference type is 'temporal', i.e. 
requires a\n temporal memory in the network.\n \"\"\"\n if arg_1.__temporalInferenceTypes is None:\n arg_1.__temporalInferenceTypes = \\\n set([arg_1.TemporalNextStep,\n arg_1.TemporalClassification,\n arg_1.TemporalAnomaly,\n arg_1.TemporalMultiStep,\n arg_1.NontemporalMultiStep])\n\n return arg_0 in arg_1.__temporalInferenceTypes"} +{"_id": "doc_1838", "title": "", "text": "def Func(arg_0):\n \"\"\" Makes directory for the given directory path with default permissions.\n If the directory already exists, it is treated as success.\n\n absDirPath: absolute path of the directory to create.\n\n Returns: absDirPath arg\n\n Exceptions: OSError if directory creation fails\n \"\"\"\n\n assert os.path.isabs(arg_0)\n\n try:\n os.makedirs(arg_0)\n except OSError, e:\n if e.errno != os.errno.EEXIST:\n raise\n\n return arg_0"} +{"_id": "doc_1839", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Parse the given XML file and return a dict describing the file.\n\n Parameters:\n ----------------------------------------------------------------\n filename: name of XML file to parse (no path)\n path: path of the XML file. If None, then use the standard\n configuration search path.\n retval: returns a dict with each property as a key and a dict of all\n the property's attributes as value\n \"\"\"\n\n arg_3 = dict()\n\n # Get the path to the config files.\n if arg_2 is None:\n arg_4 = arg_0.findConfigFile(arg_1)\n else:\n arg_4 = os.path.join(arg_2, arg_1)\n\n # ------------------------------------------------------------------\n # Read in the config file\n try:\n if arg_4 is not None:\n try:\n # Use warn since console log level is set to warning\n _getLoggerBase().debug(\"Loading config file: %s\", arg_4)\n with open(arg_4, 'r') as inp:\n arg_5 = inp.read()\n except Exception:\n raise RuntimeError(\"Expected configuration file at %s\" % arg_4)\n else:\n # If the file was not found in the normal search paths, which includes\n # checking the NTA_CONF_PATH, we'll try loading it from pkg_resources.\n try:\n arg_5 = resource_string(\"nupic.support\", arg_1)\n except Exception as resourceException:\n # We expect these to be read, and if they don't exist we'll just use\n # an empty configuration string.\n if arg_1 in [USER_CONFIG, CUSTOM_CONFIG]:\n arg_5 = ''\n else:\n raise resourceException\n\n arg_6 = ElementTree.XML(arg_5)\n\n if arg_6.tag != 'configuration':\n raise RuntimeError(\"Expected top-level element to be 'configuration' \"\n \"but got '%s'\" % (arg_6.tag))\n\n # ------------------------------------------------------------------\n # Add in each property found\n arg_7 = arg_6.findall('./property')\n\n for arg_8 in arg_7:\n\n arg_9 = dict()\n\n # Parse this property element\n arg_10 = list(arg_8)\n for arg_11 in arg_10:\n arg_9[arg_11.tag] = arg_11.text\n\n # Get the name\n arg_13 = arg_9.get('name', None)\n\n # value is allowed to be empty string\n if 'value' in arg_9 and arg_9['value'] is None:\n arg_14 = ''\n else:\n arg_14 = arg_9.get('value', None)\n\n if arg_14 is None:\n if 'novalue' in arg_9:\n # Placeholder \"novalue\" properties are intended to be overridden\n # via dynamic configuration or another configuration layer.\n continue\n else:\n raise RuntimeError(\"Missing 'value' element within the property \"\n \"element: => %s \" % (str(arg_9)))\n\n # The value is allowed to contain substitution tags of the form\n # ${env.VARNAME}, which should be substituted with the corresponding\n # environment variable values\n arg_15 = arg_14\n arg_14 = ''\n while True:\n # Find the 
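doc_1838 treats an already-existing directory as success by catching `EEXIST`; on Python 3 the same behavior is available directly via `exist_ok`. A one-function sketch, not the record's own API:

```python
import os

def make_directory(abs_dir_path):
    assert os.path.isabs(abs_dir_path)
    os.makedirs(abs_dir_path, exist_ok=True)  # existing directory counts as success
    return abs_dir_path
```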
beginning of substitution tag\n arg_16 = arg_15.find('${env.')\n if arg_16 == -1:\n # No more environment variable substitutions\n arg_14 += arg_15\n break\n\n # Append prefix to value accumulator\n arg_14 += arg_15[0:arg_16]\n\n # Find the end of current substitution tag\n arg_17 = arg_15.find('}', arg_16)\n if arg_17 == -1:\n raise RuntimeError(\n \"Trailing environment variable tag delimiter '}'\"\n \" not found in %r\" % (arg_15))\n\n # Extract environment variable name from tag\n arg_18 = arg_15[arg_16 + 6:arg_17]\n if arg_18 not in os.environ:\n raise RuntimeError(\"Attempting to use the value of the environment\"\n \" variable %r, which is not defined\" % (\n arg_18))\n arg_19 = os.environ[arg_18]\n\n arg_14 += arg_19\n\n arg_15 = arg_15[arg_17 + 1:]\n\n # Check for errors\n if arg_13 is None:\n raise RuntimeError(\n \"Missing 'name' element within following property \"\n \"element:\\n => %s \" % (str(arg_9)))\n\n arg_9['value'] = arg_14\n arg_3[arg_13] = arg_9\n\n return arg_3\n except Exception:\n _getLoggerBase().exception(\"Error while parsing configuration file: %s.\",\n arg_4)\n raise"} +{"_id": "doc_1840", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Set multiple custom properties and persist them to the custom\n configuration store.\n\n Parameters:\n ----------------------------------------------------------------\n properties: a dict of property name/value pairs to set\n \"\"\"\n _getLogger().info(\"Setting custom configuration properties=%r; caller=%r\",\n arg_1, traceback.format_stack())\n\n _CustomConfigurationFileWrapper.edit(arg_1)\n\n for arg_2, arg_3 in arg_1.iteritems():\n arg_0.set(arg_2, arg_3)"} +{"_id": "doc_1841", "title": "", "text": "def Func(arg_0):\n \"\"\" Clear all custom configuration settings and delete the persistent\n custom configuration store.\n \"\"\"\n _getLogger().info(\"Resetting all custom configuration properties; \"\n \"caller=%r\", traceback.format_stack())\n\n # Clear the in-memory settings cache, forcing reload upon subsequent \"get\"\n # request.\n super(Configuration, arg_0).clear()\n\n # Delete the persistent custom configuration store and reset in-memory\n # custom configuration info\n _CustomConfigurationFileWrapper.clear(persistent=True)"} +{"_id": "doc_1842", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\" If persistent is True, delete the temporary file\n\n Parameters:\n ----------------------------------------------------------------\n persistent: if True, custom configuration file is deleted\n \"\"\"\n if arg_1:\n try:\n os.unlink(arg_0.getPath())\n except OSError, e:\n if e.errno != errno.ENOENT:\n _getLogger().exception(\"Error %s while trying to remove dynamic \" \\\n \"configuration file: %s\", e.errno,\n arg_0.getPath())\n raise\n arg_0._path = None"} +{"_id": "doc_1843", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns a dict of all temporary values in custom configuration file\n\n \"\"\"\n if not os.path.exists(arg_0.getPath()):\n return dict()\n\n arg_1 = Configuration._readConfigFile(os.path.basename(\n arg_0.getPath()), os.path.dirname(arg_0.getPath()))\n\n arg_2 = dict()\n for arg_3 in arg_1:\n if 'value' in arg_1[arg_3]:\n arg_2[arg_3] = arg_1[arg_3]['value']\n\n return arg_2"} +{"_id": "doc_1844", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Edits the XML configuration file with the parameters specified by\n properties\n\n Parameters:\n ----------------------------------------------------------------\n properties: dict of settings to be applied to the custom configuration 
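The configuration parser above expands `${env.VARNAME}` tags by manual `find()` bookkeeping. The sketch below does the same substitution with a regular expression; it is a simplification in that unterminated tags pass through unchanged rather than raising, and all names are illustrative:

```python
import os
import re

def substitute_env_tags(value):
    def repl(match):
        name = match.group(1)
        if name not in os.environ:
            raise RuntimeError("environment variable %r is not defined" % name)
        return os.environ[name]
    return re.sub(r"\$\{env\.([^}]+)\}", repl, value)

os.environ["GREETING"] = "hello"
print(substitute_env_tags("${env.GREETING} world"))  # hello world
```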
store\n (key is property name, value is value)\n \"\"\"\n arg_2 = copy(arg_1)\n\n arg_3 = arg_0.getPath()\n\n try:\n with open(arg_3, 'r') as fp:\n arg_4 = fp.read()\n except IOError, arg_7:\n if arg_7.errno != errno.ENOENT:\n _getLogger().exception(\"Error %s reading custom configuration store \"\n \"from %s, while Funcing properties %s.\",\n arg_7.errno, arg_3, arg_1)\n raise\n arg_4 = ''\n\n try:\n arg_5 = ElementTree.XML(arg_4)\n ElementTree.tostring(arg_5)\n except Exception, arg_7:\n # Raising error as RuntimeError with custom message since ElementTree\n # exceptions aren't clear.\n arg_6 = \"File contents of custom configuration is corrupt. File \" \\\n \"location: %s; Contents: '%s'. Original Error (%s): %s.\" % \\\n (arg_3, arg_4, type(arg_7), arg_7)\n _getLogger().exception(arg_6)\n raise RuntimeError(arg_6), None, sys.exc_info()[2]\n\n if arg_5.tag != 'configuration':\n arg_7 = \"Expected top-level element to be 'configuration' but got '%s'\" % \\\n (arg_5.tag)\n _getLogger().error(arg_7)\n raise RuntimeError(arg_7)\n\n # Apply new properties to matching settings in the custom config store;\n # pop matching properties from our copy of the properties dict\n for arg_8 in arg_5.findall('./property'):\n arg_9 = dict((attr.tag, attr.text) for attr in arg_8)\n arg_10 = arg_9['name']\n if arg_10 in arg_2:\n arg_11 = arg_8.findall('./value')\n if len(arg_11) > 0:\n arg_11[0].text = str(arg_2.pop(arg_10))\n if not arg_2:\n break\n else:\n arg_7 = \"Property %s missing value tag.\" % (arg_10,)\n _getLogger().error(arg_7)\n raise RuntimeError(arg_7)\n\n # Add unmatched remaining properties to custom config store\n for arg_13, arg_14 in arg_2.iteritems():\n arg_15 = ElementTree.Element('property')\n arg_16 = ElementTree.Element('name')\n arg_16.text = arg_13\n arg_15.append(arg_16)\n\n arg_17 = ElementTree.Element('value')\n arg_17.text = str(arg_14)\n arg_15.append(arg_17)\n\n arg_5.append(arg_15)\n\n try:\n makeDirectoryFromAbsolutePath(os.path.dirname(arg_3))\n with open(arg_3, 'w') as fp:\n fp.write(ElementTree.tostring(arg_5))\n except Exception, arg_7:\n _getLogger().exception(\"Error while saving custom configuration \"\n \"properties %s in %s.\", arg_1,\n arg_3)\n raise"} +{"_id": "doc_1845", "title": "", "text": "def Func(arg_0):\n \"\"\" Sets the path of the custom configuration file\n \"\"\"\n arg_0._path = os.path.join(os.environ['NTA_DYNAMIC_CONF_DIR'],\n arg_0.customFileName)"} +{"_id": "doc_1846", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the particle state as a dict. 
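doc_1844 edits an XML configuration store with ElementTree: update the `<value>` of a matching `<property>`, or append a new `<property>` element when none matches. A compact sketch of that pattern (helper name and the in-memory tree are assumptions):

```python
from xml.etree import ElementTree

def set_property(root, name, value):
    for prop in root.findall("./property"):
        if prop.findtext("name") == name:
            prop.find("value").text = str(value)   # update in place
            return
    prop = ElementTree.SubElement(root, "property")  # no match: append
    ElementTree.SubElement(prop, "name").text = name
    ElementTree.SubElement(prop, "value").text = str(value)

root = ElementTree.XML("<configuration/>")
set_property(root, "db.name", "jobs")
set_property(root, "db.name", "jobs_v2")
print(ElementTree.tostring(root).decode())
```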
This is enough information to\n instantiate this particle on another worker.\"\"\"\n arg_1 = dict()\n for arg_2, arg_3 in arg_0.permuteVars.iteritems():\n arg_1[arg_2] = arg_3.Func()\n\n return dict(id=arg_0.particleId,\n genIdx=arg_0.genIdx,\n swarmId=arg_0.swarmId,\n arg_1=arg_1)"} +{"_id": "doc_1847", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Init all of our variable positions, velocities, and optionally the best\n result and best position from the given particle.\n\n If newBest is true, we get the best result and position for this new\n generation from the resultsDB, This is used when evoloving a particle\n because the bestResult and position as stored in was the best AT THE TIME\n THAT PARTICLE STARTED TO RUN and does not include the best since that\n particle completed.\n \"\"\"\n # Get the update best position and result?\n if arg_3:\n (arg_4, arg_5) = arg_0._resultsDB.getParticleBest(arg_1)\n else:\n arg_4 = arg_5 = None\n\n # Replace with the position and velocity of each variable from\n # saved state\n arg_6 = arg_2['varStates']\n for arg_7 in arg_6.keys():\n arg_8 = copy.deepcopy(arg_6[arg_7])\n if arg_3:\n arg_8['bestResult'] = arg_4\n if arg_5 is not None:\n arg_8['bestPosition'] = arg_5[arg_7]\n arg_0.permuteVars[arg_7].setState(arg_8)"} +{"_id": "doc_1848", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Copy specific variables from particleState into this particle.\n\n Parameters:\n --------------------------------------------------------------\n particleState: dict produced by a particle's getState() method\n varNames: which variables to copy\n \"\"\"\n # Set this to false if you don't want the variable to move anymore\n # after we set the state\n arg_3 = True\n\n for arg_4 in arg_1['varStates']:\n if arg_4 in arg_2:\n\n # If this particle doesn't include this field, don't copy it\n if arg_4 not in arg_0.permuteVars:\n continue\n\n # Set the best position to the copied position\n arg_5 = copy.deepcopy(arg_1['varStates'][arg_4])\n arg_5['_position'] = arg_5['position']\n arg_5['bestPosition'] = arg_5['position']\n\n if not arg_3:\n arg_5['velocity'] = 0\n\n # Set the state now\n arg_0.permuteVars[arg_4].setState(arg_5)\n\n if arg_3:\n # Let the particle move in both directions from the best position\n # it found previously and set it's initial velocity to a known\n # fraction of the total distance.\n arg_0.permuteVars[arg_4].resetVelocity(arg_0._rng)"} +{"_id": "doc_1849", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the position of a particle given its state dict.\n\n Parameters:\n --------------------------------------------------------------\n retval: dict() of particle position, keys are the variable names,\n values are their positions\n \"\"\"\n arg_1 = dict()\n for (arg_2, arg_3) in arg_0['varStates'].iteritems():\n arg_1[arg_2] = arg_3['position']\n\n return arg_1"} +{"_id": "doc_1850", "title": "", "text": "def Func(arg_0):\n \"\"\"Agitate this particle so that it is likely to go to a new position.\n Every time Func is called, the particle is jiggled an even greater\n amount.\n\n Parameters:\n --------------------------------------------------------------\n retval: None\n \"\"\"\n for (arg_1, arg_2) in arg_0.permuteVars.iteritems():\n arg_2.Func()\n\n arg_0.newPosition()"} +{"_id": "doc_1851", "title": "", "text": "def Func(arg_0, arg_1=None):\n # TODO: incorporate data from choice variables....\n # TODO: make sure we're calling this when appropriate.\n \"\"\"Choose a new position based on results obtained so far from 
all other\n particles.\n\n Parameters:\n --------------------------------------------------------------\n whichVars: If not None, only move these variables\n retval: new position\n \"\"\"\n # Get the global best position for this swarm generation\n arg_2 = None\n # If speculative particles are enabled, use the global best considering\n # even particles in the current generation. This gives better results\n # but does not provide repeatable results because it depends on\n # worker timing\n if arg_0._hsObj._speculativeParticles:\n arg_3 = arg_0.genIdx\n else:\n arg_3 = arg_0.genIdx - 1\n\n if arg_3 >= 0:\n (arg_4, arg_5) = arg_0._resultsDB.bestModelIdAndErrScore(arg_0.swarmId,\n arg_3)\n if arg_4 is not None:\n (arg_6, arg_5, arg_5, arg_5, arg_5) = arg_0._resultsDB.getParticleInfo(\n arg_4)\n arg_2 = Particle.getPositionFromState(arg_6)\n\n # Update each variable\n for (arg_7, arg_8) in arg_0.permuteVars.iteritems():\n if arg_1 is not None and arg_7 not in arg_1:\n continue\n if arg_2 is None:\n arg_8.Func(None, arg_0._rng)\n else:\n arg_8.Func(arg_2[arg_7], arg_0._rng)\n\n # get the new position\n arg_9 = arg_0.getPosition()\n\n # Log the new position\n if arg_0.logger.getEffectiveLevel() <= logging.DEBUG:\n arg_10 = StringIO.StringIO()\n print >> arg_10, \"New particle position: \\n%s\" % (pprint.pformat(arg_9,\n indent=4))\n print >> arg_10, \"Particle variables:\"\n for (arg_7, arg_8) in arg_0.permuteVars.iteritems():\n print >> arg_10, \" %s: %s\" % (arg_7, str(arg_8))\n arg_0.logger.debug(arg_10.getvalue())\n arg_10.close()\n\n return arg_9"} +{"_id": "doc_1852", "title": "", "text": "def Func(arg_0):\n \"\"\" Get the logger for this object.\n\n :returns: (Logger) A Logger object.\n \"\"\"\n if arg_0.__logger is None:\n arg_0.__logger = opf_utils.initLogger(arg_0)\n return arg_0.__logger"} +{"_id": "doc_1853", "title": "", "text": "def Func(arg_0, arg_1=arg_2.ERROR):\n \"\"\" Create a new model instance, given a description dictionary.\n\n :param modelConfig: (dict)\n A dictionary describing the current model,\n `described here <../../quick-start/example-model-params.html>`_.\n\n :param logLevel: (int) The level of logging output that should be generated\n\n :raises Exception: Unsupported model type\n\n :returns: :class:`nupic.frameworks.opf.model.Model`\n \"\"\"\n arg_4 = ModelFactory.__getLogger()\n arg_4.setLevel(arg_1)\n arg_4.debug(\"ModelFactory returning Model from dict: %s\", arg_0)\n\n arg_5 = None\n if arg_0['model'] == \"HTMPrediction\":\n arg_5 = HTMPredictionModel\n elif arg_0['model'] == \"TwoGram\":\n arg_5 = TwoGramModel\n elif arg_0['model'] == \"PreviousValue\":\n arg_5 = PreviousValueModel\n else:\n raise Exception(\"ModelFactory received unsupported Model type: %s\" % \\\n arg_0['model'])\n\n return arg_5(**arg_0['modelParams'])"} +{"_id": "doc_1854", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Perform one time step of the Temporal Memory algorithm.\n\n This method calls :meth:`activateCells`, then calls \n :meth:`activateDendrites`. 
Using :class:`TemporalMemory` via its \n :meth:`Func` method ensures that you'll always be able to call \n :meth:`getPredictiveCells` to get predictions for the next time step.\n\n :param activeColumns: (iter) Indices of active columns.\n\n :param learn: (bool) Whether or not learning is enabled.\n \"\"\"\n arg_0.activateCells(sorted(arg_1), arg_2)\n arg_0.activateDendrites(arg_2)"} +{"_id": "doc_1855", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Calculate the active cells, using the current active columns and dendrite\n segments. Grow and reinforce synapses.\n\n :param activeColumns: (iter) A sorted list of active column indices.\n\n :param learn: (bool) If true, reinforce / punish / grow synapses.\n\n **Pseudocode:**\n \n ::\n\n for each column\n if column is active and has active distal dendrite segments\n call activatePredictedColumn\n if column is active and doesn't have active distal dendrite segments\n call burstColumn\n if column is inactive and has matching distal dendrite segments\n call punishPredictedColumn\n \"\"\"\n arg_3 = arg_0.activeCells\n arg_4 = arg_0.winnerCells\n arg_0.activeCells = []\n arg_0.winnerCells = []\n\n arg_7 = lambda segment: int(segment.cell / arg_0.cellsPerColumn)\n arg_8 = lambda x: x\n\n for arg_9 in groupby2(arg_1, arg_8,\n arg_0.activeSegments, arg_7,\n arg_0.matchingSegments, arg_7):\n (arg_10,\n arg_1,\n arg_11,\n arg_12) = arg_9\n if arg_1 is not None:\n if arg_11 is not None:\n arg_13 = arg_0.activatePredictedColumn(arg_10,\n arg_11,\n arg_12,\n arg_3,\n arg_4,\n arg_2)\n\n arg_0.activeCells += arg_13\n arg_0.winnerCells += arg_13\n else:\n (arg_13,\n arg_14) = arg_0.burstColumn(arg_10,\n arg_12,\n arg_3,\n arg_4,\n arg_2)\n\n arg_0.activeCells += arg_13\n arg_0.winnerCells.append(arg_14)\n else:\n if arg_2:\n arg_0.punishPredictedColumn(arg_10,\n arg_11,\n arg_12,\n arg_3,\n arg_4)"} +{"_id": "doc_1856", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4,\n arg_5, arg_6):\n \"\"\"\n Determines which cells in a predicted column should be added to winner cells\n list, and learns on the segments that correctly predicted this column.\n\n :param column: (int) Index of bursting column.\n\n :param columnActiveSegments: (iter) Active segments in this column.\n\n :param columnMatchingSegments: (iter) Matching segments in this column.\n\n :param prevActiveCells: (list) Active cells in ``t-1``.\n\n :param prevWinnerCells: (list) Winner cells in ``t-1``.\n\n :param learn: (bool) If true, grow and reinforce synapses.\n\n :returns: (list) A list of predicted cells that will be added to \n active cells and winner cells.\n \"\"\"\n return arg_0._Func(\n arg_0.connections, arg_0._random,\n arg_2, arg_4, arg_5,\n arg_0.numActivePotentialSynapsesForSegment,\n arg_0.maxNewSynapseCount, arg_0.initialPermanence,\n arg_0.permanenceIncrement, arg_0.permanenceDecrement,\n arg_0.maxSynapsesPerSegment, arg_6)"} +{"_id": "doc_1857", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4,\n arg_5):\n \"\"\"\n Punishes the Segments that incorrectly predicted a column to be active.\n\n :param column: (int) Index of bursting column.\n\n :param columnActiveSegments: (iter) Active segments for this column, or None \n if there aren't any.\n\n :param columnMatchingSegments: (iter) Matching segments for this column, or \n None if there aren't any.\n\n :param prevActiveCells: (list) Active cells in ``t-1``.\n\n :param prevWinnerCells: (list) Winner cells in ``t-1``.\n\n \"\"\"\n arg_0._Func(\n arg_0.connections, arg_3, arg_4,\n 
arg_0.predictedSegmentDecrement)"} +{"_id": "doc_1858", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n \"\"\"\n Create a segment on the connections, enforcing the maxSegmentsPerCell\n parameter.\n \"\"\"\n # Enforce maxSegmentsPerCell.\n while arg_1.numSegments(arg_3) >= arg_5:\n arg_6 = min(\n arg_1.segmentsForCell(arg_3),\n key=lambda arg_7 : arg_2[arg_7.flatIdx])\n\n arg_1.destroySegment(arg_6)\n\n # Create the segment.\n arg_7 = arg_1.createSegment(arg_3)\n\n # Do TM-specific bookkeeping for the segment.\n if arg_7.flatIdx == len(arg_2):\n arg_2.append(arg_4)\n elif arg_7.flatIdx < len(arg_2):\n # A flatIdx was recycled.\n arg_2[arg_7.flatIdx] = arg_4\n else:\n raise AssertionError(\n \"All segments should be created with the TM createSegment method.\")\n\n return arg_7"} +{"_id": "doc_1859", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7):\n \"\"\"\n Creates nDesiredNewSynapes synapses on the segment passed in if\n possible, choosing random cells from the previous winner cells that are\n not already on the segment.\n\n :param connections: (Object) Connections instance for the tm\n :param random: (Object) TM object used to generate random\n numbers\n :param segment: (int) Segment to grow synapses on.\n :param nDesiredNewSynapes: (int) Desired number of synapses to grow\n :param prevWinnerCells: (list) Winner cells in `t-1`\n :param initialPermanence: (float) Initial permanence of a new synapse.\n\n \"\"\"\n arg_8 = list(arg_5)\n\n for arg_9 in arg_1.synapsesForSegment(arg_3):\n arg_10 = binSearch(arg_8, arg_9.presynapticCell)\n if arg_10 != -1:\n del arg_8[arg_10]\n\n arg_11 = min(arg_4, len(arg_8))\n\n # Check if we're going to surpass the maximum number of synapses.\n arg_12 = arg_1.numSynapses(arg_3) + arg_11 - arg_7\n if arg_12 > 0:\n arg_0._destroyMinPermanenceSynapses(arg_1, arg_2, arg_3, arg_12,\n arg_5)\n\n # Recalculate in case we weren't able to destroy as many synapses as needed.\n arg_11 = min(arg_11,\n arg_7 - arg_1.numSynapses(arg_3))\n\n for arg_13 in range(arg_11):\n arg_10 = arg_2.getUInt32(len(arg_8))\n arg_1.createSynapse(arg_3, arg_8[arg_10], arg_6)\n del arg_8[arg_10]"} +{"_id": "doc_1860", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n \"\"\"\n Updates synapses on segment.\n Strengthens active synapses; weakens inactive synapses.\n\n :param connections: (Object) Connections instance for the tm\n :param segment: (int) Segment to adapt\n :param prevActiveCells: (list) Active cells in `t-1`\n :param permanenceIncrement: (float) Amount to increment active synapses\n :param permanenceDecrement: (float) Amount to decrement inactive synapses\n \"\"\"\n\n # Destroying a synapse modifies the set that we're iterating through.\n arg_6 = []\n\n for arg_7 in arg_1.synapsesForSegment(arg_2):\n arg_8 = arg_7.permanence\n\n if binSearch(arg_3, arg_7.presynapticCell) != -1:\n arg_8 += arg_4\n else:\n arg_8 -= arg_5\n\n # Keep permanence within min/max bounds\n arg_8 = max(0.0, min(1.0, arg_8))\n\n if arg_8 < EPSILON:\n arg_6.append(arg_7)\n else:\n arg_1.updateSynapsePermanence(arg_7, arg_8)\n\n for arg_7 in arg_6:\n arg_1.destroySynapse(arg_7)\n\n if arg_1.numSynapses(arg_2) == 0:\n arg_1.destroySegment(arg_2)"} +{"_id": "doc_1861", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the index of the column that a cell belongs to.\n\n :param cell: (int) Cell index\n\n :returns: (int) Column index\n \"\"\"\n arg_0._validateCell(arg_1)\n\n return int(arg_1 / 
arg_0.cellsPerColumn)"} +{"_id": "doc_1862", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reads deserialized data from proto object.\n\n :param proto: (DynamicStructBuilder) Proto object\n\n :returns: (:class:TemporalMemory) TemporalMemory instance\n \"\"\"\n arg_2 = object.__new__(arg_0)\n\n # capnp fails to save a tuple, so proto.columnDimensions was forced to\n # serialize as a list. We prefer a tuple, however, because columnDimensions\n # should be regarded as immutable.\n arg_2.columnDimensions = tuple(arg_1.columnDimensions)\n arg_2.cellsPerColumn = int(arg_1.cellsPerColumn)\n arg_2.activationThreshold = int(arg_1.activationThreshold)\n arg_2.initialPermanence = round(arg_1.initialPermanence, EPSILON_ROUND)\n arg_2.connectedPermanence = round(arg_1.connectedPermanence, EPSILON_ROUND)\n arg_2.minThreshold = int(arg_1.minThreshold)\n arg_2.maxNewSynapseCount = int(arg_1.maxNewSynapseCount)\n arg_2.permanenceIncrement = round(arg_1.permanenceIncrement, EPSILON_ROUND)\n arg_2.permanenceDecrement = round(arg_1.permanenceDecrement, EPSILON_ROUND)\n arg_2.predictedSegmentDecrement = round(arg_1.predictedSegmentDecrement,\n EPSILON_ROUND)\n\n arg_2.maxSegmentsPerCell = int(arg_1.maxSegmentsPerCell)\n arg_2.maxSynapsesPerSegment = int(arg_1.maxSynapsesPerSegment)\n\n arg_2.connections = Connections.Func(arg_1.connections)\n #pylint: disable=W0212\n arg_2._random = Random()\n arg_2._random.Func(arg_1.random)\n #pylint: enable=W0212\n\n arg_2.activeCells = [int(x) for x in arg_1.activeCells]\n arg_2.winnerCells = [int(x) for x in arg_1.winnerCells]\n\n arg_19 = arg_2.connections.segmentFlatListLength()\n arg_2.numActiveConnectedSynapsesForSegment = [0] * arg_19\n arg_2.numActivePotentialSynapsesForSegment = [0] * arg_19\n arg_2.lastUsedIterationForSegment = [0] * arg_19\n\n arg_2.activeSegments = []\n arg_2.matchingSegments = []\n\n for arg_25 in arg_1.activeSegments:\n arg_2.activeSegments.append(\n arg_2.connections.getSegment(arg_25.cell,\n arg_25.idxOnCell))\n\n for arg_25 in arg_1.matchingSegments:\n arg_2.matchingSegments.append(\n arg_2.connections.getSegment(arg_25.cell,\n arg_25.idxOnCell))\n\n for arg_25 in arg_1.numActivePotentialSynapsesForSegment:\n arg_26 = arg_2.connections.getSegment(arg_25.cell,\n arg_25.idxOnCell)\n\n arg_2.numActivePotentialSynapsesForSegment[arg_26.flatIdx] = (\n int(arg_25.number))\n\n arg_2.iteration = long(arg_1.iteration)\n\n for arg_25 in arg_1.lastUsedIterationForSegment:\n arg_26 = arg_2.connections.getSegment(arg_25.cell,\n arg_25.idxOnCell)\n\n arg_2.lastUsedIterationForSegment[arg_26.flatIdx] = (\n long(arg_25.number))\n\n return arg_2"} +{"_id": "doc_1863", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generate a sequence from a list of numbers.\n\n Note: Any `None` in the list of numbers is considered a reset.\n\n @param numbers (list) List of numbers\n\n @return (list) Generated sequence\n \"\"\"\n arg_2 = []\n\n for arg_3 in arg_1:\n if arg_3 == None:\n arg_2.append(arg_3)\n else:\n arg_4 = arg_0.patternMachine.get(arg_3)\n arg_2.append(arg_4)\n\n return arg_2"} +{"_id": "doc_1864", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add spatial noise to each pattern in the sequence.\n\n @param sequence (list) Sequence\n @param amount (float) Amount of spatial noise\n\n @return (list) Sequence with spatial noise\n \"\"\"\n arg_3 = []\n\n for arg_4 in arg_1:\n if arg_4 is not None:\n arg_4 = arg_0.patternMachine.addNoise(arg_4, arg_2)\n arg_3.append(arg_4)\n\n return arg_3"} +{"_id": "doc_1865", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"\n Pretty print a sequence.\n\n @param sequence (list) Sequence\n @param verbosity (int) Verbosity level\n\n @return (string) Pretty-printed text\n \"\"\"\n arg_3 = \"\"\n\n for arg_4 in xrange(len(arg_1)):\n arg_5 = arg_1[arg_4]\n\n if arg_5 == None:\n arg_3 += \"\"\n if arg_4 < len(arg_1) - 1:\n arg_3 += \"\\n\"\n else:\n arg_3 += arg_0.patternMachine.prettyPrintPattern(arg_5,\n arg_2=arg_2)\n\n return arg_3"} +{"_id": "doc_1866", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns pretty-printed table of traces.\n\n @param traces (list) Traces to print in table\n @param breakOnResets (BoolsTrace) Trace of resets to break table on\n\n @return (string) Pretty-printed table of traces.\n \"\"\"\n assert len(arg_0) > 0, \"No traces found\"\n arg_2 = PrettyTable([\"#\"] + [arg_4.prettyPrintTitle() for arg_4 in arg_0])\n\n for arg_3 in xrange(len(arg_0[0].data)):\n if arg_1 and arg_1.data[arg_3]:\n arg_2.add_row([\"\"] * (len(arg_0) + 1))\n arg_2.add_row([arg_3] +\n [arg_4.prettyPrintDatum(arg_4.data[arg_3]) for arg_4 in arg_0])\n\n return arg_2.get_string().encode(\"utf-8\")"} +{"_id": "doc_1867", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=0):\n \"\"\"\n Compute updated probabilities for anomalyScores using the given params.\n\n :param anomalyScores: a list of records. Each record is a list with the\n following three elements: [timestamp, value, score]\n\n Example::\n\n [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]\n\n :param params: the JSON dict returned by estimateAnomalyLikelihoods\n :param verbosity: integer controlling extent of printouts for debugging\n :type verbosity: int\n\n :returns: 3-tuple consisting of:\n\n - likelihoods\n\n numpy array of likelihoods, one for each aggregated point\n\n - avgRecordList\n\n list of averaged input records\n\n - params\n\n an updated JSON object containing the state of this metric.\n\n \"\"\"\n if arg_2 > 3:\n print(\"In Func.\")\n print(\"Number of anomaly scores:\", len(arg_0))\n print(\"First 20:\", arg_0[0:min(20, len(arg_0))])\n print(\"Params:\", arg_1)\n\n if len(arg_0) == 0:\n raise ValueError(\"Must have at least one anomalyScore\")\n\n if not isValidEstimatorParams(arg_1):\n raise ValueError(\"'params' is not a valid params structure\")\n\n # For backward compatibility.\n if \"historicalLikelihoods\" not in arg_1:\n arg_1[\"historicalLikelihoods\"] = [1.0]\n\n # Compute moving averages of these new scores using the previous values\n # as well as likelihood for these scores using the old estimator\n arg_3 = arg_1[\"movingAverage\"][\"historicalValues\"]\n arg_4 = arg_1[\"movingAverage\"][\"total\"]\n arg_5 = arg_1[\"movingAverage\"][\"windowSize\"]\n\n arg_6 = numpy.zeros(len(arg_0), dtype=float)\n arg_7 = numpy.zeros(len(arg_0), dtype=float)\n for arg_8, arg_9 in enumerate(arg_0):\n arg_10, arg_3, arg_4 = (\n MovingAverage.compute(arg_3, arg_4, arg_9[2], arg_5)\n )\n arg_6[arg_8] = arg_10\n arg_7[arg_8] = tailProbability(arg_10, arg_1[\"distribution\"])\n\n # Filter the likelihood values. First we prepend the historical likelihoods\n # to the current set. Then we filter the values. 
We peel off the likelihoods\n # to return and the last windowSize values to store for later.\n arg_11 = arg_1[\"historicalLikelihoods\"] + list(arg_7)\n arg_12 = _filterLikelihoods(arg_11)\n arg_7[:] = arg_12[-len(arg_7):]\n arg_13 = arg_11[-min(arg_5, len(arg_11)):]\n\n # Update the estimator\n arg_14 = {\n \"distribution\": arg_1[\"distribution\"],\n \"movingAverage\": {\n \"historicalValues\": arg_3,\n \"total\": arg_4,\n \"windowSize\": arg_5,\n },\n \"historicalLikelihoods\": arg_13,\n }\n\n assert len(arg_14[\"historicalLikelihoods\"]) <= arg_5\n\n if arg_2 > 3:\n print(\"Number of likelihoods:\", len(arg_7))\n print(\"First 20 likelihoods:\", arg_7[0:min(20, len(arg_7))])\n print(\"Leaving Func.\")\n\n return (arg_7, arg_6, arg_14)"} +{"_id": "doc_1868", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the value of skipRecords for passing to estimateAnomalyLikelihoods\n\n If `windowSize` is very large (bigger than the amount of data) then this\n could just return `learningPeriod`. But when some values have fallen out of\n the historical sliding window of anomaly records, then we have to take those\n into account as well so we return the `learningPeriod` minus the number\n shifted out.\n\n :param numIngested - (int) number of data points that have been added to the\n sliding window of historical data points.\n :param windowSize - (int) size of sliding window of historical data points.\n :param learningPeriod - (int) the number of iterations required for the\n algorithm to learn the basic patterns in the dataset and for the anomaly\n score to 'settle down'.\n \"\"\"\n arg_3 = max(0, arg_0 - arg_1)\n return min(arg_0, max(0, arg_2 - arg_3))"} +{"_id": "doc_1869", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" capnp serialization method for the anomaly likelihood object\n\n :param proto: (Object) capnp proto object specified in\n nupic.regions.anomaly_likelihood.capnp\n \"\"\"\n\n arg_1.iteration = arg_0._iteration\n\n arg_3 = arg_1.init('historicalScores', len(arg_0._historicalScores))\n for arg_4, arg_5 in enumerate(list(arg_0._historicalScores)):\n arg_6, arg_7, arg_8 = arg_5\n arg_9 = arg_3[arg_4]\n arg_9.value = float(arg_7)\n arg_9.anomalyScore = float(arg_8)\n\n if arg_0._distribution:\n arg_1.distribution.name = arg_0._distribution[\"distribution\"][\"name\"]\n arg_1.distribution.mean = float(arg_0._distribution[\"distribution\"][\"mean\"])\n arg_1.distribution.variance = float(arg_0._distribution[\"distribution\"][\"variance\"])\n arg_1.distribution.stdev = float(arg_0._distribution[\"distribution\"][\"stdev\"])\n\n arg_1.distribution.movingAverage.windowSize = float(arg_0._distribution[\"movingAverage\"][\"windowSize\"])\n\n arg_17 = arg_0._distribution[\"movingAverage\"][\"historicalValues\"]\n arg_18 = arg_1.distribution.movingAverage.init(\n \"historicalValues\", len(arg_17))\n for arg_4, arg_7 in enumerate(arg_17):\n arg_18[arg_4] = float(arg_7)\n\n #proto.distribution.movingAverage.historicalValues = self._distribution[\"movingAverage\"][\"historicalValues\"]\n arg_1.distribution.movingAverage.total = float(arg_0._distribution[\"movingAverage\"][\"total\"])\n\n arg_20 = arg_0._distribution[\"historicalLikelihoods\"]\n arg_21 = arg_1.distribution.init(\"historicalLikelihoods\",\n len(arg_20))\n for arg_4, arg_22 in enumerate(arg_20):\n arg_21[arg_4] = float(arg_22)\n\n arg_1.probationaryPeriod = arg_0._probationaryPeriod\n arg_1.learningPeriod = arg_0._learningPeriod\n arg_1.reestimationPeriod = arg_0._reestimationPeriod\n 
arg_1.historicWindowSize = arg_0._historicalScores.maxlen"} +{"_id": "doc_1870", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Replaces the Iteration Cycle phases\n\n :param phaseSpecs: Iteration cycle description consisting of a sequence of\n IterationPhaseSpecXXXXX elements that are performed in the\n given order\n \"\"\"\n\n # -----------------------------------------------------------------------\n # Replace our phase manager\n #\n arg_0.__phaseManager = _PhaseManager(\n model=arg_0.__model,\n arg_1=arg_1)\n\n return"} +{"_id": "doc_1871", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Processes the given record according to the current iteration cycle phase\n\n :param inputRecord: (object) record expected to be returned from\n :meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`.\n\n :returns: :class:`nupic.frameworks.opf.opf_utils.ModelResult`\n \"\"\"\n assert arg_1, \"Invalid inputRecord: %r\" % arg_1\n\n arg_2 = arg_0.__phaseManager.Func(arg_1)\n arg_3 = arg_0.__metricsMgr.update(arg_2)\n\n # Execute task-postIter callbacks\n for arg_4 in arg_0.__userCallbacks['postIter']:\n arg_4(arg_0.__model)\n\n arg_2.metrics = arg_3\n\n # Return the input and predictions for this record\n return arg_2"} +{"_id": "doc_1872", "title": "", "text": "def Func(arg_0):\n \"\"\" Advances the iteration;\n\n Returns: True if more iterations remain; False if this is the final\n iteration.\n \"\"\"\n arg_1 = True\n try:\n arg_0.__iter.next()\n except StopIteration:\n arg_0.__iter = None\n arg_1 = False\n\n return arg_1"} +{"_id": "doc_1873", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Serialize via capnp\n\n :param proto: capnp PreviousValueModelProto message builder\n \"\"\"\n super(PreviousValueModel, arg_0).FuncBaseToProto(arg_1.modelBase)\n\n arg_1.fieldNames = arg_0._fieldNames\n arg_1.fieldTypes = arg_0._fieldTypes\n if arg_0._predictedField:\n arg_1.predictedField = arg_0._predictedField\n arg_1.predictionSteps = arg_0._predictionSteps"} +{"_id": "doc_1874", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Accepts log-values as input, exponentiates them, computes the sum,\n then converts the sum back to log-space and returns the result.\n Handles underflow by rescaling so that the largest values is exactly 1.0.\n \"\"\"\n arg_0 = numpy.asarray(arg_0)\n arg_2 = arg_0.max()\n\n # If the input is the log of 0's, catch this condition before we generate\n # an exception, and return the log(0)\n if numpy.isinf(arg_2):\n return arg_2\n\n # If the user specified an epsilon and we are below it, return epsilon\n if (arg_1 is not None) and (arg_2 < arg_1):\n return arg_1\n\n arg_3 = numpy.exp(arg_0 - arg_2)\n arg_4 = arg_3.sum()\n\n arg_5 = numpy.log(arg_4) + arg_2\n # try:\n # conventional = numpy.log(numpy.exp(lx).sum())\n # if not similar(result, conventional):\n # if numpy.isinf(conventional).any() and not numpy.isinf(result).any():\n # # print \"Scaled log sum avoided underflow or overflow.\"\n # pass\n # else:\n # import sys\n # print >>sys.stderr, \"Warning: scaled log sum did not match.\"\n # print >>sys.stderr, \"Scaled log result:\"\n # print >>sys.stderr, result\n # print >>sys.stderr, \"Conventional result:\"\n # print >>sys.stderr, conventional\n # except FloatingPointError, e:\n # # print \"Scaled log sum avoided underflow or overflow.\"\n # pass\n\n return arg_5"} +{"_id": "doc_1875", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Accepts log-values as input, exponentiates them,\n Funcs and returns the result.\n Handles underflow by 
rescaling so that the largest values is exactly 1.0.\n \"\"\"\n arg_0 = numpy.asarray(arg_0)\n arg_1 = arg_0.max()\n arg_2 = numpy.exp(arg_0 - arg_1)\n arg_3 = arg_2 / arg_2.sum()\n\n arg_4 = (numpy.exp(arg_0) / numpy.exp(arg_0).sum())\n assert similar(arg_3, arg_4)\n\n return arg_3"} +{"_id": "doc_1876", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Log 'msg % args' with severity 'DEBUG'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n\n logger.Func(\"Houston, we have a %s\", \"thorny problem\", exc_info=1)\n \"\"\"\n arg_0._baseLogger.Func(arg_0, arg_0.getExtendedMsg(arg_1), *arg_2, **arg_3)"} +{"_id": "doc_1877", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Log 'msg % args' with severity 'INFO'.\n\n To pass exception Funcrmation, use the keyword argument exc_Func with\n a true value, e.g.\n\n logger.Func(\"Houston, we have a %s\", \"interesting problem\", exc_Func=1)\n \"\"\"\n arg_0._baseLogger.Func(arg_0, arg_0.getExtendedMsg(arg_1), *arg_2, **arg_3)"} +{"_id": "doc_1878", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Log 'msg % args' with severity 'ERROR'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n\n logger.Func(\"Houston, we have a %s\", \"major problem\", exc_info=1)\n \"\"\"\n arg_0._baseLogger.Func(arg_0, arg_0.getExtendedMsg(arg_1), *arg_2, **arg_3)"} +{"_id": "doc_1879", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Log 'msg % args' with severity 'CRITICAL'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n\n logger.Func(\"Houston, we have a %s\", \"major disaster\", exc_info=1)\n \"\"\"\n arg_0._baseLogger.Func(arg_0, arg_0.getExtendedMsg(arg_1), *arg_2, **arg_3)"} +{"_id": "doc_1880", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"\n Log 'msg % args' with the integer severity 'level'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n\n Funcger.Func(level, \"We have a %s\", \"mysterious problem\", exc_info=1)\n \"\"\"\n arg_0._baseLogger.Func(arg_0, arg_1, arg_0.getExtendedMsg(arg_2), *arg_3,\n **arg_4)"} +{"_id": "doc_1881", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns sum of the elements in the list. 
Missing items are replaced with\n the mean value\n \"\"\"\n arg_1 = _aggr_mean(arg_0)\n if arg_1 == None:\n return None\n\n arg_2 = 0\n for arg_3 in arg_0:\n if arg_3 != SENTINEL_VALUE_FOR_MISSING_DATA:\n arg_2 += arg_3\n else:\n arg_2 += arg_1\n\n return arg_2"} +{"_id": "doc_1882", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns mean of non-None elements of the list\n \"\"\"\n arg_1 = 0\n arg_2 = 0\n for arg_3 in arg_0:\n if arg_3 != SENTINEL_VALUE_FOR_MISSING_DATA:\n arg_1 += arg_3\n arg_2 += 1\n if arg_2 != 0:\n return arg_1 / arg_2\n else:\n return None"} +{"_id": "doc_1883", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns most common value seen in the non-None elements of the list\n \"\"\"\n\n arg_1 = dict()\n arg_2 = 0\n\n for arg_3 in arg_0:\n if arg_3 == SENTINEL_VALUE_FOR_MISSING_DATA:\n continue\n\n arg_2 += 1\n if arg_3 in arg_1:\n arg_1[arg_3] += 1\n else:\n arg_1[arg_3] = 1\n\n # Get the most common one\n if arg_2 == 0:\n return None\n\n # Sort by counts\n arg_4 = arg_1.items()\n arg_4.sort(cmp=lambda x,y: x[1] - y[1], reverse=True)\n return arg_4[0][0]"} +{"_id": "doc_1884", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Generate a dataset of aggregated values\n\n Parameters:\n ----------------------------------------------------------------------------\n aggregationInfo: a dictionary that contains the following entries\n - fields: a list of pairs. Each pair is a field name and an\n aggregation function (e.g. sum). The function will be used to aggregate\n multiple values during the aggregation period.\n\n aggregation period: 0 or more of unit=value fields; allowed units are:\n [years months] |\n [weeks days hours minutes seconds milliseconds microseconds]\n NOTE: years and months are mutually-exclusive with the other units.\n See getEndTime() and _aggregate() for more details.\n Example1: years=1, months=6,\n Example2: hours=1, minutes=30,\n If none of the period fields are specified or if all that are specified\n have values of 0, then aggregation will be suppressed, and the given\n inputFile parameter value will be returned.\n\n inputFilename: filename of the input dataset within examples/prediction/data\n\n outputFilename: name for the output file. If not given, a name will be\n generated based on the input filename and the aggregation params\n\n retval: Name of the generated output file. This will be the same as the input\n file name if no aggregation needed to be performed\n\n\n\n If the input file contained a time field, sequence id field or reset field\n that were not specified in aggregationInfo fields, those fields will be\n added automatically with the following rules:\n\n 1. The order will be R, S, T, rest of the fields\n 2. The aggregation function for all will be to pick the first: lambda x: x[0]\n\n Returns: the path of the aggregated data file if aggregation was performed\n (in the same directory as the given input file); if aggregation did not\n need to be performed, then the given inputFile argument value is returned.\n \"\"\"\n\n\n\n # Create the input stream\n arg_3 = resource_filename(\"nupic.datafiles\", arg_1)\n arg_4 = FileRecordStream(arg_3)\n\n\n # Instantiate the aggregator\n arg_5 = Aggregator(arg_0=arg_0,\n inputFields=arg_4.getFields())\n\n\n # Is it a null aggregation? 
If so, just return the input file unmodified\n if arg_5.isNullAggregation():\n return arg_3\n\n\n # ------------------------------------------------------------------------\n # If we were not given an output filename, create one based on the\n # aggregation settings\n if arg_2 is None:\n arg_2 = 'agg_%s' % \\\n os.path.splitext(os.path.basename(arg_3))[0]\n arg_6 = 'years months weeks days '\\\n 'hours minutes seconds milliseconds microseconds'\n for arg_7 in arg_6.split():\n if arg_0.get(arg_7, 0) > 0:\n arg_2 += '_%s_%d' % (arg_7, arg_0[arg_7])\n\n arg_2 += '.csv'\n arg_2 = os.path.join(os.path.dirname(arg_3), arg_2)\n\n\n\n # ------------------------------------------------------------------------\n # If some other process already started creating this file, simply\n # wait for it to finish and return without doing anything\n arg_8 = arg_2 + '.please_wait'\n if os.path.isfile(arg_2) or \\\n os.path.isfile(arg_8):\n while os.path.isfile(arg_8):\n print 'Waiting for %s to be fully written by another process' % \\\n arg_8\n time.sleep(1)\n return arg_2\n\n\n # Create the lock file\n arg_9 = open(arg_8, 'w')\n\n\n\n # -------------------------------------------------------------------------\n # Create the output stream\n arg_10 = FileRecordStream(streamID=arg_2, write=True,\n fields=arg_4.getFields())\n\n\n # -------------------------------------------------------------------------\n # Write all aggregated records to the output\n while True:\n arg_11 = arg_4.getNextRecord()\n\n (arg_12, arg_13) = arg_5.next(arg_11, None)\n\n if arg_12 is None and arg_11 is None:\n break\n\n if arg_12 is not None:\n arg_10.appendRecord(arg_12)\n\n return arg_2"} +{"_id": "doc_1885", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate the filename for aggregated dataset\n\n The filename is based on the input filename and the\n aggregation period.\n\n Returns the inputFile if no aggregation required (aggregation\n info has all 0's)\n \"\"\"\n\n # Find the actual file, with an absolute path\n arg_1 = resource_filename(\"nupic.datafiles\", arg_1)\n\n arg_2 = defaultdict(lambda: 0, arg_0)\n arg_3 = os.path.dirname(arg_1)\n arg_4 = 'agg_%s' % os.path.splitext(os.path.basename(arg_1))[0]\n arg_5 = True\n arg_6 = 'years months weeks days '\\\n 'hours minutes seconds milliseconds microseconds'\n for arg_7 in arg_6.split():\n if arg_2[arg_7] > 0:\n arg_5 = False\n arg_4 += '_%s_%d' % (arg_7, arg_2[arg_7])\n\n if arg_5:\n return arg_1\n arg_4 += '.csv'\n arg_4 = os.path.join(arg_3, arg_4)\n\n return arg_4"} +{"_id": "doc_1886", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add the aggregation period to the input time t and return a datetime object\n\n Years and months are handled as aspecial case due to leap years\n and months with different number of dates. They can't be converted\n to a strict timedelta because a period of 3 months will have different\n durations actually. 
The solution is to just add the years and months\n fields directly to the current time.\n\n Other periods are converted to timedelta and just added to current time.\n \"\"\"\n\n assert isinstance(arg_1, datetime.datetime)\n if arg_0._aggTimeDelta:\n return arg_1 + arg_0._aggTimeDelta\n else:\n arg_2 = arg_1.year + arg_0._aggYears + (arg_1.month - 1 + arg_0._aggMonths) / 12\n arg_3 = (arg_1.month - 1 + arg_0._aggMonths) % 12 + 1\n return arg_1.replace(arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_1887", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Given the name of an aggregation function, returns the function pointer\n and param.\n\n Parameters:\n ------------------------------------------------------------------------\n funcName: a string (name of function) or funcPtr\n retval: (funcPtr, param)\n \"\"\"\n\n arg_2 = None\n if isinstance(arg_1, basestring):\n if arg_1 == 'sum':\n arg_3 = _aggr_sum\n elif arg_1 == 'first':\n arg_3 = _aggr_first\n elif arg_1 == 'last':\n arg_3 = _aggr_last\n elif arg_1 == 'mean':\n arg_3 = _aggr_mean\n elif arg_1 == 'max':\n arg_3 = max\n elif arg_1 == 'min':\n arg_3 = min\n elif arg_1 == 'mode':\n arg_3 = _aggr_mode\n elif arg_1.startswith('wmean:'):\n arg_3 = _aggr_weighted_mean\n arg_4 = arg_1[6:]\n arg_2 = [f[0] for f in arg_0._inputFields].index(arg_4)\n else:\n arg_3 = arg_1\n\n return (arg_3, arg_2)"} +{"_id": "doc_1888", "title": "", "text": "def Func(arg_0):\n \"\"\" Generate the aggregated output record\n\n Parameters:\n ------------------------------------------------------------------------\n retval: outputRecord\n\n \"\"\"\n\n arg_1 = []\n\n for arg_2, (arg_3, arg_4, arg_5) in enumerate(arg_0._fields):\n if arg_4 is None: # this field is not supposed to be aggregated.\n continue\n\n arg_6 = arg_0._slice[arg_2]\n arg_7 = None\n if arg_5 is not None:\n arg_1.append(arg_4(arg_6, arg_0._slice[arg_5]))\n else:\n arg_1.append(arg_4(arg_6))\n\n return arg_1"} +{"_id": "doc_1889", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Run one iteration of this model.\n\n :param inputRecord: (object)\n A record object formatted according to\n :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or\n :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`\n result format.\n :returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)\n An ModelResult namedtuple. 
The contents of ModelResult.inferences\n depends on the the specific inference type of this model, which\n can be queried by :meth:`.getInferenceType`.\n \"\"\"\n # 0-based prediction index for ModelResult\n arg_2 = arg_0._numPredictions\n arg_0._numPredictions += 1\n arg_3 = opf_utils.ModelResult(arg_2=arg_2,\n rawInput=arg_1)\n return arg_3"} +{"_id": "doc_1890", "title": "", "text": "def Func(arg_0):\n \"\"\" Return the absolute path of the model's checkpoint file.\n\n :param checkpointDir: (string)\n Directory of where the experiment is to be or was saved\n :returns: (string) An absolute path.\n \"\"\"\n arg_1 = os.path.join(arg_0, \"model.data\")\n arg_1 = os.path.abspath(arg_1)\n return arg_1"} +{"_id": "doc_1891", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Serializes model using capnproto and writes data to ``checkpointDir``\"\"\"\n arg_2 = arg_0.getSchema().new_message()\n\n arg_0.write(arg_2)\n\n arg_3 = arg_0._getModelCheckpointFilePath(arg_1)\n\n # Clean up old saved state, if any\n if os.path.exists(arg_1):\n if not os.path.isdir(arg_1):\n raise Exception((\"Existing filesystem entry <%s> is not a model\"\n \" checkpoint -- refusing to delete (not a directory)\") \\\n % arg_1)\n if not os.path.isfile(arg_3):\n raise Exception((\"Existing filesystem entry <%s> is not a model\"\n \" checkpoint -- refusing to delete\"\\\n \" (%s missing or not a file)\") % \\\n (arg_1, arg_3))\n\n shutil.rmtree(arg_1)\n\n # Create a new directory for saving state\n arg_0.__makeDirectoryFromAbsolutePath(arg_1)\n\n with open(arg_3, 'wb') as f:\n arg_2.write(f)"} +{"_id": "doc_1892", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Deserializes model from checkpointDir using capnproto\"\"\"\n arg_2 = arg_0._getModelCheckpointFilePath(arg_1)\n\n with open(arg_2, 'r') as f:\n arg_3 = arg_0.getSchema().read(f,\n traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS)\n\n arg_4 = arg_0.read(arg_3)\n return arg_4"} +{"_id": "doc_1893", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Save the state maintained by the Model base class\n\n :param proto: capnp ModelProto message builder\n \"\"\"\n arg_2 = arg_0.getInferenceType()\n # lower-case first letter to be compatible with capnproto enum naming\n arg_2 = arg_2[:1].lower() + arg_2[1:]\n arg_1.inferenceType = arg_2\n\n arg_1.numPredictions = arg_0._numPredictions\n\n arg_1.learningEnabled = arg_0.__learningEnabled\n arg_1.inferenceEnabled = arg_0.__inferenceEnabled\n arg_1.inferenceArgs = json.dumps(arg_0.__inferenceArgs)"} +{"_id": "doc_1894", "title": "", "text": "def Func(arg_0):\n \"\"\" Return the absolute path of the model's pickle file.\n\n :param saveModelDir: (string)\n Directory of where the experiment is to be or was saved\n :returns: (string) An absolute path.\n \"\"\"\n arg_1 = os.path.join(arg_0, \"model.pkl\")\n arg_1 = os.path.abspath(arg_1)\n return arg_1"} +{"_id": "doc_1895", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Used as optparse callback for reaping a variable number of option args.\n The option may be specified multiple times, and all the args associated with\n that option name will be accumulated in the order that they are encountered\n \"\"\"\n arg_4 = []\n\n # Reap the args, taking care to stop before the next option or '.'\n arg_5 = False\n for arg_6 in arg_3.rargs:\n # Stop on --longname options\n if arg_6.startswith(\"--\") and len(arg_6) > 2:\n break\n\n # Stop on -b options\n if arg_6.startswith(\"-\") and len(arg_6) > 1:\n break\n\n if arg_6 == \".\":\n arg_5 = True\n break\n\n 
arg_4.append(arg_6)\n\n if not arg_4:\n raise optparse.OptionValueError(\n (\"Empty arg list for option %r expecting one or more args \"\n \"(remaining tokens: %r)\") % (arg_1, arg_3.rargs))\n\n del arg_3.rargs[:len(arg_4) + int(arg_5)]\n\n # Retrieve the existing arg accumulator, if any\n arg_2 = getattr(arg_3.values, arg_0.dest, [])\n #print \"Previous value: %r\" % value\n if arg_2 is None:\n arg_2 = []\n\n # Append the new args to the existing ones and save to the parser\n arg_2.extend(arg_4)\n setattr(arg_3.values, arg_0.dest, arg_2)"} +{"_id": "doc_1896", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Report usage error and exit program with error indication.\"\"\"\n print arg_0.get_usage()\n print arg_1\n sys.exit(1)"} +{"_id": "doc_1897", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Creates and runs the experiment\n\n Args:\n options: namedtuple ParseCommandLineOptionsResult\n model: For testing: may pass in an existing OPF Model instance\n to use instead of creating a new one.\n\n Returns: reference to OPFExperiment instance that was constructed (this\n is provided to aid with debugging) or None, if none was\n created.\n \"\"\"\n json_helpers.validate(arg_0.privateOptions,\n schemaDict=g_parsedPrivateCommandLineOptionsSchema)\n\n # Load the experiment's description.py module\n arg_2 = arg_0.experimentDir\n arg_3 = helpers.loadExperimentDescriptionScriptFromDir(\n arg_2)\n arg_4 = helpers.getExperimentDescriptionInterfaceFromModule(\n arg_3)\n\n # Handle \"list checkpoints\" request\n if arg_0.privateOptions['listAvailableCheckpoints']:\n _printAvailableCheckpoints(arg_2)\n return None\n\n # Load experiment tasks\n arg_5 = arg_4.getModelControl().get('tasks', [])\n\n # If the tasks list is empty, and this is a nupic environment description\n # file being run from the OPF, convert it to a simple OPF description file.\n if (len(arg_5) == 0 and\n arg_4.getModelControl()['environment'] == OpfEnvironment.Nupic):\n arg_4.convertNupicEnvToOPF()\n arg_5 = arg_4.getModelControl().get('tasks', [])\n\n # Ensures all the source locations are either absolute paths or relative to\n # the nupic.datafiles package_data location.\n arg_4.normalizeStreamSources()\n\n # Extract option\n arg_6 = arg_0.privateOptions['newSerialization']\n\n # Handle listTasks\n if arg_0.privateOptions['listTasks']:\n print \"Available tasks:\"\n\n for arg_7 in [t['taskLabel'] for t in arg_5]:\n print \"\\t\", arg_7\n\n return None\n\n # Construct the experiment instance\n if arg_0.privateOptions['runCheckpointName']:\n\n assert arg_1 is None\n\n arg_8 = arg_0.privateOptions['runCheckpointName']\n\n arg_1 = ModelFactory.loadFromCheckpoint(\n savedModelDir=_getModelCheckpointDir(arg_2, arg_8),\n arg_6=arg_6)\n\n elif arg_1 is not None:\n print \"Skipping creation of OPFExperiment instance: caller provided his own\"\n else:\n arg_9 = arg_4.getModelDescription()\n arg_1 = ModelFactory.create(arg_9)\n\n # Handle \"create model\" request\n if arg_0.privateOptions['createCheckpointName']:\n arg_8 = arg_0.privateOptions['createCheckpointName']\n _saveModel(arg_1=arg_1,\n arg_2=arg_2,\n checkpointLabel=arg_8,\n arg_6=arg_6)\n\n return arg_1\n\n # Build the task list\n\n # Default task execution index list is in the natural list order of the tasks\n arg_10 = range(len(arg_5))\n\n arg_11 = arg_0.privateOptions['taskLabels']\n if arg_11:\n arg_12 = [t['taskLabel'] for t in arg_5]\n arg_13 = set(arg_12)\n\n arg_14 = set(arg_11)\n\n assert arg_14.issubset(arg_13), \\\n (\"Some custom-provided task execution 
labels don't correspond \"\n \"to actual task labels: mismatched labels: %r; actual task \"\n \"labels: %r.\") % (arg_14 - arg_13,\n arg_11)\n\n arg_10 = [arg_12.index(arg_7) for arg_7 in\n arg_11]\n\n print \"#### Executing custom task list: %r\" % [arg_12[arg_15] for\n arg_15 in arg_10]\n\n # Run all experiment tasks\n for arg_16 in arg_10:\n\n arg_17 = arg_5[arg_16]\n\n # Create a task runner and run it!\n arg_18 = _TaskRunner(arg_1=arg_1,\n arg_17=arg_17,\n cmdOptions=arg_0)\n arg_18.run()\n del arg_18\n\n if arg_0.privateOptions['checkpointModel']:\n _saveModel(arg_1=arg_1,\n arg_2=arg_2,\n checkpointLabel=arg_17['taskLabel'],\n arg_6=arg_6)\n\n return arg_1"} +{"_id": "doc_1898", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates directory for serialization of the model\n\n checkpointLabel:\n Checkpoint label (string)\n\n Returns:\n absolute path to the serialization directory\n \"\"\"\n arg_2 = os.path.join(getCheckpointParentDir(arg_0),\n arg_1 + g_defaultCheckpointExtension)\n arg_2 = os.path.abspath(arg_2)\n\n return arg_2"} +{"_id": "doc_1899", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a checkpoint label string for the given model checkpoint directory\n\n checkpointDir: relative or absolute model checkpoint directory path\n \"\"\"\n assert arg_0.endswith(g_defaultCheckpointExtension)\n\n arg_1 = os.path.split(arg_0)[1]\n\n arg_2 = arg_1[0:-len(g_defaultCheckpointExtension)]\n\n return arg_2"} +{"_id": "doc_1900", "title": "", "text": "def Func(arg_0):\n \"\"\"Return true iff checkpointDir appears to be a checkpoint directory.\"\"\"\n arg_1 = os.path.split(arg_0)[1]\n if arg_1[0] == '.':\n return False\n\n if not arg_0.endswith(g_defaultCheckpointExtension):\n return False\n\n if not os.path.isdir(arg_0):\n return False\n\n return True"} +{"_id": "doc_1901", "title": "", "text": "def Func(arg_0):\n \"\"\"List available checkpoints for the specified experiment.\"\"\"\n arg_1 = getCheckpointParentDir(arg_0)\n\n if not os.path.exists(arg_1):\n print \"No available checkpoints.\"\n return\n\n arg_2 = [x for x in os.listdir(arg_1)\n if _isCheckpointDir(os.path.join(arg_1, x))]\n if not arg_2:\n print \"No available checkpoints.\"\n return\n\n print \"Available checkpoints:\"\n arg_3 = [_checkpointLabelFromCheckpointDir(x)\n for x in arg_2]\n\n for arg_4 in sorted(arg_3):\n print \"\\t\", arg_4\n\n print\n print \"To start from a checkpoint:\"\n print \" python run_opf_experiment.py experiment --load \"\n print \"For example, to start from the checkpoint \\\"MyCheckpoint\\\":\"\n print \" python run_opf_experiment.py experiment --load MyCheckpoint\""} +{"_id": "doc_1902", "title": "", "text": "def Func(arg_0):\n \"\"\"Creates and returns a list of activites for this TaskRunner instance\n\n Returns: a list of PeriodicActivityRequest elements\n \"\"\"\n # Initialize periodic activities\n arg_1 = []\n\n # Metrics reporting\n class MetricsReportCb(object):\n def __init__(arg_0, arg_2):\n arg_0.__taskRunner = arg_2\n return\n\n def __call__(arg_0):\n arg_0.__taskRunner._getAndEmitExperimentMetrics()\n\n arg_4 = PeriodicActivityRequest(\n repeating=True,\n period=1000,\n cb=MetricsReportCb(arg_0))\n\n arg_1.append(arg_4)\n\n # Iteration progress\n class IterationProgressCb(object):\n arg_5 = 1000\n\n def __init__(arg_0, arg_6, arg_7, arg_8):\n arg_0.__taskLabel = arg_6\n arg_0.__requestedIterationCount = arg_7\n arg_0.__logger = arg_8\n\n arg_0.__numIterationsSoFar = 0\n\n def __call__(arg_0):\n arg_0.__numIterationsSoFar += arg_0.PROGRESS_UPDATE_PERIOD_TICKS\n 
arg_0.__logger.debug(\"%s: ITERATION PROGRESS: %s of %s\" % (\n arg_0.__taskLabel,\n arg_0.__numIterationsSoFar,\n arg_0.__requestedIterationCount))\n\n arg_13 = IterationProgressCb(\n arg_6=arg_0.__task['taskLabel'],\n arg_7=arg_0.__task['iterationCount'],\n arg_8=arg_0.__logger)\n arg_14 = PeriodicActivityRequest(\n repeating=True,\n period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS,\n cb=arg_13)\n\n arg_1.append(arg_14)\n\n return arg_1"} +{"_id": "doc_1903", "title": "", "text": "def Func():\n \"\"\"\n Shows predictions of the TM when presented with the characters A, B, C, D, X, and\n Y without any contextual information, that is, not embedded within a sequence.\n \"\"\" \n for arg_0 in range(6):\n tm.reset()\n print \"--- \" + \"ABCDXY\"[arg_0] + \" ---\"\n tm.compute(set(seqT[arg_0][:].nonzero()[0].tolist()), learn=False)\n arg_1 = [tm.columnForCell(i) for i in tm.getActiveCells()]\n arg_2 = [tm.columnForCell(i) for i in tm.getPredictiveCells()] \n arg_3 = [1 if i in arg_1 else 0 for i in range(tm.numberOfColumns())]\n arg_4 = [1 if i in arg_2 else 0 for i in range(tm.numberOfColumns())]\n print(\"Active cols: \" + str(np.nonzero(arg_3)[0]))\n print(\"Predicted cols: \" + str(np.nonzero(arg_4)[0]))\n print \"\""} +{"_id": "doc_1904", "title": "", "text": "def Func(arg_0=2):\n \"\"\"Utility function to get information about function callers\n\n The information is the tuple (function/method name, filename, class)\n The class will be None if the caller is just a function and not an object\n method.\n\n :param depth: (int) how far back in the callstack to go to extract the caller\n info\n\n \"\"\"\n arg_1 = sys._getframe(arg_0)\n arg_2 = arg_1.f_code.co_name\n arg_3 = arg_1.f_code.co_filename\n\n arg_4 = None\n arg_5 = inspect.getargvalues(arg_1)\n if len(arg_5[0]) > 0:\n arg_6 = arg_5[0][0] # potentially the 'self' arg if its a method\n arg_4 = arg_5[3][arg_6].__class__.__name__\n return (arg_2, arg_3, arg_4)"} +{"_id": "doc_1905", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the arguments, default values, and argument descriptions for a function.\n\n Parses the argument descriptions out of the function docstring, using a\n format something lke this:\n\n ::\n\n [junk]\n argument_name: description...\n description...\n description...\n [junk]\n [more arguments]\n\n It will find an argument as long as the exact argument name starts the line.\n It will then strip a trailing colon, if present, then strip the rest of the\n line and use it to start the description. 
It will then strip and append any\n subsequent lines with a greater indent level than the original argument name.\n\n :param f: (function) to inspect\n :returns: (list of tuples) (``argName``, ``argDescription``, ``defaultValue``)\n If an argument has no default value, the tuple is only two elements long (as\n ``None`` cannot be used, since it could be a default value itself).\n \"\"\"\n\n # Get the argument names and default values\n arg_1 = inspect.getargspec(arg_0)\n\n # Scan through the docstring to extract documentation for each argument as\n # follows:\n # Check the first word of the line, stripping a colon if one is present.\n # If it matches an argument name:\n # Take the rest of the line, stripping leading whitespeace\n # Take each subsequent line if its indentation level is greater than the\n # initial indentation level\n # Once the indentation level is back to the original level, look for\n # another argument\n arg_2 = arg_0.__doc__\n arg_3 = {}\n if arg_2:\n arg_4 = arg_2.split('\\n')\n arg_5 = 0\n while arg_5 < len(arg_4):\n arg_6 = arg_4[arg_5].lstrip()\n if not arg_6:\n arg_5 += 1\n continue\n # Indentation level is index of the first character\n arg_7 = arg_4[arg_5].index(arg_6[0])\n # Get the first word and remove the colon, if present\n arg_8 = arg_6.split()[0]\n if arg_8.endswith(':'):\n arg_8 = arg_8[:-1]\n if arg_8 in arg_1.args:\n # Found an argument\n arg_9 = arg_8\n arg_10 = arg_6[len(arg_8)+1:].strip()\n arg_11 = [arg_10]\n # Take the next lines as long as they are indented more\n arg_5 += 1\n while arg_5 < len(arg_4):\n arg_6 = arg_4[arg_5].lstrip()\n if not arg_6:\n # Empty line - stop\n break\n if arg_4[arg_5].index(arg_6[0]) <= arg_7:\n # No longer indented far enough - stop\n break\n # This line counts too\n arg_11.append(arg_4[arg_5].strip())\n arg_5 += 1\n # Store this description\n arg_3[arg_9] = ' '.join(arg_11)\n else:\n # Not an argument\n arg_5 += 1\n\n # Build the list of (argName, description, defaultValue)\n arg_12 = []\n if arg_1.defaults:\n arg_13 = len(arg_1.defaults)\n else:\n arg_13 = 0\n arg_14 = len(arg_1.args) - arg_13\n for arg_5, arg_9 in enumerate(arg_1.args):\n if arg_5 >= arg_14:\n arg_15 = arg_1.defaults[arg_5 - arg_14]\n arg_12.append((arg_9, arg_3.get(arg_9, \"\"), arg_15))\n else:\n arg_12.append((arg_9, arg_3.get(arg_9, \"\")))\n\n return arg_12"} +{"_id": "doc_1906", "title": "", "text": "def Func():\n \"\"\" Generate a filepath for the calling app \"\"\"\n arg_0 = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp'\n arg_1 = os.path.abspath(os.path.join(\n os.environ['NTA_LOG_DIR'],\n 'numenta-logs-%s' % (os.environ['USER'],),\n arg_0))\n arg_2 = '%s-%s-%s.log' % (\n arg_0, long(time.mktime(time.gmtime())), os.getpid())\n return os.path.join(arg_1, arg_2)"} +{"_id": "doc_1907", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the number of months and seconds from an aggregation dict that\n represents a date and time.\n\n Interval is a dict that contain one or more of the following keys: 'years',\n 'months', 'weeks', 'days', 'hours', 'minutes', seconds', 'milliseconds',\n 'microseconds'.\n\n For example:\n\n ::\n\n aggregationMicroseconds({'years': 1, 'hours': 4, 'microseconds':42}) ==\n {'months':12, 'seconds':14400.000042}\n\n :param interval: (dict) The aggregation interval representing a date and time\n :returns: (dict) number of months and seconds in the interval:\n ``{months': XX, 'seconds': XX}``. 
The seconds is\n a floating point that can represent resolutions down to a\n microsecond.\n\n \"\"\"\n\n arg_1 = arg_0.get('microseconds', 0) * 0.000001\n arg_1 += arg_0.get('milliseconds', 0) * 0.001\n arg_1 += arg_0.get('seconds', 0)\n arg_1 += arg_0.get('minutes', 0) * 60\n arg_1 += arg_0.get('hours', 0) * 60 * 60\n arg_1 += arg_0.get('days', 0) * 24 * 60 * 60\n arg_1 += arg_0.get('weeks', 0) * 7 * 24 * 60 * 60\n\n arg_2 = arg_0.get('months', 0)\n arg_2 += 12 * arg_0.get('years', 0)\n\n return {'months': arg_2, 'seconds': arg_1}"} +{"_id": "doc_1908", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the result from dividing two dicts that represent date and time.\n\n Both dividend and divisor are dicts that contain one or more of the following\n keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',\n 'milliseconds', 'microseconds'.\n\n For example:\n\n ::\n\n Func({'hours': 4}, {'minutes': 15}) == 16\n\n :param dividend: (dict) The numerator, as a dict representing a date and time\n :param divisor: (dict) the denominator, as a dict representing a date and time\n :returns: (float) number of times divisor goes into dividend\n\n \"\"\"\n\n # Convert each into microseconds\n arg_2 = aggregationToMonthsSeconds(arg_0)\n arg_3 = aggregationToMonthsSeconds(arg_1)\n\n # It is a usage error to mix both months and seconds in the same operation\n if (arg_2['months'] != 0 and arg_3['seconds'] != 0) \\\n or (arg_2['seconds'] != 0 and arg_3['months'] != 0):\n raise RuntimeError(\"Aggregation dicts with months/years can only be \"\n \"inter-operated with other aggregation dicts that contain \"\n \"months/years\")\n\n\n if arg_2['months'] > 0:\n return float(arg_2['months']) / arg_1['months']\n\n else:\n return float(arg_2['seconds']) / arg_3['seconds']"} +{"_id": "doc_1909", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to create a logger object for the current object with\n the standard Numenta prefix.\n\n :param obj: (object) to add a logger to\n \"\"\"\n if inspect.isclass(arg_0):\n arg_1 = arg_0\n else:\n arg_1 = arg_0.__class__\n arg_2 = logging.getLogger(\".\".join(\n ['com.numenta', arg_1.__module__, arg_1.__name__]))\n return arg_2"} +{"_id": "doc_1910", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a subset of the keys that match any of the given patterns\n\n :param patterns: (list) regular expressions to match\n :param keys: (list) keys to search for matches\n \"\"\"\n arg_2 = []\n if arg_0:\n for arg_3 in arg_0:\n arg_4 = re.compile(arg_3)\n for arg_5 in arg_1:\n if arg_4.match(arg_5):\n arg_2.append(arg_5)\n else:\n return None\n\n return arg_2"} +{"_id": "doc_1911", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert the input, which is in normal space, into log space\n \"\"\"\n if arg_1 == SENTINEL_VALUE_FOR_MISSING_DATA:\n return None\n else:\n arg_2 = arg_1\n if arg_2 < arg_0.minval:\n arg_2 = arg_0.minval\n elif arg_2 > arg_0.maxval:\n arg_2 = arg_0.maxval\n\n arg_3 = math.log10(arg_2)\n return arg_3"} +{"_id": "doc_1912", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Exports a network as a networkx MultiDiGraph intermediate representation\n suitable for visualization.\n\n :return: networkx MultiDiGraph\n \"\"\"\n arg_1 = nx.MultiDiGraph()\n\n # Add regions to graph as nodes, annotated by name\n arg_2 = arg_0.network.getRegions()\n\n for arg_3 in xrange(arg_2.getCount()):\n arg_4 = arg_2.getByIndex(arg_3)\n arg_5 = arg_4[0]\n arg_1.add_node(arg_5, label=arg_5)\n\n # Add links between regions to 
graph as edges, annotate by input-output\n # name pairs\n for arg_6, arg_7 in arg_0.network.getLinks():\n arg_1.add_edge(arg_7.getSrcRegionName(),\n arg_7.getDestRegionName(),\n src=arg_7.getSrcOutputName(),\n dest=arg_7.getDestInputName())\n\n return arg_1"} +{"_id": "doc_1913", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Computes the percentage of overlap between vectors x1 and x2.\n\n @param x1 (array) binary vector\n @param x2 (array) binary vector\n @param size (int) length of binary vectors\n\n @return percentOverlap (float) percentage overlap between x1 and x2\n \"\"\"\n arg_3 = np.count_nonzero(arg_0)\n arg_4 = np.count_nonzero(arg_1)\n arg_5 = min(arg_3, arg_4)\n Func = 0\n if arg_5 > 0:\n Func = float(np.dot(arg_0, arg_1))/float(arg_5)\n return Func"} +{"_id": "doc_1914", "title": "", "text": "def Func():\n \"\"\"Poll CPU usage, make predictions, and plot the results. Runs forever.\"\"\"\n # Create the model for predicting CPU usage.\n arg_0 = ModelFactory.create(model_params.MODEL_PARAMS)\n arg_0.enableInference({'predictedField': 'cpu'})\n # The shifter will align prediction and actual values.\n arg_1 = InferenceShifter()\n # Keep the last WINDOW predicted and actual values for plotting.\n arg_2 = deque([0.0] * WINDOW, maxlen=60)\n arg_3 = deque([0.0] * WINDOW, maxlen=60)\n\n # Initialize the plot lines that we will update with each new record.\n arg_4, = plt.plot(range(WINDOW), arg_2)\n arg_5, = plt.plot(range(WINDOW), arg_3)\n # Set the y-axis range.\n arg_4.axes.set_ylim(0, 100)\n arg_5.axes.set_ylim(0, 100)\n\n while True:\n arg_6 = time.time()\n\n # Get the CPU usage.\n arg_7 = psutil.cpu_percent()\n\n # Run the input through the model and shift the resulting prediction.\n arg_8 = {'cpu': arg_7}\n arg_9 = arg_1.shift(arg_0.run(arg_8))\n\n # Update the trailing predicted and actual value deques.\n arg_10 = arg_9.inferences['multiStepBestPredictions'][5]\n if arg_10 is not None:\n arg_2.append(arg_9.rawInput['cpu'])\n arg_3.append(arg_10)\n\n # Redraw the chart with the new data.\n arg_4.set_ydata(arg_2) # update the data\n arg_5.set_ydata(arg_3) # update the data\n plt.draw()\n plt.legend( ('actual','predicted') )\n\n # Make sure we wait a total of 2 seconds per iteration.\n try:\n plt.pause(SECONDS_PER_STEP)\n except:\n pass"} +{"_id": "doc_1915", "title": "", "text": "def Func(arg_0):\n \"\"\"\n List of our member variables that we don't need to be saved\n \"\"\"\n arg_1 = BacktrackingTM.Func(arg_0)\n if arg_0.makeCells4Ephemeral:\n arg_1.extend(['cells4'])\n return arg_1"} +{"_id": "doc_1916", "title": "", "text": "def Func(arg_0):\n \"\"\"If state is allocated in CPP, copy over the data into our numpy arrays.\"\"\"\n\n # Get learn states if we need to print them out\n if arg_0.verbosity > 1 or arg_0.retrieveLearningStates:\n (arg_1, arg_2, arg_3, arg_4) = arg_0.cells4.getLearnStates()\n arg_0.lrnActiveState['t-1'] = arg_2.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.lrnActiveState['t'] = arg_1.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.lrnPredictedState['t-1'] = arg_4.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.lrnPredictedState['t'] = arg_3.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n\n if arg_0.allocateStatesInCPP:\n assert False\n (arg_1, arg_2, arg_3, arg_4, arg_7, arg_8, arg_9,\n arg_10) = arg_0.cells4.getStates()\n arg_0.cellConfidence['t'] = arg_9.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.cellConfidence['t-1'] = arg_10.reshape((arg_0.numberOfCols, 
arg_0.cellsPerColumn))\n arg_0.colConfidence['t'] = arg_7.reshape(arg_0.numberOfCols)\n arg_0.colConfidence['t-1'] = arg_8.reshape(arg_0.numberOfCols)\n arg_0.infActiveState['t-1'] = arg_2.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.infActiveState['t'] = arg_1.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.infPredictedState['t-1'] = arg_4.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))\n arg_0.infPredictedState['t'] = arg_3.reshape((arg_0.numberOfCols, arg_0.cellsPerColumn))"} +{"_id": "doc_1917", "title": "", "text": "def Func(arg_0):\n \"\"\"If we are having CPP use numpy-allocated buffers, set these buffer\n pointers. This is a relatively fast operation and, for safety, should be\n done before every call to the cells4 compute methods. This protects us\n in situations where code can cause Python or numpy to create copies.\"\"\"\n if not arg_0.allocateStatesInCPP:\n arg_0.cells4.setStatePointers(\n arg_0.infActiveState[\"t\"], arg_0.infActiveState[\"t-1\"],\n arg_0.infPredictedState[\"t\"], arg_0.infPredictedState[\"t-1\"],\n arg_0.colConfidence[\"t\"], arg_0.colConfidence[\"t-1\"],\n arg_0.cellConfidence[\"t\"], arg_0.cellConfidence[\"t-1\"])"} +{"_id": "doc_1918", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n A segment is active if it has >= activationThreshold connected\n synapses that are active due to infActiveState.\n\n \"\"\"\n\n arg_3 = arg_1.size()\n arg_4 = 0\n for arg_5 in xrange(arg_3):\n if arg_1.getPermanence(arg_5) < arg_0.connectedPerm:\n continue\n arg_6, arg_7 = arg_0.getColCellIdx(arg_1.getSrcCellIdx(arg_5))\n if arg_0.infActiveState[arg_2][arg_6, arg_7]:\n arg_4 += 1\n if arg_4 >= arg_0.activationThreshold:\n return True\n\n return arg_4 >= arg_0.activationThreshold"} +{"_id": "doc_1919", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Given a bucket index, return the list of non-zero bits. If the bucket\n index does not exist, it is created. If the index falls outside our range\n we clip it.\n\n :param index The bucket index to get non-zero bits for.\n @returns numpy array of indices of non-zero bits for specified index.\n \"\"\"\n if arg_1 < 0:\n arg_1 = 0\n\n if arg_1 >= arg_0._maxBuckets:\n arg_1 = arg_0._maxBuckets-1\n\n if not arg_0.bucketMap.has_key(arg_1):\n if arg_0.verbosity >= 2:\n print \"Adding additional buckets to handle index=\", arg_1\n arg_0._createBucket(arg_1)\n return arg_0.bucketMap[arg_1]"} +{"_id": "doc_1920", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create the given bucket index. 
Recursively create as many in-between\n bucket indices as necessary.\n \"\"\"\n if arg_1 < arg_0.minIndex:\n if arg_1 == arg_0.minIndex - 1:\n # Create a new representation that has exactly w-1 overlapping bits\n # as the min representation\n arg_0.bucketMap[arg_1] = arg_0._newRepresentation(arg_0.minIndex,\n arg_1)\n arg_0.minIndex = arg_1\n else:\n # Recursively create all the indices above and then this index\n arg_0.Func(arg_1+1)\n arg_0.Func(arg_1)\n else:\n if arg_1 == arg_0.maxIndex + 1:\n # Create a new representation that has exactly w-1 overlapping bits\n # as the max representation\n arg_0.bucketMap[arg_1] = arg_0._newRepresentation(arg_0.maxIndex,\n arg_1)\n arg_0.maxIndex = arg_1\n else:\n # Recursively create all the indices below and then this index\n arg_0.Func(arg_1-1)\n arg_0.Func(arg_1)"} +{"_id": "doc_1921", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return a new representation for newIndex that overlaps with the\n representation at index by exactly w-1 bits\n \"\"\"\n arg_3 = arg_0.bucketMap[arg_1].copy()\n\n # Choose the bit we will replace in this representation. We need to shift\n # this bit deterministically. If this is always chosen randomly then there\n # is a 1 in w chance of the same bit being replaced in neighboring\n # representations, which is fairly high\n arg_4 = arg_2 % arg_0.w\n\n # Now we choose a bit such that the overlap rules are satisfied.\n arg_5 = arg_0.random.getUInt32(arg_0.n)\n arg_3[arg_4] = arg_5\n while arg_5 in arg_0.bucketMap[arg_1] or \\\n not arg_0.FuncOK(arg_3, arg_2):\n arg_0.numTries += 1\n arg_5 = arg_0.random.getUInt32(arg_0.n)\n arg_3[arg_4] = arg_5\n\n return arg_3"} +{"_id": "doc_1922", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return the overlap between bucket indices i and j\n \"\"\"\n if arg_0.bucketMap.has_key(arg_1) and arg_0.bucketMap.has_key(arg_2):\n arg_3 = arg_0.bucketMap[arg_1]\n arg_4 = arg_0.bucketMap[arg_2]\n return arg_0._countOverlap(arg_3, arg_4)\n else:\n raise ValueError(\"Either i or j don't exist\")"} +{"_id": "doc_1923", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the overlap between two representations. rep1 and rep2 are lists of\n non-zero indices.\n \"\"\"\n arg_2 = 0\n for arg_3 in arg_0:\n if arg_3 in arg_1:\n arg_2 += 1\n return arg_2"} +{"_id": "doc_1924", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Return True if the given overlap between bucket indices i and j are\n acceptable. 
If overlap is not specified, calculate it from the bucketMap\n \"\"\"\n if arg_3 is None:\n arg_3 = arg_0._countOverlapIndices(arg_1, arg_2)\n if abs(arg_1-arg_2) < arg_0.w:\n if arg_3 == (arg_0.w - abs(arg_1-arg_2)):\n return True\n else:\n return False\n else:\n if arg_3 <= arg_0._maxOverlap:\n return True\n else:\n return False"} +{"_id": "doc_1925", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"\n Create a SDR classifier factory.\n The implementation of the SDR Classifier can be specified with\n the \"implementation\" keyword argument.\n\n The SDRClassifierFactory uses the implementation as specified in\n `Default NuPIC Configuration `_.\n \"\"\"\n arg_2 = arg_1.pop('implementation', None)\n if arg_2 is None:\n arg_2 = Configuration.get('nupic.opf.sdrClassifier.implementation')\n if arg_2 == 'py':\n return SDRClassifier(*arg_0, **arg_1)\n elif arg_2 == 'cpp':\n return FastSDRClassifier(*arg_0, **arg_1)\n elif arg_2 == 'diff':\n return SDRClassifierDiff(*arg_0, **arg_1)\n else:\n raise ValueError('Invalid classifier implementation (%r). Value must be '\n '\"py\", \"cpp\" or \"diff\".' % arg_2)"} +{"_id": "doc_1926", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convenience method to compute a metric over an indices trace, excluding\n resets.\n\n @param (IndicesTrace) Trace of indices\n\n @return (Metric) Metric over trace excluding resets\n \"\"\"\n return Metric.createFromTrace(arg_1.makeCountsTrace(),\n excludeResets=arg_0.mmGetTraceResets())"} +{"_id": "doc_1927", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Metric for number of predicted => active cells per column for each sequence\n\n @return (Metric) metric\n \"\"\"\n arg_0._mmComputeTransitionTraces()\n\n arg_1 = []\n\n for arg_2 in (\n arg_0._mmData[\"predictedActiveCellsForSequence\"].values()):\n arg_3 = arg_0.mapCellsToColumns(arg_2)\n arg_1 += [len(arg_4) for arg_4 in arg_3.values()]\n\n return Metric(arg_0,\n \"# predicted => active cells per column for each sequence\",\n arg_1)"} +{"_id": "doc_1928", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Metric for number of sequences each predicted => active cell appears in\n\n Note: This metric is flawed when it comes to high-order sequences.\n\n @return (Metric) metric\n \"\"\"\n arg_0._mmComputeTransitionTraces()\n\n arg_1 = defaultdict(lambda: 0)\n\n for arg_2 in (\n arg_0._mmData[\"predictedActiveCellsForSequence\"].values()):\n for arg_3 in arg_2:\n arg_1[arg_3] += 1\n\n return Metric(arg_0,\n \"# sequences each predicted => active cells appears in\",\n arg_1.values())"} +{"_id": "doc_1929", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Pretty print the connections in the temporal memory.\n\n TODO: Use PrettyTable.\n\n @return (string) Pretty-printed text\n \"\"\"\n arg_1 = \"\"\n\n arg_1 += (\"Segments: (format => \"\n \"(#) [(source cell=permanence ...), ...]\\n\")\n arg_1 += \"------------------------------------\\n\"\n\n arg_2 = range(arg_0.numberOfColumns())\n\n for arg_3 in arg_2:\n arg_4 = arg_0.cellsForColumn(arg_3)\n\n for arg_5 in arg_4:\n arg_6 = dict()\n\n for arg_7 in arg_0.connections.segmentsForCell(arg_5):\n arg_8 = []\n\n for arg_9 in arg_0.connections.synapsesForSegment(arg_7):\n arg_10 = arg_0.connections.dataForSynapse(arg_9)\n arg_8.append(\n (arg_10.presynapticCell, arg_10.permanence))\n\n arg_8.sort()\n arg_11 = [\"{0:3}={1:.2f}\".format(sourceCell, permanence) for\n sourceCell, permanence in arg_8]\n arg_6[arg_7] = \"({0})\".format(\" \".join(arg_11))\n\n arg_1 += (\"Column {0:3} / Cell {1:3}:\\t({2}) 
{3}\\n\".format(\n arg_3, arg_5,\n len(arg_6.values()),\n \"[{0}]\".format(\", \".join(arg_6.values()))))\n\n if arg_3 < len(arg_2) - 1: # not last\n arg_1 += \"\\n\"\n\n arg_1 += \"------------------------------------\\n\"\n\n return arg_1"} +{"_id": "doc_1930", "title": "", "text": "def Func(arg_0, arg_1=arg_2,\n arg_3=arg_4,\n arg_5=arg_6):\n\n\n \"\"\"Generates a Network with connected RecordSensor, SP, TM.\n\n This function takes care of generating regions and the canonical links.\n The network has a sensor region reading data from a specified input and\n passing the encoded representation to an SPRegion.\n The SPRegion output is passed to a TMRegion.\n\n Note: this function returns a network that needs to be initialized. This\n allows the user to extend the network by adding further regions and\n connections.\n\n :param recordParams: a dict with parameters for creating RecordSensor region.\n :param spatialParams: a dict with parameters for creating SPRegion.\n :param temporalParams: a dict with parameters for creating TMRegion.\n :param verbosity: an integer representing how chatty the network will be.\n \"\"\"\n arg_7 = arg_0[\"inputFilePath\"]\n arg_8 = arg_0[\"scalarEncoderArgs\"]\n arg_9 = arg_0[\"dateEncoderArgs\"]\n\n arg_10 = ScalarEncoder(**arg_8)\n arg_11 = DateEncoder(**arg_9)\n\n arg_12 = MultiEncoder()\n arg_12.addEncoder(arg_8[\"name\"], arg_10)\n arg_12.addEncoder(arg_9[\"name\"], arg_11)\n\n arg_13 = Network()\n\n arg_13.addRegion(\"sensor\", \"py.RecordSensor\",\n json.dumps({\"verbosity\": arg_5}))\n\n arg_14 = arg_13.regions[\"sensor\"].getSelf()\n arg_14.encoder = arg_12\n arg_14.dataSource = FileRecordStream(streamID=arg_7)\n\n # Create the spatial pooler region\n arg_1[\"inputWidth\"] = arg_14.encoder.getWidth()\n arg_13.addRegion(\"spatialPoolerRegion\", \"py.SPRegion\",\n json.dumps(arg_1))\n\n # Link the SP region to the sensor input\n arg_13.link(\"sensor\", \"spatialPoolerRegion\", \"UniformLink\", \"\")\n arg_13.link(\"sensor\", \"spatialPoolerRegion\", \"UniformLink\", \"\",\n srcOutput=\"resetOut\", destInput=\"resetIn\")\n arg_13.link(\"spatialPoolerRegion\", \"sensor\", \"UniformLink\", \"\",\n srcOutput=\"spatialTopDownOut\", destInput=\"spatialTopDownIn\")\n arg_13.link(\"spatialPoolerRegion\", \"sensor\", \"UniformLink\", \"\",\n srcOutput=\"temporalTopDownOut\", destInput=\"temporalTopDownIn\")\n\n # Add the TPRegion on top of the SPRegion\n arg_13.addRegion(\"temporalPoolerRegion\", \"py.TMRegion\",\n json.dumps(arg_3))\n\n arg_13.link(\"spatialPoolerRegion\", \"temporalPoolerRegion\", \"UniformLink\", \"\")\n arg_13.link(\"temporalPoolerRegion\", \"spatialPoolerRegion\", \"UniformLink\", \"\",\n srcOutput=\"topDownOut\", destInput=\"topDownIn\")\n\n arg_16 = arg_13.regions[\"spatialPoolerRegion\"]\n\n # Make sure learning is enabled\n arg_16.setParameter(\"learningMode\", True)\n # We want temporal anomalies so disable anomalyMode in the SP. 
This mode is\n # used for computing anomalies in a non-temporal model.\n arg_16.setParameter(\"anomalyMode\", False)\n\n arg_17 = arg_13.regions[\"temporalPoolerRegion\"]\n\n # Enable topDownMode to get the predicted columns output\n arg_17.setParameter(\"topDownMode\", True)\n # Make sure learning is enabled (this is the default)\n arg_17.setParameter(\"learningMode\", True)\n # Enable inference mode so we get predictions\n arg_17.setParameter(\"inferenceMode\", True)\n # Enable anomalyMode to compute the anomaly score.\n arg_17.setParameter(\"anomalyMode\", True)\n\n return arg_13"} +{"_id": "doc_1931", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"Multiplies a value over a range of rows.\n\n Args:\n reader: A FileRecordStream object with input data.\n writer: A FileRecordStream object to write output data to.\n column: The column of data to modify.\n start: The first row in the range to modify.\n end: The last row in the range to modify.\n multiple: The value to Func/multiply by.\n \"\"\"\n for arg_6, arg_7 in enumerate(arg_0):\n if arg_6 >= arg_3 and arg_6 <= arg_4:\n arg_7[arg_2] = type(arg_5)(arg_7[arg_2]) * arg_5\n arg_1.appendRecord(arg_7)"} +{"_id": "doc_1932", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None):\n \"\"\"Copies a range of values to a new location in the data set.\n\n Args:\n reader: A FileRecordStream object with input data.\n writer: A FileRecordStream object to write output data to.\n start: The first row in the range to Func.\n stop: The last row in the range to Func.\n insertLocation: The location to insert the copied range. If not specified,\n the range is inserted immediately following itself.\n \"\"\"\n assert arg_3 >= arg_2\n arg_6 = []\n arg_7 = []\n arg_8 = None\n arg_9 = None\n if arg_5 is None:\n arg_5 = arg_0.getTimestampFieldIdx()\n for arg_10, arg_11 in enumerate(arg_0):\n # Get the first timestamp and the increment.\n if arg_8 is None:\n arg_8 = arg_11[arg_5]\n elif arg_9 is None:\n arg_9 = arg_11[arg_5] - arg_8\n # Keep a list of all rows and a list of rows to Func.\n if arg_10 >= arg_2 and arg_10 <= arg_3:\n arg_7.append(arg_11)\n arg_6.append(arg_11)\n # Insert the copied rows.\n if arg_4 is None:\n arg_4 = arg_3 + 1\n arg_6[arg_4:arg_4] = arg_7\n # Update the timestamps.\n for arg_11 in arg_6:\n arg_11[arg_5] = arg_8\n arg_1.appendRecord(arg_11)\n arg_8 += arg_9"} +{"_id": "doc_1933", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"generate description from a text description of the ranges\"\"\"\n arg_2 = \"\"\n arg_3 = len(arg_1)\n for arg_4 in xrange(arg_3):\n if arg_1[arg_4][0] != arg_1[arg_4][1]:\n arg_2 += \"%.2f-%.2f\" % (arg_1[arg_4][0], arg_1[arg_4][1])\n else:\n arg_2 += \"%.2f\" % (arg_1[arg_4][0])\n if arg_4 < arg_3 - 1:\n arg_2 += \", \"\n return arg_2"} +{"_id": "doc_1934", "title": "", "text": "def Func(arg_0,):\n \"\"\"\n Reset the state of all cells.\n\n This is normally used between sequences while training. 
All internal states\n are Func to 0.\n \"\"\"\n if arg_0.verbosity >= 3:\n print \"\\n==== RESET =====\"\n\n arg_0.lrnActiveState['t-1'].fill(0)\n arg_0.lrnActiveState['t'].fill(0)\n arg_0.lrnPredictedState['t-1'].fill(0)\n arg_0.lrnPredictedState['t'].fill(0)\n\n arg_0.infActiveState['t-1'].fill(0)\n arg_0.infActiveState['t'].fill(0)\n arg_0.infPredictedState['t-1'].fill(0)\n arg_0.infPredictedState['t'].fill(0)\n\n arg_0.cellConfidence['t-1'].fill(0)\n arg_0.cellConfidence['t'].fill(0)\n\n # Flush the segment update queue\n arg_0.segmentUpdates = {}\n\n arg_0._internalStats['nInfersSinceReset'] = 0\n\n #To be removed\n arg_0._internalStats['curPredictionScore'] = 0\n #New prediction score\n arg_0._internalStats['curPredictionScore2'] = 0\n arg_0._internalStats['curFalseNegativeScore'] = 0\n arg_0._internalStats['curFalsePositiveScore'] = 0\n\n arg_0._internalStats['curMissing'] = 0\n arg_0._internalStats['curExtra'] = 0\n\n # When a Func occurs, set prevSequenceSignature to the signature of the\n # just-completed sequence and start accumulating histogram for the next\n # sequence.\n arg_0._internalStats['prevSequenceSignature'] = None\n if arg_0.collectSequenceStats:\n if arg_0._internalStats['confHistogram'].sum() > 0:\n arg_3 = arg_0._internalStats['confHistogram'].copy()\n arg_3.reshape(arg_0.numberOfCols * arg_0.cellsPerColumn)\n arg_0._internalStats['prevSequenceSignature'] = arg_3\n arg_0._internalStats['confHistogram'].fill(0)\n\n arg_0.FuncCalled = True\n\n # Clear out input history\n arg_0._prevInfPatterns = []\n arg_0._prevLrnPatterns = []"} +{"_id": "doc_1935", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4):\n \"\"\"\n Called at the end of learning and inference, this routine will update\n a number of stats in our _internalStats dictionary, including our computed\n prediction score.\n\n :param stats internal stats dictionary\n :param bottomUpNZ list of the active bottom-up inputs\n :param predictedState The columns we predicted on the last time step (should\n match the current bottomUpNZ in the best case)\n :param colConfidence Column confidences we determined on the last time step\n \"\"\"\n # Return if not collecting stats\n if not arg_0.collectStats:\n return\n arg_1['nInfersSinceReset'] += 1\n\n # Compute the prediction score, how well the prediction from the last\n # time step predicted the current bottom-up input\n (arg_5, arg_6, arg_7) = arg_0._checkPrediction(\n patternNZs=[arg_2], output=arg_3,\n arg_4=arg_4)\n arg_8, arg_9, arg_10 = (\n arg_7[0])\n\n # Store the stats that don't depend on burn-in\n arg_1['curPredictionScore2'] = float(arg_8)\n arg_1['curFalseNegativeScore'] = 1.0 - float(arg_9)\n arg_1['curFalsePositiveScore'] = float(arg_10)\n\n arg_1['curMissing'] = arg_6\n arg_1['curExtra'] = arg_5\n\n # If we are passed the burn-in period, update the accumulated stats\n # Here's what various burn-in values mean:\n # 0: try to predict the first element of each sequence and all subsequent\n # 1: try to predict the second element of each sequence and all subsequent\n # etc.\n if arg_1['nInfersSinceReset'] <= arg_0.burnIn:\n return\n\n # Burn-in related stats\n arg_1['nPredictions'] += 1\n arg_11 = max(1.0, float(len(arg_2)))\n\n arg_1['totalMissing'] += arg_6\n arg_1['totalExtra'] += arg_5\n arg_1['pctExtraTotal'] += 100.0 * arg_5 / arg_11\n arg_1['pctMissingTotal'] += 100.0 * arg_6 / arg_11\n arg_1['predictionScoreTotal2'] += float(arg_8)\n arg_1['falseNegativeScoreTotal'] += 1.0 - float(arg_9)\n arg_1['falsePositiveScoreTotal'] += 
float(arg_10)\n\n if arg_0.collectSequenceStats:\n # Collect cell confidences for every cell that correctly predicted current\n # bottom up input. Normalize confidence across each column\n arg_12 = arg_0.cellConfidence['t-1'] * arg_0.infActiveState['t']\n arg_13 = arg_12.sum(axis=1)\n for arg_14 in range(arg_0.numberOfCols):\n if arg_13[arg_14] > 0:\n arg_12[arg_14, :] /= arg_13[arg_14]\n\n # Update cell confidence histogram: add column-normalized confidence\n # scores to the histogram\n arg_0._internalStats['confHistogram'] += arg_12"} +{"_id": "doc_1936", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Print an integer array that is the same shape as activeState.\n\n :param aState: TODO: document\n \"\"\"\n def formatRow(arg_2, arg_3):\n arg_4 = ''\n for arg_5 in range(arg_0.numberOfCols):\n if arg_5 > 0 and arg_5 % 10 == 0:\n arg_4 += ' '\n arg_4 += str(arg_2[arg_5, arg_3])\n arg_4 += ' '\n return arg_4\n\n for arg_3 in xrange(arg_0.cellsPerColumn):\n print formatRow(arg_1, arg_3)"} +{"_id": "doc_1937", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = 20):\n \"\"\"\n Print a floating point array that is the same shape as activeState.\n\n :param aState: TODO: document\n :param maxCols: TODO: document\n \"\"\"\n def formatFPRow(arg_3, arg_4):\n arg_5 = ''\n for arg_6 in range(min(arg_2, arg_0.numberOfCols)):\n if arg_6 > 0 and arg_6 % 10 == 0:\n arg_5 += ' '\n arg_5 += ' %5.3f' % arg_3[arg_6, arg_4]\n arg_5 += ' '\n return arg_5\n\n for arg_4 in xrange(arg_0.cellsPerColumn):\n print formatFPRow(arg_1, arg_4)"} +{"_id": "doc_1938", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = 20):\n \"\"\"\n Print up to maxCols number from a flat floating point array.\n\n :param aState: TODO: document\n :param maxCols: TODO: document\n \"\"\"\n def formatFPRow(arg_3):\n arg_4 = ''\n for arg_5 in range(min(arg_2, arg_0.numberOfCols)):\n if arg_5 > 0 and arg_5 % 10 == 0:\n arg_4 += ' '\n arg_4 += ' %5.3f' % arg_3[arg_5]\n arg_4 += ' '\n return arg_4\n\n print formatFPRow(arg_1)"} +{"_id": "doc_1939", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Print the parameter settings for the TM.\n \"\"\"\n print \"numberOfCols=\", arg_0.numberOfCols\n print \"cellsPerColumn=\", arg_0.cellsPerColumn\n print \"minThreshold=\", arg_0.minThreshold\n print \"newSynapseCount=\", arg_0.newSynapseCount\n print \"activationThreshold=\", arg_0.activationThreshold\n print\n print \"initialPerm=\", arg_0.initialPerm\n print \"connectedPerm=\", arg_0.connectedPerm\n print \"permanenceInc=\", arg_0.permanenceInc\n print \"permanenceDec=\", arg_0.permanenceDec\n print \"permanenceMax=\", arg_0.permanenceMax\n print \"globalDecay=\", arg_0.globalDecay\n print\n print \"doPooling=\", arg_0.doPooling\n print \"segUpdateValidDuration=\", arg_0.segUpdateValidDuration\n print \"pamLength=\", arg_0.pamLength"} +{"_id": "doc_1940", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Called at the end of inference to print out various diagnostic\n information based on the current verbosity level.\n\n :param output: TODO: document\n :param learn: TODO: document\n \"\"\"\n if arg_0.verbosity >= 3:\n print \"----- computeEnd summary: \"\n print \"learn:\", arg_2\n print \"numBurstingCols: %s, \" % (\n arg_0.infActiveState['t'].min(axis=1).sum()),\n print \"curPredScore2: %s, \" % (\n arg_0._internalStats['curPredictionScore2']),\n print \"curFalsePosScore: %s, \" % (\n arg_0._internalStats['curFalsePositiveScore']),\n print \"1-curFalseNegScore: %s, \" % (\n 1 - 
arg_0._internalStats['curFalseNegativeScore'])\n print \"numSegments: \", arg_0.getNumSegments(),\n print \"avgLearnedSeqLength: \", arg_0.avgLearnedSeqLength\n\n print \"----- infActiveState (%d on) ------\" % (\n arg_0.infActiveState['t'].sum())\n arg_0.printActiveIndices(arg_0.infActiveState['t'])\n if arg_0.verbosity >= 6:\n arg_0.printState(arg_0.infActiveState['t'])\n\n print \"----- infPredictedState (%d on)-----\" % (\n arg_0.infPredictedState['t'].sum())\n arg_0.printActiveIndices(arg_0.infPredictedState['t'])\n if arg_0.verbosity >= 6:\n arg_0.printState(arg_0.infPredictedState['t'])\n\n print \"----- lrnActiveState (%d on) ------\" % (\n arg_0.lrnActiveState['t'].sum())\n arg_0.printActiveIndices(arg_0.lrnActiveState['t'])\n if arg_0.verbosity >= 6:\n arg_0.printState(arg_0.lrnActiveState['t'])\n\n print \"----- lrnPredictedState (%d on)-----\" % (\n arg_0.lrnPredictedState['t'].sum())\n arg_0.printActiveIndices(arg_0.lrnPredictedState['t'])\n if arg_0.verbosity >= 6:\n arg_0.printState(arg_0.lrnPredictedState['t'])\n\n\n print \"----- cellConfidence -----\"\n arg_0.printActiveIndices(arg_0.cellConfidence['t'], andValues=True)\n if arg_0.verbosity >= 6:\n arg_0.printConfidence(arg_0.cellConfidence['t'])\n\n print \"----- colConfidence -----\"\n arg_0.printActiveIndices(arg_0.colConfidence['t'], andValues=True)\n\n print \"----- cellConfidence[t-1] for currently active cells -----\"\n arg_3 = arg_0.cellConfidence['t-1'] * arg_0.infActiveState['t']\n arg_0.printActiveIndices(arg_3, andValues=True)\n\n if arg_0.verbosity == 4:\n print \"Cells, predicted segments only:\"\n arg_0.printCells(predictedOnly=True)\n elif arg_0.verbosity >= 5:\n print \"Cells, all segments:\"\n arg_0.printCells(predictedOnly=False)\n print\n\n elif arg_0.verbosity >= 1:\n print \"TM: learn:\", arg_2\n print \"TM: active outputs(%d):\" % len(arg_1.nonzero()[0]),\n arg_0.printActiveIndices(arg_1.reshape(arg_0.numberOfCols,\n arg_0.cellsPerColumn))"} +{"_id": "doc_1941", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update our moving average of learned sequence length.\"\"\"\n if arg_0.lrnIterationIdx < 100:\n arg_2 = 0.5\n else:\n arg_2 = 0.1\n\n arg_0.avgLearnedSeqLength = ((1.0 - arg_2) * arg_0.avgLearnedSeqLength +\n (arg_2 * arg_1))"} +{"_id": "doc_1942", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n A utility method called from learnBacktrack. 
This will backtrack\n starting from the given startOffset in our prevLrnPatterns queue.\n\n It returns True if the backtrack was successful and we managed to get\n predictions all the way up to the current time step.\n\n If readOnly, then no segments are updated or modified, otherwise, all\n segment updates that belong to the given path are applied.\n \n This updates/modifies:\n\n - lrnActiveState['t']\n\n This trashes:\n\n - lrnPredictedState['t']\n - lrnPredictedState['t-1']\n - lrnActiveState['t-1']\n\n :param startOffset: Start offset within the prevLrnPatterns input history\n :param readOnly: \n :return: True if we managed to lock on to a sequence that started\n earlier.\n If False, we lost predictions somewhere along the way\n leading up to the current time.\n \"\"\"\n # How much input history have we accumulated?\n # The current input is always at the end of self._prevInfPatterns (at\n # index -1), but it is also evaluated as a potential starting point by\n # turning on it's start cells and seeing if it generates sufficient\n # predictions going forward.\n arg_3 = len(arg_0._prevLrnPatterns)\n\n # This is an easy to use label for the current time step\n arg_4 = arg_3 - 1\n\n # Clear out any old segment updates. learnPhase2() adds to the segment\n # updates if we're not readOnly\n if not arg_2:\n arg_0.segmentUpdates = {}\n\n # Status message\n if arg_0.verbosity >= 3:\n if arg_2:\n print (\n \"Trying to lock-on using startCell state from %d steps ago:\" % (\n arg_3 - 1 - arg_1),\n arg_0._prevLrnPatterns[arg_1])\n else:\n print (\n \"Locking on using startCell state from %d steps ago:\" % (\n arg_3 - 1 - arg_1),\n arg_0._prevLrnPatterns[arg_1])\n\n # Play through up to the current time step\n arg_6 = True\n for arg_7 in range(arg_1, arg_3):\n\n # Copy predicted and active states into t-1\n arg_0.lrnPredictedState['t-1'][:, :] = arg_0.lrnPredictedState['t'][:, :]\n arg_0.lrnActiveState['t-1'][:, :] = arg_0.lrnActiveState['t'][:, :]\n\n # Get the input pattern\n arg_10 = arg_0._prevLrnPatterns[arg_7]\n\n # Apply segment updates from the last set of predictions\n if not arg_2:\n arg_0._processSegmentUpdates(arg_10)\n\n # Phase 1:\n # Compute activeState[t] given bottom-up and predictedState[t-1]\n if arg_7 == arg_1:\n arg_0.lrnActiveState['t'].fill(0)\n for arg_11 in arg_10:\n arg_0.lrnActiveState['t'][arg_11, 0] = 1\n arg_6 = True\n else:\n # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']\n # computes lrnActiveState['t']\n arg_6 = arg_0._learnPhase1(arg_10, arg_2=arg_2)\n\n # Break out immediately if we fell out of sequence or reached the current\n # time step\n if not arg_6 or arg_7 == arg_4:\n break\n\n # Phase 2:\n # Computes predictedState['t'] given activeState['t'] and also queues\n # up active segments into self.segmentUpdates, unless this is readOnly\n if arg_0.verbosity >= 3:\n print \" backtrack: computing predictions from \", arg_10\n arg_0._learnPhase2(arg_2=arg_2)\n\n # Return whether or not this starting point was valid\n return arg_6"} +{"_id": "doc_1943", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This \"backtracks\" our learning state, trying to see if we can lock onto\n the current set of inputs by assuming the sequence started up to N steps\n ago on start cells.\n\n This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a\n sequence that started earlier.\n\n :returns: >0 if we managed to lock on to a sequence that started\n earlier. 
The value returned is how many steps in the\n past we locked on.\n If 0 is returned, the caller needs to change active\n state to start on start cells.\n\n How it works:\n -------------------------------------------------------------------\n This method gets called from updateLearningState when we detect either of\n the following two conditions:\n\n #. Our PAM counter (@ref pamCounter) expired\n #. We reached the max allowed learned sequence length\n\n Either of these two conditions indicate that we want to start over on start\n cells.\n\n Rather than start over on start cells on the current input, we can\n accelerate learning by backtracking a few steps ago and seeing if perhaps\n a sequence we already at least partially know already started.\n\n This updates/modifies:\n - @ref lrnActiveState['t']\n\n This trashes:\n - @ref lrnActiveState['t-1']\n - @ref lrnPredictedState['t']\n - @ref lrnPredictedState['t-1']\n\n \"\"\"\n # How much input history have we accumulated?\n # The current input is always at the end of self._prevInfPatterns (at\n # index -1), and is not a valid startingOffset to evaluate.\n arg_1 = len(arg_0._prevLrnPatterns) - 1\n if arg_1 <= 0:\n if arg_0.verbosity >= 3:\n print \"lrnBacktrack: No available history to backtrack from\"\n return False\n\n # We will record which previous input patterns did not generate predictions\n # up to the current time step and remove all the ones at the head of the\n # input history queue so that we don't waste time evaluating them again at\n # a later time step.\n arg_2 = []\n\n # Let's go back in time and replay the recent inputs from start cells and\n # see if we can lock onto this current set of inputs that way.\n #\n # Start the farthest back and work our way forward. For each starting point,\n # See if firing on start cells at that point would predict the current\n # input.\n #\n # We want to pick the point farthest in the past that has continuity\n # up to the current time step\n arg_3 = False\n for arg_4 in range(0, arg_1):\n # Can we backtrack from startOffset?\n arg_3 = arg_0.FuncFrom(arg_4, readOnly=True)\n\n # Done playing through the sequence from starting point startOffset\n # Break out as soon as we find a good path\n if arg_3:\n break\n\n # Take this bad starting point out of our input history so we don't\n # try it again later.\n arg_2.append(arg_4)\n\n # If we failed to lock on at any starting point, return failure. The caller\n # will start over again on start cells\n if not arg_3:\n if arg_0.verbosity >= 3:\n print (\"Failed to lock on. Falling back to start cells on current \"\n \"time step.\")\n # Nothing in our input history was a valid starting point, so get rid\n # of it so we don't try any of them again at a later iteration\n arg_0._prevLrnPatterns = []\n return False\n\n # We did find a valid starting point in the past. 
Now, we need to\n # re-enforce all segments that became active when following this path.\n if arg_0.verbosity >= 3:\n print (\"Discovered path to current input by using start cells from %d \"\n \"steps ago:\" % (arg_1 - arg_4),\n arg_0._prevLrnPatterns[arg_4])\n\n arg_0.FuncFrom(arg_4, readOnly=False)\n\n # Remove any useless patterns at the head of the input pattern history\n # queue.\n for arg_6 in range(arg_1):\n if arg_6 in arg_2 or arg_6 <= arg_4:\n if arg_0.verbosity >= 3:\n print (\"Removing useless pattern from history:\",\n arg_0._prevLrnPatterns[0])\n arg_0._prevLrnPatterns.pop(0)\n else:\n break\n\n return arg_1 - arg_4"} +{"_id": "doc_1944", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Compute the learning active state given the predicted state and\n the bottom-up input.\n\n :param activeColumns list of active bottom-ups\n :param readOnly True if being called from backtracking logic.\n This tells us not to increment any segment\n duty cycles or queue up any updates.\n :returns: True if the current input was sufficiently predicted, OR\n if we started over on startCells. False indicates that the current\n input was NOT predicted, well enough to consider it as \"inSequence\"\n\n This looks at:\n - @ref lrnActiveState['t-1']\n - @ref lrnPredictedState['t-1']\n\n This modifies:\n - @ref lrnActiveState['t']\n - @ref lrnActiveState['t-1']\n \"\"\"\n # Save previous active state and start out on a clean slate\n arg_0.lrnActiveState['t'].fill(0)\n\n # For each column, turn on the predicted cell. There will always be at most\n # one predicted cell per column\n arg_3 = 0\n for arg_4 in arg_1:\n arg_5 = numpy.where(arg_0.lrnPredictedState['t-1'][arg_4] == 1)[0]\n arg_6 = len(arg_5)\n assert arg_6 <= 1\n\n # If we have a predicted cell, turn it on. The segment's posActivation\n # count will have already been incremented by processSegmentUpdates\n if arg_6 == 1:\n arg_7 = arg_5[0]\n arg_0.lrnActiveState['t'][arg_4, arg_7] = 1\n continue\n\n arg_3 += 1\n if arg_2:\n continue\n\n # If no predicted cell, pick the closest matching one to reinforce, or\n # if none exists, create a new segment on a cell in that column\n arg_7, arg_9, arg_10 = arg_0._getBestMatchingCell(\n arg_4, arg_0.lrnActiveState['t-1'], arg_0.minThreshold)\n if arg_9 is not None and arg_9.isSequenceSegment():\n if arg_0.verbosity >= 4:\n print \"Learn branch 0, found segment match. Learning on col=\", arg_4\n arg_0.lrnActiveState['t'][arg_4, arg_7] = 1\n arg_11 = arg_0._getSegmentActiveSynapses(\n arg_4, arg_7, arg_9, arg_0.lrnActiveState['t-1'], newSynapses = True)\n arg_9.totalActivations += 1\n # This will update the permanences, posActivationsCount, and the\n # lastActiveIteration (age).\n arg_12 = arg_0._adaptSegment(arg_11)\n if arg_12:\n arg_0._trimSegmentsInCell(arg_4, arg_7, [arg_9], minPermanence = 0.00001,\n minNumSyns = 0)\n\n # If no close match exists, create a new one\n else:\n # Choose a cell in this column to add a new segment to\n arg_7 = arg_0._getCellForNewSegment(arg_4)\n if (arg_0.verbosity >= 4):\n print \"Learn branch 1, no match. 
Learning on col=\", arg_4,\n print \", newCellIdxInCol=\", arg_7\n arg_0.lrnActiveState['t'][arg_4, arg_7] = 1\n arg_11 = arg_0._getSegmentActiveSynapses(\n arg_4, arg_7, None, arg_0.lrnActiveState['t-1'], newSynapses=True)\n arg_11.sequenceSegment = True # Make it a sequence segment\n arg_0._adaptSegment(arg_11) # No need to check whether perm reached 0\n\n # Determine if we are out of sequence or not and reset our PAM counter\n # if we are in sequence\n arg_14 = len(arg_1)\n if arg_3 < arg_14 / 2:\n return True # in sequence\n else:\n return False"} +{"_id": "doc_1945", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Compute the predicted segments given the current set of active cells.\n\n :param readOnly True if being called from backtracking logic.\n This tells us not to increment any segment\n duty cycles or queue up any updates.\n\n This computes the lrnPredictedState['t'] and queues up any segments that\n became active (and the list of active synapses for each segment) into\n the segmentUpdates queue\n\n This looks at:\n - @ref lrnActiveState['t']\n\n This modifies:\n - @ref lrnPredictedState['t']\n - @ref segmentUpdates\n \"\"\"\n # Clear out predicted state to start with\n arg_0.lrnPredictedState['t'].fill(0)\n\n # Compute new predicted state. When computing predictions for\n # phase 2, we predict at most one cell per column (the one with the best\n # matching segment).\n for arg_2 in xrange(arg_0.numberOfCols):\n\n # Is there a cell predicted to turn on in this column?\n arg_3, arg_4, arg_5 = arg_0._getBestMatchingCell(\n arg_2, arg_0.lrnActiveState['t'], minThreshold = arg_0.activationThreshold)\n if arg_3 is None:\n continue\n\n # Turn on the predicted state for the best matching cell and queue\n # the pertinent segment up for an update, which will get processed if\n # the cell receives bottom up in the future.\n arg_0.lrnPredictedState['t'][arg_2, arg_3] = 1\n if arg_1:\n continue\n\n # Queue up this segment for updating\n arg_7 = arg_0._getSegmentActiveSynapses(\n arg_2, arg_3, arg_4, activeState=arg_0.lrnActiveState['t'],\n newSynapses=(arg_5 < arg_0.newSynapseCount))\n\n arg_4.totalActivations += 1 # increment totalActivations\n arg_0._addToSegmentUpdates(arg_2, arg_3, arg_7)\n\n if arg_0.doPooling:\n # creates a new pooling segment if no best matching segment found\n # sum(all synapses) >= minThreshold, \"weak\" activation\n arg_8 = arg_0._getBestMatchingSegment(arg_2, arg_3,\n arg_0.lrnActiveState['t-1'])\n arg_7 = arg_0._getSegmentActiveSynapses(arg_2, arg_3, arg_8,\n arg_0.lrnActiveState['t-1'], newSynapses=True)\n arg_0._addToSegmentUpdates(arg_2, arg_3, arg_7)"} +{"_id": "doc_1946", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Handle one Func, possibly learning.\n\n .. note:: It is an error to have both ``enableLearn`` and \n ``enableInference`` set to False\n\n .. note:: By default, we don't Func the inference output when learning \n because it slows things down, but you can override this by passing \n in True for ``enableInference``.\n\n :param bottomUpInput: The bottom-up input as numpy list, typically from a \n spatial pooler.\n :param enableLearn: (bool) If true, perform learning\n :param enableInference: (bool) If None, default behavior is to disable the \n inference output when ``enableLearn`` is on. If true, Func the \n inference output. 
If false, do not Func the inference output.\n\n :returns: TODO: document\n\n \"\"\"\n # As a speed optimization for now (until we need online learning), skip\n # computing the inference output while learning\n if arg_3 is None:\n if arg_2:\n arg_3 = False\n else:\n arg_3 = True\n\n assert (arg_2 or arg_3)\n\n # Get the list of columns that have bottom-up\n arg_4 = arg_1.nonzero()[0]\n if arg_2:\n arg_0.lrnIterationIdx += 1\n arg_0.iterationIdx += 1\n\n if arg_0.verbosity >= 3:\n print \"\\n==== PY Iteration: %d =====\" % (arg_0.iterationIdx)\n print \"Active cols:\", arg_4\n\n # Update segment duty cycles if we are crossing a \"tier\"\n # We determine if it's time to update the segment duty cycles. Since the\n # duty cycle calculation is a moving average based on a tiered alpha, it is\n # important that we update all segments on each tier boundary\n if arg_2:\n if arg_0.lrnIterationIdx in Segment.dutyCycleTiers:\n for arg_5, arg_6 in itertools.product(xrange(arg_0.numberOfCols),\n xrange(arg_0.cellsPerColumn)):\n for arg_7 in arg_0.cells[arg_5][arg_6]:\n arg_7.dutyCycle()\n\n # Update the average input density\n if arg_0.avgInputDensity is None:\n arg_0.avgInputDensity = len(arg_4)\n else:\n arg_0.avgInputDensity = (0.99 * arg_0.avgInputDensity +\n 0.01 * len(arg_4))\n\n # First, update the inference state\n # As a speed optimization for now (until we need online learning), skip\n # computing the inference output while learning\n if arg_3:\n arg_0._updateInferenceState(arg_4)\n\n # Next, update the learning state\n if arg_2:\n arg_0._updateLearningState(arg_4)\n\n # Apply global decay, and remove synapses and/or segments.\n # Synapses are removed if their permanence value is <= 0.\n # Segments are removed when they don't have synapses anymore.\n # Removal of synapses can trigger removal of whole segments!\n # todo: isolate the synapse/segment retraction logic so that\n # it can be called in adaptSegments, in the case where we\n # do global decay only episodically.\n if arg_0.globalDecay > 0.0 and ((arg_0.lrnIterationIdx % arg_0.maxAge) == 0):\n for arg_5, arg_6 in itertools.product(xrange(arg_0.numberOfCols),\n xrange(arg_0.cellsPerColumn)):\n\n arg_9 = [] # collect and remove outside the loop\n for arg_7 in arg_0.cells[arg_5][arg_6]:\n arg_10 = arg_0.lrnIterationIdx - arg_7.lastActiveIteration\n if arg_10 <= arg_0.maxAge:\n continue\n\n arg_11 = [] # collect and remove outside the loop\n for arg_12 in arg_7.syns:\n\n arg_12[2] = arg_12[2] - arg_0.globalDecay # decrease permanence\n\n if arg_12[2] <= 0:\n arg_11.append(arg_12) # add to list to delete\n\n # 1 for sequenceSegment flag\n if len(arg_11) == arg_7.getNumSynapses():\n arg_9.append(arg_7) # will remove the whole segment\n elif len(arg_11) > 0:\n for arg_13 in arg_11: # remove some synapses on segment\n arg_7.syns.remove(arg_13)\n\n for arg_14 in arg_9: # remove some segments of this cell\n arg_0._cleanUpdatesList(arg_5, arg_6, arg_14)\n arg_0.cells[arg_5][arg_6].remove(arg_14)\n\n # Update the prediction score stats\n # Learning always includes inference\n if arg_0.collectStats:\n if arg_3:\n arg_15 = arg_0.infPredictedState['t-1']\n else:\n arg_15 = arg_0.lrnPredictedState['t-1']\n arg_0._updateStatsInferEnd(arg_0._internalStats,\n arg_4,\n arg_15,\n arg_0.colConfidence['t-1'])\n\n # Finally return the TM output\n arg_16 = arg_0._FuncOutput()\n\n # Print diagnostic information based on the current verbosity level\n arg_0.printComputeEnd(arg_16, learn=arg_2)\n\n arg_0.resetCalled = False\n return arg_16"} +{"_id": "doc_1947", 
"title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5):\n \"\"\"\n This method goes through a list of segments for a given cell and\n deletes all synapses whose permanence is less than minPermanence and deletes\n any segments that have less than minNumSyns synapses remaining.\n\n :param colIdx Column index\n :param cellIdx Cell index within the column\n :param segList List of segment references\n :param minPermanence Any syn whose permamence is 0 or < minPermanence will\n be deleted.\n :param minNumSyns Any segment with less than minNumSyns synapses remaining\n in it will be deleted.\n\n :returns: tuple (numSegsRemoved, numSynsRemoved)\n \"\"\"\n # Fill in defaults\n if arg_4 is None:\n arg_4 = arg_0.connectedPerm\n if arg_5 is None:\n arg_5 = arg_0.activationThreshold\n\n # Loop through all segments\n arg_6, arg_7 = 0, 0\n arg_8 = [] # collect and remove segments outside the loop\n for arg_9 in arg_3:\n\n # List if synapses to delete\n arg_10 = [arg_11 for arg_11 in arg_9.syns if arg_11[2] < arg_4]\n\n if len(arg_10) == len(arg_9.syns):\n arg_8.append(arg_9) # will remove the whole segment\n else:\n if len(arg_10) > 0:\n for arg_11 in arg_10: # remove some synapses on segment\n arg_9.syns.remove(arg_11)\n arg_7 += 1\n if len(arg_9.syns) < arg_5:\n arg_8.append(arg_9)\n\n # Remove segments that don't have enough synapses and also take them\n # out of the segment update list, if they are in there\n arg_6 += len(arg_8)\n for arg_12 in arg_8: # remove some segments of this cell\n arg_0._cleanUpdatesList(arg_1, arg_2, arg_12)\n arg_0.cells[arg_1][arg_2].remove(arg_12)\n arg_7 += len(arg_12.syns)\n\n return arg_6, arg_7"} +{"_id": "doc_1948", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n This method deletes all synapses whose permanence is less than\n minPermanence and deletes any segments that have less than\n minNumSyns synapses remaining.\n\n :param minPermanence: (float) Any syn whose permanence is 0 or < \n ``minPermanence`` will be deleted. If None is passed in, then \n ``self.connectedPerm`` is used.\n :param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses \n remaining in it will be deleted. If None is passed in, then \n ``self.activationThreshold`` is used.\n :returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``\n \"\"\"\n # Fill in defaults\n if arg_1 is None:\n arg_1 = arg_0.connectedPerm\n if arg_2 is None:\n arg_2 = arg_0.activationThreshold\n\n # Loop through all cells\n arg_3, arg_4 = 0, 0\n for arg_5, arg_6 in itertools.product(xrange(arg_0.numberOfCols),\n xrange(arg_0.cellsPerColumn)):\n\n (arg_7, arg_8) = arg_0._FuncInCell(\n colIdx=arg_5, cellIdx=arg_6, segList=arg_0.cells[arg_5][arg_6],\n arg_1=arg_1, arg_2=arg_2)\n arg_3 += arg_7\n arg_4 += arg_8\n\n # Print all cells if verbosity says to\n if arg_0.verbosity >= 5:\n print \"Cells, all segments:\"\n arg_0.printCells(predictedOnly=False)\n\n return arg_3, arg_4"} +{"_id": "doc_1949", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Removes any update that would be for the given col, cellIdx, segIdx.\n\n NOTE: logically, we need to do this when we delete segments, so that if\n an update refers to a segment that was just deleted, we also remove\n that update from the update list. 
However, I haven't seen it trigger\n in any of the unit tests yet, so it might mean that it's not needed\n and that situation doesn't occur, by construction.\n \"\"\"\n # TODO: check if the situation described in the docstring above actually\n # occurs.\n for arg_4, arg_5 in arg_0.segmentUpdates.iteritems():\n arg_6, arg_7 = arg_4[0], arg_4[1]\n if arg_6 == arg_1 and arg_7 == arg_2:\n for arg_8 in arg_5:\n if arg_8[1].segment == arg_3:\n arg_0._removeSegmentUpdate(arg_8)"} +{"_id": "doc_1950", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Find weakly activated cell in column with at least minThreshold active\n synapses.\n\n :param c which column to look at\n :param activeState the active cells\n :param minThreshold minimum number of synapses required\n\n :returns: tuple (cellIdx, segment, numActiveSynapses)\n \"\"\"\n # Collect all cells in column c that have at least minThreshold in the most\n # activated segment\n arg_4 = arg_3\n arg_5 = -1\n arg_6 = -1\n\n for arg_7 in xrange(arg_0.cellsPerColumn):\n\n arg_8 = 0\n arg_9 = 0\n\n for arg_10, arg_11 in enumerate(arg_0.cells[arg_1][arg_7]):\n\n arg_12 = arg_0._getSegmentActivityLevel(arg_11, arg_2)\n\n if arg_12 > arg_8:\n arg_8 = arg_12\n arg_9 = arg_10\n\n if arg_8 >= arg_4:\n arg_4 = arg_8\n arg_5 = arg_9\n arg_6 = arg_7\n\n if arg_6 == -1:\n return (None, None, None)\n else:\n return (arg_6, arg_0.cells[arg_1][arg_6][arg_5],\n arg_4)"} +{"_id": "doc_1951", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n For the given cell, find the segment with the largest number of active\n synapses. This routine is aggressive in finding the best match. The\n permanence value of synapses is allowed to be below connectedPerm. The number\n of active synapses is allowed to be below activationThreshold, but must be\n above minThreshold. The routine returns the segment index. If no segments are\n found, then an index of -1 is returned.\n\n :param c TODO: document\n :param i TODO: document\n :param activeState TODO: document\n \"\"\"\n arg_4, arg_5 = arg_0.minThreshold, -1\n\n for arg_6, arg_7 in enumerate(arg_0.cells[arg_1][arg_2]):\n arg_8 = arg_0._getSegmentActivityLevel(arg_7, arg_3,\n connectedSynapsesOnly=False)\n\n if arg_8 >= arg_4:\n arg_4, arg_5 = arg_8, arg_6\n\n if arg_5 == -1:\n return None\n else:\n return arg_0.cells[arg_1][arg_2][arg_5]"} +{"_id": "doc_1952", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This function applies segment update information to a segment in a\n cell.\n\n Synapses on the active list get their permanence counts incremented by\n permanenceInc. 
All other synapses get their permanence counts decremented\n by permanenceDec.\n\n We also increment the positiveActivations count of the segment.\n\n :param segUpdate SegmentUpdate instance\n :returns: True if some synapses were decremented to 0 and the segment is a\n candidate for trimming\n \"\"\"\n # This will be set to True if detect that any syapses were decremented to\n # 0\n arg_2 = False\n\n # segUpdate.segment is None when creating a new segment\n arg_3, arg_4, arg_5 = arg_1.columnIdx, arg_1.cellIdx, arg_1.segment\n\n # update.activeSynapses can be empty.\n # If not, it can contain either or both integers and tuples.\n # The integers are indices of synapses to update.\n # The tuples represent new synapses to create (src col, src cell in col).\n # We pre-process to separate these various element types.\n # synToCreate is not empty only if positiveReinforcement is True.\n # NOTE: the synapse indices start at *1* to skip the segment flags.\n arg_6 = arg_1.activeSynapses\n arg_7 = set([syn for syn in arg_6 if type(syn) == int])\n\n # Modify an existing segment\n if arg_5 is not None:\n\n if arg_0.verbosity >= 4:\n print \"Reinforcing segment #%d for cell[%d,%d]\" % (arg_5.segID, arg_3, arg_4)\n print \" before:\",\n arg_5.debugPrint()\n\n # Mark it as recently useful\n arg_5.lastActiveIteration = arg_0.lrnIterationIdx\n\n # Update frequency and positiveActivations\n arg_5.positiveActivations += 1 # positiveActivations += 1\n arg_5.dutyCycle(active=True)\n\n # First, decrement synapses that are not active\n # s is a synapse *index*, with index 0 in the segment being the tuple\n # (segId, sequence segment flag). See below, creation of segments.\n arg_9 = len(arg_5.syns) - 1\n arg_10 = [s for s in xrange(0, arg_9+1) \\\n if s not in arg_7]\n arg_2 = arg_5.updateSynapses(arg_10,\n -arg_0.permanenceDec)\n\n # Now, increment active synapses\n arg_11 = [syn for syn in arg_7 if syn <= arg_9]\n arg_5.updateSynapses(arg_11, arg_0.permanenceInc)\n\n # Finally, create new synapses if needed\n # syn is now a tuple (src col, src cell)\n arg_12 = [syn for syn in arg_6 if type(syn) != int]\n # If we have fixed resources, get rid of some old syns if necessary\n if arg_0.maxSynapsesPerSegment > 0 \\\n and len(arg_12) + len(arg_5.syns) > arg_0.maxSynapsesPerSegment:\n arg_13 = (len(arg_5.syns) + len(arg_12) -\n arg_0.maxSynapsesPerSegment)\n arg_5.freeNSynapses(arg_13, arg_10, arg_0.verbosity)\n for arg_14 in arg_12:\n arg_5.addSynapse(arg_14[0], arg_14[1], arg_0.initialPerm)\n\n if arg_0.verbosity >= 4:\n print \" after:\",\n arg_5.debugPrint()\n\n # Create a new segment\n else:\n\n # (segID, sequenceSegment flag, frequency, positiveActivations,\n # totalActivations, lastActiveIteration)\n arg_15 = Segment(tm=arg_0, isSequenceSeg=arg_1.sequenceSegment)\n\n # numpy.float32 important so that we can match with C++\n for arg_16 in arg_6:\n arg_15.addSynapse(arg_16[0], arg_16[1], arg_0.initialPerm)\n\n if arg_0.verbosity >= 3:\n print \"New segment #%d for cell[%d,%d]\" % (arg_0.segID-1, arg_3, arg_4),\n arg_15.debugPrint()\n\n arg_0.cells[arg_3][arg_4].append(arg_15)\n\n return arg_2"} +{"_id": "doc_1953", "title": "", "text": "def Func( arg_0 = 2,\n arg_1 = 5,\n arg_2 = [3,4],\n arg_3 = 3,\n arg_4 = 0,\n arg_5 = 0,\n **arg_6\n ):\n \"\"\" Create training sequences that share some elements in the middle.\n\n Parameters:\n -----------------------------------------------------\n numSequences: Number of unique training sequences to generate\n seqLen: Overall length of each sequence\n sharedElements: Which 
element indices of each sequence are shared. These\n will be in the range between 0 and seqLen-1\n numOnBitsPerPattern: Number of ON bits in each TM input pattern\n patternOverlap: Max number of bits of overlap between any 2 patterns\n retval: (numCols, trainingSequences)\n numCols - width of the patterns\n trainingSequences - a list of training sequences\n\n \"\"\"\n\n # Total number of patterns used to build the sequences\n arg_7 = len(arg_2)\n arg_8 = arg_1 - arg_7\n arg_9 = arg_7 + arg_8 * arg_0\n\n # Create the table of patterns\n arg_10 = getSimplePatterns(arg_3, arg_9, arg_4)\n\n # Total number of columns required\n arg_11 = len(arg_10[0])\n\n\n # -----------------------------------------------------------------------\n # Create the training sequences\n arg_12 = []\n\n arg_13 = range(arg_7, arg_9)\n for arg_14 in xrange(arg_0):\n arg_15 = []\n\n # pattern indices [0 ... numSharedElements-1] are reserved for the shared\n # middle\n arg_16 = range(arg_7)\n\n # Build up the sequence\n for arg_17 in xrange(arg_1):\n if arg_17 in arg_2:\n arg_18 = arg_16.pop(0)\n else:\n arg_18 = arg_13.pop(0)\n arg_15.append(arg_10[arg_18])\n\n arg_12.append(arg_15)\n\n\n if VERBOSITY >= 3:\n print \"\\nTraining sequences\"\n printAllTrainingSequences(arg_12)\n\n return (arg_11, arg_12)"} +{"_id": "doc_1954", "title": "", "text": "def Func(arg_0 = 10,\n arg_1 = [2,3,4],\n arg_2 = 5,\n arg_3 = 3,\n arg_4 = 0,\n **arg_5\n ):\n \"\"\" Create a bunch of sequences of various lengths, all built from\n a fixed set of patterns.\n\n Parameters:\n -----------------------------------------------------\n numSequences: Number of training sequences to generate\n seqLen: List of possible sequence lengths\n numPatterns: How many possible patterns there are to use within\n sequences\n numOnBitsPerPattern: Number of ON bits in each TM input pattern\n patternOverlap: Max number of bits of overlap between any 2 patterns\n retval: (numCols, trainingSequences)\n numCols - width of the patterns\n trainingSequences - a list of training sequences\n\n \"\"\"\n\n\n # Create the table of patterns\n arg_6 = getSimplePatterns(arg_3, arg_2, arg_4)\n\n # Total number of columns required\n arg_7 = len(arg_6[0])\n\n\n # -----------------------------------------------------------------------\n # Create the training sequences\n arg_8 = []\n for arg_9 in xrange(arg_0):\n\n # Build it up from patterns\n arg_10 = []\n arg_11 = random.choice(arg_1)\n for arg_9 in xrange(arg_11):\n arg_12 = random.choice(xrange(arg_2))\n arg_10.append(arg_6[arg_12])\n\n # Put it in\n arg_8.append(arg_10)\n\n\n if VERBOSITY >= 3:\n print \"\\nTraining sequences\"\n printAllTrainingSequences(arg_8)\n\n return (arg_7, arg_8)"} +{"_id": "doc_1955", "title": "", "text": "def Func(arg_0 = True,\n arg_1 = True,\n arg_2 = 100,\n arg_3 = 4,\n arg_4 = 3,\n arg_5 = 3,\n arg_6 = 3,\n arg_7 = 0.6,\n arg_8 = 0.1,\n arg_9 = 0.0,\n arg_10 = 0.0,\n arg_11 = 0,\n arg_12 = True,\n arg_13 = 0,\n arg_14 = 0,\n **arg_15\n ):\n\n \"\"\"Create one or more TM instances, placing each into a dict keyed by\n name.\n\n Parameters:\n ------------------------------------------------------------------\n retval: tms - dict of TM instances\n \"\"\"\n\n # Keep these fixed:\n arg_16 = 0.5\n\n arg_17 = dict()\n\n if arg_0:\n if VERBOSITY >= 2:\n print \"Creating BacktrackingTMCPP instance\"\n\n arg_18 = BacktrackingTMCPP(numberOfCols = arg_2, cellsPerColumn = arg_3,\n arg_7 = arg_7, arg_16 = arg_16,\n arg_5 = arg_5, arg_6 = arg_6,\n arg_8 = arg_8, arg_9 = arg_9,\n arg_4 = arg_4,\n arg_10 = 
arg_10, burnIn = 1,\n seed=SEED, verbosity=VERBOSITY,\n arg_12 = arg_12,\n collectStats = True,\n arg_11 = arg_11,\n arg_13 = arg_13,\n arg_14 = arg_14,\n )\n\n # Ensure we are copying over learning states for TMDiff\n arg_18.retrieveLearningStates = True\n\n arg_17['CPP'] = arg_18\n\n\n if arg_1:\n if VERBOSITY >= 2:\n print \"Creating PY TM instance\"\n\n arg_20 = BacktrackingTM(numberOfCols = arg_2,\n cellsPerColumn = arg_3,\n arg_7 = arg_7,\n arg_16 = arg_16,\n arg_5 = arg_5,\n arg_6 = arg_6,\n arg_8 = arg_8,\n arg_9 = arg_9,\n arg_4 = arg_4,\n arg_10 = arg_10, burnIn = 1,\n seed=SEED, verbosity=VERBOSITY,\n collectStats = True,\n arg_11 = arg_11,\n arg_13 = arg_13,\n arg_14 = arg_14,\n )\n\n\n arg_17['PY '] = arg_20\n\n return arg_17"} +{"_id": "doc_1956", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check for diffs among the TM instances in the passed in tms dict and\n raise an assert if any are detected\n\n Parameters:\n ---------------------------------------------------------------------\n tms: dict of TM instances\n \"\"\"\n\n if len(arg_0) == 1:\n return\n if len(arg_0) > 2:\n raise \"Not implemented for more than 2 TMs\"\n\n arg_1 = fdrutils.tmDiff2(arg_0.values(), verbosity=VERBOSITY)\n assert(arg_1)\n return"} +{"_id": "doc_1957", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=11, arg_4=22, arg_5=0):\n \"\"\"Compress a byte string.\n\n Args:\n string (bytes): The input data.\n mode (int, optional): The Funcion mode can be MODE_GENERIC (default),\n MODE_TEXT (for UTF-8 format text input) or MODE_FONT (for WOFF 2.0).\n quality (int, optional): Controls the Funcion-speed vs Funcion-\n density tradeoff. The higher the quality, the slower the Funcion.\n Range is 0 to 11. Defaults to 11.\n lgwin (int, optional): Base 2 logarithm of the sliding window size. Range\n is 10 to 24. Defaults to 22.\n lgblock (int, optional): Base 2 logarithm of the maximum input block size.\n Range is 16 to 24. If set to 0, the value will be set based on the\n quality. 
Defaults to 0.\n\n Returns:\n The Funced byte string.\n\n Raises:\n brotli.error: If arguments are invalid, or Funcor fails.\n \"\"\"\n arg_6 = Compressor(arg_1=arg_1, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5)\n return arg_6.process(arg_0) + arg_6.finish()"} +{"_id": "doc_1958", "title": "", "text": "def Func(arg_0):\n \"\"\"Show string or char.\n \"\"\"\n arg_1 = ''\n def formatSubString(arg_0):\n for arg_2 in arg_0:\n if arg_2==32: yield ' '\n else: yield outputCharFormatter(arg_2)\n if len(arg_1)<200: return ''.join(formatSubString(arg_0))\n else:\n return ''.join(formatSubString(arg_0[:100]))+'...'+ \\\n ''.join(formatSubString(arg_0[-100:]))"} +{"_id": "doc_1959", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Read n bytes from the stream on a byte boundary.\n \"\"\"\n if arg_0.pos&7: raise ValueError('Func: need byte boundary')\n arg_2 = arg_0.data[arg_0.pos>>3:(arg_0.pos>>3)+arg_1]\n arg_0.pos += 8*arg_1\n return arg_2"} +{"_id": "doc_1960", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Store decodeTable,\n and compute lengthTable, minLength, maxLength from encodings.\n \"\"\"\n arg_0.decodeTable = arg_1\n #set of symbols with unknown length\n arg_2 = set(arg_1)\n #bit size under investigation\n arg_3 = 0\n arg_4 = {}\n while arg_2:\n arg_5 = (1<arg_4:\n raise ValueError('value: extra out of range')\n return Func"} +{"_id": "doc_1964", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Give the range of possible values in a tuple\n Useful for mnemonic and explanation\n \"\"\"\n arg_2 = arg_0.value0+sum(1<> arg_0.NPOSTFIX\n arg_4 = arg_1 & (1<>1) if arg_3<13 or arg_2 else '[{}*x]'.format(2+arg_3>>1),\n arg_4, arg_0.NPOSTFIX,\n arg_0.NDIRECT+1-(4<>3))\n return True"} +{"_id": "doc_1970", "title": "", "text": "def Func(arg_0):\n \"\"\"In place inverse move to front transform.\n \"\"\"\n #mtf is initialized virtually with range(infinity)\n arg_1 = []\n for arg_2, arg_3 in enumerate(arg_0):\n #get old value from mtf. 
If never seen, take virtual value\n try: arg_4 = arg_1.pop(arg_3)\n except IndexError: arg_4 = arg_3\n #put value at front\n arg_1.insert(0, arg_4)\n #replace transformed value\n arg_0[arg_2] = arg_4"} +{"_id": "doc_1971", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True, arg_4=False):\n \"\"\"Implementation of Dataset.to_arrow_table\"\"\"\n arg_5 = []\n arg_6 = []\n for arg_7, arg_8 in arg_0.to_items(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4):\n arg_5.append(arg_7)\n arg_6.append(arrow_array_from_numpy_array(arg_8))\n return pyarrow.Table.from_arrays(arg_6, arg_5)"} +{"_id": "doc_1972", "title": "", "text": "def Func(arg_0):\n '''Adds method f to the Dataset class'''\n arg_1 = arg_0.__name__\n arg_2.__hidden__[arg_1] = arg_0\n return arg_0"} +{"_id": "doc_1973", "title": "", "text": "def Func(arg_0, arg_1=\"distance\", arg_2=\"pm_l\", arg_3=\"pm_b\",\n arg_4=\"vl\", arg_5=\"vb\",\n arg_6=False,\n arg_7=False):\n \"\"\"Convert proper motion to perpendicular velocities.\n\n :param distance:\n :param pm_long:\n :param pm_lat:\n :param vl:\n :param vb:\n :param cov_matrix_distance_pm_long_pm_lat:\n :param uncertainty_postfix:\n :param covariance_postfix:\n :param radians:\n :return:\n \"\"\"\n arg_8 = 4.74057\n arg_0.add_variable(\"k\", arg_8, overwrite=False)\n arg_0.add_virtual_column(arg_4, \"k*{pm_long}*{distance}\".format(**locals()))\n arg_0.add_virtual_column(arg_5, \"k* {pm_lat}*{distance}\".format(**locals()))\n if arg_6:\n arg_0.propagate_uncertainties([arg_0[arg_4], arg_0[arg_5]])"} +{"_id": "doc_1974", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return a graphviz.Digraph object with a graph of the expression\"\"\"\n from graphviz import Graph, Digraph\n arg_2 = arg_0._graph()\n arg_1 = arg_1 or Digraph(comment=arg_0.expression)\n def walk(arg_2):\n if isinstance(arg_2, six.string_types):\n arg_1.node(arg_2, arg_2)\n return arg_2, arg_2\n else:\n arg_3, arg_4, arg_5, arg_6 = arg_2\n arg_7 = arg_3\n arg_1.node(arg_7, arg_3)\n for arg_8 in arg_6:\n arg_9, arg_8 = walk(arg_8)\n arg_1.edge(arg_7, arg_9)\n return arg_7, arg_2\n walk(arg_2)\n return arg_1"} +{"_id": "doc_1975", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Map values of an expression or in memory column accoring to an input\n dictionary or a custom callable function.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_arrays(color=['red', 'red', 'blue', 'red', 'green'])\n >>> Funcper = {'red': 1, 'blue': 2, 'green': 3}\n >>> df['color_Funcped'] = df.color.Func(Funcper)\n >>> df\n # color color_Funcped\n 0 red 1\n 1 red 1\n 2 blue 2\n 3 red 1\n 4 green 3\n >>> import numpy as np\n >>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, np.nan])\n >>> df['role'] = df['type'].Func({0: 'admin', 1: 'maintainer', 2: 'user', np.nan: 'unknown'})\n >>> df\n # type role\n 0 0 admin\n 1 1 maintainer\n 2 2 user\n 3 2 user\n 4 2 user\n 5 nan unknown \n\n :param Funcper: dict like object used to Func the values from keys to values\n :param nan_Funcping: value to be used when a nan is present (and not in the Funcper)\n :param null_Funcping: value to use used when there is a missing value\n :return: A vaex expression\n :rtype: vaex.expression.Expression\n \"\"\"\n assert isinstance(arg_1, collectionsAbc.Mapping), \"Funcper should be a dict like object\"\n\n arg_4 = arg_0.ds\n arg_5 = np.array(list(arg_1.keys()))\n\n # we Func the keys to a ordinal values [0, N-1] using the set\n arg_6 = arg_4._set(arg_0.expression)\n arg_7 = arg_6.keys()\n arg_8 = any([key != key 
for key in arg_5])\n\n # we want all possible values to be converted\n # so Funcper's key should be a superset of the keys found\n if not set(arg_5).issuperset(arg_7):\n arg_9 = set(arg_7).difference(arg_5)\n arg_10 = list(arg_9)[0]\n if arg_10 == arg_10: # safe nan check\n raise ValueError('Missing values in Funcper: %s' % arg_9)\n \n # and these are the corresponding choices\n arg_11 = [arg_1[key] for key in arg_7]\n if arg_6.has_nan:\n if arg_8:\n arg_11 = [arg_1[np.nan]] + arg_11\n else:\n arg_11 = [arg_2] + arg_11\n if arg_6.has_null:\n arg_11 = [arg_3] + arg_11\n arg_11 = np.array(arg_11)\n\n arg_12 = arg_4.add_variable('Func_key_set', arg_6, unique=True)\n arg_13 = arg_4.add_variable('Func_choices', arg_11, unique=True)\n arg_14 = '_choose(_ordinal_values({}, {}), {})'.format(arg_0, arg_12, arg_13)\n return Expression(arg_4, arg_14)"} +{"_id": "doc_1976", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"Create a vaex Func, the QApplication mainloop must be started.\n\n In ipython notebook/jupyter do the following:\n\n >>> import vaex.ui.main # this causes the qt api level to be set properly\n >>> import vaex\n\n Next cell:\n\n >>> %gui qt\n\n Next cell:\n\n >>> Func = vaex.Func()\n\n From now on, you can run the Func along with jupyter\n\n \"\"\"\n\n import vaex.ui.main\n return vaex.ui.main.VaexApp()"} +{"_id": "doc_1977", "title": "", "text": "def Func(arg_0):\n \"\"\"Open a list of filenames, and return a DataFrame with all DataFrames cocatenated.\n\n :param list[str] filenames: list of filenames/paths\n :rtype: DataFrame\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0:\n arg_2 = arg_2.strip()\n if arg_2 and arg_2[0] != \"#\":\n arg_1.append(open(arg_2))\n return vaex.dataframe.DataFrameConcatenated(arg_1=arg_1)"} +{"_id": "doc_1978", "title": "", "text": "def Func(arg_0):\n \"\"\"Create a vaex DataFrame from an Astropy Table.\"\"\"\n import vaex.file.other\n return vaex.file.other.DatasetAstropyTable(arg_0=arg_0)"} +{"_id": "doc_1979", "title": "", "text": "def Func(**arg_0):\n \"\"\"Create an in memory DataFrame from numpy arrays.\n\n Example\n\n >>> import vaex, numpy as np\n >>> x = np.arange(5)\n >>> y = x ** 2\n >>> vaex.Func(x=x, y=y)\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n >>> some_dict = {'x': x, 'y': y}\n >>> vaex.Func(**some_dict) # in case you have your columns in a dict\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n\n :param arrays: keyword arguments with arrays\n :rtype: DataFrame\n \"\"\"\n import numpy as np\n import six\n from .column import Column\n arg_1 = vaex.dataframe.DataFrameArrays(\"array\")\n for arg_2, arg_3 in arg_0.items():\n if isinstance(arg_3, Column):\n arg_1.add_column(arg_2, arg_3)\n else:\n arg_3 = np.asanyarray(arg_3)\n arg_1.add_column(arg_2, arg_3)\n return arg_1"} +{"_id": "doc_1980", "title": "", "text": "def Func(arg_0=2, arg_1=256, arg_2=-2.5, arg_3=None, arg_4=1, arg_5=None):\n \"\"\"Creates a Func DataFrame.\n \"\"\"\n import vaex.file\n return vaex.file.other.Zeldovich(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_1981", "title": "", "text": "def Func(arg_0):\n '''Concatenate a list of DataFrames.\n\n :rtype: DataFrame\n '''\n arg_1 = reduce((lambda x, y: x.Func(y)), arg_0)\n return arg_1"} +{"_id": "doc_1982", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add a dataset and add it to the UI\"\"\"\n logger.debug(\"Func dataset: %r\", arg_1)\n if arg_1.startswith(\"http\") or arg_1.startswith(\"ws\"):\n arg_2 = vaex.Func(arg_1, thread_mover=arg_0.call_in_main_thread)\n else:\n 
arg_2 = vaex.Func(arg_1)\n arg_0.add_recently_Funced(arg_1)\n arg_0.dataset_selector.add(arg_2)\n return arg_2"} +{"_id": "doc_1983", "title": "", "text": "def Func(arg_0):\n '''Decorator to transparantly accept Func computation.\n\n Example:\n\n >>> Func_sum = ds.sum(ds.E, binby=ds.x, limits=limits,\n >>> shape=4, delay=True)\n >>> @vaex.Func\n >>> def total_sum(sums):\n >>> return sums.sum()\n >>> sum_of_sums = total_sum(Func_sum)\n >>> ds.execute()\n >>> sum_of_sums.get()\n See the tutorial for a more complete example https://docs.vaex.io/en/latest/tutorial.html#Parallel-computations\n '''\n\n def wrapped(*arg_1, **arg_2):\n # print \"calling\", f, \"with\", kwargs\n # key_values = kwargs.items()\n arg_3 = list([(key, promisify(arg_9)) for key, arg_9 in arg_2.items()])\n # key_promise = [(key, promisify(value)) for key, value in key_values]\n arg_4 = list([promisify(arg_9) for arg_9 in arg_1])\n arg_5 = list([arg_7 for key, arg_7 in arg_3])\n arg_6 = arg_4 + arg_5\n for arg_7 in arg_6:\n def echo_error(arg_8, arg_7=arg_7):\n print(\"error with \", arg_7, \"exception is\", arg_8)\n # raise exc\n\n def echo(arg_9, arg_7=arg_7):\n print(\"done with \", repr(arg_7), \"value is\", arg_9)\n # promise.then(echo, echo_error)\n\n # print promises\n arg_10 = aplus.listPromise(*arg_6)\n\n def call(arg_11):\n arg_12 = {key: arg_7.get() for key, arg_7 in arg_3}\n arg_13 = list([arg_7.get() for arg_7 in arg_4])\n return arg_0(*arg_13, **arg_12)\n\n def error(arg_8):\n print(\"error\", arg_8)\n raise arg_8\n return arg_10.then(call, error)\n return wrapped"} +{"_id": "doc_1984", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Helper function for returning tasks results, result when immediate is True, otherwise the task itself, which is a promise\"\"\"\n if arg_0.delay:\n # should return a task or a promise nesting it\n return arg_0.executor.schedule(arg_1)\n else:\n import vaex.utils\n arg_3 = None\n try:\n if arg_2 == True:\n def update(arg_4):\n arg_5.update(arg_4)\n return True\n arg_5 = vaex.utils.progressbar(arg_1.name)\n arg_3 = arg_0.executor.signal_progress.connect(update)\n elif arg_2:\n arg_3 = arg_0.executor.signal_progress.connect(arg_2)\n arg_6 = arg_0.executor.run(arg_1)\n if arg_2 == True:\n arg_5.finish()\n sys.stdout.write('\\n')\n return arg_6\n finally:\n if arg_3:\n arg_0.executor.signal_progress.disconnect(arg_3)"} +{"_id": "doc_1985", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sort table by given column number.\n \"\"\"\n arg_0.emit(QtCore.SIGNAL(\"layoutAboutToBeChanged()\"))\n if arg_1 == 0:\n print(\"by name\")\n # get indices, Funced by pair name\n arg_3 = list(zip(arg_0.pairs, list(range(len(arg_0.pairs)))))\n print(arg_3)\n arg_3.Func(key=operator.itemgetter(0))\n print(arg_3)\n arg_0.indices = list(map(operator.itemgetter(1), arg_3))\n print((arg_0.indices))\n if arg_1 == 1:\n # get indices, Funced by ranking, or no Funcing\n if None not in arg_0.ranking:\n arg_3 = list(zip(arg_0.ranking, list(range(len(arg_0.pairs)))))\n arg_3.Func(key=operator.itemgetter(0))\n arg_0.indices = list(map(operator.itemgetter(1), arg_3))\n else:\n arg_0.indices = list(range(len(arg_0.pairs)))\n print((arg_0.indices))\n if arg_2 == QtCore.Qt.DescendingOrder:\n arg_0.indices.reverse()\n print((arg_0.indices))\n arg_0.emit(QtCore.SIGNAL(\"layoutChanged()\"))"} +{"_id": "doc_1986", "title": "", "text": "def Func(arg_0):\n \"\"\"Used for unittesting to make sure the plots are all done\"\"\"\n logger.debug(\"will wait for last plot to finish\")\n 
arg_0._plot_event = threading.Event()\n arg_0.queue_update.Func()\n arg_0.queue_replot.Func()\n arg_0.queue_redraw.Func()\n arg_2 = QtCore.QCoreApplication.instance()\n arg_3 = 10\n while not arg_0._plot_event.is_set():\n logger.debug(\"waiting for last plot to finish\")\n arg_2.processEvents()\n QtTest.QTest.qSleep(arg_3)\n logger.debug(\"waiting for plot finished\")"} +{"_id": "doc_1987", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=False):\n \"\"\"Evaluates expression, and drop the result, usefull for benchmarking, since vaex is usually lazy\"\"\"\n arg_1 = _ensure_string_from_expression(arg_1)\n def map(arg_4):\n pass\n def reduce(arg_5, arg_6):\n pass\n return arg_0.map_reduce(map, reduce, [arg_1], arg_3=arg_3, arg_2=arg_2, name='Func', to_numpy=False)"} +{"_id": "doc_1988", "title": "", "text": "def Func(arg_0, arg_1, arg_2=[], arg_3=None, arg_4=arg_5, arg_6=False, arg_7=False, arg_8=None, arg_9=False):\n \"\"\"Calculate the Func for the given expression, possible on a grid defined by binby\n\n Example:\n\n >>> df.Func(\"L\")\n 304054882.49378014\n >>> df.Func(\"L\", binby=\"E\", shape=4)\n array([ 8.83517994e+06, 5.92217598e+07, 9.55218726e+07,\n 1.40008776e+08])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}\n \"\"\"\n return arg_0._compute_agg('Func', arg_1, arg_2, arg_3, arg_4, arg_6, arg_7, arg_9, arg_8)\n @delayed\n def finish(*arg_10):\n return vaex.utils.unlistify(arg_11, arg_10)\n arg_1 = _ensure_strings_from_expressions(arg_1)\n arg_2 = _ensure_strings_from_expressions(arg_2)\n arg_11, [arg_12, ] = vaex.utils.listify(arg_1)\n arg_13 = vaex.utils.progressbars(arg_8)\n arg_3 = arg_0.limits(arg_2, arg_3, arg_7=True)\n # stats = [calculate(expression, limits) for expression in expressions]\n arg_10 = [arg_0._Func_calculation(arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, arg_6=arg_6, arg_13=arg_13) for arg_1 in arg_12]\n arg_14 = finish(*arg_10)\n return arg_0._delay(arg_7, arg_14)"} +{"_id": "doc_1989", "title": "", "text": "def Func(arg_0, arg_1, arg_2=[], arg_3=None, arg_4=arg_5, arg_6=False, arg_7=False, arg_8=None):\n \"\"\"Calculate the standard deviation for the given expression, possible on a grid defined by binby\n\n\n >>> df.Func(\"vz\")\n 110.31773397535071\n >>> df.Func(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)\n array([ 123.57954851, 85.35190177, 61.14345748, 38.0740619 ])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}\n \"\"\"\n @delayed\n def finish(arg_9):\n return arg_9**0.5\n return arg_0._delay(arg_7, finish(arg_0.var(arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, arg_6=arg_6, arg_7=True, arg_8=arg_8)))"} +{"_id": "doc_1990", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=[], arg_4=None, arg_5=arg_6, arg_7=False, arg_8=False, arg_9=None):\n \"\"\"Calculate the Funcariance matrix for x and y or more expressions, possibly on a grid defined by binby.\n\n Either x and y are expressions, e.g:\n\n >>> df.Func(\"x\", \"y\")\n\n Or only the x argument is given with a list of expressions, e,g.:\n\n >>> df.Func([\"x, \"y, \"z\"])\n\n Example:\n\n >>> df.Func(\"x\", \"y\")\n array([[ 53.54521742, -3.8123135 ],\n [ -3.8123135 , 60.62257881]])\n >>> df.Func([\"x\", \"y\", 
\"z\"])\n array([[ 53.54521742, -3.8123135 , -0.98260511],\n [ -3.8123135 , 60.62257881, 1.21381057],\n [ -0.98260511, 1.21381057, 25.55517638]])\n\n >>> df.Func(\"x\", \"y\", binby=\"E\", shape=2)\n array([[[ 9.74852878e+00, -3.02004780e-02],\n [ -3.02004780e-02, 9.99288215e+00]],\n [[ 8.43996546e+01, -6.51984181e+00],\n [ -6.51984181e+00, 9.68938284e+01]]])\n\n\n :param x: {expression}\n :param y: {expression_single}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :return: {return_stat_scalar}, the last dimensions are of shape (2,2)\n \"\"\"\n arg_7 = _ensure_strings_from_expressions(arg_7)\n if arg_2 is None:\n if not _issequence(arg_1):\n raise ValueError(\"if y argument is not given, x is expected to be sequence, not %r\", arg_1)\n arg_10 = arg_1\n else:\n arg_10 = [arg_1, arg_2]\n arg_11 = len(arg_10)\n arg_3 = _ensure_list(arg_3)\n arg_5 = _expand_shape(arg_5, len(arg_3))\n arg_12 = vaex.utils.progressbars(arg_9)\n arg_4 = arg_0.limits(arg_3, arg_4, arg_7=arg_7, arg_8=True)\n\n @delayed\n def calculate(arg_10, arg_4):\n # print('limits', limits)\n arg_13 = tasks.TaskStatistic(arg_0, arg_3, arg_5, arg_4, weights=arg_10, op=tasks.OP_COV, arg_7=arg_7)\n arg_0.executor.schedule(arg_13)\n arg_12.add_task(arg_13, \"Funcariance values for %r\" % arg_10)\n return arg_13\n\n @delayed\n def finish(arg_14):\n arg_11 = len(arg_10)\n arg_15 = arg_14[..., :arg_11]\n arg_16 = arg_14[..., arg_11:2 * arg_11]\n with np.errstate(divide='ignore', invalid='ignore'):\n arg_17 = arg_16 / arg_15\n # matrix of means * means.T\n arg_18 = arg_17[..., None] * arg_17[..., None, :]\n\n arg_15 = arg_14[..., 2 * arg_11:2 * arg_11 + arg_11**2]\n arg_16 = arg_14[..., 2 * arg_11 + arg_11**2:]\n arg_5 = arg_15.shape[:-1] + (arg_11, arg_11)\n arg_15 = arg_15.reshape(arg_5)\n arg_16 = arg_16.reshape(arg_5)\n with np.errstate(divide='ignore', invalid='ignore'):\n arg_19 = arg_16 / arg_15\n arg_20 = arg_19 - arg_18\n return arg_20\n arg_12 = vaex.utils.progressbars(arg_9)\n arg_14 = calculate(arg_10, arg_4)\n arg_20 = finish(arg_14)\n return arg_0._delay(arg_8, arg_20)"} +{"_id": "doc_1991", "title": "", "text": "def Func(arg_0, arg_1, arg_2=50., arg_3=[], arg_4=None, arg_5=arg_6, arg_7=256, arg_8=\"minmax\", arg_9=False, arg_10=False):\n \"\"\"Calculate the median , possibly on a grid defined by binby.\n\n NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by\n percentile_shape and percentile_limits\n\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param percentile_limits: {percentile_limits}\n :param percentile_shape: {percentile_shape}\n :param selection: {selection}\n :param delay: {delay}\n :return: {return_stat_scalar}\n \"\"\"\n return arg_0.percentile_approx(arg_1, 50, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, arg_7=arg_7, arg_8=arg_8, arg_9=arg_9, arg_10=arg_10)"} +{"_id": "doc_1992", "title": "", "text": "def Func(arg_0, arg_1=\"source_id/34359738368\", arg_2=12, arg_3=8, arg_4=\"count(*)\", arg_5=None,\n arg_6=None,\n arg_7=\"equatorial\", arg_8=\"galactic\", arg_9=None,\n arg_10=\"afmhot\", arg_11=None, arg_12=800, arg_13=True,\n arg_14=None, arg_15=False, arg_16=\"\", arg_17=None, arg_18=False, arg_19=True,\n arg_20=(0, 0, 0), **arg_21):\n \"\"\"Viz data in 2d using a healpix column.\n\n :param healpix_expression: {healpix_max_level}\n :param healpix_max_level: {healpix_max_level}\n :param healpix_level: {healpix_level}\n 
:param what: {what}\n :param selection: {selection}\n :param grid: {grid}\n :param healpix_input: Specificy if the healpix index is in \"equatorial\", \"galactic\" or \"ecliptic\".\n :param healpix_output: Plot in \"equatorial\", \"galactic\" or \"ecliptic\".\n :param f: function to apply to the data\n :param colormap: matplotlib colormap\n :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid)))\n :param image_size: size for the image that healpy uses for rendering\n :param nest: If the healpix data is in nested (True) or ring (False)\n :param figsize: If given, modify the matplotlib figure size. Example (14,9)\n :param interactive: (Experimental, uses healpy.mollzoom is True)\n :param title: Title of figure\n :param smooth: apply gaussian smoothing, in degrees\n :param show: Call matplotlib's show (True) or not (False, defaut)\n :param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.\n :return:\n \"\"\"\n # plot_level = healpix_level #healpix_max_level-reduce_level\n import healpy as hp\n import pylab as plt\n if arg_6 is None:\n arg_22 = arg_2 - arg_3\n arg_23 = 2**arg_3\n arg_24 = hp.nside2npix(arg_23)\n # print nmax, np.sqrt(nmax)\n arg_25 = 4**arg_22\n # print nmax\n arg_26 = 1. / arg_25 / 2\n arg_6 = arg_0._stat(arg_4=arg_4, binby=\"%s/%s\" % (arg_1, arg_25), limits=[-arg_26, arg_24 - arg_26], shape=arg_24, arg_5=arg_5)\n if arg_11:\n arg_27, arg_28 = arg_11\n else:\n arg_27 = arg_28 = None\n arg_29 = arg_9\n arg_9 = _parse_f(arg_9)\n if arg_17:\n if arg_13:\n arg_6 = hp.reorder(arg_6, inp=\"NEST\", out=\"RING\")\n arg_13 = False\n # grid[np.isnan(grid)] = np.nanmean(grid)\n arg_6 = hp.smoothing(arg_6, sigma=np.radians(arg_17))\n arg_30 = arg_9(arg_6)\n arg_31 = dict(equatorial='C', galactic='G', ecliptic=\"E\")\n arg_32 = plt.gcf()\n if arg_14 is not None:\n arg_32.set_size_inches(*arg_14)\n arg_33 = arg_4\n if arg_29:\n arg_33 = arg_29 + \" \" + arg_33\n arg_9 = hp.mollzoom if arg_15 else hp.mollview\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n arg_34 = arg_31[arg_7], arg_31[arg_8]\n if arg_31[arg_7] == arg_31[arg_8]:\n arg_34 = None\n arg_9(arg_30, unit=arg_33, rot=arg_20, arg_13=arg_13, arg_16=arg_16, arg_34=arg_34,\n cmap=arg_10, hold=True, xsize=arg_12, min=arg_27, max=arg_28, cbar=arg_19, **arg_21)\n if arg_18:\n plt.show()"} +{"_id": "doc_1993", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=None, arg_7=None, arg_8=None, arg_9=None, arg_10=\"count(*)\", arg_11=128, arg_12=[None, True], arg_13=None,\n arg_14=None,\n arg_15=None, arg_16=None, arg_17=None, arg_18=\"normalize\", arg_19=\"afmhot\",\n arg_20=None, arg_21=None,\n arg_22=True, arg_23=[0.1, 0.5, 0.9], arg_24=[0.01, 0.05, 0.1], arg_25=0.1,\n arg_26=True, **arg_27):\n \"\"\"Use at own risk, requires ipyvolume\"\"\"\n import vaex.ext.ipyvolume\n # vaex.ext.ipyvolume.\n arg_28 = vaex.ext.ipyvolume.PlotDefault\n Func = arg_28(df=arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, arg_6=arg_6,\n arg_9=arg_9, arg_11=arg_11, arg_8=arg_8, arg_10=arg_10,\n arg_13=arg_13, arg_20=arg_20, arg_21=arg_21,\n arg_12=arg_12, arg_15=arg_15, arg_16=arg_16,\n arg_17=arg_17, arg_14=arg_14, arg_18=arg_18, arg_19=arg_19, **arg_27)\n if arg_26:\n Func.show()\n return Func"} +{"_id": 
"doc_1994", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the numpy dtype for the given expression, if not a column, the first row will be evaluated to get the dtype.\"\"\"\n arg_1 = _ensure_string_from_expression(arg_1)\n if arg_1 in arg_0.variables:\n return np.float64(1).dtype\n elif arg_1 in arg_0.columns.keys():\n arg_3 = arg_0.columns[arg_1]\n arg_4 = arg_3[0:1]\n Func = arg_4.dtype\n else:\n arg_4 = arg_0.evaluate(arg_1, 0, 1, filtered=False)\n Func = arg_4.dtype\n if not arg_2:\n if Func != str_type:\n if Func.kind in 'US':\n return str_type\n if Func.kind == 'O':\n # we lie about arrays containing strings\n if isinstance(arg_4[0], six.string_types):\n return str_type\n return Func"} +{"_id": "doc_1995", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Each DataFrame has a directory where files are stored for metadata etc.\n\n Example\n\n >>> import vaex\n >>> ds = vaex.example()\n >>> vaex.Func()\n '/Users/users/breddels/.vaex/dfs/_Users_users_breddels_vaex-testing_data_helmi-dezeeuw-2000-10p.hdf5'\n\n :param bool create: is True, it will create the directory if it does not exist\n \"\"\"\n if arg_0.is_local():\n arg_2 = os.path.abspath(arg_0.path).replace(os.path.sep, \"_\")[:250] # should not be too long for most os'es\n arg_2 = arg_2.replace(\":\", \"_\") # for windows drive names\n else:\n arg_3 = arg_0.server\n arg_2 = \"%s_%s_%s_%s\" % (arg_3.hostname, arg_3.port, arg_3.base_path.replace(\"/\", \"_\"), arg_0.name)\n arg_4 = os.path.join(vaex.utils.Func(), \"dfs\", arg_2)\n if arg_1 and not os.path.exists(arg_4):\n os.makedirs(arg_4)\n return arg_4"} +{"_id": "doc_1996", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the internal state of the DataFrame in a dictionary\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n >>> df['r'] = (df.x**2 + df.y**2)**0.5\n >>> df.Func()\n {'active_range': [0, 1],\n 'column_names': ['x', 'y', 'r'],\n 'description': None,\n 'descriptions': {},\n 'functions': {},\n 'renamed_columns': [],\n 'selections': {'__filter__': None},\n 'ucds': {},\n 'units': {},\n 'variables': {},\n 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}\n \"\"\"\n\n arg_1 = list(arg_0.virtual_columns.keys()) + list(arg_0.variables.keys())\n arg_2 = {arg_5: str(arg_6) for arg_5, arg_6 in arg_0.units.items()}\n arg_3 = {arg_5: arg_6 for arg_5, arg_6 in arg_0.ucds.items() if arg_5 in arg_1}\n arg_4 = {arg_5: arg_6 for arg_5, arg_6 in arg_0.descriptions.items()}\n import vaex.serialize\n\n def check(arg_5, arg_6):\n if not vaex.serialize.can_serialize(arg_6.f):\n warnings.warn('Cannot serialize function for virtual column {} (use vaex.serialize.register)'.format(arg_5))\n return False\n return True\n\n def clean(arg_6):\n return vaex.serialize.to_dict(arg_6.f)\n arg_7 = {arg_5: clean(arg_6) for arg_5, arg_6 in arg_0.functions.items() if check(arg_5, arg_6)}\n arg_8 = {arg_5: arg_6 for arg_5, arg_6 in arg_0.virtual_columns.items()}\n arg_9 = {name: arg_0.get_selection(name) for name, history in arg_0.selection_histories.items()}\n arg_9 = {name: selection.to_dict() if selection is not None else None for name, selection in arg_9.items()}\n # if selection is not None}\n arg_10 = dict(arg_8=arg_8,\n column_names=arg_0.column_names,\n renamed_columns=arg_0._renamed_columns,\n variables=arg_0.variables,\n arg_7=arg_7,\n arg_9=arg_9,\n arg_3=arg_3,\n arg_2=arg_2,\n arg_4=arg_4,\n description=arg_0.description,\n active_range=[arg_0._index_start, arg_0._index_end])\n return arg_10"} +{"_id": "doc_1997", 
"title": "", "text": "def Func(arg_0):\n \"\"\"Writes virtual columns, variables and their ucd,description and units.\n\n The default implementation is to write this to a file called virtual_meta.yaml in the directory defined by\n :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.\n\n This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_virtual_meta`\n is called, so that the information is not lost between sessions.\n\n Note: opening a DataFrame twice may result in corruption of this file.\n\n \"\"\"\n arg_1 = os.path.join(arg_0.get_private_dir(create=True), \"virtual_meta.yaml\")\n arg_2 = list(arg_0.virtual_columns.keys()) + list(arg_0.variables.keys())\n arg_3 = {key: str(value) for key, value in arg_0.units.items() if key in arg_2}\n arg_4 = {key: value for key, value in arg_0.ucds.items() if key in arg_2}\n arg_5 = {key: value for key, value in arg_0.descriptions.items() if key in arg_2}\n arg_6 = dict(virtual_columns=arg_0.virtual_columns,\n variables=arg_0.variables,\n arg_4=arg_4, arg_3=arg_3, arg_5=arg_5)\n vaex.utils.write_json_or_yaml(arg_1, arg_6)"} +{"_id": "doc_1998", "title": "", "text": "def Func(arg_0):\n \"\"\"Writes all meta data, ucd,description and units\n\n The default implementation is to write this to a file called meta.yaml in the directory defined by\n :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.\n (For instance the vaex hdf5 implementation does this)\n\n This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_meta`\n is called, so that the information is not lost between sessions.\n\n Note: opening a DataFrame twice may result in corruption of this file.\n\n \"\"\"\n # raise NotImplementedError\n arg_1 = os.path.join(arg_0.get_private_dir(create=True), \"meta.yaml\")\n arg_2 = {key: str(value) for key, value in arg_0.units.items()}\n arg_3 = dict(description=arg_0.description,\n ucds=arg_0.ucds, arg_2=arg_2, descriptions=arg_0.descriptions,\n )\n vaex.utils.write_json_or_yaml(arg_1, arg_3)"} +{"_id": "doc_1999", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, **arg_4):\n \"\"\"Generate a Subspaces object, based on a custom list of expressions or all possible combinations based on\n dimension\n\n :param expressions_list: list of list of expressions, where the inner list defines the subspace\n :param dimensions: if given, generates a subspace with all possible combinations for that dimension\n :param exclude: list of\n \"\"\"\n if arg_2 is not None:\n arg_1 = list(itertools.combinations(arg_0.get_column_names(), arg_2))\n if arg_3 is not None:\n import six\n\n def excluded(arg_5):\n if callable(arg_3):\n return arg_3(arg_5)\n elif isinstance(arg_3, six.string_types):\n return arg_3 in arg_5\n elif isinstance(arg_3, (list, tuple)):\n # $#expressions = set(expressions)\n for arg_6 in arg_3:\n if isinstance(arg_6, six.string_types):\n if arg_6 in arg_5:\n return True\n elif isinstance(arg_6, (list, tuple)):\n if set(arg_6).issubset(arg_5):\n return True\n else:\n raise ValueError(\"elements of exclude should contain a string or a sequence of strings\")\n else:\n raise ValueError(\"exclude should contain a string, a sequence of strings, or should be a callable\")\n return False\n # test if any of the elements of exclude are a subset of the expression\n arg_1 = [expr for expr in arg_1 if not excluded(expr)]\n 
logger.debug(\"expression list generated: %r\", arg_1)\n import vaex.legacy\n return vaex.legacy.Subspaces([arg_0(*arg_5, **arg_4) for arg_5 in arg_1])"} +{"_id": "doc_2000", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"Set the variable to an expression or value defined by expression_or_value.\n\n Example\n\n >>> df.Func(\"a\", 2.)\n >>> df.Func(\"b\", \"a**2\")\n >>> df.get_variable(\"b\")\n 'a**2'\n >>> df.evaluate_variable(\"b\")\n 4.0\n\n :param name: Name of the variable\n :param write: write variable to meta file\n :param expression: value or expression\n \"\"\"\n arg_0.variables[arg_1] = arg_2"} +{"_id": "doc_2001", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Evaluates the variable given by name.\"\"\"\n if isinstance(arg_0.variables[arg_1], six.string_types):\n # TODO: this does not allow more than one level deep variable, like a depends on b, b on c, c is a const\n arg_2 = eval(arg_0.variables[arg_1], expression_namespace, arg_0.variables)\n return arg_2\n else:\n return arg_0.variables[arg_1]"} +{"_id": "doc_2002", "title": "", "text": "def Func(arg_0, arg_1=\"default\", arg_2=None, arg_3=None, arg_4=None, arg_5=False):\n \"\"\"Internal use, ignores the filter\"\"\"\n arg_2 = arg_2 or 0\n arg_3 = arg_3 or len(arg_0)\n arg_6 = scopes._BlockScopeSelection(arg_0, arg_2, arg_3, arg_4, arg_5=arg_5)\n return arg_6.evaluate(arg_1)"} +{"_id": "doc_2003", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True, arg_4=False):\n \"\"\"Return a dict containing the ndarray corresponding to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :return: dict\n \"\"\"\n return dict(arg_0.to_items(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4))"} +{"_id": "doc_2004", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True, arg_4=False, arg_5=True):\n \"\"\"Return a copy of the DataFrame, if selection is None, it does not copy the data, it just has a reference\n\n :param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param selections: copy selections to a new DataFrame\n :return: dict\n \"\"\"\n if arg_1:\n arg_1 = _ensure_strings_from_expressions(arg_1)\n arg_6 = vaex.from_items(*arg_0.to_items(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=False))\n if arg_4:\n for arg_7, arg_8 in arg_0.virtual_columns.items():\n arg_6.add_virtual_column(arg_7, arg_8)\n if arg_5:\n # the filter selection does not need copying\n for arg_9, arg_8 in arg_0.selection_histories.items():\n if arg_9 != FILTER_SELECTION_NAME:\n arg_6.selection_histories[arg_9] = list(arg_8)\n for arg_9, arg_8 in arg_0.selection_history_indices.items():\n if arg_9 != FILTER_SELECTION_NAME:\n arg_6.selection_history_indices[arg_9] = arg_8\n arg_6.functions.update(arg_0.functions)\n arg_6.copy_metadata(arg_0)\n return arg_6"} +{"_id": "doc_2005", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True, arg_4=False, arg_5=None):\n \"\"\"Return a pandas 
DataFrame containing the ndarray corresponding to the evaluated data\n\n If index is given, that column is used for the index of the dataframe.\n\n Example\n\n >>> df_pandas = df.Func([\"x\", \"y\", \"z\"])\n >>> df_copy = vaex.from_pandas(df_pandas)\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param index_column: if this column is given it is used for the index of the DataFrame\n :return: pandas.DataFrame object\n \"\"\"\n import pandas as pd\n arg_6 = arg_0.to_dict(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n if arg_5 is not None:\n if arg_5 in arg_6:\n arg_7 = arg_6.pop(arg_5)\n else:\n arg_7 = arg_0.evaluate(arg_5, arg_2=arg_2)\n else:\n arg_7 = None\n arg_8 = pd.DataFrame(arg_6=arg_6, arg_7=arg_7)\n if arg_7 is not None:\n arg_8.index.name = arg_5\n return arg_8"} +{"_id": "doc_2006", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True, arg_4=False, arg_5=None):\n \"\"\"Returns a astropy table object containing the ndarrays corresponding to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param index: if this column is given it is used for the index of the DataFrame\n :return: astropy.table.Table object\n \"\"\"\n from astropy.table import Table, Column, MaskedColumn\n arg_6 = dict()\n arg_6[\"name\"] = arg_0.name\n arg_6[\"description\"] = arg_0.description\n\n arg_7 = Table(arg_6=arg_6)\n for arg_8, arg_9 in arg_0.to_items(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4):\n if arg_0.dtype(arg_8) == str_type: # for astropy we convert it to unicode, it seems to ignore object type\n arg_9 = np.array(arg_9).astype('U')\n arg_6 = dict()\n if arg_8 in arg_0.ucds:\n arg_6[\"ucd\"] = arg_0.ucds[arg_8]\n if np.ma.isMaskedArray(arg_9):\n arg_10 = MaskedColumn\n else:\n arg_10 = Column\n arg_7[arg_8] = arg_10(arg_9, unit=arg_0.unit(arg_8), description=arg_0.descriptions.get(arg_8), arg_6=arg_6)\n return arg_7"} +{"_id": "doc_2007", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add an in memory array as a column.\"\"\"\n if isinstance(arg_2, (np.ndarray, Column)):\n arg_3 = ar = arg_2\n # it can be None when we have an 'empty' DataFrameArrays\n if arg_0._length_original is None:\n arg_0._length_unfiltered = _len(arg_3)\n arg_0._length_original = _len(arg_3)\n arg_0._index_end = arg_0._length_unfiltered\n if _len(ar) != arg_0.length_original():\n if arg_0.filtered:\n # give a better warning to avoid confusion\n if len(arg_0) == len(ar):\n raise ValueError(\"Array is of length %s, while the length of the DataFrame is %s due to the filtering, the (unfiltered) length is %s.\" % (len(ar), len(arg_0), arg_0.length_unfiltered()))\n raise ValueError(\"array is of length %s, while the length of the DataFrame is %s\" % (len(ar), arg_0.length_original()))\n # assert self.length_unfiltered() == len(data), \"columns should be of equal length, length should be %d, while it is %d\" % ( self.length_unfiltered(), len(data))\n arg_0.columns[arg_1] 
= arg_2\n if arg_1 not in arg_0.column_names:\n arg_0.column_names.append(arg_1)\n else:\n raise ValueError(\"functions not yet implemented\")\n arg_0._save_assign_expression(arg_1, Expression(arg_0, arg_1))"} +{"_id": "doc_2008", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=True):\n \"\"\"Renames a column, not this is only the in memory name, this will not be reflected on disk\"\"\"\n arg_2 = vaex.utils.find_valid_name(arg_2, used=[] if not arg_3 else list(arg_0))\n arg_5 = arg_0.columns.get(arg_1)\n if arg_5 is not None:\n del arg_0.columns[arg_1]\n arg_0.column_names[arg_0.column_names.index(arg_1)] = arg_2\n arg_0.columns[arg_2] = arg_5\n else:\n arg_9 = arg_0.virtual_columns[arg_1]\n del arg_0.virtual_columns[arg_1]\n arg_0.virtual_columns[arg_2] = arg_9\n if arg_4:\n arg_0._renamed_columns.append((arg_1, arg_2))\n for arg_11 in [arg_0.ucds, arg_0.units, arg_0.descriptions]:\n if arg_1 in arg_11:\n arg_11[arg_2] = arg_11[arg_1]\n del arg_11[arg_1]\n return arg_2"} +{"_id": "doc_2009", "title": "", "text": "def Func(arg_0, arg_1=\"x\", arg_2=\"y\", arg_3=\"r_polar\", arg_4=\"phi_polar\",\n arg_5=False,\n arg_6=False):\n \"\"\"Convert cartesian to polar coordinates\n\n :param x: expression for x\n :param y: expression for y\n :param radius_out: name for the virtual column for the radius\n :param azimuth_out: name for the virtual column for the azimuth angle\n :param propagate_uncertainties: {propagate_uncertainties}\n :param radians: if True, azimuth is in radians, defaults to degrees\n :return:\n \"\"\"\n arg_1 = arg_0[arg_1]\n arg_2 = arg_0[arg_2]\n if arg_6:\n arg_7 = \"\"\n else:\n arg_7 = \"*180/pi\"\n arg_8 = np.sqrt(arg_1**2 + arg_2**2)\n arg_0[arg_3] = arg_8\n arg_9 = np.arctan2(arg_2, arg_1)\n if not arg_6:\n arg_9 = arg_9 * 180/np.pi\n arg_0[arg_4] = arg_9\n if arg_5:\n arg_0.propagate_uncertainties([arg_0[arg_3], arg_0[arg_4]])"} +{"_id": "doc_2010", "title": "", "text": "def Func(arg_0, arg_1=\"x\", arg_2=\"y\", arg_3=\"vx\", arg_4=None, arg_5=\"vy\", arg_6=\"vr_polar\", arg_7=\"vphi_polar\",\n arg_8=False,):\n \"\"\"Convert cartesian to polar velocities.\n\n :param x:\n :param y:\n :param vx:\n :param radius_polar: Optional expression for the radius, may lead to a better performance when given.\n :param vy:\n :param vr_out:\n :param vazimuth_out:\n :param propagate_uncertainties: {propagate_uncertainties}\n :return:\n \"\"\"\n arg_1 = arg_0._expr(arg_1)\n arg_2 = arg_0._expr(arg_2)\n arg_3 = arg_0._expr(arg_3)\n arg_5 = arg_0._expr(arg_5)\n if arg_4 is None:\n arg_4 = np.sqrt(arg_1**2 + arg_2**2)\n arg_4 = arg_0._expr(arg_4)\n arg_0[arg_6] = (arg_1*arg_3 + arg_2*arg_5) / arg_4\n arg_0[arg_7] = (arg_1*arg_5 - arg_2*arg_3) / arg_4\n if arg_8:\n arg_0.propagate_uncertainties([arg_0[arg_6], arg_0[arg_7]])"} +{"_id": "doc_2011", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=False):\n \"\"\"Rotation in 2d.\n\n :param str x: Name/expression of x column\n :param str y: idem for y\n :param str xnew: name of transformed x column\n :param str ynew:\n :param float angle_degrees: rotation in degrees, anti clockwise\n :return:\n \"\"\"\n arg_1 = _ensure_string_from_expression(arg_1)\n arg_2 = _ensure_string_from_expression(arg_2)\n arg_7 = np.radians(arg_5)\n arg_8 = np.array([[np.cos(arg_7), -np.sin(arg_7)], [np.sin(arg_7), np.cos(arg_7)]])\n arg_9 = matrix_name = arg_1 + \"_\" + arg_2 + \"_rot\"\n for arg_10 in range(2):\n for arg_11 in range(2):\n arg_0.set_variable(matrix_name + \"_%d%d\" % (arg_10, arg_11), arg_8[arg_10, 
arg_11].item())\n arg_0[arg_3] = arg_0._expr(\"{m}_00 * {x} + {m}_01 * {y}\".format(**locals()))\n arg_0[arg_4] = arg_0._expr(\"{m}_10 * {x} + {m}_11 * {y}\".format(**locals()))\n if arg_6:\n arg_0.propagate_uncertainties([arg_0[arg_3], arg_0[arg_4]])"} +{"_id": "doc_2012", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=\"x\", arg_5=\"y\", arg_6=\"z\",\n arg_7=False,\n arg_8=[0, 0, 0], arg_9=\"solar_position\", arg_10=False):\n \"\"\"Convert spherical to cartesian coordinates.\n\n\n\n :param alpha:\n :param delta: polar angle, ranging from the -90 (south pole) to 90 (north pole)\n :param distance: radial distance, determines the units of x, y and z\n :param xname:\n :param yname:\n :param zname:\n :param propagate_uncertainties: {propagate_uncertainties}\n :param center:\n :param center_name:\n :param radians:\n :return:\n \"\"\"\n arg_1 = arg_0._expr(arg_1)\n arg_2 = arg_0._expr(arg_2)\n arg_3 = arg_0._expr(arg_3)\n if not arg_10:\n arg_1 = arg_1 * arg_0._expr('pi')/180\n arg_2 = arg_2 * arg_0._expr('pi')/180\n\n # TODO: use sth like .optimize by default to get rid of the +0 ?\n if arg_8[0]:\n arg_0[arg_4] = np.cos(arg_1) * np.cos(arg_2) * arg_3 + arg_8[0]\n else:\n arg_0[arg_4] = np.cos(arg_1) * np.cos(arg_2) * arg_3\n if arg_8[1]:\n arg_0[arg_5] = np.sin(arg_1) * np.cos(arg_2) * arg_3 + arg_8[1]\n else:\n arg_0[arg_5] = np.sin(arg_1) * np.cos(arg_2) * arg_3\n if arg_8[2]:\n arg_0[arg_6] = np.sin(arg_2) * arg_3 + arg_8[2]\n else:\n arg_0[arg_6] = np.sin(arg_2) * arg_3\n if arg_7:\n arg_0.propagate_uncertainties([arg_0[arg_4], arg_0[arg_5], arg_0[arg_6]])"} +{"_id": "doc_2013", "title": "", "text": "def Func(arg_0, arg_1=\"x\", arg_2=\"y\", arg_3=\"z\", arg_4=\"l\", arg_5=\"b\", arg_6=\"distance\", arg_7=False, arg_8=None, arg_9=\"solar_position\"):\n \"\"\"Convert cartesian to spherical coordinates.\n\n\n\n :param x:\n :param y:\n :param z:\n :param alpha:\n :param delta: name for polar angle, ranges from -90 to 90 (or -pi to pi when radians is True).\n :param distance:\n :param radians:\n :param center:\n :param center_name:\n :return:\n \"\"\"\n arg_10 = \"\" if arg_7 else \"*180./pi\"\n\n if arg_8 is not None:\n arg_0.add_variable(arg_9, arg_8)\n if arg_8 is not None and arg_8[0] != 0:\n arg_1 = \"({x} - {center_name}[0])\".format(**locals())\n if arg_8 is not None and arg_8[1] != 0:\n arg_2 = \"({y} - {center_name}[1])\".format(**locals())\n if arg_8 is not None and arg_8[2] != 0:\n arg_3 = \"({z} - {center_name}[2])\".format(**locals())\n arg_0.add_virtual_column(arg_6, \"sqrt({x}**2 + {y}**2 + {z}**2)\".format(**locals()))\n # self.add_virtual_column(alpha, \"((arctan2({y}, {x}) + 2*pi) % (2*pi)){transform}\".format(**locals()))\n arg_0.add_virtual_column(arg_4, \"arctan2({y}, {x}){transform}\".format(**locals()))\n arg_0.add_virtual_column(arg_5, \"(-arccos({z}/{distance})+pi/2){transform}\".format(**locals()))"} +{"_id": "doc_2014", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Add a virtual column to the DataFrame.\n\n Example:\n\n >>> df.Func(\"r\", \"sqrt(x**2 + y**2 + z**2)\")\n >>> df.select(\"r < 10\")\n\n :param: str name: name of virtual column\n :param: expression: expression for the column\n :param str unique: if name is already used, make it unique by adding a postfix, e.g. 
_1, or _2\n \"\"\"\n arg_4 = \"change\" if arg_1 in arg_0.virtual_columns else \"add\"\n arg_2 = _ensure_string_from_expression(arg_2)\n if arg_1 in arg_0.get_column_names(virtual=False):\n arg_5 = '__' +vaex.utils.find_valid_name(arg_1, used=arg_0.get_column_names())\n arg_2 = arg_0._rename(arg_1, arg_5, arg_2)[0].expression\n\n arg_1 = vaex.utils.find_valid_name(arg_1, used=[] if not arg_3 else arg_0.get_column_names())\n arg_0.virtual_columns[arg_1] = arg_2\n arg_0.column_names.append(arg_1)\n arg_0._save_assign_expression(arg_1)\n arg_0.signal_column_changed.emit(arg_0, arg_1, \"add\")"} +{"_id": "doc_2015", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Deletes a virtual column from a DataFrame.\"\"\"\n del arg_0.virtual_columns[arg_1]\n arg_0.signal_column_changed.emit(arg_0, arg_1, \"delete\")"} +{"_id": "doc_2016", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True, arg_4=True):\n \"\"\"Add a variable to to a DataFrame.\n\n A variable may refer to other variables, and virtual columns and expression may refer to variables.\n\n Example\n\n >>> df.Func('center', 0)\n >>> df.add_virtual_column('x_prime', 'x-center')\n >>> df.select('x_prime < 0')\n\n :param: str name: name of virtual varible\n :param: expression: expression for the variable\n \"\"\"\n if arg_4 or arg_3 or arg_1 not in arg_0.variables:\n arg_5 = arg_0.get_column_names(virtual=False) + list(arg_0.variables.keys())\n arg_1 = vaex.utils.find_valid_name(arg_1, used=[] if not arg_4 else arg_5)\n arg_0.variables[arg_1] = arg_2\n arg_0.signal_variable_changed.emit(arg_0, arg_1, \"add\")\n if arg_4:\n return arg_1"} +{"_id": "doc_2017", "title": "", "text": "def Func(arg_0, arg_1=5):\n \"\"\"Display the first and last n elements of a DataFrame.\"\"\"\n from IPython import display\n display.display(display.HTML(arg_0._head_and_tail_table(arg_1)))"} +{"_id": "doc_2018", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True, arg_3=None):\n \"\"\"Give a description of the DataFrame.\n\n >>> import vaex\n >>> df = vaex.example()[['x', 'y', 'z']]\n >>> df.Func()\n x y z\n dtype float64 float64 float64\n count 330000 330000 330000\n missing 0 0 0\n mean -0.0671315 -0.0535899 0.0169582\n std 7.31746 7.78605 5.05521\n min -128.294 -71.5524 -44.3342\n max 271.366 146.466 50.7185\n >>> df.Func(selection=df.x > 0)\n x y z\n dtype float64 float64 float64\n count 164060 164060 164060\n missing 165940 165940 165940\n mean 5.13572 -0.486786 -0.0868073\n std 5.18701 7.61621 5.02831\n min 1.51635e-05 -71.5524 -44.3342\n max 271.366 78.0724 40.2191\n\n :param bool strings: Describe string columns or not\n :param bool virtual: Describe virtual columns or not\n :param selection: Optional selection to use.\n :return: Pandas dataframe\n\n \"\"\"\n import pandas as pd\n arg_4 = len(arg_0)\n arg_5 = {}\n for arg_6 in arg_0.get_column_names(arg_1=arg_1, arg_2=arg_2)[:]:\n arg_7 = str(arg_0.dtype(arg_6)) if arg_0.dtype(arg_6) != str else 'str'\n if arg_0.dtype(arg_6) == str_type or arg_0.dtype(arg_6).kind in ['S', 'U', 'O']:\n arg_8 = arg_0.count(arg_6, arg_3=arg_3, delay=True)\n arg_0.execute()\n arg_8 = arg_8.get()\n arg_5[arg_6] = ((arg_7, arg_8, arg_4-arg_8, '--', '--', '--', '--'))\n else:\n arg_8 = arg_0.count(arg_6, arg_3=arg_3, delay=True)\n arg_9 = arg_0.mean(arg_6, arg_3=arg_3, delay=True)\n arg_10 = arg_0.std(arg_6, arg_3=arg_3, delay=True)\n arg_11 = arg_0.minmax(arg_6, arg_3=arg_3, delay=True)\n arg_0.execute()\n arg_8, arg_9, arg_10, arg_11 = arg_8.get(), arg_9.get(), arg_10.get(), arg_11.get()\n arg_8 = 
int(arg_8)\n arg_5[arg_6] = ((arg_7, arg_8, arg_4-arg_8, arg_9, arg_10, arg_11[0], arg_11[1]))\n return pd.DataFrame(data=arg_5, index=['dtype', 'count', 'missing', 'mean', 'std', 'min', 'max'])"} +{"_id": "doc_2019", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the current row, and emit the signal signal_pick.\"\"\"\n if (arg_1 is not None) and ((arg_1 < 0) or (arg_1 >= len(arg_0))):\n raise IndexError(\"index %d out of range [0,%d]\" % (arg_1, len(arg_0)))\n arg_0._current_row = arg_1\n arg_0.signal_pick.emit(arg_0, arg_1)"} +{"_id": "doc_2020", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''Return a DataFrame, where all columns are 'Funcmed' by the active range.\n\n For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).\n\n {note_copy}\n\n :param inplace: {inplace}\n :rtype: DataFrame\n '''\n arg_2 = arg_0 if arg_1 else arg_0.copy()\n for arg_3 in arg_2:\n arg_4 = arg_2.columns.get(arg_3)\n if arg_4 is not None:\n if arg_0._index_start == 0 and len(arg_4) == arg_0._index_end:\n pass # we already assigned it in .copy\n else:\n if isinstance(arg_4, np.ndarray): # real array\n arg_2.columns[arg_3] = arg_4[arg_0._index_start:arg_0._index_end]\n else:\n arg_2.columns[arg_3] = arg_4.Func(arg_0._index_start, arg_0._index_end)\n arg_2._length_original = arg_0.length_unfiltered()\n arg_2._length_unfiltered = arg_2._length_original\n arg_2._index_start = 0\n arg_2._index_end = arg_2._length_original\n arg_2._active_fraction = 1\n return arg_2"} +{"_id": "doc_2021", "title": "", "text": "def Func(arg_0, arg_1):\n '''Returns a DataFrame containing only rows indexed by indices\n\n {note_copy}\n\n Example:\n\n >>> import vaex, numpy as np\n >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))\n >>> df.Func([0,2])\n # s x\n 0 a 1\n 1 c 3\n\n :param indices: sequence (list or numpy array) with row numbers\n :return: DataFrame which is a shallow copy of the original data.\n :rtype: DataFrame\n '''\n arg_2 = arg_0.copy()\n # if the columns in ds already have a ColumnIndex\n # we could do, direct_indices = df.column['bla'].indices[indices]\n # which should be shared among multiple ColumnIndex'es, so we store\n # them in this dict\n arg_3 = {}\n arg_1 = np.array(arg_1)\n for arg_4 in arg_2:\n arg_5 = arg_2.columns.get(arg_4)\n if arg_5 is not None:\n # we optimize this somewhere, so we don't do multiple\n # levels of indirection\n if isinstance(arg_5, ColumnIndexed):\n # TODO: think about what happpens when the indices are masked.. 
?\n if arg_7(arg_5.indices) not in arg_3:\n arg_6 = arg_5.indices[arg_1]\n arg_3[arg_7(arg_5.indices)] = arg_6\n else:\n arg_6 = arg_3[arg_7(arg_5.indices)]\n arg_2.columns[arg_4] = ColumnIndexed(arg_5.df, arg_6, arg_5.name)\n else:\n arg_2.columns[arg_4] = ColumnIndexed(arg_0, arg_1, arg_4)\n arg_2._length_original = len(arg_1)\n arg_2._length_unfiltered = arg_2._length_original\n arg_2.set_selection(None, arg_4=FILTER_SELECTION_NAME)\n return arg_2"} +{"_id": "doc_2022", "title": "", "text": "def Func(arg_0):\n '''Return a DataFrame containing only the filtered rows.\n\n {note_copy}\n\n The resulting DataFrame may be more efficient to work with when the original DataFrame is\n heavily filtered (contains just a small number of rows).\n\n If no filtering is applied, it returns a trimmed view.\n For the returned df, len(df) == df.length_original() == df.length_unfiltered()\n\n :rtype: DataFrame\n '''\n arg_1 = arg_0.trim()\n if arg_1.filtered:\n arg_2 = arg_1._filtered_range_to_unfiltered_indices(0, len(arg_1))\n return arg_1.take(arg_2)\n else:\n return arg_1"} +{"_id": "doc_2023", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''Returns a list containing random portions of the DataFrame.\n\n {note_copy}\n\n Example:\n\n >>> import vaex, import numpy as np\n >>> np.random.seed(111)\n >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> for dfs in df.Func(frac=0.3, random_state=42):\n ... print(dfs.x.values)\n ...\n [8 1 5]\n [0 7 2 9 4 3 6]\n >>> for split in df.Func(frac=[0.2, 0.3, 0.5], random_state=42):\n ... print(dfs.x.values)\n [8 1]\n [5 0 7]\n [2 9 4 3 6]\n\n :param int/list frac: If int will split the DataFrame in two portions, the first of which will have size as specified by this parameter. If list, the generator will generate as many portions as elements in the list, where each element defines the relative fraction of that portion.\n :param int random_state: (default, None) Random number seed for reproducibility.\n :return: A list of DataFrames.\n :rtype: list\n '''\n arg_0 = arg_0.extract()\n if type(arg_2) == int or arg_2 is None:\n arg_2 = np.random.RandomState(seed=arg_2)\n arg_3 = arg_2.choice(len(arg_0), len(arg_0), replace=False)\n return arg_0.take(arg_3).split(arg_1)"} +{"_id": "doc_2024", "title": "", "text": "def Func(arg_0, arg_1):\n '''Returns a list containing ordered subsets of the DataFrame.\n\n {note_copy}\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> for dfs in df.Func(frac=0.3):\n ... print(dfs.x.values)\n ...\n [0 1 3]\n [3 4 5 6 7 8 9]\n >>> for Func in df.Func(frac=[0.2, 0.3, 0.5]):\n ... print(dfs.x.values)\n [0 1]\n [2 3 4]\n [5 6 7 8 9]\n\n :param int/list frac: If int will Func the DataFrame in two portions, the first of which will have size as specified by this parameter. 
If list, the generator will generate as many portions as elements in the list, where each element defines the relative fraction of that portion.\n :return: A list of DataFrames.\n :rtype: list\n '''\n arg_0 = arg_0.extract()\n if _issequence(arg_1):\n # make sure it is normalized\n arg_2 = sum(arg_1)\n arg_1 = [k / arg_2 for k in arg_1]\n else:\n assert arg_1 <= 1, \"fraction should be <= 1\"\n arg_1 = [arg_1, 1 - arg_1]\n arg_3 = np.round(np.cumsum(arg_1) * len(arg_0)).astype(np.int64)\n arg_4 = 0\n for arg_5 in arg_3:\n yield arg_0[arg_4:arg_5]\n arg_4 = arg_5"} +{"_id": "doc_2025", "title": "", "text": "def Func(arg_0, arg_1=\"default\", arg_2=None):\n \"\"\"Undo selection, for the name.\"\"\"\n logger.debug(\"undo\")\n arg_2 = arg_2 or arg_0.executor\n assert arg_0.selection_can_undo(arg_1=arg_1)\n arg_3 = arg_0.selection_histories[arg_1]\n arg_4 = arg_0.selection_history_indices[arg_1]\n arg_0.selection_history_indices[arg_1] -= 1\n arg_0.signal_selection_changed.emit(arg_0)\n logger.debug(\"undo: selection history is %r, index is %r\", arg_3, arg_0.selection_history_indices[arg_1])"} +{"_id": "doc_2026", "title": "", "text": "def Func(arg_0, arg_1=\"default\"):\n \"\"\"Can selection name be redone?\"\"\"\n return (arg_0.selection_history_indices[arg_1] + 1) < len(arg_0.selection_histories[arg_1])"} +{"_id": "doc_2027", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"replace\", arg_3=\"default\", arg_4=None):\n \"\"\"Perform a Funcion, defined by the boolean expression, and combined with the previous Funcion using the given mode.\n\n Selections are recorded in a history tree, per name, undo/redo can be done for them separately.\n\n :param str boolean_expression: Any valid column expression, with comparison operators\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name: history tree or Funcion 'slot' to use\n :param executor:\n :return:\n \"\"\"\n arg_1 = _ensure_string_from_expression(arg_1)\n if arg_1 is None and not arg_0.has_Funcion(arg_3=arg_3):\n pass # we don't want to pollute the history with many None Funcions\n arg_0.signal_Funcion_changed.emit(arg_0) # TODO: unittest want to know, does this make sense?\n else:\n def create(arg_5):\n return Funcions.SelectionExpression(arg_1, arg_5, arg_2) if arg_1 else None\n arg_0._Funcion(create, arg_3)"} +{"_id": "doc_2028", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True, arg_3=None, arg_4=\"replace\", arg_5=\"default\"):\n \"\"\"Create a selection that selects rows having non missing values for all columns in column_names.\n\n The name reflect Panda's, no rows are really dropped, but a mask is kept to keep track of the selection\n\n :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)\n :param drop_masked: drop rows when there is a masked value in any of the columns\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name: history tree or selection 'slot' to use\n :return:\n \"\"\"\n arg_3 = arg_3 or arg_0.get_column_names(virtual=False)\n\n def create(arg_6):\n return selections.SelectionDropNa(arg_1, arg_2, arg_3, arg_6, arg_4)\n arg_0._selection(create, arg_5)"} +{"_id": "doc_2029", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True, arg_3=None):\n \"\"\"Create a shallow copy of a DataFrame, with filtering set using select_non_missing.\n\n :param drop_nan: drop rows when there is a NaN in any of the 
columns (will only affect float values)\n :param drop_masked: drop rows when there is a masked value in any of the columns\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :rtype: DataFrame\n \"\"\"\n arg_4 = arg_0.copy()\n arg_4.select_non_missing(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n name=FILTER_SELECTION_NAME, mode='and')\n return arg_4"} +{"_id": "doc_2030", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=\"replace\", arg_5=\"default\"):\n \"\"\"Select a 2d rectangular box in the space given by x and y, bounds by limits.\n\n Example:\n\n >>> df.select_box('x', 'y', [(0, 10), (0, 1)])\n\n :param x: expression for the x space\n :param y: expression fo the y space\n :param limits: sequence of shape [(x1, x2), (y1, y2)]\n :param mode:\n \"\"\"\n arg_0.select_box([arg_1, arg_2], arg_3, arg_4=arg_4, arg_5=arg_5)"} +{"_id": "doc_2031", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"replace\", arg_4=\"default\"):\n \"\"\"Select a n-dimensional rectangular box bounded by limits.\n\n The following examples are equivalent:\n\n >>> df.Func(['x', 'y'], [(0, 10), (0, 1)])\n >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])\n\n :param spaces: list of expressions\n :param limits: sequence of shape [(x1, x2), (y1, y2)]\n :param mode:\n :param name:\n :return:\n \"\"\"\n arg_5 = [(min(l), max(l)) for l in arg_2]\n arg_6 = [\"((%s) >= %f) & ((%s) <= %f)\" % (expression, lmin, expression, lmax) for\n (expression, (lmin, lmax)) in zip(arg_1, arg_5)]\n arg_0.select(\"&\".join(arg_6), arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_2032", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7=0, arg_8=\"replace\", arg_9=\"default\", arg_10=False, arg_11=True):\n \"\"\"\n Select an elliptical region centred on xc, yc, with a certain width, height\n and angle.\n\n Example:\n\n >>> df.Func('x','y', 2, -1, 5,1, 30, name='my_ellipse')\n\n :param x: expression for the x space\n :param y: expression for the y space\n :param xc: location of the centre of the ellipse in x\n :param yc: location of the centre of the ellipse in y\n :param width: the width of the ellipse (diameter)\n :param height: the width of the ellipse (diameter)\n :param angle: (degrees) orientation of the ellipse, counter-clockwise\n measured from the y axis\n :param name: name of the selection\n :param mode:\n :return:\n\n \"\"\"\n\n # Computing the properties of the ellipse prior to selection\n if arg_10:\n pass\n else:\n arg_12 = np.deg2rad(arg_7)\n arg_13 = arg_5 / 2\n arg_14 = arg_6 / 2\n arg_15 = max(arg_13, arg_14)\n arg_16 = arg_13 / arg_15\n arg_17 = arg_14 / arg_15\n\n arg_18 = \"(({x}-{xc})*cos({alpha})+({y}-{yc})*sin({alpha}))**2/{a}**2 + (({x}-{xc})*sin({alpha})-({y}-{yc})*cos({alpha}))**2/{b}**2 <= {r}**2\".format(**locals())\n\n if arg_11:\n arg_18 = ((arg_0[arg_1] - arg_3) * np.cos(arg_12) + (arg_0[arg_2] - arg_4) * np.sin(arg_12))**2 / arg_16**2 + ((arg_0[arg_1] - arg_3) * np.sin(arg_12) - (arg_0[arg_2] - arg_4) * np.cos(arg_12))**2 / arg_17**2 <= arg_15**2\n else:\n arg_18 = ((arg_0[arg_1] - arg_3) * np.cos(arg_12) + (arg_0[arg_2] - arg_4) * np.sin(arg_12))**2 / arg_16**2 + ((arg_0[arg_1] - arg_3) * np.sin(arg_12) - (arg_0[arg_2] - arg_4) * np.cos(arg_12))**2 / arg_17**2 < arg_15**2\n\n arg_0.select(boolean_expression=arg_18, arg_8=arg_8, arg_9=arg_9)"} +{"_id": "doc_2033", "title": "", "text": "def Func(arg_0, arg_1=\"default\", arg_2=None):\n \"\"\"Invert the selection, i.e. 
what is selected will not be, and vice versa\n\n :param str name:\n :param executor:\n :return:\n \"\"\"\n\n def create(arg_3):\n return selections.SelectionInvert(arg_3)\n arg_0._selection(create, arg_1, arg_2=arg_2)"} +{"_id": "doc_2034", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"default\", arg_3=None):\n \"\"\"Sets the selection object\n\n :param selection: Selection object\n :param name: selection 'slot'\n :param executor:\n :return:\n \"\"\"\n def create(arg_4):\n return arg_1\n arg_0._selection(create, arg_2, arg_3=arg_3, execute_fully=True)"} +{"_id": "doc_2035", "title": "", "text": "def Func(arg_0, arg_1):\n '''Finds a non-colliding name by optional postfixing'''\n return vaex.utils.find_valid_name(arg_1, used=arg_0.get_column_names(hidden=True))"} +{"_id": "doc_2036", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return a graphviz.Digraph object with a graph of all virtual columns\"\"\"\n from graphviz import Digraph\n arg_1 = arg_1 or Digraph(comment='whole dataframe')\n arg_2 = arg_0._root_nodes()\n for arg_3 in arg_2:\n arg_0[arg_3].Func(arg_1=arg_1)\n return arg_1"} +{"_id": "doc_2037", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\"Mark column as categorical, with given labels, assuming zero indexing\"\"\"\n arg_1 = _ensure_string_from_expression(arg_1)\n if arg_3:\n arg_4, arg_5 = arg_0.minmax(arg_1)\n if arg_2 is None:\n arg_6 = int(arg_5 + 1)\n arg_2 = list(map(str, range(arg_6)))\n if (arg_5 - arg_4) >= len(arg_2):\n raise ValueError('value of {} found, which is larger than number of labels {}'.format(arg_5, len(arg_2)))\n arg_0._categories[arg_1] = dict(arg_2=arg_2, arg_6=len(arg_2))"} +{"_id": "doc_2038", "title": "", "text": "def Func(arg_0):\n \"\"\"Gives direct access to the Func as numpy arrays.\n\n Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion.\n Only real columns (i.e. no virtual) columns can be accessed, for getting the Func from virtual columns, use\n DataFrame.evalulate(...).\n\n Columns can be accesed by there names, which are attributes. 
The attribues are of type numpy.ndarray.\n\n Example:\n\n >>> df = vaex.example()\n >>> r = np.sqrt(df.Func.x**2 + df.Func.y**2)\n\n \"\"\"\n class Datas(object):\n pass\n\n arg_1 = Datas()\n for arg_2, arg_3 in arg_0.columns.items():\n setattr(arg_1, arg_2, arg_3)\n return arg_1"} +{"_id": "doc_2039", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Get the Func of the DataFrames, for the selection of the whole DataFrame.\n\n If selection is False, it returns len(df).\n\n TODO: Implement this in DataFrameRemote, and move the method up in :func:`DataFrame.Func`\n\n :param selection: When True, will return the number of selected rows\n :return:\n \"\"\"\n if arg_1:\n return 0 if arg_0.mask is None else np.sum(arg_0.mask)\n else:\n return len(arg_0)"} +{"_id": "doc_2040", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Join the columns of the other DataFrame to this one, assuming the ordering is the same\"\"\"\n assert len(arg_0) == len(arg_1), \"does not make sense to horizontally stack DataFrames with different lengths\"\n for arg_3 in arg_1.get_column_names():\n if arg_2:\n arg_4 = arg_2 + arg_3\n else:\n arg_4 = arg_3\n arg_0.add_column(arg_4, arg_1.columns[arg_3])"} +{"_id": "doc_2041", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Concatenates two DataFrames, adding the rows of one the other DataFrame to the current, returned in a new DataFrame.\n\n No copy of the data is made.\n\n :param other: The other DataFrame that is Funcenated with this DataFrame\n :return: New DataFrame with the rows Funcenated\n :rtype: DataFrameConcatenated\n \"\"\"\n arg_2 = []\n if isinstance(arg_0, DataFrameConcatenated):\n arg_2.extend(arg_0.dfs)\n else:\n arg_2.extend([arg_0])\n if isinstance(arg_1, DataFrameConcatenated):\n arg_2.extend(arg_1.dfs)\n else:\n arg_2.extend([arg_1])\n return DataFrameConcatenated(arg_2)"} +{"_id": "doc_2042", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=\"=\", arg_4=False, arg_5=False, arg_6=None, arg_7=False, arg_8=None, arg_9=True):\n \"\"\"Exports the DataFrame to a vaex hdf5 file\n\n :param DataFrameLocal df: DataFrame to export\n :param str path: path for file\n :param lis[str] column_names: list of column names to export or None for all columns\n :param str byteorder: = for native, < for little endian and > for big endian\n :param bool shuffle: export rows in random order\n :param bool selection: export selection or not\n :param progress: progress callback that gets a progress fraction as argument and should return True to continue,\n or a default progress bar when progress=True\n :param: bool virtual: When True, export virtual columns\n :param str sort: expression used for sorting the output\n :param bool ascending: sort ascending (True) or descending\n :return:\n \"\"\"\n import vaex.export\n vaex.export.Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=arg_6, arg_7=arg_7, arg_8=arg_8, arg_9=arg_9)"} +{"_id": "doc_2043", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add a column to the DataFrame\n\n :param str name: name of column\n :param data: numpy array with the data\n \"\"\"\n # assert _is_array_type_ok(data), \"dtype not supported: %r, %r\" % (data.dtype, data.dtype.type)\n # self._length = len(data)\n # if self._length_unfiltered is None:\n # self._length_unfiltered = len(data)\n # self._length_original = len(data)\n # self._index_end = self._length_unfiltered\n super(DataFrameArrays, arg_0).Func(arg_1, arg_2)\n arg_0._length_unfiltered = int(round(arg_0._length_original * 
arg_0._active_fraction))"} +{"_id": "doc_2044", "title": "", "text": "def Func(arg_0):\n '''Adds method f to the DataFrame class'''\n arg_1 = arg_0.__name__\n setattr(DataFrame, arg_1, arg_0)\n return arg_0"} +{"_id": "doc_2045", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=True):\n '''Returns an array where missing values are replaced by value.\n\n If the dtype is object, nan values and 'nan' string values\n are replaced by value when fill_nan==True.\n '''\n arg_0 = arg_0 if not isinstance(arg_0, column.Column) else arg_0.to_numpy()\n if arg_0.dtype.kind in 'O' and arg_2:\n arg_4 = arg_0.astype(str)\n arg_5 = arg_4 == 'nan'\n arg_0 = arg_0.copy()\n arg_0[arg_5] = arg_1\n elif arg_0.dtype.kind in 'f' and arg_2:\n arg_5 = np.isnan(arg_0)\n if np.any(arg_5):\n arg_0 = arg_0.copy()\n arg_0[arg_5] = arg_1\n if arg_3 and np.ma.isMaskedArray(arg_0):\n arg_5 = arg_0.mask\n if np.any(arg_5):\n arg_0 = arg_0.data.copy()\n arg_0[arg_5] = arg_1\n return arg_0"} +{"_id": "doc_2046", "title": "", "text": "def Func(arg_0):\n \"\"\"Obtain the day of the week with Monday=0 and Sunday=6\n\n :returns: an expression containing the day of week.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.dayofweek\n Expression = Func(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 0\n 1 3\n 2 3\n \"\"\"\n import pandas as pd\n return pd.Series(arg_0).dt.dayofweek.values"} +{"_id": "doc_2047", "title": "", "text": "def Func(arg_0):\n \"\"\"The ordinal day of the year.\n\n :returns: an expression containing the ordinal day of the year.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.dayofyear\n Expression = Func(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 285\n 1 42\n 2 316\n \"\"\"\n import pandas as pd\n return pd.Series(arg_0).dt.dayofyear.values"} +{"_id": "doc_2048", "title": "", "text": "def Func(arg_0):\n \"\"\"Extracts the month out of a datetime sample.\n\n :returns: an expression containing the month extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.month\n Expression = Func(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 10\n 1 2\n 2 11\n \"\"\"\n import pandas as pd\n return pd.Series(arg_0).dt.month.values"} +{"_id": "doc_2049", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the month names of a datetime sample in English.\n\n :returns: an expression containing the month names extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 
2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.month_name\n Expression = Func(date)\n Length: 3 dtype: str (expression)\n ---------------------------------\n 0 October\n 1 February\n 2 November\n \"\"\"\n import pandas as pd\n return pd.Series(_pandas_dt_fix(arg_0)).dt.month_name().values.astype(str)"} +{"_id": "doc_2050", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the day names of a datetime sample in English.\n\n :returns: an expression containing the day names extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.day_name\n Expression = Func(date)\n Length: 3 dtype: str (expression)\n ---------------------------------\n 0 Monday\n 1 Thursday\n 2 Thursday\n \"\"\"\n import pandas as pd\n return pd.Series(_pandas_dt_fix(arg_0)).dt.day_name().values.astype(str)"} +{"_id": "doc_2051", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the week ordinal of the year.\n\n :returns: an expression containing the week ordinal of the year, extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.weekofyear\n Expression = Func(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 42\n 1 6\n 2 46\n \"\"\"\n import pandas as pd\n return pd.Series(arg_0).dt.weekofyear.values"} +{"_id": "doc_2052", "title": "", "text": "def Func(arg_0):\n \"\"\"Extracts the hour out of a datetime samples.\n\n :returns: an expression containing the hour extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.hour\n Expression = Func(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 10\n 2 11\n \"\"\"\n import pandas as pd\n return pd.Series(arg_0).dt.hour.values"} +{"_id": "doc_2053", "title": "", "text": "def Func(arg_0):\n \"\"\"Extracts the minute out of a datetime samples.\n\n :returns: an expression containing the minute extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.minute\n Expression = Func(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 31\n 1 17\n 2 34\n \"\"\"\n import pandas as pd\n return pd.Series(arg_0).dt.minute.values"} +{"_id": "doc_2054", "title": "", "text": "def Func(arg_0):\n \"\"\"Capitalize the first letter of a string sample.\n\n :returns: an expression containing the capitalized strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very 
pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.capitalize()\n Expression = Func(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Something\n 1 Very pretty\n 2 Is coming\n 3 Our\n 4 Way.\n \"\"\"\n arg_1 = _to_string_sequence(arg_0).capitalize()\n return column.ColumnStringArrow(arg_1.bytes, arg_1.indices, arg_1.length, arg_1.offset, string_sequence=arg_1)"} +{"_id": "doc_2055", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Concatenate two string columns on a row-by-row basis.\n\n :param expression other: The expression of the other column to be concatenated.\n :returns: an expression containing the concatenated columns.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.cat(df.text)\n Expression = Func(text, text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 SomethingSomething\n 1 very prettyvery pretty\n 2 is comingis coming\n 3 ourour\n 4 way.way.\n \"\"\"\n arg_2 = _to_string_sequence(arg_0)\n arg_3 = _to_string_sequence(arg_1)\n arg_4 = arg_2.concat(arg_3)\n return column.ColumnStringArrow.from_string_sequence(arg_4)"} +{"_id": "doc_2056", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Check if a string pattern or regex is contained within a sample of a string column.\n\n :param str pattern: A string or regex pattern\n :param bool regex: If True,\n :returns: an expression which is evaluated to True if the pattern is found in a given sample, and it is False otherwise.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.contains('very')\n Expression = Func(text, 'very')\n Length: 5 dtype: bool (expression)\n ----------------------------------\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n \"\"\"\n return _to_string_sequence(arg_0).search(arg_1, arg_2)"} +{"_id": "doc_2057", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Count the occurences of a pattern in sample of a string column.\n\n :param str pat: A string or regex pattern\n :param bool regex: If True,\n :returns: an expression containing the number of times a pattern is found in each sample.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.count(pat=\"et\", regex=False)\n Expression = Func(text, pat='et', regex=False)\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 1\n 1 1\n 2 0\n 3 0\n 4 0\n \"\"\"\n return _to_string_sequence(arg_0).count(arg_1, arg_2)"} +{"_id": "doc_2058", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"\"\"Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a\n sample. 
If the substring is not found, -1 is returned.\n\n :param str sub: A substring to be found in the samples\n :param int start:\n :param int end:\n :returns: an expression containing the lowest indices specifying the start of the substring.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.find(sub=\"et\")\n Expression = Func(text, sub='et')\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 7\n 2 -1\n 3 -1\n 4 -1\n \"\"\"\n return _to_string_sequence(arg_0).find(arg_1, arg_2, 0 if arg_3 is None else arg_3, arg_3 is None, True)"} +{"_id": "doc_2059", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts string samples to lower case.\n\n :returns: an expression containing the converted strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.lower()\n Expression = Func(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n \"\"\"\n arg_1 = _to_string_sequence(arg_0).lower()\n return column.ColumnStringArrow(arg_1.bytes, arg_1.indices, arg_1.length, arg_1.offset, string_sequence=arg_1)"} +{"_id": "doc_2060", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Remove leading characters from a string sample.\n\n :param str to_strip: The string to be removed\n :returns: an expression containing the modified string column.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.lstrip(to_strip='very ')\n Expression = Func(text, to_strip='very ')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Something\n 1 pretty\n 2 is coming\n 3 our\n 4 way.\n \"\"\"\n # in c++ we give empty string the same meaning as None\n arg_2 = _to_string_sequence(arg_0).lstrip('' if arg_1 is None else arg_1) if arg_1 != '' else arg_0\n return column.ColumnStringArrow(arg_2.bytes, arg_2.indices, arg_2.length, arg_2.offset, string_sequence=arg_2)"} +{"_id": "doc_2061", "title": "", "text": "def Func(arg_0, arg_1, arg_2='left', arg_3=' '):\n \"\"\"Pad strings in a given column.\n\n :param int width: The total width of the string\n :param str side: If 'left' than pad on the left, if 'right' than pad on the right side the string.\n :param str fillchar: The character used for padding.\n :returns: an expression containing the padded strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.pad(width=10, side='left', fillchar='!')\n Expression = Func(text, width=10, side='left', fillchar='!')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 !Something\n 1 very pretty\n 2 !is coming\n 3 !!!!!!!our\n 4 !!!!!!way.\n \"\"\"\n arg_4 = _to_string_sequence(arg_0).pad(arg_1, arg_3, arg_2 in ['left', 'both'], arg_2 in ['right', 'both'])\n return column.ColumnStringArrow(arg_4.bytes, arg_4.indices, arg_4.length, 
arg_4.offset, string_sequence=arg_4)"} +{"_id": "doc_2062", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Duplicate each string in a column.\n\n :param int repeats: number of times each string sample is to be duplicated.\n :returns: an expression containing the duplicated strings\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.repeat(3)\n Expression = Func(text, 3)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 SomethingSomethingSomething\n 1 very prettyvery prettyvery pretty\n 2 is comingis comingis coming\n 3 ourourour\n 4 way.way.way.\n \"\"\"\n arg_2 = _to_string_sequence(arg_0).repeat(arg_1)\n return column.ColumnStringArrow(arg_2.bytes, arg_2.indices, arg_2.length, arg_2.offset, string_sequence=arg_2)"} +{"_id": "doc_2063", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"\"\"Returns the highest indices in each string in a column, where the provided substring is fully contained between within a\n sample. If the substring is not found, -1 is returned.\n\n :param str sub: A substring to be found in the samples\n :param int start:\n :param int end:\n :returns: an expression containing the highest indices specifying the start of the substring.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rfind(sub=\"et\")\n Expression = Func(text, sub='et')\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 7\n 2 -1\n 3 -1\n 4 -1\n \"\"\"\n return _to_string_sequence(arg_0).find(arg_1, arg_2, 0 if arg_3 is None else arg_3, arg_3 is None, False)"} +{"_id": "doc_2064", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"\"\"Returns the highest indices in each string in a column, where the provided substring is fully contained between within a\n sample. If the substring is not found, -1 is returned. 
Same as `str.rfind`.\n\n :param str sub: A substring to be found in the samples\n :param int start:\n :param int end:\n :returns: an expression containing the highest indices specifying the start of the substring.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rindex(sub=\"et\")\n Expression = Func(text, sub='et')\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 7\n 2 -1\n 3 -1\n 4 -1\n \"\"\"\n return str_rfind(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_2065", "title": "", "text": "def Func(arg_0, arg_1, arg_2=' '):\n \"\"\"Fills the left side of string samples with a specified character such that the strings are left-hand justified.\n\n :param int width: The minimal width of the strings.\n :param str fillchar: The character used for filling.\n :returns: an expression containing the filled strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rjust(width=10, fillchar='!')\n Expression = Func(text, width=10, fillchar='!')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 !Something\n 1 very pretty\n 2 !is coming\n 3 !!!!!!!our\n 4 !!!!!!way.\n \"\"\"\n arg_3 = _to_string_sequence(arg_0).pad(arg_1, arg_2, True, False)\n return column.ColumnStringArrow(arg_3.bytes, arg_3.indices, arg_3.length, arg_3.offset, string_sequence=arg_3)"} +{"_id": "doc_2066", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Remove trailing characters from a string sample.\n\n :param str to_strip: The string to be removed\n :returns: an expression containing the modified string column.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rstrip(to_strip='ing')\n Expression = Func(text, to_strip='ing')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Someth\n 1 very pretty\n 2 is com\n 3 our\n 4 way.\n \"\"\"\n # in c++ we give empty string the same meaning as None\n arg_2 = _to_string_sequence(arg_0).rstrip('' if arg_1 is None else arg_1) if arg_1 != '' else arg_0\n return column.ColumnStringArrow(arg_2.bytes, arg_2.indices, arg_2.length, arg_2.offset, string_sequence=arg_2)"} +{"_id": "doc_2067", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=None): # TODO: support n\n \"\"\"Slice substrings from each string element in a column.\n\n :param int start: The start position for the slice operation.\n :param int end: The stop position for the slice operation.\n :returns: an expression containing the sliced substrings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.slice(start=2, stop=5)\n Expression = str_pandas_slice(text, start=2, stop=5)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 met\n 1 ry\n 2 co\n 3 r\n 4 y.\n \"\"\"\n if arg_2 is None:\n arg_3 = _to_string_sequence(arg_0).slice_string_end(arg_1)\n else:\n arg_3 = 
_to_string_sequence(arg_0).slice_string(arg_1, arg_2)\n return arg_3"} +{"_id": "doc_2068", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Removes leading and trailing characters.\n\n Strips whitespaces (including new lines), or a set of specified\n characters from each string saple in a column, both from the left\n right sides.\n\n :param str to_strip: The characters to be removed. All combinations of the characters will be removed.\n If None, it removes whitespaces.\n :param returns: an expression containing the modified string samples.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.strip(to_strip='very')\n Expression = Func(text, to_strip='very')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Something\n 1 prett\n 2 is coming\n 3 ou\n 4 way.\n \"\"\"\n # in c++ we give empty string the same meaning as None\n arg_2 = _to_string_sequence(arg_0).strip('' if arg_1 is None else arg_1) if arg_1 != '' else arg_0\n return column.ColumnStringArrow(arg_2.bytes, arg_2.indices, arg_2.length, arg_2.offset, string_sequence=arg_2)"} +{"_id": "doc_2069", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts all string samples to titlecase.\n\n :returns: an expression containing the converted strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.title()\n Expression = Func(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Something\n 1 Very Pretty\n 2 Is Coming\n 3 Our\n 4 Way.\n \"\"\"\n arg_1 = _to_string_sequence(arg_0).title()\n return column.ColumnStringArrow(arg_1.bytes, arg_1.indices, arg_1.length, arg_1.offset, string_sequence=arg_1)"} +{"_id": "doc_2070", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts all strings in a column to uppercase.\n\n :returns: an expression containing the converted strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n\n >>> df.text.str.upper()\n Expression = Func(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 SOMETHING\n 1 VERY PRETTY\n 2 IS COMING\n 3 OUR\n 4 WAY.\n\n \"\"\"\n arg_1 = _to_string_sequence(arg_0).upper()\n return column.ColumnStringArrow(arg_1.bytes, arg_1.indices, arg_1.length, arg_1.offset, string_sequence=arg_1)"} +{"_id": "doc_2071", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Writes a comment to the file in Java properties format.\n\n Newlines in the comment text are automatically turned into a continuation\n of the comment by adding a \"#\" to the beginning of each line.\n\n :param fh: a writable file-like object\n :param comment: comment string to write\n \"\"\"\n _require_string(arg_1, 'comments')\n arg_0.write(_escape_comment(arg_1))\n arg_0.write(b'\\n')"} +{"_id": "doc_2072", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Incrementally read properties from a Java .properties file.\n\n Yields tuples of key/value pairs.\n\n If ``comments`` is `True`, comments will be included with ``jprops.COMMENT``\n in place of the key.\n\n :param fh: a 
readable file-like object\n :param comments: should include comments (default: False)\n \"\"\"\n for arg_2 in _property_lines(arg_0):\n arg_3, arg_4 = _split_key_value(arg_2)\n if arg_3 is not COMMENT:\n arg_3 = _unescape(arg_3)\n elif not arg_1:\n continue\n yield arg_3, _unescape(arg_4)"} +{"_id": "doc_2073", "title": "", "text": "def Func():\n '''Return the version information for all librosa dependencies.'''\n\n arg_0 = ['audioread',\n 'numpy',\n 'scipy',\n 'sklearn',\n 'joblib',\n 'decorator',\n 'six',\n 'soundfile',\n 'resampy',\n 'numba']\n\n arg_1 = ['numpydoc',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'sphinxcontrib.versioning',\n 'sphinx-gallery',\n 'pytest',\n 'pytest-mpl',\n 'pytest-cov',\n 'matplotlib']\n\n print('INSTALLED VERSIONS')\n print('------------------')\n print('python: {}\\n'.format(sys.version))\n print('librosa: {}\\n'.format(version))\n for arg_2 in arg_0:\n print('{}: {}'.format(arg_2, __get_mod_version(arg_2)))\n print('')\n for arg_2 in arg_1:\n print('{}: {}'.format(arg_2, __get_mod_version(arg_2)))\n pass"} +{"_id": "doc_2074", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n '''Handle renamed arguments.\n\n Parameters\n ----------\n old_name : str\n old_value\n The name and value of the old argument\n\n new_name : str\n new_value\n The name and value of the new argument\n\n version_deprecated : str\n The version at which the old name became deprecated\n\n version_removed : str\n The version at which the old name will be removed\n\n Returns\n -------\n value\n - `new_value` if `old_value` of type `Deprecated`\n - `old_value` otherwise\n\n Warnings\n --------\n if `old_value` is not of type `Deprecated`\n\n '''\n if isinstance(arg_1, Deprecated):\n return arg_3\n else:\n arg_6 = inspect.stack()\n arg_7 = arg_6[1]\n arg_8 = arg_6[2]\n\n warnings.warn_explicit(\"{:s}() keyword argument '{:s}' has been \"\n \"renamed to '{:s}' in version {:}.\"\n \"\\n\\tThis alias will be removed in version \"\n \"{:}.\".format(arg_7[3],\n arg_0, arg_2,\n arg_4,\n arg_5),\n category=DeprecationWarning,\n filename=arg_8[1],\n lineno=arg_8[2])\n\n return arg_1"} +{"_id": "doc_2075", "title": "", "text": "def Func(arg_0=None):\n '''Set the FFT library used by librosa.\n\n Parameters\n ----------\n lib : None or module\n Must implement an interface compatible with `numpy.fft`.\n If `None`, reverts to `numpy.fft`.\n\n Examples\n --------\n Use `pyfftw`:\n\n >>> import pyfftw\n >>> librosa.Func(pyfftw.interfaces.numpy_fft)\n\n Reset to default `numpy` implementation\n\n >>> librosa.Func()\n\n '''\n\n global arg_1\n if arg_0 is None:\n from numpy import fft\n arg_0 = fft\n\n arg_1 = arg_0"} +{"_id": "doc_2076", "title": "", "text": "def Func(arg_0, arg_1):\n '''Beat tracking function\n\n :parameters:\n - input_file : str\n Path to input audio file (wav, mp3, m4a, flac, etc.)\n\n - output_file : str\n Path to save beat event timestamps as a CSV file\n '''\n\n print('Loading ', arg_0)\n arg_2, arg_3 = librosa.load(arg_0, arg_3=22050)\n\n # Use a default hop size of 512 samples @ 22KHz ~= 23ms\n arg_4 = 512\n\n # This is the window length used by default in stft\n print('Tracking beats')\n arg_5, arg_6 = librosa.beat.Func(arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n\n print('Estimated tempo: {:0.2f} beats per minute'.format(arg_5))\n\n # save output\n # 'beats' will contain the frame numbers of beat events.\n arg_7 = librosa.frames_to_time(arg_6, arg_3=arg_3, arg_4=arg_4)\n\n print('Saving output to ', arg_1)\n librosa.output.times_csv(arg_1, arg_7)\n 
print('done!')"} +{"_id": "doc_2077", "title": "", "text": "def Func(arg_0, arg_1):\n '''Load audio, estimate tuning, apply pitch correction, and save.'''\n print('Loading ', arg_0)\n arg_2, arg_3 = librosa.load(arg_0)\n\n print('Separating harmonic component ... ')\n arg_4 = librosa.effects.harmonic(arg_2)\n\n print('Estimating tuning ... ')\n # Just track the pitches associated with high magnitude\n arg_5 = librosa.estimate_tuning(arg_2=arg_4, arg_3=arg_3)\n\n print('{:+0.2f} cents'.format(100 * arg_5))\n print('Applying pitch-correction of {:+0.2f} cents'.format(-100 * arg_5))\n arg_6 = librosa.effects.pitch_shift(arg_2, arg_3, -arg_5)\n\n print('Saving tuned audio to: ', arg_1)\n librosa.output.write_wav(arg_1, arg_6, arg_3)"} +{"_id": "doc_2078", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False):\n '''Convert one or more MIDI numbers to note strings.\n\n MIDI numbers will be rounded to the nearest integer.\n\n Notes will be of the format 'C0', 'C#0', 'D0', ...\n\n Examples\n --------\n >>> librosa.Func(0)\n 'C-1'\n >>> librosa.Func(37)\n 'C#2'\n >>> librosa.Func(-2)\n 'A#-2'\n >>> librosa.Func(104.7)\n 'A7'\n >>> librosa.Func(104.7, cents=True)\n 'A7-30'\n >>> librosa.Func(list(range(12, 24)))\n ['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']\n\n Parameters\n ----------\n midi : int or iterable of int\n Midi numbers to convert.\n\n octave: bool\n If True, include the octave number\n\n cents: bool\n If true, cent markers will be appended for fractional notes.\n Eg, `Func(69.3, cents=True)` == `A4+03`\n\n Returns\n -------\n notes : str or iterable of str\n Strings describing each midi note.\n\n Raises\n ------\n ParameterError\n if `cents` is True and `octave` is False\n\n See Also\n --------\n midi_to_hz\n note_to_midi\n hz_to_note\n '''\n\n if arg_2 and not arg_1:\n raise ParameterError('Cannot encode cents without octave information.')\n\n if not np.isscalar(arg_0):\n return [Func(arg_3, arg_1=arg_1, arg_2=arg_2) for arg_3 in arg_0]\n\n arg_4 = ['C', 'C#', 'D', 'D#',\n 'E', 'F', 'F#', 'G',\n 'G#', 'A', 'A#', 'B']\n\n arg_5 = int(np.round(arg_0))\n arg_6 = int(100 * np.around(arg_0 - arg_5, 2))\n\n arg_7 = arg_4[arg_5 % 12]\n\n if arg_1:\n arg_7 = '{:s}{:0d}'.format(arg_7, int(arg_5 / 12) - 1)\n if arg_2:\n arg_7 = '{:s}{:+02d}'.format(arg_7, arg_6)\n\n return arg_7"} +{"_id": "doc_2079", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Convert Hz to Mels\n\n Examples\n --------\n >>> librosa.Func(60)\n 0.9\n >>> librosa.Func([110, 220, 440])\n array([ 1.65, 3.3 , 6.6 ])\n\n Parameters\n ----------\n frequencies : number or np.ndarray [shape=(n,)] , float\n scalar or array of frequencies\n htk : bool\n use HTK formula instead of Slaney\n\n Returns\n -------\n mels : number or np.ndarray [shape=(n,)]\n input frequencies in Mels\n\n See Also\n --------\n mel_to_hz\n \"\"\"\n\n arg_0 = np.asanyarray(arg_0)\n\n if arg_1:\n return 2595.0 * np.log10(1.0 + arg_0 / 700.0)\n\n # Fill in the linear part\n arg_2 = 0.0\n arg_3 = 200.0 / 3\n\n arg_4 = (arg_0 - arg_2) / arg_3\n\n # Fill in the log-scale part\n\n arg_5 = 1000.0 # beginning of log region (Hz)\n arg_6 = (arg_5 - arg_2) / arg_3 # same (Mels)\n arg_7 = np.log(6.4) / 27.0 # step size for log region\n\n if arg_0.ndim:\n # If we have array data, vectorize\n arg_8 = (arg_0 >= arg_5)\n arg_4[arg_8] = arg_6 + np.log(arg_0[arg_8]/arg_5) / arg_7\n elif arg_0 >= arg_5:\n # If we have scalar data, heck directly\n arg_4 = arg_6 + np.log(arg_0 / arg_5) / arg_7\n\n return arg_4"} +{"_id": 
"doc_2080", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Convert mel bin numbers to frequencies\n\n Examples\n --------\n >>> librosa.Func(3)\n 200.\n\n >>> librosa.Func([1,2,3,4,5])\n array([ 66.667, 133.333, 200. , 266.667, 333.333])\n\n Parameters\n ----------\n mels : np.ndarray [shape=(n,)], float\n mel bins to convert\n htk : bool\n use HTK formula instead of Slaney\n\n Returns\n -------\n frequencies : np.ndarray [shape=(n,)]\n input mels in Hz\n\n See Also\n --------\n hz_to_mel\n \"\"\"\n\n arg_0 = np.asanyarray(arg_0)\n\n if arg_1:\n return 700.0 * (10.0**(arg_0 / 2595.0) - 1.0)\n\n # Fill in the linear scale\n arg_2 = 0.0\n arg_3 = 200.0 / 3\n arg_4 = arg_2 + arg_3 * arg_0\n\n # And now the nonlinear scale\n arg_5 = 1000.0 # beginning of log region (Hz)\n arg_6 = (arg_5 - arg_2) / arg_3 # same (Mels)\n arg_7 = np.log(6.4) / 27.0 # step size for log region\n\n if arg_0.ndim:\n # If we have vector data, vectorize\n arg_8 = (arg_0 >= arg_6)\n arg_4[arg_8] = arg_5 * np.exp(arg_7 * (arg_0[arg_8] - arg_6))\n elif arg_0 >= arg_6:\n # If we have scalar data, check directly\n arg_4 = arg_5 * np.exp(arg_7 * (arg_0 - arg_6))\n\n return arg_4"} +{"_id": "doc_2081", "title": "", "text": "def Func(arg_0=22050, arg_1=2048):\n '''Alternative implementation of `np.fft.fftfreq`\n\n Parameters\n ----------\n sr : number > 0 [scalar]\n Audio sampling rate\n\n n_fft : int > 0 [scalar]\n FFT window size\n\n\n Returns\n -------\n freqs : np.ndarray [shape=(1 + n_fft/2,)]\n Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)`\n\n\n Examples\n --------\n >>> librosa.Func(sr=22050, n_fft=16)\n array([ 0. , 1378.125, 2756.25 , 4134.375,\n 5512.5 , 6890.625, 8268.75 , 9646.875, 11025. ])\n\n '''\n\n return np.linspace(0,\n float(arg_0) / 2,\n int(1 + arg_1//2),\n endpoint=True)"} +{"_id": "doc_2082", "title": "", "text": "def Func(arg_0, arg_1=-80.0): # pylint: disable=invalid-name\n '''Compute the A-weighting of a set of frequencies.\n\n Parameters\n ----------\n frequencies : scalar or np.ndarray [shape=(n,)]\n One or more frequencies (in Hz)\n\n min_db : float [scalar] or None\n Clip weights below this threshold.\n If `None`, no clipping is performed.\n\n Returns\n -------\n Func : scalar or np.ndarray [shape=(n,)]\n `Func[i]` is the A-weighting of `frequencies[i]`\n\n See Also\n --------\n perceptual_weighting\n\n\n Examples\n --------\n\n Get the A-weighting for CQT frequencies\n\n >>> import matplotlib.pyplot as plt\n >>> freqs = librosa.cqt_frequencies(108, librosa.note_to_hz('C1'))\n >>> aw = librosa.Func(freqs)\n >>> plt.plot(freqs, aw)\n >>> plt.xlabel('Frequency (Hz)')\n >>> plt.ylabel('Weighting (log10)')\n >>> plt.title('A-Weighting of CQT frequencies')\n\n '''\n\n # Vectorize to make our lives easier\n arg_0 = np.asanyarray(arg_0)\n\n # Pre-compute squared frequency\n arg_2 = arg_0**2.0\n\n arg_3 = np.array([12200, 20.6, 107.7, 737.9])**2.0\n\n arg_4 = 2.0 + 20.0 * (np.log10(arg_3[0]) + 4 * np.log10(arg_0)\n - np.log10(arg_2 + arg_3[0])\n - np.log10(arg_2 + arg_3[1])\n - 0.5 * np.log10(arg_2 + arg_3[2])\n - 0.5 * np.log10(arg_2 + arg_3[3]))\n\n if arg_1 is not None:\n arg_4 = np.maximum(arg_1, arg_4)\n\n return arg_4"} +{"_id": "doc_2083", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=512, arg_3=None, arg_4=-1):\n \"\"\"Return an array of time values to match the time axis from a feature matrix.\n\n Parameters\n ----------\n X : np.ndarray or scalar\n - If ndarray, X is a feature matrix, e.g. 
STFT, chromagram, or mel spectrogram.\n - If scalar, X represents the number of frames.\n\n sr : number > 0 [scalar]\n audio sampling rate\n\n hop_length : int > 0 [scalar]\n number of samples between successive frames\n\n n_fft : None or int > 0 [scalar]\n Optional: length of the FFT window.\n If given, time conversion will include an offset of `n_fft / 2`\n to counteract windowing effects when using a non-centered STFT.\n\n axis : int [scalar]\n The axis representing the time axis of X.\n By default, the last axis (-1) is taken.\n\n Returns\n -------\n times : np.ndarray [shape=(n,)]\n ndarray of times (in seconds) corresponding to each frame of X.\n\n See Also\n --------\n samples_like : Return an array of sample indices to match the time axis from a feature matrix.\n\n Examples\n --------\n Provide a feature matrix input:\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> X = librosa.stft(y)\n >>> times = librosa.Func(X)\n >>> times\n array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,\n 6.13935601e+01, 6.14167800e+01, 6.14400000e+01])\n\n Provide a scalar input:\n\n >>> n_frames = 2647\n >>> times = librosa.Func(n_frames)\n >>> times\n array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,\n 6.13935601e+01, 6.14167800e+01, 6.14400000e+01])\n \"\"\"\n arg_5 = samples_like(arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n return samples_to_time(arg_5, arg_1=arg_1)"} +{"_id": "doc_2084", "title": "", "text": "def Func(arg_0, arg_1=512, arg_2=None, arg_3=-1):\n \"\"\"Return an array of sample indices to match the time axis from a feature matrix.\n\n Parameters\n ----------\n X : np.ndarray or scalar\n - If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.\n - If scalar, X represents the number of frames.\n\n hop_length : int > 0 [scalar]\n number of samples between successive frames\n\n n_fft : None or int > 0 [scalar]\n Optional: length of the FFT window.\n If given, time conversion will include an offset of `n_fft / 2`\n to counteract windowing effects when using a non-centered STFT.\n\n axis : int [scalar]\n The axis representing the time axis of X.\n By default, the last axis (-1) is taken.\n\n Returns\n -------\n samples : np.ndarray [shape=(n,)]\n ndarray of sample indices corresponding to each frame of X.\n\n See Also\n --------\n times_like : Return an array of time values to match the time axis from a feature matrix.\n\n Examples\n --------\n Provide a feature matrix input:\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> X = librosa.stft(y)\n >>> samples = librosa.Func(X)\n >>> samples\n array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])\n\n Provide a scalar input:\n\n >>> n_frames = 2647\n >>> samples = librosa.Func(n_frames)\n >>> samples\n array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])\n \"\"\"\n if np.isscalar(arg_0):\n arg_4 = np.arange(arg_0)\n else:\n arg_4 = np.arange(arg_0.shape[arg_3])\n return frames_to_samples(arg_4, arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_2085", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=512, arg_3=None, arg_4=84,\n arg_5=12, arg_6=0.0, arg_7=1,\n arg_8=1, arg_9=0.01, arg_10='hann', arg_11=True,\n arg_12='reflect', arg_13=None):\n '''Compute the hybrid constant-Q transform of an audio signal.\n\n Here, the hybrid CQT uses the pseudo CQT for higher frequencies where\n the hop_length is longer than half the filter length and the full CQT\n for lower frequencies.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : 
number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter filter_scale factor. Larger values use longer windows.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n pad_mode : string\n Padding mode for centered frame analysis.\n\n See also: `librosa.core.stft` and `np.pad`.\n\n res_type : string\n Resampling mode. See `librosa.core.cqt` for details.\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]\n Constant-Q energy for each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n See Also\n --------\n cqt\n pseudo_cqt\n\n Notes\n -----\n This function caches at level 20.\n\n '''\n\n if arg_3 is None:\n # C1 by default\n arg_3 = note_to_hz('C1')\n\n if arg_6 is None:\n arg_6 = estimate_tuning(arg_0=arg_0, arg_1=arg_1)\n\n # Get all CQT frequencies\n arg_14 = cqt_frequencies(arg_4, arg_3,\n arg_5=arg_5,\n arg_6=arg_6)\n\n # Compute the length of each constant-Q basis function\n arg_15 = filters.constant_q_lengths(arg_1, arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_10=arg_10)\n\n # Determine which filters to use with Pseudo CQT\n # These are the ones that fit within 2 hop lengths after padding\n arg_16 = 2.0**np.ceil(np.log2(arg_15)) < 2 * arg_2\n\n arg_17 = int(np.sum(arg_16))\n\n arg_18 = arg_4 - arg_17\n arg_19 = []\n\n if arg_17 > 0:\n arg_20 = np.min(arg_14[arg_16])\n\n arg_19.append(pseudo_cqt(arg_0, arg_1,\n arg_2=arg_2,\n arg_3=arg_20,\n arg_4=arg_17,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12))\n\n if arg_18 > 0:\n arg_19.append(np.abs(cqt(arg_0, arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_18,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13)))\n\n return __trim_stack(arg_19, arg_4)"} +{"_id": "doc_2086", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=512, arg_3=None, arg_4=84,\n arg_5=12, arg_6=0.0, arg_7=1,\n arg_8=1, arg_9=0.01, arg_10='hann', arg_11=True,\n arg_12='reflect'):\n '''Compute the pseudo constant-Q transform of an audio signal.\n\n This uses a single fft size that is the smallest power of 2 that is greater\n than or equal to the max of:\n\n 1. The longest CQT filter\n 2. 2x the hop_length\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. 
Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter filter_scale factor. Larger values use longer windows.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n pad_mode : string\n Padding mode for centered frame analysis.\n\n See also: `librosa.core.stft` and `np.pad`.\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]\n Pseudo Constant-Q energy for each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n Notes\n -----\n This function caches at level 20.\n\n '''\n\n if arg_3 is None:\n # C1 by default\n arg_3 = note_to_hz('C1')\n\n if arg_6 is None:\n arg_6 = estimate_tuning(arg_0=arg_0, arg_1=arg_1)\n\n arg_13, arg_14, arg_15 = __cqt_filter_fft(arg_1, arg_3, arg_4,\n arg_5,\n arg_6, arg_7,\n arg_8, arg_9,\n arg_2=arg_2,\n arg_10=arg_10)\n\n arg_13 = np.abs(arg_13)\n\n # Compute the magnitude STFT with Hann window\n arg_16 = np.abs(stft(arg_0, arg_14=arg_14, arg_2=arg_2, arg_12=arg_12))\n\n # Project onto the pseudo-cqt basis\n arg_17 = arg_13.dot(arg_16)\n\n if arg_11:\n arg_17 /= np.sqrt(arg_14)\n else:\n arg_18 = filters.constant_q_lengths(arg_1, arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_10=arg_10,\n arg_7=arg_7)\n\n arg_17 *= np.sqrt(arg_18[:, np.newaxis] / arg_14)\n\n return arg_17"} +{"_id": "doc_2087", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8=None,\n arg_9='hann'):\n '''Generate the frequency domain constant-Q filter basis.'''\n\n arg_10, arg_11 = filters.constant_q(arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n pad_fft=True,\n arg_9=arg_9)\n\n # Filters are padded up to the nearest integral power of 2\n arg_12 = arg_10.shape[1]\n\n if (arg_8 is not None and\n arg_12 < 2.0**(1 + np.ceil(np.log2(arg_8)))):\n\n arg_12 = int(2.0 ** (1 + np.ceil(np.log2(arg_8))))\n\n # re-normalize bases with respect to the FFT window length\n arg_10 *= arg_11[:, np.newaxis] / float(arg_12)\n\n # FFT and retain only the non-negative frequencies\n arg_13 = get_fftlib()\n arg_14 = arg_13.fft(arg_10, n=arg_12, axis=1)[:, :(arg_12 // 2)+1]\n\n # sparsify the basis\n arg_14 = util.sparsify_rows(arg_14, quantile=arg_7)\n\n return arg_14, arg_12, arg_11"} +{"_id": "doc_2088", "title": "", "text": "def Func(arg_0, arg_1):\n '''Helper function to trim and stack a collection of CQT responses'''\n\n # cleanup any framing errors at the boundaries\n arg_2 = min(x.shape[1] for x in arg_0)\n\n arg_0 = np.vstack([x[:, :arg_2] for x in arg_0][::-1])\n\n # Finally, clip out any bottom frequencies that we don't really want\n # Transpose magic here to ensure column-contiguity\n return np.ascontiguousarray(arg_0[-arg_1:].T).T"} +{"_id": "doc_2089", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''Compute the filter response with 
a target STFT hop.'''\n\n # Compute the STFT matrix\n arg_5 = stft(arg_0, arg_1=arg_1, arg_2=arg_2,\n window='ones',\n pad_mode=arg_4)\n\n # And filter response energy\n return arg_3.dot(arg_5)"} +{"_id": "doc_2090", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''Compute the number of early downsampling operations'''\n\n arg_4 = max(0, int(np.ceil(np.log2(audio.BW_FASTEST * arg_0 /\n arg_1)) - 1) - 1)\n\n arg_5 = __num_two_factors(arg_2)\n arg_6 = max(0, arg_5 - arg_3 + 1)\n\n return min(arg_4, arg_6)"} +{"_id": "doc_2091", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7):\n '''Perform early downsampling on an audio signal, if it applies.'''\n\n arg_8 = Func_count(arg_5, arg_6,\n arg_2, arg_4)\n\n if arg_8 > 0 and arg_3 == 'kaiser_fast':\n arg_9 = 2**(arg_8)\n\n arg_2 //= arg_9\n\n if len(arg_0) < arg_9:\n raise ParameterError('Input signal length={:d} is too short for '\n '{:d}-octave CQT'.format(len(arg_0), arg_4))\n\n arg_10 = arg_1 / float(arg_9)\n arg_0 = audio.resample(arg_0, arg_1, arg_10,\n arg_3=arg_3,\n arg_7=True)\n\n # If we're not going to length-scale after CQT, we\n # need to compensate for the downsampling factor here\n if not arg_7:\n arg_0 *= np.sqrt(arg_9)\n\n arg_1 = arg_10\n\n return arg_0, arg_1, arg_2"} +{"_id": "doc_2092", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6, arg_7): # pragma: no cover\n '''Calculate the accumulated cost matrix D.\n\n Use dynamic programming to calculate the accumulated costs.\n\n Parameters\n ----------\n C : np.ndarray [shape=(N, M)]\n pre-computed cost matrix\n\n D : np.ndarray [shape=(N, M)]\n accumulated cost matrix\n\n D_steps : np.ndarray [shape=(N, M)]\n steps which were used for calculating D\n\n step_sizes_sigma : np.ndarray [shape=[n, 2]]\n Specifies allowed step sizes as used by the dtw.\n\n weights_add : np.ndarray [shape=[n, ]]\n Additive weights to penalize certain step sizes.\n\n weights_mul : np.ndarray [shape=[n, ]]\n Multiplicative weights to penalize certain step sizes.\n\n max_0 : int\n maximum number of steps in step_sizes_sigma in dim 0.\n\n max_1 : int\n maximum number of steps in step_sizes_sigma in dim 1.\n\n Returns\n -------\n D : np.ndarray [shape=(N,M)]\n accumulated cost matrix.\n D[N,M] is the total alignment cost.\n When doing subsequence DTW, D[N,:] indicates a matching function.\n\n D_steps : np.ndarray [shape=(N,M)]\n steps which were used for calculating D.\n\n See Also\n --------\n dtw\n '''\n for arg_8 in range(arg_6, arg_1.shape[0]):\n for arg_9 in range(arg_7, arg_1.shape[1]):\n # accumulate costs\n for arg_10, arg_11, arg_12 in zip(range(arg_3.shape[0]),\n arg_5, arg_4):\n arg_13 = arg_1[arg_8 - arg_3[arg_10, 0],\n arg_9 - arg_3[arg_10, 1]]\n arg_14 = arg_12 * arg_0[arg_8 - arg_6, arg_9 - arg_7]\n arg_14 += arg_11\n arg_15 = arg_13 + arg_14\n\n # check if cur_cost is smaller than the one stored in D\n if arg_15 < arg_1[arg_8, arg_9]:\n arg_1[arg_8, arg_9] = arg_15\n\n # save step-index\n arg_2[arg_8, arg_9] = arg_10\n\n return arg_1, arg_2"} +{"_id": "doc_2093", "title": "", "text": "def Func(arg_0, arg_1): # pragma: no cover\n '''Backtrack optimal warping path.\n\n Uses the saved step sizes from the cost accumulation\n step to backtrack the index pairs for an optimal\n warping path.\n\n\n Parameters\n ----------\n D_steps : np.ndarray [shape=(N, M)]\n Saved indices of the used steps used in the calculation of D.\n\n step_sizes_sigma : np.ndarray [shape=[n, 2]]\n Specifies allowed step sizes as used by the 
dtw.\n\n Returns\n -------\n wp : list [shape=(N,)]\n Warping path with index pairs.\n Each list entry contains an index pair\n (n,m) as a tuple\n\n See Also\n --------\n dtw\n '''\n arg_2 = []\n # Set starting point D(N,M) and append it to the path\n arg_3 = (arg_0.shape[0] - 1, arg_0.shape[1] - 1)\n arg_2.append((arg_3[0], arg_3[1]))\n\n # Loop backwards.\n # Stop criteria:\n # Setting it to (0, 0) does not work for the subsequence dtw,\n # so we only ask to reach the first row of the matrix.\n while arg_3[0] > 0:\n arg_4 = arg_0[(arg_3[0], arg_3[1])]\n\n # save tuple with minimal acc. cost in path\n arg_3 = (arg_3[0] - arg_1[arg_4][0],\n arg_3[1] - arg_1[arg_4][1])\n\n # append to warping path\n arg_2.append((arg_3[0], arg_3[1]))\n\n return arg_2"} +{"_id": "doc_2094", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5): # pragma: no cover\n '''Core Viterbi algorithm.\n\n This is intended for internal use only.\n\n Parameters\n ----------\n log_prob : np.ndarray [shape=(T, m)]\n `log_prob[t, s]` is the conditional log-likelihood\n log P[X = X(t) | State(t) = s]\n\n log_trans : np.ndarray [shape=(m, m)]\n The log transition matrix\n `log_trans[i, j]` = log P[State(t+1) = j | State(t) = i]\n\n log_p_init : np.ndarray [shape=(m,)]\n log of the initial state distribution\n\n state : np.ndarray [shape=(T,), dtype=int]\n Pre-allocated state index array\n\n value : np.ndarray [shape=(T, m)] float\n Pre-allocated value array\n\n ptr : np.ndarray [shape=(T, m), dtype=int]\n Pre-allocated pointer array\n\n Returns\n -------\n None\n All computations are performed in-place on `state, value, ptr`.\n '''\n arg_6, arg_7 = arg_0.shape\n\n # factor in initial state distribution\n arg_4[0] = arg_0[0] + arg_2\n\n for arg_8 in range(1, arg_6):\n # Want V[t, j] <- p[t, j] * max_k V[t-1, k] * A[k, j]\n # assume at time t-1 we were in state k\n # transition k -> j\n\n # Broadcast over rows:\n # Tout[k, j] = V[t-1, k] * A[k, j]\n # then take the max over columns\n # We'll do this in log-space for stability\n\n arg_9 = arg_4[arg_8 - 1] + arg_1.T\n\n # Unroll the max/argmax loop to enable numba support\n for arg_10 in range(arg_7):\n arg_5[arg_8, arg_10] = np.argmax(arg_9[arg_10])\n # value[t, j] = log_prob[t, j] + np.max(trans_out[j])\n arg_4[arg_8, arg_10] = arg_0[arg_8, arg_10] + arg_9[arg_10, arg_5[arg_8][arg_10]]\n\n # Now roll backward\n\n # Get the last state\n arg_3[-1] = np.argmax(arg_4[-1])\n\n for arg_8 in range(arg_6 - 2, -1, -1):\n arg_3[arg_8] = arg_5[arg_8+1, arg_3[arg_8+1]]"} +{"_id": "doc_2095", "title": "", "text": "def Func(arg_0, arg_1):\n '''Construct a self-loop transition matrix over `n_states`.\n\n The transition matrix will have the following properties:\n\n - `transition[i, i] = p` for all i\n - `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`\n\n This type of transition matrix is appropriate when states tend to be\n locally stable, and there is no additional structure between different\n states. 
This is primarily useful for de-noising frame-wise predictions.\n\n Parameters\n ----------\n n_states : int > 1\n The number of states\n\n prob : float in [0, 1] or iterable, length=n_states\n If a scalar, this is the probability of a self-transition.\n\n If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition.\n\n Returns\n -------\n transition : np.ndarray [shape=(n_states, n_states)]\n The transition matrix\n\n Examples\n --------\n >>> librosa.sequence.Func(3, 0.5)\n array([[0.5 , 0.25, 0.25],\n [0.25, 0.5 , 0.25],\n [0.25, 0.25, 0.5 ]])\n\n >>> librosa.sequence.Func(3, [0.8, 0.5, 0.25])\n array([[0.8 , 0.1 , 0.1 ],\n [0.25 , 0.5 , 0.25 ],\n [0.375, 0.375, 0.25 ]])\n '''\n\n if not isinstance(arg_0, int) or arg_0 <= 1:\n raise ParameterError('n_states={} must be a positive integer > 1')\n\n arg_2 = np.empty((arg_0, arg_0), dtype=np.float)\n\n # if it's a float, make it a vector\n arg_1 = np.asarray(arg_1, dtype=np.float)\n\n if arg_1.ndim == 0:\n arg_1 = np.tile(arg_1, arg_0)\n\n if arg_1.shape != (arg_0,):\n raise ParameterError('prob={} must have length equal to n_states={}'.format(arg_1, arg_0))\n\n if np.any(arg_1 < 0) or np.any(arg_1 > 1):\n raise ParameterError('prob={} must have values in the range [0, 1]'.format(arg_1))\n\n for arg_3, arg_4 in enumerate(arg_1):\n arg_2[arg_3] = (1. - arg_4) / (arg_0 - 1)\n arg_2[arg_3, arg_3] = arg_4\n\n return arg_2"} +{"_id": "doc_2096", "title": "", "text": "def Func(arg_0, arg_1, arg_2='triangle', arg_3=False):\n '''Construct a localized transition matrix.\n\n The transition matrix will have the following properties:\n\n - `transition[i, j] = 0` if `|i - j| > width`\n - `transition[i, i]` is maximal\n - `transition[i, i - width//2 : i + width//2]` has shape `window`\n\n This type of transition matrix is appropriate for state spaces\n that discretely approximate continuous variables, such as in fundamental\n frequency estimation.\n\n Parameters\n ----------\n n_states : int > 1\n The number of states\n\n width : int >= 1 or iterable\n The maximum number of states to treat as \"local\".\n If iterable, it should have length equal to `n_states`,\n and specify the width independently for each state.\n\n window : str, callable, or window specification\n The window function to determine the shape of the \"local\" distribution.\n\n Any window specification supported by `filters.get_window` will work here.\n\n .. note:: Certain windows (e.g., 'hann') are identically 0 at the boundaries,\n so and effectively have `width-2` non-zero values. You may have to expand\n `width` to get the desired behavior.\n\n\n wrap : bool\n If `True`, then state locality `|i - j|` is computed modulo `n_states`.\n If `False` (default), then locality is absolute.\n\n See Also\n --------\n filters.get_window\n\n Returns\n -------\n transition : np.ndarray [shape=(n_states, n_states)]\n The transition matrix\n\n Examples\n --------\n\n Triangular distributions with and without wrapping\n\n >>> librosa.sequence.Func(5, 3, window='triangle', wrap=False)\n array([[0.667, 0.333, 0. , 0. , 0. ],\n [0.25 , 0.5 , 0.25 , 0. , 0. ],\n [0. , 0.25 , 0.5 , 0.25 , 0. ],\n [0. , 0. , 0.25 , 0.5 , 0.25 ],\n [0. , 0. , 0. , 0.333, 0.667]])\n\n >>> librosa.sequence.Func(5, 3, window='triangle', wrap=True)\n array([[0.5 , 0.25, 0. , 0. , 0.25],\n [0.25, 0.5 , 0.25, 0. , 0. ],\n [0. , 0.25, 0.5 , 0.25, 0. ],\n [0. , 0. , 0.25, 0.5 , 0.25],\n [0.25, 0. , 0. 
, 0.25, 0.5 ]])\n\n Uniform local distributions with variable widths and no wrapping\n\n >>> librosa.sequence.Func(5, [1, 2, 3, 3, 1], window='ones', wrap=False)\n array([[1. , 0. , 0. , 0. , 0. ],\n [0.5 , 0.5 , 0. , 0. , 0. ],\n [0. , 0.333, 0.333, 0.333, 0. ],\n [0. , 0. , 0.333, 0.333, 0.333],\n [0. , 0. , 0. , 0. , 1. ]])\n '''\n\n if not isinstance(arg_0, int) or arg_0 <= 1:\n raise ParameterError('n_states={} must be a positive integer > 1')\n\n arg_1 = np.asarray(arg_1, dtype=int)\n if arg_1.ndim == 0:\n arg_1 = np.tile(arg_1, arg_0)\n\n if arg_1.shape != (arg_0,):\n raise ParameterError('width={} must have length equal to n_states={}'.format(arg_1, arg_0))\n\n if np.any(arg_1 < 1):\n raise ParameterError('width={} must be at least 1')\n\n arg_4 = np.zeros((arg_0, arg_0), dtype=np.float)\n\n # Fill in the widths. This is inefficient, but simple\n for arg_5, arg_6 in enumerate(arg_1):\n arg_7 = pad_center(get_window(arg_2, arg_6, fftbins=False), arg_0)\n arg_7 = np.roll(arg_7, arg_0//2 + arg_5 + 1)\n\n if not arg_3:\n # Knock out the off-diagonal-band elements\n arg_7[arg_8(arg_0, arg_5 + arg_6//2 + 1):] = 0\n arg_7[:arg_9(0, arg_5 - arg_6//2)] = 0\n\n arg_4[arg_5] = arg_7\n\n # Row-normalize\n arg_4 /= arg_4.sum(axis=1, keepdims=True)\n\n return arg_4"} +{"_id": "doc_2097", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=512,\n arg_4=False, arg_5=None,\n arg_6='frames', **arg_7):\n \"\"\"Basic onset detector. Locate note onset events by picking peaks in an\n onset strength envelope.\n\n The `peak_pick` parameters were chosen by large-scale hyper-parameter\n optimization over the dataset provided by [1]_.\n\n .. [1] https://github.com/CPJKU/onset_db\n\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n onset_envelope : np.ndarray [shape=(m,)]\n (optional) pre-computed onset strength envelope\n\n hop_length : int > 0 [scalar]\n hop length (in samples)\n\n units : {'frames', 'samples', 'time'}\n The units to encode detected onset events in.\n By default, 'frames' are used.\n\n backtrack : bool\n If `True`, detected onset events are backtracked to the nearest\n preceding minimum of `energy`.\n\n This is primarily useful when using onsets as slice points for segmentation.\n\n energy : np.ndarray [shape=(m,)] (optional)\n An energy function to use for backtracking detected onset events.\n If none is provided, then `onset_envelope` is used.\n\n kwargs : additional keyword arguments\n Additional parameters for peak picking.\n\n See `librosa.util.peak_pick` for details.\n\n\n Returns\n -------\n\n onsets : np.ndarray [shape=(n_onsets,)]\n estimated positions of detected onsets, in whichever units\n are specified. By default, frame indices.\n\n .. note::\n If no onset strength could be detected, Func returns\n an empty list.\n\n\n Raises\n ------\n ParameterError\n if neither `y` nor `onsets` are provided\n\n or if `units` is not one of 'frames', 'samples', or 'time'\n\n See Also\n --------\n onset_strength : compute onset strength per-frame\n onset_backtrack : backtracking onset events\n librosa.util.peak_pick : pick peaks from a time series\n\n\n Examples\n --------\n Get onset times from a signal\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... 
offset=30, duration=2.0)\n >>> onset_frames = librosa.onset.Func(y=y, sr=sr)\n >>> librosa.frames_to_time(onset_frames, sr=sr)\n array([ 0.07 , 0.395, 0.511, 0.627, 0.766, 0.975,\n 1.207, 1.324, 1.44 , 1.788, 1.881])\n\n Or use a pre-computed onset envelope\n\n >>> o_env = librosa.onset.onset_strength(y, sr=sr)\n >>> times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr)\n >>> onset_frames = librosa.onset.Func(onset_envelope=o_env, sr=sr)\n\n\n >>> import matplotlib.pyplot as plt\n >>> D = np.abs(librosa.stft(y))\n >>> plt.figure()\n >>> ax1 = plt.subplot(2, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),\n ... x_axis='time', y_axis='log')\n >>> plt.title('Power spectrogram')\n >>> plt.subplot(2, 1, 2, sharex=ax1)\n >>> plt.plot(times, o_env, label='Onset strength')\n >>> plt.vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,\n ... linestyle='--', label='Onsets')\n >>> plt.axis('tight')\n >>> plt.legend(frameon=True, framealpha=0.75)\n\n \"\"\"\n\n # First, get the frame->beat strength profile if we don't already have one\n if arg_2 is None:\n if arg_0 is None:\n raise ParameterError('y or onset_envelope must be provided')\n\n arg_2 = onset_strength(arg_0=arg_0, arg_1=arg_1, arg_3=arg_3)\n\n # Shift onset envelope up to be non-negative\n # (a common normalization step to make the threshold more consistent)\n arg_2 -= arg_2.min()\n\n # Do we have any onsets to grab?\n if not arg_2.any():\n return np.array([], dtype=np.int)\n\n # Normalize onset strength function to [0, 1] range\n arg_2 /= arg_2.max()\n\n # These parameter settings found by large-scale search\n arg_7.setdefault('pre_max', 0.03*arg_1//arg_3) # 30ms\n arg_7.setdefault('post_max', 0.00*arg_1//arg_3 + 1) # 0ms\n arg_7.setdefault('pre_avg', 0.10*arg_1//arg_3) # 100ms\n arg_7.setdefault('post_avg', 0.10*arg_1//arg_3 + 1) # 100ms\n arg_7.setdefault('wait', 0.03*arg_1//arg_3) # 30ms\n arg_7.setdefault('delta', 0.07)\n\n # Peak pick the onset envelope\n arg_8 = util.peak_pick(arg_2, **arg_7)\n\n # Optionally backtrack the events\n if arg_4:\n if arg_5 is None:\n arg_5 = arg_2\n\n arg_8 = onset_backtrack(arg_8, arg_5)\n\n if arg_6 == 'frames':\n pass\n elif arg_6 == 'samples':\n arg_8 = core.frames_to_samples(arg_8, arg_3=arg_3)\n elif arg_6 == 'time':\n arg_8 = core.frames_to_time(arg_8, arg_3=arg_3, arg_1=arg_1)\n else:\n raise ParameterError('Invalid unit type: {}'.format(arg_6))\n\n return arg_8"} +{"_id": "doc_2098", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=1, arg_4=1,\n arg_5=None, arg_6=False, arg_7=True, arg_8=None,\n arg_9=None, arg_10=None, **arg_11):\n \"\"\"Compute a spectral flux onset strength envelope across multiple channels.\n\n Onset strength for channel `i` at time `t` is determined by:\n\n `mean_{f in channels[i]} max(0, S[f, t+1] - S[f, t])`\n\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time-series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n S : np.ndarray [shape=(d, m)]\n pre-computed (log-power) spectrogram\n\n lag : int > 0\n time lag for computing differences\n\n max_size : int > 0\n size (in frequency bins) of the local max filter.\n set to `1` to disable filtering.\n\n ref : None or np.ndarray [shape=(d, m)]\n An optional pre-computed reference spectrum, of the same shape as `S`.\n If not provided, it will be computed from `S`.\n If provided, it will override any local max filtering governed by `max_size`.\n\n detrend : bool [scalar]\n Filter the onset strength to remove the DC component\n\n 
center : bool [scalar]\n Shift the onset function by `n_fft / (2 * hop_length)` frames\n\n feature : function\n Function for computing time-series features, eg, scaled spectrograms.\n By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`\n\n aggregate : function or False\n Aggregation function to use when combining onsets\n at different frequency bins.\n\n If `False`, then no aggregation is performed.\n\n Default: `np.mean`\n\n channels : list or None\n Array of channel boundaries or slice objects.\n If `None`, then a single channel is generated to span all bands.\n\n kwargs : additional keyword arguments\n Additional parameters to `feature()`, if `S` is not provided.\n\n\n Returns\n -------\n onset_envelope : np.ndarray [shape=(n_channels, m)]\n array containing the onset strength envelope for each specified channel\n\n\n Raises\n ------\n ParameterError\n if neither `(y, sr)` nor `S` are provided\n\n\n See Also\n --------\n onset_strength\n\n Notes\n -----\n This function caches at level 30.\n\n Examples\n --------\n First, load some audio and plot the spectrogram\n\n >>> import matplotlib.pyplot as plt\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... duration=10.0)\n >>> D = np.abs(librosa.stft(y))\n >>> plt.figure()\n >>> plt.subplot(2, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),\n ... y_axis='log')\n >>> plt.title('Power spectrogram')\n\n Construct a standard onset function over four sub-bands\n\n >>> onset_subbands = librosa.onset.Func(y=y, sr=sr,\n ... channels=[0, 32, 64, 96, 128])\n >>> plt.subplot(2, 1, 2)\n >>> librosa.display.specshow(onset_subbands, x_axis='time')\n >>> plt.ylabel('Sub-bands')\n >>> plt.title('Sub-band onset strength')\n\n \"\"\"\n\n if arg_8 is None:\n arg_8 = melspectrogram\n arg_11.setdefault('fmax', 11025.0)\n\n if arg_9 is None:\n arg_9 = np.mean\n\n if arg_3 < 1 or not isinstance(arg_3, int):\n raise ParameterError('lag must be a positive integer')\n\n if arg_4 < 1 or not isinstance(arg_4, int):\n raise ParameterError('max_size must be a positive integer')\n\n # First, compute mel spectrogram\n if arg_2 is None:\n arg_2 = np.abs(arg_8(arg_0=arg_0, arg_1=arg_1, **arg_11))\n\n # Convert to dBs\n arg_2 = core.power_to_db(arg_2)\n\n # Retrieve the n_fft and hop_length,\n # or default values for onsets if not provided\n arg_12 = arg_11.get('n_fft', 2048)\n arg_13 = arg_11.get('hop_length', 512)\n\n # Ensure that S is at least 2-d\n arg_2 = np.atleast_2d(arg_2)\n\n # Compute the reference spectrogram.\n # Efficiency hack: skip filtering step and pass by reference\n # if max_size will produce a no-op.\n if arg_5 is None:\n if arg_4 == 1:\n arg_5 = arg_2\n else:\n arg_5 = scipy.ndimage.maximum_filter1d(arg_2, arg_4, axis=0)\n elif arg_5.shape != arg_2.shape:\n raise ParameterError('Reference spectrum shape {} must match input spectrum {}'.format(arg_5.shape, arg_2.shape))\n\n # Compute difference to the reference, spaced by lag\n arg_14 = arg_2[:, arg_3:] - arg_5[:, :-arg_3]\n\n # Discard negatives (decreasing amplitude)\n arg_14 = np.maximum(0.0, arg_14)\n\n # Aggregate within channels\n arg_15 = True\n if arg_10 is None:\n arg_10 = [slice(None)]\n else:\n arg_15 = False\n\n if arg_9:\n arg_14 = util.sync(arg_14, arg_10,\n arg_9=arg_9,\n arg_15=arg_15, axis=0)\n\n # compensate for lag\n arg_16 = arg_3\n if arg_7:\n # Counter-act framing effects. 
Shift the onsets by n_fft / hop_length\n arg_16 += arg_12 // (2 * arg_13)\n\n arg_14 = np.pad(arg_14, ([0, 0], [int(arg_16), 0]),\n mode='constant')\n\n # remove the DC component\n if arg_6:\n arg_14 = scipy.signal.lfilter([1.0, -1.0], [1.0, -0.99],\n arg_14, axis=-1)\n\n # Trim to match the input duration\n if arg_7:\n arg_14 = arg_14[:, :arg_2.shape[1]]\n\n return arg_14"} +{"_id": "doc_2099", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=',', arg_4='%0.3f'):\n r\"\"\"Save time steps as in CSV format. This can be used to store the output\n of a beat-tracker or segmentation algorithm.\n\n If only `times` are provided, the file will contain each value\n of `times` on a row::\n\n times[0]\\n\n times[1]\\n\n times[2]\\n\n ...\n\n If `annotations` are also provided, the file will contain\n delimiter-separated values::\n\n times[0],annotations[0]\\n\n times[1],annotations[1]\\n\n times[2],annotations[2]\\n\n ...\n\n\n Parameters\n ----------\n path : string\n path to save the output CSV file\n\n times : list-like of floats\n list of frame numbers for beat events\n\n annotations : None or list-like\n optional annotations for each time step\n\n delimiter : str\n character to separate fields\n\n fmt : str\n format-string for rendering time\n\n Raises\n ------\n ParameterError\n if `annotations` is not `None` and length does not\n match `times`\n\n Examples\n --------\n Write beat-tracker time to CSV\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> tempo, beats = librosa.beat.beat_track(y, sr=sr, units='time')\n >>> librosa.output.Func('beat_times.csv', beats)\n \"\"\"\n\n if arg_2 is not None and len(arg_2) != len(arg_1):\n raise ParameterError('len(annotations) != len(times)')\n\n with open(arg_0, 'w') as output_file:\n arg_5 = csv.writer(output_file, arg_3=arg_3)\n\n if arg_2 is None:\n for arg_6 in arg_1:\n arg_5.writerow([arg_4 % arg_6])\n else:\n for arg_6, arg_7 in zip(arg_1, arg_2):\n arg_5.writerow([(arg_4 % arg_6), arg_7])"} +{"_id": "doc_2100", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Output a time series as a .wav file\n\n Note: only mono or stereo, floating-point data is supported.\n For more advanced and flexible output options, refer to\n `soundfile`.\n\n Parameters\n ----------\n path : str\n path to save the output wav file\n\n y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]\n audio time series (mono or stereo).\n\n Note that only floating-point values are supported.\n\n sr : int > 0 [scalar]\n sampling rate of `y`\n\n norm : boolean [scalar]\n enable amplitude normalization.\n For floating point `y`, scale the data to the range [-1, +1].\n\n Examples\n --------\n Trim a signal to 5 seconds and save it back\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... duration=5.0)\n >>> librosa.output.Func('file_trim_5s.wav', y, sr)\n\n See Also\n --------\n soundfile.write\n \"\"\"\n\n # Validate the buffer. 
Stereo is okay here.\n util.valid_audio(arg_1, mono=False)\n\n # normalize\n if arg_3 and np.issubdtype(arg_1.dtype, np.floating):\n arg_4 = util.normalize(arg_1, arg_3=np.inf, axis=None)\n else:\n arg_4 = arg_1\n\n # Check for stereo\n if arg_4.ndim > 1 and arg_4.shape[0] == 2:\n arg_4 = arg_4.T\n\n # Save\n scipy.io.wavfile.write(arg_0, arg_2, arg_4)"} +{"_id": "doc_2101", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2='magma', arg_3='gray_r', arg_4='coolwarm'):\n '''Get a default colormap from the given data.\n\n If the data is boolean, use a black and white colormap.\n\n If the data has both positive and negative values,\n use a diverging colormap.\n\n Otherwise, use a sequential colormap.\n\n Parameters\n ----------\n data : np.ndarray\n Input data\n\n robust : bool\n If True, discard the top and bottom 2% of data when calculating\n range.\n\n Func_seq : str\n The sequential colormap name\n\n Func_bool : str\n The boolean colormap name\n\n Func_div : str\n The diverging colormap name\n\n Returns\n -------\n Func : matplotlib.colors.Colormap\n The colormap to use for `data`\n\n See Also\n --------\n matplotlib.pyplot.colormaps\n '''\n\n arg_0 = np.atleast_1d(arg_0)\n\n if arg_0.dtype == 'bool':\n return get_Func(arg_3)\n\n arg_0 = arg_0[np.isfinite(arg_0)]\n\n if arg_1:\n arg_5, arg_6 = 2, 98\n else:\n arg_5, arg_6 = 0, 100\n\n arg_7 = np.percentile(arg_0, arg_6)\n arg_8 = np.percentile(arg_0, arg_5)\n\n if arg_8 >= 0 or arg_7 <= 0:\n return get_Func(arg_2)\n\n return get_Func(arg_4)"} +{"_id": "doc_2102", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=5e4, arg_3='time', arg_4=0.0,\n arg_5=1000, arg_6=None, **arg_7):\n '''Plot the amplitude envelope of a waveform.\n\n If `y` is monophonic, a filled curve is drawn between `[-abs(y), abs(y)]`.\n\n If `y` is stereo, the curve is drawn between `[-abs(y[1]), abs(y[0])]`,\n so that the left and right channels are drawn above and below the axis,\n respectively.\n\n Long signals (`duration >= max_points`) are down-sampled to at\n most `max_sr` before plotting.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,) or (2,n)]\n audio time series (mono or stereo)\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n max_points : postive number or None\n Maximum number of time-points to plot: if `max_points` exceeds\n the duration of `y`, then `y` is downsampled.\n\n If `None`, no downsampling is performed.\n\n x_axis : str {'time', 'off', 'none'} or None\n If 'time', the x-axis is given time tick-marks.\n\n ax : matplotlib.axes.Axes or None\n Axes to plot on instead of the default `plt.gca()`.\n\n offset : float\n Horizontal offset (in seconds) to start the waveform plot\n\n max_sr : number > 0 [scalar]\n Maximum sampling rate for the visualization\n\n kwargs\n Additional keyword arguments to `matplotlib.pyplot.fill_between`\n\n Returns\n -------\n pc : matplotlib.collections.PolyCollection\n The PolyCollection created by `fill_between`.\n\n See also\n --------\n librosa.core.resample\n matplotlib.pyplot.fill_between\n\n\n Examples\n --------\n Plot a monophonic waveform\n\n >>> import matplotlib.pyplot as plt\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)\n >>> plt.figure()\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.Func(y, sr=sr)\n >>> plt.title('Monophonic')\n\n Or a stereo waveform\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... 
mono=False, duration=10)\n >>> plt.subplot(3, 1, 2)\n >>> librosa.display.Func(y, sr=sr)\n >>> plt.title('Stereo')\n\n Or harmonic and percussive components with transparency\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)\n >>> y_harm, y_perc = librosa.effects.hpss(y)\n >>> plt.subplot(3, 1, 3)\n >>> librosa.display.Func(y_harm, sr=sr, alpha=0.25)\n >>> librosa.display.Func(y_perc, sr=sr, color='r', alpha=0.5)\n >>> plt.title('Harmonic + Percussive')\n >>> plt.tight_layout()\n '''\n\n util.valid_audio(arg_0, mono=False)\n\n if not (isinstance(arg_5, int) and arg_5 > 0):\n raise ParameterError('max_sr must be a non-negative integer')\n\n arg_8 = arg_1\n arg_9 = 1\n\n if arg_2 is not None:\n if arg_2 <= 0:\n raise ParameterError('max_points must be strictly positive')\n\n if arg_2 < arg_0.shape[-1]:\n arg_8 = min(arg_5, (arg_1 * arg_0.shape[-1]) // arg_2)\n\n arg_9 = arg_1 // arg_8\n\n if arg_0.ndim == 1:\n arg_0 = __envelope(arg_0, arg_9)\n else:\n arg_0 = np.vstack([__envelope(_, arg_9) for _ in arg_0])\n\n if arg_0.ndim > 1:\n arg_10 = arg_0[0]\n arg_11 = -arg_0[1]\n else:\n arg_10 = arg_0\n arg_11 = -arg_0\n\n arg_12 = __check_axes(arg_6)\n\n arg_7.setdefault('color', next(arg_12._get_lines.prop_cycler)['color'])\n\n arg_13 = arg_4 + core.frames_to_time(np.arange(len(arg_10)),\n arg_1=arg_1,\n arg_9=arg_9)\n\n arg_14 = arg_12.fill_between(arg_13, arg_11, arg_10, **arg_7)\n\n arg_12.set_xlim([arg_13.min(), arg_13.max()])\n if arg_3 == 'time':\n arg_12.xaxis.set_major_formatter(TimeFormatter(lag=False))\n arg_12.xaxis.set_label_text('Time')\n elif arg_3 is None or arg_3 in ['off', 'none']:\n arg_12.set_xticks([])\n else:\n raise ParameterError('Unknown x_axis value: {}'.format(arg_3))\n\n return arg_14"} +{"_id": "doc_2103", "title": "", "text": "def Func(arg_0, arg_1):\n '''Helper to set the current image in pyplot mode.\n\n If the provided `ax` is not `None`, then we assume that the user is using the object API.\n In this case, the pyplot current image is not set.\n '''\n\n if arg_0 is None:\n import matplotlib.pyplot as plt\n plt.sci(arg_1)"} +{"_id": "doc_2104", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n '''Compute axis coordinates'''\n\n if arg_1 is not None:\n if len(arg_1) < arg_2:\n raise ParameterError('Coordinate shape mismatch: '\n '{}<{}'.format(len(arg_1), arg_2))\n return arg_1\n\n arg_4 = {'linear': __coord_fft_hz,\n 'hz': __coord_fft_hz,\n 'log': __coord_fft_hz,\n 'mel': __coord_mel_hz,\n 'cqt': __coord_cqt_hz,\n 'cqt_hz': __coord_cqt_hz,\n 'cqt_note': __coord_cqt_hz,\n 'chroma': __coord_chroma,\n 'time': __coord_time,\n 's': __coord_time,\n 'ms': __coord_time,\n 'lag': __coord_time,\n 'lag_s': __coord_time,\n 'lag_ms': __coord_time,\n 'tonnetz': __coord_n,\n 'off': __coord_n,\n 'tempo': __coord_tempo,\n 'frames': __coord_n,\n None: __coord_n}\n\n if arg_0 not in arg_4:\n raise ParameterError('Unknown axis type: {}'.format(arg_0))\n return arg_4[arg_0](arg_2, **arg_3)"} +{"_id": "doc_2105", "title": "", "text": "def Func(arg_0):\n '''Check if \"axes\" is an instance of an axis object. If not, use `gca`.'''\n if arg_0 is None:\n import matplotlib.pyplot as plt\n arg_0 = plt.gca()\n elif not isinstance(arg_0, Axes):\n raise ValueError(\"`axes` must be an instance of matplotlib.axes.Axes. 
\"\n \"Found type(axes)={}\".format(type(arg_0)))\n return arg_0"} +{"_id": "doc_2106", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=12, **arg_3):\n '''Get CQT bin frequencies'''\n if arg_1 is None:\n arg_1 = core.note_to_hz('C1')\n\n # we drop by half a bin so that CQT bins are centered vertically\n return core.cqt_frequencies(arg_0+1,\n arg_1=arg_1 / 2.0**(0.5/arg_2),\n arg_2=arg_2)"} +{"_id": "doc_2107", "title": "", "text": "def Func(arg_0, arg_1=12, **arg_2):\n '''Get chroma bin numbers'''\n return np.linspace(0, (12.0 * arg_0) / arg_1, num=arg_0+1, endpoint=True)"} +{"_id": "doc_2108", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=512, **arg_3):\n '''Get time coordinates from frames'''\n return core.frames_to_time(np.arange(arg_0+1), arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_2109", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=2048,\n arg_4=0.01, arg_5=12, **arg_6):\n '''Estimate the tuning of an audio time series or spectrogram input.\n\n Parameters\n ----------\n y: np.ndarray [shape=(n,)] or None\n audio signal\n\n sr : number > 0 [scalar]\n audio sampling rate of `y`\n\n S: np.ndarray [shape=(d, t)] or None\n magnitude or power spectrogram\n\n n_fft : int > 0 [scalar] or None\n number of FFT bins to use, if `y` is provided.\n\n resolution : float in `(0, 1)`\n Resolution of the tuning as a fraction of a bin.\n 0.01 corresponds to measurements in cents.\n\n bins_per_octave : int > 0 [scalar]\n How many frequency bins per octave\n\n kwargs : additional keyword arguments\n Additional arguments passed to `piptrack`\n\n Returns\n -------\n tuning: float in `[-0.5, 0.5)`\n estimated tuning deviation (fractions of a bin)\n\n See Also\n --------\n piptrack\n Pitch tracking by parabolic interpolation\n\n Examples\n --------\n >>> # With time-series input\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.Func(y=y, sr=sr)\n 0.089999999999999969\n\n >>> # In tenths of a cent\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.Func(y=y, sr=sr, resolution=1e-3)\n 0.093999999999999972\n\n >>> # Using spectrogram input\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> S = np.abs(librosa.stft(y))\n >>> librosa.Func(S=S, sr=sr)\n 0.089999999999999969\n\n >>> # Using pass-through arguments to `librosa.piptrack`\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.Func(y=y, sr=sr, n_fft=8192,\n ... fmax=librosa.note_to_hz('G#9'))\n 0.070000000000000062\n\n '''\n\n arg_7, arg_8 = piptrack(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, **arg_6)\n\n # Only count magnitude where frequency is > 0\n arg_9 = arg_7 > 0\n\n if arg_9.any():\n arg_10 = np.median(arg_8[arg_9])\n else:\n arg_10 = 0.0\n\n return pitch_tuning(arg_7[(arg_8 >= arg_10) & arg_9],\n arg_4=arg_4,\n arg_5=arg_5)"} +{"_id": "doc_2110", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=2048, arg_4=None,\n arg_5=150.0, arg_6=4000.0, arg_7=0.1,\n arg_8=None, arg_9='hann', arg_10=True, arg_11='reflect',\n arg_12=None):\n '''Pitch tracking on thresholded parabolically-interpolated STFT.\n\n This implementation uses the parabolic interpolation method described by [1]_.\n\n .. 
[1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html\n\n Parameters\n ----------\n y: np.ndarray [shape=(n,)] or None\n audio signal\n\n sr : number > 0 [scalar]\n audio sampling rate of `y`\n\n S: np.ndarray [shape=(d, t)] or None\n magnitude or power spectrogram\n\n n_fft : int > 0 [scalar] or None\n number of FFT bins to use, if `y` is provided.\n\n hop_length : int > 0 [scalar] or None\n number of samples to hop\n\n threshold : float in `(0, 1)`\n A bin in spectrum `S` is considered a pitch when it is greater than\n `threshold*ref(S)`.\n\n By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in\n each column).\n\n fmin : float > 0 [scalar]\n lower frequency cutoff.\n\n fmax : float > 0 [scalar]\n upper frequency cutoff.\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n ref : scalar or callable [default=np.max]\n If scalar, the reference value against which `S` is compared for determining\n pitches.\n\n If callable, the reference value is computed as `ref(S, axis=0)`.\n\n .. note::\n One of `S` or `y` must be provided.\n\n If `S` is not given, it is computed from `y` using\n the default parameters of `librosa.core.stft`.\n\n Returns\n -------\n pitches : np.ndarray [shape=(d, t)]\n magnitudes : np.ndarray [shape=(d,t)]\n Where `d` is the subset of FFT bins within `fmin` and `fmax`.\n\n `pitches[f, t]` contains instantaneous frequency at bin\n `f`, time `t`\n\n `magnitudes[f, t]` contains the corresponding magnitudes.\n\n Both `pitches` and `magnitudes` take value 0 at bins\n of non-maximal magnitude.\n\n Notes\n -----\n This function caches at level 30.\n\n Examples\n --------\n Computing pitches from a waveform input\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> pitches, magnitudes = librosa.Func(y=y, sr=sr)\n\n Or from a spectrogram input\n\n >>> S = np.abs(librosa.stft(y))\n >>> pitches, magnitudes = librosa.Func(S=S, sr=sr)\n\n Or with an alternate reference value for pitch detection, where\n values above the mean spectral energy in each frame are counted as pitches\n\n >>> pitches, magnitudes = librosa.Func(S=S, sr=sr, threshold=1,\n ... 
ref=np.mean)\n\n '''\n\n # Check that we received an audio time series or STFT\n arg_2, arg_3 = _spectrogram(arg_0=arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_8=arg_8, arg_9=arg_9,\n arg_10=arg_10, arg_11=arg_11)\n\n # Make sure we're dealing with magnitudes\n arg_2 = np.abs(arg_2)\n\n # Truncate to feasible region\n arg_5 = np.maximum(arg_5, 0)\n arg_6 = np.minimum(arg_6, float(arg_1) / 2)\n\n arg_13 = time_frequency.fft_frequencies(arg_1=arg_1, arg_3=arg_3)\n\n # Do the parabolic interpolation everywhere,\n # then figure out where the peaks are\n # then restrict to the feasible range (fmin:fmax)\n arg_14 = 0.5 * (arg_2[2:] - arg_2[:-2])\n\n arg_15 = 2 * arg_2[1:-1] - arg_2[2:] - arg_2[:-2]\n\n # Suppress divide-by-zeros.\n # Points where shift == 0 will never be selected by localmax anyway\n arg_15 = arg_14 / (arg_15 + (np.abs(arg_15) < util.tiny(arg_15)))\n\n # Pad back up to the same shape as S\n arg_14 = np.pad(arg_14, ([1, 1], [0, 0]), mode='constant')\n arg_15 = np.pad(arg_15, ([1, 1], [0, 0]), mode='constant')\n\n arg_16 = 0.5 * arg_14 * arg_15\n\n # Pre-allocate output\n arg_17 = np.zeros_like(arg_2)\n arg_18 = np.zeros_like(arg_2)\n\n # Clip to the viable frequency range\n arg_19 = ((arg_5 <= arg_13) & (arg_13 < arg_6)).reshape((-1, 1))\n\n # Compute the column-wise local max of S after thresholding\n # Find the argmax coordinates\n if arg_12 is None:\n arg_12 = np.max\n\n if six.callable(arg_12):\n arg_20 = arg_7 * arg_12(arg_2, axis=0)\n else:\n arg_20 = np.abs(arg_12)\n\n arg_21 = np.argwhere(arg_19 & util.localmax(arg_2 * (arg_2 > arg_20)))\n\n # Store pitch and magnitude\n arg_17[arg_21[:, 0], arg_21[:, 1]] = ((arg_21[:, 0] + arg_15[arg_21[:, 0], arg_21[:, 1]])\n * float(arg_1) / arg_3)\n\n arg_18[arg_21[:, 0], arg_21[:, 1]] = (arg_2[arg_21[:, 0], arg_21[:, 1]]\n + arg_16[arg_21[:, 0], arg_21[:, 1]])\n\n return arg_17, arg_18"} +{"_id": "doc_2111", "title": "", "text": "def Func(arg_0, **arg_1):\n '''Decompose an audio time series into harmonic and percussive components.\n\n This function automates the STFT->HPSS->ISTFT pipeline, and ensures that\n the output waveforms have equal length to the input waveform `y`.\n\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n kwargs : additional keyword arguments.\n See `librosa.decompose.Func` for details.\n\n\n Returns\n -------\n y_harmonic : np.ndarray [shape=(n,)]\n audio time series of the harmonic elements\n\n y_percussive : np.ndarray [shape=(n,)]\n audio time series of the percussive elements\n\n See Also\n --------\n harmonic : Extract only the harmonic component\n percussive : Extract only the percussive component\n librosa.decompose.Func : HPSS on spectrograms\n\n\n Examples\n --------\n >>> # Extract harmonic and percussive components\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> y_harmonic, y_percussive = librosa.effects.Func(y)\n\n >>> # Get a more isolated percussive component by widening its margin\n >>> y_harmonic, y_percussive = librosa.effects.Func(y, margin=(1.0,5.0))\n\n '''\n\n # Compute the STFT matrix\n arg_2 = core.stft(arg_0)\n\n # Decompose into harmonic and percussives\n arg_3, arg_4 = decompose.Func(arg_2, **arg_1)\n\n # Invert the STFTs. 
Adjust length to match the input.\n arg_5 = util.fix_length(core.istft(arg_3, dtype=arg_0.dtype), len(arg_0))\n arg_6 = util.fix_length(core.istft(arg_4, dtype=arg_0.dtype), len(arg_0))\n\n return arg_5, arg_6"} +{"_id": "doc_2112", "title": "", "text": "def Func(arg_0, **arg_1):\n '''Extract Func elements from an audio time-series.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n kwargs : additional keyword arguments.\n See `librosa.decompose.hpss` for details.\n\n Returns\n -------\n y_Func : np.ndarray [shape=(n,)]\n audio time series of just the Func portion\n\n See Also\n --------\n hpss : Separate harmonic and Func components\n harmonic : Extract only the harmonic component\n librosa.decompose.hpss : HPSS for spectrograms\n\n Examples\n --------\n >>> # Extract Func component\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> y_Func = librosa.effects.Func(y)\n\n >>> # Use a margin > 1.0 for greater Func separation\n >>> y_Func = librosa.effects.Func(y, margin=3.0)\n\n '''\n\n # Compute the STFT matrix\n arg_2 = core.stft(arg_0)\n\n # Remove harmonics\n arg_3 = decompose.hpss(arg_2, **arg_1)[1]\n\n # Invert the STFT\n arg_4 = util.fix_length(core.istft(arg_3, dtype=arg_0.dtype), len(arg_0))\n\n return arg_4"} +{"_id": "doc_2113", "title": "", "text": "def Func(arg_0, arg_1):\n '''Time-stretch an audio series by a fixed rate.\n\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n rate : float > 0 [scalar]\n Stretch factor. If `rate > 1`, then the signal is sped up.\n\n If `rate < 1`, then the signal is slowed down.\n\n Returns\n -------\n y_stretch : np.ndarray [shape=(rate * n,)]\n audio time series stretched by the specified rate\n\n See Also\n --------\n pitch_shift : pitch shifting\n librosa.core.phase_vocoder : spectrogram phase vocoder\n\n\n Examples\n --------\n Compress to be twice as fast\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> y_fast = librosa.effects.Func(y, 2.0)\n\n Or half the original speed\n\n >>> y_slow = librosa.effects.Func(y, 0.5)\n\n '''\n\n if arg_1 <= 0:\n raise ParameterError('rate must be a positive number')\n\n # Construct the stft\n arg_2 = core.stft(arg_0)\n\n # Stretch by phase vocoding\n arg_3 = core.phase_vocoder(arg_2, arg_1)\n\n # Invert the stft\n arg_4 = core.istft(arg_3, dtype=arg_0.dtype)\n\n return arg_4"} +{"_id": "doc_2114", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=12, arg_4='kaiser_best'):\n '''Pitch-shift the waveform by `n_steps` half-steps.\n\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time-series\n\n sr : number > 0 [scalar]\n audio sampling rate of `y`\n\n n_steps : float [scalar]\n how many (fractional) half-steps to shift `y`\n\n bins_per_octave : float > 0 [scalar]\n how many steps per octave\n\n res_type : string\n Resample type.\n Possible options: 'kaiser_best', 'kaiser_fast', and 'scipy', 'polyphase',\n 'fft'.\n By default, 'kaiser_best' is used.\n \n See `core.resample` for more information.\n\n Returns\n -------\n y_shift : np.ndarray [shape=(n,)]\n The pitch-shifted audio time-series\n\n\n See Also\n --------\n time_stretch : time stretching\n librosa.core.phase_vocoder : spectrogram phase vocoder\n\n\n Examples\n --------\n Shift up by a major third (four half-steps)\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> y_third = librosa.effects.Func(y, sr, n_steps=4)\n\n Shift down by a tritone (six half-steps)\n\n >>> y_tritone = librosa.effects.Func(y, sr, 
n_steps=-6)\n\n Shift up by 3 quarter-tones\n\n >>> y_three_qt = librosa.effects.Func(y, sr, n_steps=3,\n ... bins_per_octave=24)\n '''\n\n if arg_3 < 1 or not np.issubdtype(type(arg_3), np.integer):\n raise ParameterError('bins_per_octave must be a positive integer.')\n\n arg_5 = 2.0 ** (-float(arg_2) / arg_3)\n\n # Stretch in time, then resample\n arg_6 = core.resample(time_stretch(arg_0, arg_5), float(arg_1) / arg_5, arg_1,\n arg_4=arg_4)\n\n # Crop to the same dimension as the input\n return util.fix_length(arg_6, len(arg_0))"} +{"_id": "doc_2115", "title": "", "text": "def Func(arg_0, arg_1=60, arg_2=arg_3.max, arg_5=2048, arg_6=512):\n '''Split an audio signal into non-silent intervals.\n\n Parameters\n ----------\n y : np.ndarray, shape=(n,) or (2, n)\n An audio signal\n\n top_db : number > 0\n The threshold (in decibels) below reference to consider as\n silence\n\n ref : number or callable\n The reference power. By default, it uses `np.max` and compares\n to the peak power in the signal.\n\n frame_length : int > 0\n The number of samples per analysis frame\n\n hop_length : int > 0\n The number of samples between analysis frames\n\n Returns\n -------\n intervals : np.ndarray, shape=(m, 2)\n `intervals[i] == (start_i, end_i)` are the start and end time\n (in samples) of non-silent interval `i`.\n\n '''\n\n arg_7 = _signal_to_frame_nonsilent(arg_0,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_2=arg_2,\n arg_1=arg_1)\n\n # Interval slicing, adapted from\n # https://stackoverflow.com/questions/2619413/efficiently-finding-the-interval-with-non-zeros-in-scipy-numpy-in-python\n # Find points where the sign flips\n arg_8 = arg_3.flatnonzero(arg_3.diff(arg_7.astype(int)))\n\n # Pad back the sample lost in the diff\n arg_8 = [arg_8 + 1]\n\n # If the first frame had high energy, count it\n if arg_7[0]:\n arg_8.insert(0, [0])\n\n # Likewise for the last frame\n if arg_7[-1]:\n arg_8.append([len(arg_7)])\n\n # Convert from frames to samples\n arg_8 = core.frames_to_samples(arg_3.concatenate(arg_8),\n arg_6=arg_6)\n\n # Clip to the signal duration\n arg_8 = arg_3.minimum(arg_8, arg_0.shape[-1])\n\n # Stack the results back as an ndarray\n return arg_8.reshape((-1, 2))"} +{"_id": "doc_2116", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Phase vocoder. Given an STFT matrix D, speed up by a factor of `rate`\n\n Based on the implementation provided by [1]_.\n\n .. [1] Ellis, D. P. W. 
\"A phase vocoder in Matlab.\"\n Columbia University, 2002.\n http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/\n\n Examples\n --------\n >>> # Play at double speed\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> D = librosa.stft(y, n_fft=2048, hop_length=512)\n >>> D_fast = librosa.Func(D, 2.0, hop_length=512)\n >>> y_fast = librosa.istft(D_fast, hop_length=512)\n\n >>> # Or play at 1/3 speed\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> D = librosa.stft(y, n_fft=2048, hop_length=512)\n >>> D_slow = librosa.Func(D, 1./3, hop_length=512)\n >>> y_slow = librosa.istft(D_slow, hop_length=512)\n\n Parameters\n ----------\n D : np.ndarray [shape=(d, t), dtype=complex]\n STFT matrix\n\n rate : float > 0 [scalar]\n Speed-up factor: `rate > 1` is faster, `rate < 1` is slower.\n\n hop_length : int > 0 [scalar] or None\n The number of samples between successive columns of `D`.\n\n If None, defaults to `n_fft/4 = (D.shape[0]-1)/2`\n\n Returns\n -------\n D_stretched : np.ndarray [shape=(d, t / rate), dtype=complex]\n time-stretched STFT\n \"\"\"\n\n arg_3 = 2 * (arg_0.shape[0] - 1)\n\n if arg_2 is None:\n arg_2 = int(arg_3 // 4)\n\n arg_4 = np.arange(0, arg_0.shape[1], arg_1, dtype=np.float)\n\n # Create an empty output array\n arg_5 = np.zeros((arg_0.shape[0], len(arg_4)), arg_0.dtype, order='F')\n\n # Expected phase advance in each bin\n arg_6 = np.linspace(0, np.pi * arg_2, arg_0.shape[0])\n\n # Phase accumulator; initialize to the first sample\n arg_7 = np.angle(arg_0[:, 0])\n\n # Pad 0 columns to simplify boundary logic\n arg_0 = np.pad(arg_0, [(0, 0), (0, 2)], mode='constant')\n\n for (arg_8, arg_9) in enumerate(arg_4):\n\n arg_10 = arg_0[:, int(arg_9):int(arg_9 + 2)]\n\n # Weighting for linear magnitude interpolation\n arg_11 = np.mod(arg_9, 1.0)\n arg_12 = ((1.0 - arg_11) * np.abs(arg_10[:, 0])\n + arg_11 * np.abs(arg_10[:, 1]))\n\n # Store to output array\n arg_5[:, arg_8] = arg_12 * np.exp(1.j * arg_7)\n\n # Compute phase advance\n arg_13 = (np.angle(arg_10[:, 1])\n - np.angle(arg_10[:, 0])\n - arg_6)\n\n # Wrap to -pi:pi range\n arg_13 = arg_13 - 2.0 * np.pi * np.round(arg_13 / (2.0 * np.pi))\n\n # Accumulate phase\n arg_7 += arg_6 + arg_13\n\n return arg_5"} +{"_id": "doc_2117", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2=1e-5, arg_3=80.0):\n '''Convert an amplitude spectrogram to dB-scaled spectrogram.\n\n This is equivalent to ``power_to_db(S**2)``, but is provided for convenience.\n\n Parameters\n ----------\n S : np.ndarray\n input amplitude\n\n ref : scalar or callable\n If scalar, the amplitude `abs(S)` is scaled relative to `ref`:\n `20 * log10(S / ref)`.\n Zeros in the output correspond to positions where `S == ref`.\n\n If callable, the reference value is computed as `ref(S)`.\n\n amin : float > 0 [scalar]\n minimum threshold for `S` and `ref`\n\n top_db : float >= 0 [scalar]\n threshold the output at `top_db` below the peak:\n ``max(20 * log10(S)) - top_db``\n\n\n Returns\n -------\n S_db : np.ndarray\n ``S`` measured in dB\n\n See Also\n --------\n power_to_db, db_to_amplitude\n\n Notes\n -----\n This function caches at level 30.\n '''\n\n arg_0 = np.asarray(arg_0)\n\n if np.issubdtype(arg_0.dtype, np.complexfloating):\n warnings.warn('Func was called on complex input so phase '\n 'information will be discarded. 
To suppress this warning, '\n 'call Func(np.abs(S)) instead.')\n\n arg_4 = np.abs(arg_0)\n\n if six.callable(arg_1):\n # User supplied a function to calculate reference power\n arg_5 = arg_1(arg_4)\n else:\n arg_5 = np.abs(arg_1)\n\n arg_6 = np.square(arg_4, out=arg_4)\n\n return power_to_db(arg_6, arg_1=arg_5**2, arg_2=arg_2**2,\n arg_3=arg_3)"} +{"_id": "doc_2118", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=2048, arg_3=512, arg_4=1,\n arg_5=None, arg_6='hann', arg_7=True, arg_8='reflect'):\n '''Helper function to retrieve a magnitude spectrogram.\n\n This is primarily used in feature extraction functions that can operate on\n either audio time-series or spectrogram input.\n\n\n Parameters\n ----------\n y : None or np.ndarray [ndim=1]\n If provided, an audio time series\n\n S : None or np.ndarray\n Spectrogram input, optional\n\n n_fft : int > 0\n STFT window size\n\n hop_length : int > 0\n STFT hop length\n\n power : float > 0\n Exponent for the magnitude spectrogram,\n e.g., 1 for energy, 2 for power, etc.\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n\n Returns\n -------\n S_out : np.ndarray [dtype=np.float32]\n - If `S` is provided as input, then `S_out == S`\n - Else, `S_out = |stft(y, ...)|**power`\n\n n_fft : int > 0\n - If `S` is provided, then `n_fft` is inferred from `S`\n - Else, copied from input\n '''\n\n if arg_1 is not None:\n # Infer n_fft from spectrogram shape\n arg_2 = 2 * (arg_1.shape[0] - 1)\n else:\n # Otherwise, compute a magnitude spectrogram from input\n arg_1 = np.abs(stft(arg_0, arg_2=arg_2, arg_3=arg_3,\n arg_5=arg_5, arg_7=arg_7,\n arg_6=arg_6, arg_8=arg_8))**arg_4\n\n return arg_1, arg_2"} +{"_id": "doc_2119", "title": "", "text": "def Func(arg_0, arg_1):\n '''HPSS beat tracking\n\n :parameters:\n - input_file : str\n Path to input audio file (wav, mp3, m4a, flac, etc.)\n\n - output_file : str\n Path to save beat event timestamps as a CSV file\n '''\n\n # Load the file\n print('Loading ', arg_0)\n arg_2, arg_3 = librosa.load(arg_0)\n\n # Do HPSS\n print('Harmonic-percussive separation ... 
')\n arg_2 = librosa.effects.percussive(arg_2)\n\n # Construct onset envelope from percussive component\n print('Tracking beats on percussive component')\n arg_4 = librosa.onset.onset_strength(arg_2=arg_2,\n arg_3=arg_3,\n hop_length=HOP_LENGTH,\n n_fft=N_FFT,\n aggregate=np.median)\n\n # Track the beats\n arg_5, arg_6 = librosa.beat.beat_track(onset_envelope=arg_4,\n arg_3=arg_3,\n hop_length=HOP_LENGTH)\n\n arg_7 = librosa.frames_to_time(arg_6,\n arg_3=arg_3,\n hop_length=HOP_LENGTH)\n\n # Save the output\n print('Saving beats to ', arg_1)\n librosa.output.times_csv(arg_1, arg_7)"} +{"_id": "doc_2120", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=-1, **arg_4):\n '''Filtering by nearest-neighbors.\n\n Each data point (e.g, spectrogram column) is replaced\n by aggregating its nearest neighbors in feature space.\n\n This can be useful for de-noising a spectrogram or feature matrix.\n\n The non-local means method [1]_ can be recovered by providing a\n weighted recurrence matrix as input and specifying `aggregate=np.average`.\n\n Similarly, setting `aggregate=np.median` produces sparse de-noising\n as in REPET-SIM [2]_.\n\n .. [1] Buades, A., Coll, B., & Morel, J. M.\n (2005, June). A non-local algorithm for image denoising.\n In Computer Vision and Pattern Recognition, 2005.\n CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.\n\n .. [2] Rafii, Z., & Pardo, B.\n (2012, October). \"Music/Voice Separation Using the Similarity Matrix.\"\n International Society for Music Information Retrieval Conference, 2012.\n\n Parameters\n ----------\n S : np.ndarray\n The input data (spectrogram) to filter\n\n rec : (optional) scipy.sparse.spmatrix or np.ndarray\n Optionally, a pre-computed nearest-neighbor matrix\n as provided by `librosa.segment.recurrence_matrix`\n\n aggregate : function\n aggregation function (default: `np.mean`)\n\n If `aggregate=np.average`, then a weighted average is\n computed according to the (per-row) weights in `rec`.\n\n For all other aggregation functions, all neighbors\n are treated equally.\n\n\n axis : int\n The axis along which to filter (by default, columns)\n\n kwargs\n Additional keyword arguments provided to\n `librosa.segment.recurrence_matrix` if `rec` is not provided\n\n Returns\n -------\n S_filtered : np.ndarray\n The filtered data\n\n Raises\n ------\n ParameterError\n if `rec` is provided and its shape is incompatible with `S`.\n\n See also\n --------\n decompose\n hpss\n librosa.segment.recurrence_matrix\n\n\n Notes\n -----\n This function caches at level 30.\n\n\n Examples\n --------\n\n De-noise a chromagram by non-local median filtering.\n By default this would use euclidean distance to select neighbors,\n but this can be overridden directly by setting the `metric` parameter.\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... offset=30, duration=10)\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> chroma_med = librosa.decompose.Func(chroma,\n ... aggregate=np.median,\n ... metric='cosine')\n\n To use non-local means, provide an affinity matrix and `aggregate=np.average`.\n\n >>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',\n ... metric='cosine', sparse=True)\n >>> chroma_nlm = librosa.decompose.Func(chroma, rec=rec,\n ... 
aggregate=np.average)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10, 8))\n >>> plt.subplot(5, 1, 1)\n >>> librosa.display.specshow(chroma, y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Unfiltered')\n >>> plt.subplot(5, 1, 2)\n >>> librosa.display.specshow(chroma_med, y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Median-filtered')\n >>> plt.subplot(5, 1, 3)\n >>> librosa.display.specshow(chroma_nlm, y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Non-local means')\n >>> plt.subplot(5, 1, 4)\n >>> librosa.display.specshow(chroma - chroma_med,\n ... y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Original - median')\n >>> plt.subplot(5, 1, 5)\n >>> librosa.display.specshow(chroma - chroma_nlm,\n ... y_axis='chroma', x_axis='time')\n >>> plt.colorbar()\n >>> plt.title('Original - NLM')\n >>> plt.tight_layout()\n '''\n if arg_2 is None:\n arg_2 = np.mean\n\n if arg_1 is None:\n arg_4 = dict(arg_4)\n arg_4['sparse'] = True\n arg_1 = segment.recurrence_matrix(arg_0, arg_3=arg_3, **arg_4)\n elif not scipy.sparse.issparse(arg_1):\n arg_1 = scipy.sparse.csr_matrix(arg_1)\n\n if arg_1.shape[0] != arg_0.shape[arg_3] or arg_1.shape[0] != arg_1.shape[1]:\n raise ParameterError('Invalid self-similarity matrix shape '\n 'rec.shape={} for S.shape={}'.format(arg_1.shape,\n arg_0.shape))\n\n return __Func_helper(arg_1.data, arg_1.indices, arg_1.indptr,\n arg_0.swapaxes(0, arg_3), arg_2).swapaxes(0, arg_3)"} +{"_id": "doc_2121", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''Nearest-neighbor filter helper function.\n\n This is an internal function, not for use outside of the decompose module.\n\n It applies the nearest-neighbor filter to S, assuming that the first index\n corresponds to observations.\n\n Parameters\n ----------\n R_data, R_indices, R_ptr : np.ndarrays\n The `data`, `indices`, and `indptr` of a scipy.sparse matrix\n\n S : np.ndarray\n The observation data to filter\n\n aggregate : callable\n The aggregation operator\n\n\n Returns\n -------\n S_out : np.ndarray like S\n The filtered data array\n '''\n arg_5 = np.empty_like(arg_3)\n\n for arg_6 in range(len(arg_2)-1):\n\n # Get the non-zeros out of the recurrence matrix\n arg_7 = arg_1[arg_2[arg_6]:arg_2[arg_6+1]]\n\n if not len(arg_7):\n arg_5[arg_6] = arg_3[arg_6]\n continue\n\n arg_8 = np.take(arg_3, arg_7, axis=0)\n\n if arg_4 is np.average:\n arg_9 = arg_0[arg_2[arg_6]:arg_2[arg_6+1]]\n arg_5[arg_6] = arg_4(arg_8, axis=0, arg_9=arg_9)\n else:\n arg_5[arg_6] = arg_4(arg_8, axis=0)\n\n return arg_5"} +{"_id": "doc_2122", "title": "", "text": "def Func(arg_0, arg_1, arg_2=128, arg_3=0.0, arg_4=None, arg_5=False,\n arg_6=1, arg_7=arg_8.float32):\n \"\"\"Create a Filterbank matrix to combine FFT bins into Mel-frequency bins\n\n Parameters\n ----------\n sr : number > 0 [scalar]\n sampling rate of the incoming signal\n\n n_fft : int > 0 [scalar]\n number of FFT components\n\n n_Funcs : int > 0 [scalar]\n number of Mel bands to generate\n\n fmin : float >= 0 [scalar]\n lowest frequency (in Hz)\n\n fmax : float >= 0 [scalar]\n highest frequency (in Hz).\n If `None`, use `fmax = sr / 2.0`\n\n htk : bool [scalar]\n use HTK formula instead of Slaney\n\n norm : {None, 1, np.inf} [scalar]\n if 1, divide the triangular Func weights by the width of the Func band\n (area normalization). 
Otherwise, leave all the triangles aiming for\n a peak value of 1.0\n\n dtype : np.dtype\n The data type of the output basis.\n By default, uses 32-bit (single-precision) floating point.\n\n Returns\n -------\n M : np.ndarray [shape=(n_Funcs, 1 + n_fft/2)]\n Mel transform matrix\n\n Notes\n -----\n This function caches at level 10.\n\n Examples\n --------\n >>> Funcfb = librosa.filters.Func(22050, 2048)\n >>> Funcfb\n array([[ 0. , 0.016, ..., 0. , 0. ],\n [ 0. , 0. , ..., 0. , 0. ],\n ...,\n [ 0. , 0. , ..., 0. , 0. ],\n [ 0. , 0. , ..., 0. , 0. ]])\n\n\n Clip the maximum frequency to 8KHz\n\n >>> librosa.filters.Func(22050, 2048, fmax=8000)\n array([[ 0. , 0.02, ..., 0. , 0. ],\n [ 0. , 0. , ..., 0. , 0. ],\n ...,\n [ 0. , 0. , ..., 0. , 0. ],\n [ 0. , 0. , ..., 0. , 0. ]])\n\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> librosa.display.specshow(Funcfb, x_axis='linear')\n >>> plt.ylabel('Mel filter')\n >>> plt.title('Mel filter bank')\n >>> plt.colorbar()\n >>> plt.tight_layout()\n \"\"\"\n\n if arg_4 is None:\n arg_4 = float(arg_0) / 2\n\n if arg_6 is not None and arg_6 != 1 and arg_6 != arg_8.inf:\n raise ParameterError('Unsupported norm: {}'.format(repr(arg_6)))\n\n # Initialize the weights\n arg_2 = int(arg_2)\n arg_10 = arg_8.zeros((arg_2, int(1 + arg_1 // 2)), arg_7=arg_7)\n\n # Center freqs of each FFT bin\n arg_11 = fft_frequencies(arg_0=arg_0, arg_1=arg_1)\n\n # 'Center freqs' of Func bands - uniformly spaced between limits\n arg_12 = Func_frequencies(arg_2 + 2, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5)\n\n arg_13 = arg_8.diff(arg_12)\n arg_14 = arg_8.subtract.outer(arg_12, arg_11)\n\n for arg_15 in range(arg_2):\n # lower and upper slopes for all bins\n arg_16 = -arg_14[arg_15] / arg_13[arg_15]\n arg_17 = arg_14[arg_15+2] / arg_13[arg_15+1]\n\n # .. then intersect them with each other and zero\n arg_10[arg_15] = arg_8.maximum(0, arg_8.minimum(arg_16, arg_17))\n\n if arg_6 == 1:\n # Slaney-style Func is scaled to be approx constant energy per channel\n arg_18 = 2.0 / (arg_12[2:arg_2+2] - arg_12[:arg_2])\n arg_10 *= arg_18[:, arg_8.newaxis]\n\n # Only check weights if f_Func[0] is positive\n if not arg_8.all((arg_12[:-2] == 0) | (arg_10.max(axis=1) > 0)):\n # This means we have an empty channel somewhere\n warnings.warn('Empty filters detected in Func frequency basis. '\n 'Some channels will produce empty responses. '\n 'Try increasing your sampling rate (and fmax) or '\n 'reducing n_Funcs.')\n\n return arg_10"} +{"_id": "doc_2123", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=84, arg_3=12, arg_4=0.0,\n arg_5='hann', arg_6=1, arg_7=True, arg_8=1,\n arg_9=arg_10.complex64, **arg_12):\n r'''Construct a constant-Q basis.\n\n This uses the filter bank described by [1]_.\n\n .. [1] McVicar, Matthew.\n \"A machine learning approach to automatic chord extraction.\"\n Dissertation, University of Bristol. 2013.\n\n\n Parameters\n ----------\n sr : number > 0 [scalar]\n Audio sampling rate\n\n fmin : float > 0 [scalar]\n Minimum frequency bin. Defaults to `C1 ~= 32.70`\n\n n_bins : int > 0 [scalar]\n Number of frequencies. 
Defaults to 7 octaves (84 bins).\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : float in `[-0.5, +0.5)` [scalar]\n Tuning deviation from A440 in fractions of a bin\n\n window : string, tuple, number, or function\n Windowing function to apply to filters.\n\n filter_scale : float > 0 [scalar]\n Scale of filter windows.\n Small values (<1) use shorter windows for higher temporal resolution.\n\n pad_fft : boolean\n Center-pad all filters up to the nearest integral power of 2.\n\n By default, padding is done with zeros, but this can be overridden\n by setting the `mode=` field in *kwargs*.\n\n norm : {inf, -inf, 0, float > 0}\n Type of norm to use for basis function normalization.\n See librosa.util.normalize\n\n dtype : np.dtype\n The data type of the output basis.\n By default, uses 64-bit (single precision) complex floating point.\n\n kwargs : additional keyword arguments\n Arguments to `np.pad()` when `pad==True`.\n\n Returns\n -------\n filters : np.ndarray, `len(filters) == n_bins`\n `filters[i]` is `i`\\ th time-domain CQT basis filter\n\n lengths : np.ndarray, `len(lengths) == n_bins`\n The (fractional) length of each filter\n\n Notes\n -----\n This function caches at level 10.\n\n See Also\n --------\n Func_lengths\n librosa.core.cqt\n librosa.util.normalize\n\n\n Examples\n --------\n Use a shorter window for each filter\n\n >>> basis, lengths = librosa.filters.Func(22050, filter_scale=0.5)\n\n Plot one octave of filters in time and frequency\n\n >>> import matplotlib.pyplot as plt\n >>> basis, lengths = librosa.filters.Func(22050)\n >>> plt.figure(figsize=(10, 6))\n >>> plt.subplot(2, 1, 1)\n >>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))\n >>> for i, (f, n) in enumerate(zip(basis, notes[:12])):\n ... f_scale = librosa.util.normalize(f) / 2\n ... plt.plot(i + f_scale.real)\n ... plt.plot(i + f_scale.imag, linestyle=':')\n >>> plt.axis('tight')\n >>> plt.yticks(np.arange(len(notes[:12])), notes[:12])\n >>> plt.ylabel('CQ filters')\n >>> plt.title('CQ filters (one octave, time domain)')\n >>> plt.xlabel('Time (samples at 22050 Hz)')\n >>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)\n >>> plt.subplot(2, 1, 2)\n >>> F = np.abs(np.fft.fftn(basis, axes=[-1]))\n >>> # Keep only the positive frequencies\n >>> F = F[:, :(1 + F.shape[1] // 2)]\n >>> librosa.display.specshow(F, x_axis='linear')\n >>> plt.yticks(np.arange(len(notes))[::12], notes[::12])\n >>> plt.ylabel('CQ filters')\n >>> plt.title('CQ filter magnitudes (frequency domain)')\n >>> plt.tight_layout()\n '''\n\n if arg_1 is None:\n arg_1 = note_to_hz('C1')\n\n # Pass-through parameters to get the filter lengths\n arg_13 = Func_lengths(arg_0, arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6)\n\n # Apply tuning correction\n arg_14 = 2.0**(float(arg_4) / arg_3)\n arg_1 = arg_14 * arg_1\n\n # Q should be capitalized here, so we suppress the name warning\n # pylint: disable=invalid-name\n arg_15 = float(arg_6) / (2.0**(1. 
/ arg_3) - 1)\n\n # Convert lengths back to frequencies\n arg_16 = arg_15 * arg_0 / arg_13\n\n # Build the filters\n arg_17 = []\n for arg_18, arg_19 in zip(arg_13, arg_16):\n # Build the filter: note, length will be ceil(ilen)\n arg_20 = arg_10.exp(arg_10.arange(-arg_18//2, arg_18//2, arg_9=float) * 1j * 2 * arg_10.pi * arg_19 / arg_0)\n\n # Apply the windowing function\n arg_20 = arg_20 * __float_window(arg_5)(len(arg_20))\n\n # Normalize\n arg_20 = util.normalize(arg_20, arg_8=arg_8)\n\n arg_17.append(arg_20)\n\n # Pad and stack\n arg_21 = max(arg_13)\n if arg_7:\n arg_21 = int(2.0**(arg_10.ceil(arg_10.log2(arg_21))))\n else:\n arg_21 = int(arg_10.ceil(arg_21))\n\n arg_17 = arg_10.asarray([util.pad_center(filt, arg_21, **arg_12)\n for filt in arg_17], arg_9=arg_9)\n\n return arg_17, arg_10.asarray(arg_13)"} +{"_id": "doc_2124", "title": "", "text": "def Func(arg_0, arg_1=12, arg_2=12,\n arg_3=None, arg_4=None, arg_5=True, arg_6=arg_7.float32):\n '''Convert a Constant-Q basis to Chroma.\n\n\n Parameters\n ----------\n n_input : int > 0 [scalar]\n Number of input components (CQT bins)\n\n bins_per_octave : int > 0 [scalar]\n How many bins per octave in the CQT\n\n n_chroma : int > 0 [scalar]\n Number of output bins (per octave) in the chroma\n\n fmin : None or float > 0\n Center frequency of the first constant-Q channel.\n Default: 'C1' ~= 32.7 Hz\n\n window : None or np.ndarray\n If provided, the Func filter bank will be\n convolved with `window`.\n\n base_c : bool\n If True, the first chroma bin will start at 'C'\n If False, the first chroma bin will start at 'A'\n\n dtype : np.dtype\n The data type of the output basis.\n By default, uses 32-bit (single-precision) floating point.\n\n\n Returns\n -------\n Func : np.ndarray [shape=(n_chroma, n_input)]\n Transformation matrix: `Chroma = np.dot(Func, CQT)`\n\n Raises\n ------\n ParameterError\n If `n_input` is not an integer multiple of `n_chroma`\n\n Notes\n -----\n This function caches at level 10.\n\n Examples\n --------\n Get a CQT, and wrap bins to chroma\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> CQT = np.abs(librosa.cqt(y, sr=sr))\n >>> chroma_map = librosa.filters.Func(CQT.shape[0])\n >>> chromagram = chroma_map.dot(CQT)\n >>> # Max-normalize each time step\n >>> chromagram = librosa.util.normalize(chromagram, axis=0)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(CQT,\n ... ref=np.max),\n ... 
y_axis='cqt_note')\n >>> plt.title('CQT Power')\n >>> plt.colorbar()\n >>> plt.subplot(3, 1, 2)\n >>> librosa.display.specshow(chromagram, y_axis='chroma')\n >>> plt.title('Chroma (wrapped CQT)')\n >>> plt.colorbar()\n >>> plt.subplot(3, 1, 3)\n >>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)\n >>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')\n >>> plt.title('librosa.feature.chroma_stft')\n >>> plt.colorbar()\n >>> plt.tight_layout()\n\n '''\n\n # How many fractional bins are we merging?\n arg_9 = float(arg_1) / arg_2\n\n if arg_3 is None:\n arg_3 = note_to_hz('C1')\n\n if arg_7.mod(arg_9, 1) != 0:\n raise ParameterError('Incompatible CQ merge: '\n 'input bins must be an '\n 'integer multiple of output bins.')\n\n # Tile the identity to merge fractional bins\n arg_10 = arg_7.repeat(arg_7.eye(arg_2), arg_9, axis=1)\n\n # Roll it left to center on the target bin\n arg_10 = arg_7.roll(arg_10, - int(arg_9 // 2), axis=1)\n\n # How many octaves are we repeating?\n arg_11 = arg_7.ceil(arg_7.float(arg_0) / arg_1)\n\n # Repeat and trim\n arg_10 = arg_7.tile(arg_10, int(arg_11))[:, :arg_0]\n\n # What's the note number of the first bin in the CQT?\n # midi uses 12 bins per octave here\n arg_12 = arg_7.mod(hz_to_midi(arg_3), 12)\n\n if arg_5:\n # rotate to C\n arg_13 = arg_12\n else:\n # rotate to A\n arg_13 = arg_12 - 9\n\n # Adjust the roll in terms of how many chroma we want out\n # We need to be careful with rounding here\n arg_13 = int(arg_7.round(arg_13 * (arg_2 / 12.)))\n\n # Apply the roll\n arg_10 = arg_7.roll(arg_10, arg_13, axis=0).astype(arg_6)\n\n if arg_4 is not None:\n arg_10 = scipy.signal.convolve(arg_10,\n arg_7.atleast_2d(arg_4),\n mode='same')\n\n return arg_10"} +{"_id": "doc_2125", "title": "", "text": "def Func(arg_0, arg_1=1000):\n '''Get the equivalent noise bandwidth of a window function.\n\n\n Parameters\n ----------\n window : callable or string\n A window function, or the name of a window function.\n Examples:\n - scipy.signal.hann\n - 'boxcar'\n\n n : int > 0\n The number of coefficients to use in estimating the\n window bandwidth\n\n Returns\n -------\n bandwidth : float\n The equivalent noise bandwidth (in FFT bins) of the\n given window function\n\n Notes\n -----\n This function caches at level 10.\n\n See Also\n --------\n get_window\n '''\n\n if hasattr(arg_0, '__name__'):\n arg_2 = arg_0.__name__\n else:\n arg_2 = arg_0\n\n if arg_2 not in arg_4:\n arg_3 = get_window(arg_0, arg_1)\n arg_4[arg_2] = arg_1 * np.sum(arg_3**2) / np.sum(np.abs(arg_3))**2\n\n return arg_4[arg_2]"} +{"_id": "doc_2126", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n '''Compute a window function.\n\n This is a wrapper for `scipy.signal.Func` that additionally\n supports callable or pre-computed windows.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n The window specification:\n\n - If string, it's the name of the window function (e.g., `'hann'`)\n - If tuple, it's the name of the window function and any parameters\n (e.g., `('kaiser', 4.0)`)\n - If numeric, it is treated as the beta parameter of the `'kaiser'`\n window, as in `scipy.signal.Func`.\n - If callable, it's a function that accepts one integer argument\n (the window length)\n - If list-like, it's a pre-computed window of the correct length `Nx`\n\n Nx : int > 0\n The length of the window\n\n fftbins : bool, optional\n If True (default), create a periodic window for use with FFT\n If False, create a symmetric window for filter design applications.\n\n 
Returns\n -------\n Func : np.ndarray\n A window of length `Nx` and type `window`\n\n See Also\n --------\n scipy.signal.Func\n\n Notes\n -----\n This function caches at level 10.\n\n Raises\n ------\n ParameterError\n If `window` is supplied as a vector of length != `Nx`,\n or is otherwise mis-specified.\n '''\n if six.callable(arg_0):\n return arg_0(arg_1)\n\n elif (isinstance(arg_0, (six.string_types, tuple)) or\n np.isscalar(arg_0)):\n # TODO: if we add custom window functions in librosa, call them here\n\n return scipy.signal.Func(arg_0, arg_1, arg_2=arg_2)\n\n elif isinstance(arg_0, (np.ndarray, list)):\n if len(arg_0) == arg_1:\n return np.asarray(arg_0)\n\n raise ParameterError('Window size mismatch: '\n '{:d} != {:d}'.format(len(arg_0), arg_1))\n else:\n raise ParameterError('Invalid window specification: {}'.format(arg_0))"}
{"_id": "doc_2127", "title": "", "text": "def Func(arg_0):\n r'''Helper function for generating center frequency and sample rate pairs.\n\n This function will return center frequency and corresponding sample rates\n to obtain similar pitch filterbank settings as described in [1]_.\n Instead of starting with MIDI pitch `A0`, we start with `C0`.\n\n .. [1] M\u00fcller, Meinard.\n \"Information Retrieval for Music and Motion.\"\n Springer Verlag. 2007.\n\n\n Parameters\n ----------\n tuning : float in `[-0.5, +0.5)` [scalar]\n Tuning deviation from A440, measured as a fraction of the equally\n tempered semitone (1/12 of an octave).\n\n Returns\n -------\n center_freqs : np.ndarray [shape=(n,), dtype=float]\n Center frequencies of the filter kernels.\n Also defines the number of filters in the filterbank.\n\n sample_rates : np.ndarray [shape=(n,), dtype=float]\n Sample rate for each filter, used for multirate filterbank.\n\n Notes\n -----\n This function caches at level 10.\n\n\n See Also\n --------\n librosa.filters.semitone_filterbank\n librosa.filters._multirate_fb\n '''\n\n arg_1 = midi_to_hz(np.arange(24 + arg_0, 109 + arg_0))\n\n arg_2 = np.asarray(len(np.arange(0, 36)) * [882, ] +\n len(np.arange(36, 70)) * [4410, ] +\n len(np.arange(70, 85)) * [22050, ])\n\n return arg_1, arg_2"}
{"_id": "doc_2128", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3): # pragma: no cover\n '''Helper function for window sum-square calculation.'''\n\n arg_4 = len(arg_0)\n arg_5 = len(arg_1)\n for arg_6 in range(arg_2):\n arg_7 = arg_6 * arg_3\n arg_0[arg_7:min(arg_4, arg_7 + arg_5)] += arg_1[:max(0, min(arg_5, arg_4 - arg_7))]"}
{"_id": "doc_2129", "title": "", "text": "def Func(arg_0, arg_1, arg_2=512, arg_3=None, arg_4=2048,\n arg_5=arg_6.float32, arg_8=None):\n '''\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing observations\n in short-time Fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. 
By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n\n Examples\n --------\n For a fixed frame length (2048), compare modulation effects for a Hann window\n at different hop lengths:\n\n >>> n_frames = 50\n >>> wss_256 = librosa.filters.Func('hann', n_frames, hop_length=256)\n >>> wss_512 = librosa.filters.Func('hann', n_frames, hop_length=512)\n >>> wss_1024 = librosa.filters.Func('hann', n_frames, hop_length=1024)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(3,1,1)\n >>> plt.plot(wss_256)\n >>> plt.title('hop_length=256')\n >>> plt.subplot(3,1,2)\n >>> plt.plot(wss_512)\n >>> plt.title('hop_length=512')\n >>> plt.subplot(3,1,3)\n >>> plt.plot(wss_1024)\n >>> plt.title('hop_length=1024')\n >>> plt.tight_layout()\n\n '''\n if arg_3 is None:\n arg_3 = arg_4\n\n arg_9 = arg_4 + arg_2 * (arg_1 - 1)\n arg_10 = arg_6.zeros(arg_9, arg_5=arg_5)\n\n # Compute the squared window at the desired length\n arg_11 = get_window(arg_0, arg_3)\n arg_11 = util.normalize(arg_11, arg_8=arg_8)**2\n arg_11 = util.pad_center(arg_11, arg_4)\n\n # Fill the envelope\n __window_ss_fill(arg_10, arg_11, arg_1, arg_2)\n\n return arg_10"} +{"_id": "doc_2130", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=2048, arg_4=512,\n arg_5=None, arg_6=None, arg_7='hann', arg_8=True,\n arg_9='reflect'):\n '''Compute the spectral centroid.\n\n Each frame of a magnitude spectrogram is normalized and treated as a\n distribution over frequency bins, from which the mean (centroid) is\n extracted per frame.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)] or None\n audio time series\n\n sr : number > 0 [scalar]\n audio sampling rate of `y`\n\n S : np.ndarray [shape=(d, t)] or None\n (optional) spectrogram magnitude\n\n n_fft : int > 0 [scalar]\n FFT window size\n\n hop_length : int > 0 [scalar]\n hop length for STFT. See `librosa.core.stft` for details.\n\n freq : None or np.ndarray [shape=(d,) or shape=(d, t)]\n Center frequencies for spectrogram bins.\n If `None`, then FFT bin center frequencies are used.\n Otherwise, it can be a single array of `d` center frequencies,\n or a matrix of center frequencies as constructed by\n `librosa.core.ifgram`\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. 
see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n\n Returns\n -------\n centroid : np.ndarray [shape=(1, t)]\n centroid frequencies\n\n See Also\n --------\n librosa.core.stft\n Short-time Fourier Transform\n\n librosa.core.ifgram\n Instantaneous-frequency spectrogram\n\n Examples\n --------\n From time-series input:\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> cent = librosa.feature.Func(y=y, sr=sr)\n >>> cent\n array([[ 4382.894, 626.588, ..., 5037.07 , 5413.398]])\n\n From spectrogram input:\n\n >>> S, phase = librosa.magphase(librosa.stft(y=y))\n >>> librosa.feature.Func(S=S)\n array([[ 4382.894, 626.588, ..., 5037.07 , 5413.398]])\n\n Using variable bin center frequencies:\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> if_gram, D = librosa.ifgram(y)\n >>> librosa.feature.Func(S=np.abs(D), freq=if_gram)\n array([[ 4420.719, 625.769, ..., 5011.86 , 5221.492]])\n\n Plot the result\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(2, 1, 1)\n >>> plt.semilogy(cent.T, label='Spectral centroid')\n >>> plt.ylabel('Hz')\n >>> plt.xticks([])\n >>> plt.xlim([0, cent.shape[-1]])\n >>> plt.legend()\n >>> plt.subplot(2, 1, 2)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),\n ... y_axis='log', x_axis='time')\n >>> plt.title('log Power spectrogram')\n >>> plt.tight_layout()\n '''\n\n arg_2, arg_3 = _spectrogram(arg_0=arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_6=arg_6, arg_7=arg_7, arg_8=arg_8,\n arg_9=arg_9)\n\n if not np.isrealobj(arg_2):\n raise ParameterError('Spectral centroid is only defined '\n 'with real-valued input')\n elif np.any(arg_2 < 0):\n raise ParameterError('Spectral centroid is only defined '\n 'with non-negative energies')\n\n # Compute the center frequencies of each bin\n if arg_5 is None:\n arg_5 = fft_frequencies(arg_1=arg_1, arg_3=arg_3)\n\n if arg_5.ndim == 1:\n arg_5 = arg_5.reshape((-1, 1))\n\n # Column-normalize S\n return np.sum(arg_5 * util.normalize(arg_2, norm=1, axis=0),\n axis=0, keepdims=True)"} +{"_id": "doc_2131", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=2048, arg_4=512,\n arg_5=None, arg_6='hann', arg_7=True, arg_8='reflect',\n arg_9=None, arg_10=0.85):\n '''Compute roll-off frequency.\n\n The roll-off frequency is defined for each frame as the center frequency\n for a spectrogram bin such that at least roll_percent (0.85 by default)\n of the energy of the spectrum in this frame is contained in this bin and\n the bins below. This can be used to, e.g., approximate the maximum (or\n minimum) frequency by setting roll_percent to a value close to 1 (or 0).\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)] or None\n audio time series\n\n sr : number > 0 [scalar]\n audio sampling rate of `y`\n\n S : np.ndarray [shape=(d, t)] or None\n (optional) spectrogram magnitude\n\n n_fft : int > 0 [scalar]\n FFT window size\n\n hop_length : int > 0 [scalar]\n hop length for STFT. 
See `librosa.core.stft` for details.\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n freq : None or np.ndarray [shape=(d,) or shape=(d, t)]\n Center frequencies for spectrogram bins.\n If `None`, then FFT bin center frequencies are used.\n Otherwise, it can be a single array of `d` center frequencies,\n\n .. note:: `freq` is assumed to be sorted in increasing order\n\n roll_percent : float [0 < roll_percent < 1]\n Roll-off percentage.\n\n Returns\n -------\n rolloff : np.ndarray [shape=(1, t)]\n roll-off frequency for each frame\n\n\n Examples\n --------\n From time-series input\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> # Approximate maximum frequencies with roll_percent=0.85 (default)\n >>> rolloff = librosa.feature.Func(y=y, sr=sr)\n >>> rolloff\n array([[ 8376.416, 968.994, ..., 8925.513, 9108.545]])\n >>> # Approximate minimum frequencies with roll_percent=0.1\n >>> rolloff = librosa.feature.Func(y=y, sr=sr, roll_percent=0.1)\n >>> rolloff\n array([[ 75.36621094, 64.59960938, 64.59960938, ..., 75.36621094,\n 75.36621094, 64.59960938]])\n\n\n From spectrogram input\n\n >>> S, phase = librosa.magphase(librosa.stft(y))\n >>> librosa.feature.Func(S=S, sr=sr)\n array([[ 8376.416, 968.994, ..., 8925.513, 9108.545]])\n\n >>> # With a higher roll percentage:\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.feature.Func(y=y, sr=sr, roll_percent=0.95)\n array([[ 10012.939, 3003.882, ..., 10034.473, 10077.539]])\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(2, 1, 1)\n >>> plt.semilogy(rolloff.T, label='Roll-off frequency')\n >>> plt.ylabel('Hz')\n >>> plt.xticks([])\n >>> plt.xlim([0, rolloff.shape[-1]])\n >>> plt.legend()\n >>> plt.subplot(2, 1, 2)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),\n ... 
y_axis='log', x_axis='time')\n >>> plt.title('log Power spectrogram')\n >>> plt.tight_layout()\n\n '''\n\n if not 0.0 < arg_10 < 1.0:\n raise ParameterError('roll_percent must lie in the range (0, 1)')\n\n arg_2, arg_3 = _spectrogram(arg_0=arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6, arg_7=arg_7,\n arg_8=arg_8)\n\n if not np.isrealobj(arg_2):\n raise ParameterError('Spectral rolloff is only defined '\n 'with real-valued input')\n elif np.any(arg_2 < 0):\n raise ParameterError('Spectral rolloff is only defined '\n 'with non-negative energies')\n\n # Compute the center frequencies of each bin\n if arg_9 is None:\n arg_9 = fft_frequencies(arg_1=arg_1, arg_3=arg_3)\n\n # Make sure that frequency can be broadcast\n if arg_9.ndim == 1:\n arg_9 = arg_9.reshape((-1, 1))\n\n arg_11 = np.cumsum(arg_2, axis=0)\n\n arg_12 = arg_10 * arg_11[-1]\n\n arg_13 = np.where(arg_11 < arg_12, np.nan, 1)\n\n return np.nanmin(arg_13 * arg_9, axis=0, keepdims=True)"}
{"_id": "doc_2132", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=2048, arg_3=512,\n arg_4=None, arg_5='hann', arg_6=True, arg_7='reflect',\n arg_8=1e-10, arg_9=2.0):\n '''Compute spectral flatness\n\n Spectral flatness (or tonality coefficient) is a measure to\n quantify how noise-like a sound is, as opposed to being\n tone-like [1]_. A high spectral flatness (closer to 1.0)\n indicates the spectrum is similar to white noise.\n It is often converted to decibels.\n\n .. [1] Dubnov, Shlomo \"Generalization of spectral flatness\n measure for non-gaussian linear processes\"\n IEEE Signal Processing Letters, 2004, Vol. 11.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)] or None\n audio time series\n\n S : np.ndarray [shape=(d, t)] or None\n (optional) pre-computed spectrogram magnitude\n\n n_fft : int > 0 [scalar]\n FFT window size\n\n hop_length : int > 0 [scalar]\n hop length for STFT. See `librosa.core.stft` for details.\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. 
see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n amin : float > 0 [scalar]\n minimum threshold for `S` (=added noise floor for numerical stability)\n\n power : float > 0 [scalar]\n Exponent for the magnitude spectrogram.\n e.g., 1 for energy, 2 for power, etc.\n Power spectrogram is usually used for computing spectral flatness.\n\n Returns\n -------\n flatness : np.ndarray [shape=(1, t)]\n spectral flatness for each frame.\n The returned value is in [0, 1] and often converted to dB scale.\n\n\n Examples\n --------\n From time-series input\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> flatness = librosa.feature.Func(y=y)\n >>> flatness\n array([[ 1.00000e+00, 5.82299e-03, 5.64624e-04, ..., 9.99063e-01,\n 1.00000e+00, 1.00000e+00]], dtype=float32)\n\n From spectrogram input\n\n >>> S, phase = librosa.magphase(librosa.stft(y))\n >>> librosa.feature.Func(S=S)\n array([[ 1.00000e+00, 5.82299e-03, 5.64624e-04, ..., 9.99063e-01,\n 1.00000e+00, 1.00000e+00]], dtype=float32)\n\n From power spectrogram input\n\n >>> S, phase = librosa.magphase(librosa.stft(y))\n >>> S_power = S ** 2\n >>> librosa.feature.Func(S=S_power, power=1.0)\n array([[ 1.00000e+00, 5.82299e-03, 5.64624e-04, ..., 9.99063e-01,\n 1.00000e+00, 1.00000e+00]], dtype=float32)\n\n '''\n if arg_8 <= 0:\n raise ParameterError('amin must be strictly positive')\n\n arg_1, arg_2 = _spectrogram(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_9=1., arg_4=arg_4, arg_5=arg_5,\n arg_6=arg_6, arg_7=arg_7)\n\n if not np.isrealobj(arg_1):\n raise ParameterError('Spectral flatness is only defined '\n 'with real-valued input')\n elif np.any(arg_1 < 0):\n raise ParameterError('Spectral flatness is only defined '\n 'with non-negative energies')\n\n arg_10 = np.maximum(arg_8, arg_1 ** arg_9)\n arg_11 = np.exp(np.mean(np.log(arg_10), axis=0, keepdims=True))\n arg_12 = np.mean(arg_10, axis=0, keepdims=True)\n return arg_11 / arg_12"} +{"_id": "doc_2133", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=2048, arg_4=512,\n arg_5=None, arg_6='hann', arg_7=True, arg_8='reflect',\n arg_9=1, arg_10=None):\n '''Get coefficients of fitting an nth-order polynomial to the columns\n of a spectrogram.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)] or None\n audio time series\n\n sr : number > 0 [scalar]\n audio sampling rate of `y`\n\n S : np.ndarray [shape=(d, t)] or None\n (optional) spectrogram magnitude\n\n n_fft : int > 0 [scalar]\n FFT window size\n\n hop_length : int > 0 [scalar]\n hop length for STFT. See `librosa.core.stft` for details.\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. 
see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n order : int > 0\n order of the polynomial to fit\n\n freq : None or np.ndarray [shape=(d,) or shape=(d, t)]\n Center frequencies for spectrogram bins.\n If `None`, then FFT bin center frequencies are used.\n Otherwise, it can be a single array of `d` center frequencies,\n or a matrix of center frequencies as constructed by\n `librosa.core.ifgram`\n\n Returns\n -------\n coefficients : np.ndarray [shape=(order+1, t)]\n polynomial coefficients for each frame.\n\n `coefficients[0]` corresponds to the highest degree (`order`),\n\n `coefficients[1]` corresponds to the next highest degree (`order-1`),\n\n down to the constant term `coefficients[order]`.\n\n Examples\n --------\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> S = np.abs(librosa.stft(y))\n\n Fit a degree-0 polynomial (constant) to each frame\n\n >>> p0 = librosa.feature.Func(S=S, order=0)\n\n Fit a linear polynomial to each frame\n\n >>> p1 = librosa.feature.Func(S=S, order=1)\n\n Fit a quadratic to each frame\n\n >>> p2 = librosa.feature.Func(S=S, order=2)\n\n Plot the results for comparison\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(8, 8))\n >>> ax = plt.subplot(4,1,1)\n >>> plt.plot(p2[2], label='order=2', alpha=0.8)\n >>> plt.plot(p1[1], label='order=1', alpha=0.8)\n >>> plt.plot(p0[0], label='order=0', alpha=0.8)\n >>> plt.xticks([])\n >>> plt.ylabel('Constant')\n >>> plt.legend()\n >>> plt.subplot(4,1,2, sharex=ax)\n >>> plt.plot(p2[1], label='order=2', alpha=0.8)\n >>> plt.plot(p1[0], label='order=1', alpha=0.8)\n >>> plt.xticks([])\n >>> plt.ylabel('Linear')\n >>> plt.subplot(4,1,3, sharex=ax)\n >>> plt.plot(p2[0], label='order=2', alpha=0.8)\n >>> plt.xticks([])\n >>> plt.ylabel('Quadratic')\n >>> plt.subplot(4,1,4, sharex=ax)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),\n ... y_axis='log')\n >>> plt.tight_layout()\n '''\n\n arg_2, arg_3 = _spectrogram(arg_0=arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6, arg_7=arg_7,\n arg_8=arg_8)\n\n # Compute the center frequencies of each bin\n if arg_10 is None:\n arg_10 = fft_frequencies(arg_1=arg_1, arg_3=arg_3)\n\n # If frequencies are constant over frames, then we only need to fit once\n if arg_10.ndim == 1:\n arg_11 = np.polyfit(arg_10, arg_2, arg_9)\n else:\n # Else, fit each frame independently and stack the results\n arg_11 = np.concatenate([[np.polyfit(arg_10[:, i], arg_2[:, i], arg_9)]\n for i in range(arg_2.shape[1])], axis=0).T\n\n return arg_11"}
{"_id": "doc_2134", "title": "", "text": "def Func(arg_0, arg_1=2048, arg_2=512, arg_3=True,\n **arg_4):\n '''Compute the zero-crossing rate of an audio time series.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n Audio time series\n\n frame_length : int > 0\n Length of the frame over which to compute zero crossing rates\n\n hop_length : int > 0\n Number of samples to advance for each frame\n\n center : bool\n If `True`, frames are centered by padding the edges of `y`.\n This is similar to the padding in `librosa.core.stft`,\n but uses edge-value copies instead of reflection.\n\n kwargs : additional keyword arguments\n See `librosa.core.zero_crossings`\n\n .. 
note:: By default, the `pad` parameter is set to `False`, which\n differs from the default specified by\n `librosa.core.zero_crossings`.\n\n Returns\n -------\n zcr : np.ndarray [shape=(1, t)]\n `zcr[0, i]` is the fraction of zero crossings in the\n `i` th frame\n\n See Also\n --------\n librosa.core.zero_crossings\n Compute zero-crossings in a time-series\n\n Examples\n --------\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.feature.Func(y)\n array([[ 0.134, 0.139, ..., 0.387, 0.322]])\n\n '''\n\n util.valid_audio(arg_0)\n\n if arg_3:\n arg_0 = np.pad(arg_0, int(arg_1 // 2), mode='edge')\n\n arg_5 = util.frame(arg_0, arg_1, arg_2)\n\n arg_4['axis'] = 0\n arg_4.setdefault('pad', False)\n\n arg_6 = zero_crossings(arg_5, **arg_4)\n\n return np.mean(arg_6, axis=0, keepdims=True)"} +{"_id": "doc_2135", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=arg_4.inf, arg_6=2048,\n arg_7=512, arg_8=None, arg_9='hann', arg_10=True,\n arg_11='reflect', arg_12=None, **arg_13):\n \"\"\"Compute a chromagram from a waveform or power spectrogram.\n\n This implementation is derived from `chromagram_E` [1]_\n\n .. [1] Ellis, Daniel P.W. \"Chroma feature analysis and synthesis\"\n 2007/04/21\n http://labrosa.ee.columbia.edu/matlab/chroma-ansyn/\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)] or None\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n S : np.ndarray [shape=(d, t)] or None\n power spectrogram\n\n norm : float or None\n Column-wise normalization.\n See `librosa.util.normalize` for details.\n\n If `None`, no normalization is performed.\n\n n_fft : int > 0 [scalar]\n FFT window size if provided `y, sr` instead of `S`\n\n hop_length : int > 0 [scalar]\n hop length if provided `y, sr` instead of `S`\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n\n tuning : float in `[-0.5, 0.5)` [scalar] or None.\n Deviation from A440 tuning in fractional bins (cents).\n If `None`, it is automatically estimated.\n\n kwargs : additional keyword arguments\n Arguments to parameterize chroma filters.\n See `librosa.filters.chroma` for details.\n\n Returns\n -------\n chromagram : np.ndarray [shape=(n_chroma, t)]\n Normalized energy for each chroma bin at each frame.\n\n See Also\n --------\n librosa.filters.chroma\n Chroma filter bank construction\n librosa.util.normalize\n Vector normalization\n\n Examples\n --------\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.feature.Func(y=y, sr=sr)\n array([[ 0.974, 0.881, ..., 0.925, 1. ],\n [ 1. 
, 0.841, ..., 0.882, 0.878],\n ...,\n [ 0.658, 0.985, ..., 0.878, 0.764],\n [ 0.969, 0.92 , ..., 0.974, 0.915]])\n\n Use an energy (magnitude) spectrum instead of power spectrogram\n\n >>> S = np.abs(librosa.stft(y))\n >>> chroma = librosa.feature.Func(S=S, sr=sr)\n >>> chroma\n array([[ 0.884, 0.91 , ..., 0.861, 0.858],\n [ 0.963, 0.785, ..., 0.968, 0.896],\n ...,\n [ 0.871, 1. , ..., 0.928, 0.829],\n [ 1. , 0.982, ..., 0.93 , 0.878]])\n\n Use a pre-computed power spectrogram with a larger frame\n\n >>> S = np.abs(librosa.stft(y, n_fft=4096))**2\n >>> chroma = librosa.feature.Func(S=S, sr=sr)\n >>> chroma\n array([[ 0.685, 0.477, ..., 0.961, 0.986],\n [ 0.674, 0.452, ..., 0.952, 0.926],\n ...,\n [ 0.844, 0.575, ..., 0.934, 0.869],\n [ 0.793, 0.663, ..., 0.964, 0.972]])\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10, 4))\n >>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')\n >>> plt.colorbar()\n >>> plt.title('Chromagram')\n >>> plt.tight_layout()\n\n \"\"\"\n\n arg_2, arg_6 = _spectrogram(arg_0=arg_0, arg_2=arg_2, arg_6=arg_6, arg_7=arg_7, power=2,\n arg_8=arg_8, arg_9=arg_9, arg_10=arg_10,\n arg_11=arg_11)\n\n arg_14 = arg_13.get('n_chroma', 12)\n\n if arg_12 is None:\n arg_12 = estimate_tuning(arg_2=arg_2, arg_1=arg_1, bins_per_octave=arg_14)\n\n # Get the filter bank\n if 'A440' not in arg_13:\n arg_13['A440'] = 440.0 * 2.0**(float(arg_12) / arg_14)\n\n arg_15 = filters.chroma(arg_1, arg_6, **arg_13)\n\n # Compute raw chroma\n arg_16 = arg_4.dot(arg_15, arg_2)\n\n # Compute normalization factor for each frame\n return util.normalize(arg_16, arg_3=arg_3, axis=0)"} +{"_id": "doc_2136", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=512, arg_4=None,\n arg_5=arg_6.inf, arg_8=0.0, arg_9=None, arg_10=12,\n arg_11=7, arg_12=None, arg_13=None, arg_14='full'):\n r'''Constant-Q chromagram\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0\n sampling rate of `y`\n\n C : np.ndarray [shape=(d, t)] [Optional]\n a pre-computed constant-Q spectrogram\n\n hop_length : int > 0\n number of samples between successive chroma frames\n\n fmin : float > 0\n minimum frequency to analyze in the CQT.\n Default: 'C1' ~= 32.7 Hz\n\n norm : int > 0, +-np.inf, or None\n Column-wise normalization of the chromagram.\n\n threshold : float\n Pre-normalization energy threshold. Values below the\n threshold are discarded, resulting in a sparse chromagram.\n\n tuning : float\n Deviation (in cents) from A440 tuning\n\n n_chroma : int > 0\n Number of chroma bins to produce\n\n n_octaves : int > 0\n Number of octaves to analyze above `fmin`\n\n window : None or np.ndarray\n Optional window parameter to `filters.cq_to_chroma`\n\n bins_per_octave : int > 0\n Number of bins per octave in the CQT.\n Default: matches `n_chroma`\n\n cqt_mode : ['full', 'hybrid']\n Constant-Q transform mode\n\n Returns\n -------\n chromagram : np.ndarray [shape=(n_chroma, t)]\n The output chromagram\n\n See Also\n --------\n librosa.util.normalize\n librosa.core.cqt\n librosa.core.hybrid_cqt\n chroma_stft\n\n Examples\n --------\n Compare a long-window STFT chromagram to the CQT chromagram\n\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... offset=10, duration=15)\n >>> chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr,\n ... 
n_chroma=12, n_fft=4096)\n >>> chroma_cq = librosa.feature.Func(y=y, sr=sr)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(2,1,1)\n >>> librosa.display.specshow(chroma_stft, y_axis='chroma')\n >>> plt.title('chroma_stft')\n >>> plt.colorbar()\n >>> plt.subplot(2,1,2)\n >>> librosa.display.specshow(chroma_cq, y_axis='chroma', x_axis='time')\n >>> plt.title('Func')\n >>> plt.colorbar()\n >>> plt.tight_layout()\n\n '''\n\n arg_15 = {'full': cqt, 'hybrid': hybrid_cqt}\n\n if arg_13 is None:\n arg_13 = arg_10\n\n # Build the CQT if we don't have one already\n if arg_2 is None:\n arg_2 = arg_6.abs(arg_15[arg_14](arg_0, arg_1=arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n n_bins=arg_11 * arg_13,\n arg_13=arg_13,\n arg_9=arg_9))\n\n # Map to chroma\n arg_16 = filters.cq_to_chroma(arg_2.shape[0],\n arg_13=arg_13,\n arg_10=arg_10,\n arg_4=arg_4,\n arg_12=arg_12)\n arg_17 = arg_16.dot(arg_2)\n\n if arg_8 is not None:\n arg_17[arg_17 < arg_8] = 0.0\n\n # Normalize\n if arg_5 is not None:\n arg_17 = util.normalize(arg_17, arg_5=arg_5, axis=0)\n\n return arg_17"} +{"_id": "doc_2137", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=2048, arg_4=512,\n arg_5=None, arg_6='hann', arg_7=True, arg_8='reflect',\n arg_9=2.0, **arg_10):\n \"\"\"Compute a mel-scaled spectrogram.\n\n If a spectrogram input `S` is provided, then it is mapped directly onto\n the mel basis `mel_f` by `mel_f.dot(S)`.\n\n If a time-series input `y, sr` is provided, then its magnitude spectrogram\n `S` is first computed, and then mapped onto the mel scale by\n `mel_f.dot(S**power)`. By default, `power=2` operates on a power spectrum.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)] or None\n audio time-series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n S : np.ndarray [shape=(d, t)]\n spectrogram\n\n n_fft : int > 0 [scalar]\n length of the FFT window\n\n hop_length : int > 0 [scalar]\n number of samples between successive frames.\n See `librosa.core.stft`\n\n win_length : int <= n_fft [scalar]\n Each frame of audio is windowed by `window()`.\n The window will be of length `win_length` and then padded\n with zeros to match `n_fft`.\n\n If unspecified, defaults to ``win_length = n_fft``.\n\n window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]\n - a window specification (string, tuple, or number);\n see `scipy.signal.get_window`\n - a window function, such as `scipy.signal.hanning`\n - a vector or array of length `n_fft`\n\n .. 
see also:: `filters.get_window`\n\n center : boolean\n - If `True`, the signal `y` is padded so that frame\n `t` is centered at `y[t * hop_length]`.\n - If `False`, then frame `t` begins at `y[t * hop_length]`\n\n pad_mode : string\n If `center=True`, the padding mode to use at the edges of the signal.\n By default, STFT uses reflection padding.\n\n power : float > 0 [scalar]\n Exponent for the magnitude Func.\n e.g., 1 for energy, 2 for power, etc.\n\n kwargs : additional keyword arguments\n Mel filter bank parameters.\n See `librosa.filters.mel` for details.\n\n Returns\n -------\n S : np.ndarray [shape=(n_mels, t)]\n Mel spectrogram\n\n See Also\n --------\n librosa.filters.mel\n Mel filter bank construction\n\n librosa.core.stft\n Short-time Fourier Transform\n\n\n Examples\n --------\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.feature.Func(y=y, sr=sr)\n array([[ 2.891e-07, 2.548e-03, ..., 8.116e-09, 5.633e-09],\n [ 1.986e-07, 1.162e-02, ..., 9.332e-08, 6.716e-09],\n ...,\n [ 3.668e-09, 2.029e-08, ..., 3.208e-09, 2.864e-09],\n [ 2.561e-10, 2.096e-09, ..., 7.543e-10, 6.101e-10]])\n\n Using a pre-computed power spectrogram\n\n >>> D = np.abs(librosa.stft(y))**2\n >>> S = librosa.feature.Func(S=D)\n\n >>> # Passing through arguments to the Mel filters\n >>> S = librosa.feature.Func(y=y, sr=sr, n_mels=128,\n ... fmax=8000)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10, 4))\n >>> librosa.display.specshow(librosa.power_to_db(S,\n ... ref=np.max),\n ... y_axis='mel', fmax=8000,\n ... x_axis='time')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Mel spectrogram')\n >>> plt.tight_layout()\n \"\"\"\n\n arg_2, arg_3 = _spectrogram(arg_0=arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, arg_9=arg_9,\n arg_5=arg_5, arg_6=arg_6, arg_7=arg_7,\n arg_8=arg_8)\n\n # Build a Mel filter\n arg_11 = filters.mel(arg_1, arg_3, **arg_10)\n\n return np.dot(arg_11, arg_2)"} +{"_id": "doc_2138", "title": "", "text": "def Func(arg_0, arg_1): # pragma: no cover\n '''Jaccard similarity between two intervals\n\n Parameters\n ----------\n int_a, int_b : np.ndarrays, shape=(2,)\n\n Returns\n -------\n Jaccard similarity between intervals\n '''\n arg_2 = [arg_0[1], arg_1[1]]\n if arg_2[1] < arg_2[0]:\n arg_2.reverse()\n\n arg_3 = [arg_0[0], arg_1[0]]\n if arg_3[1] < arg_3[0]:\n arg_3.reverse()\n\n arg_4 = arg_2[0] - arg_3[1]\n if arg_4 < 0:\n arg_4 = 0.\n\n arg_5 = arg_2[1] - arg_3[0]\n\n if arg_5 > 0:\n return arg_4 / arg_5\n\n return 0.0"} +{"_id": "doc_2139", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True): # pragma: no cover\n '''Numba-accelerated interval matching algorithm.\n\n '''\n # sort index of the interval starts\n arg_3 = np.argsort(arg_1[:, 0])\n\n # sort index of the interval ends\n arg_4 = np.argsort(arg_1[:, 1])\n\n # and sorted values of starts\n arg_5 = arg_1[arg_3, 0]\n # and ends\n arg_6 = arg_1[arg_4, 1]\n\n arg_7 = np.searchsorted(arg_5, arg_0[:, 1], side='right')\n arg_8 = np.searchsorted(arg_6, arg_0[:, 0], side='left')\n\n arg_9 = np.empty(len(arg_0), dtype=numba.uint32)\n for arg_10 in range(len(arg_0)):\n arg_11 = arg_0[arg_10]\n\n # Find the intervals that start after our query ends\n arg_12 = arg_7[arg_10]\n # And the intervals that end after our query begins\n arg_13 = arg_8[arg_10]\n\n # Candidates for overlapping have to (end after we start) and (begin before we end)\n arg_14 = set(arg_3[:arg_12]) & set(arg_4[arg_13:])\n\n # Proceed as before\n if len(arg_14) > 0:\n arg_9[arg_10] = __match_interval_overlaps(arg_11, 
arg_1, arg_14)\n elif arg_2:\n # Numba only lets us use compile-time constants in exception messages\n raise ParameterError\n else:\n # Find the closest interval\n # (start_index[after_query] - query[1]) is the distance to the next interval\n # (query[0] - end_index[before_query])\n arg_15 = np.inf\n arg_16 = np.inf\n if arg_8[arg_10] > 0:\n arg_15 = arg_11[0] - arg_6[arg_8[arg_10]-1]\n if arg_7[arg_10] + 1 < len(arg_1):\n arg_16 = arg_5[arg_7[arg_10]+1] - arg_11[1]\n if arg_15 < arg_16:\n arg_9[arg_10] = arg_4[arg_8[arg_10]-1]\n else:\n arg_9[arg_10] = arg_3[arg_7[arg_10]+1]\n return arg_9"} +{"_id": "doc_2140", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n '''Match one set of time intervals to another.\n\n This can be useful for tasks such as mapping beat timings\n to segments.\n\n Each element `[a, b]` of `intervals_from` is matched to the\n element `[c, d]` of `intervals_to` which maximizes the\n Jaccard similarity between the intervals:\n\n `max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|`\n\n In `strict=True` mode, if there is no interval with positive\n intersection with `[a,b]`, an exception is thrown.\n\n In `strict=False` mode, any interval `[a, b]` that has no\n intersection with any element of `intervals_to` is instead\n matched to the interval `[c, d]` which minimizes\n\n `min(|b - c|, |a - d|)`\n\n that is, the disjoint interval `[c, d]` with a boundary closest\n to `[a, b]`.\n\n .. note:: An element of `intervals_to` may be matched to multiple\n entries of `intervals_from`.\n\n Parameters\n ----------\n intervals_from : np.ndarray [shape=(n, 2)]\n The time range for source intervals.\n The `i` th interval spans time `intervals_from[i, 0]`\n to `intervals_from[i, 1]`.\n `intervals_from[0, 0]` should be 0, `intervals_from[-1, 1]`\n should be the track duration.\n\n intervals_to : np.ndarray [shape=(m, 2)]\n Analogous to `intervals_from`.\n\n strict : bool\n If `True`, intervals can only match if they intersect.\n If `False`, disjoint intervals can match.\n\n Returns\n -------\n interval_mapping : np.ndarray [shape=(n,)]\n For each interval in `intervals_from`, the\n corresponding interval in `intervals_to`.\n\n See Also\n --------\n match_events\n\n Raises\n ------\n ParameterError\n If either array of input intervals is not the correct shape\n\n If `strict=True` and some element of `intervals_from` is disjoint from\n every element of `intervals_to`.\n\n Examples\n --------\n >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])\n >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])\n >>> librosa.util.Func(ints_from, ints_to)\n array([2, 1, 2], dtype=uint32)\n >>> # [3, 5] => [4, 5] (ints_to[2])\n >>> # [1, 4] => [1, 3] (ints_to[1])\n >>> # [4, 5] => [4, 5] (ints_to[2])\n\n The reverse matching of the above is not possible in `strict` mode\n because `[6, 7]` is disjoint from all intervals in `ints_from`.\n With `strict=False`, we get the following:\n >>> librosa.util.Func(ints_to, ints_from, strict=False)\n array([1, 1, 2, 2], dtype=uint32)\n >>> # [0, 2] => [1, 4] (ints_from[1])\n >>> # [1, 3] => [1, 4] (ints_from[1])\n >>> # [4, 5] => [4, 5] (ints_from[2])\n >>> # [6, 7] => [4, 5] (ints_from[2])\n '''\n\n if len(arg_0) == 0 or len(arg_1) == 0:\n raise ParameterError('Attempting to match empty interval list')\n\n # Verify that the input intervals has correct shape and size\n valid_intervals(arg_0)\n valid_intervals(arg_1)\n\n try:\n return __Func(arg_0, arg_1, arg_2=arg_2)\n except ParameterError:\n six.reraise(ParameterError,\n ParameterError('Unable 
to match intervals with strict={}'.format(arg_2)),\n sys.exc_info()[2])"} +{"_id": "doc_2141", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=True):\n '''Match one set of events to another.\n\n This is useful for tasks such as matching beats to the nearest\n detected onsets, or frame-aligned events to the nearest zero-crossing.\n\n .. note:: A target event may be matched to multiple source events.\n\n Examples\n --------\n >>> # Sources are multiples of 7\n >>> s_from = np.arange(0, 100, 7)\n >>> s_from\n array([ 0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91,\n 98])\n >>> # Targets are multiples of 10\n >>> s_to = np.arange(0, 100, 10)\n >>> s_to\n array([ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90])\n >>> # Find the matching\n >>> idx = librosa.util.Func(s_from, s_to)\n >>> idx\n array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])\n >>> # Print each source value to its matching target\n >>> zip(s_from, s_to[idx])\n [(0, 0), (7, 10), (14, 10), (21, 20), (28, 30), (35, 30),\n (42, 40), (49, 50), (56, 60), (63, 60), (70, 70), (77, 80),\n (84, 80), (91, 90), (98, 90)]\n\n Parameters\n ----------\n events_from : ndarray [shape=(n,)]\n Array of events (eg, times, sample or frame indices) to match from.\n\n events_to : ndarray [shape=(m,)]\n Array of events (eg, times, sample or frame indices) to\n match against.\n\n left : bool\n right : bool\n If `False`, then matched events cannot be to the left (or right)\n of source events.\n\n Returns\n -------\n event_mapping : np.ndarray [shape=(n,)]\n For each event in `events_from`, the corresponding event\n index in `events_to`.\n\n `event_mapping[i] == arg min |events_from[i] - events_to[:]|`\n\n See Also\n --------\n match_intervals\n\n Raises\n ------\n ParameterError\n If either array of input events is not the correct shape\n '''\n if len(arg_0) == 0 or len(arg_1) == 0:\n raise ParameterError('Attempting to match empty event list')\n\n # If we can't match left or right, then only strict equivalence\n # counts as a match.\n if not (arg_2 or arg_3) and not np.all(np.in1d(arg_0, arg_1)):\n raise ParameterError('Cannot match events with left=right=False '\n 'and events_from is not contained '\n 'in events_to')\n\n # If we can't match to the left, then there should be at least one\n # target event greater-equal to every source event\n if (not arg_2) and max(arg_1) < max(arg_0):\n raise ParameterError('Cannot match events with left=False '\n 'and max(events_to) < max(events_from)')\n\n # If we can't match to the right, then there should be at least one\n # target event less-equal to every source event\n if (not arg_3) and min(arg_1) > min(arg_0):\n raise ParameterError('Cannot match events with right=False '\n 'and min(events_to) > min(events_from)')\n\n # array of matched items\n arg_4 = np.empty_like(arg_0, dtype=np.int)\n\n return __Func_helper(arg_4, arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_2142", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4='linear',\n arg_5=0, arg_6=0):\n '''Populate a harmonic tensor from a time-frequency representation.\n\n Parameters\n ----------\n harmonic_out : np.ndarray, shape=(len(h_range), X.shape)\n The output array to store harmonics\n\n X : np.ndarray\n The input energy\n\n freqs : np.ndarray, shape=(x.shape[axis])\n The frequency values corresponding to x's elements along the\n chosen axis.\n\n h_range : list-like, non-negative\n Harmonics to compute. 
The first harmonic (1) corresponds to `x`\n itself.\n Values less than one (e.g., 1/2) correspond to sub-harmonics.\n\n kind : str\n Interpolation type. See `scipy.interpolate.interp1d`.\n\n fill_value : float\n The value to fill when extrapolating beyond the observed\n frequency range.\n\n axis : int\n The axis along which to compute harmonics\n\n See Also\n --------\n harmonics\n scipy.interpolate.interp1d\n\n\n Examples\n --------\n Estimate the harmonics of a time-averaged tempogram\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... duration=15, offset=30)\n >>> # Compute the time-varying tempogram and average over time\n >>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)\n >>> # We'll measure the first five harmonics\n >>> h_range = [1, 2, 3, 4, 5]\n >>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)\n >>> # Build the harmonic tensor\n >>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)\n >>> print(t_harmonics.shape)\n (5, 384)\n\n >>> # And plot the results\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)\n >>> plt.yticks(0.5 + np.arange(len(h_range)),\n ... ['{:.3g}'.format(_) for _ in h_range])\n >>> plt.ylabel('Harmonic')\n >>> plt.xlabel('Tempo (BPM)')\n >>> plt.tight_layout()\n\n We can also compute frequency harmonics for spectrograms.\n To calculate subharmonic energy, use values < 1.\n\n >>> h_range = [1./3, 1./2, 1, 2, 3, 4]\n >>> S = np.abs(librosa.stft(y))\n >>> fft_freqs = librosa.fft_frequencies(sr=sr)\n >>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)\n >>> print(S_harm.shape)\n (6, 1025, 646)\n\n >>> plt.figure()\n >>> for i, _sh in enumerate(S_harm, 1):\n ... plt.subplot(3,2,i)\n ... librosa.display.specshow(librosa.amplitude_to_db(_sh,\n ... ref=S.max()),\n ... sr=sr, y_axis='log')\n ... plt.title('h={:.3g}'.format(h_range[i-1]))\n ... 
plt.yticks([])\n >>> plt.tight_layout()\n '''\n\n # Note: this only works for fixed-grid, 1d interpolation\n arg_7 = scipy.interpolate.interp1d(arg_2, arg_1,\n arg_4=arg_4,\n arg_6=arg_6,\n copy=False,\n bounds_error=False,\n arg_5=arg_5)\n\n arg_8 = [slice(None)] * arg_0.ndim\n\n # Compute the output index of the interpolated values\n arg_9 = 1 + (arg_6 % arg_1.ndim)\n\n # Iterate over the harmonics range\n for arg_10, arg_11 in enumerate(arg_3):\n arg_8[0] = arg_10\n\n # Iterate over frequencies\n for arg_12, arg_13 in enumerate(arg_2):\n # Offset the output axis by 1 to account for the harmonic index\n arg_8[arg_9] = arg_12\n\n # Estimate the harmonic energy at this frequency across time\n arg_0[arg_14(arg_8)] = arg_7(arg_11 * arg_13)"} +{"_id": "doc_2143", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=True, arg_3=0.0, arg_4=None,\n arg_5=arg_6.float32, arg_8='kaiser_best'):\n \"\"\"Load an audio file as a floating point time series.\n\n Audio will be automatically resampled to the given rate\n (default `sr=22050`).\n\n To preserve the native sampling rate of the file, use `sr=None`.\n\n Parameters\n ----------\n path : string, int, or file-like object\n path to the input file.\n\n Any codec supported by `soundfile` or `audioread` will work.\n\n If the codec is supported by `soundfile`, then `path` can also be\n an open file descriptor (int), or any object implementing Python's\n file interface.\n\n If the codec is not supported by `soundfile` (e.g., MP3), then only\n string file paths are supported.\n\n sr : number > 0 [scalar]\n target sampling rate\n\n 'None' uses the native sampling rate\n\n mono : bool\n convert signal to mono\n\n offset : float\n start reading after this time (in seconds)\n\n duration : float\n only Func up to this much audio (in seconds)\n\n dtype : numeric type\n data type of `y`\n\n res_type : str\n resample type (see note)\n\n .. note::\n By default, this uses `resampy`'s high-quality mode ('kaiser_best').\n\n For alternative resampling modes, see `resample`\n\n .. note::\n `audioread` may truncate the precision of the audio data to 16 bits.\n\n See https://librosa.github.io/librosa/ioformats.html for alternate\n Funcing methods.\n\n\n Returns\n -------\n y : np.ndarray [shape=(n,) or (2, n)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n\n Examples\n --------\n >>> # Load an ogg vorbis file\n >>> filename = librosa.util.example_audio_file()\n >>> y, sr = librosa.Func(filename)\n >>> y\n array([ -4.756e-06, -6.020e-06, ..., -1.040e-06, 0.000e+00], dtype=float32)\n >>> sr\n 22050\n\n >>> # Load a file and resample to 11 KHz\n >>> filename = librosa.util.example_audio_file()\n >>> y, sr = librosa.Func(filename, sr=11025)\n >>> y\n array([ -2.077e-06, -2.928e-06, ..., -4.395e-06, 0.000e+00], dtype=float32)\n >>> sr\n 11025\n\n >>> # Load 5 seconds of a file, starting 15 seconds in\n >>> filename = librosa.util.example_audio_file()\n >>> y, sr = librosa.Func(filename, offset=15.0, duration=5.0)\n >>> y\n array([ 0.069, 0.1 , ..., -0.101, 0. 
], dtype=float32)\n >>> sr\n 22050\n\n \"\"\"\n\n try:\n with sf.SoundFile(arg_0) as sf_desc:\n arg_9 = sf_desc.samplerate\n if arg_3:\n # Seek to the start of the target read\n sf_desc.seek(int(arg_3 * arg_9))\n if arg_4 is not None:\n arg_10 = int(arg_4 * arg_9)\n else:\n arg_10 = -1\n\n # Load the target number of frames, and transpose to match librosa form\n arg_11 = sf_desc.read(frames=arg_10, arg_5=arg_5, always_2d=False).T\n\n except RuntimeError as exc:\n # If soundfile failed, fall back to the audioread Funcer\n arg_11, arg_9 = __audioread_Func(arg_0, arg_3, arg_4, arg_5)\n\n # Final cleanup for dtype and contiguity\n if arg_2:\n arg_11 = to_mono(arg_11)\n\n if arg_1 is not None:\n arg_11 = resample(arg_11, arg_9, arg_1, arg_8=arg_8)\n\n else:\n arg_1 = arg_9\n\n return arg_11, arg_1"} +{"_id": "doc_2144", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='kaiser_best', arg_4=True, arg_5=False, **arg_6):\n \"\"\"Resample a time series from orig_sr to target_sr\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,) or shape=(2, n)]\n audio time series. Can be mono or stereo.\n\n orig_sr : number > 0 [scalar]\n original sampling rate of `y`\n\n target_sr : number > 0 [scalar]\n target sampling rate\n\n res_type : str\n Func type (see note)\n\n .. note::\n By default, this uses `resampy`'s high-quality mode ('kaiser_best').\n\n To use a faster method, set `res_type='kaiser_fast'`.\n\n To use `scipy.signal.Func`, set `res_type='fft'` or `res_type='scipy'`.\n\n To use `scipy.signal.Func_poly`, set `res_type='polyphase'`.\n\n .. note::\n When using `res_type='polyphase'`, only integer sampling rates are\n supported.\n\n fix : bool\n adjust the length of the Funcd signal to be of size exactly\n `ceil(target_sr * len(y) / orig_sr)`\n\n scale : bool\n Scale the Funcd signal so that `y` and `y_hat` have approximately\n equal total energy.\n\n kwargs : additional keyword arguments\n If `fix==True`, additional keyword arguments to pass to\n `librosa.util.fix_length`.\n\n Returns\n -------\n y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]\n `y` Funcd from `orig_sr` to `target_sr`\n\n Raises\n ------\n ParameterError\n If `res_type='polyphase'` and `orig_sr` or `target_sr` are not both\n integer-valued.\n\n See Also\n --------\n librosa.util.fix_length\n scipy.signal.Func\n resampy.Func\n\n Notes\n -----\n This function caches at level 20.\n\n Examples\n --------\n Downsample from 22 KHz to 8 KHz\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), sr=22050)\n >>> y_8k = librosa.Func(y, sr, 8000)\n >>> y.shape, y_8k.shape\n ((1355168,), (491671,))\n \"\"\"\n\n # First, validate the audio buffer\n util.valid_audio(arg_0, mono=False)\n\n if arg_1 == arg_2:\n return arg_0\n\n arg_7 = float(arg_2) / arg_1\n\n arg_8 = int(np.ceil(arg_0.shape[-1] * arg_7))\n\n if arg_3 in ('scipy', 'fft'):\n arg_9 = scipy.signal.Func(arg_0, arg_8, axis=-1)\n elif arg_3 == 'polyphase':\n if int(arg_1) != arg_1 or int(arg_2) != arg_2:\n raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.')\n\n # For polyphase resampling, we need up- and down-sampling ratios\n # We can get those from the greatest common divisor of the rates\n # as long as the rates are integrable\n arg_1 = int(arg_1)\n arg_2 = int(arg_2)\n arg_10 = np.gcd(arg_1, arg_2)\n arg_9 = scipy.signal.Func_poly(arg_0, arg_2 // arg_10, arg_1 // arg_10, axis=-1)\n else:\n arg_9 = resampy.Func(arg_0, arg_1, arg_2, filter=arg_3, axis=-1)\n\n if arg_4:\n arg_9 = util.fix_length(arg_9, arg_8, 
**arg_6)\n\n if arg_5:\n arg_9 /= np.sqrt(arg_7)\n\n return np.ascontiguousarray(arg_9, dtype=arg_0.dtype)"} +{"_id": "doc_2145", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=22050, arg_3=512,\n arg_4=1000.0, arg_5=0.1, arg_6=None, arg_7=None):\n \"\"\"Returns a signal with the signal `click` placed at each specified time\n\n Parameters\n ----------\n times : np.ndarray or None\n times to place Func, in seconds\n\n frames : np.ndarray or None\n frame indices to place Func\n\n sr : number > 0\n desired sampling rate of the output signal\n\n hop_length : int > 0\n if positions are specified by `frames`, the number of samples between frames.\n\n click_freq : float > 0\n frequency (in Hz) of the default click signal. Default is 1KHz.\n\n click_duration : float > 0\n duration (in seconds) of the default click signal. Default is 100ms.\n\n click : np.ndarray or None\n optional click signal sample to use instead of the default blip.\n\n length : int > 0\n desired number of samples in the output signal\n\n\n Returns\n -------\n click_signal : np.ndarray\n Synthesized click signal\n\n\n Raises\n ------\n ParameterError\n - If neither `times` nor `frames` are provided.\n - If any of `click_freq`, `click_duration`, or `length` are out of range.\n\n\n Examples\n --------\n >>> # Sonify detected beat events\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)\n >>> y_beats = librosa.Func(frames=beats, sr=sr)\n\n >>> # Or generate a signal of the same length as y\n >>> y_beats = librosa.Func(frames=beats, sr=sr, length=len(y))\n\n >>> # Or use timing instead of frame indices\n >>> times = librosa.frames_to_time(beats, sr=sr)\n >>> y_beat_times = librosa.Func(times=times, sr=sr)\n\n >>> # Or with a click frequency of 880Hz and a 500ms sample\n >>> y_beat_times880 = librosa.Func(times=times, sr=sr,\n ... click_freq=880, click_duration=0.5)\n\n Display click waveform next to the spectrogram\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> S = librosa.feature.melspectrogram(y=y, sr=sr)\n >>> ax = plt.subplot(2,1,2)\n >>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),\n ... 
x_axis='time', y_axis='mel')\n >>> plt.subplot(2,1,1, sharex=ax)\n >>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat Func')\n >>> plt.legend()\n >>> plt.xlim(15, 30)\n >>> plt.tight_layout()\n \"\"\"\n\n # Compute sample positions from time or frames\n if arg_0 is None:\n if arg_1 is None:\n raise ParameterError('either \"times\" or \"frames\" must be provided')\n\n arg_8 = frames_to_samples(arg_1, arg_3=arg_3)\n else:\n # Convert times to positions\n arg_8 = time_to_samples(arg_0, arg_2=arg_2)\n\n if arg_6 is not None:\n # Check that we have a well-formed audio buffer\n util.valid_audio(arg_6, mono=True)\n\n else:\n # Create default click signal\n if arg_5 <= 0:\n raise ParameterError('click_duration must be strictly positive')\n\n if arg_4 <= 0:\n raise ParameterError('click_freq must be strictly positive')\n\n arg_9 = 2 * np.pi * arg_4 / float(arg_2)\n\n arg_6 = np.logspace(0, -10,\n num=int(np.round(arg_2 * arg_5)),\n base=2.0)\n\n arg_6 *= np.sin(arg_9 * np.arange(len(arg_6)))\n\n # Set default length\n if arg_7 is None:\n arg_7 = arg_8.max() + arg_6.shape[0]\n else:\n if arg_7 < 1:\n raise ParameterError('length must be a positive integer')\n\n # Filter out any positions past the length boundary\n arg_8 = arg_8[arg_8 < arg_7]\n\n # Pre-allocate click signal\n arg_10 = np.zeros(arg_7, dtype=np.float32)\n\n # Place Func\n for arg_11 in arg_8:\n # Compute the end-point of this click\n arg_12 = arg_11 + arg_6.shape[0]\n\n if arg_12 >= arg_7:\n arg_10[arg_11:] += arg_6[:arg_7 - arg_11]\n else:\n # Normally, just add a click here\n arg_10[arg_11:arg_12] += arg_6\n\n return arg_10"} +{"_id": "doc_2146", "title": "", "text": "def Func(arg_0, arg_1=22050, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"Returns a pure Func signal. The signal generated is a cosine wave.\n\n Parameters\n ----------\n frequency : float > 0\n frequency\n\n sr : number > 0\n desired sampling rate of the output signal\n\n length : int > 0\n desired number of samples in the output signal. When both `duration` and `length` are defined,\n `length` would take priority.\n\n duration : float > 0\n desired duration in seconds. When both `duration` and `length` are defined, `length` would take priority.\n\n phi : float or None\n phase offset, in radians. If unspecified, defaults to `-np.pi * 0.5`.\n\n\n Returns\n -------\n Func_signal : np.ndarray [shape=(length,), dtype=float64]\n Synthesized pure sine Func signal\n\n\n Raises\n ------\n ParameterError\n - If `frequency` is not provided.\n - If neither `length` nor `duration` are provided.\n\n\n Examples\n --------\n >>> # Generate a pure sine Func A4\n >>> Func = librosa.Func(440, duration=1)\n\n >>> # Or generate the same signal using `length`\n >>> Func = librosa.Func(440, sr=22050, length=22050)\n\n Display spectrogram\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> S = librosa.feature.melspectrogram(y=Func)\n >>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),\n ... 
x_axis='time', y_axis='mel')\n \"\"\"\n\n if arg_0 is None:\n raise ParameterError('\"frequency\" must be provided')\n\n # Compute signal length\n if arg_2 is None:\n if arg_3 is None:\n raise ParameterError('either \"length\" or \"duration\" must be provided')\n arg_2 = arg_3 * arg_1\n\n if arg_4 is None:\n arg_4 = -np.pi * 0.5\n\n arg_5 = 1.0 / arg_1\n return np.cos(2 * np.pi * arg_0 * (np.arange(arg_5 * arg_2, arg_5=arg_5)) + arg_4)"} +{"_id": "doc_2147", "title": "", "text": "def Func(arg_0, arg_1, arg_2=22050, arg_3=None, arg_4=None, arg_5=False, arg_6=None):\n \"\"\"Returns a Func signal that goes from frequency `fmin` to frequency `fmax`\n\n Parameters\n ----------\n fmin : float > 0\n initial frequency\n\n fmax : float > 0\n final frequency\n\n sr : number > 0\n desired sampling rate of the output signal\n\n length : int > 0\n desired number of samples in the output signal.\n When both `duration` and `length` are defined, `length` would take priority.\n\n duration : float > 0\n desired duration in seconds.\n When both `duration` and `length` are defined, `length` would take priority.\n\n linear : boolean\n - If `True`, use a linear sweep, i.e., frequency changes linearly with time\n - If `False`, use an exponential sweep.\n Default is `False`.\n\n phi : float or None\n phase offset, in radians.\n If unspecified, defaults to `-np.pi * 0.5`.\n\n\n Returns\n -------\n Func_signal : np.ndarray [shape=(length,), dtype=float64]\n Synthesized Func signal\n\n\n Raises\n ------\n ParameterError\n - If either `fmin` or `fmax` are not provided.\n - If neither `length` nor `duration` are provided.\n\n\n See Also\n --------\n scipy.signal.Func\n\n\n Examples\n --------\n >>> # Generate an exponential Func from A4 to A5\n >>> exponential_Func = librosa.Func(440, 880, duration=1)\n\n >>> # Or generate the same signal using `length`\n >>> exponential_Func = librosa.Func(440, 880, sr=22050, length=22050)\n\n >>> # Or generate a linear Func instead\n >>> linear_Func = librosa.Func(440, 880, duration=1, linear=True)\n\n Display spectrogram for both exponential and linear Funcs\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> S_exponential = librosa.feature.melspectrogram(y=exponential_Func)\n >>> ax = plt.subplot(2,1,1)\n >>> librosa.display.specshow(librosa.power_to_db(S_exponential, ref=np.max),\n ... x_axis='time', y_axis='mel')\n >>> plt.subplot(2,1,2, sharex=ax)\n >>> S_linear = librosa.feature.melspectrogram(y=linear_Func)\n >>> librosa.display.specshow(librosa.power_to_db(S_linear, ref=np.max),\n ... x_axis='time', y_axis='mel')\n >>> plt.tight_layout()\n \"\"\"\n\n if arg_0 is None or arg_1 is None:\n raise ParameterError('both \"fmin\" and \"fmax\" must be provided')\n\n # Compute signal duration\n arg_7 = 1.0 / arg_2\n if arg_3 is None:\n if arg_4 is None:\n raise ParameterError('either \"length\" or \"duration\" must be provided')\n else:\n arg_4 = arg_7 * arg_3\n\n if arg_6 is None:\n arg_6 = -np.pi * 0.5\n\n arg_8 = 'linear' if arg_5 else 'logarithmic'\n return scipy.signal.Func(\n np.arange(arg_4, step=arg_7),\n arg_0,\n arg_4,\n arg_1,\n arg_8=arg_8,\n arg_6=arg_6 / np.pi * 180, # scipy.signal.Func uses degrees for phase offset\n )"} +{"_id": "doc_2148", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Phase-vocoder time stretch demo function.\n\n :parameters:\n - input_file : str\n path to input audio\n - output_file : str\n path to save output (wav)\n - speed : float > 0\n speed up by this factor\n '''\n\n # 1. 
Load the wav file, resample\n print('Loading ', arg_0)\n\n arg_3, arg_4 = librosa.load(arg_0)\n\n # 2. Time-stretch through effects module\n print('Playing back at {:3.0f}% speed'.format(arg_2 * 100))\n\n arg_5 = librosa.effects.time_stretch(arg_3, arg_2)\n\n print('Saving stretched audio to: ', arg_1)\n librosa.output.write_wav(arg_1, arg_5, arg_4)"} +{"_id": "doc_2149", "title": "", "text": "def Func(arg_0):\n '''Argparse function to get the program parameters'''\n\n arg_1 = argparse.ArgumentParser(description='Time stretching example')\n\n arg_1.add_argument('input_file',\n action='store',\n help='path to the input file (wav, mp3, etc)')\n\n arg_1.add_argument('output_file',\n action='store',\n help='path to the stretched output (wav)')\n\n arg_1.add_argument('-s', '--speed',\n action='store',\n type=float,\n default=2.0,\n required=False,\n help='speed')\n\n return vars(arg_1.parse_args(arg_0))"} +{"_id": "doc_2150", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''HPSS demo function.\n\n :parameters:\n - input_file : str\n path to input audio\n - output_harmonic : str\n path to save output harmonic (wav)\n - output_percussive : str\n path to save output percussive (wav)\n '''\n\n # 1. Load the wav file, resample\n print('Loading ', arg_0)\n\n arg_3, arg_4 = librosa.load(arg_0)\n\n # Separate components with the effects module\n print('Separating harmonics and percussives... ')\n arg_5, arg_6 = librosa.effects.hpss(arg_3)\n\n # 5. Save the results\n print('Saving harmonic audio to: ', arg_1)\n librosa.output.write_wav(arg_1, arg_5, arg_4)\n\n print('Saving percussive audio to: ', arg_2)\n librosa.output.write_wav(arg_2, arg_6, arg_4)"} +{"_id": "doc_2151", "title": "", "text": "def Func(arg_0=None, arg_1=22050, arg_2=None, arg_3=512,\n arg_4=120.0, arg_5=100, arg_6=True, arg_7=None,\n arg_8='frames'):\n r'''Dynamic programming beat tracker.\n\n Beats are detected in three stages, following the method of [1]_:\n 1. Measure onset strength\n 2. Estimate tempo from onset correlation\n 3. Pick peaks in onset strength approximately consistent with estimated\n tempo\n\n .. [1] Ellis, Daniel PW. \"Beat tracking by dynamic programming.\"\n Journal of New Music Research 36.1 (2007): 51-60.\n http://labrosa.ee.columbia.edu/projects/beattrack/\n\n\n Parameters\n ----------\n\n y : np.ndarray [shape=(n,)] or None\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n onset_envelope : np.ndarray [shape=(n,)] or None\n (optional) pre-computed onset strength envelope.\n\n hop_length : int > 0 [scalar]\n number of audio samples between successive `onset_envelope` values\n\n start_bpm : float > 0 [scalar]\n initial guess for the tempo estimator (in beats per minute)\n\n tightness : float [scalar]\n tightness of beat distribution around tempo\n\n trim : bool [scalar]\n trim leading/trailing beats with weak onsets\n\n bpm : float [scalar]\n (optional) If provided, use `bpm` as the tempo instead of\n estimating it from `onsets`.\n\n units : {'frames', 'samples', 'time'}\n The units to encode detected beat events in.\n By default, 'frames' are used.\n\n\n Returns\n -------\n\n tempo : float [scalar, non-negative]\n estimated global tempo (in beats per minute)\n\n beats : np.ndarray [shape=(m,)]\n estimated beat event locations in the specified units\n (default is frame indices)\n\n .. 
note::\n If no onset strength could be detected, Funcer estimates 0 BPM\n and returns an empty list.\n\n\n Raises\n ------\n ParameterError\n if neither `y` nor `onset_envelope` are provided\n\n or if `units` is not one of 'frames', 'samples', or 'time'\n\n See Also\n --------\n librosa.onset.onset_strength\n\n\n Examples\n --------\n Track beats using time series input\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n\n >>> tempo, beats = librosa.beat.Func(y=y, sr=sr)\n >>> tempo\n 64.599609375\n\n\n Print the first 20 beat frames\n\n >>> beats[:20]\n array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,\n 698, 737, 777, 817, 857, 896, 936, 976, 1016,\n 1055, 1095])\n\n\n Or print them as timestamps\n\n >>> librosa.frames_to_time(beats[:20], sr=sr)\n array([ 7.43 , 8.29 , 9.218, 10.124, 11.146, 12.19 ,\n 13.212, 14.141, 15.279, 16.208, 17.113, 18.042,\n 18.971, 19.9 , 20.805, 21.734, 22.663, 23.591,\n 24.497, 25.426])\n\n\n Track beats using a pre-computed onset envelope\n\n >>> onset_env = librosa.onset.onset_strength(y, sr=sr,\n ... aggregate=np.median)\n >>> tempo, beats = librosa.beat.Func(onset_envelope=onset_env,\n ... sr=sr)\n >>> tempo\n 64.599609375\n >>> beats[:20]\n array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,\n 698, 737, 777, 817, 857, 896, 936, 976, 1016,\n 1055, 1095])\n\n\n Plot the beat events against the onset strength envelope\n\n >>> import matplotlib.pyplot as plt\n >>> hop_length = 512\n >>> plt.figure(figsize=(8, 4))\n >>> times = librosa.frames_to_time(np.arange(len(onset_env)),\n ... sr=sr, hop_length=hop_length)\n >>> plt.plot(times, librosa.util.normalize(onset_env),\n ... label='Onset strength')\n >>> plt.vlines(times[beats], 0, 1, alpha=0.5, color='r',\n ... linestyle='--', label='Beats')\n >>> plt.legend(frameon=True, framealpha=0.75)\n >>> # Limit the plot to a 15-second window\n >>> plt.xlim(15, 30)\n >>> plt.gca().xaxis.set_major_formatter(librosa.display.TimeFormatter())\n >>> plt.tight_layout()\n '''\n\n # First, get the frame->beat strength profile if we don't already have one\n if arg_2 is None:\n if arg_0 is None:\n raise ParameterError('y or onset_envelope must be provided')\n\n arg_2 = onset.onset_strength(arg_0=arg_0,\n arg_1=arg_1,\n arg_3=arg_3,\n aggregate=np.median)\n\n # Do we have any onsets to grab?\n if not arg_2.any():\n return (0, np.array([], dtype=int))\n\n # Estimate BPM if one was not provided\n if arg_7 is None:\n arg_7 = tempo(arg_2=arg_2,\n arg_1=arg_1,\n arg_3=arg_3,\n arg_4=arg_4)[0]\n\n # Then, run the tracker\n arg_9 = __Funcer(arg_2,\n arg_7,\n float(arg_1) / arg_3,\n arg_5,\n arg_6)\n\n if arg_8 == 'frames':\n pass\n elif arg_8 == 'samples':\n arg_9 = core.frames_to_samples(arg_9, arg_3=arg_3)\n elif arg_8 == 'time':\n arg_9 = core.frames_to_time(arg_9, arg_3=arg_3, arg_1=arg_1)\n else:\n raise ParameterError('Invalid unit type: {}'.format(arg_8))\n\n return (arg_7, arg_9)"} +{"_id": "doc_2152", "title": "", "text": "def Func(arg_0, arg_1):\n '''Construct the local score for an onset envelope and given period'''\n\n arg_2 = np.exp(-0.5 * (np.arange(-arg_1, arg_1+1)*32.0/arg_1)**2)\n return scipy.signal.convolve(__normalize_onsets(arg_0),\n arg_2,\n 'same')"} +{"_id": "doc_2153", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the last beat from the cumulative score array\"\"\"\n\n arg_1 = util.localmax(arg_0)\n arg_2 = np.median(arg_0[np.argwhere(arg_1)])\n\n # The last of these is the last beat (since score generally increases)\n return np.argwhere((arg_0 * arg_1 * 2 > arg_2)).max()"} +{"_id": 
"doc_2154", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=-1):\n '''Convert a recurrence matrix into a lag matrix.\n\n `lag[i, j] == rec[i+j, j]`\n\n Parameters\n ----------\n rec : np.ndarray, or scipy.sparse.spmatrix [shape=(n, n)]\n A (binary) recurrence matrix, as returned by `recurrence_matrix`\n\n pad : bool\n If False, `lag` matrix is square, which is equivalent to\n assuming that the signal repeats itself indefinitely.\n\n If True, `lag` is padded with `n` zeros, which eliminates\n the assumption of repetition.\n\n axis : int\n The axis to keep as the `time` axis.\n The alternate axis will be converted to lag coordinates.\n\n Returns\n -------\n lag : np.ndarray\n The recurrence matrix in (lag, time) (if `axis=1`)\n or (time, lag) (if `axis=0`) coordinates\n\n Raises\n ------\n ParameterError : if `rec` is non-square\n\n See Also\n --------\n recurrence_matrix\n lag_to_recurrence\n\n Examples\n --------\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> mfccs = librosa.feature.mfcc(y=y, sr=sr)\n >>> recurrence = librosa.segment.recurrence_matrix(mfccs)\n >>> lag_pad = librosa.segment.Func(recurrence, pad=True)\n >>> lag_nopad = librosa.segment.Func(recurrence, pad=False)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(8, 4))\n >>> plt.subplot(1, 2, 1)\n >>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')\n >>> plt.title('Lag (zero-padded)')\n >>> plt.subplot(1, 2, 2)\n >>> librosa.display.specshow(lag_nopad, x_axis='time')\n >>> plt.title('Lag (no padding)')\n >>> plt.tight_layout()\n '''\n\n arg_2 = np.abs(arg_2)\n\n if arg_0.ndim != 2 or arg_0.shape[0] != arg_0.shape[1]:\n raise ParameterError('non-square recurrence matrix shape: '\n '{}'.format(arg_0.shape))\n\n arg_3 = scipy.sparse.issparse(arg_0)\n\n arg_4 = None\n if arg_3:\n arg_4 = 1 - arg_2\n arg_5 = arg_0.format\n if arg_2 == 0:\n arg_0 = arg_0.tocsc()\n elif arg_2 in (-1, 1):\n arg_0 = arg_0.tocsr()\n\n arg_6 = arg_0.shape[arg_2]\n\n if arg_3:\n if arg_1:\n arg_7 = np.asarray([[1, 0]]).swapaxes(arg_2, 0)\n arg_8 = scipy.sparse.kron(arg_7.astype(arg_0.dtype), arg_0, format='lil')\n else:\n arg_8 = scipy.sparse.lil_matrix(arg_0)\n else:\n if arg_1:\n arg_9 = [(0, 0), (0, 0)]\n arg_9[(1-arg_2)] = (0, arg_6)\n arg_8 = np.pad(arg_0, arg_9, mode='constant')\n else:\n arg_8 = arg_0.copy()\n\n arg_10 = [slice(None)] * arg_8.ndim\n\n for arg_11 in range(1, arg_6):\n arg_10[arg_2] = arg_11\n arg_8[arg_12(arg_10)] = util.roll_sparse(arg_8[arg_12(arg_10)], -arg_11, arg_2=arg_4)\n\n if arg_3:\n return arg_8.asformat(arg_5)\n return np.ascontiguousarray(arg_8.T).T"} +{"_id": "doc_2155", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=0):\n '''Filtering in the time-lag domain.\n\n This is primarily useful for adapting image filters to operate on\n `recurrence_to_lag` output.\n\n Using `Func` is equivalent to the following sequence of\n operations:\n\n >>> data_tl = librosa.segment.recurrence_to_lag(data)\n >>> data_filtered_tl = function(data_tl)\n >>> data_filtered = librosa.segment.lag_to_recurrence(data_filtered_tl)\n\n Parameters\n ----------\n function : callable\n The filtering function to wrap, e.g., `scipy.ndimage.median_filter`\n\n pad : bool\n Whether to zero-pad the structure feature matrix\n\n index : int >= 0\n If `function` accepts input data as a positional argument, it should be\n indexed by `index`\n\n\n Returns\n -------\n wrapped_function : callable\n A new filter function which applies in time-lag space rather than\n time-time space.\n\n\n 
Examples\n --------\n\n Apply a 5-bin median filter to the diagonal of a recurrence matrix\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> rec = librosa.segment.recurrence_matrix(chroma)\n >>> from scipy.ndimage import median_filter\n >>> diagonal_median = librosa.segment.Func(median_filter)\n >>> rec_filtered = diagonal_median(rec, size=(1, 3), mode='mirror')\n\n Or with affinity weights\n\n >>> rec_aff = librosa.segment.recurrence_matrix(chroma, mode='affinity')\n >>> rec_aff_fil = diagonal_median(rec_aff, size=(1, 3), mode='mirror')\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(8,8))\n >>> plt.subplot(2, 2, 1)\n >>> librosa.display.specshow(rec, y_axis='time')\n >>> plt.title('Raw recurrence matrix')\n >>> plt.subplot(2, 2, 2)\n >>> librosa.display.specshow(rec_filtered)\n >>> plt.title('Filtered recurrence matrix')\n >>> plt.subplot(2, 2, 3)\n >>> librosa.display.specshow(rec_aff, x_axis='time', y_axis='time',\n ... cmap='magma_r')\n >>> plt.title('Raw affinity matrix')\n >>> plt.subplot(2, 2, 4)\n >>> librosa.display.specshow(rec_aff_fil, x_axis='time',\n ... cmap='magma_r')\n >>> plt.title('Filtered affinity matrix')\n >>> plt.tight_layout()\n '''\n\n def __my_filter(arg_3, *arg_4, **arg_5):\n '''Decorator to wrap the filter'''\n # Map the input data into time-lag space\n arg_4 = list(arg_4)\n\n arg_4[arg_2] = recurrence_to_lag(arg_4[arg_2], arg_1=arg_1)\n\n # Apply the filtering function\n arg_6 = arg_3(*arg_4, **arg_5)\n\n # Map back into time-time and return\n return lag_to_recurrence(arg_6)\n\n return decorator(__my_filter, arg_0)"} +{"_id": "doc_2156", "title": "", "text": "def Func(arg_0, arg_1, arg_2=4, arg_3=-1):\n '''Sub-divide a segmentation by feature clustering.\n\n Given a set of frame boundaries (`frames`), and a data matrix (`data`),\n each successive interval defined by `frames` is partitioned into\n `n_segments` by constrained agglomerative clustering.\n\n .. 
note::\n If an interval spans fewer than `n_segments` frames, then each\n frame becomes a sub-segment.\n\n Parameters\n ----------\n data : np.ndarray\n Data matrix to use in clustering\n\n frames : np.ndarray [shape=(n_boundaries,)], dtype=int, non-negative]\n Array of beat or segment boundaries, as provided by\n `librosa.beat.beat_track`,\n `librosa.onset.onset_detect`,\n or `agglomerative`.\n\n n_segments : int > 0\n Maximum number of frames to sub-divide each interval.\n\n axis : int\n Axis along which to apply the segmentation.\n By default, the last index (-1) is taken.\n\n Returns\n -------\n boundaries : np.ndarray [shape=(n_subboundaries,)]\n List of sub-divided segment boundaries\n\n See Also\n --------\n agglomerative : Temporal segmentation\n librosa.onset.onset_detect : Onset detection\n librosa.beat.beat_track : Beat tracking\n\n Notes\n -----\n This function caches at level 30.\n\n Examples\n --------\n Load audio, detect beat frames, and subdivide in twos by CQT\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)\n >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)\n >>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)\n >>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))\n >>> subseg = librosa.segment.Func(cqt, beats, n_segments=2)\n >>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)\n >>> subseg\n array([ 0, 2, 4, 21, 23, 26, 43, 55, 63, 72, 83,\n 97, 102, 111, 122, 137, 142, 153, 162, 180, 182, 185,\n 202, 210, 221, 231, 241, 256, 261, 271, 281, 296, 301,\n 310, 320, 339, 341, 344, 361, 368, 382, 389, 401, 416,\n 420, 430, 436, 451, 456, 465, 476, 489, 496, 503, 515,\n 527, 535, 544, 553, 558, 571, 578, 590, 607, 609, 638])\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> librosa.display.specshow(librosa.amplitude_to_db(cqt,\n ... ref=np.max),\n ... y_axis='cqt_hz', x_axis='time')\n >>> lims = plt.gca().get_ylim()\n >>> plt.vlines(beat_times, lims[0], lims[1], color='lime', alpha=0.9,\n ... linewidth=2, label='Beats')\n >>> plt.vlines(subseg_t, lims[0], lims[1], color='linen', linestyle='--',\n ... linewidth=1.5, alpha=0.5, label='Sub-beats')\n >>> plt.legend(frameon=True, shadow=True)\n >>> plt.title('CQT + Beat and sub-beat markers')\n >>> plt.tight_layout()\n\n '''\n\n arg_1 = util.fix_frames(arg_1, x_min=0, x_max=arg_0.shape[arg_3], pad=True)\n\n if arg_2 < 1:\n raise ParameterError('n_segments must be a positive integer')\n\n arg_4 = []\n arg_5 = [slice(None)] * arg_0.ndim\n\n for arg_6, arg_7 in zip(arg_1[:-1], arg_1[1:]):\n arg_5[arg_3] = slice(arg_6, arg_7)\n arg_4.extend(arg_6 + agglomerative(arg_0[tuple(arg_5)],\n min(arg_7 - arg_6, arg_2),\n arg_3=arg_3))\n\n return np.ascontiguousarray(arg_4)"} +{"_id": "doc_2157", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=-1):\n \"\"\"Bottom-up temporal segmentation.\n\n Use a temporally-constrained Func clustering routine to partition\n `data` into `k` contiguous segments.\n\n Parameters\n ----------\n data : np.ndarray\n data to cluster\n\n k : int > 0 [scalar]\n number of segments to produce\n\n clusterer : sklearn.cluster.AgglomerativeClustering, optional\n An optional AgglomerativeClustering object.\n If `None`, a constrained Ward object is instantiated.\n\n axis : int\n axis along which to cluster.\n By default, the last axis (-1) is chosen.\n\n Returns\n -------\n boundaries : np.ndarray [shape=(k,)]\n left-boundaries (frame numbers) of detected segments. 
This\n will always include `0` as the first left-boundary.\n\n See Also\n --------\n sklearn.cluster.AgglomerativeClustering\n\n Examples\n --------\n Cluster by chroma similarity, break into 20 segments\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> bounds = librosa.segment.Func(chroma, 20)\n >>> bound_times = librosa.frames_to_time(bounds, sr=sr)\n >>> bound_times\n array([ 0. , 1.672, 2.322, 2.624, 3.251, 3.506,\n 4.18 , 5.387, 6.014, 6.293, 6.943, 7.198,\n 7.848, 9.033, 9.706, 9.961, 10.635, 10.89 ,\n 11.54 , 12.539])\n\n Plot the segmentation over the chromagram\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')\n >>> plt.vlines(bound_times, 0, chroma.shape[0], color='linen', linestyle='--',\n ... linewidth=2, alpha=0.9, label='Segment boundaries')\n >>> plt.axis('tight')\n >>> plt.legend(frameon=True, shadow=True)\n >>> plt.title('Power spectrogram')\n >>> plt.tight_layout()\n\n \"\"\"\n\n # Make sure we have at least two dimensions\n arg_0 = np.atleast_2d(arg_0)\n\n # Swap data index to position 0\n arg_0 = np.swapaxes(arg_0, arg_3, 0)\n\n # Flatten the features\n arg_4 = arg_0.shape[0]\n arg_0 = arg_0.reshape((arg_4, -1))\n\n if arg_2 is None:\n # Connect the temporal connectivity graph\n arg_5 = sklearn.feature_extraction.image.grid_to_graph(n_x=arg_4,\n n_y=1, n_z=1)\n\n # Instantiate the clustering object\n arg_2 = sklearn.cluster.AgglomerativeClustering(n_clusters=arg_1,\n connectivity=arg_5,\n memory=cache.memory)\n\n # Fit the model\n arg_2.fit(arg_0)\n\n # Find the change points from the labels\n arg_6 = [0]\n arg_6.extend(\n list(1 + np.nonzero(np.diff(arg_2.labels_))[0].astype(int)))\n return np.asarray(arg_6)"} +{"_id": "doc_2158", "title": "", "text": "def Func(arg_0, arg_1, arg_2='hann', arg_3=2.0, arg_4=None, arg_5=7,\n arg_6=False, arg_7=True, **arg_8):\n '''Multi-angle path enhancement for self- and cross-similarity matrices.\n\n This function convolves multiple diagonal smoothing filters with a self-similarity (or\n recurrence) matrix R, and aggregates the result by an element-wise maximum.\n\n Technically, the output is a matrix R_smooth such that\n\n `R_smooth[i, j] = max_theta (R * filter_theta)[i, j]`\n\n where `*` denotes 2-dimensional convolution, and `filter_theta` is a smoothing filter at\n orientation theta.\n\n This is intended to provide coherent temporal smoothing of self-similarity matrices\n when there are changes in tempo.\n\n Smoothing filters are generated at evenly spaced orientations between min_ratio and\n max_ratio.\n\n This function is inspired by the multi-angle path enhancement of [1]_, but differs by\n modeling tempo differences in the space of similarity matrices rather than re-sampling\n the underlying features prior to generating the self-similarity matrix.\n\n .. [1] M\u00fcller, Meinard and Frank Kurth.\n \"Enhancing similarity matrices for music audio analysis.\"\n 2006 IEEE International Conference on Acoustics Speech and Signal Processing Proceedings.\n Vol. 5. IEEE, 2006.\n\n .. note:: if using recurrence_matrix to construct the input similarity matrix, be sure to include the main\n diagonal by setting `self=True`. 
Otherwise, the diagonal will be suppressed, and this is likely to\n produce discontinuities which will pollute the smoothing filter response.\n\n Parameters\n ----------\n R : np.ndarray\n The self- or cross-similarity matrix to be smoothed.\n Note: sparse inputs are not supported.\n\n n : int > 0\n The length of the smoothing filter\n\n window : window specification\n The type of smoothing filter to use. See `filters.get_window` for more information\n on window specification formats.\n\n max_ratio : float > 0\n The maximum tempo ratio to support\n\n min_ratio : float > 0\n The minimum tempo ratio to support.\n If not provided, it will default to `1/max_ratio`\n\n n_filters : int >= 1\n The number of different smoothing filters to use, evenly spaced\n between `min_ratio` and `max_ratio`.\n\n If `min_ratio = 1/max_ratio` (the default), using an odd number\n of filters will ensure that the main diagonal (ratio=1) is included.\n\n zero_mean : bool\n By default, the smoothing filters are non-negative and sum to one (i.e. are averaging\n filters).\n\n If `zero_mean=True`, then the smoothing filters are made to sum to zero by subtracting\n a constant value from the non-diagonal coordinates of the filter. This is primarily\n useful for suppressing blocks while enhancing diagonals.\n\n clip : bool\n If True, the smoothed similarity matrix will be thresholded at 0, and will not contain\n negative entries.\n\n kwargs : additional keyword arguments\n Additional arguments to pass to `scipy.ndimage.convolve`\n\n\n Returns\n -------\n R_smooth : np.ndarray, shape=R.shape\n The smoothed self- or cross-similarity matrix\n\n See Also\n --------\n filters.diagonal_filter\n recurrence_matrix\n\n\n Examples\n --------\n Use a 51-frame diagonal smoothing filter to enhance paths in a recurrence matrix\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=30)\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity', self=True)\n >>> rec_smooth = librosa.segment.Func(rec, 51, window='hann', n_filters=7)\n\n Plot the recurrence matrix before and after smoothing\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(8, 4))\n >>> plt.subplot(1,2,1)\n >>> librosa.display.specshow(rec, x_axis='time', y_axis='time')\n >>> plt.title('Unfiltered recurrence')\n >>> plt.subplot(1,2,2)\n >>> librosa.display.specshow(rec_smooth, x_axis='time', y_axis='time')\n >>> plt.title('Multi-angle enhanced recurrence')\n >>> plt.tight_layout()\n '''\n\n if arg_4 is None:\n arg_4 = 1./arg_3\n elif arg_4 > arg_3:\n raise ParameterError('min_ratio={} cannot exceed max_ratio={}'.format(arg_4, arg_3))\n\n arg_9 = None\n for arg_10 in np.logspace(np.log2(arg_4), np.log2(arg_3), num=arg_5, base=2):\n arg_11 = diagonal_filter(arg_2, arg_1, slope=arg_10, arg_6=arg_6)\n\n if arg_9 is None:\n arg_9 = scipy.ndimage.convolve(arg_0, arg_11, **arg_8)\n else:\n # Compute the point-wise maximum in-place\n np.maximum(arg_9, scipy.ndimage.convolve(arg_0, arg_11, **arg_8),\n out=arg_9)\n\n if arg_7:\n # Clip the output in-place\n np.clip(arg_9, 0, None, out=arg_9)\n\n return arg_9"} +{"_id": "doc_2159", "title": "", "text": "def Func(arg_0, arg_1=2048, arg_2=512):\n '''Slice a time series into overlapping Funcs.\n\n This implementation uses low-level stride manipulation to avoid\n redundant copies of the time series data.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n Time series to Func. 
Must be one-dimensional and contiguous\n in memory.\n\n Func_length : int > 0 [scalar]\n Length of the Func in samples\n\n hop_length : int > 0 [scalar]\n Number of samples to hop between Funcs\n\n Returns\n -------\n y_Funcs : np.ndarray [shape=(Func_length, N_FRAMES)]\n An array of Funcs sampled from `y`:\n `y_Funcs[i, j] == y[j * hop_length + i]`\n\n Raises\n ------\n ParameterError\n If `y` is not contiguous in memory, not an `np.ndarray`, or\n not one-dimensional. See `np.ascontiguousarray()` for details.\n\n If `hop_length < 1`, Funcs cannot advance.\n\n If `len(y) < Func_length`.\n\n Examples\n --------\n Extract 2048-sample Funcs from `y` with a hop of 64 samples per Func\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> librosa.util.Func(y, Func_length=2048, hop_length=64)\n array([[ -9.216e-06, 7.710e-06, ..., -2.117e-06, -4.362e-07],\n [ 2.518e-06, -6.294e-06, ..., -1.775e-05, -6.365e-06],\n ...,\n [ -7.429e-04, 5.173e-03, ..., 1.105e-05, -5.074e-06],\n [ 2.169e-03, 4.867e-03, ..., 3.666e-06, -5.571e-06]], dtype=float32)\n\n '''\n\n if not isinstance(arg_0, np.ndarray):\n raise ParameterError('Input must be of type numpy.ndarray, '\n 'given type(y)={}'.format(type(arg_0)))\n\n if arg_0.ndim != 1:\n raise ParameterError('Input must be one-dimensional, '\n 'given y.ndim={}'.format(arg_0.ndim))\n\n if len(arg_0) < arg_1:\n raise ParameterError('Buffer is too short (n={:d})'\n ' for Func_length={:d}'.format(len(arg_0), arg_1))\n\n if arg_2 < 1:\n raise ParameterError('Invalid hop_length: {:d}'.format(arg_2))\n\n if not arg_0.flags['C_CONTIGUOUS']:\n raise ParameterError('Input buffer must be contiguous.')\n\n # Compute the number of Funcs that will fit. The end may get truncated.\n arg_3 = 1 + int((len(arg_0) - arg_1) / arg_2)\n\n # Vertical stride is one sample\n # Horizontal stride is `hop_length` samples\n arg_4 = as_strided(arg_0, shape=(arg_1, arg_3),\n strides=(arg_0.itemsize, arg_2 * arg_0.itemsize))\n return arg_4"} +{"_id": "doc_2160", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Ensure that an input value is integer-typed.\n This is primarily useful for ensuring integer-valued\n array indices.\n\n Parameters\n ----------\n x : number\n A scalar value to be cast to int\n\n cast : function [optional]\n A function to modify `x` before casting.\n Default: `np.floor`\n\n Returns\n -------\n x_int : int\n `x_int = int(cast(x))`\n\n Raises\n ------\n ParameterError\n If `cast` is provided and is not callable.\n '''\n\n if arg_1 is None:\n arg_1 = np.floor\n\n if not six.callable(arg_1):\n raise ParameterError('cast parameter must be callable')\n\n return int(arg_1(arg_0))"} +{"_id": "doc_2161", "title": "", "text": "def Func(arg_0, arg_1=arg_2.inf, arg_4=0, arg_5=None, arg_6=None):\n '''Normalize an array along a chosen axis.\n\n Given a norm (described below) and a target axis, the input\n array is scaled so that\n\n `norm(S, axis=axis) == 1`\n\n For example, `axis=0` Funcs each column of a 2-d array\n by aggregating over the rows (0-axis).\n Similarly, `axis=1` Funcs each row of a 2-d array.\n\n This function also supports thresholding small-norm slices:\n any slice (i.e., row or column) with norm below a specified\n `threshold` can be left un-Funcd, set to all-zeros, or\n filled with uniform non-zero values that Func to 1.\n\n Note: the semantics of this function differ from\n `scipy.linalg.norm` in two ways: multi-dimensional arrays\n are supported, but matrix-norms are not.\n\n\n Parameters\n ----------\n S : np.ndarray\n The matrix to Func\n\n 
norm : {np.inf, -np.inf, 0, float > 0, None}\n - `np.inf` : maximum absolute value\n - `-np.inf` : minimum absolute value\n - `0` : number of non-zeros (the support)\n - float : corresponding l_p norm\n See `scipy.linalg.norm` for details.\n - None : no normalization is performed\n\n axis : int [scalar]\n Axis along which to compute the norm.\n\n threshold : number > 0 [optional]\n Only the columns (or rows) with norm at least `threshold` are\n Funcd.\n\n By default, the threshold is determined from\n the numerical precision of `S.dtype`.\n\n fill : None or bool\n If None, then columns (or rows) with norm below `threshold`\n are left as is.\n\n If False, then columns (rows) with norm below `threshold`\n are set to 0.\n\n If True, then columns (rows) with norm below `threshold`\n are filled uniformly such that the corresponding norm is 1.\n\n .. note:: `fill=True` is incompatible with `norm=0` because\n no uniform vector exists with l0 \"norm\" equal to 1.\n\n Returns\n -------\n S_norm : np.ndarray [shape=S.shape]\n Normalized array\n\n Raises\n ------\n ParameterError\n If `norm` is not among the valid types defined above\n\n If `S` is not finite\n\n If `fill=True` and `norm=0`\n\n See Also\n --------\n scipy.linalg.norm\n\n Notes\n -----\n This function caches at level 40.\n\n Examples\n --------\n >>> # Construct an example matrix\n >>> S = np.vander(np.arange(-2.0, 2.0))\n >>> S\n array([[-8., 4., -2., 1.],\n [-1., 1., -1., 1.],\n [ 0., 0., 0., 1.],\n [ 1., 1., 1., 1.]])\n >>> # Max (l-infinity)-Func the columns\n >>> librosa.util.Func(S)\n array([[-1. , 1. , -1. , 1. ],\n [-0.125, 0.25 , -0.5 , 1. ],\n [ 0. , 0. , 0. , 1. ],\n [ 0.125, 0.25 , 0.5 , 1. ]])\n >>> # Max (l-infinity)-Func the rows\n >>> librosa.util.Func(S, axis=1)\n array([[-1. , 0.5 , -0.25 , 0.125],\n [-1. , 1. , -1. , 1. ],\n [ 0. , 0. , 0. , 1. ],\n [ 1. , 1. , 1. , 1. ]])\n >>> # l1-Func the columns\n >>> librosa.util.Func(S, norm=1)\n array([[-0.8 , 0.667, -0.5 , 0.25 ],\n [-0.1 , 0.167, -0.25 , 0.25 ],\n [ 0. , 0. , 0. , 0.25 ],\n [ 0.1 , 0.167, 0.25 , 0.25 ]])\n >>> # l2-Func the columns\n >>> librosa.util.Func(S, norm=2)\n array([[-0.985, 0.943, -0.816, 0.5 ],\n [-0.123, 0.236, -0.408, 0.5 ],\n [ 0. , 0. , 0. , 0.5 ],\n [ 0.123, 0.236, 0.408, 0.5 ]])\n\n >>> # Thresholding and filling\n >>> S[:, -1] = 1e-308\n >>> S\n array([[ -8.000e+000, 4.000e+000, -2.000e+000,\n 1.000e-308],\n [ -1.000e+000, 1.000e+000, -1.000e+000,\n 1.000e-308],\n [ 0.000e+000, 0.000e+000, 0.000e+000,\n 1.000e-308],\n [ 1.000e+000, 1.000e+000, 1.000e+000,\n 1.000e-308]])\n\n >>> # By default, small-norm columns are left untouched\n >>> librosa.util.Func(S)\n array([[ -1.000e+000, 1.000e+000, -1.000e+000,\n 1.000e-308],\n [ -1.250e-001, 2.500e-001, -5.000e-001,\n 1.000e-308],\n [ 0.000e+000, 0.000e+000, 0.000e+000,\n 1.000e-308],\n [ 1.250e-001, 2.500e-001, 5.000e-001,\n 1.000e-308]])\n >>> # Small-norm columns can be zeroed out\n >>> librosa.util.Func(S, fill=False)\n array([[-1. , 1. , -1. , 0. ],\n [-0.125, 0.25 , -0.5 , 0. ],\n [ 0. , 0. , 0. , 0. ],\n [ 0.125, 0.25 , 0.5 , 0. ]])\n >>> # Or set to constant with unit-norm\n >>> librosa.util.Func(S, fill=True)\n array([[-1. , 1. , -1. , 1. ],\n [-0.125, 0.25 , -0.5 , 1. ],\n [ 0. , 0. , 0. , 1. ],\n [ 0.125, 0.25 , 0.5 , 1. ]])\n >>> # With an l1 norm instead of max-norm\n >>> librosa.util.Func(S, norm=1, fill=True)\n array([[-0.8 , 0.667, -0.5 , 0.25 ],\n [-0.1 , 0.167, -0.25 , 0.25 ],\n [ 0. , 0. , 0. 
, 0.25 ]])\n '''\n\n # Avoid div-by-zero\n if arg_5 is None:\n arg_5 = tiny(arg_0)\n\n elif arg_5 <= 0:\n raise ParameterError('threshold={} must be strictly '\n 'positive'.format(arg_5))\n\n if arg_6 not in [None, False, True]:\n raise ParameterError('fill={} must be None or boolean'.format(arg_6))\n\n if not arg_2.all(arg_2.isfinite(arg_0)):\n raise ParameterError('Input must be finite')\n\n # All norms only depend on magnitude, let's do that first\n arg_7 = arg_2.abs(arg_0).astype(arg_2.float)\n\n # For max/min norms, filling with 1 works\n arg_8 = 1\n\n if arg_1 == arg_2.inf:\n arg_9 = arg_2.max(arg_7, arg_4=arg_4, keepdims=True)\n\n elif arg_1 == -arg_2.inf:\n arg_9 = arg_2.min(arg_7, arg_4=arg_4, keepdims=True)\n\n elif arg_1 == 0:\n if arg_6 is True:\n raise ParameterError('Cannot Func with norm=0 and fill=True')\n\n arg_9 = arg_2.sum(arg_7 > 0, arg_4=arg_4, keepdims=True, dtype=arg_7.dtype)\n\n elif arg_2.issubdtype(type(arg_1), arg_2.number) and arg_1 > 0:\n arg_9 = arg_2.sum(arg_7**arg_1, arg_4=arg_4, keepdims=True)**(1./arg_1)\n\n if arg_4 is None:\n arg_8 = arg_7.size**(-1./arg_1)\n else:\n arg_8 = arg_7.shape[arg_4]**(-1./arg_1)\n\n elif arg_1 is None:\n return arg_0\n\n else:\n raise ParameterError('Unsupported norm: {}'.format(repr(arg_1)))\n\n # indices where norm is below the threshold\n arg_10 = arg_9 < arg_5\n\n arg_11 = arg_2.empty_like(arg_0)\n if arg_6 is None:\n # Leave small indices un-Funcd\n arg_9[arg_10] = 1.0\n arg_11[:] = arg_0 / arg_9\n\n elif arg_6:\n # If we have a non-zero fill value, we locate those entries by\n # doing a nan-divide.\n # If S was finite, then length is finite (except for small positions)\n arg_9[arg_10] = arg_2.nan\n arg_11[:] = arg_0 / arg_9\n arg_11[arg_2.isnan(arg_11)] = arg_8\n else:\n # Set small values to zero by doing an inf-divide.\n # This is safe (by IEEE-754) as long as S is finite.\n arg_9[arg_10] = arg_2.inf\n arg_11[:] = arg_0 / arg_9\n\n return arg_11"} +{"_id": "doc_2162", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n '''Uses a flexible heuristic to pick peaks in a signal.\n\n A sample n is selected as a peak if the corresponding x[n]\n fulfills the following three conditions:\n\n 1. `x[n] == max(x[n - pre_max:n + post_max])`\n 2. `x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta`\n 3. `n - previous_n > wait`\n\n where `previous_n` is the last sample picked as a peak (greedily).\n\n This implementation is based on [1]_ and [2]_.\n\n .. [1] Boeck, Sebastian, Florian Krebs, and Markus Schedl.\n \"Evaluating the Online Capabilities of Onset Detection Methods.\" ISMIR.\n 2012.\n\n .. 
[2] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py\n\n\n Parameters\n ----------\n x : np.ndarray [shape=(n,)]\n input signal to pick peaks from\n\n pre_max : int >= 0 [scalar]\n number of samples before `n` over which max is computed\n\n post_max : int >= 1 [scalar]\n number of samples after `n` over which max is computed\n\n pre_avg : int >= 0 [scalar]\n number of samples before `n` over which mean is computed\n\n post_avg : int >= 1 [scalar]\n number of samples after `n` over which mean is computed\n\n delta : float >= 0 [scalar]\n threshold offset for mean\n\n wait : int >= 0 [scalar]\n number of samples to wait after picking a peak\n\n Returns\n -------\n peaks : np.ndarray [shape=(n_peaks,), dtype=int]\n indices of peaks in `x`\n\n Raises\n ------\n ParameterError\n If any input lies outside its defined range\n\n Examples\n --------\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)\n >>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,\n ... hop_length=512,\n ... aggregate=np.median)\n >>> peaks = librosa.util.Func(onset_env, 3, 3, 3, 5, 0.5, 10)\n >>> peaks\n array([ 4, 23, 73, 102, 142, 162, 182, 211, 261, 301, 320,\n 331, 348, 368, 382, 396, 411, 431, 446, 461, 476, 491,\n 510, 525, 536, 555, 570, 590, 609, 625, 639])\n\n >>> import matplotlib.pyplot as plt\n >>> times = librosa.frames_to_time(np.arange(len(onset_env)),\n ... sr=sr, hop_length=512)\n >>> plt.figure()\n >>> ax = plt.subplot(2, 1, 2)\n >>> D = librosa.stft(y)\n >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),\n ... y_axis='log', x_axis='time')\n >>> plt.subplot(2, 1, 1, sharex=ax)\n >>> plt.plot(times, onset_env, alpha=0.8, label='Onset strength')\n >>> plt.vlines(times[peaks], 0,\n ... onset_env.max(), color='r', alpha=0.8,\n ... 
label='Selected peaks')\n >>> plt.legend(frameon=True, framealpha=0.8)\n >>> plt.axis('tight')\n >>> plt.tight_layout()\n '''\n\n if arg_1 < 0:\n raise ParameterError('pre_max must be non-negative')\n if arg_3 < 0:\n raise ParameterError('pre_avg must be non-negative')\n if arg_5 < 0:\n raise ParameterError('delta must be non-negative')\n if arg_6 < 0:\n raise ParameterError('wait must be non-negative')\n\n if arg_2 <= 0:\n raise ParameterError('post_max must be positive')\n\n if arg_4 <= 0:\n raise ParameterError('post_avg must be positive')\n\n if arg_0.ndim != 1:\n raise ParameterError('input array must be one-dimensional')\n\n # Ensure valid index types\n arg_1 = valid_int(arg_1, cast=np.ceil)\n arg_2 = valid_int(arg_2, cast=np.ceil)\n arg_3 = valid_int(arg_3, cast=np.ceil)\n arg_4 = valid_int(arg_4, cast=np.ceil)\n arg_6 = valid_int(arg_6, cast=np.ceil)\n\n # Get the maximum of the signal over a sliding window\n arg_7 = arg_1 + arg_2\n arg_8 = np.ceil(0.5 * (arg_1 - arg_2))\n # Using mode='constant' and cval=x.min() effectively truncates\n # the sliding window at the boundaries\n arg_9 = scipy.ndimage.filters.maximum_filter1d(arg_0, int(arg_7),\n mode='constant',\n origin=int(arg_8),\n cval=arg_0.min())\n\n # Get the mean of the signal over a sliding window\n arg_10 = arg_3 + arg_4\n arg_11 = np.ceil(0.5 * (arg_3 - arg_4))\n # Here, there is no mode which results in the behavior we want,\n # so we'll correct below.\n arg_12 = scipy.ndimage.filters.uniform_filter1d(arg_0, int(arg_10),\n mode='nearest',\n origin=int(arg_11))\n\n # Correct sliding average at the beginning\n arg_13 = 0\n # Only need to correct in the range where the window needs to be truncated\n while arg_13 - arg_3 < 0 and arg_13 < arg_0.shape[0]:\n # This just explicitly does mean(x[n - pre_avg:n + post_avg])\n # with truncation\n arg_14 = arg_13 - arg_3\n arg_14 = arg_14 if arg_14 > 0 else 0\n arg_12[arg_13] = np.mean(arg_0[arg_14:arg_13 + arg_4])\n arg_13 += 1\n # Correct sliding average at the end\n arg_13 = arg_0.shape[0] - arg_4\n # When post_avg > x.shape[0] (weird case), reset to 0\n arg_13 = arg_13 if arg_13 > 0 else 0\n while arg_13 < arg_0.shape[0]:\n arg_14 = arg_13 - arg_3\n arg_14 = arg_14 if arg_14 > 0 else 0\n arg_12[arg_13] = np.mean(arg_0[arg_14:arg_13 + arg_4])\n arg_13 += 1\n\n # First mask out all entries not equal to the local max\n arg_15 = arg_0 * (arg_0 == arg_9)\n\n # Then mask out all entries less than the thresholded average\n arg_15 = arg_15 * (arg_15 >= (arg_12 + arg_5))\n\n # Initialize peaks array, to be filled greedily\n arg_16 = []\n\n # Remove onsets which are close together in time\n arg_17 = -np.inf\n\n for arg_18 in np.nonzero(arg_15)[0]:\n # Only report an onset if the \"wait\" samples was reported\n if arg_18 > arg_17 + arg_6:\n arg_16.append(arg_18)\n # Save last reported onset\n arg_17 = arg_18\n\n return np.array(arg_16)"} +{"_id": "doc_2163", "title": "", "text": "def Func(arg_0, arg_1=2, arg_2=arg_3.float32):\n \"\"\"Convert an integer buffer to floating point values.\n This is primarily useful when loading integer-valued wav data\n into numpy arrays.\n\n See Also\n --------\n Func\n\n Parameters\n ----------\n x : np.ndarray [dtype=int]\n The integer-valued data buffer\n\n n_bytes : int [1, 2, 4]\n The number of bytes per sample in `x`\n\n dtype : numeric type\n The target output type (default: 32-bit float)\n\n Returns\n -------\n x_float : np.ndarray [dtype=float]\n The input data buffer cast to floating point\n \"\"\"\n\n # Invert the scale of the data\n arg_5 = 
1./float(1 << ((8 * arg_1) - 1))\n\n # Construct the format string\n arg_6 = '>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)\n >>> C = np.abs(librosa.cqt(y=y, sr=sr))\n >>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])\n\n By default, use mean aggregation\n\n >>> C_avg = librosa.util.Func(C, beats)\n\n Use median-aggregation instead of mean\n\n >>> C_med = librosa.util.Func(C, beats,\n ... aggregate=np.median)\n\n Or sub-beat Funchronization\n\n >>> sub_beats = librosa.segment.subsegment(C, beats)\n >>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])\n >>> C_med_sub = librosa.util.Func(C, sub_beats, aggregate=np.median)\n\n\n Plot the results\n\n >>> import matplotlib.pyplot as plt\n >>> beat_t = librosa.frames_to_time(beats, sr=sr)\n >>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)\n >>> plt.figure()\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(C,\n ... ref=np.max),\n ... x_axis='time')\n >>> plt.title('CQT power, shape={}'.format(C.shape))\n >>> plt.subplot(3, 1, 2)\n >>> librosa.display.specshow(librosa.amplitude_to_db(C_med,\n ... ref=np.max),\n ... x_coords=beat_t, x_axis='time')\n >>> plt.title('Beat Funchronous CQT power, '\n ... 'shape={}'.format(C_med.shape))\n >>> plt.subplot(3, 1, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,\n ... ref=np.max),\n ... x_coords=subbeat_t, x_axis='time')\n >>> plt.title('Sub-beat Funchronous CQT power, '\n ... 'shape={}'.format(C_med_sub.shape))\n >>> plt.tight_layout()\n\n \"\"\"\n\n if arg_2 is None:\n arg_2 = np.mean\n\n arg_5 = list(arg_0.shape)\n\n if np.all([isinstance(arg_6, slice) for arg_6 in arg_1]):\n arg_7 = arg_1\n elif np.all([np.issubdtype(type(arg_6), np.integer) for arg_6 in arg_1]):\n arg_7 = index_to_slice(np.asarray(arg_1), 0, arg_5[arg_4], arg_3=arg_3)\n else:\n raise ParameterError('Invalid index set: {}'.format(arg_1))\n\n arg_8 = list(arg_5)\n arg_8[arg_4] = len(arg_7)\n\n arg_9 = np.empty(arg_8, order='F' if np.isfortran(arg_0) else 'C', dtype=arg_0.dtype)\n\n arg_10 = [slice(None)] * arg_0.ndim\n arg_11 = [slice(None)] * arg_9.ndim\n\n for (arg_12, arg_13) in enumerate(arg_7):\n arg_10[arg_4] = arg_13\n arg_11[arg_4] = arg_12\n arg_9[arg_14(arg_11)] = arg_2(arg_0[arg_14(arg_10)], arg_4=arg_4)\n\n return arg_9"} +{"_id": "doc_2165", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=False):\n '''Robustly compute a Func operation.\n\n `M = X**power / (X**power + X_ref**power)`\n\n\n Parameters\n ----------\n X : np.ndarray\n The (non-negative) input array corresponding to the positive mask elements\n\n X_ref : np.ndarray\n The (non-negative) array of reference or background elements.\n Must have the same shape as `X`.\n\n power : number > 0 or np.inf\n If finite, returns the soft mask computed in a numerically stable way\n\n If infinite, returns a hard (binary) mask equivalent to `X > X_ref`.\n Note: for hard masks, ties are always broken in favor of `X_ref` (`mask=0`).\n\n\n split_zeros : bool\n If `True`, entries where `X` and X`_ref` are both small (close to 0)\n will receive mask values of 0.5.\n\n Otherwise, the mask is set to 0 for these entries.\n\n\n Returns\n -------\n mask : np.ndarray, shape=`X.shape`\n The output mask array\n\n Raises\n ------\n ParameterError\n If `X` and `X_ref` have different shapes.\n\n If `X` or `X_ref` are negative anywhere\n\n If `power <= 0`\n\n Examples\n --------\n\n >>> X = 2 * np.ones((3, 3))\n >>> 
X_ref = np.vander(np.arange(3.0))\n >>> X\n array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])\n >>> X_ref\n array([[ 0., 0., 1.],\n [ 1., 1., 1.],\n [ 4., 2., 1.]])\n >>> librosa.util.Func(X, X_ref, power=1)\n array([[ 1. , 1. , 0.667],\n [ 0.667, 0.667, 0.667],\n [ 0.333, 0.5 , 0.667]])\n >>> librosa.util.Func(X_ref, X, power=1)\n array([[ 0. , 0. , 0.333],\n [ 0.333, 0.333, 0.333],\n [ 0.667, 0.5 , 0.333]])\n >>> librosa.util.Func(X, X_ref, power=2)\n array([[ 1. , 1. , 0.8],\n [ 0.8, 0.8, 0.8],\n [ 0.2, 0.5, 0.8]])\n >>> librosa.util.Func(X, X_ref, power=4)\n array([[ 1. , 1. , 0.941],\n [ 0.941, 0.941, 0.941],\n [ 0.059, 0.5 , 0.941]])\n >>> librosa.util.Func(X, X_ref, power=100)\n array([[ 1.000e+00, 1.000e+00, 1.000e+00],\n [ 1.000e+00, 1.000e+00, 1.000e+00],\n [ 7.889e-31, 5.000e-01, 1.000e+00]])\n >>> librosa.util.Func(X, X_ref, power=np.inf)\n array([[ True, True, True],\n [ True, True, True],\n [False, False, True]], dtype=bool)\n '''\n if arg_0.shape != arg_1.shape:\n raise ParameterError('Shape mismatch: {}!={}'.format(arg_0.shape,\n arg_1.shape))\n\n if np.any(arg_0 < 0) or np.any(arg_1 < 0):\n raise ParameterError('X and X_ref must be non-negative')\n\n if arg_2 <= 0:\n raise ParameterError('power must be strictly positive')\n\n # We're working with ints, cast to float.\n arg_4 = arg_0.dtype\n if not np.issubdtype(arg_4, np.floating):\n arg_4 = np.float32\n\n # Re-scale the input arrays relative to the larger value\n arg_5 = np.maximum(arg_0, arg_1).astype(arg_4)\n arg_6 = (arg_5 < np.finfo(arg_4).tiny)\n arg_5[arg_6] = 1\n\n # For finite power, compute the Func\n if np.isfinite(arg_2):\n arg_7 = (arg_0 / arg_5)**arg_2\n arg_8 = (arg_1 / arg_5)**arg_2\n arg_9 = ~arg_6\n arg_7[arg_9] /= arg_7[arg_9] + arg_8[arg_9]\n # Wherever energy is below energy in both inputs, split the mask\n if arg_3:\n arg_7[arg_6] = 0.5\n else:\n arg_7[arg_6] = 0.0\n else:\n # Otherwise, compute the hard mask\n arg_7 = arg_0 > arg_1\n\n return arg_7"} +{"_id": "doc_2166", "title": "", "text": "def Func(arg_0):\n '''Compute the Func-value corresponding to an input's data type.\n\n This is the smallest \"usable\" number representable in `x`'s\n data type (e.g., float32).\n\n This is primarily useful for determining a threshold for\n numerical underflow in division or multiplication operations.\n\n Parameters\n ----------\n x : number or np.ndarray\n The array to compute the Func-value for.\n All that matters here is `x.dtype`.\n\n Returns\n -------\n Func_value : float\n The smallest positive usable number for the type of `x`.\n If `x` is integer-typed, then the Func value for `np.float32`\n is returned instead.\n\n See Also\n --------\n numpy.finfo\n\n Examples\n --------\n\n For a standard double-precision floating point number:\n\n >>> librosa.util.Func(1.0)\n 2.2250738585072014e-308\n\n Or explicitly as double-precision\n\n >>> librosa.util.Func(np.asarray(1e-5, dtype=np.float64))\n 2.2250738585072014e-308\n\n Or complex numbers\n\n >>> librosa.util.Func(1j)\n 2.2250738585072014e-308\n\n Single-precision floating point:\n\n >>> librosa.util.Func(np.asarray(1e-5, dtype=np.float32))\n 1.1754944e-38\n\n Integer\n\n >>> librosa.util.Func(5)\n 1.1754944e-38\n '''\n\n # Make sure we have an array view\n arg_0 = np.asarray(arg_0)\n\n # Only floating types generate a Func\n if np.issubdtype(arg_0.dtype, np.floating) or np.issubdtype(arg_0.dtype, np.complexfloating):\n arg_1 = arg_0.dtype\n else:\n arg_1 = np.float32\n\n return np.finfo(arg_1).Func"} +{"_id": "doc_2167", "title": "", "text": "def 
Func(arg_0,\n arg_1,\n arg_2=30,\n arg_3='XVID',\n arg_4='{:06d}.jpg',\n arg_5=0,\n arg_6=0,\n arg_7=True):\n \"\"\"Read the frame images from a directory and join them as a video\n\n Args:\n frame_dir (str): The directory containing video frames.\n video_file (str): Output filename.\n fps (float): FPS of the output video.\n fourcc (str): Fourcc of the output video, this should be compatible\n with the output file type.\n filename_tmpl (str): Filename template with the index as the variable.\n start (int): Starting frame index.\n end (int): Ending frame index.\n show_progress (bool): Whether to show a progress bar.\n \"\"\"\n if arg_6 == 0:\n arg_8 = arg_4.split('.')[-1]\n arg_6 = len([name for name in scandir(arg_0, arg_8)])\n arg_9 = osp.join(arg_0, arg_4.format(arg_5))\n check_file_exist(arg_9, 'The start frame not found: ' + arg_9)\n arg_10 = cv2.imread(arg_9)\n arg_11, arg_12 = arg_10.shape[:2]\n arg_13 = (arg_12, arg_11)\n arg_14 = cv2.VideoWriter(arg_1, VideoWriter_fourcc(*arg_3), arg_2,\n arg_13)\n\n def write_frame(arg_15):\n arg_16 = osp.join(arg_0, arg_4.format(arg_15))\n arg_10 = cv2.imread(arg_16)\n arg_14.write(arg_10)\n\n if arg_7:\n track_progress(write_frame, range(arg_5, arg_6))\n else:\n for arg_17 in range(arg_5, arg_6):\n arg_16 = osp.join(arg_0, arg_4.format(arg_17))\n arg_10 = cv2.imread(arg_16)\n arg_14.write(arg_10)\n arg_14.release()"} +{"_id": "doc_2168", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get frame by index.\n\n Args:\n frame_id (int): Index of the expected frame, 0-based.\n\n Returns:\n ndarray or None: Return the frame if successful, otherwise None.\n \"\"\"\n if arg_1 < 0 or arg_1 >= arg_0._frame_cnt:\n raise IndexError(\n '\"frame_id\" must be between 0 and {}'.format(arg_0._frame_cnt -\n 1))\n if arg_1 == arg_0._position:\n return arg_0.read()\n if arg_0._cache:\n arg_2 = arg_0._cache.get(arg_1)\n if arg_2 is not None:\n arg_0._position = arg_1 + 1\n return arg_2\n arg_0._set_real_position(arg_1)\n arg_4, arg_2 = arg_0._vcap.read()\n if arg_4:\n if arg_0._cache:\n arg_0._cache.put(arg_0._position, arg_2)\n arg_0._position += 1\n return arg_2"} +{"_id": "doc_2169", "title": "", "text": "def Func(arg_0, arg_1, arg_2=50, **arg_3):\n \"\"\"Track the progress of tasks execution with a progress bar.\n\n Tasks are done with a simple for-loop.\n\n Args:\n func (callable): The function to be applied to each task.\n tasks (list or tuple[Iterable, int]): A list of tasks or\n (tasks, total num).\n bar_width (int): Width of progress bar.\n\n Returns:\n list: The task results.\n \"\"\"\n if isinstance(arg_1, tuple):\n assert len(arg_1) == 2\n assert isinstance(arg_1[0], collections_abc.Iterable)\n assert isinstance(arg_1[1], int)\n arg_4 = arg_1[1]\n arg_1 = arg_1[0]\n elif isinstance(arg_1, collections_abc.Iterable):\n arg_4 = len(arg_1)\n else:\n raise TypeError(\n '\"tasks\" must be an iterable object or a (iterator, int) tuple')\n arg_5 = ProgressBar(arg_4, arg_2)\n arg_6 = []\n for arg_7 in arg_1:\n arg_6.append(arg_0(arg_7, **arg_3))\n arg_5.update()\n sys.stdout.write('\\n')\n return arg_6"} +{"_id": "doc_2170", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=50,\n arg_6=1,\n arg_7=False,\n arg_8=True):\n \"\"\"Track the progress of parallel task execution with a progress bar.\n\n The built-in :mod:`multiprocessing` module is used for process pools and\n tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.\n\n Args:\n func (callable): The function to be applied to each task.\n tasks 
(list or tuple[Iterable, int]): A list of tasks or\n (tasks, total num).\n nproc (int): Process (worker) number.\n initializer (None or callable): Refer to :class:`multiprocessing.Pool`\n for details.\n initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for\n details.\n chunksize (int): Refer to :class:`multiprocessing.Pool` for details.\n bar_width (int): Width of progress bar.\n skip_first (bool): Whether to skip the first sample for each worker\n when estimating fps, since the initialization step may take\n longer.\n keep_order (bool): If True, :func:`Pool.imap` is used, otherwise\n :func:`Pool.imap_unordered` is used.\n\n Returns:\n list: The task results.\n \"\"\"\n if isinstance(arg_1, tuple):\n assert len(arg_1) == 2\n assert isinstance(arg_1[0], collections_abc.Iterable)\n assert isinstance(arg_1[1], int)\n arg_9 = arg_1[1]\n arg_1 = arg_1[0]\n elif isinstance(arg_1, collections_abc.Iterable):\n arg_9 = len(arg_1)\n else:\n raise TypeError(\n '\"tasks\" must be an iterable object or a (iterator, int) tuple')\n arg_10 = init_pool(arg_2, arg_3, arg_4)\n arg_11 = not arg_7\n arg_9 -= arg_2 * arg_6 * int(arg_7)\n arg_12 = ProgressBar(arg_9, arg_5, arg_11)\n arg_13 = []\n if arg_8:\n arg_14 = arg_10.imap(arg_0, arg_1, arg_6)\n else:\n arg_14 = arg_10.imap_unordered(arg_0, arg_1, arg_6)\n for arg_15 in arg_14:\n arg_13.append(arg_15)\n if arg_7:\n if len(arg_13) < arg_2 * arg_6:\n continue\n elif len(arg_13) == arg_2 * arg_6:\n arg_12.start()\n continue\n arg_12.update()\n sys.stdout.write('\\n')\n arg_10.close()\n arg_10.join()\n return arg_13"} +{"_id": "doc_2171", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Clip bboxes to fit the image shape.\n\n Args:\n bboxes (ndarray): Shape (..., 4*k)\n img_shape (tuple): (height, width) of the image.\n\n Returns:\n ndarray: Clipped bboxes.\n \"\"\"\n assert arg_0.shape[-1] % 4 == 0\n arg_2 = np.empty_like(arg_0, dtype=arg_0.dtype)\n arg_2[..., 0::2] = np.maximum(\n np.minimum(arg_0[..., 0::2], arg_1[1] - 1), 0)\n arg_2[..., 1::2] = np.maximum(\n np.minimum(arg_0[..., 1::2], arg_1[0] - 1), 0)\n return arg_2"} +{"_id": "doc_2172", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1.0, arg_3=None):\n \"\"\"Crop image patches.\n\n 3 steps: scale the bboxes -> clip bboxes -> crop and pad.\n\n Args:\n img (ndarray): Image to be cropped.\n bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.\n scale (float, optional): Scale ratio of bboxes, the default value\n 1.0 means no padding.\n pad_fill (number or list): Value to be filled for padding, None for\n no padding.\n\n Returns:\n list or ndarray: The cropped image patches.\n \"\"\"\n arg_4 = 1 if arg_0.ndim == 2 else arg_0.shape[2]\n if arg_3 is not None:\n if isinstance(arg_3, (int, float)):\n arg_3 = [arg_3 for _ in range(arg_4)]\n assert len(arg_3) == arg_4\n\n arg_5 = arg_1[None, ...] 
if arg_1.ndim == 1 else arg_1\n arg_6 = bbox_scaling(arg_5, arg_2).astype(np.int32)\n arg_7 = bbox_clip(arg_6, arg_0.shape)\n\n arg_8 = []\n for arg_9 in range(arg_7.shape[0]):\n arg_10, arg_11, arg_12, arg_13 = tuple(arg_7[arg_9, :])\n if arg_3 is None:\n arg_14 = arg_0[arg_11:arg_13 + 1, arg_10:arg_12 + 1, ...]\n else:\n arg_15, arg_16, arg_17, arg_18 = tuple(arg_6[arg_9, :])\n if arg_4 == 2:\n arg_19 = (arg_18 - arg_16 + 1, arg_17 - arg_15 + 1)\n else:\n arg_19 = (arg_18 - arg_16 + 1, arg_17 - arg_15 + 1, arg_4)\n arg_14 = np.array(\n arg_3, dtype=arg_0.dtype) * np.ones(\n arg_19, dtype=arg_0.dtype)\n arg_20 = 0 if arg_15 >= 0 else -arg_15\n arg_21 = 0 if arg_16 >= 0 else -arg_16\n arg_22 = arg_12 - arg_10 + 1\n arg_23 = arg_13 - arg_11 + 1\n arg_14[arg_21:arg_21 + arg_23, arg_20:arg_20 +\n arg_22, ...] = arg_0[arg_11:arg_11 + arg_23, arg_10:arg_10 + arg_22, ...]\n arg_8.append(arg_14)\n\n if arg_1.ndim == 1:\n return arg_8[0]\n else:\n return arg_8"} +{"_id": "doc_2173", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"Pad an image to a certain shape.\n\n Args:\n img (ndarray): Image to be padded.\n shape (tuple): Expected padding shape.\n pad_val (number or sequence): Values to be filled in padding areas.\n\n Returns:\n ndarray: The padded image.\n \"\"\"\n if not isinstance(arg_2, (int, float)):\n assert len(arg_2) == arg_0.shape[-1]\n if len(arg_1) < len(arg_0.shape):\n arg_1 = arg_1 + (arg_0.shape[-1], )\n assert len(arg_1) == len(arg_0.shape)\n for arg_3 in range(len(arg_1) - 1):\n assert arg_1[arg_3] >= arg_0.shape[arg_3]\n arg_4 = np.empty(arg_1, dtype=arg_0.dtype)\n arg_4[...] = arg_2\n arg_4[:arg_0.shape[0], :arg_0.shape[1], ...] = arg_0\n return arg_4"} +{"_id": "doc_2174", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"Pad an image to ensure each edge to be multiple to some number.\n\n Args:\n img (ndarray): Image to be padded.\n divisor (int): Padded image edges will be multiple to divisor.\n pad_val (number or sequence): Same as :func:`impad`.\n\n Returns:\n ndarray: The padded image.\n \"\"\"\n arg_3 = int(np.ceil(arg_0.shape[0] / arg_1)) * arg_1\n arg_4 = int(np.ceil(arg_0.shape[1] / arg_1)) * arg_1\n return impad(arg_0, (arg_3, arg_4), arg_2)"} +{"_id": "doc_2175", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Rescale a size by a ratio.\n\n Args:\n size (tuple): w, h.\n scale (float): Scaling factor.\n\n Returns:\n tuple[int]: scaled size.\n \"\"\"\n arg_2, arg_3 = arg_0\n return int(arg_2 * float(arg_1) + 0.5), int(arg_3 * float(arg_1) + 0.5)"} +{"_id": "doc_2176", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3='bilinear'):\n \"\"\"Resize image to a given size.\n\n Args:\n img (ndarray): The input image.\n size (tuple): Target (w, h).\n return_scale (bool): Whether to return `w_scale` and `h_scale`.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\".\n\n Returns:\n tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or\n `resized_img`.\n \"\"\"\n arg_4, arg_5 = arg_0.shape[:2]\n arg_6 = cv2.resize(\n arg_0, arg_1, arg_3=interp_codes[arg_3])\n if not arg_2:\n return arg_6\n else:\n arg_7 = arg_1[0] / arg_5\n arg_8 = arg_1[1] / arg_4\n return arg_6, arg_7, arg_8"} +{"_id": "doc_2177", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3='bilinear'):\n \"\"\"Resize image to the same size of a given image.\n\n Args:\n img (ndarray): The input image.\n dst_img (ndarray): The target image.\n return_scale (bool): Whether to return 
`w_scale` and `h_scale`.\n interpolation (str): Same as :func:`resize`.\n\n Returns:\n tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or\n `resized_img`.\n \"\"\"\n arg_4, arg_5 = arg_1.shape[:2]\n return imresize(arg_0, (arg_5, arg_4), arg_2, arg_3)"} +{"_id": "doc_2178", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3='bilinear'):\n \"\"\"Resize image while keeping the aspect ratio.\n\n Args:\n img (ndarray): The input image.\n scale (float or tuple[int]): The scaling factor or maximum size.\n If it is a float number, then the image will be rescaled by this\n factor, else if it is a tuple of 2 integers, then the image will\n be rescaled as large as possible within the scale.\n return_scale (bool): Whether to return the scaling factor besides the\n rescaled image.\n interpolation (str): Same as :func:`resize`.\n\n Returns:\n ndarray: The rescaled image.\n \"\"\"\n arg_4, arg_5 = arg_0.shape[:2]\n if isinstance(arg_1, (float, int)):\n if arg_1 <= 0:\n raise ValueError(\n 'Invalid scale {}, must be positive.'.format(arg_1))\n arg_6 = arg_1\n elif isinstance(arg_1, tuple):\n arg_7 = max(arg_1)\n arg_8 = min(arg_1)\n arg_6 = min(arg_7 / max(arg_4, arg_5),\n arg_8 / min(arg_4, arg_5))\n else:\n raise TypeError(\n 'Scale must be a number or tuple of int, but got {}'.format(\n type(arg_1)))\n arg_9 = _scale_size((arg_5, arg_4), arg_6)\n arg_10 = imresize(arg_0, arg_9, arg_3=arg_3)\n if arg_2:\n return arg_10, arg_6\n else:\n return arg_10"} +{"_id": "doc_2179", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Register a handler for some file extensions.\n\n Args:\n handler (:obj:`BaseFileHandler`): Handler to be registered.\n file_formats (str or list[str]): File formats to be handled by this\n handler.\n \"\"\"\n if not isinstance(arg_0, BaseFileHandler):\n raise TypeError(\n 'handler must be a child of BaseFileHandler, not {}'.format(\n type(arg_0)))\n if isinstance(arg_1, str):\n arg_1 = [arg_1]\n if not is_list_of(arg_1, str):\n raise TypeError('file_formats must be a str or a list of str')\n for arg_2 in arg_1:\n arg_3[arg_2] = arg_0"} +{"_id": "doc_2180", "title": "", "text": "def Func(arg_0):\n \"\"\"Get priority value.\n\n Args:\n priority (int or str or :obj:`Priority`): Priority.\n\n Returns:\n int: The priority value.\n \"\"\"\n if isinstance(arg_0, int):\n if arg_0 < 0 or arg_0 > 100:\n raise ValueError('priority must be between 0 and 100')\n return arg_0\n elif isinstance(arg_0, Priority):\n return arg_0.value\n elif isinstance(arg_0, str):\n return Priority[arg_0.upper()].value\n else:\n raise TypeError('priority must be an integer or Priority enum value')"} +{"_id": "doc_2181", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2='green',\n arg_3=-1,\n arg_4=1,\n arg_5=True,\n arg_6='',\n arg_7=0,\n arg_8=None):\n \"\"\"Draw bboxes on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (list or ndarray): A list of ndarray of shape (k, 4).\n colors (list[str or tuple or Color]): A list of colors.\n top_k (int): Plot the first k bboxes only if set positive.\n thickness (int): Thickness of lines.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str, optional): The filename to write the image.\n \"\"\"\n arg_0 = imread(arg_0)\n\n if isinstance(arg_1, np.ndarray):\n arg_1 = [arg_1]\n if not isinstance(arg_2, list):\n arg_2 = [arg_2 for _ in range(len(arg_1))]\n arg_2 = [color_val(c) for c in arg_2]\n assert len(arg_1) == len(arg_2)\n\n for 
arg_9, arg_10 in enumerate(arg_1):\n arg_10 = arg_10.astype(np.int32)\n if arg_3 <= 0:\n arg_11 = arg_10.shape[0]\n else:\n arg_11 = min(arg_3, arg_10.shape[0])\n for arg_12 in range(arg_11):\n arg_13 = (arg_10[arg_12, 0], arg_10[arg_12, 1])\n arg_14 = (arg_10[arg_12, 2], arg_10[arg_12, 3])\n cv2.rectangle(\n arg_0, arg_13, arg_14, arg_2[arg_9], arg_4=arg_4)\n\n if arg_5:\n imshow(arg_0, arg_6, arg_7)\n if arg_8 is not None:\n imwrite(arg_0, arg_8)"} +{"_id": "doc_2182", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=0, *arg_3, **arg_4):\n \"\"\"Read an optical flow map.\n\n Args:\n flow_or_path (ndarray or str): A flow map or filepath.\n quantize (bool): whether to read quantized pair, if set to True,\n remaining args will be passed to :func:`dequantize_flow`.\n concat_axis (int): The axis that dx and dy are concatenated,\n can be either 0 or 1. Ignored if quantize is False.\n\n Returns:\n ndarray: Optical flow represented as a (h, w, 2) numpy array\n \"\"\"\n if isinstance(arg_0, np.ndarray):\n if (arg_0.ndim != 3) or (arg_0.shape[-1] != 2):\n raise ValueError('Invalid flow with shape {}'.format(\n arg_0.shape))\n return arg_0\n elif not is_str(arg_0):\n raise TypeError(\n '\"flow_or_path\" must be a filename or numpy array, not {}'.format(\n type(arg_0)))\n\n if not arg_1:\n with open(arg_0, 'rb') as f:\n try:\n arg_5 = f.read(4).decode('utf-8')\n except Exception:\n raise IOError('Invalid flow file: {}'.format(arg_0))\n else:\n if arg_5 != 'PIEH':\n raise IOError(\n 'Invalid flow file: {}, header does not contain PIEH'.\n format(arg_0))\n\n arg_6 = np.fromfile(f, np.int32, 1).squeeze()\n arg_7 = np.fromfile(f, np.int32, 1).squeeze()\n arg_8 = np.fromfile(f, np.float32, arg_6 * arg_7 * 2).reshape((arg_7, arg_6, 2))\n else:\n assert arg_2 in [0, 1]\n arg_9 = imread(arg_0, flag='unchanged')\n if arg_9.ndim != 2:\n raise IOError(\n '{} is not a valid quantized flow file, its dimension is {}.'.\n format(arg_0, arg_9.ndim))\n assert arg_9.shape[arg_2] % 2 == 0\n arg_10, arg_11 = np.split(arg_9, 2, axis=arg_2)\n arg_8 = dequantize_flow(arg_10, arg_11, *arg_3, **arg_4)\n\n return arg_8.astype(np.float32)"} +{"_id": "doc_2183", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.02, arg_3=True):\n \"\"\"Recover from quantized flow.\n\n Args:\n dx (ndarray): Quantized dx.\n dy (ndarray): Quantized dy.\n max_val (float): Maximum value used when quantizing.\n denorm (bool): Whether to multiply flow values with width/height.\n\n Returns:\n ndarray: Dequantized flow.\n \"\"\"\n assert arg_0.shape == arg_1.shape\n assert arg_0.ndim == 2 or (arg_0.ndim == 3 and arg_0.shape[-1] == 1)\n\n arg_0, arg_1 = [dequantize(d, -arg_2, arg_2, 255) for d in [arg_0, arg_1]]\n\n if arg_3:\n arg_0 *= arg_0.shape[1]\n arg_1 *= arg_0.shape[0]\n arg_4 = np.dstack((arg_0, arg_1))\n return arg_4"} +{"_id": "doc_2184", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Load state_dict to a module.\n\n This method is modified from :meth:`torch.nn.Module.Func`.\n Default value for ``strict`` is set to ``False`` and the message for\n param mismatch will be shown even if strict is False.\n\n Args:\n module (Module): Module that receives the state_dict.\n state_dict (OrderedDict): Weights.\n strict (bool): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.\n logger (:obj:`logging.Logger`, optional): Logger to log the error\n message. 
If not specified, print function will be used.\n \"\"\"\n arg_4 = []\n arg_5 = arg_0.state_dict()\n for arg_6, arg_7 in arg_1.items():\n if arg_6 not in arg_5:\n arg_4.append(arg_6)\n continue\n if isinstance(arg_7, torch.nn.Parameter):\n # backwards compatibility for serialized parameters\n arg_7 = arg_7.data\n\n try:\n arg_5[arg_6].copy_(arg_7)\n except Exception:\n raise RuntimeError('While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'\n .format(arg_6, arg_5[arg_6].size(),\n arg_7.size()))\n arg_8 = set(arg_5.keys()) - set(arg_1.keys())\n\n arg_9 = []\n if arg_4:\n arg_9.append('unexpected key in source state_dict: {}\\n'.format(\n ', '.join(arg_4)))\n if arg_8:\n arg_9.append('missing keys in source state_dict: {}\\n'.format(\n ', '.join(arg_8)))\n arg_9 = '\\n'.join(arg_9)\n if arg_9:\n if arg_2:\n raise RuntimeError(arg_9)\n elif arg_3 is not None:\n arg_3.warn(arg_9)\n else:\n print(arg_9)"} +{"_id": "doc_2185", "title": "", "text": "def Func(arg_0):\n \"\"\"Copy a model state_dict to cpu.\n\n Args:\n state_dict (OrderedDict): Model weights on GPU.\n\n Returns:\n OrderedDict: Model weights on GPU.\n \"\"\"\n arg_1 = OrderedDict()\n for arg_2, arg_3 in arg_0.items():\n arg_1[arg_2] = arg_3.cpu()\n return arg_1"} +{"_id": "doc_2186", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Init the optimizer.\n\n Args:\n optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an\n optimizer object or a dict used for constructing the optimizer.\n\n Returns:\n :obj:`~torch.optim.Optimizer`: An optimizer object.\n\n Examples:\n >>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)\n >>> type(runner.Func(optimizer))\n \n \"\"\"\n if isinstance(arg_1, dict):\n arg_1 = obj_from_dict(\n arg_1, torch.optim, dict(params=arg_0.model.parameters()))\n elif not isinstance(arg_1, torch.optim.Optimizer):\n raise TypeError(\n 'optimizer must be either an Optimizer object or a dict, '\n 'but got {}'.format(type(arg_1)))\n return arg_1"} +{"_id": "doc_2187", "title": "", "text": "def Func(arg_0):\n \"\"\"Get current learning rates.\n\n Returns:\n list: Current learning rate of all param groups.\n \"\"\"\n if arg_0.optimizer is None:\n raise RuntimeError(\n 'lr is not applicable because optimizer does not exist.')\n return [arg_1['lr'] for arg_1 in arg_0.optimizer.param_groups]"} +{"_id": "doc_2188", "title": "", "text": "def Func(arg_0, arg_1, arg_2='NORMAL'):\n \"\"\"Register a hook into the hook list.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n \"\"\"\n assert isinstance(arg_1, Hook)\n if hasattr(arg_1, 'priority'):\n raise ValueError('\"priority\" is a reserved attribute for hooks')\n arg_2 = get_priority(arg_2)\n arg_1.priority = arg_2\n # insert the hook to a sorted list\n arg_3 = False\n for arg_4 in range(len(arg_0._hooks) - 1, -1, -1):\n if arg_2 >= arg_0._hooks[arg_4].priority:\n arg_0._hooks.insert(arg_4 + 1, arg_1)\n arg_3 = True\n break\n if not arg_3:\n arg_0._hooks.insert(0, arg_1)"} +{"_id": "doc_2189", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None):\n \"\"\"Register default hooks for training.\n\n Default hooks include:\n\n - LrUpdaterHook\n - OptimizerStepperHook\n - CheckpointSaverHook\n - IterTimerHook\n - LoggerHook(s)\n \"\"\"\n if arg_2 is None:\n arg_2 = {}\n if arg_3 is None:\n arg_3 = {}\n arg_0.register_lr_hooks(arg_1)\n 
arg_0.register_hook(arg_0.build_hook(arg_2, OptimizerHook))\n arg_0.register_hook(arg_0.build_hook(arg_3, CheckpointHook))\n arg_0.register_hook(IterTimerHook())\n if arg_4 is not None:\n arg_0.register_logger_hooks(arg_4)"} +{"_id": "doc_2190", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3='',\n **arg_4):\n \"\"\"Convert a video with ffmpeg.\n\n This provides a general api to ffmpeg, the executed command is::\n\n `ffmpeg -y -i `\n\n Options(kwargs) are mapped to ffmpeg commands with the following rules:\n\n - key=val: \"-key val\"\n - key=True: \"-key\"\n - key=False: \"\"\n\n Args:\n in_file (str): Input video filename.\n out_file (str): Output video filename.\n pre_options (str): Options appears before \"-i \".\n print_cmd (bool): Whether to print the final ffmpeg command.\n \"\"\"\n arg_5 = []\n for arg_6, arg_7 in arg_4.items():\n if isinstance(arg_7, bool):\n if arg_7:\n arg_5.append('-{}'.format(arg_6))\n elif arg_6 == 'log_level':\n assert arg_7 in [\n 'quiet', 'panic', 'fatal', 'error', 'warning', 'info',\n 'verbose', 'debug', 'trace'\n ]\n arg_5.append('-loglevel {}'.format(arg_7))\n else:\n arg_5.append('-{} {}'.format(arg_6, arg_7))\n arg_8 = 'ffmpeg -y {} -i {} {} {}'.format(arg_3, arg_0,\n ' '.join(arg_5), arg_1)\n if arg_2:\n print(arg_8)\n subprocess.call(arg_8, shell=True)"} +{"_id": "doc_2191", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=False,\n arg_5='info',\n arg_6=False,\n **arg_7):\n \"\"\"Resize a video.\n\n Args:\n in_file (str): Input video filename.\n out_file (str): Output video filename.\n size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).\n ratio (tuple or float): Expected resize ratio, (2, 0.5) means\n (w*2, h*0.5).\n keep_ar (bool): Whether to keep original aspect ratio.\n log_level (str): Logging level of ffmpeg.\n print_cmd (bool): Whether to print the final ffmpeg command.\n \"\"\"\n if arg_2 is None and arg_3 is None:\n raise ValueError('expected size or ratio must be specified')\n elif arg_2 is not None and arg_3 is not None:\n raise ValueError('size and ratio cannot be specified at the same time')\n arg_8 = {'log_level': arg_5}\n if arg_2:\n if not arg_4:\n arg_8['vf'] = 'scale={}:{}'.format(arg_2[0], arg_2[1])\n else:\n arg_8['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio'\n '=decrease'.format(arg_2[0], arg_2[1]))\n else:\n if not isinstance(arg_3, tuple):\n arg_3 = (arg_3, arg_3)\n arg_8['vf'] = 'scale=\"trunc(iw*{}):trunc(ih*{})\"'.format(\n arg_3[0], arg_3[1])\n convert_video(arg_0, arg_1, arg_6, **arg_8)"} +{"_id": "doc_2192", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Load a text file and parse the content as a dict.\n\n Each line of the text file will be two or more columns splited by\n whitespaces or tabs. The first column will be parsed as dict keys, and\n the following columns will be parsed as dict values.\n\n Args:\n filename(str): Filename.\n key_type(type): Type of the dict's keys. 
str is user by default and\n type conversion will be performed if specified.\n\n Returns:\n dict: The parsed contents.\n \"\"\"\n arg_3 = {}\n with open(arg_0, 'r') as f:\n for arg_4 in f:\n arg_5 = arg_4.rstrip('\\n').split()\n assert len(arg_5) >= 2\n arg_6 = arg_1(arg_5[0])\n arg_7 = arg_5[1:] if len(arg_5) > 2 else arg_5[1]\n arg_3[arg_6] = arg_7\n return arg_3"} +{"_id": "doc_2193", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(\n arg_0,\n arg_1,\n kernel_size=3,\n padding=arg_2,\n arg_2=arg_2)"} +{"_id": "doc_2194", "title": "", "text": "def Func(arg_0, arg_1='color'):\n \"\"\"Read an image.\n\n Args:\n img_or_path (ndarray or str): Either a numpy array or image path.\n If it is a numpy array (loaded image), then it will be returned\n as is.\n flag (str): Flags specifying the color type of a loaded image,\n candidates are `color`, `grayscale` and `unchanged`.\n\n Returns:\n ndarray: Loaded image array.\n \"\"\"\n if isinstance(arg_0, np.ndarray):\n return arg_0\n elif is_str(arg_0):\n arg_1 = Func_flags[arg_1] if is_str(arg_1) else arg_1\n check_file_exist(arg_0,\n 'img file does not exist: {}'.format(arg_0))\n return cv2.Func(arg_0, arg_1)\n else:\n raise TypeError('\"img\" must be a numpy array or a filename')"} +{"_id": "doc_2195", "title": "", "text": "def Func(arg_0, arg_1='color'):\n \"\"\"Read an image from bytes.\n\n Args:\n content (bytes): Image bytes got from files or other streams.\n flag (str): Same as :func:`imread`.\n\n Returns:\n ndarray: Loaded image array.\n \"\"\"\n arg_2 = np.frombuffer(arg_0, np.uint8)\n arg_1 = imread_flags[arg_1] if is_str(arg_1) else arg_1\n arg_3 = cv2.imdecode(arg_2, arg_1)\n return arg_3"} +{"_id": "doc_2196", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\"Write image to file\n\n Args:\n img (ndarray): Image array to be written.\n file_path (str): Image file path.\n params (None or list): Same as opencv's :func:`Func` interface.\n auto_mkdir (bool): If the parent folder of `file_path` does not exist,\n whether to create it automatically.\n\n Returns:\n bool: Successful or not.\n \"\"\"\n if arg_3:\n arg_4 = osp.abspath(osp.dirname(arg_1))\n mkdir_or_exist(arg_4)\n return cv2.Func(arg_1, arg_0, arg_2)"} +{"_id": "doc_2197", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Convert a BGR image to grayscale image.\n\n Args:\n img (ndarray): The input image.\n keepdim (bool): If False (by default), then return the grayscale image\n with 2 dims, otherwise 3 dims.\n\n Returns:\n ndarray: The converted grayscale image.\n \"\"\"\n arg_2 = cv2.cvtColor(arg_0, cv2.COLOR_BGR2GRAY)\n if arg_1:\n arg_2 = arg_2[..., None]\n return arg_2"} +{"_id": "doc_2198", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a grayscale image to BGR image.\n\n Args:\n img (ndarray or str): The input image.\n\n Returns:\n ndarray: The converted BGR image.\n \"\"\"\n arg_0 = arg_0[..., None] if arg_0.ndim == 2 else arg_0\n arg_1 = cv2.cvtColor(arg_0, cv2.COLOR_GRAY2BGR)\n return arg_1"} +{"_id": "doc_2199", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Cast elements of an iterable object into some type.\n\n Args:\n inputs (Iterable): The input object.\n dst_type (type): Destination type.\n return_type (type, optional): If specified, the output object will be\n converted to this type, otherwise an iterator.\n\n Returns:\n iterator or specified type: The converted object.\n \"\"\"\n if not isinstance(arg_0, collections_abc.Iterable):\n raise 
TypeError('inputs must be an iterable object')\n if not isinstance(arg_1, type):\n raise TypeError('\"dst_type\" must be a valid type')\n\n arg_3 = six.moves.map(arg_1, arg_0)\n\n if arg_2 is None:\n return arg_3\n else:\n return arg_2(arg_3)"} +{"_id": "doc_2200", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Check whether it is a sequence of some type.\n\n Args:\n seq (Sequence): The sequence to be checked.\n expected_type (type): Expected type of sequence items.\n seq_type (type, optional): Expected sequence type.\n\n Returns:\n bool: Whether the sequence is valid.\n \"\"\"\n if arg_2 is None:\n arg_3 = collections_abc.Sequence\n else:\n assert isinstance(arg_2, type)\n arg_3 = arg_2\n if not isinstance(arg_0, arg_3):\n return False\n for arg_4 in arg_0:\n if not isinstance(arg_4, arg_1):\n return False\n return True"} +{"_id": "doc_2201", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Slice a list into several sub lists by a list of given length.\n\n Args:\n in_list (list): The list to be sliced.\n lens(int or list): The expected length of each out list.\n\n Returns:\n list: A list of sliced list.\n \"\"\"\n if not isinstance(arg_1, list):\n raise TypeError('\"indices\" must be a list of integers')\n elif sum(arg_1) != len(arg_0):\n raise ValueError(\n 'sum of lens and list length does not match: {} != {}'.format(\n sum(arg_1), len(arg_0)))\n arg_2 = []\n arg_3 = 0\n for arg_4 in range(len(arg_1)):\n arg_2.append(arg_0[arg_3:arg_3 + arg_1[arg_4]])\n arg_3 += arg_1[arg_4]\n return arg_2"} +{"_id": "doc_2202", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"Average latest n values or all values\"\"\"\n assert arg_1 >= 0\n for arg_2 in arg_0.val_history:\n arg_3 = np.array(arg_0.val_history[arg_2][-arg_1:])\n arg_4 = np.array(arg_0.n_history[arg_2][-arg_1:])\n arg_5 = np.sum(arg_3 * arg_4) / np.sum(arg_4)\n arg_0.output[arg_2] = arg_5\n arg_0.ready = True"} +{"_id": "doc_2203", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Scatters tensor across multiple GPUs.\n \"\"\"\n if arg_2 is None:\n arg_2 = [None] * len(arg_1)\n\n if isinstance(arg_0, list):\n arg_3 = (len(arg_0) - 1) // len(arg_1) + 1\n arg_4 = [\n Func(arg_0[i], [arg_1[i // arg_3]],\n [arg_2[i // arg_3]]) for i in range(len(arg_0))\n ]\n return arg_4\n elif isinstance(arg_0, torch.Tensor):\n arg_5 = arg_0.contiguous()\n # TODO: copy to a pinned buffer first (if copying from CPU)\n arg_6 = arg_2[0] if arg_5.numel() > 0 else None\n with torch.cuda.device(arg_1[0]), torch.cuda.stream(arg_6):\n arg_5 = arg_5.cuda(arg_1[0], non_blocking=True)\n return arg_5\n else:\n raise Exception('Unknown type {}.'.format(type(arg_0)))"} +{"_id": "doc_2204", "title": "", "text": "def Func(arg_0):\n \"\"\"Add check points in a single line.\n\n This method is suitable for running a task on a list of items. 
A timer will\n be registered when the method is called for the first time.\n\n :Example:\n\n >>> import time\n >>> import mmcv\n >>> for i in range(1, 6):\n >>> # simulate a code block\n >>> time.sleep(i)\n >>> mmcv.Func('task1')\n 2.000\n 3.000\n 4.000\n 5.000\n\n Args:\n timer_id (str): Timer identifier.\n \"\"\"\n if arg_0 not in arg_1:\n arg_1[arg_0] = Timer()\n return 0\n else:\n return arg_1[arg_0].since_last_check()"} +{"_id": "doc_2205", "title": "", "text": "def Func(arg_0):\n \"\"\"Time since the last checking.\n\n Either :func:`since_start` or :func:`Func` is a checking\n operation.\n\n Returns (float): Time in seconds.\n \"\"\"\n if not arg_0._is_running:\n raise TimerError('timer is not running')\n arg_1 = time() - arg_0._t_last\n arg_0._t_last = time()\n return arg_1"} +{"_id": "doc_2206", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=0):\n \"\"\"Show optical flow.\n\n Args:\n flow (ndarray or str): The optical flow to be displayed.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n \"\"\"\n arg_0 = flowread(arg_0)\n arg_3 = flow2rgb(arg_0)\n imshow(rgb2bgr(arg_3), arg_1, arg_2)"} +{"_id": "doc_2207", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=1e6):\n \"\"\"Convert flow map to RGB image.\n\n Args:\n flow (ndarray): Array of optical flow.\n color_wheel (ndarray or None): Color wheel used to map flow field to\n RGB colorspace. Default color wheel will be used if not specified.\n unknown_thr (str): Values above this threshold will be marked as\n unknown and thus ignored.\n\n Returns:\n ndarray: RGB image that can be visualized.\n \"\"\"\n assert arg_0.ndim == 3 and arg_0.shape[-1] == 2\n if arg_1 is None:\n arg_1 = make_color_wheel()\n assert arg_1.ndim == 2 and arg_1.shape[1] == 3\n arg_3 = arg_1.shape[0]\n\n arg_4 = arg_0[:, :, 0].copy()\n arg_5 = arg_0[:, :, 1].copy()\n\n arg_6 = (np.isnan(arg_4) | np.isnan(arg_5) | (np.abs(arg_4) > arg_2) |\n (np.abs(arg_5) > arg_2))\n arg_4[arg_6] = 0\n arg_5[arg_6] = 0\n\n arg_7 = np.sqrt(arg_4**2 + arg_5**2)\n if np.any(arg_7 > np.finfo(float).eps):\n arg_8 = np.max(arg_7)\n arg_4 /= arg_8\n arg_5 /= arg_8\n\n [arg_9, arg_10] = arg_4.shape\n\n arg_7 = np.sqrt(arg_4**2 + arg_5**2)\n arg_11 = np.arctan2(-arg_5, -arg_4) / np.pi\n\n arg_12 = (arg_11 + 1) / 2 * (arg_3 - 1)\n arg_13 = np.floor(arg_12).astype(int)\n arg_14 = (arg_13 + 1) % arg_3\n arg_10 = (arg_12 - arg_13.astype(np.float32))[..., None]\n arg_15 = (\n 1 - arg_10) * arg_1[arg_13, :] + arg_10 * arg_1[arg_14, :]\n arg_16 = arg_7 <= 1\n arg_15[arg_16] = 1 - arg_7[arg_16, None] * (1 - arg_15[arg_16])\n arg_15[np.logical_not(arg_16)] *= 0.75\n\n arg_15[arg_6, :] = 0\n\n return arg_15"} +{"_id": "doc_2208", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Build a color wheel.\n\n Args:\n bins(list or tuple, optional): Specify the number of bins for each\n color range, corresponding to six ranges: red -> yellow,\n yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,\n magenta -> red. 
[15, 6, 4, 11, 13, 6] is used for default\n (see Middlebury).\n\n Returns:\n ndarray: Color wheel of shape (total_bins, 3).\n \"\"\"\n if arg_0 is None:\n arg_0 = [15, 6, 4, 11, 13, 6]\n assert len(arg_0) == 6\n\n arg_1, arg_2, arg_3, arg_4, arg_5, arg_6 = tuple(arg_0)\n\n arg_7 = [1, np.arange(arg_1) / arg_1, 0]\n arg_8 = [1 - np.arange(arg_2) / arg_2, 1, 0]\n arg_9 = [0, 1, np.arange(arg_3) / arg_3]\n arg_10 = [0, 1 - np.arange(arg_4) / arg_4, 1]\n arg_11 = [np.arange(arg_5) / arg_5, 0, 1]\n arg_12 = [1, 0, 1 - np.arange(arg_6) / arg_6]\n\n arg_13 = arg_1 + arg_2 + arg_3 + arg_4 + arg_5 + arg_6\n\n arg_14 = np.zeros((3, arg_13), dtype=np.float32)\n\n arg_15 = 0\n for arg_16, arg_17 in enumerate([arg_7, arg_8, arg_9, arg_10, arg_11, arg_12]):\n for arg_18 in range(3):\n arg_14[arg_18, arg_15:arg_15 + arg_0[arg_16]] = arg_17[arg_18]\n arg_15 += arg_0[arg_16]\n\n return arg_14.T"} +{"_id": "doc_2209", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"Scatter inputs to target gpus.\n\n The only difference from original :func:`Func` is to add support for\n :type:`~mmcv.parallel.DataContainer`.\n \"\"\"\n\n def arg_6(arg_3):\n if isinstance(arg_3, torch.Tensor):\n return OrigScatter.apply(arg_1, None, arg_2, arg_3)\n if isinstance(arg_3, DataContainer):\n if arg_3.cpu_only:\n return arg_3.data\n else:\n return Scatter.forward(arg_1, arg_3.data)\n if isinstance(arg_3, tuple) and len(arg_3) > 0:\n return list(zip(*map(arg_6, arg_3)))\n if isinstance(arg_3, list) and len(arg_3) > 0:\n arg_4 = list(map(list, zip(*map(arg_6, arg_3))))\n return arg_4\n if isinstance(arg_3, dict) and len(arg_3) > 0:\n arg_4 = list(map(type(arg_3), zip(*map(arg_6, arg_3.items()))))\n return arg_4\n return [arg_3 for arg_5 in arg_1]\n\n # After Func_map is called, a Func_map cell will exist. This cell\n # has a reference to the actual function Func_map, which has references\n # to a closure that has a reference to the Func_map cell (because the\n # fn is recursive). To avoid this reference cycle, we set the function to\n # None, clearing the cell\n try:\n return arg_6(arg_0)\n finally:\n arg_6 = None"} +{"_id": "doc_2210", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0):\n \"\"\"Scatter with support for kwargs dictionary\"\"\"\n arg_0 = scatter(arg_0, arg_2, arg_3) if arg_0 else []\n arg_1 = scatter(arg_1, arg_2, arg_3) if arg_1 else []\n if len(arg_0) < len(arg_1):\n arg_0.extend([() for arg_4 in range(len(arg_1) - len(arg_0))])\n elif len(arg_1) < len(arg_0):\n arg_1.extend([{} for arg_4 in range(len(arg_0) - len(arg_1))])\n arg_0 = tuple(arg_0)\n arg_1 = tuple(arg_1)\n return arg_0, arg_1"} +{"_id": "doc_2211", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Decide whether a particular character needs to be quoted.\n\n The 'quotetabs' flag indicates whether embedded tabs and spaces should be\n quoted. 
Note that line-ending tabs and spaces are always encoded, as per\n RFC 1521.\n \"\"\"\n if arg_0 in ' \\t':\n return arg_1\n # if header, we have to escape _ because _ is used to escape space\n if arg_0 == '_':\n return arg_2\n return arg_0 == ESCAPE or not (' ' <= arg_0 <= '~')"} +{"_id": "doc_2212", "title": "", "text": "def Func(arg_0):\n \"\"\"Quote a single character.\"\"\"\n arg_1 = ord(arg_0)\n return ESCAPE + HEX[arg_1//16] + HEX[arg_1%16]"} +{"_id": "doc_2213", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3 = 0):\n \"\"\"Read 'input', apply quoted-printable encoding, and write to 'output'.\n\n 'input' and 'output' are files with readline() and write() methods.\n The 'quotetabs' flag indicates whether embedded tabs and spaces should be\n quoted. Note that line-ending tabs and spaces are always Funcd, as per\n RFC 1521.\n The 'header' flag indicates whether we are encoding spaces as _ as per\n RFC 1522.\n \"\"\"\n\n if b2a_qp is not None:\n arg_4 = arg_0.read()\n arg_5 = b2a_qp(arg_4, arg_2 = arg_2, arg_3 = arg_3)\n arg_1.write(arg_5)\n return\n\n def write(arg_6, arg_1=arg_1, arg_7='\\n'):\n # RFC 1521 requires that the line ending in a space or tab must have\n # that trailing character Funcd.\n if arg_6 and arg_6[-1:] in ' \\t':\n arg_1.write(arg_6[:-1] + quote(arg_6[-1]) + arg_7)\n elif arg_6 == '.':\n arg_1.write(quote(arg_6) + arg_7)\n else:\n arg_1.write(arg_6 + arg_7)\n\n arg_8 = None\n while 1:\n arg_9 = arg_0.readline()\n if not arg_9:\n break\n arg_10 = []\n # Strip off any readline induced trailing newline\n arg_11 = ''\n if arg_9[-1:] == '\\n':\n arg_9 = arg_9[:-1]\n arg_11 = '\\n'\n # Calculate the un-length-limited Funcd line\n for arg_12 in arg_9:\n if needsquoting(arg_12, arg_2, arg_3):\n arg_12 = quote(arg_12)\n if arg_3 and arg_12 == ' ':\n arg_10.append('_')\n else:\n arg_10.append(arg_12)\n # First, write out the previous line\n if arg_8 is not None:\n write(arg_8)\n # Now see if we need any soft line breaks because of RFC-imposed\n # length limitations. Then do the thisline->prevline dance.\n arg_13 = EMPTYSTRING.join(arg_10)\n while len(arg_13) > MAXLINESIZE:\n # Don't forget to include the soft line break `=' sign in the\n # length calculation!\n write(arg_13[:MAXLINESIZE-1], arg_7='=\\n')\n arg_13 = arg_13[MAXLINESIZE-1:]\n # Write out the current line\n arg_8 = arg_13\n # Write out the last line, without a trailing newline\n if arg_8 is not None:\n write(arg_8, arg_7=arg_11)"} +{"_id": "doc_2214", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the integer value of a hexadecimal number.\"\"\"\n arg_1 = 0\n for arg_2 in arg_0:\n if '0' <= arg_2 <= '9':\n arg_3 = ord('0')\n elif 'a' <= arg_2 <= 'f':\n arg_3 = ord('a')-10\n elif 'A' <= arg_2 <= 'F':\n arg_3 = ord('A')-10\n else:\n break\n arg_1 = arg_1*16 + (ord(arg_2) - arg_3)\n return arg_1"} +{"_id": "doc_2215", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Encode a string using Base64.\n\n s is the string to encode. Optional altchars must be a string of at least\n length 2 (additional characters are ignored) which specifies an\n alternative alphabet for the '+' and '/' characters. This allows an\n application to e.g. 
generate url or filesystem safe Base64 strings.\n\n The encoded string is returned.\n \"\"\"\n # Strip off the trailing newline\n arg_2 = binascii.b2a_base64(arg_0)[:-1]\n if arg_1 is not None:\n return arg_2.translate(string.maketrans(b'+/', arg_1[:2]))\n return arg_2"} +{"_id": "doc_2216", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Decode a Base64 encoded string.\n\n s is the string to decode. Optional altchars must be a string of at least\n length 2 (additional characters are ignored) which specifies the\n alternative alphabet used instead of the '+' and '/' characters.\n\n The decoded string is returned. A TypeError is raised if s is\n incorrectly padded. Characters that are neither in the normal base-64\n alphabet nor the alternative alphabet are discarded prior to the padding\n check.\n \"\"\"\n if arg_1 is not None:\n arg_0 = arg_0.translate(string.maketrans(arg_1[:2], '+/'))\n try:\n return binascii.a2b_base64(arg_0)\n except binascii.Error, msg:\n # Transform this exception for consistency\n raise TypeError(msg)"} +{"_id": "doc_2217", "title": "", "text": "def Func(arg_0):\n \"\"\"Encode a string using Base32.\n\n s is the string to encode. The encoded string is returned.\n \"\"\"\n arg_1 = []\n arg_2, arg_3 = divmod(len(arg_0), 5)\n # Pad the last quantum with zero bits if necessary\n if arg_3:\n arg_0 += ('\\0' * (5 - arg_3))\n arg_2 += 1\n for arg_4 in range(arg_2):\n # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this\n # code is to process the 40 bits in units of 5 bits. So we take the 1\n # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover\n # bits of c2 and tack them onto c3. The shifts and masks are intended\n # to give us values of exactly 5 bits in width.\n arg_5, arg_6, arg_7 = struct.unpack('!HHB', arg_0[arg_4*5:(arg_4+1)*5])\n arg_6 += (arg_5 & 1) << 16 # 17 bits wide\n arg_7 += (arg_6 & 3) << 8 # 10 bits wide\n arg_1.extend([_b32tab[arg_5 >> 11], # bits 1 - 5\n _b32tab[(arg_5 >> 6) & 0x1f], # bits 6 - 10\n _b32tab[(arg_5 >> 1) & 0x1f], # bits 11 - 15\n _b32tab[arg_6 >> 12], # bits 16 - 20 (1 - 5)\n _b32tab[(arg_6 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)\n _b32tab[(arg_6 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)\n _b32tab[arg_7 >> 5], # bits 31 - 35 (1 - 5)\n _b32tab[arg_7 & 0x1f], # bits 36 - 40 (1 - 5)\n ])\n arg_8 = EMPTYSTRING.join(arg_1)\n # Adjust for any leftover partial quanta\n if arg_3 == 1:\n return arg_8[:-6] + '======'\n elif arg_3 == 2:\n return arg_8[:-4] + '===='\n elif arg_3 == 3:\n return arg_8[:-3] + '==='\n elif arg_3 == 4:\n return arg_8[:-1] + '='\n return arg_8"} +{"_id": "doc_2218", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"Decode a Base32 encoded string.\n\n s is the string to decode. Optional casefold is a flag specifying whether\n a lowercase alphabet is acceptable as input. For security purposes, the\n default is False.\n\n RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O\n (oh), and for optional mapping of the digit 1 (one) to either the letter I\n (eye) or letter L (el). The optional argument map01 when not None,\n specifies which letter the digit 1 should be mapped to (when map01 is not\n None, the digit 0 is always mapped to the letter O). For security\n purposes the default is None, so that 0 and 1 are not allowed in the\n input.\n\n The decoded string is returned. 
A TypeError is raised if s were\n incorrectly padded or if there are non-alphabet characters present in the\n string.\n \"\"\"\n arg_3, arg_4 = divmod(len(arg_0), 8)\n if arg_4:\n raise TypeError('Incorrect padding')\n # Handle section 2.4 zero and one mapping. The flag map01 will be either\n # False, or the character to map the digit 1 (one) to. It should be\n # either L (el) or I (eye).\n if arg_2:\n arg_0 = arg_0.translate(string.maketrans(b'01', b'O' + arg_2))\n if arg_1:\n arg_0 = arg_0.upper()\n # Strip off pad characters from the right. We need to count the pad\n # characters because this will tell us how many null bytes to remove from\n # the end of the decoded string.\n arg_5 = 0\n arg_6 = re.search('(?P[=]*)$', arg_0)\n if arg_6:\n arg_5 = len(arg_6.group('pad'))\n if arg_5 > 0:\n arg_0 = arg_0[:-arg_5]\n # Now decode the full quanta\n arg_7 = []\n arg_8 = 0\n arg_9 = 35\n for arg_10 in arg_0:\n arg_11 = _b32rev.get(arg_10)\n if arg_11 is None:\n raise TypeError('Non-base32 digit found')\n arg_8 += _b32rev[arg_10] << arg_9\n arg_9 -= 5\n if arg_9 < 0:\n arg_7.append(binascii.unhexlify('%010x' % arg_8))\n arg_8 = 0\n arg_9 = 35\n # Process the last, partial quanta\n arg_12 = binascii.unhexlify('%010x' % arg_8)\n if arg_5 == 0:\n arg_12 = '' # No characters\n elif arg_5 == 1:\n arg_12 = arg_12[:-1]\n elif arg_5 == 3:\n arg_12 = arg_12[:-2]\n elif arg_5 == 4:\n arg_12 = arg_12[:-3]\n elif arg_5 == 6:\n arg_12 = arg_12[:-4]\n else:\n raise TypeError('Incorrect padding')\n arg_7.append(arg_12)\n return EMPTYSTRING.join(arg_7)"} +{"_id": "doc_2219", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Decode a Base16 encoded string.\n\n s is the string to decode. Optional casefold is a flag specifying whether\n a lowercase alphabet is acceptable as input. For security purposes, the\n default is False.\n\n The decoded string is returned. 
A TypeError is raised if s is\n incorrectly padded or if there are non-alphabet characters present in the\n string.\n \"\"\"\n if arg_1:\n arg_0 = arg_0.upper()\n if re.search('[^0-9A-F]', arg_0):\n raise TypeError('Non-base16 digit found')\n return binascii.unhexlify(arg_0)"} +{"_id": "doc_2220", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Encode a file.\"\"\"\n while True:\n arg_2 = arg_0.read(MAXBINSIZE)\n if not arg_2:\n break\n while len(arg_2) < MAXBINSIZE:\n arg_3 = arg_0.read(MAXBINSIZE-len(arg_2))\n if not arg_3:\n break\n arg_2 += arg_3\n arg_4 = binascii.b2a_base64(arg_2)\n arg_1.write(arg_4)"} +{"_id": "doc_2221", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a zero-length range located just after the Func of this range.\n \"\"\"\n return Range(arg_0.source_buffer, arg_0.Func_pos, arg_0.Func_pos,\n expanded_from=arg_0.expanded_from)"} +{"_id": "doc_2222", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a zero-based column number of the beginning of this range.\n \"\"\"\n arg_1, Func = arg_0.source_buffer.decompose_position(arg_0.begin_pos)\n return Func"} +{"_id": "doc_2223", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the line number of the beginning of this range.\n \"\"\"\n Func, arg_2 = arg_0.source_buffer.decompose_position(arg_0.begin_pos)\n return Func"} +{"_id": "doc_2224", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the lines of source code containing the entirety of this range.\n \"\"\"\n return [arg_0.source_buffer.source_line(arg_1)\n for arg_1 in range(arg_0.line(), arg_0.end().line() + 1)]"} +{"_id": "doc_2225", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n An AST comparison function. Returns ``True`` if all fields in\n ``left`` are equal to fields in ``right``; if ``Func_locs`` is\n true, all locations should match as well.\n \"\"\"\n if type(arg_0) != type(arg_1):\n return False\n\n if isinstance(arg_0, ast.AST):\n for arg_3 in arg_0._fields:\n if not Func(getattr(arg_0, arg_3), getattr(arg_1, arg_3)):\n return False\n\n if arg_2:\n for arg_4 in arg_0._locs:\n if getattr(arg_0, arg_4) != getattr(arg_1, arg_4):\n return False\n\n return True\n elif isinstance(arg_0, list):\n if len(arg_0) != len(arg_1):\n return False\n\n for arg_5, arg_6 in zip(arg_0, arg_1):\n if not Func(arg_5, arg_6):\n return False\n\n return True\n else:\n return arg_0 == arg_1"} +{"_id": "doc_2226", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Convert a 32-bit or 64-bit integer created\n by float_pack into a Python float.\"\"\"\n\n if arg_1 == 8:\n arg_3 = -1021 # = sys.float_info.min_exp\n arg_4 = 1024 # = sys.float_info.max_exp\n arg_5 = 53 # = sys.float_info.mant_dig\n arg_6 = 64\n elif arg_1 == 4:\n arg_3 = -125 # C's FLT_MIN_EXP\n arg_4 = 128 # FLT_MAX_EXP\n arg_5 = 24 # FLT_MANT_DIG\n arg_6 = 32\n else:\n raise ValueError(\"invalid size value\")\n\n if arg_0 >> arg_6:\n raise ValueError(\"input out of range\")\n\n # extract pieces\n arg_7 = arg_0 >> arg_6 - 1\n arg_8 = (arg_0 & ((1 << arg_6 - 1) - (1 << arg_5 - 1))) >> arg_5 - 1\n arg_9 = arg_0 & ((1 << arg_5 - 1) - 1)\n\n if arg_8 == arg_4 - arg_3 + 2:\n # nan or infinity\n arg_10 = float('nan') if arg_9 else float('inf')\n elif arg_8 == 0:\n # subnormal or zero\n arg_10 = math.ldexp(float(arg_9), arg_3 - arg_5)\n else:\n # normal\n arg_9 += 1 << arg_5 - 1\n arg_10 = math.ldexp(float(arg_9), arg_8 + arg_3 - arg_5 - 1)\n return -arg_10 if arg_7 else arg_10"} +{"_id": "doc_2227", "title": "", "text": "def Func(arg_0, arg_1):\n 
\"\"\"Convert a Python float x into a 64-bit unsigned integer\n with the same byte representation.\"\"\"\n\n if arg_1 == 8:\n arg_2 = -1021 # = sys.float_info.min_exp\n arg_3 = 1024 # = sys.float_info.max_exp\n arg_4 = 53 # = sys.float_info.mant_dig\n arg_5 = 64\n elif arg_1 == 4:\n arg_2 = -125 # C's FLT_MIN_EXP\n arg_3 = 128 # FLT_MAX_EXP\n arg_4 = 24 # FLT_MANT_DIG\n arg_5 = 32\n else:\n raise ValueError(\"invalid size value\")\n\n arg_6 = math.copysign(1.0, arg_0) < 0.0\n if math.isinf(arg_0):\n arg_7 = 0\n arg_8 = arg_3 - arg_2 + 2\n elif math.isnan(arg_0):\n arg_7 = 1 << (arg_4 - 2) # other values possible\n arg_8 = arg_3 - arg_2 + 2\n elif arg_0 == 0.0:\n arg_7 = 0\n arg_8 = 0\n else:\n arg_9, arg_10 = math.frexp(abs(arg_0)) # abs(x) == m * 2**e\n arg_8 = arg_10 - (arg_2 - 1)\n if arg_8 > 0:\n # Normal case.\n arg_7 = round_to_nearest(arg_9 * (1 << arg_4))\n arg_7 -= 1 << arg_4 - 1\n else:\n # Subnormal case.\n if arg_8 + arg_4 - 1 >= 0:\n arg_7 = round_to_nearest(arg_9 * (1 << arg_8 + arg_4 - 1))\n else:\n arg_7 = 0\n arg_8 = 0\n\n # Special case: rounding produced a MANT_DIG-bit mantissa.\n assert 0 <= arg_7 <= 1 << arg_4 - 1\n if arg_7 == 1 << arg_4 - 1:\n arg_7 = 0\n arg_8 += 1\n\n # Raise on overflow (in some circumstances, may want to return\n # infinity instead).\n if arg_8 >= arg_3 - arg_2 + 2:\n raise OverflowError(\"float too large to pack in this format\")\n\n # check constraints\n assert 0 <= arg_7 < 1 << arg_4 - 1\n assert 0 <= arg_8 <= arg_3 - arg_2 + 2\n assert 0 <= arg_6 <= 1\n return ((arg_6 << arg_5 - 1) | (arg_8 << arg_4 - 1)) | arg_7"} +{"_id": "doc_2228", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n A Func manager that appends ``note`` to every diagnostic processed by\n this engine.\n \"\"\"\n arg_0._appended_notes += arg_1\n yield\n del arg_0._appended_notes[-len(arg_1):]"} +{"_id": "doc_2229", "title": "", "text": "def Func(arg_0):\n \"\"\"Format a list of traceback entry tuples for printing.\n\n Given a list of tuples as returned by extract_tb() or\n extract_stack(), return a list of strings ready for printing.\n Each string in the resulting list corresponds to the item with the\n same index in the argument list. Each string ends in a newline;\n the strings may contain internal newlines as well, for those items\n whose source text line is not None.\n \"\"\"\n arg_1 = []\n for arg_2, arg_3, arg_4, arg_5 in arg_0:\n arg_6 = ' File \"%s\", line %d, in %s\\n' % (arg_2,arg_3,arg_4)\n if arg_5:\n arg_6 = arg_6 + ' %s\\n' % arg_5.strip()\n arg_1.append(arg_6)\n return arg_1"} +{"_id": "doc_2230", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Print up to 'limit' stack trace entries from the traceback 'tb'.\n\n If 'limit' is omitted or None, all entries are printed. 
If 'file'\n is omitted or None, the output goes to sys.stderr; otherwise\n 'file' should be an open file or file-like object with a write()\n method.\n \"\"\"\n if arg_2 is None:\n arg_2 = sys.stderr\n if arg_1 is None:\n if hasattr(sys, 'tracebacklimit'):\n arg_1 = sys.tracebacklimit\n arg_3 = 0\n while arg_0 is not None and (arg_1 is None or arg_3 < arg_1):\n arg_4 = arg_0.tb_frame\n arg_5 = arg_0.tb_lineno\n arg_6 = arg_4.f_code\n arg_7 = arg_6.co_filename\n arg_8 = arg_6.co_name\n _print(arg_2,\n ' File \"%s\", line %d, in %s' % (arg_7, arg_5, arg_8))\n linecache.checkcache(arg_7)\n arg_9 = linecache.getline(arg_7, arg_5, arg_4.f_globals)\n if arg_9: _print(arg_2, ' ' + arg_9.strip())\n arg_0 = arg_0.tb_next\n arg_3 = arg_3+1"} +{"_id": "doc_2231", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n \"\"\"Print exception up to 'limit' stack trace entries from 'tb' to 'file'.\n\n This differs from print_tb() in the following ways: (1) if\n traceback is not None, it prints a header \"Traceback (most recent\n call last):\"; (2) it prints the exception type and value after the\n stack trace; (3) if type is SyntaxError and value has the\n appropriate format, it prints the line where the syntax error\n occurred with a caret on the next line indicating the approximate\n position of the error.\n \"\"\"\n if arg_4 is None:\n # TODO: Use sys.stderr when that's implemented.\n arg_4 = open('/dev/stderr', 'w')\n #file = sys.stderr\n if arg_2:\n _print(arg_4, 'Traceback (most recent call last):')\n print_tb(arg_2, arg_3, arg_4)\n arg_5 = format_exception_only(arg_0, arg_1)\n for arg_6 in arg_5:\n _print(arg_4, arg_6, '')"} +{"_id": "doc_2232", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3 = None):\n \"\"\"Format a stack trace and the exception information.\n\n The arguments have the same meaning as the corresponding arguments\n to print_exception(). The return value is a list of strings, each\n ending in a newline and some containing internal newlines. When\n these lines are concatenated and printed, exactly the same text is\n printed as does print_exception().\n \"\"\"\n if arg_2:\n arg_4 = ['Traceback (most recent call last):\\n']\n arg_4 = arg_4 + format_tb(arg_2, arg_3)\n else:\n arg_4 = []\n arg_4 = arg_4 + Func_only(arg_0, arg_1)\n return arg_4"} +{"_id": "doc_2233", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"x, random=random.random -> Func list x in place; return None.\n\n Optional arg random is a 0-argument function returning a random\n float in [0.0, 1.0); by default, the standard random.random.\n\n \"\"\"\n\n if arg_2 is None:\n arg_2 = arg_0.random\n arg_3 = int\n for arg_4 in reversed(xrange(1, len(arg_1))):\n # pick an element in x[:i+1] with which to exchange x[i]\n arg_5 = arg_3(arg_2() * (arg_4+1))\n arg_1[arg_4], arg_1[arg_5] = arg_1[arg_5], arg_1[arg_4]"} +{"_id": "doc_2234", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a list of slot names for a given class.\n\n This needs to find slots defined by the class and its bases, so we\n can't simply return the __slots__ attribute. We must walk down\n the Method Resolution Order and concatenate the __slots__ of each\n class found there. 
(This assumes classes don't modify their\n __slots__ attribute to misrepresent their slots after the class is\n defined.)\n \"\"\"\n\n # Get the value from a cache in the class if possible\n arg_1 = arg_0.__dict__.get(\"_Func__\")\n if arg_1 is not None:\n return arg_1\n\n # Not cached -- calculate the value\n arg_1 = []\n if not hasattr(arg_0, \"__slots__\"):\n # This class has no slots\n pass\n else:\n # Slots found -- gather slot names from all base classes\n for arg_2 in arg_0.__mro__:\n if \"__slots__\" in arg_2.__dict__:\n arg_3 = arg_2.__dict__['__slots__']\n # if class has a single slot, it can be given as a string\n if isinstance(arg_3, basestring):\n arg_3 = (arg_3,)\n for arg_4 in arg_3:\n # special descriptors\n if arg_4 in (\"__dict__\", \"__weakref__\"):\n continue\n # mangled names\n elif arg_4.startswith('__') and not arg_4.endswith('__'):\n arg_1.append('_%s%s' % (arg_2.__name__, arg_4))\n else:\n arg_1.append(arg_4)\n\n # Cache the outcome in the class if at all possible\n try:\n arg_0._Func__ = arg_1\n except:\n pass # But don't die if we can't\n\n return arg_1"} +{"_id": "doc_2235", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a cmp= function into a key= function\"\"\"\n class K(object):\n arg_1 = ['obj']\n def __init__(arg_2, arg_3, *arg_4):\n arg_2.obj = arg_3\n def __lt__(arg_2, arg_5):\n return arg_0(arg_2.obj, arg_5.obj) < 0\n def __gt__(arg_2, arg_5):\n return arg_0(arg_2.obj, arg_5.obj) > 0\n def __eq__(arg_2, arg_5):\n return arg_0(arg_2.obj, arg_5.obj) == 0\n def __le__(arg_2, arg_5):\n return arg_0(arg_2.obj, arg_5.obj) <= 0\n def __ge__(arg_2, arg_5):\n return arg_0(arg_2.obj, arg_5.obj) >= 0\n def __ne__(arg_2, arg_5):\n return arg_0(arg_2.obj, arg_5.obj) != 0\n def __hash__(arg_2):\n raise TypeError('hash not implemented')\n return K"} +{"_id": "doc_2236", "title": "", "text": "def Func(arg_0):\n \"\"\"Read header lines.\n\n Read header lines up to the entirely blank line that terminates them.\n The (normally blank) line that ends the headers is skipped, but not\n included in the returned list. If a non-header line ends the headers,\n (which is an error), an attempt is made to backspace over it; it is\n never included in the returned list.\n\n The variable self.status is set to the empty string if all went well,\n otherwise it is an error message. The variable self.headers is a\n completely uninterpreted list of lines contained in the header (so\n printing them will reproduce the header exactly as it appears in the\n file).\n \"\"\"\n arg_0.dict = {}\n arg_0.unixfrom = ''\n arg_0.headers = lst = []\n arg_0.status = ''\n arg_5 = \"\"\n arg_6 = 1\n arg_7 = arg_8 = arg_9 = None\n if hasattr(arg_0.fp, 'unread'):\n arg_8 = arg_0.fp.unread\n elif arg_0.seekable:\n arg_9 = arg_0.fp.tell\n while 1:\n if arg_9:\n try:\n arg_7 = arg_9()\n except IOError:\n arg_7 = arg_9 = None\n arg_0.seekable = 0\n arg_11 = arg_0.fp.readline()\n if not arg_11:\n arg_0.status = 'EOF in headers'\n break\n # Skip unix From name time lines\n if arg_6 and arg_11.startswith('From '):\n arg_0.unixfrom = arg_0.unixfrom + arg_11\n continue\n arg_6 = 0\n if arg_5 and arg_11[0] in ' \\t':\n # It's a continuation line.\n lst.append(arg_11)\n arg_12 = (arg_0.dict[arg_5] + \"\\n \" + arg_11.strip())\n arg_0.dict[arg_5] = arg_12.strip()\n continue\n elif arg_0.iscomment(arg_11):\n # It's a comment. Ignore it.\n continue\n elif arg_0.islast(arg_11):\n # Note! No pushback here! 
The delimiter line gets eaten.\n break\n arg_5 = arg_0.isheader(arg_11)\n if arg_5:\n # It's a legal header line, save it.\n lst.append(arg_11)\n arg_0.dict[arg_5] = arg_11[len(arg_5)+1:].strip()\n continue\n elif arg_5 is not None:\n # An empty header name. These aren't allowed in HTTP, but it's\n # probably a benign mistake. Don't add the header, just keep\n # going.\n continue\n else:\n # It's not a header line; throw it back and stop here.\n if not arg_0.dict:\n arg_0.status = 'No headers'\n else:\n arg_0.status = 'Non-header line where header expected'\n # Try to undo the read.\n if arg_8:\n arg_8(arg_11)\n elif arg_9:\n arg_0.fp.seek(arg_7)\n else:\n arg_0.status = arg_0.status + '; bad seek'\n break"} +{"_id": "doc_2237", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determine whether a given line is a legal header.\n\n This method should return the header name, suitably canonicalized.\n You may override this method in order to use Message parsing on tagged\n data in RFC 2822-like formats with special header formats.\n \"\"\"\n arg_2 = arg_1.find(':')\n if arg_2 > -1:\n return arg_1[:arg_2].lower()\n return None"} +{"_id": "doc_2238", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the first header line matching name.\n\n This is similar to getallmatchingheaders, but it returns only the\n first matching header (and its continuation lines).\n \"\"\"\n arg_1 = arg_1.lower() + ':'\n arg_2 = len(arg_1)\n arg_3 = []\n arg_4 = 0\n for arg_5 in arg_0.headers:\n if arg_4:\n if not arg_5[:1].isspace():\n break\n elif arg_5[:arg_2].lower() == arg_1:\n arg_4 = 1\n if arg_4:\n arg_3.append(arg_5)\n return arg_3"} +{"_id": "doc_2239", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get all values for a header.\n\n This returns a list of values for headers given more than once; each\n value in the result list is stripped in the same way as the result of\n getheader(). If the header is not given, return an empty list.\n \"\"\"\n arg_2 = []\n arg_3 = ''\n arg_4 = 0\n for arg_5 in arg_0.getallmatchingheaders(arg_1):\n if arg_5[0].isspace():\n if arg_3:\n arg_3 = \"%s\\n %s\" % (arg_3, arg_5.strip())\n else:\n arg_3 = arg_5.strip()\n else:\n if arg_4:\n arg_2.append(arg_3)\n arg_3 = arg_5[arg_5.find(\":\") + 1:].strip()\n arg_4 = 1\n if arg_4:\n arg_2.append(arg_3)\n return arg_2"} +{"_id": "doc_2240", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a list of addresses from a header.\n\n Retrieves a list of addresses from a header, where each address is a\n tuple as returned by getaddr(). 
Scans all named headers, so it works\n properly with multiple To: or Cc: headers for example.\n \"\"\"\n arg_2 = []\n for arg_3 in arg_0.getallmatchingheaders(arg_1):\n if arg_3[0] in ' \\t':\n arg_2.append(arg_3)\n else:\n if arg_2:\n arg_2.append(', ')\n arg_4 = arg_3.find(':')\n if arg_4 > 0:\n arg_5 = arg_3[arg_4+1:]\n arg_2.append(arg_5)\n arg_6 = ''.join(arg_2)\n arg_7 = AddressList(arg_6)\n return arg_7.addresslist"} +{"_id": "doc_2241", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse up to the start of the next address.\"\"\"\n while arg_0.pos < len(arg_0.field):\n if arg_0.field[arg_0.pos] in arg_0.LWS + '\\n\\r':\n arg_0.pos = arg_0.pos + 1\n elif arg_0.field[arg_0.pos] == '(':\n arg_0.commentlist.append(arg_0.getcomment())\n else: break"} +{"_id": "doc_2242", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the complete domain name from an address.\"\"\"\n arg_1 = []\n while arg_0.pos < len(arg_0.field):\n if arg_0.field[arg_0.pos] in arg_0.LWS:\n arg_0.pos += 1\n elif arg_0.field[arg_0.pos] == '(':\n arg_0.commentlist.append(arg_0.getcomment())\n elif arg_0.field[arg_0.pos] == '[':\n arg_1.append(arg_0.Funcliteral())\n elif arg_0.field[arg_0.pos] == '.':\n arg_0.pos += 1\n arg_1.append('.')\n elif arg_0.field[arg_0.pos] in arg_0.atomends:\n break\n else: arg_1.append(arg_0.getatom())\n return ''.join(arg_1)"} +{"_id": "doc_2243", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a sequence of RFC 2822 phrases.\n\n A phrase is a sequence of words, which are in turn either RFC 2822\n atoms or quoted-strings. Phrases are canonicalized by squeezing all\n runs of continuous whitespace into one space.\n \"\"\"\n arg_1 = []\n\n while arg_0.pos < len(arg_0.field):\n if arg_0.field[arg_0.pos] in arg_0.LWS:\n arg_0.pos += 1\n elif arg_0.field[arg_0.pos] == '\"':\n arg_1.append(arg_0.getquote())\n elif arg_0.field[arg_0.pos] == '(':\n arg_0.commentlist.append(arg_0.getcomment())\n elif arg_0.field[arg_0.pos] in arg_0.phraseends:\n break\n else:\n arg_1.append(arg_0.getatom(arg_0.phraseends))\n\n return arg_1"} +{"_id": "doc_2244", "title": "", "text": "def Func(arg_0, arg_1):\n \"year, month -> number of days in that month in that year.\"\n assert 1 <= arg_1 <= 12, arg_1\n if arg_1 == 2 and _is_leap(arg_0):\n return 29\n return _DAYS_IN_MONTH[arg_1]"} +{"_id": "doc_2245", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"year, month, day -> ordinal, considering 01-Jan-0001 as day 1.\"\n assert 1 <= arg_1 <= 12, 'month must be in 1..12'\n arg_3 = _days_in_month(arg_0, arg_1)\n assert 1 <= arg_2 <= arg_3, ('day must be in 1..%d' % arg_3)\n return (_days_before_year(arg_0) +\n _days_before_month(arg_0, arg_1) +\n arg_2)"} +{"_id": "doc_2246", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Return a new date with new values for the specified fields.\"\"\"\n if arg_1 is None:\n arg_1 = arg_0._year\n if arg_2 is None:\n arg_2 = arg_0._month\n if arg_3 is None:\n arg_3 = arg_0._day\n return date.__new__(type(arg_0), arg_1, arg_2, arg_3)"} +{"_id": "doc_2247", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a 3-tuple containing ISO year, week number, and weekday.\n\n The first ISO week of the year is the (Mon-Sun) week\n containing the year's first Thursday; everything else derives\n from that.\n\n The first week is 1; Monday is 1 ... 
Sunday is 7.\n\n ISO calendar algorithm taken from\n http://www.phys.uu.nl/~vgent/calendar/Func.htm\n \"\"\"\n arg_1 = arg_0._year\n arg_2 = _isoweek1monday(arg_1)\n arg_3 = _ymd2ord(arg_0._year, arg_0._month, arg_0._day)\n # Internally, week and day have origin 0\n arg_4, arg_5 = divmod(arg_3 - arg_2, 7)\n if arg_4 < 0:\n arg_1 -= 1\n arg_2 = _isoweek1monday(arg_1)\n arg_4, arg_5 = divmod(arg_3 - arg_2, 7)\n elif arg_4 >= 52:\n if arg_3 >= _isoweek1monday(arg_1+1):\n arg_1 += 1\n arg_4 = 0\n return arg_1, arg_4+1, arg_5+1"} +{"_id": "doc_2248", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the timezone name.\n\n Note that the name is 100% informational -- there's no requirement that\n it mean anything in particular. For example, \"GMT\", \"UTC\", \"-500\",\n \"-5:00\", \"EDT\", \"US/Eastern\", \"America/New York\" are all valid replies.\n \"\"\"\n if arg_0._tzinfo is None:\n return None\n arg_1 = arg_0._tzinfo.Func(None)\n _check_Func(arg_1)\n return arg_1"} +{"_id": "doc_2249", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n arg_5=True):\n \"\"\"Return a new time with new values for the specified fields.\"\"\"\n if arg_1 is None:\n arg_1 = arg_0.hour\n if arg_2 is None:\n arg_2 = arg_0.minute\n if arg_3 is None:\n arg_3 = arg_0.second\n if arg_4 is None:\n arg_4 = arg_0.microsecond\n if arg_5 is True:\n arg_5 = arg_0.tzinfo\n return time.__new__(type(arg_0),\n arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_2250", "title": "", "text": "def Func(arg_0):\n \"Return the time part, with same tzinfo.\"\n return time(arg_0.hour, arg_0.minute, arg_0.second, arg_0.microsecond,\n arg_0._tzinfo)"} +{"_id": "doc_2251", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None, arg_7=None, arg_8=True):\n \"\"\"Return a new datetime with new values for the specified fields.\"\"\"\n if arg_1 is None:\n arg_1 = arg_0.year\n if arg_2 is None:\n arg_2 = arg_0.month\n if arg_3 is None:\n arg_3 = arg_0.day\n if arg_4 is None:\n arg_4 = arg_0.hour\n if arg_5 is None:\n arg_5 = arg_0.minute\n if arg_6 is None:\n arg_6 = arg_0.second\n if arg_7 is None:\n arg_7 = arg_0.microsecond\n if arg_8 is True:\n arg_8 = arg_0.tzinfo\n return datetime.__new__(type(arg_0),\n arg_1, arg_2, arg_3, arg_4, arg_5, arg_6,\n arg_7, arg_8)"} +{"_id": "doc_2252", "title": "", "text": "def Func(arg_0, arg_1):\n \"Same as a + b, for a and b sequences.\"\n if not hasattr(arg_0, '__getitem__'):\n arg_2 = \"'%s' object can't be Funcenated\" % type(arg_0).__name__\n raise TypeError(arg_2)\n return arg_0 + arg_1"} +{"_id": "doc_2253", "title": "", "text": "def Func(arg_0, arg_1):\n \"Return the first index of b in a.\"\n for arg_2, arg_3 in enumerate(arg_0):\n if arg_3 == arg_1:\n return arg_2\n else:\n raise ValueError('sequence.index(x): x not in sequence')"} +{"_id": "doc_2254", "title": "", "text": "def Func(arg_0, arg_1):\n \"Same as a += b, for a and b sequences.\"\n if not hasattr(arg_0, '__getitem__'):\n arg_2 = \"'%s' object can't be concatenated\" % type(arg_0).__name__\n raise TypeError(arg_2)\n arg_0 += arg_1\n return arg_0"} +{"_id": "doc_2255", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0, arg_4=0):\n \"\"\"Return the string obtained by replacing the leftmost\n non-overlapping occurrences of the pattern in string by the\n replacement repl. repl can be either a string or a callable;\n if a string, backslash escapes in it are processed. 
If it is\n a callable, it's passed the match object and must return\n a replacement string to be used.\"\"\"\n return _compile(arg_0, arg_4).Func(arg_1, arg_2, arg_3)"} +{"_id": "doc_2256", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=0):\n \"\"\"Split the source string by the occurrences of the pattern,\n returning a list containing the resulting substrings.\"\"\"\n return _compile(arg_0, arg_3).Func(arg_1, arg_2)"} +{"_id": "doc_2257", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"Return a list of all non-overlapping matches in the string.\n\n If one or more groups are present in the pattern, return a\n list of groups; this will be a list of tuples if the pattern\n has more than one group.\n\n Empty matches are included in the result.\"\"\"\n return _compile(arg_0, arg_2).Func(arg_1)\n\n # if sys.hexversion >= 0x02020000:\n # __all__.append(\"finditer\")\n def finditer(arg_0, arg_1, arg_2=0):\n \"\"\"Return an iterator over all non-overlapping matches in the\n string. For each match, the iterator returns a match object.\n\n Empty matches are included in the result.\"\"\"\n return _compile(arg_0, arg_2).finditer(arg_1)"} +{"_id": "doc_2258", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=0):\n \"\"\"Decode uuencoded file\"\"\"\n #\n # Open the input file, if needed.\n #\n arg_4 = []\n if arg_0 == '-':\n arg_0 = sys.stdin\n elif isinstance(arg_0, basestring):\n arg_0 = open(arg_0)\n arg_4.append(arg_0)\n try:\n #\n # Read until a begin is encountered or we've exhausted the file\n #\n while True:\n arg_5 = arg_0.readline()\n if not arg_5:\n raise Error('No valid begin line found in input file')\n if not arg_5.startswith('begin'):\n continue\n arg_6 = arg_5.split(' ', 2)\n if len(arg_6) == 3 and arg_6[0] == 'begin':\n try:\n int(arg_6[1], 8)\n break\n except ValueError:\n pass\n if arg_1 is None:\n arg_1 = arg_6[2].rstrip()\n if os.path.exists(arg_1):\n raise Error('Cannot overwrite existing file: %s' % arg_1)\n if arg_2 is None:\n arg_2 = int(arg_6[1], 8)\n #\n # Open the output file\n #\n if arg_1 == '-':\n arg_1 = sys.stdout\n elif isinstance(arg_1, basestring):\n arg_7 = open(arg_1, 'wb')\n try:\n os.path.chmod(arg_1, arg_2)\n except AttributeError:\n pass\n arg_1 = arg_7\n arg_4.append(arg_1)\n #\n # Main decoding loop\n #\n arg_8 = arg_0.readline()\n while arg_8 and arg_8.strip() != 'end':\n try:\n arg_9 = binascii.a2b_uu(arg_8)\n except binascii.Error, v:\n # Workaround for broken uuencoders by /Fredrik Lundh\n arg_10 = (((ord(arg_8[0])-32) & 63) * 4 + 5) // 3\n arg_9 = binascii.a2b_uu(arg_8[:arg_10])\n if not arg_3:\n sys.stderr.write(\"Warning: %s\\n\" % v)\n arg_1.write(arg_9)\n arg_8 = arg_0.readline()\n if not arg_8:\n raise Error('Truncated input file')\n finally:\n for arg_11 in arg_4:\n arg_11.close()"} +{"_id": "doc_2259", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return number of `ch` characters at the start of `line`.\n\n Example:\n\n >>> Func(' abc', ' ')\n 3\n \"\"\"\n\n arg_2, arg_3 = 0, len(arg_0)\n while arg_2 < arg_3 and arg_0[arg_2] == arg_1:\n arg_2 += 1\n return arg_2"} +{"_id": "doc_2260", "title": "", "text": "def Func(arg_0, arg_1, arg_2='', arg_3='', arg_4='',\n arg_5='', arg_6=3, arg_7='\\n'):\n r\"\"\"\n Compare two sequences of lines; generate the delta as a unified diff.\n\n Unified diffs are a compact way of showing line changes and a few\n lines of context. 
The number of context lines is set by 'n' which\n defaults to three.\n\n By default, the diff control lines (those with ---, +++, or @@) are\n created with a trailing newline. This is helpful so that inputs\n created from file.readlines() result in diffs that are suitable for\n file.writelines() since both the inputs and outputs have trailing\n newlines.\n\n For inputs that do not have trailing newlines, set the lineterm\n argument to \"\" so that the output will be uniformly newline free.\n\n The unidiff format normally has a header for filenames and modification\n times. Any or all of these may be specified using strings for\n 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.\n The modification times are normally expressed in the ISO 8601 format.\n\n Example:\n\n >>> for line in Func('one two three four'.split(),\n ... 'zero one tree four'.split(), 'Original', 'Current',\n ... '2005-01-26 23:30:50', '2010-04-02 10:20:52',\n ... lineterm=''):\n ... print line # doctest: +NORMALIZE_WHITESPACE\n --- Original 2005-01-26 23:30:50\n +++ Current 2010-04-02 10:20:52\n @@ -1,4 +1,4 @@\n +zero\n one\n -two\n -three\n +tree\n four\n \"\"\"\n\n arg_8 = False\n for arg_9 in SequenceMatcher(None,arg_0,arg_1).get_grouped_opcodes(arg_6):\n if not arg_8:\n arg_8 = True\n # fromdate = '\\t{}'.format(fromfiledate) if fromfiledate else ''\n arg_10 = '\\t%s' % (arg_4) if arg_4 else ''\n # todate = '\\t{}'.format(tofiledate) if tofiledate else ''\n arg_11 = '\\t%s' % (arg_5) if arg_5 else ''\n # yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)\n yield '--- %s%s%s' % (arg_2, arg_10, arg_7)\n # yield '+++ {}{}{}'.format(tofile, todate, lineterm)\n yield '+++ %s%s%s' % (arg_3, arg_11, arg_7)\n\n arg_12, arg_13 = arg_9[0], arg_9[-1]\n arg_14 = _format_range_unified(arg_12[1], arg_13[2])\n arg_15 = _format_range_unified(arg_12[3], arg_13[4])\n # yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)\n yield '@@ -%s +%s @@%s' % (arg_14, arg_15, arg_7)\n\n for arg_16, arg_17, arg_18, arg_19, arg_20 in arg_9:\n if arg_16 == 'equal':\n for arg_21 in arg_0[arg_17:arg_18]:\n yield ' ' + arg_21\n continue\n if arg_16 in ('replace', 'delete'):\n for arg_21 in arg_0[arg_17:arg_18]:\n yield '-' + arg_21\n if arg_16 in ('replace', 'insert'):\n for arg_21 in arg_1[arg_19:arg_20]:\n yield '+' + arg_21"} +{"_id": "doc_2261", "title": "", "text": "def Func(arg_0, arg_1, arg_2='', arg_3='',\n arg_4='', arg_5='', arg_6=3, arg_7='\\n'):\n r\"\"\"\n Compare two sequences of lines; generate the delta as a context diff.\n\n Context diffs are a compact way of showing line changes and a few\n lines of context. The number of context lines is set by 'n' which\n defaults to three.\n\n By default, the diff control lines (those with *** or ---) are\n created with a trailing newline. This is helpful so that inputs\n created from file.readlines() result in diffs that are suitable for\n file.writelines() since both the inputs and outputs have trailing\n newlines.\n\n For inputs that do not have trailing newlines, set the lineterm\n argument to \"\" so that the output will be uniformly newline free.\n\n The context diff format normally has a header for filenames and\n modification times. Any or all of these may be specified using\n strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.\n The modification times are normally expressed in the ISO 8601 format.\n If not specified, the strings default to blanks.\n\n Example:\n\n >>> print ''.join(Func('one\\ntwo\\nthree\\nfour\\n'.splitlines(1),\n ... 
'zero\\none\\ntree\\nfour\\n'.splitlines(1), 'Original', 'Current')),\n *** Original\n --- Current\n ***************\n *** 1,4 ****\n one\n ! two\n ! three\n four\n --- 1,4 ----\n + zero\n one\n ! tree\n four\n \"\"\"\n\n arg_8 = dict(insert='+ ', delete='- ', replace='! ', equal=' ')\n arg_9 = False\n for arg_10 in SequenceMatcher(None,arg_0,arg_1).get_grouped_opcodes(arg_6):\n if not arg_9:\n arg_9 = True\n # fromdate = '\\t{}'.format(fromfiledate) if fromfiledate else ''\n arg_11 = '\\t%s' % (arg_4) if arg_4 else ''\n # todate = '\\t{}'.format(tofiledate) if tofiledate else ''\n arg_12 = '\\t%s' % (arg_5) if arg_5 else ''\n # yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)\n yield '*** %s%s%s' % (arg_2, arg_11, arg_7)\n # yield '--- {}{}{}'.format(tofile, todate, lineterm)\n yield '--- %s%s%s' % (arg_3, arg_12, arg_7)\n\n arg_13, arg_14 = arg_10[0], arg_10[-1]\n yield '***************' + arg_7\n\n arg_15 = _format_range_context(arg_13[1], arg_14[2])\n # yield '*** {} ****{}'.format(file1_range, lineterm)\n yield '*** %s ****%s' % (arg_15, arg_7)\n\n if any(arg_16 in ('replace', 'delete') for arg_16, arg_17, arg_17, arg_17, arg_17 in arg_10):\n for arg_16, arg_18, arg_19, arg_17, arg_17 in arg_10:\n if arg_16 != 'insert':\n for arg_20 in arg_0[arg_18:arg_19]:\n yield arg_8[arg_16] + arg_20\n\n arg_21 = _format_range_context(arg_13[3], arg_14[4])\n # yield '--- {} ----{}'.format(file2_range, lineterm)\n yield '--- %s ----%s' % (arg_21, arg_7)\n\n if any(arg_16 in ('replace', 'insert') for arg_16, arg_17, arg_17, arg_17, arg_17 in arg_10):\n for arg_16, arg_17, arg_17, arg_22, arg_23 in arg_10:\n if arg_16 != 'delete':\n for arg_20 in arg_1[arg_22:arg_23]:\n yield arg_8[arg_16] + arg_20"} +{"_id": "doc_2262", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.__new__, arg_5=arg_5):\n 'Make a new Match object from a sequence or iterable'\n arg_6 = arg_2(arg_0, arg_1)\n if arg_5(arg_6) != 3:\n raise TypeError('Expected 3 arguments, got %d' % arg_5(arg_6))\n return arg_6"} +{"_id": "doc_2263", "title": "", "text": "def Func(arg_0):\n \"\"\"Return list of triples describing matching subsequences.\n\n Each triple is of the form (i, j, n), and means that\n a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in\n i and in j. New in Python 2.5, it's also guaranteed that if\n (i, j, n) and (i', j', n') are adjacent triples in the list, and\n the second is not the last triple in the list, then i+n != i' or\n j+n != j'. IOW, adjacent triples never describe adjacent equal\n blocks.\n\n The last triple is a dummy, (len(a), len(b), 0), and is the only\n triple with n==0.\n\n >>> s = SequenceMatcher(None, \"abxcd\", \"abcd\")\n >>> s.Func()\n [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]\n \"\"\"\n\n if arg_0.matching_blocks is not None:\n return arg_0.matching_blocks\n arg_1, arg_2 = len(arg_0.a), len(arg_0.b)\n\n # This is most naturally expressed as a recursive algorithm, but\n # at least one user bumped into extreme use cases that exceeded\n # the recursion limit on their box. 
So, now we maintain a list\n # ('queue`) of blocks we still need to look at, and append partial\n # results to `matching_blocks` in a loop; the matches are sorted\n # at the end.\n arg_3 = [(0, arg_1, 0, arg_2)]\n arg_4 = []\n while arg_3:\n arg_5, arg_6, arg_7, arg_8 = arg_3.pop()\n arg_9, arg_10, arg_11 = x = arg_0.find_longest_match(arg_5, arg_6, arg_7, arg_8)\n # a[alo:i] vs b[blo:j] unknown\n # a[i:i+k] same as b[j:j+k]\n # a[i+k:ahi] vs b[j+k:bhi] unknown\n if arg_11: # if k is 0, there was no matching block\n arg_4.append(x)\n if arg_5 < arg_9 and arg_7 < arg_10:\n arg_3.append((arg_5, arg_9, arg_7, arg_10))\n if arg_9+arg_11 < arg_6 and arg_10+arg_11 < arg_8:\n arg_3.append((arg_9+arg_11, arg_6, arg_10+arg_11, arg_8))\n arg_4.sort()\n\n # It's possible that we have adjacent equal blocks in the\n # matching_blocks list now. Starting with 2.5, this code was added\n # to collapse them.\n arg_12 = arg_17 = arg_18 = 0\n arg_13 = []\n for arg_14, arg_15, arg_16 in arg_4:\n # Is this block adjacent to i1, j1, k1?\n if arg_12 + arg_18 == arg_14 and arg_17 + arg_18 == arg_15:\n # Yes, so collapse them -- this just increases the length of\n # the first block by the length of the second, and the first\n # block so lengthened remains the block to compare against.\n arg_18 += arg_16\n else:\n # Not adjacent. Remember the first block (k1==0 means it's\n # the dummy we started with), and make the second block the\n # new block to compare against.\n if arg_18:\n arg_13.append((arg_12, arg_17, arg_18))\n arg_12, arg_17, arg_18 = arg_14, arg_15, arg_16\n if arg_18:\n arg_13.append((arg_12, arg_17, arg_18))\n\n arg_13.append( (arg_1, arg_2, 0) )\n arg_0.matching_blocks = map(Match._make, arg_13)\n return arg_0.matching_blocks"} +{"_id": "doc_2264", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r\"\"\"\n Compare two sequences of lines; generate the resulting delta.\n\n Each sequence must contain individual single-line strings ending with\n newlines. Such sequences can be obtained from the `readlines()` method\n of file-like objects. The delta generated also consists of newline-\n terminated strings, ready to be printed as-is via the writeline()\n method of a file-like object.\n\n Example:\n\n >>> print ''.join(Differ().Func('one\\ntwo\\nthree\\n'.splitlines(1),\n ... 'ore\\ntree\\nemu\\n'.splitlines(1))),\n - one\n ? ^\n + ore\n ? ^\n - two\n - three\n ? -\n + tree\n + emu\n \"\"\"\n\n arg_3 = SequenceMatcher(arg_0.linejunk, arg_1, arg_2)\n for arg_4, arg_5, arg_6, arg_7, arg_8 in arg_3.get_opcodes():\n if arg_4 == 'replace':\n arg_9 = arg_0._fancy_replace(arg_1, arg_5, arg_6, arg_2, arg_7, arg_8)\n elif arg_4 == 'delete':\n arg_9 = arg_0._dump('-', arg_1, arg_5, arg_6)\n elif arg_4 == 'insert':\n arg_9 = arg_0._dump('+', arg_2, arg_7, arg_8)\n elif arg_4 == 'equal':\n arg_9 = arg_0._dump(' ', arg_1, arg_5, arg_6)\n else:\n raise ValueError, 'unknown tag %r' % (arg_4,)\n\n for arg_10 in arg_9:\n yield arg_10"} +{"_id": "doc_2265", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n r\"\"\"\n Format \"?\" output and deal with leading tabs.\n\n Example:\n\n >>> d = Differ()\n >>> results = d.Func('\\tabcDefghiJkl\\n', '\\tabcdefGhijkl\\n',\n ... ' ^ ^ ^ ', ' ^ ^ ^ ')\n >>> for line in results: print repr(line)\n ...\n '- \\tabcDefghiJkl\\n'\n '? \\t ^ ^ ^\\n'\n '+ \\tabcdefGhijkl\\n'\n '? 
\\t ^ ^ ^\\n'\n \"\"\"\n\n # Can hurt, but will probably help most of the time.\n arg_5 = min(_count_leading(arg_1, \"\\t\"),\n _count_leading(arg_2, \"\\t\"))\n arg_5 = min(arg_5, _count_leading(arg_3[:arg_5], \" \"))\n arg_5 = min(arg_5, _count_leading(arg_4[:arg_5], \" \"))\n arg_3 = arg_3[arg_5:].rstrip()\n arg_4 = arg_4[arg_5:].rstrip()\n\n yield \"- \" + arg_1\n if arg_3:\n yield \"? %s%s\\n\" % (\"\\t\" * arg_5, arg_3)\n\n yield \"+ \" + arg_2\n if arg_4:\n yield \"? %s%s\\n\" % (\"\\t\" * arg_5, arg_4)"} +{"_id": "doc_2266", "title": "", "text": "def Func(arg_0,arg_1,arg_2,arg_3='',arg_4='',arg_5=False,\n arg_6=5):\n \"\"\"Returns HTML file of side by side comparison with change highlights\n\n Arguments:\n fromlines -- list of \"from\" lines\n tolines -- list of \"to\" lines\n fromdesc -- \"from\" file column header string\n todesc -- \"to\" file column header string\n context -- set to True for contextual differences (defaults to False\n which shows full differences).\n numlines -- number of context lines. When context is set True,\n controls number of lines displayed before and after the change.\n When context is False, controls the number of lines to place\n the \"next\" link anchors before the next change (so click of\n \"next\" link jumps to just before the change).\n \"\"\"\n\n return arg_0._file_template % dict(\n styles = arg_0._styles,\n legend = arg_0._legend,\n table = arg_0.make_table(arg_1,arg_2,arg_3,arg_4,\n arg_5=arg_5,arg_6=arg_6))"} +{"_id": "doc_2267", "title": "", "text": "def Func(arg_0,arg_1,arg_2,arg_3):\n \"\"\"Builds list of text lines by splitting text lines at wrap point\n\n This function will determine if the input text line needs to be\n wrapped (split) into separate lines. If so, the first wrap point\n will be determined and the first line appended to the output\n text line list. 
This function is used recursively to handle\n the second part of the split line to further split it.\n \"\"\"\n # if blank line or context separator, just add it to the output list\n if not arg_2:\n arg_1.append((arg_2,arg_3))\n return\n\n # if line text doesn't need wrapping, just add it to the output list\n arg_4 = len(arg_3)\n arg_5 = arg_0._wrapcolumn\n if (arg_4 <= arg_5) or ((arg_4 -(arg_3.count('\\0')*3)) <= arg_5):\n arg_1.append((arg_2,arg_3))\n return\n\n # scan text looking for the wrap point, keeping track if the wrap\n # point is inside markers\n arg_6 = 0\n arg_7 = 0\n arg_8 = ''\n while arg_7 < arg_5 and arg_6 < arg_4:\n if arg_3[arg_6] == '\\0':\n arg_6 += 1\n arg_8 = arg_3[arg_6]\n arg_6 += 1\n elif arg_3[arg_6] == '\\1':\n arg_6 += 1\n arg_8 = ''\n else:\n arg_6 += 1\n arg_7 += 1\n\n # wrap point is inside text, break it up into separate lines\n arg_9 = arg_3[:arg_6]\n arg_10 = arg_3[arg_6:]\n\n # if wrap point is inside markers, place end marker at end of first\n # line and start marker at beginning of second line because each\n # line will have its own table tag markup around it.\n if arg_8:\n arg_9 = arg_9 + '\\1'\n arg_10 = '\\0' + arg_8 + arg_10\n\n # tack on first line onto the output list\n arg_1.append((arg_2,arg_9))\n\n # use this routine again to wrap the remaining text\n arg_0.Func(arg_1,'>',arg_10)"} +{"_id": "doc_2268", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"Collects mdiff output into separate lists\n\n Before storing the mdiff from/to data into a list, it is converted\n into a single line of text with HTML markup.\n \"\"\"\n\n arg_2,arg_3,arg_4 = [],[],[]\n # pull from/to data and flags from mdiff style iterator\n for arg_5,arg_6,arg_7 in arg_1:\n try:\n # store HTML markup of the lines into the lists\n arg_2.append(arg_0._format_line(0,arg_7,*arg_5))\n arg_3.append(arg_0._format_line(1,arg_7,*arg_6))\n except TypeError:\n # exceptions occur for lines where context separators go\n arg_2.append(None)\n arg_3.append(None)\n arg_4.append(arg_7)\n return arg_2,arg_3,arg_4"} +{"_id": "doc_2269", "title": "", "text": "def Func(arg_0):\n \"\"\"Create unique anchor prefixes\"\"\"\n\n # Generate a unique anchor prefix so multiple tables\n # can exist on the same HTML page without conflicts.\n arg_1 = \"from%d_\" % HtmlDiff._default_prefix\n arg_2 = \"to%d_\" % HtmlDiff._default_prefix\n HtmlDiff._default_prefix += 1\n # store prefixes so line format method has access\n arg_0._prefix = [arg_1,arg_2]"} +{"_id": "doc_2270", "title": "", "text": "def Func(arg_0,arg_1,arg_2,arg_3='',arg_4='',arg_5=False,\n arg_6=5):\n \"\"\"Returns HTML table of side by side comparison with change highlights\n\n Arguments:\n fromlines -- list of \"from\" lines\n tolines -- list of \"to\" lines\n fromdesc -- \"from\" file column header string\n todesc -- \"to\" file column header string\n context -- set to True for contextual differences (defaults to False\n which shows full differences).\n numlines -- number of context lines. 
When context is set True,\n controls number of lines displayed before and after the change.\n When context is False, controls the number of lines to place\n the \"next\" link anchors before the next change (so click of\n \"next\" link jumps to just before the change).\n \"\"\"\n\n # make unique anchor prefixes so that multiple tables may exist\n # on the same page without conflict.\n arg_0._make_prefix()\n\n # change tabs to spaces before it gets more difficult after we insert\n # markup\n arg_1,arg_2 = arg_0._tab_newline_replace(arg_1,arg_2)\n\n # create diffs iterator which generates side by side from/to data\n if arg_5:\n arg_7 = arg_6\n else:\n arg_7 = None\n arg_8 = _mdiff(arg_1,arg_2,arg_7,linejunk=arg_0._linejunk,\n charjunk=arg_0._charjunk)\n\n # set up iterator to wrap lines that exceed desired width\n if arg_0._wrapcolumn:\n arg_8 = arg_0._line_wrapper(arg_8)\n\n # collect up from/to lines and flags into lists (also format the lines)\n arg_9,arg_10,arg_11 = arg_0._collect_lines(arg_8)\n\n # process change flags, generating middle column of next anchors/links\n arg_9,arg_10,arg_11,arg_12,arg_13 = arg_0._convert_flags(\n arg_9,arg_10,arg_11,arg_5,arg_6)\n\n arg_14 = []\n arg_15 = ' %s%s' + \\\n '%s%s\\n'\n for arg_16 in range(len(arg_11)):\n if arg_11[arg_16] is None:\n # mdiff yields None on separator lines skip the bogus ones\n # generated for the first line\n if arg_16 > 0:\n arg_14.append(' \\n \\n')\n else:\n arg_14.append( arg_15 % (arg_13[arg_16],arg_12[arg_16],arg_9[arg_16],\n arg_12[arg_16],arg_10[arg_16]))\n if arg_3 or arg_4:\n arg_17 = '%s%s%s%s' % (\n '
',\n '%s' % arg_3,\n '
',\n '%s' % arg_4)\n else:\n arg_17 = ''\n\n arg_18 = arg_0._table_template % dict(\n data_rows=''.join(arg_14),\n arg_17=arg_17,\n prefix=arg_0._prefix[1])\n\n return arg_18.replace('\\0+',''). \\\n replace('\\0-',''). \\\n replace('\\0^',''). \\\n replace('\\1',''). \\\n replace('\\t',' ')"} +{"_id": "doc_2271", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"Create and return a benchmark that runs work_func p times in parallel.\"\"\"\n def Benchmark(arg_3): # pylint: disable=missing-docstring\n arg_4 = threading.Event()\n def Target():\n arg_4.wait()\n for arg_5 in xrange(arg_3.N / arg_0):\n arg_1(*arg_2)\n arg_6 = []\n for arg_5 in xrange(arg_0):\n arg_7 = threading.Thread(target=Target)\n arg_7.start()\n arg_6.append(arg_7)\n arg_3.ResetTimer()\n arg_4.set()\n for arg_7 in arg_6:\n arg_7.join()\n return Benchmark"} +{"_id": "doc_2272", "title": "", "text": "def Func(arg_0):\n \"\"\"List directory contents, using cache.\"\"\"\n try:\n arg_1, arg_2 = arg_4[arg_0]\n del arg_4[arg_0]\n except KeyError:\n arg_1, arg_2 = -1, []\n arg_3 = os.stat(arg_0).st_mtime\n if arg_3 != arg_1:\n arg_2 = os.Func(arg_0)\n arg_2.sort()\n arg_4[arg_0] = arg_3, arg_2\n return arg_2"} +{"_id": "doc_2273", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=80, arg_3=None):\n \"\"\"Format a Python o into a pretty-printed representation.\"\"\"\n return PrettyPrinter(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3).Func(arg_0)"} +{"_id": "doc_2274", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n A decorator returning a function that first runs ``inner_rule`` and then, if its\n return value is not None, maps that value using ``mapper``.\n\n If the value being mapped is a tuple, it is expanded into multiple arguments.\n\n Similar to attaching semantic Funcs to rules in traditional parser generators.\n \"\"\"\n def decorator(arg_2):\n @llrule(arg_1, arg_0.expected)\n def outer_rule(arg_3):\n arg_4 = arg_0(arg_3)\n if arg_4 is unmatched:\n return arg_4\n if isinstance(arg_4, tuple):\n return arg_2(arg_3, *arg_4)\n else:\n return arg_2(arg_3, arg_4)\n return outer_rule\n return decorator"} +{"_id": "doc_2275", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n A rule that accepts a sequence of tokens satisfying ``rules`` and returns a tuple\n containing their return values, or None if the first rule was not satisfied.\n \"\"\"\n @llrule(arg_2.get(\"loc\", None), arg_0.expected)\n def arg_6(arg_3):\n arg_4 = arg_0(arg_3)\n if arg_4 is unmatched:\n return arg_4\n\n arg_5 = [arg_4]\n for arg_6 in arg_1:\n arg_4 = arg_6(arg_3)\n if arg_4 is unmatched:\n return arg_4\n arg_5.append(arg_4)\n return tuple(arg_5)\n return arg_6"} +{"_id": "doc_2276", "title": "", "text": "def Func(arg_0=None):\n \"\"\"A rule that accepts token of kind ``newline`` and returns an empty list.\"\"\"\n @llrule(arg_0, lambda arg_1: [\"newline\"])\n def rule(arg_1):\n arg_2 = arg_1._accept(\"newline\")\n if arg_2 is unmatched:\n return arg_2\n return []\n return rule"} +{"_id": "doc_2277", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Join a base URL and a possibly relative URL to form an absolute\n interpretation of the latter.\"\"\"\n if not arg_0:\n return arg_1\n if not arg_1:\n return arg_0\n arg_3, arg_4, arg_5, arg_6, arg_7, arg_8 = \\\n urlparse(arg_0, '', arg_2)\n arg_9, arg_10, arg_11, arg_12, arg_13, arg_14 = \\\n urlparse(arg_1, arg_3, arg_2)\n if arg_9 != arg_3 or arg_9 not in uses_relative:\n return arg_1\n if arg_9 in uses_netloc:\n if arg_10:\n return urlunparse((arg_9, 
arg_10, arg_11,\n arg_12, arg_13, arg_14))\n arg_10 = arg_4\n if arg_11[:1] == '/':\n return urlunparse((arg_9, arg_10, arg_11,\n arg_12, arg_13, arg_14))\n if not arg_11 and not arg_12:\n arg_11 = arg_5\n arg_12 = arg_6\n if not arg_13:\n arg_13 = arg_7\n return urlunparse((arg_9, arg_10, arg_11,\n arg_12, arg_13, arg_14))\n arg_15 = arg_5.split('/')[:-1] + arg_11.split('/')\n # XXX The stuff below is bogus in various ways...\n if arg_15[-1] == '.':\n arg_15[-1] = ''\n while '.' in arg_15:\n arg_15.remove('.')\n while 1:\n arg_16 = 1\n arg_17 = len(arg_15) - 1\n while arg_16 < arg_17:\n if (arg_15[arg_16] == '..'\n and arg_15[arg_16-1] not in ('', '..')):\n del arg_15[arg_16-1:arg_16+1]\n break\n arg_16 = arg_16+1\n else:\n break\n if arg_15 == ['', '..']:\n arg_15[-1] = ''\n elif len(arg_15) >= 2 and arg_15[-1] == '..':\n arg_15[-2:] = ['']\n return urlunparse((arg_9, arg_10, '/'.join(arg_15),\n arg_12, arg_13, arg_14))"} +{"_id": "doc_2278", "title": "", "text": "def Func(arg_0):\n \"\"\"Removes any existing fragment from URL.\n\n Returns a tuple of the defragmented URL and the fragment. If\n the URL contained no fragments, the second element is the\n empty string.\n \"\"\"\n if '#' in arg_0:\n arg_1, arg_2, arg_3, arg_4, arg_5, arg_6 = urlparse(arg_0)\n arg_7 = urlunparse((arg_1, arg_2, arg_3, arg_4, arg_5, ''))\n return arg_7, arg_6\n else:\n return arg_0, ''"} +{"_id": "doc_2279", "title": "", "text": "def Func(arg_0, **arg_1):\n 'Return a new SplitResult object replacing specified fields with new values'\n arg_2 = arg_0._make(map(arg_1.pop, ('scheme', 'netloc', 'path', 'query', 'fragment'), arg_0))\n if arg_1:\n raise ValueError('Got unexpected field names: %r' % arg_1.keys())\n return arg_2"} +{"_id": "doc_2280", "title": "", "text": "def Func(arg_0):\n \"\"\"Test whether a path is a regular file\"\"\"\n try:\n arg_1 = os.stat(arg_0)\n except os.error:\n return False\n return stat.S_ISREG(arg_1.st_mode)"} +{"_id": "doc_2281", "title": "", "text": "def Func(arg_0):\n \"\"\"Return true if the pathname refers to an existing directory.\"\"\"\n try:\n arg_1 = os.stat(arg_0)\n except os.error:\n return False\n return stat.S_ISDIR(arg_1.st_mode)"} +{"_id": "doc_2282", "title": "", "text": "def Func(arg_0):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not arg_0: return ''\n arg_1 = min(arg_0)\n arg_2 = max(arg_0)\n for arg_3, arg_4 in enumerate(arg_1):\n if arg_4 != arg_2[arg_3]:\n return arg_1[:arg_3]\n return arg_1"} +{"_id": "doc_2283", "title": "", "text": "def Func(arg_0, arg_1=70, **arg_2):\n \"\"\"Wrap a single paragraph of text, returning a list of Funcped lines.\n\n Reformat the single paragraph in 'text' so it fits in lines of no\n more than 'width' columns, and return a list of Funcped lines. By\n default, tabs in 'text' are expanded with string.expandtabs(), and\n all other whitespace characters (including newline) are converted to\n space. See TextWrapper class for available keyword args to customize\n Funcping behaviour.\n \"\"\"\n arg_3 = TextWrapper(arg_1=arg_1, **arg_2)\n return arg_3.Func(arg_0)"} +{"_id": "doc_2284", "title": "", "text": "def Func(arg_0, arg_1=70, **arg_2):\n \"\"\"Fill a single paragraph of text, returning a new string.\n\n Reformat the single paragraph in 'text' to fit in lines of no more\n than 'width' columns, and return a new string containing the entire\n wrapped paragraph. As with wrap(), tabs are expanded and other\n whitespace characters converted to space. 
See TextWrapper class for\n available keyword args to customize wrapping behaviour.\n \"\"\"\n arg_3 = TextWrapper(arg_1=arg_1, **arg_2)\n return arg_3.Func(arg_0)"} +{"_id": "doc_2285", "title": "", "text": "def Func(arg_0):\n \"\"\"Remove any common leading whitespace from every line in `text`.\n\n This can be used to make triple-quoted strings line up with the left\n edge of the display, while still presenting them in the source code\n in indented form.\n\n Note that tabs and spaces are both treated as whitespace, but they\n are not equal: the lines \" hello\" and \"\\\\thello\" are\n considered to have no common leading whitespace. (This behaviour is\n new in Python 2.5; older versions of this module incorrectly\n expanded tabs before searching for common leading whitespace.)\n \"\"\"\n # Look for the longest leading string of spaces and tabs common to\n # all lines.\n arg_1 = None\n arg_0 = _whitespace_only_re.sub('', arg_0)\n arg_2 = _leading_whitespace_re.findall(arg_0)\n for arg_3 in arg_2:\n if arg_1 is None:\n arg_1 = arg_3\n\n # Current line more deeply indented than previous winner:\n # no change (previous winner is still on top).\n elif arg_3.startswith(arg_1):\n pass\n\n # Current line consistent with and no deeper than previous winner:\n # it's the new winner.\n elif arg_1.startswith(arg_3):\n arg_1 = arg_3\n\n # Find the largest common whitespace between current line and previous\n # winner.\n else:\n for arg_4, (arg_5, arg_6) in enumerate(zip(arg_1, arg_3)):\n if arg_5 != arg_6:\n arg_1 = arg_1[:arg_4]\n break\n else:\n arg_1 = arg_1[:len(arg_3)]\n\n # sanity check (testing/debugging only)\n if 0 and arg_1:\n for arg_7 in arg_0.split(\"\\n\"):\n assert not arg_7 or arg_7.startswith(arg_1), \\\n \"line = %r, margin = %r\" % (arg_7, arg_1)\n\n if arg_1:\n arg_0 = re.sub(r'(?m)^' + arg_1, '', arg_0)\n return arg_0"} +{"_id": "doc_2286", "title": "", "text": "def Func(arg_0):\n \"Transform a list of characters into a list of longs.\"\n\n arg_1 = len(arg_0) // 4\n arg_2 = [0] * arg_1\n\n arg_3 = 0\n arg_4 = 0\n while arg_4 < arg_1:\n arg_5 = ord(arg_0[arg_3]) << 24\n arg_6 = ord(arg_0[arg_3+1]) << 16\n arg_7 = ord(arg_0[arg_3+2]) << 8\n arg_8 = ord(arg_0[arg_3+3])\n arg_2[arg_4] = arg_5 | arg_6 | arg_7 | arg_8\n arg_4 = arg_4+1\n arg_3 = arg_3+4\n\n return arg_2"} +{"_id": "doc_2287", "title": "", "text": "def Func(arg_0):\n \"Initialize the message-digest and set all fields to zero.\"\n\n arg_0.length = 0\n arg_0.input = []\n\n # Initial 160 bit message digest (5 times 32 bit).\n arg_0.H0 = 0x67452301\n arg_0.H1 = 0xEFCDAB89\n arg_0.H2 = 0x98BADCFE\n arg_0.H3 = 0x10325476\n arg_0.H4 = 0xC3D2E1F0"} +{"_id": "doc_2288", "title": "", "text": "def Func(arg_0):\n \"\"\"Shallow Func operation on arbitrary Python objects.\n\n See the module's __doc__ string for more info.\n \"\"\"\n\n arg_1 = type(arg_0)\n\n arg_2 = _Func_dispatch.get(arg_1)\n if arg_2:\n return arg_2(arg_0)\n\n arg_2 = getattr(arg_1, \"__Func__\", None)\n if arg_2:\n return arg_2(arg_0)\n\n arg_3 = dispatch_table.get(arg_1)\n if arg_3:\n arg_4 = arg_3(arg_0)\n else:\n arg_3 = getattr(arg_0, \"__reduce_ex__\", None)\n if arg_3:\n arg_4 = arg_3(2)\n else:\n arg_3 = getattr(arg_0, \"__reduce__\", None)\n if arg_3:\n arg_4 = arg_3()\n else:\n raise Error(\"un(shallow)Funcable object of type %s\" % arg_1)\n\n return _reconstruct(arg_0, arg_4, 0)"} +{"_id": "doc_2289", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=[]):\n \"\"\"Deep copy operation on arbitrary Python objects.\n\n See the module's __doc__ string 
for more info.\n \"\"\"\n\n if arg_1 is None:\n arg_1 = {}\n\n arg_3 = id(arg_0)\n arg_4 = arg_1.get(arg_3, arg_2)\n if arg_4 is not arg_2:\n return arg_4\n\n arg_5 = type(arg_0)\n\n arg_6 = _Func_dispatch.get(arg_5)\n if arg_6:\n arg_4 = arg_6(arg_0, arg_1)\n else:\n try:\n arg_7 = issubclass(arg_5, type)\n except TypeError: # cls is not a class (old Boost; see SF #502085)\n arg_7 = 0\n if arg_7:\n arg_4 = _Func_atomic(arg_0, arg_1)\n else:\n arg_6 = getattr(arg_0, \"__Func__\", None)\n if arg_6:\n arg_4 = arg_6(arg_1)\n else:\n arg_8 = dispatch_table.get(arg_5)\n if arg_8:\n arg_9 = arg_8(arg_0)\n else:\n arg_8 = getattr(arg_0, \"__reduce_ex__\", None)\n if arg_8:\n arg_9 = arg_8(2)\n else:\n arg_8 = getattr(arg_0, \"__reduce__\", None)\n if arg_8:\n arg_9 = arg_8()\n else:\n raise Error(\n \"un(deep)copyable object of type %s\" % arg_5)\n arg_4 = _reconstruct(arg_0, arg_9, 1, arg_1)\n\n arg_1[arg_3] = arg_4\n _keep_alive(arg_0, arg_1) # Make sure x lives at least as long as d\n return arg_4"} +{"_id": "doc_2290", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Keeps a reference to the object x in the memo.\n\n Because we remember objects by their id, we have\n to assure that possibly temporary objects are kept\n alive by referencing them.\n We store a reference at the id of the memo, which should\n normally not be used unless someone tries to deepcopy\n the memo itself...\n \"\"\"\n try:\n arg_1[arg_2(arg_1)].append(arg_0)\n except KeyError:\n # aha, this is the first one :-)\n arg_1[arg_2(arg_1)]=[arg_0]"} +{"_id": "doc_2291", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=1):\n \"\"\"Issue a deprecation warning for Python 3.x related changes.\n\n Warnings are omitted unless Python is started with the -3 option.\n \"\"\"\n if sys.py3kwarning:\n if arg_1 is None:\n arg_1 = DeprecationWarning\n warn(arg_0, arg_1, arg_2+1)"} +{"_id": "doc_2292", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None):\n \"\"\"Hook to write a warning to a file; replace if you like.\"\"\"\n if arg_4 is None:\n arg_4 = sys.stderr\n if arg_4 is None:\n # sys.stderr is None - warnings get lost\n return\n try:\n arg_4.write(formatwarning(arg_0, arg_1, arg_2, arg_3, arg_5))\n except (IOError, UnicodeError):\n pass # the file (probably stderr) is invalid - this warning gets lost."} +{"_id": "doc_2293", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=1):\n \"\"\"Issue a Funcing, or maybe ignore it or raise an exception.\"\"\"\n # Check if message is already a Warning object\n if isinstance(arg_0, Warning):\n arg_1 = arg_0.__class__\n # Check category argument\n if arg_1 is None:\n arg_1 = UserWarning\n assert issubclass(arg_1, Warning)\n # Get context information\n try:\n arg_3 = sys._getframe(arg_2)\n except ValueError:\n arg_4 = sys.__dict__\n arg_5 = 1\n else:\n arg_4 = arg_3.f_globals\n arg_5 = arg_3.f_lineno\n if '__name__' in arg_4:\n arg_6 = arg_4['__name__']\n else:\n arg_6 = \"\"\n arg_7 = arg_4.get('__file__')\n if arg_7:\n arg_8 = arg_7.lower()\n if arg_8.endswith((\".pyc\", \".pyo\")):\n arg_7 = arg_7[:-1]\n else:\n if arg_6 == \"__main__\":\n try:\n arg_7 = sys.argv[0]\n except AttributeError:\n # embedded interpreters don't have sys.argv, see bug #839151\n arg_7 = '__main__'\n if not arg_7:\n arg_7 = arg_6\n arg_9 = arg_4.setdefault(\"__Funcingregistry__\", {})\n Func_explicit(arg_0, arg_1, arg_7, arg_5, arg_6, arg_9,\n arg_4)"} +{"_id": "doc_2294", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the hash value of a set.\n\n Note that we don't 
define _Func__: not all sets are hashable.\n But if you define a hashable set type, its _Func__ should\n call this function.\n\n This must be compatible __eq__.\n\n All sets ought to compare equal if they contain the same\n elements, regardless of how they are implemented, and\n regardless of the order of the elements; so there's not much\n freedom for __eq__ or _Func__. We match the algorithm used\n by the built-in frozenset type.\n \"\"\"\n arg_1 = sys.maxint\n arg_2 = 2 * arg_1 + 1\n arg_3 = len(arg_0)\n arg_4 = 1927868237 * (arg_3 + 1)\n arg_4 &= arg_2\n for arg_5 in arg_0:\n arg_6 = hash(arg_5)\n arg_4 ^= (arg_6 ^ (arg_6 << 16) ^ 89869747) * 3644798167\n arg_4 &= arg_2\n arg_4 = arg_4 * 69069 + 907133923\n arg_4 &= arg_2\n if arg_4 > arg_1:\n arg_4 -= arg_2 + 1\n if arg_4 == -1:\n arg_4 = 590923713\n return arg_4"} +{"_id": "doc_2295", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove an element. If not a member, raise a KeyError.\"\"\"\n if arg_1 not in arg_0:\n raise KeyError(arg_1)\n arg_0.discard(arg_1)"} +{"_id": "doc_2296", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the Funcped value. Raise KeyError if empty.\"\"\"\n arg_1 = iter(arg_0)\n try:\n arg_2 = next(arg_1)\n except StopIteration:\n raise KeyError\n arg_0.discard(arg_2)\n return arg_2"} +{"_id": "doc_2297", "title": "", "text": "def Func(arg_0):\n \"\"\"Release a lock, decrementing the recursion level.\n\n If after the decrement it is zero, reset the lock to unlocked (not owned\n by any thread), and if any other threads are blocked waiting for the\n lock to become unlocked, allow exactly one of them to proceed. If after\n the decrement the recursion level is still nonzero, the lock remains\n locked and owned by the calling thread.\n\n Only call this method when the calling thread owns the lock. A\n RuntimeError is raised if this method is called when the lock is\n unlocked.\n\n There is no return value.\n\n \"\"\"\n if arg_0.__owner != _get_ident():\n raise RuntimeError(\"cannot Func un-acquired lock\")\n arg_0.__count = count = arg_0.__count - 1\n if not count:\n arg_0.__owner = None\n arg_0.__block.Func()\n if __debug__:\n arg_0._note(\"%s.Func(): final Func\", arg_0)\n else:\n if __debug__:\n arg_0._note(\"%s.Func(): non-final Func\", arg_0)"} +{"_id": "doc_2298", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Wait until notified or until a timeout occurs.\n\n If the calling thread has not acquired the lock when this method is\n called, a RuntimeError is raised.\n\n This method releases the underlying lock, and then blocks until it is\n awakened by a notify() or notifyAll() call for the same condition\n variable in another thread, or until the optional timeout occurs. Once\n awakened or timed out, it re-acquires the lock and returns.\n\n When the timeout argument is present and not None, it should be a\n floating point number specifying a timeout for the operation in seconds\n (or fractions thereof).\n\n When the underlying lock is an RLock, it is not released using its\n release() method, since this may not actually unlock the lock when it\n was acquired multiple times recursively. Instead, an internal interface\n of the RLock class is used, which really unlocks it even when it has\n been recursively acquired several times. 
Another internal interface is\n then used to restore the recursion level when the lock is reacquired.\n\n \"\"\"\n if not arg_0._is_owned():\n raise RuntimeError(\"cannot Func on un-acquired lock\")\n arg_2 = _allocate_lock()\n arg_2.acquire()\n arg_0.__Funcers.append(arg_2)\n arg_3 = arg_0._release_save()\n try: # restore state no matter what (e.g., KeyboardInterrupt)\n if arg_1 is None:\n arg_2.acquire()\n if __debug__:\n arg_0._note(\"%s.Func(): got it\", arg_0)\n else:\n # Balancing act: We can't afford a pure busy loop, so we\n # have to sleep; but if we sleep the whole timeout time,\n # we'll be unresponsive. The scheme here sleeps very\n # little at first, longer as time goes on, but never longer\n # than 20 times per second (or the timeout time remaining).\n arg_4 = _time() + arg_1\n arg_5 = 0.0005 # 500 us -> initial delay of 1 ms\n while True:\n arg_6 = arg_2.acquire(0)\n if arg_6:\n break\n arg_7 = arg_4 - _time()\n if arg_7 <= 0:\n break\n arg_5 = min(arg_5 * 2, arg_7, .05)\n _sleep(arg_5)\n if not arg_6:\n if __debug__:\n arg_0._note(\"%s.Func(%s): timed out\", arg_0, arg_1)\n try:\n arg_0.__Funcers.remove(arg_2)\n except ValueError:\n pass\n else:\n if __debug__:\n arg_0._note(\"%s.Func(%s): got it\", arg_0, arg_1)\n finally:\n arg_0._acquire_restore(arg_3)"} +{"_id": "doc_2299", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"Wake up one or more threads waiting on this condition, if any.\n\n If the calling thread has not acquired the lock when this method is\n called, a RuntimeError is raised.\n\n This method wakes up at most n of the threads waiting for the condition\n variable; it is a no-op if no threads are waiting.\n\n \"\"\"\n if not arg_0._is_owned():\n raise RuntimeError(\"cannot Func on un-acquired lock\")\n arg_2 = arg_0.__waiters\n arg_3 = arg_2[:arg_1]\n if not arg_3:\n if __debug__:\n arg_0._note(\"%s.Func(): no waiters\", arg_0)\n return\n arg_0._note(\"%s.Func(): Funcing %d waiter%s\", arg_0, arg_1,\n arg_1!=1 and \"s\" or \"\")\n for arg_4 in arg_3:\n arg_4.release()\n try:\n arg_2.remove(arg_4)\n except ValueError:\n pass"} +{"_id": "doc_2300", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"Acquire a semaphore, decrementing the internal counter by one.\n\n When invoked without arguments: if the internal counter is larger than\n zero on entry, decrement it by one and return immediately. If it is zero\n on entry, block, waiting until some other thread has called release() to\n make it larger than zero. This is done with proper interlocking so that\n if multiple Func() calls are blocked, release() will wake exactly one\n of them up. The implementation may pick one at random, so the order in\n which blocked threads are awakened should not be relied on. There is no\n return value in this case.\n\n When invoked with blocking set to true, do the same thing as when called\n without arguments, and return true.\n\n When invoked with blocking set to false, do not block. 
If a call without\n an argument would block, return false immediately; otherwise, do the\n same thing as when called without arguments, and return true.\n\n \"\"\"\n arg_2 = False\n with arg_0.__cond:\n while arg_0.__value == 0:\n if not arg_1:\n break\n if __debug__:\n arg_0._note(\"%s.Func(%s): blocked waiting, value=%s\",\n arg_0, arg_1, arg_0.__value)\n arg_0.__cond.wait()\n else:\n arg_0.__value = arg_0.__value - 1\n if __debug__:\n arg_0._note(\"%s.Func: success, value=%s\",\n arg_0, arg_0.__value)\n arg_2 = True\n return arg_2"} +{"_id": "doc_2301", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Block until the internal flag is true.\n\n If the internal flag is true on entry, return immediately. Otherwise,\n block until another thread calls set() to set the flag to true, or until\n the optional timeout occurs.\n\n When the timeout argument is present and not None, it should be a\n floating point number specifying a timeout for the operation in seconds\n (or fractions thereof).\n\n This method returns the internal flag on exit, so it will always return\n True except if a timeout is given and the operation times out.\n\n \"\"\"\n with arg_0.__cond:\n if not arg_0.__flag:\n arg_0.__cond.Func(arg_1)\n return arg_0.__flag"} +{"_id": "doc_2302", "title": "", "text": "def Func(arg_0):\n \"\"\"Start the thread's activity.\n\n It must be called at most once per thread object. It arranges for the\n object's run() method to be invoked in a separate thread of control.\n\n This method will raise a RuntimeError if called more than once on the\n same thread object.\n\n \"\"\"\n if not arg_0.__initialized:\n raise RuntimeError(\"thread.__init__() not called\")\n if arg_0.__Funced.is_set():\n raise RuntimeError(\"threads can only be Funced once\")\n if __debug__:\n arg_0._note(\"%s.Func(): Funcing thread\", arg_0)\n with _active_limbo_lock:\n arg_1[arg_0] = arg_0\n try:\n _Func_new_thread(arg_0.__bootstrap, ())\n except Exception:\n with _active_limbo_lock:\n del arg_1[arg_0]\n raise\n arg_0.__Funced.wait()"} +{"_id": "doc_2303", "title": "", "text": "def Func(arg_0):\n \"\"\"Method representing the thread's activity.\n\n You may override this method in a subclass. The standard Func() method\n invokes the callable object passed to the object's constructor as the\n target argument, if any, with sequential and keyword arguments taken\n from the args and kwargs arguments, respectively.\n\n \"\"\"\n try:\n if arg_0.__target:\n arg_0.__target(*arg_0.__args, **arg_0.__kwargs)\n finally:\n # Avoid a refcycle if the thread is Funcning a function with\n # an argument that has a member that points to the thread.\n del arg_0.__target, arg_0.__args, arg_0.__kwargs"} +{"_id": "doc_2304", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Wait until the thread terminates.\n\n This blocks the calling thread until the thread whose Func() method is\n called terminates -- either normally or through an unhandled exception\n or until the optional timeout occurs.\n\n When the timeout argument is present and not None, it should be a\n floating point number specifying a timeout for the operation in seconds\n (or fractions thereof). 
As Func() always returns None, you must call\n isAlive() after Func() to decide whether a timeout happened -- if the\n thread is still alive, the Func() call timed out.\n\n When the timeout argument is not present or None, the operation will\n block until the thread terminates.\n\n A thread can be Func()ed many times.\n\n Func() raises a RuntimeError if an attempt is made to Func the current\n thread as that would cause a deadlock. It is also an error to Func() a\n thread before it has been started and attempts to do so raises the same\n exception.\n\n \"\"\"\n if not arg_0.__initialized:\n raise RuntimeError(\"Thread.__init__() not called\")\n if not arg_0.__started.is_set():\n raise RuntimeError(\"cannot Func thread before it is started\")\n if arg_0 is current_thread():\n raise RuntimeError(\"cannot Func current thread\")\n\n if __debug__:\n if not arg_0.__stopped:\n arg_0._note(\"%s.Func(): waiting until thread stops\", arg_0)\n arg_0.__block.acquire()\n try:\n if arg_1 is None:\n while not arg_0.__stopped:\n arg_0.__block.wait()\n if __debug__:\n arg_0._note(\"%s.Func(): thread stopped\", arg_0)\n else:\n arg_2 = _time() + arg_1\n while not arg_0.__stopped:\n arg_3 = arg_2 - _time()\n if arg_3 <= 0:\n if __debug__:\n arg_0._note(\"%s.Func(): timed out\", arg_0)\n break\n arg_0.__block.wait(arg_3)\n else:\n if __debug__:\n arg_0._note(\"%s.Func(): thread stopped\", arg_0)\n finally:\n arg_0.__block.release()"} +{"_id": "doc_2305", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=True, arg_3=False):\n \"\"\"quotetabs=True means that tab and space characters are always\n quoted.\n istext=False means that \\r and \\n are treated as regular characters\n header=True encodes space characters with '_' and requires\n real '_' characters to be quoted.\n \"\"\"\n arg_4 = 76\n\n # See if this string is using CRLF line ends\n arg_5 = arg_0.find('\\n')\n arg_6 = arg_5 > 0 and arg_0[arg_5-1] == '\\r'\n\n arg_7 = 0\n arg_8 = 0\n arg_9 = []\n while arg_7 < len(arg_0):\n arg_10 = arg_0[arg_7]\n if (arg_10 > '~' or\n arg_10 == '=' or\n (arg_3 and arg_10 == '_') or\n (arg_10 == '.' 
and arg_8 == 0 and (arg_7+1 == len(arg_0) or\n arg_0[arg_7+1] == '\\n' or\n arg_0[arg_7+1] == '\\r')) or\n (not arg_2 and (arg_10 == '\\r' or arg_10 == '\\n')) or\n ((arg_10 == '\\t' or arg_10 == ' ') and (arg_7 + 1 == len(arg_0))) or\n (arg_10 <= ' ' and arg_10 != '\\r' and arg_10 != '\\n' and\n (arg_1 or (not arg_1 and (arg_10 != '\\t' and arg_10 != ' '))))):\n arg_8 += 3\n if arg_8 >= arg_4:\n arg_9.append('=')\n if arg_6: arg_9.append('\\r')\n arg_9.append('\\n')\n arg_8 = 3\n arg_9.append('=' + two_hex_digits(ord(arg_10)))\n arg_7 += 1\n else:\n if (arg_2 and\n (arg_10 == '\\n' or (arg_7+1 < len(arg_0) and arg_10 == '\\r' and\n arg_0[arg_7+1] == '\\n'))):\n arg_8 = 0\n # Protect against whitespace on end of line\n if (len(arg_9) > 0 and\n (arg_9[-1] == ' ' or arg_9[-1] == '\\t')):\n arg_11 = ord(arg_9[-1])\n arg_9[-1] = '='\n arg_9.append(two_hex_digits(arg_11))\n\n if arg_6: arg_9.append('\\r')\n arg_9.append('\\n')\n if arg_10 == '\\r':\n arg_7 += 2\n else:\n arg_7 += 1\n else:\n if (arg_7 + 1 < len(arg_0) and\n arg_0[arg_7+1] != '\\n' and\n (arg_8 + 1) >= arg_4):\n arg_9.append('=')\n if arg_6: arg_9.append('\\r')\n arg_9.append('\\n')\n arg_8 = 0\n\n arg_8 += 1\n if arg_3 and arg_10 == ' ':\n arg_10 = '_'\n arg_9.append(arg_10)\n arg_7 += 1\n return ''.join(arg_9)"} +{"_id": "doc_2306", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a comma-separated list of option strings & metavariables.\"\"\"\n if arg_1.takes_value():\n arg_2 = arg_1.metavar or arg_1.dest.upper()\n arg_3 = [arg_0._short_opt_fmt % (sopt, arg_2)\n for sopt in arg_1._short_opts]\n arg_4 = [arg_0._long_opt_fmt % (lopt, arg_2)\n for lopt in arg_1._long_opts]\n else:\n arg_3 = arg_1._short_opts\n arg_4 = arg_1._long_opts\n\n if arg_0.short_first:\n arg_5 = arg_3 + arg_4\n else:\n arg_5 = arg_4 + arg_3\n\n return \", \".join(arg_5)"} +{"_id": "doc_2307", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Update the option values from an arbitrary dictionary, but only\n use keys from dict that already have a corresponding attribute\n in self. Any keys in dict without a corresponding attribute\n are silently ignored.\n \"\"\"\n for arg_2 in dir(arg_0):\n if arg_2 in arg_1:\n arg_3 = arg_1[arg_2]\n if arg_3 is not None:\n setattr(arg_0, arg_2, arg_3)"} +{"_id": "doc_2308", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"\"\"Insert item x in list a, and keep it sorted assuming a is sorted.\n\n If x is already in a, insert it to the right of the rightmost x.\n\n Optional args lo (default 0) and hi (default len(a)) bound the\n slice of a to be searched.\n \"\"\"\n\n if arg_2 < 0:\n raise ValueError('lo must be non-negative')\n if arg_3 is None:\n arg_3 = len(arg_0)\n while arg_2 < arg_3:\n arg_4 = (arg_2+arg_3)//2\n if arg_1 < arg_0[arg_4]: arg_3 = arg_4\n else: arg_2 = arg_4+1\n arg_0.insert(arg_2, arg_1)"} +{"_id": "doc_2309", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Lock a mutex, call the function with supplied argument\n when it is acquired. If the mutex is already Funced, place\n function and argument in the queue.\"\"\"\n if arg_0.testandset():\n arg_1(arg_2)\n else:\n arg_0.queue.append((arg_1, arg_2))"} +{"_id": "doc_2310", "title": "", "text": "def Func(arg_0):\n \"\"\"Unlock a mutex. 
If the queue is not empty, call the next\n function with its argument.\"\"\"\n if arg_0.queue:\n arg_1, arg_2 = arg_0.queue.popleft()\n arg_1(arg_2)\n else:\n arg_0.locked = False"} +{"_id": "doc_2311", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a clone object.\n\n Return a Func ('clone') of the md5 object. This can be used\n to efficiently compute the digests of strings that share\n a common initial substring.\n \"\"\"\n if 0: # set this to 1 to make the flow space crash\n return Func.deepFunc(arg_0)\n arg_1 = arg_0.__class__()\n arg_1.length = arg_0.length\n arg_1.count = [] + arg_0.count[:]\n arg_1.input = [] + arg_0.input\n arg_1.A = arg_0.A\n arg_1.B = arg_0.B\n arg_1.C = arg_0.C\n arg_1.D = arg_0.D\n return arg_1"} +{"_id": "doc_2312", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0):\n \"\"\"Return the string obtained by replacing the leftmost non-overlapping\n occurrences of pattern in string by the replacement repl.\"\"\"\n return arg_0._Funcx(arg_1, arg_2, arg_3, False)"} +{"_id": "doc_2313", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"Split string by the occurrences of pattern.\"\"\"\n arg_3 = []\n arg_4 = _State(arg_1, 0, sys.maxint, arg_0.flags)\n arg_5 = 0\n arg_6 = arg_4.start\n while not arg_2 or arg_5 < arg_2:\n arg_4.reset()\n arg_4.string_position = arg_4.start\n if not arg_4.search(arg_0._code):\n break\n if arg_4.start == arg_4.string_position: # zero-width match\n if arg_6 == arg_4.end: # or end of string\n break\n arg_4.start += 1\n continue\n arg_3.append(arg_1[arg_6:arg_4.start])\n # add groups (if any)\n if arg_0.groups:\n arg_8 = SRE_Match(arg_0, arg_4)\n # TODO: Use .extend once it is implemented.\n # Funclist.extend(list(match.groups(None)))\n arg_3 += (list(arg_8.groups(None)))\n arg_5 += 1\n arg_6 = arg_4.start = arg_4.string_position\n arg_3.append(arg_1[arg_6:arg_4.end])\n return arg_3"} +{"_id": "doc_2314", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a tuple of index pairs representing matched groups.\"\"\"\n arg_2 = [(arg_1.start, arg_1.string_position)]\n for arg_3 in range(arg_0.re.groups):\n arg_4 = 2 * arg_3\n if arg_4 + 1 < len(arg_1.marks) \\\n and arg_1.marks[arg_4] is not None \\\n and arg_1.marks[arg_4 + 1] is not None:\n arg_2.append((arg_1.marks[arg_4], arg_1.marks[arg_4 + 1]))\n else:\n arg_2.append((-1, -1))\n return tuple(arg_2)"} +{"_id": "doc_2315", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Skips forward in a string as fast as possible using information from\n an optimization info block.\"\"\"\n # pattern starts with a known prefix\n # <5=length> <6=skip> <7=prefix data> \n arg_2 = arg_1[2]\n arg_3 = arg_1[5]\n arg_4 = arg_1[6] # don't really know what this is good for\n arg_5 = arg_1[7:7 + arg_3]\n arg_6 = arg_1[7 + arg_3 - 1:arg_1[1] + 1]\n arg_1 = arg_1[arg_1[1] + 1:]\n arg_7 = 0\n arg_8 = arg_0.string_position\n while arg_8 < arg_0.end:\n while True:\n if ord(arg_0.string[arg_8]) != arg_5[arg_7]:\n if arg_7 == 0:\n break\n else:\n arg_7 = arg_6[arg_7]\n else:\n arg_7 += 1\n if arg_7 == arg_3:\n # found a potential match\n arg_0.start = arg_8 + 1 - arg_3\n arg_0.string_position = arg_8 + 1 \\\n - arg_3 + arg_4\n if arg_2 & SRE_INFO_LITERAL:\n return True # matched all of pure literal pattern\n if arg_0.match(arg_1[2 * arg_4:]):\n return True\n arg_7 = arg_6[arg_7]\n break\n arg_8 += 1\n return False"} +{"_id": "doc_2316", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a new child context of this context and pushes it on the\n stack. 
pattern_offset is the offset off the current code position to\n start interpreting from.\"\"\"\n arg_2 = _MatchContext(arg_0.state,\n arg_0.pattern_codes[arg_0.code_position + arg_1:])\n arg_0.state.context_stack.append(arg_2)\n return arg_2"} +{"_id": "doc_2317", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks whether a character matches set of arbitrary length. Assumes\n the code pointer is at the first member of the set.\"\"\"\n arg_0.set_dispatcher.reset(arg_2)\n arg_3 = arg_1.code_position\n arg_4 = None\n while arg_4 is None:\n arg_4 = arg_0.set_dispatcher.dispatch(arg_1.peek_code(), arg_1)\n arg_1.code_position = arg_3\n return arg_4"} +{"_id": "doc_2318", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Remove the exponent by changing intpart and fraction.\"\"\"\n if arg_2 > 0: # Move the point left\n arg_3 = len(arg_1)\n arg_0, arg_1 = arg_0 + arg_1[:arg_2], arg_1[arg_2:]\n if arg_2 > arg_3:\n arg_0 = arg_0 + '0'*(arg_2-arg_3)\n elif arg_2 < 0: # Move the point right\n arg_4 = len(arg_0)\n arg_0, arg_1 = arg_0[:arg_2], arg_0[arg_2:] + arg_1\n if arg_2 < -arg_4:\n arg_1 = '0'*(-arg_2-arg_4) + arg_1\n return arg_0, arg_1"} +{"_id": "doc_2319", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the subset of the list NAMES that match PAT\"\"\"\n import os\n # import posixpath\n arg_2=[]\n # pat=os.path.normcase(pat)\n try:\n arg_3 = arg_6[arg_1]\n except KeyError:\n arg_4 = translate(arg_1)\n if len(arg_6) >= _MAXCACHE:\n # _cache.clear()\n arg_5()['_cache'] = {}\n arg_6[arg_1] = arg_3 = re.compile(arg_4)\n arg_7 = arg_3.match\n # if os.path is posixpath:\n if 1:\n # normcase on posix is NOP. Optimize it away from the loop.\n for arg_8 in arg_0:\n if arg_7(arg_8):\n arg_2.append(arg_8)\n else:\n for arg_8 in arg_0:\n if arg_7(os.path.normcase(arg_8)):\n arg_2.append(arg_8)\n return arg_2"} +{"_id": "doc_2320", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=None):\n \"\"\"Put an item into the queue.\n\n If optional args 'block' is true and 'timeout' is None (the default),\n block if necessary until a free slot is available. 
If 'timeout' is\n a non-negative number, it blocks at most 'timeout' seconds and raises\n the Full exception if no free slot was available within that time.\n Otherwise ('block' is false), Func an item on the queue if a free slot\n is immediately available, else raise the Full exception ('timeout'\n is ignored in that case).\n \"\"\"\n arg_0.not_full.acquire()\n try:\n if arg_0.maxsize > 0:\n if not arg_2:\n if arg_0._qsize() == arg_0.maxsize:\n raise Full\n elif arg_3 is None:\n while arg_0._qsize() == arg_0.maxsize:\n arg_0.not_full.wait()\n elif arg_3 < 0:\n raise ValueError(\"'timeout' must be a non-negative number\")\n else:\n arg_4 = _time() + arg_3\n while arg_0._qsize() == arg_0.maxsize:\n arg_5 = arg_4 - _time()\n if arg_5 <= 0.0:\n raise Full\n arg_0.not_full.wait(arg_5)\n arg_0._Func(arg_1)\n arg_0.unfinished_tasks += 1\n arg_0.not_empty.notify()\n finally:\n arg_0.not_full.release()"} +{"_id": "doc_2321", "title": "", "text": "def Func(*arg_0):\n \"\"\"Combine multiple context managers into a single Func context manager.\n\n This function has been deprecated in favour of the multiple manager form\n of the with statement.\n\n The one advantage of this function over the multiple manager form of the\n with statement is that argument unpacking allows it to be\n used with a variable number of context managers as follows:\n\n with Func(*managers):\n do_something()\n\n \"\"\"\n warn(\"With-statements now directly support multiple context managers\",\n DeprecationWarning, 3)\n arg_1 = []\n arg_2 = []\n arg_3 = (None, None, None)\n try:\n for arg_4 in arg_0:\n arg_5 = arg_4.__exit__\n arg_6 = arg_4.__enter__\n arg_2.append(arg_6())\n arg_1.append(arg_5)\n yield arg_2\n except:\n arg_3 = sys.exc_info()\n finally:\n while arg_1:\n arg_5 = arg_1.pop()\n try:\n if arg_5(*arg_3):\n arg_3 = (None, None, None)\n except:\n arg_3 = sys.exc_info()\n if arg_3 != (None, None, None):\n # Don't rely on sys.exc_info() still containing\n # the right information. 
Another exception may\n # have been raised and caught by an exit method\n raise arg_3[0], arg_3[1], arg_3[2]"} +{"_id": "doc_2322", "title": "", "text": "async def Func(arg_0,\n *,\n arg_1: arg_2 = None,\n arg_3: arg_4 = arg_5,\n arg_6: arg_7[arg_2] = 'application/Func') -> Any:\n \"\"\"Read and decodes JSON response.\"\"\"\n return await arg_0._aws_Func(\n arg_1=arg_1, arg_3=arg_3, arg_6=arg_6)"} +{"_id": "doc_2323", "title": "", "text": "async def Func(arg_0, arg_1: arg_2.Coroutine, arg_4):\n \"\"\"Process coroutine callback function\"\"\"\n arg_5 = None\n\n try:\n arg_5 = await arg_1\n except NothingMatchedError as e:\n arg_0.logger.error(f'')\n except Exception as e:\n arg_0.logger.error(f' 0.0:\n arg_7 = [A_conj + arg_0.damping * conj for A_conj, conj in zip(arg_7, arg_3)]\n\n # cAc := c_t^T * Ac\n arg_8 = tf.add_n(\n inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for conj, A_conj in zip(arg_3, arg_7)]\n )\n\n # \\alpha := r_t^2 / cAc\n arg_9 = arg_5 / tf.maximum(arg_1=arg_8, y=util.epsilon)\n\n # x_{t+1} := x_t + \\alpha * c_t\n arg_10 = [t + arg_9 * conj for t, conj in zip(arg_1, arg_3)]\n\n # r_{t+1} := r_t - \\alpha * Ac\n arg_11 = [res - arg_9 * A_conj for res, A_conj in zip(arg_4, arg_7)]\n\n # r_{t+1}^2 := r_{t+1}^T * r_{t+1}\n arg_12 = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in arg_11])\n\n # \\beta = r_{t+1}^2 / r_t^2\n arg_13 = arg_12 / tf.maximum(arg_1=arg_5, y=util.epsilon)\n\n # c_{t+1} := r_{t+1} + \\beta * c_t\n arg_14 = [res + arg_13 * conj for res, conj in zip(arg_11, arg_3)]\n\n return arg_10, arg_6, arg_14, arg_11, arg_12"} +{"_id": "doc_2330", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the target optimizer arguments including the time, the list of variables to \n optimize, and various functions which the optimizer might require to perform an update \n step.\n\n Returns:\n Target optimizer arguments as dict.\n \"\"\"\n arg_1 = arg_0.target_network.get_variables() + [\n variable for name in sorted(arg_0.target_distributions)\n for variable in arg_0.target_distributions[name].get_variables()\n ]\n arg_2 = arg_0.network.get_variables() + [\n variable for name in sorted(arg_0.distributions)\n for variable in arg_0.distributions[name].get_variables()\n ]\n arg_3 = dict(\n time=arg_0.global_timestep,\n arg_1=arg_1,\n arg_2=arg_2\n )\n if arg_0.global_model is not None:\n arg_3['global_variables'] = arg_0.global_model.target_network.get_variables() + [\n variable for name in sorted(arg_0.global_model.target_distributions)\n for variable in arg_0.global_model.target_distributions[name].get_variables()\n ]\n return arg_3"} +{"_id": "doc_2331", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates an environment from a specification dict.\n \"\"\"\n arg_2 = tensorforce.util.get_object(\n obj=arg_0,\n predefined_objects=tensorforce.environments.environments,\n arg_1=arg_1\n )\n assert isinstance(arg_2, Environment)\n return arg_2"} +{"_id": "doc_2332", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Pass through rest role.\"\"\"\n return arg_0.renderer.image_link(\n arg_1.group('url'), arg_1.group('target'), arg_1.group('alt'))"} +{"_id": "doc_2333", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Rendering table element. Wrap header and body in it.\n\n :param header: header part of the table.\n :param body: body part of the table.\n \"\"\"\n Func = '\\n.. 
list-table::\\n'\n if arg_1 and not arg_1.isspace():\n Func = (Func + arg_0.indent + ':header-rows: 1\\n\\n' +\n arg_0._indent_block(arg_1) + '\\n')\n else:\n Func = Func + '\\n'\n Func = Func + arg_0._indent_block(arg_2) + '\\n\\n'\n return Func"} +{"_id": "doc_2334", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Worker Agent generator, receives an Agent class and creates a Worker Agent class that inherits from that Agent.\n \"\"\"\n\n # Support special case where class is given as type-string (AgentsDictionary) or class-name-string.\n if isinstance(arg_0, str):\n arg_0 = AgentsDictionary.get(arg_0)\n # Last resort: Class name given as string?\n if not arg_0 and arg_0.find('.') != -1:\n arg_1, arg_2 = arg_0.rsplit('.', 1)\n arg_3 = importlib.import_module(arg_1)\n arg_0 = getattr(arg_3, arg_2)\n\n class WorkerAgent(arg_0):\n \"\"\"\n Worker agent receiving a shared model to avoid creating multiple models.\n \"\"\"\n\n def __init__(arg_4, arg_5=None, **arg_6):\n # Set our model externally.\n arg_4.model = arg_5\n # Be robust against `network` coming in from kwargs even though this agent doesn't have one\n if not issubclass(arg_0, LearningAgent):\n arg_6.pop(\"network\")\n # Call super c'tor (which will call initialize_model and assign self.model to the return value).\n super(WorkerAgent, arg_4).__init__(**arg_6)\n\n def initialize_model(arg_4):\n # Return our model (already given and initialized).\n return arg_4.model\n\n return WorkerAgent"} +{"_id": "doc_2335", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns x, y from flat_position integer.\n\n Args:\n flat_position: flattened position integer\n\n Returns: x, y\n\n \"\"\"\n return arg_1 % arg_0.env.action_space.screen_shape[0],\\\n arg_1 % arg_0.env.action_space.screen_shape[1]"} +{"_id": "doc_2336", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Wait until there is a state.\n \"\"\"\n while arg_1 == [None] or not arg_1:\n arg_1, arg_3, arg_2 = arg_0._execute(dict(key=0))\n\n return arg_1, arg_3, arg_2"} +{"_id": "doc_2337", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Creates an optimizer from a specification dict.\n \"\"\"\n arg_2 = util.get_object(\n obj=arg_0,\n predefined_objects=tensorforce.core.optimizers.optimizers,\n arg_1=arg_1\n )\n assert isinstance(arg_2, Optimizer)\n return arg_2"} +{"_id": "doc_2338", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Registers the saver operations to the graph in context.\n \"\"\"\n\n arg_1 = arg_0.get_savable_variables()\n if arg_1 is None or len(arg_1) == 0:\n arg_0._saver = None\n return\n\n arg_3 = arg_0._get_base_variable_scope()\n arg_4 = {strip_name_scope(v.name, arg_3): v for v in arg_1}\n\n arg_0._saver = tf.train.Saver(\n var_list=arg_4,\n reshape=False,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n name=None,\n restore_sequentially=False,\n saver_def=None,\n builder=None,\n defer_build=False,\n allow_empty=True,\n write_version=tf.train.SaverDef.V2,\n pad_step_number=False,\n save_relative_paths=True\n )"} +{"_id": "doc_2339", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Saves this component's managed variables.\n\n Args:\n sess: The session for which to Func the managed variables.\n Func_path: The path to Func data to.\n timestep: Optional, the timestep to append to the file name.\n\n Returns:\n Checkpoint path where the model was Funcd.\n \"\"\"\n\n if arg_0._Funcr is None:\n raise TensorForceError(\"register_Funcr_ops should be called before Func\")\n 
return arg_0._Funcr.Func(\n arg_1=arg_1,\n arg_2=arg_2,\n global_step=arg_3,\n write_meta_graph=False,\n write_state=True, # Do we need this?\n )"} +{"_id": "doc_2340", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Restores the values of the managed variables from disk location.\n\n Args:\n sess: The session for which to save the managed variables.\n save_path: The path used to save the data to.\n \"\"\"\n\n if arg_0._saver is None:\n raise TensorForceError(\"register_saver_ops should be called before Func\")\n arg_0._saver.Func(arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_2341", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process state.\n\n Args:\n tensor: tensor to Func\n\n Returns: Funced state\n\n \"\"\"\n for arg_2 in arg_0.preFuncors:\n arg_1 = arg_2.Func(arg_1=arg_1)\n return arg_1"} +{"_id": "doc_2342", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Shape of preprocessed state given original shape.\n\n Args:\n shape: original state shape\n\n Returns: processed state shape\n \"\"\"\n for arg_2 in arg_0.preprocessors:\n arg_1 = arg_2.Func(arg_1=arg_1)\n return arg_1"} +{"_id": "doc_2343", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Makes sure our optimizer is wrapped into the global_optimizer meta. This is only relevant for distributed RL.\n \"\"\"\n super(MemoryModel, arg_0).Func()\n arg_0.optimizer_spec = dict(\n type='global_optimizer',\n optimizer=arg_0.optimizer_spec\n )"} +{"_id": "doc_2344", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=0.0, arg_5=0):\n \"\"\"\n Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards\n for a given sequence of single rewards.\n\n Example:\n single rewards = 2.0 1.0 0.0 0.5 1.0 -1.0\n terminal = False, False, False, False True False\n gamma = 0.95\n final_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)\n horizon=3\n output = 2.95 1.45 1.38 1.45 1.0 94.0\n\n Args:\n terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one\n True value. If its very last element is False (not terminating), the given `final_reward` value\n is assumed to follow the last value in the single rewards sequence (see below).\n reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,\n an assumed last reward of the value of `final_reward` will be used.\n discount (float): The discount factor (gamma). By default, take the Model's discount factor.\n final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence\n ends with False). This value will be ignored if horizon == 1 or discount == 0.0.\n horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks\n without terminal signals). Use 0 (default) for an infinite horizon. 
Note that horizon=1 leads to the\n exact same results as a discount factor of 0.0.\n\n Returns:\n Discounted cumulative reward tensor with the same shape as `reward`.\n \"\"\"\n\n # By default -> take Model's gamma value\n if arg_3 is None:\n arg_3 = arg_0.discount\n\n # Accumulates discounted (n-step) reward (start new if terminal)\n def cumulate(arg_6, arg_7):\n arg_8, arg_9, arg_10, arg_11 = arg_7\n return tf.where(\n # If terminal, start new cumulation.\n condition=arg_9,\n x=arg_8,\n y=tf.where(\n # If we are above the horizon length (H) -> subtract discounted value from H steps back.\n condition=arg_10,\n x=(arg_8 + arg_6 * arg_3 - arg_11),\n y=(arg_8 + arg_6 * arg_3)\n )\n )\n\n # Accumulates length of episodes (starts new if terminal)\n def len_(arg_6, arg_12):\n return tf.where(\n condition=arg_12,\n # Start counting from 1 after is-terminal signal\n x=tf.ones(shape=(), dtype=tf.int32),\n # Otherwise, increase length by 1\n y=arg_6 + 1\n )\n\n # Reverse, since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right.\n arg_2 = tf.reverse(tensor=arg_2, axis=(0,))\n # e.g. -1.0 1.0 0.5 0.0 1.0 2.0\n arg_1 = tf.reverse(tensor=arg_1, axis=(0,))\n # e.g. F T F F F F\n\n # Store the steps until end of the episode(s) determined by the input terminal signals (True starts new count).\n arg_13 = tf.scan(fn=len_, elems=arg_1, initializer=0)\n # e.g. 1 1 2 3 4 5\n arg_14 = tf.greater(arg_13, tf.fill(dims=tf.shape(arg_13), value=arg_5))\n # e.g. F F F F T T\n\n # Calculate the horizon-subtraction value for each step.\n if arg_5 > 0:\n arg_15 = tf.map_fn(lambda x: (arg_3 ** arg_5) * x, arg_2, dtype=tf.float32)\n # Shift right by size of horizon (fill rest with 0.0).\n arg_15 = tf.concat([np.zeros(shape=(arg_5,)), arg_15], axis=0)\n arg_15 = tf.slice(arg_15, begin=(0,), size=tf.shape(arg_2))\n # e.g. 
0.0, 0.0, 0.0, -1.0*g^3, 1.0*g^3, 0.5*g^3\n # all 0.0 if infinite horizon (special case: horizon=0)\n else:\n arg_15 = tf.zeros(shape=tf.shape(arg_2))\n\n # Now do the scan, each time summing up the previous step (discounted by gamma) and\n # subtracting the respective `horizon_subtraction`.\n arg_2 = tf.scan(\n fn=cumulate,\n elems=(arg_2, arg_1, arg_14, arg_15),\n initializer=arg_4 if arg_5 != 1 else 0.0\n )\n # Re-reverse again to match input sequences.\n return tf.reverse(tensor=arg_2, axis=(0,))"} +{"_id": "doc_2345", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6, arg_7, arg_8, arg_9=None):\n \"\"\"\n Creates the TensorFlow operations for calculating the loss per batch instance.\n\n Args:\n states: Dict of state tensors.\n internals: Dict of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict of successor state tensors.\n next_internals: List of posterior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\n Returns:\n Loss per instance tensor.\n \"\"\"\n raise NotImplementedError"} +{"_id": "doc_2346", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9=None):\n \"\"\"\n Creates the TensorFlow operations for calculating the full loss of a batch.\n\n Args:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict of successor state tensors.\n next_internals: List of posterior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\n Returns:\n Loss tensor.\n \"\"\"\n # Mean loss per instance\n arg_10 = arg_0.fn_loss_per_instance(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9\n )\n\n # Returns no-op.\n arg_11 = arg_0.memory.update_batch(arg_10=arg_10)\n with tf.control_dependencies(control_inputs=(arg_11,)):\n arg_12 = tf.reduce_mean(input_tensor=arg_10, axis=0)\n\n # Loss without regularization summary.\n if 'losses' in arg_0.summary_labels:\n tf.contrib.summary.scalar(arg_14='loss-without-regularization', tensor=arg_12)\n\n # Regularization losses.\n arg_13 = arg_0.fn_regularization_losses(arg_1=arg_1, arg_2=arg_2, arg_8=arg_8)\n if len(arg_13) > 0:\n arg_12 += tf.add_n(inputs=[arg_13[arg_14] for arg_14 in sorted(arg_13)])\n if 'regularization' in arg_0.summary_labels:\n for arg_14 in sorted(arg_13):\n tf.contrib.summary.scalar(arg_14=('regularization/' + arg_14), tensor=arg_13[arg_14])\n\n # Total loss summary.\n if 'losses' in arg_0.summary_labels or 'total-loss' in arg_0.summary_labels:\n tf.contrib.summary.scalar(arg_14='total-loss', tensor=arg_12)\n\n return arg_12"} +{"_id": "doc_2347", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=None, arg_7=None):\n \"\"\"\n Creates the TensorFlow operations for performing an optimization update step based\n on the given input states and actions batch.\n\n Args:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict 
of successor state tensors.\n next_internals: List of posterior internal state tensors.\n\n Returns:\n The optimization operation.\n \"\"\"\n arg_8 = arg_0.optimizer_arguments(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7\n )\n return arg_0.optimizer.minimize(**arg_8)"} +{"_id": "doc_2348", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Creates a distribution from a specification dict.\n \"\"\"\n arg_2 = util.get_object(\n obj=arg_0,\n predefined_objects=tensorforce.core.distributions.distributions,\n arg_1=arg_1\n )\n assert isinstance(arg_2, Distribution)\n return arg_2"} +{"_id": "doc_2349", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"\n Utility method for unbuffered observing where each tuple is inserted into TensorFlow via\n a single session call, thus avoiding race conditions in multi-threaded mode.\n\n Observe full experience tuplefrom the environment to learn from. Optionally pre-processes rewards\n Child classes should call super to get the processed reward\n EX: terminal, reward = super()...\n\n Args:\n states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\n internals (any): Internal list.\n terminal (bool): boolean indicating if the episode terminated after the observation.\n reward (float): scalar reward that resulted from executing the action.\n \"\"\"\n # TODO probably unnecessary here.\n arg_0.current_terminal = arg_5\n arg_0.current_reward = arg_4\n # print('action = {}'.format(actions))\n if arg_0.unique_state:\n arg_1 = dict(state=arg_1)\n if arg_0.unique_action:\n arg_2 = dict(action=arg_2)\n\n arg_0.episode = arg_0.model.Func(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_5=arg_0.current_terminal,\n arg_4=arg_0.current_reward\n )"} +{"_id": "doc_2350", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a named tensor if available.\n\n Returns:\n valid: True if named tensor found, False otherwise\n tensor: If valid, will be a tensor, otherwise None\n \"\"\"\n if arg_1 in arg_0.named_tensors:\n return True, arg_0.named_tensors[arg_1]\n else:\n return False, None"} +{"_id": "doc_2351", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Stores a transition in replay memory.\n\n If the memory is full, the oldest entry is replaced.\n \"\"\"\n if not arg_0._isfull():\n arg_0._memory.append(None)\n arg_3 = arg_0._next_position_then_increment()\n arg_4 = 0 if arg_0._memory[arg_3] is None \\\n else (arg_0._memory[arg_3].priority or 0)\n arg_5 = _SumRow(arg_1, arg_2)\n arg_0._memory[arg_3] = arg_5\n arg_0._update_internal_nodes(\n arg_3, (arg_5.priority or 0) - arg_4)"} +{"_id": "doc_2352", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Change the priority of a leaf node\n \"\"\"\n arg_3 = arg_1 + (arg_0._capacity - 1)\n return arg_0._Func(arg_3, arg_2)"} +{"_id": "doc_2353", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Change the priority of a leaf node.\n \"\"\"\n arg_3, arg_4 = arg_0._memory[arg_1]\n arg_4 = arg_4 or 0\n arg_0._memory[arg_1] = _SumRow(arg_3, arg_2)\n arg_0._update_internal_nodes(arg_1, arg_2 - arg_4)"} +{"_id": "doc_2354", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Similar to position++.\n \"\"\"\n arg_1 = arg_0._capacity - 1\n arg_2 = arg_1 + arg_0._position\n arg_0._position = (arg_0._position + 1) % arg_0._capacity\n return 
arg_2"} +{"_id": "doc_2355", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sample random element with priority greater than p.\n \"\"\"\n arg_2 = 0\n while True:\n arg_3 = 2 * arg_2 + 1\n if arg_3 >= len(arg_0._memory):\n # parent points to a leaf node already.\n return arg_2\n\n arg_4 = arg_0._memory[arg_3] if arg_3 < arg_0._capacity - 1 \\\n else (arg_0._memory[arg_3].priority or 0)\n if arg_1 <= arg_4:\n arg_2 = arg_3\n else:\n if arg_3 + 1 >= len(arg_0._memory):\n raise RuntimeError('Right child is expected to exist.')\n arg_1 -= arg_4\n arg_2 = arg_3 + 1"} +{"_id": "doc_2356", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sample minibatch of size batch_size.\n \"\"\"\n arg_2 = len(arg_0)\n if arg_2 == 0:\n return []\n\n arg_3 = arg_0._memory[0] / arg_1\n arg_4 = []\n # if all priorities sum to ~0 choose randomly otherwise random sample\n if abs(arg_0._memory[0]) < util.epsilon:\n arg_4 = np.random.randint(arg_0._capacity - 1, arg_0._capacity - 1 + len(arg_0), size=arg_1).tolist()\n else:\n for arg_5 in xrange(arg_1):\n arg_6 = max(arg_5 * arg_3, 0)\n arg_7 = min((arg_5 + 1) * arg_3, arg_0._memory[0])\n arg_8 = random.uniform(arg_6, arg_7)\n arg_4.append(arg_0._sample_with_priority(arg_8))\n return [(arg_5, arg_0._memory[arg_5]) for arg_5 in arg_4]"} +{"_id": "doc_2357", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Computes priorities according to loss.\n\n Args:\n loss_per_instance:\n\n \"\"\"\n if arg_0.batch_indices is None:\n raise TensorForceError(\"Need to call get_batch before each Func call.\")\n # if len(loss_per_instance) != len(self.batch_indices):\n # raise TensorForceError(\"For all instances a loss value has to be provided.\")\n\n for arg_2, arg_3 in zip(arg_0.batch_indices, arg_1):\n # Sampling priority is proportional to the largest absolute temporal difference error.\n arg_4 = (np.abs(arg_3) + arg_0.prioritization_constant) ** arg_0.prioritization_weight\n arg_0.observations._move(arg_2, arg_4)\n arg_0.none_priority_index += 1"} +{"_id": "doc_2358", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Ends our server tcp connection.\n \"\"\"\n # If we are not connected, return error.\n if not arg_0.socket:\n logging.warning(\"No active socket to close!\")\n return\n # Close our socket.\n arg_0.socket.close()\n arg_0.socket = None"} +{"_id": "doc_2359", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determines whether action is available.\n That is, executing it would change the state.\n \"\"\"\n\n arg_2 = np.rot90(arg_0._state, arg_1)\n return arg_0._Func_left(arg_2)"} +{"_id": "doc_2360", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determines whether action 'Left' is available.\"\"\"\n\n # True if any field is 0 (empty) on the left of a tile or two tiles can\n # be merged.\n for arg_2 in range(4):\n arg_3 = False\n for arg_4 in range(4):\n arg_3 |= arg_1[arg_2, arg_4] == 0\n if arg_1[arg_2, arg_4] != 0 and arg_3:\n return True\n if (arg_1[arg_2, arg_4] != 0 and arg_4 > 0 and\n arg_1[arg_2, arg_4] == arg_1[arg_2, arg_4 - 1]):\n return True\n\n return False"} +{"_id": "doc_2361", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute action, add a new tile, update the score & return the reward.\"\"\"\n\n arg_2 = np.rot90(arg_0._state, arg_1)\n arg_3 = arg_0._Func_left(arg_2)\n arg_0._state = np.rot90(arg_2, -arg_1)\n arg_0._score += arg_3\n\n arg_0.add_random_tile()\n\n return arg_3"} +{"_id": "doc_2362", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Executes action 'Left'.\"\"\"\n\n arg_2 = 0\n\n for arg_3 
in range(4):\n # Always the rightmost tile in the current row that was already moved\n arg_4 = -1\n arg_5 = np.zeros((4,), dtype=np.bool)\n\n for arg_6 in range(4):\n if arg_1[arg_3, arg_6] == 0:\n continue\n\n if (arg_4 != -1 and\n not arg_5[arg_4] and\n arg_1[arg_3, arg_4] == arg_1[arg_3, arg_6]):\n # Merge tile with merge_candidate\n arg_1[arg_3, arg_6] = 0\n arg_5[arg_4] = True\n arg_1[arg_3, arg_4] += 1\n arg_2 += 2 ** arg_1[arg_3, arg_4]\n\n else:\n # Move tile to the left\n arg_4 += 1\n if arg_6 != arg_4:\n arg_1[arg_3, arg_4] = arg_1[arg_3, arg_6]\n arg_1[arg_3, arg_6] = 0\n\n return arg_2"} +{"_id": "doc_2363", "title": "", "text": "def Func(arg_0):\n \"\"\"Adds a random tile to the grid. Assumes that it has empty fields.\"\"\"\n\n arg_1, arg_2 = np.where(arg_0._state == 0)\n assert len(arg_1) != 0\n arg_3 = np.random.choice(len(arg_1))\n arg_4 = np.random.choice([1, 2], p=[0.9, 0.1])\n\n arg_0._state[arg_1[arg_3], arg_2[arg_3]] = arg_4"} +{"_id": "doc_2364", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates the tf.train.Saver object and stores it in self.saver.\n \"\"\"\n if arg_0.execution_type == \"single\":\n arg_1 = arg_0.get_variables(include_submodules=True, include_nontrainable=True)\n else:\n arg_1 = arg_0.global_model.get_variables(include_submodules=True, include_nontrainable=True)\n\n # global_variables += [self.global_episode, self.global_timestep]\n\n for arg_2 in arg_0.get_savable_components():\n arg_2.register_saver_ops()\n\n # TensorFlow saver object\n # TODO potentially make other options configurable via saver spec.\n arg_0.saver = tf.train.Saver(\n var_list=arg_1, # should be given?\n reshape=False,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n name=None,\n restore_sequentially=False,\n saver_def=None,\n builder=None,\n defer_build=False,\n allow_empty=True,\n write_version=tf.train.SaverDef.V2,\n pad_step_number=False,\n save_relative_paths=True\n # filename=None\n )"} +{"_id": "doc_2365", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates and returns a list of hooks to use in a session. 
Populates self.saver_directory.\n\n Returns: List of hooks to use in a session.\n \"\"\"\n arg_1 = list()\n\n # Checkpoint saver hook\n if arg_0.saver_spec is not None and (arg_0.execution_type == 'single' or arg_0.distributed_spec['task_index'] == 0):\n arg_0.saver_directory = arg_0.saver_spec['directory']\n arg_1.append(tf.train.CheckpointSaverHook(\n checkpoint_dir=arg_0.saver_directory,\n save_secs=arg_0.saver_spec.get('seconds', None if 'steps' in arg_0.saver_spec else 600),\n save_steps=arg_0.saver_spec.get('steps'), # Either one or the other has to be set.\n saver=None, # None since given via 'scaffold' argument.\n checkpoint_basename=arg_0.saver_spec.get('basename', 'model.ckpt'),\n scaffold=arg_0.scaffold,\n listeners=None\n ))\n else:\n arg_0.saver_directory = None\n\n # Stop at step hook\n # hooks.append(tf.train.StopAtStepHook(\n # num_steps=???, # This makes more sense, if load and continue training.\n # last_step=None # Either one or the other has to be set.\n # ))\n\n # # Step counter hook\n # hooks.append(tf.train.StepCounterHook(\n # every_n_steps=counter_config.get('steps', 100), # Either one or the other has to be set.\n # every_n_secs=counter_config.get('secs'), # Either one or the other has to be set.\n # output_dir=None, # None since given via 'summary_writer' argument.\n # summary_writer=summary_writer\n # ))\n\n # Other available hooks:\n # tf.train.FinalOpsHook(final_ops, final_ops_feed_dict=None)\n # tf.train.GlobalStepWaiterHook(wait_until_step)\n # tf.train.LoggingTensorHook(tensors, every_n_iter=None, every_n_secs=None)\n # tf.train.NanTensorHook(loss_tensor, fail_on_nan_loss=True)\n # tf.train.ProfilerHook(save_steps=None, save_secs=None, output_dir='', show_dataflow=True, show_memory=False)\n\n return arg_1"} +{"_id": "doc_2366", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Returns the tf op to fetch when unbuffered observations are passed in.\n\n Args:\n states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\n internals (any): Internal list.\n terminal (bool): boolean indicating if the episode terminated after the observation.\n reward (float): scalar reward that resulted from executing the action.\n\n Returns: Tf op to fetch when `observe()` is called.\n \"\"\"\n # Increment episode\n arg_7 = tf.count_nonzero(input_tensor=arg_4, dtype=util.tf_dtype('int'))\n arg_8 = tf.assign_add(ref=arg_0.episode, value=tf.to_int64(x=arg_7))\n arg_9 = tf.assign_add(ref=arg_0.global_episode, value=tf.to_int64(x=arg_7))\n\n with tf.control_dependencies(control_inputs=(arg_8, arg_9)):\n # Stop gradients\n # Not using buffers here.\n arg_1 = util.map_tensors(fn=tf.stop_gradient, tensors=arg_1)\n arg_3 = util.map_tensors(fn=tf.stop_gradient, tensors=arg_3)\n arg_2 = util.map_tensors(fn=tf.stop_gradient, tensors=arg_2)\n arg_4 = tf.stop_gradient(input=arg_4)\n arg_5 = tf.stop_gradient(input=arg_5)\n\n # Observation\n arg_10 = arg_0.fn_observe_timestep(\n arg_1=arg_1,\n arg_3=arg_3,\n arg_2=arg_2,\n arg_4=arg_4,\n arg_5=arg_5\n )\n\n with tf.control_dependencies(control_inputs=(arg_10,)):\n # Trivial operation to enforce control dependency.\n arg_0.unbuffered_episode_output = arg_0.global_episode + 0"} +{"_id": "doc_2367", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the list of all of the components this model consists of that can be individually saved and restored.\n For 
instance the network or distribution.\n\n Returns:\n List of util.SavableComponent\n \"\"\"\n arg_1 = arg_0.get_components()\n arg_1 = [arg_1[name] for name in sorted(arg_1)]\n return set(filter(lambda x: isinstance(x, util.SavableComponent), arg_1))"} +{"_id": "doc_2368", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Saves a component of this model to the designated location.\n\n Args:\n component_name: The component to save.\n save_path: The location to save to.\n Returns:\n Checkpoint path where the component was saved.\n \"\"\"\n arg_3 = arg_0.get_component(arg_1=arg_1)\n arg_0._validate_savable(arg_3=arg_3, arg_1=arg_1)\n return arg_3.save(sess=arg_0.session, arg_2=arg_2)"} +{"_id": "doc_2369", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Restores a component's parameters from a save location.\n\n Args:\n component_name: The component to restore.\n save_path: The save location.\n \"\"\"\n arg_3 = arg_0.get_component(arg_1=arg_1)\n arg_0._validate_savable(arg_3=arg_3, arg_1=arg_1)\n arg_3.restore(sess=arg_0.session, arg_2=arg_2)"} +{"_id": "doc_2370", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the state space. Might include subdicts if multiple Func are\n available simultaneously.\n\n Returns: dict of state properties (shape and type).\n\n \"\"\"\n arg_1 = arg_0.env.getScreenRGB()\n return dict(shape=arg_1.shape, type='int')"} +{"_id": "doc_2371", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Sanity checks an actions dict, used to define the action space for an MDP.\n Throws an error or warns if mismatches are found.\n\n Args:\n actions_spec (Union[None,dict]): The spec-dict to check (or None).\n\n Returns: Tuple of 1) the action space desc and 2) whether there is only one component in the action space.\n \"\"\"\n # Leave incoming spec-dict intact.\n arg_1 = copy.deepcopy(arg_0)\n\n # Unique action shortform.\n arg_2 = ('type' in arg_1)\n if arg_2:\n arg_1 = dict(arg_4=arg_1)\n\n # Normalize actions.\n for arg_3, arg_4 in arg_1.items():\n # Set default type to int\n if 'type' not in arg_4:\n arg_4['type'] = 'int'\n\n # Check required values\n if arg_4['type'] == 'int':\n if 'num_actions' not in arg_4:\n raise TensorForceError(\"Action requires value 'num_actions' set!\")\n elif arg_4['type'] == 'float':\n if ('min_value' in arg_4) != ('max_value' in arg_4):\n raise TensorForceError(\"Action requires both values 'min_value' and 'max_value' set!\")\n\n # Set default shape to empty tuple (single-int, discrete action space)\n if 'shape' not in arg_4:\n arg_4['shape'] = ()\n\n # Convert int to unary tuple\n if isinstance(arg_4['shape'], int):\n arg_4['shape'] = (arg_4['shape'],)\n\n return arg_1, arg_2"} +{"_id": "doc_2372", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Handles the behaviour of visible bolts flying toward Marauders.\"\"\"\n # Disappear if we've hit a Marauder or a bunker.\n if (arg_0.character in arg_4['bunker_hitters'] or\n arg_0.character in arg_4['marauder_hitters']):\n return arg_0._teleport((-1, -1))\n # Otherwise, northward!\n arg_0._north(arg_1, arg_4)"} +{"_id": "doc_2373", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Handles the behaviour of visible bolts flying toward the player.\"\"\"\n # Disappear if we've hit a bunker.\n if arg_0.character in arg_4['bunker_hitters']:\n return arg_0._teleport((-1, -1))\n # End the game if we've hit the player.\n if arg_0.position == arg_3['P'].position: arg_4.terminate_episode()\n arg_0._south(arg_1, arg_4)"} 
+{"_id": "doc_2374", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Launches a new bolt from a random Marauder.\"\"\"\n # We don't fire if another Marauder fired a bolt just now.\n if arg_2.get('last_marauder_shot') == arg_2.frame: return\n arg_2['last_marauder_shot'] = arg_2.frame\n # Which Marauder should fire the laser bolt?\n arg_3 = np.random.choice(np.nonzero(arg_1['X'].sum(axis=0))[0])\n arg_4 = np.nonzero(arg_1['X'][:, arg_3])[0][-1] + 1\n # Move ourselves just below that Marauder.\n arg_0._teleport((arg_4, arg_3))"} +{"_id": "doc_2375", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Creates and stores Network and Distribution objects.\n Generates and stores all template functions.\n \"\"\"\n # Create network before super-call, since non-empty internals_spec attribute (for RNN) is required subsequently.\n arg_0.network = Network.from_spec(\n spec=arg_0.network_spec,\n kwargs=dict(summary_labels=arg_0.summary_labels)\n )\n\n # Now that we have the network component: We can create the internals placeholders.\n assert len(arg_0.internals_spec) == 0\n arg_0.internals_spec = arg_0.network.internals_spec()\n for arg_4 in sorted(arg_0.internals_spec):\n arg_5 = arg_0.internals_spec[arg_4]\n arg_0.internals_input[arg_4] = tf.placeholder(\n dtype=util.tf_dtype(arg_5['type']),\n shape=(None,) + tuple(arg_5['shape']),\n arg_4=('internal-' + arg_4)\n )\n if arg_5['initialization'] == 'zeros':\n arg_0.internals_init[arg_4] = np.zeros(shape=arg_5['shape'])\n else:\n raise TensorForceError(\"Invalid internal initialization value.\")\n\n # And only then call super.\n arg_1 = super(DistributionModel, arg_0).Func(arg_1)\n\n # Distributions\n arg_0.distributions = arg_0.create_distributions()\n\n # KL divergence function\n arg_0.fn_kl_divergence = tf.make_template(\n name_='kl-divergence',\n func_=arg_0.tf_kl_divergence,\n custom_getter_=arg_1\n )\n\n return arg_1"} +{"_id": "doc_2376", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates and returns the Distribution objects based on self.distributions_spec.\n\n Returns: Dict of distributions according to self.distributions_spec.\n \"\"\"\n arg_1 = dict()\n for arg_2 in sorted(arg_0.actions_spec):\n arg_3 = arg_0.actions_spec[arg_2]\n\n if arg_0.distributions_spec is not None and arg_2 in arg_0.distributions_spec:\n arg_4 = dict(arg_3)\n arg_4['scope'] = arg_2\n arg_4['summary_labels'] = arg_0.summary_labels\n arg_1[arg_2] = Distribution.from_spec(\n spec=arg_0.distributions_spec[arg_2],\n arg_4=arg_4\n )\n\n elif arg_3['type'] == 'bool':\n arg_1[arg_2] = Bernoulli(\n shape=arg_3['shape'],\n scope=arg_2,\n summary_labels=arg_0.summary_labels\n )\n\n elif arg_3['type'] == 'int':\n arg_1[arg_2] = Categorical(\n shape=arg_3['shape'],\n num_actions=arg_3['num_actions'],\n scope=arg_2,\n summary_labels=arg_0.summary_labels\n )\n\n elif arg_3['type'] == 'float':\n if 'min_value' in arg_3:\n arg_1[arg_2] = Beta(\n shape=arg_3['shape'],\n min_value=arg_3['min_value'],\n max_value=arg_3['max_value'],\n scope=arg_2,\n summary_labels=arg_0.summary_labels\n )\n\n else:\n arg_1[arg_2] = Gaussian(\n shape=arg_3['shape'],\n scope=arg_2,\n summary_labels=arg_0.summary_labels\n )\n\n return arg_1"} +{"_id": "doc_2377", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Creates a memory from a specification dict.\n \"\"\"\n arg_2 = util.get_object(\n obj=arg_0,\n predefined_objects=tensorforce.core.memories.memories,\n arg_1=arg_1\n )\n assert isinstance(arg_2, Memory)\n return arg_2"} +{"_id": "doc_2378", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Initialization step preparing the arguments for the first iteration of the loop body.\n\n Args:\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.\n\n Returns:\n Initial arguments for tf_step.\n \"\"\"\n arg_0.base_value = arg_2\n\n if arg_4 is None: # TODO: Is this a good alternative?\n arg_4 = tf.abs(x=arg_2)\n\n arg_5 = super(LineSearch, arg_0).Func(arg_1)\n\n arg_6 = tf.divide(\n x=(arg_3 - arg_0.base_value),\n y=tf.maximum(x=arg_4, y=util.epsilon)\n )\n\n arg_7 = arg_6 - 1.0\n\n if arg_0.mode == 'linear':\n arg_8 = [-t * arg_0.parameter for t in arg_1]\n arg_0.estimated_incr = -arg_4 * arg_0.parameter\n\n elif arg_0.mode == 'exponential':\n arg_8 = [-t * arg_0.parameter for t in arg_1]\n\n return arg_5 + (arg_8, arg_6, arg_7, arg_4)"} +{"_id": "doc_2379", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Iteration loop body of the line search algorithm.\n\n Args:\n x: Current solution estimate $x_t$.\n iteration: Current iteration counter $t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n Updated arguments for next iteration.\n \"\"\"\n arg_1, arg_7, arg_3, arg_4, arg_5, arg_6 = super(LineSearch, arg_0).Func(\n arg_1, arg_2, arg_3, arg_4, arg_5, arg_6\n )\n\n arg_8 = [t + delta for t, delta in zip(arg_1, arg_3)]\n\n if arg_0.mode == 'linear':\n arg_9 = arg_3\n arg_10 = arg_6 + arg_0.estimated_incr\n\n elif arg_0.mode == 'exponential':\n arg_9 = [delta * arg_0.parameter for delta in arg_3]\n arg_10 = arg_6 * arg_0.parameter\n\n arg_11 = arg_0.fn_x(arg_9)\n\n arg_12 = tf.divide(\n arg_1=(arg_11 - arg_0.base_value),\n y=tf.maximum(arg_1=arg_10, y=util.epsilon)\n )\n\n return arg_8, arg_7, arg_9, arg_12, arg_4, arg_10"} +{"_id": "doc_2380", "title": "", "text": "def Func(arg_0, arg_1=True, **arg_2):\n \"\"\"Render Func formatted text to html.\n\n :param text: Func formatted text content.\n :param escape: if set to False, all html tags will not be escaped.\n :param use_xhtml: output with xhtml tags.\n :param hard_wrap: if set to True, it will use the GFM line breaks feature.\n :param parse_block_html: parse text only in block level html.\n :param parse_inline_html: parse text only in inline level html.\n \"\"\"\n return Markdown(arg_1=arg_1, **arg_2)(arg_0)"} +{"_id": "doc_2381", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse setext heading.\"\"\"\n arg_0.tokens.append({\n 'type': 'heading',\n 'level': 1 if arg_1.group(2) == '=' else 2,\n 'text': arg_1.group(1),\n })"} +{"_id": "doc_2382", "title": "", "text": "def Func(arg_0):\n \"\"\"Grammar for hard wrap linebreak. You don't need to add two\n spaces at the end of a line.\n \"\"\"\n arg_0.linebreak = re.compile(r'^ *\\n(?!\\s*$)')\n arg_0.text = re.compile(\n r'^[\\s\\S]+?(?=[\\\\ code``.\n\n :param code: text content of the code block.\n :param lang: language of the given code.\n \"\"\"\n arg_1 = arg_1.rstrip('\\n')\n if not arg_2:\n arg_1 = escape(arg_1, smart_amp=False)\n return '
%s\\n
\\n' % arg_1\n arg_1 = escape(arg_1, quote=True, smart_amp=False)\n return '
%s\\n
\\n' % (arg_2, arg_1)"} +{"_id": "doc_2384", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Rendering block level pure html content.\n\n :param html: text content of the html snippet.\n \"\"\"\n if arg_0.options.get('skip_style') and \\\n arg_1.lower().startswith(''\n '%d'\n ) % (escape(arg_1), escape(arg_1), arg_2)\n return arg_3"} +{"_id": "doc_2386", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Rendering a footnote item.\n\n :param key: identity key for the footnote.\n :param text: text content of the footnote.\n \"\"\"\n arg_3 = (\n ''\n ) % escape(arg_1)\n arg_2 = arg_2.rstrip()\n if arg_2.endswith('

'):\n arg_2 = re.sub(r'<\\/p>$', r'%s

' % arg_3, arg_2)\n else:\n arg_2 = '%s

%s

' % (arg_2, arg_3)\n arg_4 = '
  • %s
  • \\n' % (escape(arg_1), arg_2)\n return arg_4"} +{"_id": "doc_2387", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert MetaParams into TF Summary Format and create summary_op.\n\n Returns:\n Merged TF Op for TEXT summary elements, should only be executed once to reduce data duplication.\n\n \"\"\"\n arg_1 = []\n\n arg_0.ignore_unknown_dtypes = True\n for arg_3 in sorted(arg_0.meta_params):\n arg_4 = arg_0.convert_data_to_string(arg_0.meta_params[arg_3])\n\n if len(arg_4) == 0:\n continue\n if isinstance(arg_4, str):\n arg_1.append(tf.contrib.summary.generic(name=arg_3, tensor=tf.convert_to_tensor(str(arg_4))))\n else:\n arg_1.append(tf.contrib.summary.generic(name=arg_3, tensor=tf.as_string(tf.convert_to_tensor(arg_4))))\n\n return arg_1"} +{"_id": "doc_2388", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None):\n \"\"\"\n Creates the TensorFlow operations for calculating the baseline loss of a batch.\n\n Args:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n reward: Reward tensor.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\n Returns:\n Loss tensor.\n \"\"\"\n if arg_0.baseline_mode == 'states':\n arg_6 = arg_0.baseline.loss(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5\n )\n\n elif arg_0.baseline_mode == 'network':\n arg_6 = arg_0.baseline.loss(\n arg_1=arg_0.network.apply(x=arg_1, arg_2=arg_2, arg_4=arg_4),\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5\n )\n\n arg_7 = arg_0.baseline.regularization_loss()\n if arg_7 is not None:\n arg_6 += arg_7\n\n return arg_6"} +{"_id": "doc_2389", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Creates the TensorFlow operations for performing an optimization step on the given variables, including\n actually changing the values of the variables.\n\n Args:\n time: Time tensor. 
Not used for this optimizer.\n variables: List of variables to optimize.\n **kwargs: \n fn_loss : loss function tensor to differentiate.\n\n Returns:\n List of delta tensors corresponding to the updates for each optimized variable.\n \"\"\"\n arg_4 = arg_3[\"fn_loss\"]\n if arg_2 is None:\n arg_2 = tf.trainable_variables\n return tf.gradients(arg_4, arg_2)"} +{"_id": "doc_2390", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Constructs the extra Replay memory.\n \"\"\"\n arg_1 = super(QDemoModel, arg_0).Func(arg_1)\n\n arg_0.demo_memory = Replay(\n states=arg_0.states_spec,\n internals=arg_0.internals_spec,\n actions=arg_0.actions_spec,\n include_next_states=True,\n capacity=arg_0.demo_memory_capacity,\n scope='demo-replay',\n summary_labels=arg_0.summary_labels\n )\n\n # Import demonstration optimization.\n arg_0.fn_import_demo_experience = tf.make_template(\n name_='import-demo-experience',\n func_=arg_0.tf_import_demo_experience,\n custom_getter_=arg_1\n )\n\n # Demonstration loss.\n arg_0.fn_demo_loss = tf.make_template(\n name_='demo-loss',\n func_=arg_0.tf_demo_loss,\n custom_getter_=arg_1\n )\n\n # Combined loss.\n arg_0.fn_combined_loss = tf.make_template(\n name_='combined-loss',\n func_=arg_0.tf_combined_loss,\n custom_getter_=arg_1\n )\n\n # Demonstration optimization.\n arg_0.fn_demo_optimization = tf.make_template(\n name_='demo-optimization',\n func_=arg_0.tf_demo_optimization,\n custom_getter_=arg_1\n )\n\n return arg_1"} +{"_id": "doc_2391", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7=None):\n \"\"\"\n Extends the q-model loss via the dqfd large-margin loss.\n \"\"\"\n arg_8 = arg_0.network.apply(x=arg_1, arg_5=arg_5, arg_6=arg_6)\n arg_9 = list()\n\n for arg_10 in sorted(arg_2):\n arg_11 = arg_2[arg_10]\n arg_12 = arg_0.distributions[arg_10].parameterize(x=arg_8)\n arg_13 = arg_0.distributions[arg_10].state_action_value(arg_12=arg_12, arg_11=arg_11)\n\n # Create the supervised margin loss\n # Zero for the action taken, one for all other actions, now multiply by expert margin\n if arg_0.actions_spec[arg_10]['type'] == 'bool':\n arg_14 = 2\n arg_11 = tf.cast(x=arg_11, dtype=util.tf_dtype('int'))\n else:\n arg_14 = arg_0.actions_spec[arg_10]['num_actions']\n\n arg_15 = tf.one_hot(indices=arg_11, depth=arg_14)\n arg_16 = tf.ones_like(tensor=arg_15, dtype=tf.float32)\n arg_17 = arg_16 - arg_15\n\n # max_a([Q(s,a) + l(s,a_E,a)], l(s,a_E, a) is 0 for expert action and margin value for others\n arg_18 = arg_0.distributions[arg_10].state_action_value(arg_12=arg_12)\n arg_18 = arg_18 + arg_17 * arg_0.expert_margin\n arg_19 = tf.reduce_max(input_tensor=arg_18, axis=-1)\n\n # J_E(Q) = max_a([Q(s,a) + l(s,a_E,a)] - Q(s,a_E)\n arg_20 = arg_19 - arg_13\n\n arg_21 = util.prod(arg_0.actions_spec[arg_10]['shape'])\n arg_20 = tf.reshape(tensor=arg_20, shape=(-1, arg_21))\n arg_9.append(arg_20)\n\n arg_22 = tf.reduce_mean(input_tensor=tf.concat(values=arg_9, axis=1), axis=1)\n arg_22 = tf.square(x=arg_22)\n\n return tf.reduce_mean(input_tensor=arg_22, axis=0)"} +{"_id": "doc_2392", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9=None):\n \"\"\"\n Combines Q-loss and demo loss.\n \"\"\"\n arg_10 = arg_0.fn_loss(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9\n )\n\n arg_11 = arg_0.fn_demo_loss(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_8=arg_8,\n arg_9=arg_9\n 
)\n\n return arg_10 + arg_0.supervised_weight * arg_11"} +{"_id": "doc_2393", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"\n Stores demonstrations in the demo memory.\n \"\"\"\n arg_6 = arg_0.Func_output\n\n arg_7 = arg_0.get_feed_dict(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5\n )\n\n arg_0.monitored_session.run(arg_6=arg_6, arg_7=arg_7)"} +{"_id": "doc_2394", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Performs a demonstration update by calling the demo optimization operation.\n Note that the batch data does not have to be fetched from the demo memory as this is now part of\n the TensorFlow operation of the demo update.\n \"\"\"\n arg_1 = arg_0.demo_optimization_output\n\n arg_0.monitored_session.run(arg_1=arg_1)"} +{"_id": "doc_2395", "title": "", "text": "def Func(arg_0):\n '''Ensures tasks have an action key and strings are converted to python objects'''\n\n arg_1 = dict()\n arg_2 = ModuleArgsParser(arg_0)\n try:\n arg_3, arg_4, arg_1['delegate_to'] = arg_2.parse()\n except AnsibleParserError as e:\n try:\n arg_5 = \"%s:%s\" % (arg_0[FILENAME_KEY], arg_0[LINE_NUMBER_KEY])\n del arg_0[FILENAME_KEY]\n del arg_0[LINE_NUMBER_KEY]\n except KeyError:\n arg_5 = \"Unknown\"\n try:\n import pprint\n arg_6 = pprint.PrettyPrinter(indent=2)\n arg_7 = arg_6.pformat(arg_0)\n except ImportError:\n arg_7 = arg_0\n raise SystemExit(\"Couldn't parse task at %s (%s)\\n%s\" % (arg_5, e.message, arg_7))\n\n # denormalize shell -> command conversion\n if '_uses_shell' in arg_4:\n arg_3 = 'shell'\n del(arg_4['_uses_shell'])\n\n for (arg_8, arg_9) in list(arg_0.items()):\n if arg_8 in ('action', 'local_action', 'args', 'delegate_to') or arg_8 == arg_3:\n # we don't want to re-assign these values, which were\n # determined by the ModuleArgsParser() above\n continue\n else:\n arg_1[arg_8] = arg_9\n\n arg_1['action'] = dict(__ansible_module__=arg_3)\n\n if '_raw_params' in arg_4:\n arg_1['action']['__ansible_arguments__'] = arg_4['_raw_params'].split(' ')\n del(arg_4['_raw_params'])\n else:\n arg_1['action']['__ansible_arguments__'] = list()\n\n if 'argv' in arg_4 and not arg_1['action']['__ansible_arguments__']:\n arg_1['action']['__ansible_arguments__'] = arg_4['argv']\n del(arg_4['argv'])\n\n arg_1['action'].update(arg_4)\n return arg_1"} +{"_id": "doc_2396", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses yaml as ansible.utils.parse_yaml but with linenumbers.\n\n The line numbers are stored in each node's LINE_NUMBER_KEY key.\n \"\"\"\n\n def arg_13(arg_2, arg_3):\n # the line number where the previous token has ended (plus empty lines)\n arg_4 = arg_11.line\n arg_5 = Composer.compose_node(arg_11, arg_2, arg_3)\n arg_5.__line__ = arg_4 + 1\n return arg_5\n\n def arg_14(arg_5, arg_7=False):\n if ANSIBLE_VERSION < 2:\n arg_8 = Constructor.construct_mapping(arg_11, arg_5, arg_7=arg_7)\n else:\n arg_8 = AnsibleConstructor.construct_mapping(arg_11, arg_5, arg_7=arg_7)\n if hasattr(arg_5, '__line__'):\n arg_8[arg_9] = arg_5.__line__\n else:\n arg_8[arg_9] = arg_8._line_number\n arg_8[arg_10] = arg_1\n return arg_8\n\n try:\n if ANSIBLE_VERSION < 2:\n arg_11 = yaml.Loader(arg_0)\n else:\n import inspect\n arg_12 = {}\n if 'vault_password' in inspect.getargspec(AnsibleLoader.__init__).args:\n arg_12['vault_password'] = DEFAULT_VAULT_PASSWORD\n arg_11 = AnsibleLoader(arg_0, **arg_12)\n arg_11.compose_node = arg_13\n arg_11.construct_mapping = arg_14\n arg_0 = arg_11.get_single_data()\n except (yaml.parser.ParserError, 
yaml.scanner.ScannerError) as e:\n raise SystemExit(\"Failed to parse YAML in %s: %s\" % (arg_1, str(e)))\n return arg_0"} +{"_id": "doc_2397", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add additional requirements from setup.cfg to file metadata_path\"\"\"\n arg_2 = list(arg_0.setupcfg_requirements())\n if not arg_2: return\n arg_3 = read_pkg_info(arg_1)\n if 'Provides-Extra' in arg_3 or 'Requires-Dist' in arg_3:\n warnings.warn('setup.cfg requirements overwrite values from setup.py')\n del arg_3['Provides-Extra']\n del arg_3['Requires-Dist']\n for arg_4, arg_5 in arg_2:\n arg_3[arg_4] = arg_5\n write_pkg_info(arg_1, arg_3)"} +{"_id": "doc_2398", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Convert an .egg-info directory into a .dist-info directory\"\"\"\n def adios(arg_3):\n \"\"\"Appropriately delete directory, file or link.\"\"\"\n if os.path.exists(arg_3) and not os.path.islink(arg_3) and os.path.isdir(arg_3):\n shutil.rmtree(arg_3)\n elif os.path.exists(arg_3):\n os.unlink(arg_3)\n\n adios(arg_2)\n\n if not os.path.exists(arg_1):\n # There is no egg-info. This is probably because the egg-info\n # file/directory is not named matching the distribution name used\n # to name the archive file. Check for this case and report\n # accordingly.\n import glob\n arg_4 = os.path.join(os.path.dirname(arg_1), '*.egg-info')\n arg_5 = glob.glob(arg_4)\n arg_6 = \"Egg metadata expected at %s but not found\" % (arg_1,)\n if arg_5:\n arg_7 = os.path.basename(arg_5[0])\n arg_6 += \" (%s found - possible misnamed archive file?)\" % (arg_7,)\n\n raise ValueError(arg_6)\n\n if os.path.isfile(arg_1):\n # .egg-info is a single file\n arg_8 = arg_1\n arg_9 = arg_0._pkginfo_to_metadata(arg_1, arg_1)\n os.mkdir(arg_2)\n else:\n # .egg-info is a directory\n arg_8 = os.path.join(arg_1, 'PKG-INFO')\n arg_9 = arg_0._pkginfo_to_metadata(arg_1, arg_8)\n\n # ignore common egg metadata that is useless to wheel\n shutil.copytree(arg_1, arg_2,\n ignore=lambda x, y: set(('PKG-INFO',\n 'requires.txt',\n 'SOURCES.txt',\n 'not-zip-safe',)))\n\n # delete dependency_links if it is only whitespace\n arg_10 = os.path.join(arg_2, 'dependency_links.txt')\n with open(arg_10, 'r') as dependency_links_file:\n arg_11 = dependency_links_file.read().strip()\n if not arg_11:\n adios(arg_10)\n\n write_pkg_info(os.path.join(arg_2, 'METADATA'), arg_9)\n\n # XXX deprecated. 
Still useful for current distribute/setuptools.\n arg_12 = os.path.join(arg_2, 'METADATA')\n arg_0.add_requirements(arg_12)\n\n # XXX intentionally a different path than the PEP.\n arg_13 = os.path.join(arg_2, 'metadata.json')\n arg_14 = pkginfo_to_dict(arg_12,\n distribution=arg_0.distribution)\n\n if 'description' in arg_14:\n arg_15 = 'DESCRIPTION.rst'\n arg_16 = arg_14.pop('description')\n arg_17 = os.path.join(arg_2,\n arg_15)\n with open(arg_17, \"wb\") as description_file:\n description_file.write(arg_16.encode('utf-8'))\n arg_14['extensions']['python.details']['document_names']['description'] = arg_15\n\n # XXX heuristically copy any LICENSE/LICENSE.txt?\n arg_18 = arg_0.license_file()\n if arg_18:\n arg_19 = 'LICENSE.txt'\n shutil.copy(arg_18, os.path.join(arg_0.distinfo_dir, arg_19))\n arg_14['extensions']['python.details']['document_names']['license'] = arg_19\n\n with open(arg_13, \"w\") as metadata_json:\n json.dump(arg_14, metadata_json, sort_keys=True)\n\n adios(arg_1)"} +{"_id": "doc_2399", "title": "", "text": "def Func(arg_0: arg_1[arg_2], arg_3: arg_4 = None, arg_5: arg_4 = None,\n arg_6: arg_7[arg_8, arg_4] = arg_8.accepting_input) -> Activity:\n \"\"\"\n Returns a message that includes a set of suggested actions and optional text.\n\n :Example:\n message = MessageFactory.Func([CardAction(title='a', type=ActionTypes.im_back, value='a'),\n CardAction(title='b', type=ActionTypes.im_back, value='b'),\n CardAction(title='c', type=ActionTypes.im_back, value='c')], 'Choose a color')\n await context.send_activity(message)\n\n :param actions:\n :param text:\n :param speak:\n :param input_hint:\n :return:\n \"\"\"\n arg_0 = SuggestedActions(arg_0=arg_0)\n arg_10 = Activity(type=ActivityTypes.message, arg_6=arg_6, Func=arg_0)\n if arg_3:\n arg_10.text = arg_3\n if arg_5:\n arg_10.speak = arg_5\n return arg_10"} +{"_id": "doc_2400", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_1 = None, arg_4: arg_1 = None, arg_5: arg_1 = None,\n arg_6: arg_7[arg_8, arg_1] = None):\n \"\"\"\n Returns a message that will display a single image or video to a user.\n\n :Example:\n message = MessageFactory.Func('https://example.com/hawaii.jpg', 'image/jpeg',\n 'Hawaii Trip', 'A photo from our family vacation.')\n await context.send_activity(message)\n\n :param url:\n :param content_type:\n :param name:\n :param text:\n :param speak:\n :param input_hint:\n :return:\n \"\"\"\n arg_9 = Attachment(arg_2=arg_2, Func=arg_0)\n if arg_3:\n arg_9.name = arg_3\n return attachment_activity(AttachmentLayoutTypes.list, [arg_9], arg_4, arg_5, arg_6)"} +{"_id": "doc_2401", "title": "", "text": "async def Func(arg_0, arg_1: arg_2[arg_3]) -> dict:\n \"\"\"Read storeitems from storage.\n\n :param keys:\n :return dict:\n \"\"\"\n try:\n # check if the database and container exists and if not create\n if not arg_0.__container_exists:\n arg_0.__create_db_and_container()\n if len(arg_1) > 0:\n # create the parameters object\n arg_4 = [\n {'name': f'@id{i}', 'value': f'{self.__sanitize_key(key)}'}\n for i, key in enumerate(arg_1)\n ]\n # get the names of the params\n arg_5 = ','.join(param.get('name')\n for param in arg_4)\n # create the query\n arg_6 = {\n \"query\":\n f\"SELECT c.id, c.realId, c.document, c._etag \\\nFROM c WHERE c.id in ({parameter_sequence})\",\n \"parameters\": arg_4\n }\n arg_7 = {'enableCrossPartitionQuery': True}\n # run the query and store the results as a list\n arg_8 = list(\n arg_0.client.QueryItems(\n arg_0.__container_link, arg_6, arg_7)\n )\n # return a dict with a 
key and a StoreItem\n return {\n arg_9.get('realId'): arg_0.__create_si(arg_9) for arg_9 in arg_8\n }\n else:\n raise Exception('cosmosdb_storage.Func(): \\\nprovide at least one key')\n except TypeError as e:\n raise e"} +{"_id": "doc_2402", "title": "", "text": "async def Func(arg_0, arg_1: arg_2[arg_3, arg_4]):\n \"\"\"Save storeitems to storage.\n\n :param changes:\n :return:\n \"\"\"\n try:\n # check if the database and container exists and if not create\n if not arg_0.__container_exists:\n arg_0.__create_db_and_container()\n # iterate over the changes\n for (arg_5, arg_6) in arg_1.items():\n # store the e_tag\n arg_7 = arg_6.e_tag\n # create the new document\n arg_8 = {'id': arg_0.__sanitize_key(arg_5),\n 'realId': arg_5,\n 'document': arg_0.__create_dict(arg_6)\n }\n # the e_tag will be * for new docs so do an insert\n if (arg_7 == '*' or not arg_7):\n arg_0.client.UpsertItem(\n database_or_Container_link=arg_0.__container_link,\n document=arg_8,\n options={'disableAutomaticIdGeneration': True}\n )\n # if we have an etag, do opt. concurrency replace\n elif(len(arg_7) > 0):\n arg_9 = {'type': 'IfMatch', 'condition': arg_7}\n arg_0.client.ReplaceItem(\n document_link=arg_0.__item_link(\n arg_0.__sanitize_key(arg_5)),\n new_document=arg_8,\n options={'accessCondition': arg_9}\n )\n # error when there is no e_tag\n else:\n raise Exception('cosmosdb_storage.Func(): etag missing')\n except Exception as e:\n raise e"} +{"_id": "doc_2403", "title": "", "text": "def Func(arg_0, arg_1) -> str:\n \"\"\"Return the sanitized key.\n\n Replace characters that are not allowed in keys in Cosmos.\n\n :param key:\n :return str:\n \"\"\"\n # forbidden characters\n arg_2 = ['\\\\', '?', '/', '#', '\\t', '\\n', '\\r']\n # replace those with with '*' and the\n # Unicode code point of the character and return the new string\n return ''.join(\n map(\n lambda x: '*'+str(ord(x)) if x in arg_2 else x, arg_1\n )\n )"} +{"_id": "doc_2404", "title": "", "text": "def Func(arg_0):\n \"\"\"Call the get or create methods.\"\"\"\n arg_1 = arg_0.config.database\n arg_2 = arg_0.config.container\n arg_0.db = arg_0.__get_or_create_database(arg_0.client, arg_1)\n arg_0.container = arg_0.__get_or_create_container(\n arg_0.client, arg_2\n )"} +{"_id": "doc_2405", "title": "", "text": "def Func(arg_0, arg_1, arg_2) -> str:\n \"\"\"Return the database link.\n\n Check if the database exists or create the db.\n\n :param doc_client:\n :param id:\n :return str:\n \"\"\"\n # query CosmosDB for a database with that name/id\n arg_3 = list(arg_1.QueryDatabases({\n \"query\": \"SELECT * FROM r WHERE r.id=@id\",\n \"parameters\": [\n {\"name\": \"@id\", \"value\": arg_2}\n ]\n }))\n # if there are results, return the first (db names are unique)\n if len(arg_3) > 0:\n return arg_3[0]['id']\n else:\n # create the database if it didn't exist\n arg_4 = arg_1.CreateDatabase({'id': arg_2})\n return arg_4['id']"} +{"_id": "doc_2406", "title": "", "text": "def Func(\n arg_0,\n arg_1: [arg_2],\n arg_3: arg_4,\n arg_5: arg_6[arg_7,arg_7] = None,\n arg_8: arg_6[arg_7,arg_9] = None\n ) -> EventData:\n \"\"\"\n Fills the event properties and metrics for the QnaMessage event for telemetry.\n\n :return: A tuple of event data properties and metrics that will be sent to the BotTelemetryClient.track_event() method for the QnAMessage event. 
The properties and metrics returned the standard properties logged with any properties passed from the get_answers() method.\n\n :rtype: EventData\n \"\"\"\n\n arg_10: arg_6[arg_7,arg_7] = dict()\n arg_11: arg_6[arg_7, arg_9] = dict()\n\n arg_10[arg_12.knowledge_base_id_property] = arg_0._endpoint.knowledge_base_id\n\n arg_14: arg_7 = arg_3.activity.text\n arg_15: arg_7 = arg_3.activity.from_property.name\n\n # Use the LogPersonalInformation flag to toggle logging PII data; text and username are common examples.\n if arg_0.log_personal_information:\n if arg_14:\n arg_10[arg_12.question_property] = arg_14\n \n if arg_15:\n arg_10[arg_12.username_property] = arg_15\n\n # Fill in Qna Results (found or not).\n if len(arg_1) > 0:\n arg_18 = arg_1[0]\n\n arg_19 = {\n arg_12.matched_question_property: json.dumps(arg_18.questions),\n arg_12.question_id_property: arg_7(arg_18.id),\n arg_12.answer_property: arg_18.answer,\n arg_12.score_metric: arg_18.score,\n arg_12.article_found_property: 'true'\n }\n\n arg_10.update(arg_19)\n else:\n arg_20 = {\n arg_12.matched_question_property : 'No Qna Question matched',\n arg_12.question_id_property : 'No Qna Question Id matched',\n arg_12.answer_property : 'No Qna Answer matched',\n arg_12.article_found_property : 'false'\n }\n \n arg_10.update(arg_20)\n\n # Additional Properties can override \"stock\" properties.\n if arg_5:\n arg_10.update(arg_5)\n\n # Additional Metrics can override \"stock\" metrics.\n if arg_8:\n arg_11.update(arg_8)\n \n return EventData(arg_10=arg_10, arg_11=arg_11)"} +{"_id": "doc_2407", "title": "", "text": "def Func(arg_0: arg_1) -> ConversationReference:\n \"\"\"\n Returns the conversation reference for an activity. This can be saved as a plain old JSON\n object and then later used to message the user proactively.\n\n Usage Example:\n reference = TurnContext.Func(context.request)\n :param activity:\n :return:\n \"\"\"\n return ConversationReference(activity_id=arg_0.id,\n user=copy(arg_0.from_property),\n bot=copy(arg_0.recipient),\n conversation=copy(arg_0.conversation),\n channel_id=arg_0.channel_id,\n service_url=arg_0.service_url)"} +{"_id": "doc_2408", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> str:\n \"\"\"\n Give the waterfall step a unique name\n \"\"\"\n arg_3 = arg_0._steps[arg_1].__qualname__\n\n if not arg_3 or \">\" in arg_3 :\n arg_3 = f\"Step{index + 1}of{len(self._steps)}\"\n\n return arg_3"} +{"_id": "doc_2409", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3 = 100) -> bool:\n \"\"\"Determine if a number of Suggested Actions are supported by a Channel.\n\n Args:\n channel_id (str): The Channel to check the if Suggested Actions are supported in.\n button_cnt (int, optional): Defaults to 100. 
The number of Suggested Actions to check for the Channel.\n\n Returns:\n bool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not support that number of Suggested Actions.\n \"\"\"\n\n arg_4 = {\n # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies\n Channels.facebook: 10,\n Channels.skype: 10,\n # https://developers.line.biz/en/reference/messaging-api/#items-object\n Channels.line: 13,\n # https://dev.kik.com/#/docs/messaging#text-response-object\n Channels.kik: 20,\n Channels.telegram: 100,\n Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n }\n return arg_2 <= arg_4[arg_0] if arg_0 in arg_4 else False"} +{"_id": "doc_2410", "title": "", "text": "def Func(arg_0: arg_1) -> bool:\n \"\"\" Determines if a given Auth header is from the Bot Framework Emulator\n\n :param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.\n :type auth_header: str\n\n :return: True, if the token was issued by the Emulator. Otherwise, false.\n \"\"\"\n # The Auth Header generally looks like this:\n # \"Bearer eyJ0e[...Big Long String...]XAiO\"\n if not arg_0:\n # No token. Can't be an emulator token.\n return False\n\n arg_2 = arg_0.split(' ')\n if len(arg_2) != 2:\n # Emulator tokens MUST have exactly 2 parts.\n # If we don't have 2 parts, it's not an emulator token\n return False\n\n arg_3 = arg_2[0]\n arg_4 = arg_2[1]\n\n # We now have an array that should be:\n # [0] = \"Bearer\"\n # [1] = \"[Big Long String]\"\n if arg_3 != 'Bearer':\n # The scheme from the emulator MUST be \"Bearer\"\n return False\n\n # Parse the Big Long String into an actual token.\n arg_5 = jwt.decode(arg_4, verify=False)\n if not arg_5:\n return False\n\n # Is there an Issuer?\n arg_6 = arg_5['iss']\n if not arg_6:\n # No Issuer, means it's not from the Emulator.\n return False\n\n # Is the token issues by a source we consider to be the emulator?\n arg_7 = EmulatorValidation.TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.issuer\n if arg_7 and not arg_6 in arg_7:\n # Not a Valid Issuer. This is NOT a Bot Framework Emulator Token.\n return False\n\n # The Token is from the Bot Framework Emulator. Success!\n return True"} +{"_id": "doc_2411", "title": "", "text": "def Func(arg_0: arg_1) -> Attachment:\n \"\"\"\n Returns an attachment for a hero card. 
Will raise a TypeError if 'card' argument is not a HeroCard.\n\n Hero cards tend to have one dominant full width image and the cards text & buttons can\n usually be found below the image.\n :return:\n \"\"\"\n if not isinstance(arg_0, arg_1):\n raise TypeError('CardFactory.Func(): `card` argument is not an instance of an HeroCard, '\n 'unable to prepare attachment.')\n\n return Attachment(content_type=CardFactory.content_types.Func,\n content=arg_0)"} +{"_id": "doc_2412", "title": "", "text": "def Func(arg_0: arg_1) -> bool:\n \"\"\"\n Return bool, True if succeed otherwise False.\n \"\"\"\n if ctypes.windll.user32.OpenClipboard(0):\n ctypes.windll.user32.EmptyClipboard()\n arg_2 = (len(arg_0) + 1) * 2\n arg_3 = ctypes.windll.kernel32.GlobalAlloc(0, arg_2) # GMEM_FIXED=0\n arg_4 = ctypes.windll.kernel32.GlobalLock(arg_3)\n ctypes.cdll.msvcrt.wcsncpy(ctypes.c_wchar_p(arg_4), ctypes.c_wchar_p(arg_0), arg_2 // 2)\n ctypes.windll.kernel32.GlobalUnlock(arg_3)\n # system owns hClipboardData after calling SetClipboardData,\n # application can not write to or free the data once ownership has been transferred to the system\n ctypes.windll.user32.SetClipboardData(13, arg_3) # CF_TEXT=1, CF_UNICODETEXT=13\n ctypes.windll.user32.CloseClipboard()\n return True\n return False"} +{"_id": "doc_2413", "title": "", "text": "def Func() -> bool:\n \"\"\"\n Reset to the default text color on console window.\n Return bool, True if succeed otherwise False.\n \"\"\"\n if sys.stdout:\n sys.stdout.flush()\n bool(ctypes.windll.kernel32.SetConsoleTextAttribute(_ConsoleOutputHandle, _DefaultConsoleColor))"} +{"_id": "doc_2414", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> arg_1:\n \"\"\"\n Func from Win32.\n Return int, a native window handle.\n \"\"\"\n return ctypes.windll.user32.Func(ctypes.wintypes.POINT(arg_0, arg_2))"} +{"_id": "doc_2415", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_1, arg_4: arg_1) -> None:\n \"\"\"Func from Win32.\"\"\"\n ctypes.windll.user32.Func(arg_0, arg_2, arg_3, arg_4)"} +{"_id": "doc_2416", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_1, arg_4: arg_1) -> bool:\n \"\"\"\n Func from Win32.\n Return bool, True if succeed otherwise False.\n \"\"\"\n return bool(ctypes.windll.user32.FuncW(ctypes.c_void_p(arg_0), arg_2, arg_3, arg_4))"} +{"_id": "doc_2417", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_1, arg_4: arg_1) -> arg_1:\n \"\"\"\n Func from Win32.\n Return int, the return value specifies the result of the message processing;\n it depends on the message sent.\n \"\"\"\n return ctypes.windll.user32.FuncW(ctypes.c_void_p(arg_0), arg_2, arg_3, arg_4)"} +{"_id": "doc_2418", "title": "", "text": "def Func() -> str:\n \"\"\"\n Func from Win32.\n Return str.\n \"\"\"\n arg_0 = ctypes.c_wchar * MAX_PATH\n arg_1 = arg_0()\n ctypes.windll.kernel32.FuncW(arg_1, MAX_PATH)\n return arg_1.value"} +{"_id": "doc_2419", "title": "", "text": "def Func() -> bool:\n \"\"\"\n Check if desktop is locked.\n Return bool.\n Desktop is locked if press Win+L, Ctrl+Alt+Del or in remote desktop mode.\n \"\"\"\n arg_0 = False\n arg_1 = ctypes.windll.user32.OpenDesktopW(ctypes.c_wchar_p('Default'), 0, 0, 0x0100) # DESKTOP_SWITCHDESKTOP = 0x0100\n if arg_1:\n arg_0 = not ctypes.windll.user32.SwitchDesktop(arg_1)\n ctypes.windll.user32.CloseDesktop(arg_1)\n return arg_0"} +{"_id": "doc_2420", "title": "", "text": "def Func(arg_0) -> INPUT:\n \"\"\"\n Create Win32 struct `INPUT` for `SendInput`.\n Return `INPUT`.\n \"\"\"\n if 
isinstance(arg_0, MOUSEINPUT):\n return INPUT(InputType.Mouse, _INPUTUnion(mi=arg_0))\n if isinstance(arg_0, KEYBDINPUT):\n return INPUT(InputType.Keyboard, _INPUTUnion(ki=arg_0))\n if isinstance(arg_0, HARDWAREINPUT):\n return INPUT(InputType.Hardware, _INPUTUnion(hi=arg_0))\n raise TypeError('Cannot create INPUT structure!')"} +{"_id": "doc_2421", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_1 = arg_4.KeyDown, arg_6: arg_1 = 0) -> INPUT:\n \"\"\"Create Win32 struct `KEYBDINPUT` for `SendInput`.\"\"\"\n return _CreateInput(KEYBDINPUT(arg_0, arg_2, arg_3, arg_6, None))"} +{"_id": "doc_2422", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1 = 0) -> INPUT:\n \"\"\"Create Win32 struct `HARDWAREINPUT` for `SendInput`.\"\"\"\n return _CreateInput(HARDWAREINPUT(arg_0, arg_2 & 0xFFFF, arg_2 >> 16 & 0xFFFF))"} +{"_id": "doc_2423", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> Control:\n \"\"\"\n Call IUIAutomation ElementFromPoint x,y. May return None if mouse is over cmd's title bar icon.\n Return `Control` subclass or None.\n \"\"\"\n arg_3 = _AutomationClient.instance().IUIAutomation.ElementFromPoint(ctypes.wintypes.POINT(arg_0, arg_2))\n return Control.CreateControlFromElement(arg_3)"} +{"_id": "doc_2424", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> Control:\n \"\"\"\n Get a native handle from point x,y and call IUIAutomation.ElementFromHandle.\n Return `Control` subclass.\n \"\"\"\n return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.ElementFromHandle(WindowFromPoint(arg_0, arg_2)))"} +{"_id": "doc_2425", "title": "", "text": "def Func() -> None:\n \"\"\"Delete log file.\"\"\"\n if os.path.exists(Logger.FileName):\n os.remove(Logger.FileName)"} +{"_id": "doc_2426", "title": "", "text": "def Func(arg_0) -> ctypes.Array:\n \"\"\"\n Return `ctypes.Array`, an iterable array of int values in argb.\n \"\"\"\n return arg_0.GetPixelColorsOfRect(0, 0, arg_0.Width, arg_0.Height)"} +{"_id": "doc_2427", "title": "", "text": "def Func(arg_0) -> list:\n \"\"\"\n Return list, a list of `Control` subclasses.\n \"\"\"\n arg_1 = []\n arg_2 = arg_0.GetFirstChildControl()\n while arg_2:\n arg_1.append(arg_2)\n arg_2 = arg_2.GetNextSiblingControl()\n return arg_1"} +{"_id": "doc_2428", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> bool:\n \"\"\"\n Call native Func if control has a valid native handle.\n \"\"\"\n arg_3 = arg_0.NativeWindowHandle\n if arg_3:\n return Func(arg_3, arg_1)\n return False"} +{"_id": "doc_2429", "title": "", "text": "def Func(arg_0) -> bool:\n \"\"\"Determine whether current control is top level.\"\"\"\n arg_1 = arg_0.NativeWindowHandle\n if arg_1:\n return GetAncestor(arg_1, GAFlag.Root) == arg_1\n return False"} +{"_id": "doc_2430", "title": "", "text": "def Func(arg_0) -> 'Control':\n \"\"\"\n Get the top level control which current control lays.\n If current control is top level, return self.\n If current control is root control, return None.\n Return `PaneControl` or `WindowControl` or None.\n \"\"\"\n arg_1 = arg_0.NativeWindowHandle\n if arg_1:\n arg_2 = GetAncestor(arg_1, GAFlag.Root)\n if arg_2:\n if arg_2 == arg_1:\n return arg_0\n else:\n return ControlFromHandle(arg_2)\n else:\n #self is root control\n pass\n else:\n arg_3 = arg_0\n while True:\n arg_3 = arg_3.GetParentControl()\n arg_1 = arg_3.NativeWindowHandle\n if arg_1:\n arg_2 = GetAncestor(arg_1, GAFlag.Root)\n return ControlFromHandle(arg_2)"} +{"_id": "doc_2431", "title": "", "text": "def Func(arg_0, arg_1: 
arg_2 = arg_3) -> bool:\n \"\"\"\n Set top level window maximize.\n \"\"\"\n if arg_0.IsTopLevel():\n return arg_0.ShowWindow(SW.ShowFuncd, arg_1)\n return False"} +{"_id": "doc_2432", "title": "", "text": "def Func(arg_0) -> bool:\n \"\"\"\n Move window to screen center.\n \"\"\"\n if arg_0.IsTopLevel():\n arg_1 = arg_0.BoundingRectangle\n arg_2, arg_3 = GetScreenSize()\n arg_4, arg_5 = (arg_2 - arg_1.width()) // 2, (arg_3 - arg_1.height()) // 2\n if arg_4 < 0: arg_4 = 0\n if arg_5 < 0: arg_5 = 0\n return SetWindowPos(arg_0.NativeWindowHandle, SWP.HWND_Top, arg_4, arg_5, 0, 0, SWP.SWP_NoSize)\n return False"} +{"_id": "doc_2433", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = arg_3) -> bool:\n \"\"\"Set top level window active.\"\"\"\n if arg_0.IsTopLevel():\n arg_4 = arg_0.NativeWindowHandle\n if IsIconic(arg_4):\n arg_5 = ShowWindow(arg_4, SW.Restore)\n elif not IsWindowVisible(arg_4):\n arg_5 = ShowWindow(arg_4, SW.Show)\n arg_5 = SetForegroundWindow(arg_4) # may fail if foreground windows's process is not python\n time.sleep(arg_1)\n return arg_5\n return False"} +{"_id": "doc_2434", "title": "", "text": "def Func(arg_0):\n \"\"\"For a composite instruction, reverse the order of sub-gates.\n\n This is done by recursively Funcing all sub-instructions.\n It does not invert any gate.\n\n Returns:\n Instruction: a fresh gate with sub-gates reversed\n \"\"\"\n if not arg_0._definition:\n return arg_0.copy()\n\n arg_1 = arg_0.copy(name=arg_0.name + '_Func')\n arg_1.definition = []\n for arg_3, arg_4, arg_5 in reversed(arg_0._definition):\n arg_1._definition.append((arg_3.Func(), arg_4, arg_5))\n return arg_1"} +{"_id": "doc_2435", "title": "", "text": "def Func(arg_0):\n \"\"\"Invert this instruction.\n\n If the instruction is composite (i.e. has a definition),\n then its definition will be recursively inverted.\n\n Special instructions inheriting from Instruction can\n implement their own Func (e.g. 
T and Tdg, Barrier, etc.)\n\n Returns:\n Instruction: a fresh instruction for the Func\n\n Raises:\n QiskitError: if the instruction is not composite\n and an Func has not been implemented for it.\n \"\"\"\n if not arg_0.definition:\n raise QiskitError(\"Func() not implemented for %s.\" % arg_0.name)\n arg_1 = arg_0.copy(name=arg_0.name + '_dg')\n arg_1._definition = []\n for arg_3, arg_4, arg_5 in reversed(arg_0._definition):\n arg_1._definition.append((arg_3.Func(), arg_4, arg_5))\n return arg_1"} +{"_id": "doc_2436", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add classical control on register classical and value val.\"\"\"\n if not isinstance(arg_1, ClassicalRegister):\n raise QiskitError(\"Func must be used with a classical register\")\n if arg_2 < 0:\n raise QiskitError(\"control value should be non-negative\")\n arg_0.control = (arg_1, arg_2)\n return arg_0"} +{"_id": "doc_2437", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Run all the passes on a QuantumCircuit\n\n Args:\n circuit (QuantumCircuit): circuit to transform via all the registered passes\n\n Returns:\n QuantumCircuit: Transformed circuit.\n \"\"\"\n arg_2 = arg_1.name\n arg_3 = circuit_to_dag(arg_1)\n del arg_1\n for arg_4 in arg_0.working_list:\n for arg_5 in arg_4:\n arg_3 = arg_0._do_pass(arg_5, arg_3, arg_4.options)\n arg_1 = dag_to_circuit(arg_3)\n arg_1.name = arg_2\n return arg_1"} +{"_id": "doc_2438", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Do a pass and its \"requires\".\n\n Args:\n pass_ (BasePass): Pass to do.\n dag (DAGCircuit): The dag on which the pass is ran.\n options (dict): PassManager options.\n Returns:\n DAGCircuit: The transformed dag in case of a transformation pass.\n The same input dag in case of an analysis pass.\n Raises:\n TranspilerError: If the pass is not a proper pass instance.\n \"\"\"\n\n # First, do the requires of pass_\n if not arg_3[\"ignore_requires\"]:\n for arg_4 in arg_1.requires:\n arg_2 = arg_0.Func(arg_4, arg_2, arg_3)\n\n # Run the pass itself, if not already run\n if arg_1 not in arg_0.valid_passes:\n if arg_1.is_transformation_pass:\n arg_1.property_set = arg_0.fenced_property_set\n arg_6 = arg_1.run(arg_2)\n if not isinstance(arg_6, DAGCircuit):\n raise TranspilerError(\"Transformation passes should return a transformed dag.\"\n \"The pass %s is returning a %s\" % (type(arg_1).__name__,\n type(arg_6)))\n arg_2 = arg_6\n elif arg_1.is_analysis_pass:\n arg_1.property_set = arg_0.property_set\n arg_1.run(FencedDAGCircuit(arg_2))\n else:\n raise TranspilerError(\"I dont know how to handle this type of pass\")\n\n # update the valid_passes property\n arg_0._update_valid_passes(arg_1, arg_3['ignore_preserves'])\n\n return arg_2"} +{"_id": "doc_2439", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a list structure of the appended Func and its options.\n\n Returns (list): The appended Func.\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.working_list:\n arg_1.append(arg_2.dump_Func())\n return arg_1"} +{"_id": "doc_2440", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Fetches the passes added to this flow controller.\n\n Returns (dict): {'options': self.options, 'passes': [passes], 'type': type(self)}\n \"\"\"\n arg_1 = {'options': arg_0.options, 'passes': [], 'type': type(arg_0)}\n for arg_2 in arg_0._passes:\n if isinstance(arg_2, FlowController):\n arg_1['passes'].append(arg_2.Func())\n else:\n arg_1['passes'].append(arg_2)\n return arg_1"} +{"_id": "doc_2441", "title": "", "text": "def Func(arg_0, arg_1, arg_2, 
**arg_3):\n \"\"\"\n Constructs a flow controller based on the partially evaluated controller arguments.\n\n Args:\n passes (list[BasePass]): passes to add to the flow controller.\n options (dict): PassManager options.\n **partial_controller (dict): Partially evaluated controller arguments in the form\n `{name:partial}`\n\n Raises:\n TranspilerError: When partial_controller is not well-formed.\n\n Returns:\n FlowController: A FlowController instance.\n \"\"\"\n if None in arg_3.values():\n raise TranspilerError('The controller needs a condition.')\n\n if arg_3:\n for arg_4 in arg_0.registered_controllers.keys():\n if arg_4 in arg_3:\n return arg_0.registered_controllers[arg_4](arg_1, arg_2,\n **arg_3)\n raise TranspilerError(\"The controllers for %s are not registered\" % arg_3)\n else:\n return FlowControllerLinear(arg_1, arg_2)"} +{"_id": "doc_2442", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Apply a single qubit gate to the qubit.\n\n Args:\n gate(str): the single qubit gate name\n params(list): the operation parameters op['params']\n Returns:\n tuple: a tuple of U gate parameters (theta, phi, lam)\n Raises:\n QiskitError: if the gate name is not valid\n \"\"\"\n if arg_0 in ('U', 'u3'):\n return arg_1[0], arg_1[1], arg_1[2]\n elif arg_0 == 'u2':\n return np.pi / 2, arg_1[0], arg_1[1]\n elif arg_0 == 'u1':\n return 0, 0, arg_1[0]\n elif arg_0 == 'id':\n return 0, 0, 0\n raise QiskitError('Gate is not among the valid types: %s' % arg_0)"} +{"_id": "doc_2443", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Get the matrix for a single qubit.\n\n Args:\n gate(str): the single qubit gate name\n params(list): the operation parameters op['params']\n Returns:\n array: A numpy array representing the matrix\n \"\"\"\n\n # Converting sym to floats improves the performance of the simulator 10x.\n # This is probably a FIXME since it might show bugs in the simulator.\n (arg_2, arg_3, arg_4) = map(float, single_gate_params(arg_0, arg_1))\n\n return np.array([[np.cos(arg_2 / 2),\n -np.exp(1j * arg_4) * np.sin(arg_2 / 2)],\n [np.exp(1j * arg_3) * np.sin(arg_2 / 2),\n np.exp(1j * arg_3 + 1j * arg_4) * np.cos(arg_2 / 2)]])"} +{"_id": "doc_2444", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the index string for Numpy.einsum matrix-vector multiplication.\n\n The returned indices are to perform a matrix multiplication A.v where\n the matrix A is an M-qubit matrix, vector v is an N-qubit vector, and\n M <= N, and identity matrices are implied on the subsystems where A has no\n support on v.\n\n Args:\n gate_indices (list[int]): the indices of the right matrix subsystems\n to contract with the left matrix.\n number_of_qubits (int): the total number of qubits for the right matrix.\n\n Returns:\n str: An indices string for the Numpy.einsum function.\n \"\"\"\n\n arg_2, arg_3, arg_4, arg_5 = _einsum_matmul_index_helper(arg_0,\n arg_1)\n\n # Combine indices into matrix multiplication string format\n # for numpy.einsum function\n return \"{mat_l}{mat_r}, \".format(arg_2=arg_2, arg_3=arg_3) + \\\n \"{tens_lin}->{tens_lout}\".format(arg_4=arg_4,\n arg_5=arg_5)"} +{"_id": "doc_2445", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the index string for Numpy.einsum matrix multiplication.\n\n The returned indices are to perform a matrix multiplication A.v where\n the matrix A is an M-qubit matrix, matrix v is an N-qubit vector, and\n M <= N, and identity matrices are implied on the subsystems where A has no\n support on v.\n\n Args:\n gate_indices 

(list[int]): the indices of the right matrix subsystems\n to contract with the left matrix.\n number_of_qubits (int): the total number of qubits for the right matrix.\n\n Returns:\n tuple: (mat_left, mat_right, tens_in, tens_out) of index strings for\n that may be combined into a Numpy.einsum function string.\n\n Raises:\n QiskitError: if the total number of qubits plus the number of\n contracted indices is greater than 26.\n \"\"\"\n\n # Since we use ASCII alphabet for einsum index labels we are limited\n # to 26 total free left (lowercase) and 26 right (uppercase) indexes.\n # The rank of the contracted tensor reduces this as we need to use that\n # many characters for the contracted indices\n if len(arg_0) + arg_1 > 26:\n raise QiskitError(\"Total number of free indexes limited to 26\")\n\n # Indicies for N-qubit input tensor\n arg_2 = ascii_lowercase[:arg_1]\n\n # Indices for the N-qubit output tensor\n arg_3 = list(arg_2)\n\n # Left and right indices for the M-qubit multiplying tensor\n arg_4 = \"\"\n arg_5 = \"\"\n\n # Update left indices for mat and output\n for arg_6, arg_7 in enumerate(reversed(arg_0)):\n arg_4 += ascii_lowercase[-1 - arg_6]\n arg_5 += arg_2[-1 - arg_7]\n arg_3[-1 - arg_7] = ascii_lowercase[-1 - arg_6]\n arg_3 = \"\".join(arg_3)\n\n # Combine indices into matrix multiplication string format\n # for numpy.einsum function\n return arg_4, arg_5, arg_2, arg_3"} +{"_id": "doc_2446", "title": "", "text": "def Func(arg_0):\n \"\"\"Build a ``DAGCircuit`` object from a ``QuantumCircuit``.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n\n Return:\n DAGCircuit: the DAG representing the input circuit.\n \"\"\"\n arg_1 = DAGCircuit()\n arg_1.name = arg_0.name\n for arg_3 in arg_0.qregs:\n arg_1.add_qreg(arg_3)\n for arg_3 in arg_0.cregs:\n arg_1.add_creg(arg_3)\n\n for arg_4, arg_5, arg_6 in arg_0.data:\n # Get arguments for classical control (if any)\n if arg_4.control is None:\n arg_7 = None\n else:\n arg_7 = (arg_4.control[0], arg_4.control[1])\n\n arg_1.apply_operation_back(arg_4.copy(),\n arg_5, arg_6, arg_7)\n\n return arg_1"} +{"_id": "doc_2447", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"Function used to fit the decay cosine.\"\"\"\n # pylint: disable=invalid-name\n return arg_1 * np.exp(-arg_0 / arg_2) * np.cos(2 * np.pi * arg_3 * arg_0 + arg_4) + arg_5"} +{"_id": "doc_2448", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6,\n arg_7):\n \"\"\"Plot coherence data.\n\n Args:\n xdata\n ydata\n std_error\n fit\n fit_function\n xunit\n exp_str\n qubit_label\n Raises:\n ImportError: If matplotlib is not installed.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('The function Func needs matplotlib. 
'\n 'Run \"pip install matplotlib\" before.')\n plt.errorbar(arg_0, arg_1, arg_2, marker='.',\n markersize=9, c='b', linestyle='')\n plt.plot(arg_0, arg_4(arg_0, *arg_3), c='r', linestyle='--',\n label=(arg_6 + '= %s %s' % (str(round(arg_3[1])), arg_5)))\n plt.xticks(fontsize=14, rotation=70)\n plt.yticks(fontsize=14)\n plt.xlabel('time [%s]' % (arg_5), fontsize=16)\n plt.ylabel('P(1)', fontsize=16)\n plt.title(arg_6 + ' measurement of Q$_{%s}$' % (str(arg_7)), fontsize=18)\n plt.legend(fontsize=12)\n plt.grid(True)\n plt.show()"} +{"_id": "doc_2449", "title": "", "text": "def Func(arg_0):\n \"\"\"Take the raw rb data and convert it into averages and std dev\n\n Args:\n raw_rb (numpy.array): m x n x l list where m is the number of seeds, n\n is the number of Clifford sequences and l is the number of qubits\n\n Return:\n numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is\n the std dev over seeds\n \"\"\"\n arg_1 = []\n arg_1.append(np.mean(arg_0, 0))\n arg_1.append(np.std(arg_0, 0))\n\n return arg_1"} +{"_id": "doc_2450", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=None,\n arg_7=True):\n \"\"\"Plot randomized benchmarking data.\n\n Args:\n xdata (list): list of subsequence lengths\n ydatas (list): list of lists of survival probabilities for each\n sequence\n yavg (list): mean of the survival probabilities at each sequence\n length\n yerr (list): error of the survival\n fit (list): fit parameters\n survival_prob (callable): function that computes survival probability\n ax (Axes or None): plot axis (if passed in)\n show_plt (bool): display the plot.\n\n Raises:\n ImportError: If matplotlib is not installed.\n \"\"\"\n # pylint: disable=invalid-name\n\n if not HAS_MATPLOTLIB:\n raise ImportError('The function Func needs matplotlib. 

'\n 'Run \"pip install matplotlib\" before.')\n if arg_6 is None:\n plt.figure()\n arg_6 = plt.gca()\n\n # Plot the result for each sequence\n for arg_8 in arg_1:\n arg_6.plot(arg_0, arg_8, color='gray', linestyle='none', marker='x')\n # Plot the mean with error bars\n arg_6.errorbar(arg_0, arg_2, arg_3=arg_3, color='r', linestyle='--', linewidth=3)\n\n # Plot the fit\n arg_6.plot(arg_0, arg_5(arg_0, *arg_4), color='blue', linestyle='-', linewidth=2)\n arg_6.tick_params(labelsize=14)\n # ax.tick_params(axis='x',labelrotation=70)\n\n arg_6.set_xlabel('Clifford Length', fontsize=16)\n arg_6.set_ylabel('Z', fontsize=16)\n arg_6.grid(True)\n\n if arg_7:\n plt.show()"} +{"_id": "doc_2451", "title": "", "text": "def Func(arg_0):\n \"\"\"Validates the input to state visualization functions.\n\n Args:\n quantum_state (ndarray): Input state / density matrix.\n Returns:\n rho: A 2d numpy array for the density matrix.\n Raises:\n VisualizationError: Invalid input.\n \"\"\"\n arg_1 = np.asarray(arg_0)\n if arg_1.ndim == 1:\n arg_1 = np.outer(arg_1, np.conj(arg_1))\n # Check the shape of the input is a square matrix\n arg_2 = np.shape(arg_1)\n if len(arg_2) != 2 or arg_2[0] != arg_2[1]:\n raise VisualizationError(\"Input is not a valid quantum state.\")\n # Check state is an n-qubit state\n arg_3 = int(np.log2(arg_1.shape[0]))\n if 2 ** arg_3 != arg_1.shape[0]:\n raise VisualizationError(\"Input is not a multi-qubit quantum state.\")\n return arg_1"} +{"_id": "doc_2452", "title": "", "text": "def Func(arg_0):\n \"\"\"Trim a PIL image and remove white space.\"\"\"\n arg_1 = PIL.Image.new(arg_0.mode, arg_0.size, arg_0.getpixel((0, 0)))\n arg_2 = PIL.ImageChops.difference(arg_0, arg_1)\n arg_2 = PIL.ImageChops.add(arg_2, arg_2, 2.0, -100)\n arg_3 = arg_2.getbbox()\n if arg_3:\n arg_0 = arg_0.crop(arg_3)\n return arg_0"} +{"_id": "doc_2453", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the list of qubits drawing this gate would cover\"\"\"\n\n arg_2 = len(arg_0)\n arg_3 = 0\n for arg_4 in arg_1.qargs:\n arg_5 = arg_0.index(arg_4)\n\n if arg_5 < arg_2:\n arg_2 = arg_5\n if arg_5 > arg_3:\n arg_3 = arg_5\n\n if arg_1.cargs:\n return arg_0[arg_2:]\n\n return arg_0[arg_2:arg_3 + 1]"} +{"_id": "doc_2454", "title": "", "text": "def Func(arg_0):\n \"\"\"Build an ``Instruction`` object from a ``QuantumCircuit``.\n\n The instruction is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The instruction will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n\n Return:\n Instruction: an instruction equivalent to the action of the\n input circuit. 
Upon decomposition, this instruction will\n yield the components comprising the original circuit.\n \"\"\"\n arg_1 = Instruction(name=arg_0.name,\n num_qubits=sum([qreg.size for qreg in arg_0.qregs]),\n num_clbits=sum([creg.size for creg in arg_0.cregs]),\n params=[])\n arg_1.control = None\n\n def find_bit_position(arg_3):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(arg_3[0], QuantumRegister):\n arg_4 = arg_0.qregs\n else:\n arg_4 = arg_0.cregs\n arg_5 = arg_4.index(arg_3[0])\n return sum([arg_6.size for arg_6 in arg_4[:arg_5]]) + arg_3[1]\n\n arg_7 = arg_0.data.copy()\n\n if arg_1.num_qubits > 0:\n arg_8 = QuantumRegister(arg_1.num_qubits, 'q')\n if arg_1.num_clbits > 0:\n arg_9 = ClassicalRegister(arg_1.num_clbits, 'c')\n\n arg_7 = list(map(lambda x:\n (x[0],\n list(map(lambda y: (arg_8, find_bit_position(y)), x[1])),\n list(map(lambda y: (arg_9, find_bit_position(y)), x[2]))), arg_7))\n arg_1.definition = arg_7\n\n return arg_1"} +{"_id": "doc_2455", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pick a convenient layout depending on the best matching\n qubit connectivity, and set the property `layout`.\n\n Args:\n dag (DAGCircuit): DAG to find layout for.\n\n Raises:\n TranspilerError: if dag wider than self.coupling_map\n \"\"\"\n arg_2 = sum([arg_6.size for arg_6 in arg_1.qregs.values()])\n if arg_2 > arg_0.coupling_map.size():\n raise TranspilerError('Number of qubits greater than device.')\n arg_3 = arg_0._best_subset(arg_2)\n arg_4 = Layout()\n arg_5 = 0\n for arg_6 in arg_1.qregs.values():\n for arg_7 in range(arg_6.size):\n arg_4[(arg_6, arg_7)] = int(arg_3[arg_5])\n arg_5 += 1\n arg_0.property_set['layout'] = arg_4"} +{"_id": "doc_2456", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Computes the qubit mapping with the best connectivity.\n\n Args:\n n_qubits (int): Number of subset qubits to consider.\n\n Returns:\n ndarray: Array of qubits to use for best connectivity mapping.\n \"\"\"\n if arg_1 == 1:\n return np.array([0])\n\n arg_2 = arg_0.coupling_map.size()\n\n arg_3 = np.asarray(arg_0.coupling_map.get_edges())\n arg_4 = np.ones_like(arg_3[:, 0])\n arg_5 = sp.coo_matrix((arg_4, (arg_3[:, 0], arg_3[:, 1])),\n shape=(arg_2, arg_2)).tocsr()\n arg_6 = 0\n arg_7 = None\n # do bfs with each node as starting point\n for arg_8 in range(arg_5.shape[0]):\n arg_9 = cs.breadth_first_order(arg_5, i_start=arg_8, directed=False,\n return_predecessors=False)\n\n arg_10 = 0\n arg_11 = []\n for arg_12 in range(arg_1):\n arg_13 = arg_9[arg_12]\n for arg_14 in range(arg_5.indptr[arg_13],\n arg_5.indptr[arg_13 + 1]):\n arg_15 = arg_5.indices[arg_14]\n for arg_16 in range(arg_1):\n if arg_15 == arg_9[arg_16]:\n arg_10 += 1\n arg_11.append([arg_13, arg_15])\n break\n\n if arg_10 > arg_6:\n arg_6 = arg_10\n arg_7 = arg_9[0:arg_1]\n # Return a best mapping that has reduced bandwidth\n arg_17 = {}\n for arg_18 in range(arg_7.shape[0]):\n arg_17[arg_7[arg_18]] = arg_18\n arg_19 = [[arg_17[c[0]], arg_17[c[1]]] for c in arg_11]\n arg_20 = [arg_18[0] for arg_18 in arg_19]\n arg_21 = [arg_18[1] for arg_18 in arg_19]\n arg_4 = [1]*len(arg_20)\n arg_22 = sp.coo_matrix((arg_4, (arg_20, arg_21)),\n shape=(arg_1, arg_1)).tocsr()\n arg_23 = cs.reverse_cuthill_mckee(arg_22)\n arg_7 = arg_7[arg_23]\n return arg_7"} +{"_id": "doc_2457", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Apply Func to circuit.\n If qargs is None, applies to all the qbits.\n Args is a list of QuantumRegister or single 
qubits.\n For QuantumRegister, applies Func to all the qubits in that register.\"\"\"\n arg_2 = []\n\n arg_1 = _convert_to_bits(arg_1, [qbit for arg_3 in arg_0.qregs for qbit in arg_3])\n\n if not arg_1: # None\n for arg_3 in arg_0.qregs:\n for arg_4 in range(arg_3.size):\n arg_2.append((arg_3, arg_4))\n\n for arg_5 in arg_1:\n if isinstance(arg_5, (QuantumRegister, list)):\n if isinstance(arg_5, QuantumRegister):\n arg_2.extend([(arg_5, arg_4) for arg_4 in range(arg_5.size)])\n else:\n arg_2.extend(arg_5)\n else:\n arg_2.append(arg_5)\n\n return arg_0.append(Barrier(len(arg_2)), arg_2, [])"} +{"_id": "doc_2458", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Process an Id or IndexedId node as a bit or register type.\n\n Return a list of tuples (Register,index).\n \"\"\"\n # pylint: disable=inconsistent-return-statements\n arg_2 = None\n if arg_1.name in arg_0.dag.qregs:\n arg_2 = arg_0.dag.qregs[arg_1.name]\n elif arg_1.name in arg_0.dag.cregs:\n arg_2 = arg_0.dag.cregs[arg_1.name]\n else:\n raise QiskitError(\"expected qreg or creg name:\",\n \"line=%s\" % arg_1.line,\n \"file=%s\" % arg_1.file)\n\n if arg_1.type == \"indexed_id\":\n # An indexed bit or qubit\n return [(arg_2, arg_1.index)]\n elif arg_1.type == \"id\":\n # A qubit or qreg or creg\n if not arg_0.bit_stack[-1]:\n # Global scope\n return [(arg_2, arg_3) for arg_3 in range(arg_2.size)]\n else:\n # local scope\n if arg_1.name in arg_0.bit_stack[-1]:\n return [arg_0.bit_stack[-1][arg_1.name]]\n raise QiskitError(\"expected local bit name:\",\n \"line=%s\" % arg_1.line,\n \"file=%s\" % arg_1.file)\n return None"} +{"_id": "doc_2459", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Process a gate node.\n\n If opaque is True, process the node as an opaque gate node.\n \"\"\"\n arg_0.gates[arg_1.name] = {}\n arg_5 = arg_0.gates[arg_1.name]\n arg_5[\"print\"] = True # default\n arg_5[\"opaque\"] = arg_2\n arg_5[\"n_args\"] = arg_1.n_args()\n arg_5[\"n_bits\"] = arg_1.n_bits()\n if arg_1.n_args() > 0:\n arg_5[\"args\"] = [element.name for element in arg_1.arguments.children]\n else:\n arg_5[\"args\"] = []\n arg_5[\"bits\"] = [c.name for c in arg_1.bitlist.children]\n if arg_2:\n arg_5[\"body\"] = None\n else:\n arg_5[\"body\"] = arg_1.body"} +{"_id": "doc_2460", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Process a CNOT gate node.\"\"\"\n arg_2 = arg_0._process_bit_id(arg_1.children[0])\n arg_3 = arg_0._process_bit_id(arg_1.children[1])\n if not(len(arg_2) == len(arg_3) or len(arg_2) == 1 or len(arg_3) == 1):\n raise QiskitError(\"internal error: qreg size mismatch\",\n \"line=%s\" % arg_1.line, \"file=%s\" % arg_1.file)\n arg_4 = max([len(arg_2), len(arg_3)])\n for arg_5 in range(arg_4):\n if len(arg_2) > 1 and len(arg_3) > 1:\n arg_0.dag.apply_operation_back(CXBase(), [arg_2[arg_5], arg_3[arg_5]], [], arg_0.condition)\n elif len(arg_2) > 1:\n arg_0.dag.apply_operation_back(CXBase(), [arg_2[arg_5], arg_3[0]], [], arg_0.condition)\n else:\n arg_0.dag.apply_operation_back(CXBase(), [arg_2[0], arg_3[arg_5]], [], arg_0.condition)"} +{"_id": "doc_2461", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Process a measurement node.\"\"\"\n arg_2 = arg_0._process_bit_id(arg_1.children[0])\n arg_3 = arg_0._process_bit_id(arg_1.children[1])\n if len(arg_2) != len(arg_3):\n raise QiskitError(\"internal error: reg size mismatch\",\n \"line=%s\" % arg_1.line, \"file=%s\" % arg_1.file)\n for arg_4, arg_5 in zip(arg_2, arg_3):\n arg_0.dag.apply_operation_back(Measure(), [arg_4], [arg_5], arg_0.condition)"} 
+{"_id": "doc_2462", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Process an if node.\"\"\"\n arg_2 = arg_1.children[0].name\n arg_3 = arg_0.dag.cregs[arg_2]\n arg_4 = arg_1.children[1].value\n arg_0.condition = (arg_3, arg_4)\n arg_0._process_node(arg_1.children[2])\n arg_0.condition = None"} +{"_id": "doc_2463", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Create a DAG node out of a parsed AST op node.\n\n Args:\n name (str): operation name to apply to the dag.\n params (list): op parameters\n qargs (list(QuantumRegister, int)): qubits to attach to\n\n Raises:\n QiskitError: if encountering a non-basis opaque gate\n \"\"\"\n if arg_1 == \"u0\":\n arg_4 = U0Gate\n elif arg_1 == \"u1\":\n arg_4 = U1Gate\n elif arg_1 == \"u2\":\n arg_4 = U2Gate\n elif arg_1 == \"u3\":\n arg_4 = U3Gate\n elif arg_1 == \"x\":\n arg_4 = XGate\n elif arg_1 == \"y\":\n arg_4 = YGate\n elif arg_1 == \"z\":\n arg_4 = ZGate\n elif arg_1 == \"t\":\n arg_4 = TGate\n elif arg_1 == \"tdg\":\n arg_4 = TdgGate\n elif arg_1 == \"s\":\n arg_4 = SGate\n elif arg_1 == \"sdg\":\n arg_4 = SdgGate\n elif arg_1 == \"swap\":\n arg_4 = SwapGate\n elif arg_1 == \"rx\":\n arg_4 = RXGate\n elif arg_1 == \"ry\":\n arg_4 = RYGate\n elif arg_1 == \"rz\":\n arg_4 = RZGate\n elif arg_1 == \"rzz\":\n arg_4 = RZZGate\n elif arg_1 == \"id\":\n arg_4 = IdGate\n elif arg_1 == \"h\":\n arg_4 = HGate\n elif arg_1 == \"cx\":\n arg_4 = CnotGate\n elif arg_1 == \"cy\":\n arg_4 = CyGate\n elif arg_1 == \"cz\":\n arg_4 = CzGate\n elif arg_1 == \"ch\":\n arg_4 = CHGate\n elif arg_1 == \"crz\":\n arg_4 = CrzGate\n elif arg_1 == \"cu1\":\n arg_4 = Cu1Gate\n elif arg_1 == \"cu3\":\n arg_4 = Cu3Gate\n elif arg_1 == \"ccx\":\n arg_4 = ToffoliGate\n elif arg_1 == \"cswap\":\n arg_4 = FredkinGate\n else:\n raise QiskitError(\"unknown operation for ast node name %s\" % arg_1)\n\n arg_5 = arg_4(*arg_2)\n\n arg_0.dag.apply_operation_back(arg_5, arg_3, [], condition=arg_0.condition)"} +{"_id": "doc_2464", "title": "", "text": "def Func(arg_0, *arg_1: arg_2[arg_3]) -> int:\n \"\"\"Return duration of supplied channels.\n\n Args:\n *channels: Supplied channels\n \"\"\"\n return arg_0.timeslots.Func(*arg_1)"} +{"_id": "doc_2465", "title": "", "text": "def Func(arg_0, *arg_1: arg_2[arg_3]) -> int:\n \"\"\"Return minimum start time for supplied channels.\n\n Args:\n *channels: Supplied channels\n \"\"\"\n return arg_0.timeslots.Func(*arg_1)"} +{"_id": "doc_2466", "title": "", "text": "def Func(arg_0, *arg_1: arg_2[arg_3]) -> int:\n \"\"\"Return maximum start time for supplied channels.\n\n Args:\n *channels: Supplied channels\n \"\"\"\n return arg_0.timeslots.Func(*arg_1)"} +{"_id": "doc_2467", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = 0) -> Iterable[Tuple[arg_2, 'Instruction']]:\n \"\"\"Iterable for flattening Schedule tree.\n\n Args:\n time: Shifted time due to parent\n\n Yields:\n Tuple[int, ScheduleComponent]: Tuple containing time `ScheduleComponent` starts\n at and the flattened `ScheduleComponent`.\n \"\"\"\n for arg_3, arg_4 in arg_0.children:\n yield from arg_4.Func(arg_1 + arg_3)"} +{"_id": "doc_2468", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Include unknown fields after load.\n\n Unknown fields are added with no processing at all.\n\n Args:\n valid_data (dict or list): validated data returned by ``load()``.\n many (bool): if True, data and original_data are a list.\n original_data (dict or list): data passed to ``load()`` in the\n first place.\n\n Returns:\n dict: the same 
``valid_data`` extended with the unknown attributes.\n\n Inspired by https://github.com/marshmallow-code/marshmallow/pull/595.\n \"\"\"\n if arg_2:\n for arg_4, arg_5 in enumerate(arg_1):\n arg_6 = set(arg_3[arg_4]) - set(arg_1[arg_4])\n for arg_7 in arg_6:\n arg_1[arg_4][arg_7] = arg_3[arg_4][arg_7]\n else:\n arg_6 = set(arg_3) - set(arg_1)\n for arg_7 in arg_6:\n arg_1[arg_7] = arg_3[arg_7]\n\n return arg_1"} +{"_id": "doc_2469", "title": "", "text": "def Func(arg_0):\n \"\"\"Add validation after instantiation.\"\"\"\n\n @wraps(arg_0)\n def _decorated(arg_1, **arg_2):\n try:\n arg_3 = arg_1.shallow_schema.validate(arg_2)\n except ValidationError as ex:\n raise ModelValidationError(\n ex.messages, ex.field_names, ex.fields, ex.data, **ex.kwargs) from None\n\n arg_0(arg_1, **arg_2)\n\n return _decorated"} +{"_id": "doc_2470", "title": "", "text": "def Func(arg_0):\n \"\"\"Serialize the model into a Python dict of simple types.\n\n Note that this method requires that the model is bound with\n ``@bind_schema``.\n \"\"\"\n try:\n arg_1, arg_2 = arg_0.schema.dump(arg_0)\n except ValidationError as ex:\n raise ModelValidationError(\n ex.messages, ex.field_names, ex.fields, ex.data, **ex.kwargs) from None\n\n return arg_1"} +{"_id": "doc_2471", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"n-qubit QFT on q in circ.\"\"\"\n for arg_3 in range(arg_2):\n for arg_4 in range(arg_3):\n arg_0.cu1(math.pi / float(2**(arg_3 - arg_4)), arg_1[arg_3], arg_1[arg_4])\n arg_0.h(arg_1[arg_3])"} +{"_id": "doc_2472", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"\n Partial trace over subsystems of multi-partite vector.\n\n Args:\n vec (vector_like): complex vector N\n trace_systems (list(int)): a list of subsystems (starting from 0) to\n trace over.\n dimensions (list(int)): a list of the dimensions of the subsystems.\n If this is not set it will assume all\n subsystems are qubits.\n reverse (bool): ordering of systems in operator.\n If True system-0 is the right most system in tensor product.\n If False system-0 is the left most system in tensor product.\n\n Returns:\n ndarray: A density matrix with the appropriate subsystems traced over.\n \"\"\"\n\n # trace sys positions\n if arg_3:\n arg_2 = arg_2[::-1]\n arg_1 = len(arg_2) - 1 - np.array(arg_1)\n\n arg_4 = arg_0.reshape(arg_2)\n arg_4 = np.tensordot(arg_4, arg_4.conj(), axes=(arg_1, arg_1))\n arg_5 = int(np.sqrt(np.product(arg_4.shape)))\n\n return arg_4.reshape(arg_5, arg_5)"} +{"_id": "doc_2473", "title": "", "text": "def Func(arg_0, arg_1='col'):\n \"\"\"Devectorize a vectorized square matrix.\n\n Args:\n vectorized_mat (ndarray): a vectorized density matrix.\n method (str): the method of devectorization. 
Allowed values are\n - 'col' (default): flattens to column-major vector.\n - 'row': flattens to row-major vector.\n - 'pauli': flattens in the n-qubit Pauli basis.\n - 'pauli-weights': flattens in the n-qubit Pauli basis ordered by\n weight.\n\n Returns:\n ndarray: the resulting matrix.\n Raises:\n Exception: if input state is not a n-qubit state\n \"\"\"\n arg_0 = np.array(arg_0)\n arg_2 = int(np.sqrt(arg_0.size))\n if len(arg_0) != arg_2 * arg_2:\n raise Exception('Input is not a vectorized square matrix')\n\n if arg_1 == 'col':\n return arg_0.reshape(arg_2, arg_2, order='F')\n elif arg_1 == 'row':\n return arg_0.reshape(arg_2, arg_2, order='C')\n elif arg_1 in ['pauli', 'pauli_weights']:\n arg_3 = int(np.log2(arg_2)) # number of qubits\n if arg_2 != 2 ** arg_3:\n raise Exception('Input state must be n-qubit state')\n if arg_1 == 'pauli_weights':\n arg_4 = pauli_group(arg_3, case='weight')\n else:\n arg_4 = pauli_group(arg_3, case='tensor')\n arg_5 = np.array([p.to_matrix() for p in arg_4]) / 2 ** arg_3\n return np.tensordot(arg_0, arg_5, axes=1)\n return None"} +{"_id": "doc_2474", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"\n Convert a Choi-matrix to a Pauli-basis superoperator.\n\n Note that this function assumes that the Choi-matrix\n is defined in the standard column-stacking convention\n and is normalized to have trace 1. For a channel E this\n is defined as: choi = (I \\\\otimes E)(bell_state).\n\n The resulting 'rauli' R acts on input states as\n |rho_out>_p = R.|rho_in>_p\n where |rho> = vectorize(rho, method='pauli') for order=1\n and |rho> = vectorize(rho, method='pauli_weights') for order=0.\n\n Args:\n choi (matrix): the input Choi-matrix.\n order (int): ordering of the Pauli group vector.\n order=1 (default) is standard lexicographic ordering.\n Eg: [II, IX, IY, IZ, XI, XX, XY,...]\n order=0 is ordered by weights.\n Eg. [II, IX, IY, IZ, XI, XY, XZ, XX, XY,...]\n\n Returns:\n np.array: A superoperator in the Pauli basis.\n \"\"\"\n if arg_1 == 0:\n arg_1 = 'weight'\n elif arg_1 == 1:\n arg_1 = 'tensor'\n\n # get number of qubits'\n arg_2 = int(np.log2(np.sqrt(len(arg_0))))\n arg_3 = pauli_group(arg_2, case=arg_1)\n arg_4 = []\n for arg_5 in arg_3:\n for arg_6 in arg_3:\n arg_7 = np.kron(arg_6.to_matrix().T, arg_5.to_matrix())\n arg_4 += [np.trace(np.dot(arg_0, arg_7))]\n return np.array(arg_4).reshape(4 ** arg_2, 4 ** arg_2)"} +{"_id": "doc_2475", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Construct the Func product of two vectors.\n\n The second vector argument is optional, if absent the projector\n of the first vector will be returned.\n\n Args:\n vector1 (ndarray): the first vector.\n vector2 (ndarray): the (optional) second vector.\n\n Returns:\n np.array: The matrix |v1> Schedule:\n \"\"\"Return schedule Funced by `time`.\n\n Args:\n schedule: The schedule to Func\n time: The time to Func by\n name: Name of Funced schedule. Defaults to name of `schedule`\n \"\"\"\n if arg_4 is None:\n arg_4 = arg_0.name\n return union((arg_2, arg_0), arg_4=arg_4)"} +{"_id": "doc_2482", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_1,\n arg_5: arg_6 = None) -> Schedule:\n \"\"\"Return a new schedule with the `child` schedule Funced into the `parent` at `start_time`.\n\n Args:\n parent: Schedule to be Funced into\n time: Time to be Funced defined with respect to `parent`\n child: Schedule to Func\n name: Name of the new schedule. 
Defaults to name of parent\n \"\"\"\n return union(arg_0, (arg_2, arg_4), arg_5=arg_5)"} +{"_id": "doc_2483", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Apply Func to q.\"\"\"\n return arg_0.append(U3Gate(arg_1, arg_2, arg_3), [arg_4], [])"} +{"_id": "doc_2484", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Start the progress bar.\n\n Parameters:\n iterations (int): Number of iterations.\n \"\"\"\n arg_0.touched = True\n arg_0.iter = int(arg_1)\n arg_0.t_Func = time.time()"} +{"_id": "doc_2485", "title": "", "text": "def Func(arg_0):\n \"\"\"Disassemble a qobj and return the circuits, run_config, and user header\n\n Args:\n qobj (Qobj): The input qobj object to disassemble\n Returns:\n circuits (list): A list of quantum circuits\n run_config (dict): The dict of the run config\n user_qobj_header (dict): The dict of any user headers in the qobj\n\n \"\"\"\n arg_1 = arg_0.config.to_dict()\n arg_2 = arg_0.header.to_dict()\n arg_3 = _experiments_to_circuits(arg_0)\n\n return arg_3, arg_1, arg_2"} +{"_id": "doc_2486", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Calculate the Hamming distance between two bit strings\n\n Args:\n str1 (str): First string.\n str2 (str): Second string.\n Returns:\n int: Distance between strings.\n Raises:\n VisualizationError: Strings not same length\n \"\"\"\n if len(arg_0) != len(arg_1):\n raise VisualizationError('Strings not same length.')\n return sum(arg_2 != arg_3 for arg_2, arg_3 in zip(arg_0, arg_1))"} +{"_id": "doc_2487", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return quaternion for rotation about given axis.\n\n Args:\n angle (float): Angle in radians.\n axis (str): Axis for rotation\n\n Returns:\n Quaternion: Quaternion for axis rotation.\n\n Raises:\n ValueError: Invalid input axis.\n \"\"\"\n arg_2 = np.zeros(4, dtype=float)\n if arg_1 == 'x':\n arg_2[1] = 1\n elif arg_1 == 'y':\n arg_2[2] = 1\n elif arg_1 == 'z':\n arg_2[3] = 1\n else:\n raise ValueError('Invalid axis input.')\n arg_2 *= math.sin(arg_0/2.0)\n arg_2[0] = math.cos(arg_0/2.0)\n return Quaternion(arg_2)"} +{"_id": "doc_2488", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Normalizes a Quaternion to unit length\n so that it represents a valid rotation.\n\n Args:\n inplace (bool): Do an inplace normalization.\n\n Returns:\n Quaternion: Normalized quaternion.\n \"\"\"\n if arg_1:\n arg_2 = arg_0.norm()\n arg_0.data /= arg_2\n return None\n arg_2 = arg_0.norm()\n arg_3 = np.array(arg_0.data, copy=True)\n arg_3 /= arg_2\n return Quaternion(arg_3)"} +{"_id": "doc_2489", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts a unit-length quaternion to a rotation matrix.\n\n Returns:\n ndarray: Rotation matrix.\n \"\"\"\n arg_1, arg_2, arg_3, arg_4 = arg_0.normalize().data # pylint: disable=C0103\n arg_5 = np.array([\n [1-2*arg_3**2-2*arg_4**2, 2*arg_2*arg_3-2*arg_4*arg_1, 2*arg_2*arg_4+2*arg_3*arg_1],\n [2*arg_2*arg_3+2*arg_4*arg_1, 1-2*arg_2**2-2*arg_4**2, 2*arg_3*arg_4-2*arg_2*arg_1],\n [2*arg_2*arg_4-2*arg_3*arg_1, 2*arg_3*arg_4+2*arg_2*arg_1, 1-2*arg_2**2-2*arg_3**2]\n ], dtype=float)\n return arg_5"} +{"_id": "doc_2490", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Customize Func for handling containers.\"\"\"\n # Check the type in the standard way first, in order to fail quickly\n # in case of invalid values.\n arg_4 = super(InstructionParameter, arg_0).Func(\n arg_1, arg_2, arg_3)\n\n if is_collection(arg_1):\n arg_5 = [super(InstructionParameter, arg_0).Func(item, arg_2, arg_3)\n for item 

in arg_1]\n\n return arg_4"} +{"_id": "doc_2491", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(arg_1, int):\n if arg_1 < 0 or arg_1 >= arg_0.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(arg_1, slice):\n if arg_1.start < 0 or arg_1.stop >= arg_0.size or (arg_1.step is not None and\n arg_1.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")"} +{"_id": "doc_2492", "title": "", "text": "def Func(arg_0):\n \"\"\"Test if an array is a square matrix.\"\"\"\n arg_0 = np.array(arg_0)\n if arg_0.ndim != 2:\n return False\n arg_1 = arg_0.shape\n return arg_1[0] == arg_1[1]"} +{"_id": "doc_2493", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Test if an array is a diagonal matrix\"\"\"\n if arg_3 is None:\n arg_3 = arg_4\n if arg_1 is None:\n arg_1 = arg_2\n arg_0 = np.array(arg_0)\n if arg_0.ndim != 2:\n return False\n return np.allclose(arg_0, np.diag(np.diagonal(arg_0)), arg_1=arg_1, arg_3=arg_3)"} +{"_id": "doc_2494", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Test if an array is a symmetrix matrix\"\"\"\n if arg_3 is None:\n arg_3 = arg_4\n if arg_1 is None:\n arg_1 = arg_2\n arg_5 = np.array(arg_0)\n if arg_5.ndim != 2:\n return False\n return np.allclose(arg_5, arg_5.T, arg_1=arg_1, arg_3=arg_3)"} +{"_id": "doc_2495", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Test if an array is a Hermitian matrix\"\"\"\n if arg_3 is None:\n arg_3 = arg_4\n if arg_1 is None:\n arg_1 = arg_2\n arg_0 = np.array(arg_0)\n if arg_0.ndim != 2:\n return False\n return np.allclose(arg_0, np.conj(arg_0.T), arg_1=arg_1, arg_3=arg_3)"} +{"_id": "doc_2496", "title": "", "text": "def Func(arg_0,\n arg_1=False,\n arg_2=arg_3,\n arg_4=arg_5):\n \"\"\"Test if an array is an identity matrix.\"\"\"\n if arg_4 is None:\n arg_4 = arg_5\n if arg_2 is None:\n arg_2 = arg_3\n arg_0 = np.array(arg_0)\n if arg_0.ndim != 2:\n return False\n if arg_1:\n # If the matrix is equal to an identity up to a phase, we can\n # remove the phase by multiplying each entry by the complex\n # conjugate of the phase of the [0, 0] entry.\n arg_6 = np.angle(arg_0[0, 0])\n arg_0 = np.exp(-1j * arg_6) * arg_0\n # Check if square identity\n arg_7 = np.eye(len(arg_0))\n return np.allclose(arg_0, arg_7, arg_2=arg_2, arg_4=arg_4)"} +{"_id": "doc_2497", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Test if an array is a unitary matrix.\"\"\"\n if arg_3 is None:\n arg_3 = arg_4\n if arg_1 is None:\n arg_1 = arg_2\n arg_0 = np.array(arg_0)\n # Compute A^dagger.A and see if it is identity matrix\n arg_0 = np.conj(arg_0.T).dot(arg_0)\n return is_identity_matrix(arg_0, ignore_phase=False, arg_1=arg_1, arg_3=arg_3)"} +{"_id": "doc_2498", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Transform a QuantumChannel to the Choi representation.\"\"\"\n if arg_0 == 'Choi':\n return arg_1\n if arg_0 == 'Operator':\n return _from_operator('Choi', arg_1, arg_2, arg_3)\n if arg_0 == 'SuperOp':\n return _superopFunc(arg_1, arg_2, arg_3)\n if arg_0 == 'Kraus':\n return _krausFunc(arg_1, arg_2, arg_3)\n if arg_0 == 'Chi':\n return _chiFunc(arg_1, arg_2, arg_3)\n if arg_0 == 'PTM':\n arg_1 = _ptm_to_superop(arg_1, arg_2, arg_3)\n return _superopFunc(arg_1, arg_2, arg_3)\n if arg_0 == 'Stinespring':\n return _stinespringFunc(arg_1, arg_2, arg_3)\n raise QiskitError('Invalid QuantumChannel {}'.format(arg_0))"} +{"_id": 
"doc_2499", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Transform a QuantumChannel to the PTM representation.\"\"\"\n if arg_0 == 'PTM':\n return arg_1\n # Check valid n-qubit input\n _check_nqubit_dim(arg_2, arg_3)\n if arg_0 == 'Operator':\n return _from_operator('PTM', arg_1, arg_2, arg_3)\n # Convert via Superoperator representation\n if arg_0 != 'SuperOp':\n arg_1 = _to_superop(arg_0, arg_1, arg_2, arg_3)\n return _superopFunc(arg_1, arg_2, arg_3)"} +{"_id": "doc_2500", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Transform Operator representation to other representation.\"\"\"\n if arg_0 == 'Operator':\n return arg_1\n if arg_0 == 'SuperOp':\n return np.kron(np.conj(arg_1), arg_1)\n if arg_0 == 'Choi':\n arg_4 = np.ravel(arg_1, order='F')\n return np.outer(arg_4, np.conj(arg_4))\n if arg_0 == 'Kraus':\n return ([arg_1], None)\n if arg_0 == 'Stinespring':\n return (arg_1, None)\n if arg_0 == 'Chi':\n _check_nqubit_dim(arg_2, arg_3)\n arg_1 = Func('Choi', arg_1, arg_2, arg_3)\n return _choi_to_chi(arg_1, arg_2, arg_3)\n if arg_0 == 'PTM':\n _check_nqubit_dim(arg_2, arg_3)\n arg_1 = Func('SuperOp', arg_1, arg_2, arg_3)\n return _superop_to_ptm(arg_1, arg_2, arg_3)\n raise QiskitError('Invalid QuantumChannel {}'.format(arg_0))"} +{"_id": "doc_2501", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Transform Stinespring representation to Operator representation.\"\"\"\n arg_3 = arg_0[0].shape[0] // arg_2\n if arg_0[1] is not None or arg_3 != 1:\n raise QiskitError(\n 'Channel cannot be converted to Operator representation')\n return arg_0[0]"} +{"_id": "doc_2502", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Transform SuperOp representation to Choi representation.\"\"\"\n arg_3 = (arg_2, arg_2, arg_1, arg_1)\n return _reshuffle(arg_0, arg_3)"} +{"_id": "doc_2503", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Transform Choi to SuperOp representation.\"\"\"\n arg_3 = (arg_1, arg_2, arg_1, arg_2)\n return _reshuffle(arg_0, arg_3)"} +{"_id": "doc_2504", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=arg_4):\n \"\"\"Transform Choi representation to Kraus representation.\"\"\"\n # Check if hermitian matrix\n if is_hermitian_matrix(arg_0, arg_3=arg_3):\n # Get eigen-decomposition of Choi-matrix\n arg_5, arg_6 = la.eigh(arg_0)\n # Check eigenvaleus are non-negative\n if len(arg_5[arg_5 < -arg_3]) == 0:\n # CP-map Kraus representation\n arg_7 = []\n for arg_8, arg_9 in zip(arg_5, arg_6.T):\n if abs(arg_8) > arg_3:\n arg_10 = np.sqrt(arg_8) * arg_9.reshape(\n (arg_2, arg_1), order='F')\n arg_7.append(arg_10)\n # If we are converting a zero matrix, we need to return a Kraus set\n # with a single zero-element Kraus matrix\n if not arg_7:\n arg_7.append(np.zeros((arg_2, arg_1), dtype=complex))\n return (arg_7, None)\n # Non-CP-map generalized Kraus representation\n arg_11, arg_12, arg_13 = la.svd(arg_0)\n arg_14 = []\n arg_15 = []\n for arg_8, arg_16, arg_17 in zip(arg_12, arg_11.T, arg_13.conj()):\n arg_14.append(\n np.sqrt(arg_8) * arg_16.reshape((arg_2, arg_1), order='F'))\n arg_15.append(\n np.sqrt(arg_8) * arg_17.reshape((arg_2, arg_1), order='F'))\n return (arg_14, arg_15)"} +{"_id": "doc_2505", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Transform Stinespring representation to Choi representation.\"\"\"\n arg_3 = arg_0[0].shape[0] // arg_2\n arg_4 = np.reshape(arg_0[0], (arg_2, arg_3, arg_1))\n if arg_0[1] is None:\n arg_5 = arg_4\n else:\n arg_5 = np.reshape(arg_0[1], (arg_2, 
arg_3, arg_1))\n return np.reshape(\n np.einsum('iAj,kAl->jilk', arg_4, arg_5.conj()),\n 2 * [arg_1 * arg_2])"} +{"_id": "doc_2506", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Transform Chi representation to a Choi representation.\"\"\"\n arg_3 = int(np.log2(arg_1))\n return _transform_from_pauli(arg_0, arg_3)"} +{"_id": "doc_2507", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Reravel two bipartite matrices.\"\"\"\n # Reshuffle indicies\n arg_4 = arg_2[:2] + arg_3[:2]\n arg_5 = arg_2[2:] + arg_3[2:]\n arg_6 = arg_4 + arg_5\n arg_7 = (np.product(arg_4), np.product(arg_5))\n # Tensor product matrices\n arg_8 = np.kron(arg_0, arg_1)\n arg_8 = np.reshape(\n np.transpose(np.reshape(arg_8, arg_6), (0, 2, 1, 3, 4, 6, 5, 7)),\n arg_7)\n return arg_8"} +{"_id": "doc_2508", "title": "", "text": "def Func(arg_0):\n \"\"\"Resets Bloch sphere data sets to empty.\n \"\"\"\n arg_0.points = []\n arg_0.vectors = []\n arg_0.point_style = []\n arg_0.annotations = []"} +{"_id": "doc_2509", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Add a text or LaTeX annotation to Bloch sphere,\n parametrized by a qubit state or a vector.\n\n Args:\n state_or_vector (array_like):\n Position for the annotation.\n Qobj of a qubit or a vector of 3 elements.\n text (str):\n Annotation text.\n You can use LaTeX, but remember to use raw string\n e.g. r\"$\\\\langle x \\\\rangle$\"\n or escape backslashes\n e.g. \"$\\\\\\\\langle x \\\\\\\\rangle$\".\n **kwargs:\n Options as for mplot3d.axes3d.text, including:\n fontsize, color, horizontalalignment, verticalalignment.\n Raises:\n Exception: If input not array_like or tuple.\n \"\"\"\n if isinstance(arg_1, (list, np.ndarray, tuple)) \\\n and len(arg_1) == 3:\n arg_4 = arg_1\n else:\n raise Exception(\"Position needs to be specified by a qubit \" +\n \"state or a 3D vector.\")\n arg_0.annotations.append({'position': arg_4,\n 'text': arg_2,\n 'opts': arg_3})"} +{"_id": "doc_2510", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Render the Bloch sphere and its data sets in on given figure and axes.\n \"\"\"\n if arg_0._Funced:\n arg_0.axes.clear()\n\n arg_0._Funced = True\n\n # Figure instance for Bloch sphere plot\n if not arg_0._ext_fig:\n arg_0.fig = plt.figure(figsize=arg_0.figsize)\n\n if not arg_0._ext_axes:\n arg_0.axes = Axes3D(arg_0.fig, azim=arg_0.view[0], elev=arg_0.view[1])\n\n if arg_0.background:\n arg_0.axes.clear()\n arg_0.axes.set_xlim3d(-1.3, 1.3)\n arg_0.axes.set_ylim3d(-1.3, 1.3)\n arg_0.axes.set_zlim3d(-1.3, 1.3)\n else:\n arg_0.plot_axes()\n arg_0.axes.set_axis_off()\n arg_0.axes.set_xlim3d(-0.7, 0.7)\n arg_0.axes.set_ylim3d(-0.7, 0.7)\n arg_0.axes.set_zlim3d(-0.7, 0.7)\n\n arg_0.axes.grid(False)\n arg_0.plot_back()\n arg_0.plot_points()\n arg_0.plot_vectors()\n arg_0.plot_front()\n arg_0.plot_axes_labels()\n arg_0.plot_annotations()\n arg_0.axes.set_title(arg_1, fontsize=arg_0.font_size, y=1.08)"} +{"_id": "doc_2511", "title": "", "text": "def Func(arg_0):\n \"\"\"front half of sphere\"\"\"\n arg_1 = np.linspace(-np.pi, 0, 25)\n arg_2 = np.linspace(0, np.pi, 25)\n arg_3 = np.outer(np.cos(arg_1), np.sin(arg_2))\n arg_4 = np.outer(np.sin(arg_1), np.sin(arg_2))\n arg_5 = np.outer(np.ones(arg_1.shape[0]), np.cos(arg_2))\n arg_0.axes.plot_surface(arg_3, arg_4, arg_5, rstride=2, cstride=2,\n color=arg_0.sphere_color, linewidth=0,\n alpha=arg_0.sphere_alpha)\n # wireframe\n arg_0.axes.plot_wireframe(arg_3, arg_4, arg_5, rstride=5, cstride=5,\n color=arg_0.frame_color,\n 
alpha=arg_0.frame_alpha)\n # equator\n arg_0.axes.plot(1.0 * np.cos(arg_1), 1.0 * np.sin(arg_1),\n zs=0, zdir='z', lw=arg_0.frame_width,\n color=arg_0.frame_color)\n arg_0.axes.plot(1.0 * np.cos(arg_1), 1.0 * np.sin(arg_1),\n zs=0, zdir='x', lw=arg_0.frame_width,\n color=arg_0.frame_color)"} +{"_id": "doc_2512", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Display Bloch sphere and corresponding data sets.\n \"\"\"\n arg_0.render(arg_1=arg_1)\n if arg_0.fig:\n plt.Func(arg_0.fig)"} +{"_id": "doc_2513", "title": "", "text": "def Func(arg_0):\n \"\"\" Constructs the Func line of the element\"\"\"\n arg_1 = arg_0.Func_format % arg_0.Func_connect.center(\n arg_0.width, arg_0.Func_pad)\n if arg_0.right_fill:\n arg_1 = arg_1.ljust(arg_0.right_fill, arg_0.Func_pad)\n if arg_0.left_fill:\n arg_1 = arg_1.rjust(arg_0.left_fill, arg_0.Func_pad)\n arg_1 = arg_1.center(arg_0.layer_width, arg_0.Func_bck)\n return arg_1"} +{"_id": "doc_2514", "title": "", "text": "def Func(arg_0):\n \"\"\" Constructs the Functom line of the element\"\"\"\n arg_1 = arg_0.Func_format % arg_0.Func_connect.center(\n arg_0.width, arg_0.Func_pad)\n if arg_0.right_fill:\n arg_1 = arg_1.ljust(arg_0.right_fill, arg_0.Func_pad)\n if arg_0.left_fill:\n arg_1 = arg_1.rjust(arg_0.left_fill, arg_0.Func_pad)\n arg_1 = arg_1.center(arg_0.layer_width, arg_0.Func_bck)\n return arg_1"} +{"_id": "doc_2515", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the params and format them to add them to a label. None if there\n are no params of if the params are numpy.ndarrays.\"\"\"\n\n if not hasattr(arg_0.op, 'params'):\n return None\n if all([isinstance(arg_1, ndarray) for arg_1 in arg_0.op.params]):\n return None\n\n arg_2 = []\n for arg_1 in arg_0.op.params:\n if isinstance(arg_1, (sympy.Number, float)):\n arg_2.append('%.5g' % arg_1)\n else:\n arg_2.append('%s' % arg_1)\n return arg_2"} +{"_id": "doc_2516", "title": "", "text": "def Func(arg_0):\n \"\"\" Creates the label for a box.\"\"\"\n arg_1 = arg_0.name.capitalize()\n arg_2 = TextDrawing.params_for_label(arg_0)\n if arg_2:\n arg_1 += \"(%s)\" % ','.join(arg_2)\n return arg_1"} +{"_id": "doc_2517", "title": "", "text": "def Func():\n \"\"\"Apply filters to deprecation warnings.\n\n Force the `DeprecationWarning` warnings to be displayed for the qiskit\n module, overriding the system configuration as they are ignored by default\n [1] for end-users. Additionally, silence the `ChangedInMarshmallow3Warning`\n messages.\n\n TODO: on Python 3.7, this might not be needed due to PEP-0565 [2].\n\n [1] https://docs.python.org/3/library/warnings.html#default-warning-filters\n [2] https://www.python.org/dev/peps/pep-0565/\n \"\"\"\n arg_0 = ('always', None, DeprecationWarning,\n re.compile(r'^qiskit\\.*', re.UNICODE), 0)\n\n # Instead of using warnings.simple_filter() directly, the internal\n # _add_filter() function is used for being able to match against the\n # module.\n try:\n warnings._add_filter(*arg_0, append=False)\n except AttributeError:\n # ._add_filter is internal and not available in some Python versions.\n pass\n\n # Add a filter for ignoring ChangedInMarshmallow3Warning, as we depend on\n # marhsmallow 2 explicitly. 
2.17.0 introduced new deprecation warnings that\n # are useful for eventually migrating, but too verbose for our purposes.\n warnings.simplefilter('ignore', category=ChangedInMarshmallow3Warning)"} +{"_id": "doc_2518", "title": "", "text": "def Func():\n \"\"\"Basic hardware information about the local machine.\n\n Gives actual number of CPU's in the machine, even when hyperthreading is\n turned on. CPU count defaults to 1 when true count can't be determined.\n\n Returns:\n dict: The hardware information.\n \"\"\"\n arg_0 = {\n 'os': platform.system(),\n 'memory': psutil.virtual_memory().total / (1024 ** 3),\n 'cpus': psutil.cpu_count(logical=False) or 1\n }\n return arg_0"} +{"_id": "doc_2519", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=False):\n \"\"\"Internal function that updates the status\n of an HTML job monitor.\n\n Args:\n job_var (BaseJob): The job to keep track of.\n interval (int): The status check interval\n status (widget): HTML ipywidget for output to screen\n header (str): String representing HTML code for status.\n _interval_set (bool): Was interval set by user?\n \"\"\"\n arg_5 = arg_0.status()\n arg_6 = arg_5.name\n arg_7 = arg_5.value\n arg_2.value = arg_3 % (arg_7)\n while arg_6 not in ['DONE', 'CANCELLED']:\n time.sleep(arg_1)\n arg_5 = arg_0.status()\n arg_6 = arg_5.name\n arg_7 = arg_5.value\n if arg_6 == 'ERROR':\n break\n else:\n if arg_6 == 'QUEUED':\n arg_7 += ' (%s)' % arg_0.queue_position()\n if not arg_4:\n arg_1 = max(arg_0.queue_position(), 2)\n else:\n if not arg_4:\n arg_1 = 2\n arg_2.value = arg_3 % (arg_7)\n\n arg_2.value = arg_3 % (arg_7)"} +{"_id": "doc_2520", "title": "", "text": "def Func(arg_0: arg_1.ndarray, arg_3: arg_4) -> arg_1.ndarray:\n \"\"\"Continuous Func pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Complex pulse amplitude.\n \"\"\"\n return arg_1.full(len(arg_0), arg_3, dtype=arg_1.complex_)"} +{"_id": "doc_2521", "title": "", "text": "def Func(arg_0: arg_1.ndarray, arg_3: arg_4, arg_5: arg_6, arg_7: arg_6 = 0) -> arg_1.ndarray:\n \"\"\"Continuous Func wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n period: Pulse period, units of dt.\n phase: Pulse phase.\n \"\"\"\n return arg_3*(-2*arg_1.abs(sawtooth(arg_0, 1, arg_5, (arg_7-arg_1.pi/2)/2)) + 1).astype(arg_1.complex_)"} +{"_id": "doc_2522", "title": "", "text": "def Func(arg_0: arg_1.ndarray, arg_3: arg_4, arg_5: arg_6, arg_7: arg_6 = 0) -> arg_1.ndarray:\n \"\"\"Continuous Funcine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return arg_3*arg_1.Func(2*arg_1.pi*arg_5*arg_0+arg_7).astype(arg_1.complex_)"} +{"_id": "doc_2523", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_2,\n arg_5: arg_6[None, arg_2] = None, arg_7: arg_8 = False,\n arg_9: arg_8 = False) -> np.ndarray:\n r\"\"\"Enforce that the supplied gaussian pulse is zeroed at a specific width.\n\n This is achieved by subtracting $\\Omega_g(center \\pm zeroed_width/2)$ from all samples.\n\n amp: Pulse amplitude at `2\\times center+1`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline to gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a gaussian pulse. 

If unsupplied,\n defaults to $2*(center+1)$ such that the samples are zero at $\\Omega_g(-1)$.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)-\\Omega_g(center\\pm zeroed_width/2)=amp$.\n ret_scale_factor: Return amplitude scale factor.\n \"\"\"\n if arg_5 is None:\n arg_5 = 2*(arg_3+1)\n\n arg_10 = gaussian(np.array([-arg_5/2]), arg_1, arg_3, arg_4)\n arg_0 -= arg_10\n arg_11 = 1.\n if arg_7:\n arg_11 = arg_1/(arg_1-arg_10)\n arg_0 *= arg_11\n\n if arg_9:\n return arg_0, arg_11\n return arg_0"} +{"_id": "doc_2524", "title": "", "text": "def Func(arg_0: arg_1.ndarray, arg_3: arg_4, arg_5: arg_6, arg_7: arg_6,\n arg_8: arg_9[None, arg_6] = None, arg_10: arg_11 = False,\n arg_12: arg_11 = False) -> arg_9[arg_1.ndarray, Tuple[arg_1.ndarray, arg_1.ndarray]]:\n r\"\"\"Continuous unnormalized Func pulse.\n\n Integrated area under curve is $\\Omega_g(amp, sigma) = amp \\times np.sqrt(2\\pi \\sigma^2)$\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`. If `zeroed_width` is set pulse amplitude at center\n will be $amp-\\Omega_g(center\\pm zeroed_width/2)$ unless `rescale_amp` is set,\n in which case all samples will be rescaled such that the center\n amplitude will be `amp`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline to Func pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a Func pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)-\\Omega_g(center\\pm zeroed_width/2)=amp$.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma.\n \"\"\"\n arg_0 = arg_1.asarray(arg_0, dtype=arg_1.complex_)\n arg_13 = (arg_0-arg_5)/arg_7\n arg_14 = arg_3*arg_1.exp(-arg_13**2/2).astype(arg_1.complex_)\n\n if arg_8 is not None:\n arg_14 = _fix_Func_width(arg_14, arg_3=arg_3, arg_5=arg_5, arg_7=arg_7,\n arg_8=arg_8, arg_10=arg_10)\n\n if arg_12:\n return arg_14, arg_13\n return arg_14"} +{"_id": "doc_2525", "title": "", "text": "def Func(arg_0: arg_1.ndarray, arg_3: arg_4, arg_5: arg_6, arg_7: arg_6,\n arg_8: arg_9 = False) -> arg_1.ndarray:\n \"\"\"Continuous unnormalized gaussian derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_gaussian: Return gaussian with which derivative was taken with.\n \"\"\"\n arg_10, arg_11 = gaussian(arg_0, arg_3=arg_3, arg_5=arg_5, arg_7=arg_7, ret_x=True)\n arg_12 = -arg_11/arg_7*arg_10\n if arg_8:\n return arg_12, arg_10\n return arg_12"} +{"_id": "doc_2526", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Test if this circuit has the register r.\n\n Args:\n register (Register): a quantum or classical register.\n\n Returns:\n bool: True if the register is contained in this circuit.\n \"\"\"\n arg_2 = False\n if (isinstance(arg_1, QuantumRegister) and\n arg_1 in arg_0.qregs):\n arg_2 = True\n elif (isinstance(arg_1, ClassicalRegister) and\n arg_1 in arg_0.cregs):\n arg_2 = True\n return arg_2"} +{"_id": "doc_2527", "title": "", "text": "def Func(arg_0):\n \"\"\"Mirror the circuit by reversing the instructions.\n\n This is done by recursively Funcing all instructions.\n It does not invert any gate.\n\n Returns:\n QuantumCircuit: the Funced circuit\n 
\"\"\"\n arg_1 = arg_0.copy(name=arg_0.name+'_Func')\n arg_1.data = []\n for arg_3, arg_4, arg_5 in reversed(arg_0.data):\n arg_1.data.append((arg_3.Func(), arg_4, arg_5))\n return arg_1"} +{"_id": "doc_2528", "title": "", "text": "def Func(arg_0):\n \"\"\"Invert this circuit.\n\n This is done by recursively inverting all gates.\n\n Returns:\n QuantumCircuit: the inverted circuit\n\n Raises:\n QiskitError: if the circuit cannot be inverted.\n \"\"\"\n arg_1 = arg_0.copy(name=arg_0.name+'_dg')\n arg_1.data = []\n for arg_3, arg_4, arg_5 in reversed(arg_0.data):\n arg_1.data.append((arg_3.Func(), arg_4, arg_5))\n return arg_1"} +{"_id": "doc_2529", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"DEPRECATED after 0.8\"\"\"\n arg_0.append(arg_1, arg_2, arg_3)"} +{"_id": "doc_2530", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add registers.\"\"\"\n if not arg_1:\n return\n\n if any([isinstance(arg_2, int) for arg_2 in arg_1]):\n # QuantumCircuit defined without registers\n if len(arg_1) == 1 and isinstance(arg_1[0], int):\n # QuantumCircuit with anonymous quantum wires e.g. QuantumCircuit(2)\n arg_1 = (QuantumRegister(arg_1[0], 'q'),)\n elif len(arg_1) == 2 and all([isinstance(arg_2, int) for arg_2 in arg_1]):\n # QuantumCircuit with anonymous wires e.g. QuantumCircuit(2, 3)\n arg_1 = (QuantumRegister(arg_1[0], 'q'), ClassicalRegister(arg_1[1], 'c'))\n else:\n raise QiskitError(\"QuantumCircuit parameters can be Registers or Integers.\"\n \" If Integers, up to 2 arguments. QuantumCircuit was called\"\n \" with %s.\" % (arg_1,))\n\n for arg_3 in arg_1:\n if arg_3 in arg_0.qregs or arg_3 in arg_0.cregs:\n raise QiskitError(\"register name \\\"%s\\\" already exists\"\n % arg_3.name)\n if isinstance(arg_3, QuantumRegister):\n arg_0.qregs.append(arg_3)\n elif isinstance(arg_3, ClassicalRegister):\n arg_0.cregs.append(arg_3)\n else:\n raise QiskitError(\"expected a register\")"} +{"_id": "doc_2531", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Raise exception if list of qubits contains duplicates.\"\"\"\n arg_2 = set(arg_1)\n if len(arg_2) != len(arg_1):\n raise QiskitError(\"duplicate qubit arguments\")"} +{"_id": "doc_2532", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Raise exception if a qarg is not in this circuit or bad format.\"\"\"\n if not all(isinstance(arg_2, tuple) and\n isinstance(arg_2[0], QuantumRegister) and\n isinstance(arg_2[1], int) for arg_2 in arg_1):\n raise QiskitError(\"qarg not (QuantumRegister, int) tuple\")\n if not all(arg_0.has_register(arg_2[0]) for arg_2 in arg_1):\n raise QiskitError(\"register not in this circuit\")\n for arg_3 in arg_1:\n arg_3[0].check_range(arg_3[1])"} +{"_id": "doc_2533", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Raise exception if clbit is not in this circuit or bad format.\"\"\"\n if not all(isinstance(arg_2, tuple) and\n isinstance(arg_2[0], ClassicalRegister) and\n isinstance(arg_2[1], int) for arg_2 in arg_1):\n raise QiskitError(\"carg not (ClassicalRegister, int) tuple\")\n if not all(arg_0.has_register(arg_2[0]) for arg_2 in arg_1):\n raise QiskitError(\"register not in this circuit\")\n for arg_3 in arg_1:\n arg_3[0].check_range(arg_3[1])"} +{"_id": "doc_2534", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Raise exception if the circuits are defined on incompatible registers\"\"\"\n arg_2 = arg_0.qregs + arg_0.cregs\n arg_3 = arg_1.qregs + arg_1.cregs\n for arg_4 in arg_2:\n for arg_5 in arg_3:\n if arg_5.name == arg_4.name:\n if arg_4 != arg_5:\n raise 
QiskitError(\"circuits are not compatible\")"} +{"_id": "doc_2535", "title": "", "text": "def Func(arg_0):\n \"\"\"Return OpenQASM string.\"\"\"\n arg_1 = arg_0.header + \"\\n\"\n arg_1 += arg_0.extension_lib + \"\\n\"\n for arg_2 in arg_0.qregs:\n arg_1 += arg_2.Func() + \"\\n\"\n for arg_2 in arg_0.cregs:\n arg_1 += arg_2.Func() + \"\\n\"\n for arg_3, arg_4, arg_5 in arg_0.data:\n if arg_3.name == 'measure':\n arg_6 = arg_4[0]\n arg_7 = arg_5[0]\n arg_1 += \"%s %s[%d] -> %s[%d];\\n\" % (arg_3.Func(),\n arg_6[0].name, arg_6[1],\n arg_7[0].name, arg_7[1])\n else:\n arg_1 += \"%s %s;\\n\" % (arg_3.Func(),\n \",\".join([\"%s[%d]\" % (arg_8[0].name, arg_8[1])\n for arg_8 in arg_4 + arg_5]))\n return arg_1"} +{"_id": "doc_2536", "title": "", "text": "def Func(arg_0):\n \"\"\"Count each operation kind in the circuit.\n\n Returns:\n dict: a breakdown of how many operations of each kind.\n \"\"\"\n Func = {}\n for arg_2, arg_3, arg_3 in arg_0.data:\n if arg_2.name in Func.keys():\n Func[arg_2.name] += 1\n else:\n Func[arg_2.name] = 1\n return Func"} +{"_id": "doc_2537", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"How many non-entangled subcircuits can the circuit be factored to.\n\n Args:\n unitary_only (bool): Compute only unitary part of graph.\n\n Returns:\n int: Number of connected components in circuit.\n \"\"\"\n # Convert registers to ints (as done in depth).\n arg_2 = 0\n arg_3 = {}\n\n if arg_1:\n arg_4 = arg_0.qregs\n else:\n arg_4 = arg_0.qregs+arg_0.cregs\n\n for arg_5 in arg_4:\n arg_3[arg_5.name] = arg_2\n arg_2 += arg_5.size\n # Start with each qubit or cbit being its own subgraph.\n arg_7 = [[bit] for bit in range(arg_2)]\n\n arg_8 = len(arg_7)\n\n # Here we are traversing the gates and looking to see\n # which of the sub_graphs the gate joins together.\n for arg_9, arg_10, arg_11 in arg_0.data:\n if arg_1:\n arg_12 = arg_10\n arg_13 = len(arg_12)\n else:\n arg_12 = arg_10+arg_11\n arg_13 = len(arg_12) + (1 if arg_9.control else 0)\n\n if arg_13 >= 2 and arg_9.name not in ['barrier', 'snapshot']:\n arg_14 = []\n arg_15 = 0\n # Controls necessarily join all the cbits in the\n # register that they use.\n if arg_9.control and not arg_1:\n arg_16 = arg_9.control[0]\n arg_17 = arg_3[arg_16.name]\n for arg_18 in range(arg_16.size):\n arg_19 = arg_17+arg_18\n for arg_20 in range(arg_8):\n if arg_19 in arg_7[arg_20]:\n arg_14.append(arg_20)\n arg_15 += 1\n break\n\n for arg_21 in arg_12:\n arg_22 = arg_3[arg_21[0].name]+arg_21[1]\n for arg_20 in range(arg_8):\n if arg_22 in arg_7[arg_20]:\n if arg_20 not in arg_14:\n arg_14.append(arg_20)\n arg_15 += 1\n break\n\n # If the gate touches more than one subgraph\n # join those graphs together and return\n # reduced number of subgraphs\n if arg_15 > 1:\n arg_23 = []\n for arg_24 in arg_14:\n arg_23.extend(arg_7[arg_24])\n arg_25 = []\n for arg_24 in range(arg_8):\n if arg_24 not in arg_14:\n arg_25.append(arg_7[arg_24])\n arg_25.append(arg_23)\n arg_7 = arg_25\n arg_8 -= (arg_15-1)\n # Cannot go lower than one so break\n if arg_8 == 1:\n break\n return arg_8"} +{"_id": "doc_2538", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Assign parameters to values yielding a new circuit.\n\n Args:\n value_dict (dict): {parameter: value, ...}\n\n Raises:\n QiskitError: If value_dict contains parameters not present in the circuit\n\n Returns:\n QuantumCircuit: copy of self with assignment substitution.\n \"\"\"\n arg_2 = arg_0.copy()\n\n if arg_1.keys() > arg_0.parameters:\n raise QiskitError('Cannot bind parameters ({}) not present 
in the circuit.'.format(\n [str(arg_3) for arg_3 in arg_1.keys() - arg_0.parameters]))\n\n for arg_4, arg_5 in arg_1.items():\n arg_2._bind_parameter(arg_4, arg_5)\n # clear evaluated expressions\n for arg_4 in arg_1:\n del arg_2._parameter_table[arg_4]\n return arg_2"} +{"_id": "doc_2539", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3='None',\n arg_4=None, arg_5=False,\n arg_6=150, arg_7=1000, arg_8=(6, 5)):\n \"\"\"Plot the interpolated envelope of pulse\n\n Args:\n samples (ndarray): Data points of complex pulse envelope.\n duration (int): Pulse length (number of points).\n dt (float): Time interval of samples.\n interp_method (str): Method of interpolation\n (set `None` for turn off the interpolation).\n filename (str): Name required to save pulse image.\n interactive (bool): When set true show the circuit in a new window\n (this depends on the matplotlib backend being used supporting this).\n dpi (int): Resolution of saved image.\n nop (int): Data points for interpolation.\n size (tuple): Size of figure.\n Returns:\n matplotlib.figure: A matplotlib figure object for the pulse envelope.\n Raises:\n ImportError: when the output methods requieres non-installed libraries.\n QiskitError: when invalid interpolation method is specified.\n \"\"\"\n\n try:\n from matplotlib import pyplot as plt\n except ImportError:\n raise ImportError('Func need matplotlib. '\n 'Run \"pip install matplotlib\" before.')\n\n if arg_2:\n arg_9 = arg_2\n else:\n arg_9 = 1\n\n arg_10 = np.real(arg_0)\n arg_11 = np.imag(arg_0)\n\n arg_12 = plt.figure(figsize=arg_8)\n arg_13 = arg_12.add_subplot(111)\n\n if arg_3 == 'CubicSpline':\n # spline interpolation, use mid-point of dt\n arg_14 = np.arange(0, arg_1 + 1) * arg_9 + 0.5 * arg_9\n arg_15 = CubicSpline(arg_14[:-1], arg_10)\n arg_16 = CubicSpline(arg_14[:-1], arg_11)\n\n arg_17 = np.linspace(0, arg_1 * arg_9, arg_7)\n arg_18 = arg_15(arg_17)\n arg_19 = arg_16(arg_17)\n elif arg_3 == 'None':\n # pseudo-DAC output\n arg_14 = np.arange(0, arg_1 + 1) * arg_9\n\n arg_17 = np.r_[arg_14[0], np.repeat(arg_14[1:-1], 2), arg_14[-1]]\n arg_18 = np.repeat(arg_10, 2)\n arg_19 = np.repeat(arg_11, 2)\n else:\n raise QiskitError('Invalid interpolation method \"%s\"' % arg_3)\n\n # plot\n arg_13.fill_between(x=arg_17, y1=arg_18, y2=np.zeros_like(arg_17),\n facecolor='red', alpha=0.3,\n edgecolor='red', linewidth=1.5,\n label='real part')\n arg_13.fill_between(x=arg_17, y1=arg_19, y2=np.zeros_like(arg_17),\n facecolor='blue', alpha=0.3,\n edgecolor='blue', linewidth=1.5,\n label='imaginary part')\n\n arg_13.set_xlim(0, arg_1 * arg_9)\n arg_13.grid(b=True, linestyle='-')\n arg_13.legend(bbox_to_anchor=(0.5, 1.00), loc='lower center',\n ncol=2, frameon=False, fontsize=14)\n\n if arg_4:\n arg_12.savefig(arg_4, arg_6=arg_6, bbox_inches='tight')\n\n plt.close(arg_12)\n\n if arg_12 and arg_5:\n plt.show(arg_12)\n\n return arg_12"} +{"_id": "doc_2540", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=arg_4, arg_5=arg_6):\n \"\"\"Search for SWAPs which allow for application of largest number of gates.\n\n Arguments:\n layout (Layout): Map from virtual qubit index to physical qubit index.\n gates (list): Gates to be mapped.\n coupling_map (CouplingMap): CouplingMap of the target backend.\n depth (int): Number of SWAP layers to search before choosing a result.\n width (int): Number of SWAPs to consider at each layer.\n Returns:\n dict: Describes solution step found.\n layout (Layout): Virtual to physical qubit map after SWAPs.\n gates_remaining (list): Gates that 
could not be mapped.\n gates_mapped (list): Gates that were mapped, including added SWAPs.\n\n \"\"\"\n\n arg_7, arg_8 = _map_free_gates(arg_0, arg_1, arg_2)\n\n arg_9 = {'layout': arg_0,\n 'swaps_added': 0,\n 'gates_mapped': arg_7,\n 'gates_remaining': arg_8}\n\n if not arg_8 or arg_3 == 0:\n return arg_9\n\n arg_10 = arg_2.get_edges()\n\n def _score_swap(arg_11):\n \"\"\"Calculate the relative score for a given SWAP.\"\"\"\n arg_12 = arg_0.copy()\n arg_12.swap(*arg_11)\n return _calc_layout_distance(arg_1, arg_2, arg_12)\n\n arg_13 = sorted(arg_10, key=_score_swap)\n\n arg_14, arg_15 = None, None\n for arg_11 in arg_13[:arg_5]:\n arg_12 = arg_0.copy()\n arg_12.swap(*arg_11)\n arg_16 = Func(arg_12, arg_8,\n arg_2, arg_3 - 1, arg_5)\n\n # ranked_swaps already sorted by distance, so distance is the tie-breaker.\n if arg_14 is None or _score_step(arg_16) > _score_step(arg_15):\n arg_14, arg_15 = arg_11, arg_16\n\n arg_17 = _swap_ops_from_edge(arg_14, arg_0)\n return {\n 'layout': arg_15['layout'],\n 'swaps_added': 1 + arg_15['swaps_added'],\n 'gates_remaining': arg_15['gates_remaining'],\n 'gates_mapped': arg_7 + arg_17 + arg_15['gates_mapped'],\n }"} +{"_id": "doc_2541", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Map all gates that can be executed with the current layout.\n\n Args:\n layout (Layout): Map from virtual qubit index to physical qubit index.\n gates (list): Gates to be mapped.\n coupling_map (CouplingMap): CouplingMap for target device topology.\n\n Returns:\n tuple:\n mapped_gates (list): ops for gates that can be executed, mapped onto layout.\n remaining_gates (list): gates that cannot be executed on the layout.\n\n \"\"\"\n\n arg_3 = set()\n\n arg_4 = []\n arg_5 = []\n\n for arg_6 in arg_1:\n # Gates without a partition (barrier, snapshot, save, load, noise) may\n # still have associated qubits. 
Look for them in the qargs.\n if not arg_6['partition']:\n arg_7 = [n for n in arg_6['graph'].nodes() if n.type == 'op'][0].qargs\n\n if not arg_7:\n continue\n\n if arg_3.intersection(arg_7):\n arg_3.update(arg_7)\n arg_5.append(arg_6)\n else:\n arg_8 = _transform_gate_for_layout(arg_6, arg_0)\n arg_4.append(arg_8)\n continue\n\n arg_7 = arg_6['partition'][0]\n\n if arg_3.intersection(arg_7):\n arg_3.update(arg_7)\n arg_5.append(arg_6)\n elif len(arg_7) == 1:\n arg_8 = _transform_gate_for_layout(arg_6, arg_0)\n arg_4.append(arg_8)\n elif arg_2.distance(*[arg_0[arg_9] for arg_9 in arg_7]) == 1:\n arg_8 = _transform_gate_for_layout(arg_6, arg_0)\n arg_4.append(arg_8)\n else:\n arg_3.update(arg_7)\n arg_5.append(arg_6)\n\n return arg_4, arg_5"} +{"_id": "doc_2542", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Return the sum of the distances of two-qubit pairs in each CNOT in gates\n according to the layout and the coupling.\n \"\"\"\n\n if arg_3 is None:\n arg_3 = 50 + 10 * len(arg_1.physical_qubits)\n\n return sum(arg_1.distance(*[arg_2[arg_4] for arg_4 in arg_5['partition'][0]])\n for arg_5 in arg_0[:arg_3]\n if arg_5['partition'] and len(arg_5['partition'][0]) == 2)"} +{"_id": "doc_2543", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a copy of source_dag with metadata but empty.\n Generate only a single qreg in the output DAG, matching the size of the\n coupling_map.\"\"\"\n\n arg_2 = DAGCircuit()\n arg_2.name = arg_0.name\n\n for arg_4 in arg_0.cregs.values():\n arg_2.add_creg(arg_4)\n\n arg_5 = QuantumRegister(len(arg_1.physical_qubits), 'q')\n arg_2.add_qreg(arg_5)\n\n return arg_2"} +{"_id": "doc_2544", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return op implementing a virtual gate on given layout.\"\"\"\n\n arg_2 = deepcopy([n for n in arg_0['graph'].nodes() if n.type == 'op'][0])\n\n # Workaround until #1816, apply mapped to qargs to both DAGNode and op\n arg_3 = QuantumRegister(len(arg_1.get_physical_bits()), 'q')\n arg_4 = [(arg_3, arg_1[a]) for a in arg_2.qargs]\n arg_2.qargs = arg_2.op.qargs = arg_4\n\n arg_2.pop('name')\n\n return arg_2"} +{"_id": "doc_2545", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate list of ops to implement a SWAP gate along a coupling edge.\"\"\"\n\n arg_2 = QuantumRegister(len(arg_1.get_physical_bits()), 'q')\n arg_3 = [(arg_2, i) for i in arg_0]\n\n # TODO shouldn't be making other nodes not by the DAG!!\n return [\n DAGNode({'op': SwapGate(), 'qargs': arg_3, 'cargs': [], 'type': 'op'})\n ]"} +{"_id": "doc_2546", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Run one pass of the lookahead mapper on the provided DAG.\n\n Args:\n dag (DAGCircuit): the directed acyclic graph to be mapped\n Returns:\n DAGCircuit: A dag mapped to be compatible with the coupling_map in\n the property_set.\n Raises:\n TranspilerError: if the coupling map or the layout are not\n compatible with the DAG\n \"\"\"\n arg_2 = arg_0._coupling_map\n arg_3 = list(arg_1.serial_layers())\n\n if arg_0.initial_layout is None:\n if arg_0.property_set[\"layout\"]:\n arg_0.initial_layout = arg_0.property_set[\"layout\"]\n else:\n arg_0.initial_layout = Layout.generate_trivial_layout(*arg_1.qregs.values())\n\n if len(arg_1.qubits()) != len(arg_0.initial_layout):\n raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n\n if len(arg_0._coupling_map.physical_qubits) != len(arg_0.initial_layout):\n raise TranspilerError(\n \"Mappers require to have the layout to be the same size as the 
coupling map\")\n\n arg_5 = []\n arg_6 = arg_0.initial_layout.copy()\n arg_7 = arg_3.copy()\n\n while arg_7:\n arg_8 = _search_forward_n_swaps(arg_6, arg_7,\n arg_2)\n\n arg_6 = arg_8['layout']\n arg_9 = arg_8['gates_mapped']\n arg_7 = arg_8['gates_remaining']\n\n arg_5.extend(arg_9)\n\n # Preserve input DAG's name, regs, wire_map, etc. but replace the graph.\n arg_10 = _copy_circuit_metadata(arg_1, arg_2)\n\n for arg_11 in arg_5:\n arg_10.apply_operation_back(op=arg_11.op, qargs=arg_11.qargs, cargs=arg_11.cargs)\n\n return arg_10"} +{"_id": "doc_2547", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add a physical qubit to the coupling graph as a node.\n\n physical_qubit (int): An integer representing a physical qubit.\n\n Raises:\n CouplingError: if trying to add duplicate qubit\n \"\"\"\n if not isinstance(arg_1, int):\n raise CouplingError(\"Physical qubits should be integers.\")\n if arg_1 in arg_0.physical_qubits:\n raise CouplingError(\n \"The physical qubit %s is already in the coupling graph\" % arg_1)\n arg_0.graph.add_node(arg_1)\n arg_0._dist_matrix = None # invalidate\n arg_0._qubit_list = None"} +{"_id": "doc_2548", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a CouplingMap object for a Func of self.\n\n nodelist (list): list of integer node labels\n \"\"\"\n arg_2 = CouplingMap()\n arg_2.graph = arg_0.graph.Func(arg_1)\n for arg_4 in arg_1:\n if arg_4 not in arg_2.physical_qubits:\n arg_2.add_physical_qubit(arg_4)\n return arg_2"} +{"_id": "doc_2549", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a sorted list of Func\"\"\"\n if arg_0._qubit_list is None:\n arg_0._qubit_list = sorted([pqubit for pqubit in arg_0.graph.nodes])\n return arg_0._qubit_list"} +{"_id": "doc_2550", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the full distance matrix on pairs of nodes.\n\n The distance map self._dist_matrix is computed from the graph using\n all_pairs_shortest_path_length.\n \"\"\"\n if not arg_0.is_connected():\n raise CouplingError(\"coupling graph not connected\")\n arg_1 = nx.all_pairs_shortest_path_length(arg_0.graph.to_undirected(as_view=True))\n arg_1 = dict(arg_1)\n arg_2 = len(arg_1)\n arg_3 = arg_5.zeros((arg_2, arg_2))\n for arg_4 in range(arg_2):\n arg_3[arg_4, arg_5.fromiter(arg_1[arg_4].keys(), arg_8=arg_9)] = arg_5.fromiter(\n arg_1[arg_4].values(), arg_8=arg_9)\n arg_0._dist_matrix = arg_3"} +{"_id": "doc_2551", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns the undirected Func between physical_qubit1 and physical_qubit2.\n\n Args:\n physical_qubit1 (int): A physical qubit\n physical_qubit2 (int): Another physical qubit\n\n Returns:\n int: The undirected Func\n\n Raises:\n CouplingError: if the qubits do not exist in the CouplingMap\n \"\"\"\n if arg_1 not in arg_0.physical_qubits:\n raise CouplingError(\"%s not in coupling graph\" % (arg_1,))\n if arg_2 not in arg_0.physical_qubits:\n raise CouplingError(\"%s not in coupling graph\" % (arg_2,))\n if arg_0._dist_matrix is None:\n arg_0._compute_Func_matrix()\n return arg_0._dist_matrix[arg_1, arg_2]"} +{"_id": "doc_2552", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add controls to all instructions.\"\"\"\n for arg_2 in arg_0.instructions:\n arg_2.Func(*arg_1)\n return arg_0"} +{"_id": "doc_2553", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add classical control register to all instructions.\"\"\"\n for arg_3 in arg_0.instructions:\n arg_3.Func(arg_1, arg_2)\n return arg_0"} +{"_id": "doc_2554", "title": "", "text": "def 
Func(arg_0, arg_1, arg_2):\n \"\"\"Subscribes to an event, so when it's emitted all the callbacks Funcd,\n will be executed. We are not allowing double registration.\n\n Args\n event (string): The event to Funcd in the form of:\n \"terra...\"\n callback (callable): The callback that will be executed when an event is\n emitted.\n \"\"\"\n if not callable(arg_2):\n raise QiskitError(\"Callback is not a callable!\")\n\n if arg_1 not in arg_0._Funcrs:\n arg_0._Funcrs[arg_1] = []\n\n arg_4 = arg_0._Subscription(arg_1, arg_2)\n if arg_4 in arg_0._Funcrs[arg_1]:\n # We are not allowing double subscription\n return False\n\n arg_0._Funcrs[arg_1].append(arg_4)\n return True"} +{"_id": "doc_2555", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Emits an event if there are any subscribers.\n\n Args\n event (String): The event to be emitted\n args: Arguments linked with the event\n kwargs: Named arguments linked with the event\n \"\"\"\n # No event, no subscribers.\n if arg_1 not in arg_0._subscribers:\n return\n\n for arg_4 in arg_0._subscribers[arg_1]:\n arg_4.callback(*arg_2, **arg_3)"} +{"_id": "doc_2556", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Unsubscribe the specific callback to the event.\n\n Args\n event (String): The event to Func\n callback (callable): The callback that won't be executed anymore\n\n Returns\n True: if we have successfully Funcd to the event\n False: if there's no callback previously registered\n \"\"\"\n\n try:\n arg_0._subscribers[arg_1].remove(arg_0._Subscription(arg_1, arg_2))\n except KeyError:\n return False\n\n return True"} +{"_id": "doc_2557", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Call to create a circuit with gates that take the\n desired vector to zero.\n\n Returns:\n QuantumCircuit: circuit to take self.params vector to |00..0>\n \"\"\"\n arg_1 = QuantumRegister(arg_0.num_qubits)\n arg_2 = QuantumCircuit(arg_1, name='disentangler')\n\n # kick start the peeling loop, and disentangle one-by-one from LSB to MSB\n arg_3 = arg_0.params\n\n for arg_4 in range(arg_0.num_qubits):\n # work out which rotations must be done to disentangle the LSB\n # qubit (we peel away one qubit at a time)\n (arg_3,\n arg_5,\n arg_6) = Initialize._rotations_to_disentangle(arg_3)\n\n # perform the required rotations to decouple the LSB qubit (so that\n # it can be \"factored\" out, leaving a shorter amplitude vector to peel away)\n arg_7 = arg_0._multiplex(RZGate, arg_6)\n arg_8 = arg_0._multiplex(RYGate, arg_5)\n arg_2.append(arg_7.to_instruction(), arg_1[arg_4:arg_0.num_qubits])\n arg_2.append(arg_8.to_instruction(), arg_1[arg_4:arg_0.num_qubits])\n return arg_2"} +{"_id": "doc_2558", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks if value has the format of a virtual qubit \"\"\"\n return arg_0 is None or isinstance(arg_0, tuple) and len(arg_0) == 2 and isinstance(\n arg_0[0], Register) and isinstance(arg_0[1], int)"} +{"_id": "doc_2559", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a Func of a Layout instance.\"\"\"\n arg_1 = type(arg_0)()\n\n arg_1._p2v = arg_0._p2v.Func()\n arg_1._v2p = arg_0._v2p.Func()\n\n return arg_1"} +{"_id": "doc_2560", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks if the attribute name is in the list of attributes to protect. 
If so, raises\n TranspilerAccessError.\n\n Args:\n name (string): the attribute name to check\n\n Raises:\n TranspilerAccessError: when name is the list of attributes to protect.\n \"\"\"\n if arg_1 in object.__getattribute__(arg_0, '_attributes_to_fence'):\n raise TranspilerAccessError(\"The fenced %s has the property %s protected\" %\n (type(object.__getattribute__(arg_0, '_wrapped')), arg_1))"} +{"_id": "doc_2561", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Run the StochasticSwap pass on `dag`.\n\n Args:\n dag (DAGCircuit): DAG to map.\n\n Returns:\n DAGCircuit: A mapped DAG.\n\n Raises:\n TranspilerError: if the coupling map or the layout are not\n compatible with the DAG\n \"\"\"\n\n if arg_0.initial_layout is None:\n if arg_0.property_set[\"layout\"]:\n arg_0.initial_layout = arg_0.property_set[\"layout\"]\n else:\n arg_0.initial_layout = Layout.generate_trivial_layout(*arg_1.qregs.values())\n\n if len(arg_1.qubits()) != len(arg_0.initial_layout):\n raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n\n if len(arg_0.coupling_map.physical_qubits) != len(arg_0.initial_layout):\n raise TranspilerError(\n \"Mappers require to have the layout to be the same size as the coupling map\")\n\n arg_0.input_layout = arg_0.initial_layout.copy()\n\n arg_0.qregs = arg_1.qregs\n if arg_0.seed is None:\n arg_0.seed = np.random.randint(0, np.iinfo(np.int32).max)\n arg_0.rng = np.random.RandomState(arg_0.seed)\n logger.debug(\"StochasticSwap RandomState seeded with seed=%s\", arg_0.seed)\n\n arg_7 = arg_0._mapper(arg_1, arg_0.coupling_map, trials=arg_0.trials)\n # self.property_set[\"layout\"] = self.initial_layout\n return arg_7"} +{"_id": "doc_2562", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6):\n \"\"\"Provide a DAGCircuit for a new mapped layer.\n\n i (int) = layer number\n first_layer (bool) = True if this is the first layer in the\n circuit with any multi-qubit gates\n best_layout (Layout) = layout returned from _layer_permutation\n best_depth (int) = depth returned from _layer_permutation\n best_circuit (DAGCircuit) = swap circuit returned\n from _layer_permutation\n layer_list (list) = list of DAGCircuit objects for each layer,\n output of DAGCircuit layers() method\n\n Return a DAGCircuit object to append to the output DAGCircuit\n that the _mapper method is building.\n \"\"\"\n arg_7 = arg_3\n logger.debug(\"layer_update: layout = %s\", pformat(arg_7))\n logger.debug(\"layer_update: self.initial_layout = %s\", pformat(arg_0.initial_layout))\n arg_8 = DAGCircuit()\n for arg_9 in arg_7.get_virtual_bits().keys():\n if arg_9[0] not in arg_8.qregs.values():\n arg_8.add_qreg(arg_9[0])\n\n # If this is the first layer with multi-qubit gates,\n # output all layers up to this point and ignore any\n # swap gates. 
Set the initial layout.\n if arg_2:\n logger.debug(\"layer_update: first multi-qubit gate layer\")\n # Output all layers up to this point\n for arg_10 in range(arg_1 + 1):\n # Make qubit edge map and extend by classical bits\n arg_11 = arg_7.combine_into_edge_map(arg_0.initial_layout)\n for arg_12 in arg_8.clbits():\n arg_11[arg_12] = arg_12\n arg_8.compose_back(arg_6[arg_10][\"graph\"], arg_11)\n # Otherwise, we output the current layer and the associated swap gates.\n else:\n # Output any swaps\n if arg_4 > 0:\n logger.debug(\"layer_update: there are swaps in this layer, \"\n \"depth %d\", arg_4)\n arg_8.extend_back(arg_5)\n else:\n logger.debug(\"layer_update: there are no swaps in this layer\")\n # Make qubit edge map and extend by classical bits\n arg_11 = arg_7.combine_into_edge_map(arg_0.initial_layout)\n for arg_12 in arg_8.clbits():\n arg_11[arg_12] = arg_12\n # Output this layer\n arg_8.compose_back(arg_6[arg_1][\"graph\"], arg_11)\n\n return arg_8"} +{"_id": "doc_2563", "title": "", "text": "def Func(arg_0, arg_1='weight'):\n \"\"\"Return the Pauli group with 4^n elements.\n\n The phases have been removed.\n case 'weight' is ordered by Pauli weights and\n case 'tensor' is ordered by I,X,Y,Z counting lowest qubit fastest.\n\n Args:\n number_of_qubits (int): number of qubits\n case (str): determines ordering of group elements ('weight' or 'tensor')\n\n Returns:\n list: list of Pauli objects\n\n Raises:\n QiskitError: case is not 'weight' or 'tensor'\n QiskitError: number_of_qubits is larger than 4\n \"\"\"\n if arg_0 < 5:\n arg_2 = []\n\n if arg_1 == 'weight':\n arg_3 = Func(arg_0, arg_1='tensor')\n # sort on the weight of the Pauli operator\n return sorted(arg_3, key=lambda arg_6: -np.count_nonzero(\n np.array(arg_6.to_label(), 'c') == b'I'))\n elif arg_1 == 'tensor':\n # the Pauli set is in tensor order II IX IY IZ XI ...\n for arg_4 in range(4 ** arg_0):\n arg_5 = np.zeros(arg_0, dtype=np.bool)\n arg_6 = np.zeros(arg_0, dtype=np.bool)\n # looping over all the qubits\n for arg_7 in range(arg_0):\n # making the Pauli for each j fill it in from the\n # end first\n arg_8 = (arg_4 // (4 ** arg_7)) % 4\n if arg_8 == 1:\n arg_6[arg_7] = True\n elif arg_8 == 2:\n arg_5[arg_7] = True\n arg_6[arg_7] = True\n elif arg_8 == 3:\n arg_5[arg_7] = True\n arg_2.append(Pauli(arg_5, arg_6))\n return arg_2\n else:\n raise QiskitError(\"Only support 'weight' or 'tensor' cases \"\n \"but you have {}.\".format(arg_1))\n\n raise QiskitError(\"Only support number of qubits is less than 5\")"} +{"_id": "doc_2564", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Construct pauli from boolean array.\n\n Args:\n z (numpy.ndarray): boolean, z vector\n x (numpy.ndarray): boolean, x vector\n\n Returns:\n Pauli: self\n\n Raises:\n QiskitError: if z or x are None or the length of z and x are different.\n \"\"\"\n if arg_1 is None:\n raise QiskitError(\"z vector must not be None.\")\n if arg_2 is None:\n raise QiskitError(\"x vector must not be None.\")\n if len(arg_1) != len(arg_2):\n raise QiskitError(\"length of z and x vectors must be \"\n \"the same. 
(z: {} vs x: {})\".format(len(arg_1), len(arg_2)))\n\n arg_1 = _make_np_bool(arg_1)\n arg_2 = _make_np_bool(arg_2)\n arg_0._z = arg_1\n arg_0._x = arg_2\n\n return arg_0"} +{"_id": "doc_2565", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert to Operator object.\"\"\"\n # Place import here to avoid cyclic import from circuit visualization\n from qiskit.quantum_info.operators.operator import Operator\n return Operator(arg_0.to_matrix())"} +{"_id": "doc_2566", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert to Pauli circuit instruction.\"\"\"\n from qiskit.circuit import QuantumCircuit, QuantumRegister\n from qiskit.extensions.standard import IdGate, XGate, YGate, ZGate\n arg_1 = {'I': IdGate(), 'X': XGate(), 'Y': YGate(), 'Z': ZGate()}\n arg_2 = arg_0.to_label()\n arg_3 = arg_0.numberofqubits\n arg_4 = QuantumRegister(arg_3)\n arg_5 = QuantumCircuit(arg_4, name='Pauli:{}'.format(arg_2))\n for arg_6, arg_7 in enumerate(reversed(arg_2)):\n arg_5.append(arg_1[arg_7], [arg_4[arg_6]])\n return arg_5.Func()"} +{"_id": "doc_2567", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Update partial or entire x.\n\n Args:\n x (numpy.ndarray or list): to-be-updated x\n indices (numpy.ndarray or list or optional): to-be-updated qubit indices\n\n Returns:\n Pauli: self\n\n Raises:\n QiskitError: when updating whole x, the number of qubits must be the same.\n \"\"\"\n arg_1 = _make_np_bool(arg_1)\n if arg_2 is None:\n if len(arg_0._x) != len(arg_1):\n raise QiskitError(\"During updating whole x, you can not change \"\n \"the number of qubits.\")\n arg_0._x = arg_1\n else:\n if not isinstance(arg_2, list) and not isinstance(arg_2, np.ndarray):\n arg_2 = [arg_2]\n for arg_4, arg_5 in enumerate(arg_2):\n arg_0._x[arg_5] = arg_1[arg_4]\n\n return arg_0"} +{"_id": "doc_2568", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Append pauli at the end.\n\n Args:\n paulis (Pauli): the to-be-inserted or appended pauli\n pauli_labels (list[str]): the to-be-inserted or appended pauli label\n\n Returns:\n Pauli: self\n \"\"\"\n return arg_0.insert_paulis(None, arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_2569", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete pauli at the indices.\n\n Args:\n indices(list[int]): the indices of to-be-deleted paulis\n\n Returns:\n Pauli: self\n \"\"\"\n if not isinstance(arg_1, list):\n arg_1 = [arg_1]\n\n arg_0._z = np.delete(arg_0._z, arg_1)\n arg_0._x = np.delete(arg_0._x, arg_1)\n\n return arg_0"} +{"_id": "doc_2570", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Generate single qubit pauli at index with pauli_label with length num_qubits.\n\n Args:\n num_qubits (int): the length of pauli\n index (int): the qubit index to insert the single qubii\n pauli_label (str): pauli\n\n Returns:\n Pauli: single qubit pauli\n \"\"\"\n arg_4 = Pauli.from_label(arg_3)\n arg_5 = np.zeros(arg_1, dtype=np.bool)\n arg_6 = np.zeros(arg_1, dtype=np.bool)\n\n arg_5[arg_2] = arg_4.z[0]\n arg_6[arg_2] = arg_4.x[0]\n\n return arg_0(arg_5, arg_6)"} +{"_id": "doc_2571", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Simulate the outcome of measurement of a qubit.\n\n Args:\n qubit (int): the qubit to measure\n\n Return:\n tuple: pair (outcome, probability) where outcome is '0' or '1' and\n probability is the probability of the returned outcome.\n \"\"\"\n # Axis for numpy.sum to compute probabilities\n arg_2 = list(range(arg_0._number_of_qubits))\n arg_2.remove(arg_0._number_of_qubits - 1 - arg_1)\n arg_3 = 
np.sum(np.abs(arg_0._statevector) ** 2, arg_2=tuple(arg_2))\n # Compute einsum index string for 1-qubit matrix multiplication\n arg_4 = arg_0._local_random.rand()\n if arg_4 < arg_3[0]:\n return '0', arg_3[0]\n # Else outcome was '1'\n return '1', arg_3[1]"} +{"_id": "doc_2572", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Generate memory samples from current statevector.\n\n Args:\n measure_params (list): List of (qubit, cmembit) values for\n measure instructions to sample.\n num_samples (int): The number of memory samples to generate.\n\n Returns:\n list: A list of memory values in hex format.\n \"\"\"\n # Get unique qubits that are actually measured\n arg_3 = list({arg_6 for arg_6, arg_13 in arg_1})\n arg_4 = len(arg_3)\n # Axis for numpy.sum to compute probabilities\n arg_5 = list(range(arg_0._number_of_qubits))\n for arg_6 in reversed(arg_3):\n # Remove from largest qubit to smallest so list position is correct\n # with respect to position from end of the list\n arg_5.remove(arg_0._number_of_qubits - 1 - arg_6)\n arg_7 = np.reshape(np.sum(np.abs(arg_0._statevector) ** 2,\n arg_5=tuple(arg_5)),\n 2 ** arg_4)\n # Generate samples on measured qubits\n arg_8 = arg_0._local_random.choice(range(2 ** arg_4),\n arg_2, p=arg_7)\n # Convert to bit-strings\n arg_9 = []\n for arg_10 in arg_8:\n arg_11 = arg_0._classical_memory\n for arg_12, (arg_6, arg_13) in enumerate(sorted(arg_1)):\n arg_14 = int((arg_10 & (1 << arg_12)) >> arg_12)\n arg_15 = 1 << arg_13\n arg_11 = (arg_11 & (~arg_15)) | (arg_14 << arg_13)\n arg_16 = bin(arg_11)[2:]\n arg_9.append(hex(int(arg_16, 2)))\n return arg_9"} +{"_id": "doc_2573", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Apply a measure instruction to a qubit.\n\n Args:\n qubit (int): qubit is the qubit measured.\n cmembit (int): is the classical memory bit to store outcome in.\n cregbit (int, optional): is the classical register bit to store outcome in.\n \"\"\"\n # get measure outcome\n arg_4, arg_5 = arg_0._get_measure_outcome(arg_1)\n # update classical state\n arg_6 = 1 << arg_2\n arg_0._classical_memory = (arg_0._classical_memory & (~arg_6)) | (int(arg_4) << arg_2)\n\n if arg_3 is not None:\n arg_8 = 1 << arg_3\n arg_0._classical_register = \\\n (arg_0._classical_register & (~arg_8)) | (int(arg_4) << arg_3)\n\n # update quantum state\n if arg_4 == '0':\n arg_10 = [[1 / np.sqrt(arg_5), 0], [0, 0]]\n else:\n arg_10 = [[0, 0], [0, 1 / np.sqrt(arg_5)]]\n # update classical state\n arg_0._add_unitary_single(arg_10, arg_1)"} +{"_id": "doc_2574", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Apply a reset instruction to a qubit.\n\n Args:\n qubit (int): the qubit being rest\n\n This is done by doing a simulating a measurement\n outcome and projecting onto the outcome state while\n renormalizing.\n \"\"\"\n # get measure outcome\n arg_2, arg_3 = arg_0._get_measure_outcome(arg_1)\n # update quantum state\n if arg_2 == '0':\n arg_4 = [[1 / np.sqrt(arg_3), 0], [0, 0]]\n arg_0._add_unitary_single(arg_4, arg_1)\n else:\n arg_4 = [[0, 1 / np.sqrt(arg_3)], [0, 0]]\n arg_0._add_unitary_single(arg_4, arg_1)"} +{"_id": "doc_2575", "title": "", "text": "def Func(arg_0):\n \"\"\"Validate an initial statevector\"\"\"\n # If initial statevector isn't set we don't need to validate\n if arg_0._initial_statevector is None:\n return\n # Check statevector is correct length for number of qubits\n arg_1 = len(arg_0._initial_statevector)\n arg_2 = 2 ** arg_0._number_of_qubits\n if arg_1 != arg_2:\n raise BasicAerError('initial 
statevector is incorrect length: ' +\n '{} != {}'.format(arg_1, arg_2))"} +{"_id": "doc_2576", "title": "", "text": "def Func(arg_0):\n \"\"\"Set the initial statevector for simulation\"\"\"\n if arg_0._initial_statevector is None:\n # Set to default state of all qubits in |0>\n arg_0._statevector = np.zeros(2 ** arg_0._number_of_qubits,\n dtype=complex)\n arg_0._statevector[0] = 1\n else:\n arg_0._statevector = arg_0._initial_statevector.copy()\n # Reshape to rank-N tensor\n arg_0._statevector = np.reshape(arg_0._statevector,\n arg_0._number_of_qubits * [2])"} +{"_id": "doc_2577", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determine if measure sampling is allowed for an experiment\n\n Args:\n experiment (QobjExperiment): a qobj experiment.\n \"\"\"\n # If shots=1 we should disable measure sampling.\n # This is also required for statevector simulator to return the\n # correct final statevector without silently dropping final measurements.\n if arg_0._shots <= 1:\n arg_0._sample_measure = False\n return\n\n # Check for config flag\n if hasattr(arg_1.config, 'allows_measure_sampling'):\n arg_0._sample_measure = arg_1.config.allows_measure_sampling\n # If flag isn't found do a simple test to see if a circuit contains\n # no reset instructions, and no gates instructions after\n # the first measure.\n else:\n arg_3 = False\n for arg_4 in arg_1.instructions:\n # If circuit contains reset operations we cannot sample\n if arg_4.name == \"reset\":\n arg_0._sample_measure = False\n return\n # If circuit contains a measure option then we can\n # sample only if all following operations are measures\n if arg_3:\n # If we find a non-measure instruction\n # we cannot do measure sampling\n if arg_4.name not in [\"measure\", \"barrier\", \"id\", \"u0\"]:\n arg_0._sample_measure = False\n return\n elif arg_4.name == \"measure\":\n arg_3 = True\n # If we made it to the end of the circuit without returning\n # measure sampling is allowed\n arg_0._sample_measure = True"} +{"_id": "doc_2578", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Run qobj asynchronously.\n\n Args:\n qobj (Qobj): payload of the experiment\n backend_options (dict): backend options\n\n Returns:\n BasicAerJob: derived from BaseJob\n\n Additional Information:\n backend_options: Is a dict of options for the backend. It may contain\n * \"initial_statevector\": vector_like\n\n The \"initial_statevector\" option specifies a custom initial\n initial statevector for the simulator to be used instead of the all\n zero state. 
This size of this vector must be correct for the number\n of qubits in all experiments in the qobj.\n\n Example::\n\n backend_options = {\n \"initial_statevector\": np.array([1, 0, 0, 1j]) / np.sqrt(2),\n }\n \"\"\"\n arg_0._set_options(qobj_config=arg_1.config,\n arg_2=arg_2)\n arg_3 = str(uuid.uuid4())\n arg_4 = BasicAerJob(arg_0, arg_3, arg_0._Func_job, arg_1)\n arg_4.submit()\n return arg_4"} +{"_id": "doc_2579", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run experiments in qobj\n\n Args:\n job_id (str): unique id for the job.\n qobj (Qobj): job description\n\n Returns:\n Result: Result object\n \"\"\"\n arg_0._validate(arg_2)\n arg_3 = []\n arg_0._shots = arg_2.config.shots\n arg_0._memory = getattr(arg_2.config, 'memory', False)\n arg_0._qobj_config = arg_2.config\n arg_7 = time.time()\n for arg_8 in arg_2.experiments:\n arg_3.append(arg_0.run_experiment(arg_8))\n arg_9 = time.time()\n arg_10 = {'backend_name': arg_0.name(),\n 'backend_version': arg_0._configuration.backend_version,\n 'qobj_id': arg_2.qobj_id,\n 'job_id': arg_1,\n 'results': arg_3,\n 'status': 'COMPLETED',\n 'success': True,\n 'time_taken': (arg_9 - arg_7),\n 'header': arg_2.header.as_dict()}\n\n return Result.from_dict(arg_10)"} +{"_id": "doc_2580", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Semantic validations of the qobj which cannot be done via schemas.\"\"\"\n arg_2 = arg_1.config.n_qubits\n arg_3 = arg_0.configuration().n_qubits\n if arg_2 > arg_3:\n raise BasicAerError('Number of qubits {} '.format(arg_2) +\n 'is greater than maximum ({}) '.format(arg_3) +\n 'for \"{}\".'.format(arg_0.name()))\n for arg_4 in arg_1.experiments:\n arg_5 = arg_4.header.name\n if arg_4.config.memory_slots == 0:\n logger.warning('No classical registers in circuit \"%s\", '\n 'counts will be empty.', arg_5)\n elif 'measure' not in [arg_6.name for arg_6 in arg_4.instructions]:\n logger.warning('No measurements in circuit \"%s\", '\n 'classical register will remain all zeros.', arg_5)"} +{"_id": "doc_2581", "title": "", "text": "def Func(arg_0):\n \"\"\"Validate an initial unitary matrix\"\"\"\n # If initial unitary isn't set we don't need to validate\n if arg_0._initial_unitary is None:\n return\n # Check unitary is correct length for number of qubits\n arg_1 = np.shape(arg_0._initial_unitary)\n arg_2 = (2 ** arg_0._number_of_qubits,\n 2 ** arg_0._number_of_qubits)\n if arg_1 != arg_2:\n raise BasicAerError('initial unitary is incorrect shape: ' +\n '{} != 2 ** {}'.format(arg_1, arg_2))"} +{"_id": "doc_2582", "title": "", "text": "def Func(arg_0):\n \"\"\"Set the initial unitary for simulation\"\"\"\n arg_0._validate_initial_unitary()\n if arg_0._initial_unitary is None:\n # Set to identity matrix\n arg_0._unitary = np.eye(2 ** arg_0._number_of_qubits,\n dtype=complex)\n else:\n arg_0._unitary = arg_0._initial_unitary.copy()\n # Reshape to rank-N tensor\n arg_0._unitary = np.reshape(arg_0._unitary,\n arg_0._number_of_qubits * [2, 2])"} +{"_id": "doc_2583", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the current unitary in JSON Result spec format\"\"\"\n arg_1 = np.reshape(arg_0._unitary, 2 * [2 ** arg_0._number_of_qubits])\n # Expand complex numbers\n arg_1 = np.stack((arg_1.real, arg_1.imag), axis=-1)\n # Truncate small values\n arg_1[arg_2(arg_1) < arg_0._chop_threshold] = 0.0\n return arg_1"} +{"_id": "doc_2584", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Semantic validations of the qobj which cannot be done via schemas.\n Some of these may later move to backend schemas.\n 1. 
No shots\n 2. No measurements in the middle\n \"\"\"\n arg_2 = arg_1.config.n_qubits\n arg_3 = arg_0.configuration().n_qubits\n if arg_2 > arg_3:\n raise BasicAerError('Number of qubits {} '.format(arg_2) +\n 'is greater than maximum ({}) '.format(arg_3) +\n 'for \"{}\".'.format(arg_0.name()))\n if hasattr(arg_1.config, 'shots') and arg_1.config.shots != 1:\n logger.info('\"%s\" only supports 1 shot. Setting shots=1.',\n arg_0.name())\n arg_1.config.shots = 1\n for arg_6 in arg_1.experiments:\n arg_7 = arg_6.header.name\n if getattr(arg_6.config, 'shots', 1) != 1:\n logger.info('\"%s\" only supports 1 shot. '\n 'Setting shots=1 for circuit \"%s\".',\n arg_0.name(), arg_7)\n arg_6.config.shots = 1\n for arg_8 in arg_6.instructions:\n if arg_8.name in ['measure', 'reset']:\n raise BasicAerError('Unsupported \"%s\" instruction \"%s\" ' +\n 'in circuit \"%s\" ', arg_0.name(),\n arg_8.name, arg_7)"} +{"_id": "doc_2585", "title": "", "text": "def Func(arg_0):\n \"\"\"Determine if obj is a bit\"\"\"\n # If there is a bit type this could be replaced by isinstance.\n if isinstance(arg_0, tuple) and len(arg_0) == 2:\n if isinstance(arg_0[0], Register) and isinstance(arg_0[1], int) and arg_0[1] < len(arg_0[0]):\n return True\n return False"} +{"_id": "doc_2586", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pick a layout by assigning n circuit qubits to device qubits 0, .., n-1.\n\n Args:\n dag (DAGCircuit): DAG to find layout for.\n\n Raises:\n TranspilerError: if dag wider than self.coupling_map\n \"\"\"\n arg_2 = sum([qreg.size for qreg in arg_1.qregs.values()])\n if arg_2 > arg_0.coupling_map.size():\n raise TranspilerError('Number of qubits greater than device.')\n arg_0.property_set['layout'] = Layout.generate_trivial_layout(*arg_1.qregs.values())"} +{"_id": "doc_2587", "title": "", "text": "def Func(arg_0, *arg_1: arg_2[arg_3]) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain stop time.\n \"\"\"\n arg_4 = list(itertools.chain(*(arg_0._table[chan] for chan in arg_1\n if chan in arg_0._table)))\n if arg_4:\n return max((arg_5.end for arg_5 in arg_4))\n return 0"} +{"_id": "doc_2588", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection Funced by `time`.\n\n Args:\n time: time to be Funced by\n \"\"\"\n arg_3 = [Timeslot(slot.interval.Func(arg_1), slot.channel) for slot in arg_0.timeslots]\n return TimeslotCollection(*arg_3)"} +{"_id": "doc_2589", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Report on GitHub that the specified branch is failing to build at\n the specified commit. The method will open an issue indicating that\n the branch is failing. 
If there is an issue already open, it will add a\n comment avoiding to Func twice about the same failure.\n\n Args:\n branch (str): branch name to Func about.\n commit (str): commit hash at which the build fails.\n infourl (str): URL with extra info about the failure such as the\n build logs.\n \"\"\"\n arg_4 = arg_0._get_Func_issue_number()\n if arg_4:\n arg_0._Func_as_comment(arg_4, arg_1, arg_2, arg_3)\n else:\n arg_0._Func_as_issue(arg_1, arg_2, arg_3)"} +{"_id": "doc_2590", "title": "", "text": "def Func(arg_0):\n \"\"\" Sort rho data \"\"\"\n arg_1 = dict()\n\n arg_2 = int(np.log2(len(arg_0)))\n arg_3 = list(map(lambda x: x.to_label(), pauli_group(arg_2)))\n arg_4 = list(map(lambda x: np.real(np.trace(np.dot(x.to_matrix(), arg_0))),\n pauli_group(arg_2)))\n\n for arg_5, arg_6 in enumerate(arg_3):\n arg_1[arg_6] = arg_4[arg_5]\n return arg_1"} +{"_id": "doc_2591", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=False):\n \"\"\" Create a paulivec representation.\n\n Graphical representation of the input array.\n\n Args:\n rho (array): State vector or density matrix.\n figsize (tuple): Figure size in pixels.\n slider (bool): activate slider\n show_legend (bool): show legend of graph content\n \"\"\"\n\n # HTML\n arg_4 = Template(\"\"\"\n

    \n \"\"\")\n\n # JavaScript\n arg_5 = Template(\"\"\"\n \n \"\"\")\n arg_0 = _validate_input_state(arg_0)\n # set default figure size if none given\n if arg_1 is None:\n arg_1 = (7, 5)\n\n arg_6 = {'width': arg_1[0], 'height': arg_1[1],\n 'slider': int(arg_2), 'show_legend': int(arg_3)}\n\n # Process data and execute\n arg_7 = str(time.time())\n arg_7 = re.sub('[.]', '', arg_7)\n\n arg_8 = []\n arg_9 = process_data(arg_0)\n arg_8.append(dict(\n data=arg_9\n ))\n\n arg_10 = arg_4.substitute({\n 'divNumber': arg_7\n })\n\n arg_11 = arg_5.substitute({\n 'divNumber': arg_7,\n 'executions': arg_8,\n 'options': arg_6\n })\n\n display(HTML(arg_10 + arg_11))"} +{"_id": "doc_2592", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Apply RZZ to circuit.\"\"\"\n return arg_0.append(RZZGate(arg_1), [arg_2, arg_3], [])"} +{"_id": "doc_2593", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Apply Fredkin to circuit.\"\"\"\n return arg_0.append(FredkinGate(), [arg_1, arg_2, arg_3], [])"} +{"_id": "doc_2594", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract readout and CNOT errors and compute swap costs.\n \"\"\"\n arg_1 = arg_0.backend_prop\n for arg_2 in arg_1.gates:\n if arg_2.gate == 'cx':\n for arg_3 in arg_2.parameters:\n if arg_3.name == 'gate_error':\n arg_4 = 1.0 - arg_3.value\n break\n else:\n arg_4 = 1.0\n arg_5 = -math.log(pow(arg_4, 3))\n arg_0.swap_graph.add_edge(arg_2.qubits[0], arg_2.qubits[1], weight=arg_5)\n arg_0.swap_graph.add_edge(arg_2.qubits[1], arg_2.qubits[0], weight=arg_5)\n arg_0.cx_errors[(arg_2.qubits[0], arg_2.qubits[1])] = arg_4\n arg_0.gate_list.append((arg_2.qubits[0], arg_2.qubits[1]))\n arg_8 = 0\n for arg_9 in arg_1.qubits:\n for arg_10 in arg_9:\n if arg_10.name == 'readout_error':\n arg_0.readout_errors[arg_8] = 1.0 - arg_10.value\n arg_0.available_hw_qubits.append(arg_8)\n arg_8 += 1\n for arg_12 in arg_0.cx_errors:\n arg_0.gate_cost[arg_12] = arg_0.cx_errors[arg_12] * arg_0.readout_errors[arg_12[0]] *\\\n arg_0.readout_errors[arg_12[1]]\n arg_0.swap_paths, arg_15 = nx.algorithms.shortest_paths.dense.\\\n floyd_warshall_predecessor_and_distance(arg_0.swap_graph, weight='weight')\n for arg_16 in arg_15:\n arg_0.swap_costs[arg_16] = {}\n for arg_18 in arg_15[arg_16]:\n if (arg_16, arg_18) in arg_0.cx_errors:\n arg_0.swap_costs[arg_16][arg_18] = arg_0.cx_errors[(arg_16, arg_18)]\n elif (arg_18, arg_16) in arg_0.cx_errors:\n arg_0.swap_costs[arg_16][arg_18] = arg_0.cx_errors[(arg_18, arg_16)]\n else:\n arg_19 = 0.0\n for arg_20 in arg_0.swap_graph.neighbors(arg_18):\n if (arg_20, arg_18) in arg_0.cx_errors:\n arg_21 = math.exp(-arg_15[arg_16][arg_20])*arg_0.cx_errors[(arg_20, arg_18)]\n else:\n arg_21 = math.exp(-arg_15[arg_16][arg_20])*arg_0.cx_errors[(arg_18, arg_20)]\n if arg_21 > arg_19:\n arg_19 = arg_21\n arg_0.swap_costs[arg_16][arg_18] = arg_19"} +{"_id": "doc_2595", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Program graph has virtual qubits as nodes.\n Two nodes have an edge if the corresponding virtual qubits\n participate in a 2-qubit gate. 
The edge is weighted by the\n number of CNOTs between the pair.\n \"\"\"\n arg_2 = 0\n for arg_3 in arg_1.qubits():\n arg_0.qarg_to_id[arg_3[0].name + arg_6(arg_3[1])] = arg_2\n arg_2 += 1\n for arg_7 in arg_1.twoQ_gates():\n arg_8 = arg_0._qarg_to_id(arg_7.qargs[0])\n arg_9 = arg_0._qarg_to_id(arg_7.qargs[1])\n arg_10 = min(arg_8, arg_9)\n arg_11 = max(arg_8, arg_9)\n arg_12 = 1\n if arg_0.prog_graph.has_edge(arg_10, arg_11):\n arg_12 = arg_0.prog_graph[arg_10][arg_11]['weight'] + 1\n arg_0.prog_graph.add_edge(arg_10, arg_11, weight=arg_12)\n return arg_2"} +{"_id": "doc_2596", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Select the best remaining hardware qubit for the next program qubit.\n \"\"\"\n arg_2 = {}\n for arg_3 in arg_0.available_hw_qubits:\n arg_4 = 1\n for arg_5 in arg_0.prog_graph.neighbors(arg_1):\n if arg_5 in arg_0.prog2hw:\n arg_4 *= arg_0.swap_costs[arg_0.prog2hw[arg_5]][arg_3]\n arg_4 *= arg_0.readout_errors[arg_3]\n arg_2[arg_3] = arg_4\n arg_6 = 0\n arg_7 = None\n for arg_3 in arg_2:\n if arg_2[arg_3] > arg_6:\n arg_6 = arg_2[arg_3]\n arg_7 = arg_3\n return arg_7"} +{"_id": "doc_2597", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a list of instructions for this CompositeGate.\n\n If the CompositeGate itself contains composites, call\n this method recursively.\n \"\"\"\n Func = []\n for arg_2 in arg_0.data:\n if isinstance(arg_2, CompositeGate):\n Func.extend(arg_2.instruction_list())\n else:\n Func.append(arg_2)\n return Func"} +{"_id": "doc_2598", "title": "", "text": "def Func(arg_0):\n \"\"\"Invert this gate.\"\"\"\n arg_0.data = [gate.Func() for gate in reversed(arg_0.data)]\n arg_0.Func_flag = not arg_0.Func_flag\n return arg_0"} +{"_id": "doc_2599", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add controls to this gate.\"\"\"\n arg_0.data = [gate.Func(arg_1) for gate in arg_0.data]\n return arg_0"} +{"_id": "doc_2600", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add classical control register.\"\"\"\n arg_0.data = [gate.Func(arg_1, arg_2) for gate in arg_0.data]\n return arg_0"} +{"_id": "doc_2601", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Return True if operator is a unitary matrix.\"\"\"\n if arg_1 is None:\n arg_1 = arg_0._atol\n if arg_2 is None:\n arg_2 = arg_0._rtol\n return Func_matrix(arg_0._data, arg_2=arg_2, arg_1=arg_1)"} +{"_id": "doc_2602", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the Func of the operator.\"\"\"\n return Operator(\n np.conj(arg_0.data), arg_0.input_dims(), arg_0.output_dims())"} +{"_id": "doc_2603", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the Func of the operator.\"\"\"\n return Operator(\n np.Func(arg_0.data), arg_0.input_dims(), arg_0.output_dims())"} +{"_id": "doc_2604", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the matrix Func of the operator.\n\n Args:\n n (int): the Func to raise the matrix to.\n\n Returns:\n BaseOperator: the n-times composed operator.\n\n Raises:\n QiskitError: if the input and output dimensions of the operator\n are not equal, or the Func is not a positive integer.\n \"\"\"\n if not isinstance(arg_1, int):\n raise QiskitError(\"Can only take integer Funcs of Operator.\")\n if arg_0.input_dims() != arg_0.output_dims():\n raise QiskitError(\"Can only Func with input_dims = output_dims.\")\n # Override base class Func so we can implement more efficiently\n # using Numpy.matrix_Func\n return Operator(\n np.linalg.matrix_Func(arg_0.data, arg_1), arg_0.input_dims(),\n 
arg_0.output_dims())"} +{"_id": "doc_2605", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the tensor shape of the matrix operator\"\"\"\n return tuple(reversed(arg_0.output_dims())) + tuple(\n reversed(arg_0.input_dims()))"} +{"_id": "doc_2606", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert a QuantumCircuit or Instruction to an Operator.\"\"\"\n # Convert circuit to an instruction\n if isinstance(arg_1, QuantumCircuit):\n arg_1 = arg_1.to_instruction()\n # Initialize an identity operator of the correct size of the circuit\n arg_2 = Operator(np.eye(2 ** arg_1.num_qubits))\n arg_2._append_instruction(arg_1)\n return arg_2"} +{"_id": "doc_2607", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Separate a bitstring according to the registers defined in the result header.\"\"\"\n arg_2 = []\n arg_3 = 0\n for arg_4, arg_5 in reversed(arg_1):\n arg_2.append(arg_0[arg_3: arg_3 + arg_5])\n arg_3 += arg_5\n return ' '.join(arg_2)"} +{"_id": "doc_2608", "title": "", "text": "def Func(arg_0):\n \"\"\" Format an experiment result memory object for measurement level 1.\n\n Args:\n memory (list): Memory from experiment with `meas_level==1`. `avg` or\n `single` will be inferred from shape of result memory.\n\n Returns:\n np.ndarray: Measurement level 1 complex numpy array\n\n Raises:\n QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)\n indicies.\n \"\"\"\n arg_1 = _list_to_complex_array(arg_0)\n # infer meas_return from shape of returned data.\n if not 1 <= len(arg_1.shape) <= 2:\n raise QiskitError('Level one memory is not of correct shape.')\n return arg_1"} +{"_id": "doc_2609", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Format an experiment result memory object for measurement level 2.\n\n Args:\n memory (list): Memory from experiment with `meas_level==2` and `memory==True`.\n header (dict): the experiment header dictionary containing\n useful information for postprocessing.\n\n Returns:\n list[str]: List of bitstrings\n \"\"\"\n arg_2 = []\n for arg_3 in arg_0:\n arg_2.append(format_counts_memory(arg_3, arg_1))\n return arg_2"} +{"_id": "doc_2610", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Format a single experiment result coming from backend to present\n to the Qiskit user.\n\n Args:\n counts (dict): counts histogram of multiple shots\n header (dict): the experiment header dictionary containing\n useful information for postprocessing.\n\n Returns:\n dict: a formatted counts\n \"\"\"\n arg_2 = {}\n for arg_3, arg_4 in arg_0.items():\n arg_3 = Func_memory(arg_3, arg_1)\n arg_2[arg_3] = arg_4\n return arg_2"} +{"_id": "doc_2611", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Format statevector coming from the backend to present to the Qiskit user.\n\n Args:\n vec (list): a list of [re, im] complex numbers.\n decimals (int): the number of decimals in the statevector.\n If None, no rounding is done.\n\n Returns:\n list[complex]: a list of python complex numbers.\n \"\"\"\n arg_2 = len(arg_0)\n arg_3 = np.zeros(arg_2, dtype=complex)\n for arg_4 in range(arg_2):\n arg_3[arg_4] = arg_0[arg_4][0] + 1j * arg_0[arg_4][1]\n if arg_1:\n arg_3 = np.around(arg_3, arg_1=arg_1)\n return arg_3"} +{"_id": "doc_2612", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Format unitary coming from the backend to present to the Qiskit user.\n\n Args:\n mat (list[list]): a list of list of [re, im] complex numbers\n decimals (int): the number of decimals in the statevector.\n If None, no rounding is done.\n\n 
Returns:\n list[list[complex]]: a matrix of complex numbers\n \"\"\"\n arg_2 = len(arg_0)\n arg_3 = np.zeros((arg_2, arg_2), dtype=complex)\n for arg_4, arg_5 in enumerate(arg_0):\n arg_3[arg_4] = format_statevector(arg_5, arg_1)\n return arg_3"} +{"_id": "doc_2613", "title": "", "text": "def Func(arg_0):\n \"\"\"Submit the job to the backend for execution.\n\n Raises:\n QobjValidationError: if the JSON serialization of the Qobj passed\n during construction does not validate against the Qobj schema.\n\n JobError: if trying to re-Func the job.\n \"\"\"\n if arg_0._future is not None:\n raise JobError(\"We have already Functed the job!\")\n\n validate_qobj_against_schema(arg_0._qobj)\n arg_0._future = arg_0._executor.Func(arg_0._fn, arg_0._job_id, arg_0._qobj)"} +{"_id": "doc_2614", "title": "", "text": "def Func(arg_0):\n \"\"\"Gets the Func of the job by querying the Python's future\n\n Returns:\n qiskit.providers.JobStatus: The current JobStatus\n\n Raises:\n JobError: If the future is in unexpected state\n concurrent.futures.TimeoutError: if timeout occurred.\n \"\"\"\n # The order is important here\n if arg_0._future.running():\n arg_1 = JobStatus.RUNNING\n elif arg_0._future.cancelled():\n arg_1 = JobStatus.CANCELLED\n elif arg_0._future.done():\n arg_1 = JobStatus.DONE if arg_0._future.exception() is None else JobStatus.ERROR\n else:\n # Note: There is an undocumented Future state: PENDING, that seems to show up when\n # the job is enqueued, waiting for someone to pick it up. We need to deal with this\n # state but there's no public API for it, so we are assuming that if the job is not\n # in any of the previous states, is PENDING, ergo INITIALIZING for us.\n arg_1 = JobStatus.INITIALIZING\n\n return arg_1"} +{"_id": "doc_2615", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> bool:\n \"\"\"Whether `lo_freq` is within the `LoRange`.\n\n Args:\n lo_freq: LO frequency to be checked\n\n Returns:\n bool: True if lo_freq is included in this range, otherwise False\n \"\"\"\n if arg_0._lb <= arg_1 <= arg_0._ub:\n return True\n return False"} +{"_id": "doc_2616", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Create a bloch sphere representation.\n\n Graphical representation of the input array, using as much bloch\n spheres as qubit are required.\n\n Args:\n rho (array): State vector or density matrix\n figsize (tuple): Figure size in pixels.\n \"\"\"\n\n # HTML\n arg_2 = Template(\"\"\"\n
    \n \"\"\")\n\n # JavaScript\n arg_3 = Template(\"\"\"\n \n \"\"\")\n arg_0 = _validate_input_state(arg_0)\n if arg_1 is None:\n arg_4 = {}\n else:\n arg_4 = {'width': arg_1[0], 'height': arg_1[1]}\n\n # Process data and execute\n arg_5 = int(np.log2(len(arg_0)))\n\n arg_6 = []\n for arg_7 in range(arg_5):\n arg_8 = [Pauli.pauli_single(arg_5, arg_7, 'X'), Pauli.pauli_single(arg_5, arg_7, 'Y'),\n Pauli.pauli_single(arg_5, arg_7, 'Z')]\n arg_9 = list(map(lambda x: np.real(np.trace(np.dot(x.to_matrix(), arg_0))),\n arg_8))\n arg_6.append(arg_9)\n\n arg_10 = str(time.time())\n arg_10 = re.sub('[.]', '', arg_10)\n\n arg_11 = arg_2.substitute({\n 'divNumber': arg_10\n })\n\n arg_12 = arg_3.substitute({\n 'data': arg_6,\n 'divNumber': arg_10,\n 'options': arg_4\n })\n\n display(HTML(arg_11 + arg_12))"} +{"_id": "doc_2617", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Expand all op nodes to the given basis.\n\n Args:\n dag(DAGCircuit): input dag\n\n Raises:\n QiskitError: if unable to unroll given the basis due to undefined\n decomposition rules (such as a bad basis) or excessive recursion.\n\n Returns:\n DAGCircuit: output unrolled dag\n \"\"\"\n # Walk through the DAG and expand each non-basis node\n for arg_2 in arg_1.op_nodes():\n arg_3 = ['measure', 'reset', 'barrier', 'snapshot']\n if arg_2.name in arg_3:\n # TODO: this is legacy behavior.Basis_insts should be removed that these\n # instructions should be part of the device-reported basis. Currently, no\n # backend reports \"measure\", for example.\n continue\n if arg_2.name in arg_0.basis: # If already a base, ignore.\n continue\n\n # TODO: allow choosing other possible decompositions\n arg_4 = arg_2.op.definition\n if not arg_4:\n raise QiskitError(\"Cannot unroll the circuit to the given basis, %s. \"\n \"No rule to expand instruction %s.\" %\n (str(arg_0.basis), arg_2.op.name))\n\n # hacky way to build a dag on the same register as the rule is defined\n # TODO: need anonymous rules to address wires by index\n arg_5 = DAGCircuit()\n arg_5.add_qreg(arg_4[0][1][0][0])\n for arg_6 in arg_4:\n arg_5.apply_operation_back(*arg_6)\n\n arg_7 = arg_0.Func(arg_5) # recursively unroll ops\n arg_1.substitute_node_with_dag(arg_2, arg_7)\n return arg_1"} +{"_id": "doc_2618", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Create a Q sphere representation.\n\n Graphical representation of the input array, using a Q sphere for each\n eigenvalue.\n\n Args:\n rho (array): State vector or density matrix.\n figsize (tuple): Figure size in pixels.\n \"\"\"\n\n # HTML\n arg_2 = Template(\"\"\"\n
    \n \"\"\")\n\n # JavaScript\n arg_3 = Template(\"\"\"\n \n\n \"\"\")\n arg_0 = _validate_input_state(arg_0)\n if arg_1 is None:\n arg_4 = {}\n else:\n arg_4 = {'width': arg_1[0], 'height': arg_1[1]}\n\n arg_5 = []\n # Process data and execute\n arg_6 = int(np.log2(len(arg_0)))\n\n # get the eigenvectors and eigenvalues\n arg_7, arg_8 = linalg.eigh(arg_0)\n\n for arg_9 in range(2**arg_6):\n # start with the max\n arg_10 = arg_7.max()\n arg_11 = arg_7.argmax()\n if arg_10 > 0.001:\n # print(\"The \" + str(k) + \"th eigenvalue = \" + str(probmix))\n # get the max eigenvalue\n arg_12 = arg_8[:, arg_11]\n arg_13 = np.absolute(arg_12).argmax()\n # get the element location closes to lowest bin representation.\n for arg_14 in range(2**arg_6):\n arg_15 = np.absolute(np.absolute(arg_12[arg_14]) -\n np.absolute(arg_12[arg_13]))\n if arg_15 < 0.001:\n arg_13 = arg_14\n break\n # remove the global phase\n arg_16 = (np.angle(arg_12[arg_13]) + 2 * np.pi) % (2 * np.pi)\n arg_17 = np.exp(-1j*arg_16)\n arg_12 = arg_17*arg_12\n arg_12.flatten()\n\n arg_18 = []\n for arg_19 in range(2**arg_6):\n # get x,y,z points\n\n arg_20 = bin(arg_19)[2:].zfill(arg_6)\n arg_21 = arg_20.count(\"1\")\n\n arg_22 = n_choose_k(arg_6, arg_21)\n arg_23 = bit_string_index(arg_20)\n\n arg_24 = arg_23 * 2 * np.pi / arg_22\n\n arg_25 = -2 * arg_21 / arg_6 + 1\n arg_26 = np.sqrt(1 - arg_25**2) * np.cos(arg_24)\n arg_27 = np.sqrt(1 - arg_25**2) * np.sin(arg_24)\n\n # get prob and angle - prob will be shade and angle color\n arg_28 = np.real(np.dot(arg_12[arg_19], arg_12[arg_19].conj()))\n arg_16 = (np.angle(arg_12[arg_19]) + 2 * np.pi) % (2 * np.pi)\n arg_29 = {\n 'x': arg_26,\n 'y': arg_27,\n 'z': arg_25,\n 'prob': arg_28,\n 'phase': arg_16\n }\n arg_18.append(arg_29)\n\n # Associate all points to one sphere\n arg_30 = {\n 'points': arg_18,\n 'eigenvalue': arg_10\n }\n\n # Add sphere to the spheres array\n arg_5.append(arg_30)\n arg_7[arg_11] = 0\n\n arg_31 = str(time.time())\n arg_31 = re.sub('[.]', '', arg_31)\n\n arg_32 = arg_2.substitute({\n 'divNumber': arg_31\n })\n\n arg_33 = arg_3.substitute({\n 'data': arg_5,\n 'divNumber': arg_31,\n 'options': arg_4\n })\n\n display(HTML(arg_32 + arg_33))"} +{"_id": "doc_2619", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the lex index of a combination..\n\n Args:\n n (int): the total number of options .\n k (int): The number of elements.\n lst (list): list\n\n Returns:\n int: returns int index for lex order\n\n Raises:\n VisualizationError: if length of list is not equal to k\n \"\"\"\n if len(arg_2) != arg_1:\n raise VisualizationError(\"list should have length k\")\n arg_3 = list(map(lambda x: arg_0 - 1 - x, arg_2))\n arg_4 = sum([n_choose_k(arg_3[arg_1 - 1 - i], i + 1) for i in range(arg_1)])\n return int(arg_4)"} +{"_id": "doc_2620", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the Instruction object corresponding to the Func for the node else None\"\"\"\n if 'type' not in arg_0.data_dict or arg_0.data_dict['type'] != 'Func':\n raise QiskitError(\"The node %s is not an Func node\" % (str(arg_0)))\n return arg_0.data_dict.get('Func')"} +{"_id": "doc_2621", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3 = None) -> SamplePulse:\n \"\"\"Generates Func-sampled `SamplePulse`.\n\n Args:\n duration: Duration of pulse. 
Must be greater than Func.\n name: Name of pulse.\n \"\"\"\n return _sampled_Func_pulse(arg_0, arg_2=arg_2)"} +{"_id": "doc_2622", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5 = None,\n arg_6: arg_5 = 0, arg_7: arg_8 = None) -> SamplePulse:\n \"\"\"Generates Func wave `SamplePulse`.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\n Args:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n period: Pulse period, units of dt. If `None` defaults to single cycle.\n phase: Pulse phase.\n name: Name of pulse.\n \"\"\"\n if arg_4 is None:\n arg_4 = arg_0\n\n return _sampled_Func_pulse(arg_0, arg_2, arg_4, arg_6=arg_6, arg_7=arg_7)"} +{"_id": "doc_2623", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5 = None,\n arg_6: arg_5 = 0, arg_7: arg_8 = None) -> SamplePulse:\n \"\"\"Generates Func wave `SamplePulse`.\n\n Args:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n period: Pulse period, units of dt. If `None` defaults to single cycle.\n phase: Pulse phase.\n name: Name of pulse.\n \"\"\"\n if arg_4 is None:\n arg_4 = arg_0\n\n return _sampled_Func_pulse(arg_0, arg_2, arg_4, arg_6=arg_6, arg_7=arg_7)"} +{"_id": "doc_2624", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5 = None,\n arg_6: arg_5 = 0, arg_7: arg_8 = None) -> SamplePulse:\n \"\"\"Generates Funcine wave `SamplePulse`.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\n Args:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.\n phase: Pulse phase.\n name: Name of pulse.\n \"\"\"\n if arg_4 is None:\n arg_4 = 1/arg_0\n\n return _sampled_Func_pulse(arg_0, arg_2, arg_4, arg_6=arg_6, arg_7=arg_7)"} +{"_id": "doc_2625", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5 = None,\n arg_6: arg_5 = 0, arg_7: arg_8 = None) -> SamplePulse:\n \"\"\"Generates Funce wave `SamplePulse`.\n\n Args:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt. If `None` defaults to Funcgle cycle.\n phase: Pulse phase.\n name: Name of pulse.\n \"\"\"\n if arg_4 is None:\n arg_4 = 1/arg_0\n\n return _sampled_Func_pulse(arg_0, arg_2, arg_4, arg_6=arg_6, arg_7=arg_7)"} +{"_id": "doc_2626", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5, arg_6: arg_7 = None) -> SamplePulse:\n r\"\"\"Generates unnormalized Func `SamplePulse`.\n\n Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\n Integrated area under curve is $\\Omega_g(amp, sigma) = amp \\times np.sqrt(2\\pi \\sigma^2)$\n\n Args:\n duration: Duration of pulse. 
Must be greater than zero.\n amp: Pulse amplitude at `duration/2`.\n sigma: Width (standard deviation) of pulse.\n name: Name of pulse.\n \"\"\"\n arg_8 = arg_0/2\n arg_9 = arg_0 + 2\n return _sampled_Func_pulse(arg_0, arg_2, arg_8, arg_4,\n arg_9=arg_9, rescale_amp=True, arg_6=arg_6)"} +{"_id": "doc_2627", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5, arg_6: arg_7 = None) -> SamplePulse:\n r\"\"\"Generates unnormalized gaussian derivative `SamplePulse`.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\n Args:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude at `center`.\n sigma: Width (standard deviation) of pulse.\n name: Name of pulse.\n \"\"\"\n arg_8 = arg_0/2\n return _sampled_Func_pulse(arg_0, arg_2, arg_8, arg_4, arg_6=arg_6)"} +{"_id": "doc_2628", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3, arg_4: arg_5,\n arg_6: arg_1, arg_7: arg_8 = None) -> SamplePulse:\n \"\"\"Generates gaussian square `SamplePulse`.\n\n Centered at `duration/2` and zeroed at `t=-1` and `t=duration+1` to prevent\n large initial/final discontinuities.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\n Args:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude.\n sigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.\n risefall: Number of samples over which pulse rise and fall happen. Width of\n square portion of pulse will be `duration-2*risefall`.\n name: Name of pulse.\n \"\"\"\n arg_9 = arg_0/2\n arg_10 = arg_0-2*arg_6\n arg_11 = arg_0 + 2\n return _sampled_Func_pulse(arg_0, arg_2, arg_9, arg_10, arg_4,\n arg_11=arg_11, arg_7=arg_7)"} +{"_id": "doc_2629", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute distance.\n \"\"\"\n arg_1, arg_2 = arg_0.ax.transAxes.transform( # pylint: disable=invalid-name\n (0, 0))\n arg_3, arg_4 = arg_0.ax.transAxes.transform( # pylint: disable=invalid-name\n (1, 1))\n arg_5 = arg_3 - arg_1 if arg_0.x else arg_4 - arg_2\n return arg_5"} +{"_id": "doc_2630", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Print the node data, with indent.\"\"\"\n arg_2 = arg_1 * ' '\n print(arg_2, 'qreg')\n arg_0.children[0].Func(arg_1 + 3)"} +{"_id": "doc_2631", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Rename a classical or quantum register throughout the circuit.\n\n regname = existing register name string\n newname = replacement register name string\n \"\"\"\n if arg_1 == arg_2:\n return\n if arg_2 in arg_0.qregs or arg_2 in arg_0.cregs:\n raise DAGCircuitError(\"duplicate register name %s\" % arg_2)\n if arg_1 not in arg_0.qregs and arg_1 not in arg_0.cregs:\n raise DAGCircuitError(\"no register named %s\" % arg_1)\n if arg_1 in arg_0.qregs:\n arg_3 = arg_0.qregs[arg_1]\n arg_3.name = arg_2\n arg_0.qregs[arg_2] = arg_3\n arg_0.qregs.pop(arg_1, None)\n if arg_1 in arg_0.cregs:\n arg_3 = arg_0.cregs[arg_1]\n arg_3.name = arg_2\n arg_0.qregs[arg_2] = arg_3\n arg_0.qregs.pop(arg_1, None)\n\n for arg_6 in arg_0._multi_graph.nodes():\n if arg_6.type == \"in\" or arg_6.type == \"out\":\n if arg_6.name and arg_1 in arg_6.name:\n arg_6.name = arg_2\n elif arg_6.type == \"op\":\n arg_7 = []\n for arg_8 in arg_6.qargs:\n if arg_8[0] == arg_1:\n arg_8 = (arg_2, arg_8[1])\n arg_7.append(arg_8)\n arg_6.qargs = arg_7\n arg_10 = []\n for arg_8 in arg_6.cargs:\n if arg_8[0] == arg_1:\n arg_8 = (arg_2, arg_8[1])\n arg_10.append(arg_8)\n arg_6.cargs = arg_10\n if 
arg_6.condition is not None:\n if arg_6.condition[0] == arg_1:\n arg_6.condition = (arg_2, arg_6.condition[1])\n # eX = edge, d= data\n for arg_13, arg_13, arg_14 in arg_0._multi_graph.edges(data=True):\n if arg_1 in arg_14['name']:\n arg_14['name'] = re.sub(arg_1, arg_2, arg_14['name'])"} +{"_id": "doc_2632", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add all wires in a classical register.\"\"\"\n if not isinstance(arg_1, ClassicalRegister):\n raise DAGCircuitError(\"not a ClassicalRegister instance.\")\n if arg_1.name in arg_0.cregs:\n raise DAGCircuitError(\"duplicate register %s\" % arg_1.name)\n arg_0.cregs[arg_1.name] = arg_1\n for arg_4 in range(arg_1.size):\n arg_0._add_wire((arg_1, arg_4))"} +{"_id": "doc_2633", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add a qubit or bit to the circuit.\n\n Args:\n wire (tuple): (Register,int) containing a register instance and index\n This adds a pair of in and out nodes connected by an edge.\n\n Raises:\n DAGCircuitError: if trying to add duplicate wire\n \"\"\"\n if arg_1 not in arg_0.wires:\n arg_0.wires.append(arg_1)\n arg_0._max_node_id += 1\n arg_2 = arg_0.input_map[arg_1] = arg_0._max_node_id\n\n arg_0._max_node_id += 1\n arg_3 = arg_0._max_node_id\n\n arg_4 = \"%s[%s]\" % (arg_1[0].name, arg_1[1])\n\n arg_5 = DAGNode(data_dict={'type': 'in', 'name': arg_4, 'wire': arg_1},\n nid=arg_2)\n arg_6 = DAGNode(data_dict={'type': 'out', 'name': arg_4, 'wire': arg_1},\n nid=arg_3)\n arg_0._id_to_node[arg_2] = arg_5\n arg_0._id_to_node[arg_3] = arg_6\n\n arg_0.input_map[arg_1] = arg_5\n arg_0.output_map[arg_1] = arg_6\n\n arg_0._multi_graph.add_node(arg_5)\n arg_0._multi_graph.add_node(arg_6)\n\n arg_0._multi_graph.add_edge(arg_5,\n arg_6)\n\n arg_0._multi_graph.adj[arg_5][arg_6][0][\"name\"] \\\n = \"%s[%s]\" % (arg_1[0].name, arg_1[1])\n arg_0._multi_graph.adj[arg_5][arg_6][0][\"wire\"] \\\n = arg_1\n else:\n raise DAGCircuitError(\"duplicate wire %s\" % (arg_1,))"} +{"_id": "doc_2634", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Add a new operation node to the graph and assign properties.\n\n Args:\n op (Instruction): the operation associated with the DAG node\n qargs (list): list of quantum wires to attach to.\n cargs (list): list of classical wires to attach to.\n condition (tuple or None): optional condition (ClassicalRegister, int)\n \"\"\"\n arg_5 = {\n \"type\": \"op\",\n \"op\": arg_1,\n \"name\": arg_1.name,\n \"qargs\": arg_2,\n \"cargs\": arg_3,\n \"condition\": arg_4\n }\n\n # Add a new operation node to the graph\n arg_0._max_node_id += 1\n arg_6 = DAGNode(data_dict=arg_5, nid=arg_0._max_node_id)\n arg_0._multi_graph.add_node(arg_6)\n arg_0._id_to_node[arg_0._max_node_id] = arg_6"} +{"_id": "doc_2635", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check that the wiremap is consistent.\n\n Check that the wiremap refers to valid wires and that\n those wires have consistent types.\n\n Args:\n wire_map (dict): map from (register,idx) in keymap to\n (register,idx) in valmap\n keymap (dict): a map whose keys are wire_map keys\n valmap (dict): a map whose keys are wire_map values\n\n Raises:\n DAGCircuitError: if wire_map not valid\n \"\"\"\n for arg_4, arg_5 in arg_1.items():\n arg_6 = \"%s[%d]\" % (arg_4[0].name, arg_4[1])\n arg_7 = \"%s[%d]\" % (arg_5[0].name, arg_5[1])\n if arg_4 not in arg_2:\n raise DAGCircuitError(\"invalid wire mapping key %s\" % arg_6)\n if arg_5 not in arg_3:\n raise DAGCircuitError(\"invalid wire mapping value %s\" % arg_7)\n if 
type(arg_4) is not type(arg_5):\n raise DAGCircuitError(\"inconsistent wire_map at (%s,%s)\" %\n (arg_6, arg_7))"} +{"_id": "doc_2636", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Use the wire_map dict to change the condition tuple's creg name.\n\n Args:\n wire_map (dict): a map from wires to wires\n condition (tuple): (ClassicalRegister,int)\n Returns:\n tuple(ClassicalRegister,int): new condition\n \"\"\"\n if arg_2 is None:\n arg_3 = None\n else:\n # Map the register name, using fact that registers must not be\n # fragmented by the wire_map (this must have been checked\n # elsewhere)\n arg_4 = (arg_2[0], 0)\n arg_3 = (arg_1.get(arg_4, arg_4)[0], arg_2[1])\n return arg_3"} +{"_id": "doc_2637", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Apply the input circuit to the output of this circuit.\n\n The two bases must be \"compatible\" or an exception occurs.\n A subset of input qubits of the input circuit are mapped\n to a subset of output qubits of this circuit.\n\n Args:\n input_circuit (DAGCircuit): circuit to append\n edge_map (dict): map {(Register, int): (Register, int)}\n from the output wires of input_circuit to input wires\n of self.\n\n Raises:\n DAGCircuitError: if missing, duplicate or incosistent wire\n \"\"\"\n arg_2 = arg_2 or {}\n\n # Check the wire map for duplicate values\n if len(set(arg_2.values())) != len(arg_2):\n raise DAGCircuitError(\"duplicates in wire_map\")\n\n arg_3 = arg_0._check_edgemap_registers(arg_2,\n arg_1.qregs,\n arg_0.qregs)\n for arg_4 in arg_3:\n arg_0.add_qreg(arg_4)\n\n arg_5 = arg_0._check_edgemap_registers(arg_2,\n arg_1.cregs,\n arg_0.cregs)\n for arg_6 in arg_5:\n arg_0.add_creg(arg_6)\n\n arg_0._check_wiremap_validity(arg_2, arg_1.input_map,\n arg_0.output_map)\n\n # Compose\n for arg_7 in arg_1.topological_nodes():\n if arg_7.type == \"in\":\n # if in wire_map, get new name, else use existing name\n arg_8 = arg_2.get(arg_7.wire, arg_7.wire)\n # the mapped wire should already exist\n if arg_8 not in arg_0.output_map:\n raise DAGCircuitError(\"wire %s[%d] not in self\" % (arg_8[0].name, arg_8[1]))\n\n if arg_7.wire not in arg_1.wires:\n raise DAGCircuitError(\"inconsistent wire type for %s[%d] in input_circuit\"\n % (arg_7.wire[0].name, arg_7.wire[1]))\n\n elif arg_7.type == \"out\":\n # ignore output nodes\n pass\n elif arg_7.type == \"op\":\n arg_9 = arg_0._map_condition(arg_2, arg_7.condition)\n arg_0._check_condition(arg_7.name, arg_9)\n arg_10 = list(map(lambda x: arg_2.get(x, x), arg_7.qargs))\n arg_11 = list(map(lambda x: arg_2.get(x, x), arg_7.cargs))\n arg_0.apply_operation_back(arg_7.op, arg_10, arg_11, arg_9)\n else:\n raise DAGCircuitError(\"bad node type %s\" % arg_7.type)"} +{"_id": "doc_2638", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check that a list of wires is compatible with a node to be replaced.\n\n - no duplicate names\n - correct length for operation\n Raise an exception otherwise.\n\n Args:\n wires (list[register, index]): gives an order for (qu)bits\n in the input circuit that is replacing the node.\n node (DAGNode): a node in the dag\n\n Raises:\n DAGCircuitError: if check doesn't pass.\n \"\"\"\n if len(set(arg_1)) != len(arg_1):\n raise DAGCircuitError(\"duplicate wires\")\n\n arg_3 = len(arg_2.qargs) + len(arg_2.cargs)\n if arg_2.condition is not None:\n arg_3 += arg_2.condition[0].size\n\n if len(arg_1) != arg_3:\n raise DAGCircuitError(\"expected %d wires, got %d\"\n % (arg_3, len(arg_1)))"} +{"_id": "doc_2639", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"Return predecessor and successor dictionaries.\n\n Args:\n node (DAGNode): reference to multi_graph node\n\n Returns:\n tuple(dict): tuple(predecessor_map, successor_map)\n These map from wire (Register, int) to predecessor (successor)\n nodes of n.\n \"\"\"\n\n arg_2 = {e[2]['wire']: e[0] for e in\n arg_0._multi_graph.in_edges(nbunch=arg_1, data=True)}\n arg_3 = {e[2]['wire']: e[1] for e in\n arg_0._multi_graph.out_edges(nbunch=arg_1, data=True)}\n return arg_2, arg_3"} +{"_id": "doc_2640", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4):\n \"\"\"Map all wires of the input circuit.\n\n Map all wires of the input circuit to predecessor and\n successor nodes in self, keyed on wires in self.\n\n Args:\n pred_map (dict): comes from _make_pred_succ_maps\n succ_map (dict): comes from _make_pred_succ_maps\n input_circuit (DAGCircuit): the input circuit\n wire_map (dict): the map from wires of input_circuit to wires of self\n\n Returns:\n tuple: full_pred_map, full_succ_map (dict, dict)\n\n Raises:\n DAGCircuitError: if more than one predecessor for output nodes\n \"\"\"\n arg_5 = {}\n arg_6 = {}\n for arg_7 in arg_3.input_map:\n # If w is wire mapped, find the corresponding predecessor\n # of the node\n if arg_7 in arg_4:\n arg_5[arg_4[arg_7]] = arg_1[arg_4[arg_7]]\n arg_6[arg_4[arg_7]] = arg_2[arg_4[arg_7]]\n else:\n # Otherwise, use the corresponding output nodes of self\n # and compute the predecessor.\n arg_6[arg_7] = arg_0.output_map[arg_7]\n arg_5[arg_7] = arg_0._multi_graph.predecessors(\n arg_0.output_map[arg_7])[0]\n if len(list(arg_0._multi_graph.predecessors(arg_0.output_map[arg_7]))) != 1:\n raise DAGCircuitError(\"too many predecessors for %s[%d] \"\n \"output node\" % (arg_7[0], arg_7[1]))\n\n return arg_5, arg_6"} +{"_id": "doc_2641", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Yield nodes in topological order.\n\n Returns:\n generator(DAGNode): node in topological order\n \"\"\"\n return nx.lexicographical_topological_sort(arg_0._multi_graph,\n key=lambda x: str(x.qargs))"} +{"_id": "doc_2642", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Get the list of \"op\" nodes in the dag.\n\n Args:\n op (Type): Instruction subclass op nodes to return. if op=None, return\n all op nodes.\n Returns:\n list[DAGNode]: the list of node ids containing the given op.\n \"\"\"\n arg_2 = []\n for arg_3 in arg_0._multi_graph.nodes():\n if arg_3.type == \"op\":\n if arg_1 is None or isinstance(arg_3.op, arg_1):\n arg_2.append(arg_3)\n return arg_2"} +{"_id": "doc_2643", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the list of gate nodes in the dag.\n\n Returns:\n list: the list of node ids that represent gates.\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.op_nodes():\n if isinstance(arg_2.op, Gate):\n arg_1.append(arg_2)\n return arg_1"} +{"_id": "doc_2644", "title": "", "text": "def Func(arg_0):\n \"\"\"Get list of 2-qubit gates. 
Ignore snapshot, barriers, and the like.\"\"\"\n arg_1 = []\n for arg_2 in arg_0.gate_nodes():\n if len(arg_2.qargs) == 2:\n arg_1.append(arg_2)\n return arg_1"} +{"_id": "doc_2645", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns set of the Func of a node as DAGNodes.\"\"\"\n if isinstance(arg_1, int):\n warnings.warn('Calling Func() with a node id is deprecated,'\n ' use a DAGNode instead',\n DeprecationWarning, 2)\n arg_1 = arg_0._id_to_node[arg_1]\n\n return nx.Func(arg_0._multi_graph, arg_1)"} +{"_id": "doc_2646", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns list of the successors of a node that are\n connected by a quantum edge as DAGNodes.\"\"\"\n if isinstance(arg_1, int):\n warnings.warn('Calling Func() with a node id is deprecated,'\n ' use a DAGNode instead',\n DeprecationWarning, 2)\n arg_1 = arg_0._id_to_node[arg_1]\n\n arg_2 = []\n for arg_3 in arg_0.successors(arg_1):\n if isinstance(arg_0._multi_graph.get_edge_data(\n arg_1, arg_3, key=0)['wire'][0],\n QuantumRegister):\n arg_2.append(arg_3)\n return arg_2"} +{"_id": "doc_2647", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove an operation node n.\n\n Add edges from predecessors to successors.\n \"\"\"\n if isinstance(arg_1, int):\n warnings.warn('Calling Func() with a node id is deprecated,'\n ' use a DAGNode instead',\n DeprecationWarning, 2)\n arg_1 = arg_0._id_to_node[arg_1]\n\n if arg_1.type != 'op':\n raise DAGCircuitError('The method Func only works on op node types. An \"%s\" '\n 'node type was wrongly provided.' % arg_1.type)\n\n arg_2, arg_3 = arg_0._make_pred_succ_maps(arg_1)\n\n # remove from graph and map\n arg_0._multi_graph.remove_node(arg_1)\n\n for arg_4 in arg_2.keys():\n arg_0._multi_graph.add_edge(arg_2[arg_4], arg_3[arg_4],\n name=\"%s[%s]\" % (arg_4[0].name, arg_4[1]), wire=arg_4)"} +{"_id": "doc_2648", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove all of the descendant operation nodes of node.\"\"\"\n if isinstance(arg_1, int):\n warnings.warn('Calling Func() with a node id is deprecated,'\n ' use a DAGNode instead',\n DeprecationWarning, 2)\n arg_1 = arg_0._id_to_node[arg_1]\n\n arg_2 = nx.descendants(arg_0._multi_graph, arg_1)\n for arg_3 in arg_2:\n if arg_3.type == \"op\":\n arg_0.remove_op_node(arg_3)"} +{"_id": "doc_2649", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove all of the non-ancestors operation nodes of node.\"\"\"\n if isinstance(arg_1, int):\n warnings.warn('Calling Func() with a node id is deprecated,'\n ' use a DAGNode instead',\n DeprecationWarning, 2)\n arg_1 = arg_0._id_to_node[arg_1]\n\n arg_2 = nx.ancestors(arg_0._multi_graph, arg_1)\n arg_3 = list(set(arg_0._multi_graph.nodes()) - set(arg_2))\n for arg_4 in arg_3:\n if arg_4.type == \"op\":\n arg_0.remove_op_node(arg_4)"} +{"_id": "doc_2650", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield a shallow view on a layer of this DAGCircuit for all d Func of this circuit.\n\n A layer is a circuit whose gates act on disjoint qubits, i.e.\n a layer has depth 1. The total number of Func equals the\n circuit depth d. The Func are indexed from 0 to d-1 with the\n earliest layer at index 0. The Func are constructed using a\n greedy algorithm. Each returned layer is a dict containing\n {\"graph\": circuit graph, \"partition\": list of qubit lists}.\n\n TODO: Gates that use the same cbits will end up in different\n Func as this is currently implemented. 
This may not be\n the desired behavior.\n \"\"\"\n arg_1 = arg_0.multigraph_Func()\n try:\n next(arg_1) # Remove input nodes\n except StopIteration:\n return\n\n def add_nodes_from(arg_2, arg_3):\n \"\"\" Convert DAGNodes into a format that can be added to a\n multigraph and then add to graph\"\"\"\n arg_2._multi_graph.add_nodes_from(arg_3)\n\n for arg_4 in arg_1:\n\n # Get the op nodes from the layer, removing any input and output nodes.\n arg_5 = [node for node in arg_4 if node.type == \"op\"]\n\n # Stop yielding once there are no more op_nodes in a layer.\n if not arg_5:\n return\n\n # Construct a shallow copy of self\n arg_6 = DAGCircuit()\n arg_6.name = arg_0.name\n\n for arg_8 in arg_0.cregs.values():\n arg_6.add_creg(arg_8)\n for arg_9 in arg_0.qregs.values():\n arg_6.add_qreg(arg_9)\n\n add_nodes_from(arg_6, arg_0.input_map.values())\n add_nodes_from(arg_6, arg_0.output_map.values())\n add_nodes_from(arg_6, arg_5)\n\n # The quantum registers that have an operation in this layer.\n arg_10 = [\n arg_12.qargs\n for arg_12 in arg_5\n if arg_12.name not in {\"barrier\", \"snapshot\", \"save\", \"load\", \"noise\"}\n ]\n\n # Now add the edges to the multi_graph\n # By default we just wire inputs to the outputs.\n arg_11 = {arg_0.input_map[wire]: arg_0.output_map[wire]\n for wire in arg_0.wires}\n # Wire inputs to op nodes, and op nodes to outputs.\n for arg_12 in arg_5:\n arg_13 = arg_0._bits_in_condition(arg_12.condition) \\\n + arg_12.cargs + arg_12.qargs\n arg_14 = (arg_0.input_map[(arg[0], arg[1])] for arg in arg_13)\n for arg_15 in arg_14:\n arg_11[arg_15], arg_11[arg_12] = arg_12, arg_11[arg_15]\n\n # Add wiring to/from the operations and between unused inputs & outputs.\n arg_6._multi_graph.add_edges_from(arg_11.items())\n yield {\"graph\": arg_6, \"partition\": arg_10}"} +{"_id": "doc_2651", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a set of non-conditional runs of \"op\" nodes with the given names.\n\n For example, \"... h q[0]; cx q[0],q[1]; cx q[0],q[1]; h q[1]; ..\"\n would produce the tuple of cx nodes as an element of the set returned\n from a call to Func([\"cx\"]). If instead the cx nodes were\n \"cx q[0],q[1]; cx q[1],q[0];\", the method would still return the\n pair in a tuple. 
The namelist can contain names that are not\n in the circuit's basis.\n\n Nodes must have only one successor to continue the run.\n \"\"\"\n arg_2 = []\n\n # Iterate through the nodes of self in topological order\n # and form tuples containing sequences of gates\n # on the same qubit(s).\n arg_3 = list(arg_0.topological_op_nodes())\n arg_4 = dict(zip(arg_3, [False] * len(arg_3)))\n for arg_5 in arg_3:\n if arg_5.name in arg_1 and arg_5.condition is None \\\n and not arg_4[arg_5]:\n arg_6 = [arg_5]\n arg_4[arg_5] = True\n arg_7 = list(arg_0._multi_graph.successors(arg_5))\n while len(arg_7) == 1 and \\\n arg_7[0].type == \"op\" and \\\n arg_7[0].name in arg_1:\n arg_6.append(arg_7[0])\n arg_4[arg_7[0]] = True\n arg_7 = list(arg_0._multi_graph.successors(arg_7[0]))\n if len(arg_6) >= 1:\n arg_2.append(tuple(arg_6))\n return set(arg_2)"} +{"_id": "doc_2652", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Iterator for nodes that affect a given wire\n\n Args:\n wire (tuple(Register, index)): the wire to be looked at.\n only_ops (bool): True if only the ops nodes are wanted\n otherwise all nodes are returned.\n Yield:\n DAGNode: the successive ops on the given wire\n\n Raises:\n DAGCircuitError: if the given wire doesn't exist in the DAG\n \"\"\"\n arg_3 = arg_0.input_map.get(arg_1, None)\n\n if not arg_3:\n raise DAGCircuitError('The given wire %s is not present in the circuit'\n % str(arg_1))\n\n arg_4 = True\n while arg_4:\n arg_4 = False\n # allow user to just get ops on the wire - not the input/output nodes\n if arg_3.type == 'op' or not arg_2:\n yield arg_3\n\n # find the adjacent node that takes the wire being looked at as input\n for arg_5, arg_6 in arg_0._multi_graph.adj[arg_3].items():\n if any(arg_1 == arg_7['wire'] for arg_7 in arg_6.values()):\n arg_3 = arg_5\n arg_4 = True\n break"} +{"_id": "doc_2653", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Generate a TomographyBasis object.\n\n See TomographyBasis for further details.abs\n\n Args:\n prep_fun (callable) optional: the function which adds preparation\n gates to a circuit.\n meas_fun (callable) optional: the function which adds measurement\n gates to a circuit.\n\n Returns:\n TomographyBasis: A tomography basis.\n \"\"\"\n arg_3 = TomographyBasis(arg_0)\n arg_3.prep_fun = arg_1\n arg_3.meas_fun = arg_2\n return arg_3"} +{"_id": "doc_2654", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add state measurement gates to a circuit.\n \"\"\"\n if arg_2 not in ['X', 'Y', 'Z']:\n raise QiskitError(\"There's no X, Y or Z basis for this Pauli \"\n \"measurement\")\n\n if arg_2 == \"X\":\n arg_0.u2(0., np.pi, arg_1) # H\n elif arg_2 == \"Y\":\n arg_0.u2(0., 0.5 * np.pi, arg_1)"} +{"_id": "doc_2655", "title": "", "text": "def Func(arg_0,\n arg_1='Pauli',\n arg_2=None,\n arg_3=None):\n \"\"\"\n Generate a dictionary of tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n Quantum State Tomography:\n Be default it will return a set for performing Quantum State\n Tomography where individual qubits are measured in the Pauli basis.\n A custom measurement basis may also be used by defining a user\n `tomography_basis` and passing this in for the `meas_basis` argument.\n\n Quantum Process Tomography:\n A quantum process tomography set is created by specifying a preparation\n basis along with 
a measurement basis. The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis 'SIC'\n or 'Pauli'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n meas_qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is 'Pauli'.\n prep_qubits (list or None): The qubits being prepared. If None then\n meas_qubits will be used for process tomography experiments.\n prep_basis (tomography_basis or None): The optional qubit preparation\n basis. If no basis is specified state tomography will be performed\n instead of process tomography. A built in basis may be specified by\n 'SIC' or 'Pauli' (SIC basis recommended for > 2 qubits).\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. This output contains\n fields \"qubits\", \"meas_basis\", \"circuits\". It may also optionally\n contain a field \"prep_basis\" for process tomography experiments.\n ```\n {\n 'qubits': qubits (list[ints]),\n 'meas_basis': meas_basis (tomography_basis),\n 'circuit_labels': (list[string]),\n 'circuits': (list[dict]) # prep and meas configurations\n # optionally for process tomography experiments:\n 'prep_basis': prep_basis (tomography_basis)\n }\n ```\n Raises:\n QiskitError: if the Qubits argument is not a list.\n \"\"\"\n if not isinstance(arg_0, list):\n raise QiskitError('Qubits argument must be a list')\n arg_4 = len(arg_0)\n\n if arg_2 is None:\n arg_2 = arg_0\n if not isinstance(arg_2, list):\n raise QiskitError('prep_qubits argument must be a list')\n if len(arg_2) != len(arg_0):\n raise QiskitError('meas_qubits and prep_qubitsare different length')\n\n if isinstance(arg_1, str):\n if arg_1.lower() == 'pauli':\n arg_1 = PAULI_BASIS\n\n if isinstance(arg_3, str):\n if arg_3.lower() == 'pauli':\n arg_3 = PAULI_BASIS\n elif arg_3.lower() == 'sic':\n arg_3 = SIC_BASIS\n\n arg_5 = []\n arg_6 = []\n\n # add meas basis configs\n if arg_3 is None:\n # State Tomography\n for arg_7 in product(arg_1.keys(), repeat=arg_4):\n arg_8 = dict(zip(arg_0, arg_7))\n arg_5.append({'meas': arg_8})\n # Make label\n arg_9 = '_meas_'\n for arg_10, arg_11 in arg_8.items():\n arg_9 += '%s(%d)' % (arg_11[0], arg_10)\n arg_6.append(arg_9)\n return {'qubits': arg_0,\n 'circuits': arg_5,\n 'circuit_labels': arg_6,\n 'meas_basis': arg_1}\n\n # Process Tomography\n arg_12 = len(list(arg_3.values())[0])\n arg_13 = [(b, s)\n for b in arg_3.keys()\n for s in range(arg_12)]\n for arg_14 in product(arg_13, repeat=arg_4):\n for arg_7 in product(arg_1.keys(),\n repeat=arg_4):\n arg_15 = dict(zip(arg_2, arg_14))\n arg_8 = dict(zip(arg_0, arg_7))\n arg_5.append({'prep': arg_15, 'meas': arg_8})\n # Make label\n arg_9 = '_prep_'\n for arg_10, arg_11 in arg_15.items():\n arg_9 += '%s%d(%d)' % (arg_11[0], arg_11[1], arg_10)\n arg_9 += '_meas_'\n for arg_10, arg_11 in arg_8.items():\n arg_9 += '%s(%d)' % (arg_11[0], arg_10)\n arg_6.append(arg_9)\n return {'qubits': arg_0,\n 'circuits': arg_5,\n 'circuit_labels': arg_6,\n 'prep_basis': 
arg_3,\n 'meas_basis': arg_1}"} +{"_id": "doc_2656", "title": "", "text": "def Func(arg_0, arg_1='Pauli',\n arg_2=None, arg_3='SIC'):\n \"\"\"\n Generate a dictionary of process tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n A quantum process tomography set is created by specifying a preparation\n basis along with a measurement basis. The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis 'SIC'\n or 'Pauli'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n meas_qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is 'Pauli'.\n prep_qubits (list or None): The qubits being prepared. If None then\n meas_qubits will be used for process tomography experiments.\n prep_basis (tomography_basis or str): The qubit preparation basis.\n The default value is 'SIC'.\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. This output contains\n fields \"qubits\", \"meas_basis\", \"prep_basus\", circuits\".\n ```\n {\n 'qubits': qubits (list[ints]),\n 'meas_basis': meas_basis (tomography_basis),\n 'prep_basis': prep_basis (tomography_basis),\n 'circuit_labels': (list[string]),\n 'circuits': (list[dict]) # prep and meas configurations\n }\n ```\n \"\"\"\n return tomography_set(arg_0, arg_1=arg_1,\n arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_2657", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Add tomography measurement circuits to a QuantumProgram.\n\n The quantum program must contain a circuit 'name', which is treated as a\n state preparation circuit for state tomography, or as teh circuit being\n measured for process tomography. 
This function then appends the circuit\n with a set of measurements specified by the input `tomography_set`,\n optionally it also prepends the circuit with state preparation circuits if\n they are specified in the `tomography_set`.\n\n For n-qubit tomography with a tomographically complete set of preparations\n and measurements this results in $4^n 3^n$ circuits being added to the\n quantum program.\n\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of quantum tomography circuits for the input circuit.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit\n\n Example:\n For a tomography set specifying state tomography of qubit-0 prepared\n by a circuit 'circ' this would return:\n ```\n ['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']\n ```\n For process tomography of the same circuit with preparation in the\n SIC-POVM basis it would return:\n ```\n [\n 'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',\n 'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',\n 'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',\n 'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',\n 'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',\n 'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'\n ]\n ```\n \"\"\"\n\n if not isinstance(arg_0, QuantumCircuit):\n raise QiskitError('Input circuit must be a QuantumCircuit object')\n\n arg_4 = arg_3['circuits']\n arg_5 = tomography_circuit_names(arg_3, arg_0.name)\n arg_6 = []\n for arg_7, arg_8 in zip(arg_5, arg_4):\n arg_9 = arg_0\n # Add prep circuits\n if 'prep' in arg_8:\n arg_10 = QuantumCircuit(arg_1, arg_2, arg_14='tmp_prep')\n for arg_11, arg_12 in arg_8['prep'].items():\n arg_3['prep_basis'].prep_gate(arg_10, arg_1[arg_11], arg_12)\n arg_10.barrier(arg_1[arg_11])\n arg_9 = arg_10 + arg_9\n # Add measurement circuits\n arg_13 = QuantumCircuit(arg_1, arg_2, arg_14='tmp_meas')\n for arg_11, arg_12 in arg_8['meas'].items():\n arg_13.barrier(arg_1[arg_11])\n arg_3['meas_basis'].meas_gate(arg_13, arg_1[arg_11], arg_12)\n arg_13.measure(arg_1[arg_11], arg_2[arg_11])\n arg_9 = arg_9 + arg_13\n # Add label to the circuit\n arg_9.name = arg_7\n arg_6.append(arg_9)\n\n logger.info('>> created tomography circuits for \"%s\"', arg_0.name)\n return arg_6"} +{"_id": "doc_2658", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return a results dict for a state or process tomography experiment.\n\n Args:\n results (Result): Results from execution of a process tomography\n circuits on a backend.\n name (string): The name of the circuit being reconstructed.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of dicts for the outcome of each process tomography\n measurement circuit.\n \"\"\"\n\n arg_3 = tomography_circuit_names(arg_2, arg_1)\n arg_4 = arg_2['circuits']\n arg_5 = []\n arg_6 = None\n for arg_7, arg_8 in enumerate(arg_3):\n arg_9 = marginal_counts(arg_0.get_counts(arg_3[arg_7]),\n arg_2['qubits'])\n arg_10 = sum(arg_9.values())\n arg_11 = arg_4[arg_7]['meas']\n arg_6 = arg_4[arg_7].get('prep', None)\n arg_12 = sorted(arg_11.keys())\n if arg_6:\n arg_13 = sorted(arg_6.keys())\n arg_14 = {}\n for arg_15 in 
arg_9.keys():\n arg_14[arg_15] = {}\n arg_14[arg_15]['meas'] = [(arg_11[arg_12[k]], int(arg_15[-1 - k]))\n for k in range(len(arg_12))]\n if arg_6:\n arg_14[arg_15]['prep'] = [arg_6[arg_13[k]]\n for k in range(len(arg_13))]\n arg_5.append({'counts': arg_9, 'shots': arg_10, 'circuit': arg_14})\n\n arg_16 = {'data': arg_5, 'meas_basis': arg_2['meas_basis']}\n if arg_6:\n arg_16['prep_basis'] = arg_2['prep_basis']\n return arg_16"} +{"_id": "doc_2659", "title": "", "text": "def Func(arg_0, arg_1='wizard', arg_2=None):\n \"\"\"\n Reconstruct a density matrix or process-matrix from tomography data.\n\n If the input data is state_tomography_data the returned operator will\n be a density matrix. If the input data is process_tomography_data the\n returned operator will be a Choi-matrix in the column-vectorization\n convention.\n\n Args:\n tomo_data (dict): process tomography measurement data.\n method (str): the fitting method to use.\n Available methods:\n - 'wizard' (default)\n - 'leastsq'\n options (dict or None): additional options for fitting method.\n\n Returns:\n numpy.array: The fitted operator.\n\n Available methods:\n - 'wizard' (Default): The returned operator will be constrained to be\n positive-semidefinite.\n Options:\n - 'trace': the trace of the returned operator.\n The default value is 1.\n - 'beta': hedging parameter for computing frequencies from\n zero-count data. The default value is 0.50922.\n - 'epsilon: threshold for truncating small eigenvalues to zero.\n The default value is 0\n - 'leastsq': Fitting without positive-semidefinite constraint.\n Options:\n - 'trace': Same as for 'wizard' method.\n - 'beta': Same as for 'wizard' method.\n Raises:\n Exception: if the `method` parameter is not valid.\n \"\"\"\n\n if isinstance(arg_1, str) and arg_1.lower() in ['wizard', 'leastsq']:\n # get options\n arg_3 = __get_option('trace', arg_2)\n arg_4 = __get_option('beta', arg_2)\n # fit state\n arg_5 = __leastsq_fit(arg_0, arg_3=arg_3, arg_4=arg_4)\n if arg_1 == 'wizard':\n # Use wizard method to constrain positivity\n arg_6 = __get_option('epsilon', arg_2)\n arg_5 = __wizard(arg_5, arg_6=arg_6)\n return arg_5\n else:\n raise Exception('Invalid reconstruction method \"%s\"' % arg_1)"} +{"_id": "doc_2660", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Reconstruct a state from unconstrained least-squares fitting.\n\n Args:\n tomo_data (list[dict]): state or process tomography data.\n weights (list or array or None): weights to use for least squares\n fitting. The default is standard deviation from a binomial\n distribution.\n trace (float or None): trace of returned operator. The default is 1.\n beta (float or None): hedge parameter (>=0) for computing frequencies\n from zero-count data. The default value is 0.50922.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n \"\"\"\n if arg_2 is None:\n arg_2 = 1. 
# default to unit trace\n\n arg_4 = arg_0['data']\n arg_5 = arg_4[0]['circuit'].keys()\n\n # Get counts and shots\n arg_6 = []\n arg_7 = []\n arg_8 = []\n for arg_9 in arg_4:\n for arg_10 in arg_5:\n arg_6.append(arg_9['counts'][arg_10])\n arg_7.append(arg_9['shots'])\n arg_11 = arg_9['circuit'][arg_10]\n arg_12 = __projector(arg_11['meas'], arg_0['meas_basis'])\n if 'prep' in arg_11:\n arg_13 = __projector(arg_11['prep'],\n arg_0['prep_basis'])\n arg_12 = np.kron(arg_13.conj(), arg_12)\n arg_8.append(arg_12)\n\n # Convert counts to frequencies\n arg_6 = np.array(arg_6)\n arg_7 = np.array(arg_7)\n arg_14 = arg_6 / arg_7\n\n # Use hedged frequencies to calculate least squares fitting weights\n if arg_1 is None:\n if arg_3 is None:\n arg_3 = 0.50922\n arg_15 = len(arg_5)\n arg_16 = (arg_6 + arg_3) / (arg_7 + arg_15 * arg_3)\n arg_1 = np.sqrt(arg_7 / (arg_16 * (1 - arg_16)))\n\n return __tomo_linear_inv(arg_14, arg_8, arg_1, arg_2=arg_2)"} +{"_id": "doc_2661", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a projectors.\n \"\"\"\n arg_2 = 1\n # list is from qubit 0 to 1\n for arg_3 in arg_0:\n arg_4, arg_5 = arg_3\n arg_2 = np.kron(arg_1[arg_4][arg_5], arg_2)\n return arg_2"} +{"_id": "doc_2662", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=False, arg_4=arg_5.stdout):\n \"\"\"Monitor the status of a IBMQJob instance.\n\n Args:\n job (BaseJob): Job to monitor.\n interval (int): Time interval between status queries.\n monitor_async (bool): Monitor asyncronously (in Jupyter only).\n quiet (bool): If True, do not print status messages.\n output (file): The file like object to write status messages to.\n By default this is sys.stdout.\n\n Raises:\n QiskitError: When trying to run async outside of Jupyter\n ImportError: ipywidgets not available for notebook.\n \"\"\"\n if arg_1 is None:\n arg_7 = False\n arg_1 = 2\n else:\n arg_7 = True\n if _NOTEBOOK_ENV:\n if arg_2:\n try:\n import ipywidgets as widgets # pylint: disable=import-error\n except ImportError:\n raise ImportError('These functions need ipywidgets. '\n 'Run \"pip install ipywidgets\" before.')\n from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412\n\n arg_8 = \"font-size:16px;\"\n arg_9 = \"
    Job Status: %s
    \".format(\n arg_8=arg_8)\n arg_10 = widgets.HTML(value=arg_9 % arg_0.status().value)\n display(arg_10)\n\n arg_11 = threading.Thread(target=_html_checker, args=(arg_0, arg_1,\n arg_10, arg_9))\n arg_11.start()\n else:\n _text_checker(arg_0, arg_1, arg_7,\n arg_3=arg_3, arg_4=arg_4)\n\n else:\n if arg_2:\n raise QiskitError(\n 'monitor_async only available in Jupyter notebooks.')\n _text_checker(arg_0, arg_1, arg_7, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_2663", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute Euler angles for a single-qubit gate.\n\n Find angles (theta, phi, lambda) such that\n unitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)\n\n Args:\n unitary_matrix (ndarray): 2x2 unitary matrix\n\n Returns:\n tuple: (theta, phi, lambda) Euler angles of SU(2)\n\n Raises:\n QiskitError: if unitary_matrix not 2x2, or failure\n \"\"\"\n if arg_0.shape != (2, 2):\n raise QiskitError(\"Func: expected 2x2 matrix\")\n arg_1 = la.det(arg_0)**(-1.0/2.0)\n arg_2 = arg_1 * arg_0 # U in SU(2)\n # OpenQASM SU(2) parameterization:\n # U[0, 0] = exp(-i(phi+lambda)/2) * cos(theta/2)\n # U[0, 1] = -exp(-i(phi-lambda)/2) * sin(theta/2)\n # U[1, 0] = exp(i(phi-lambda)/2) * sin(theta/2)\n # U[1, 1] = exp(i(phi+lambda)/2) * cos(theta/2)\n # Find theta\n if abs(arg_2[0, 0]) > _CUTOFF_PRECISION:\n arg_3 = 2 * math.acos(abs(arg_2[0, 0]))\n else:\n arg_3 = 2 * math.asin(abs(arg_2[1, 0]))\n # Find phi and lambda\n arg_4 = 0.0\n arg_5 = 0.0\n if abs(math.cos(arg_3/2.0)) > _CUTOFF_PRECISION:\n arg_4 = arg_2[1, 1] / math.cos(arg_3/2.0)\n if abs(math.sin(arg_3/2.0)) > _CUTOFF_PRECISION:\n arg_5 = arg_2[1, 0] / math.sin(arg_3/2.0)\n arg_6 = 2 * math.atan2(np.imag(arg_4), np.real(arg_4))\n arg_7 = 2 * math.atan2(np.imag(arg_5), np.real(arg_5))\n arg_8 = 0.0\n if abs(arg_2[0, 0]) > _CUTOFF_PRECISION and abs(arg_2[1, 0]) > _CUTOFF_PRECISION:\n arg_8 = (arg_6 + arg_7) / 2.0\n arg_9 = (arg_6 - arg_7) / 2.0\n else:\n if abs(arg_2[0, 0]) < _CUTOFF_PRECISION:\n arg_9 = -arg_7\n else:\n arg_9 = arg_6\n # Check the solution\n arg_10 = np.array([[np.exp(-1j*arg_8/2.0), 0],\n [0, np.exp(1j*arg_8/2.0)]], dtype=complex)\n arg_11 = np.array([[np.cos(arg_3/2.0), -np.sin(arg_3/2.0)],\n [np.sin(arg_3/2.0), np.cos(arg_3/2.0)]], dtype=complex)\n arg_12 = np.array([[np.exp(-1j*arg_9/2.0), 0],\n [0, np.exp(1j*arg_9/2.0)]], dtype=complex)\n arg_13 = np.dot(arg_10, np.dot(arg_11, arg_12))\n if la.norm(arg_13 - arg_2) > _CUTOFF_PRECISION:\n raise QiskitError(\"Func: incorrect result\")\n return arg_3, arg_8, arg_9"} +{"_id": "doc_2664", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Extends dag with virtual qubits that are in layout but not in the circuit yet.\n\n Args:\n dag (DAGCircuit): DAG to extend.\n\n Returns:\n DAGCircuit: An extended DAG.\n\n Raises:\n TranspilerError: If there is not layout in the property set or not set at init time.\n \"\"\"\n arg_0.layout = arg_0.layout or arg_0.property_set['layout']\n\n if arg_0.layout is None:\n raise TranspilerError(\"EnlargeWithAncilla requires property_set[\\\"layout\\\"] or\"\n \" \\\"layout\\\" parameter to Func\")\n\n arg_3 = arg_0.layout.get_virtual_bits().keys()\n arg_4 = set(virtual_qubit[0] for virtual_qubit in arg_3\n if virtual_qubit not in arg_1.wires)\n\n for arg_5 in arg_4:\n arg_1.add_qreg(arg_5)\n\n return arg_1"} +{"_id": "doc_2665", "title": "", "text": "def Func(arg_0):\n \"\"\"The qubits properties widget\n\n Args:\n backend (IBMQbackend): The backend.\n\n Returns:\n VBox: A VBox widget.\n \"\"\"\n arg_1 = arg_0.properties().to_dict()\n\n 
arg_2 = \"{key}: {value}
    \"\n arg_2 = arg_2.format(key='last_update_date',\n value=arg_1['last_update_date'])\n arg_3 = widgets.HTML(value=arg_2)\n\n arg_4 = \"\"\n arg_4 += \"\"\"\"\"\"\n\n arg_4 += \"\"\n arg_4 += \"\"\n arg_4 += \"\"\n arg_5 = \"
    Frequency | T1 | T2 | U1 gate error | U2 gate error | U3 gate error | Readout error
    \"\n\n for arg_6 in range(len(arg_1['qubits'])):\n arg_7 = 'Q%s' % arg_6\n arg_8 = arg_1['qubits'][arg_6]\n arg_9 = arg_1['gates'][3*arg_6:3*arg_6+3]\n arg_10 = arg_8[0]\n arg_11 = arg_8[1]\n arg_12 = arg_8[2]\n arg_13 = arg_8[3]\n\n arg_14 = str(round(arg_12['value'], 5))+' '+arg_12['unit']\n arg_15 = str(round(arg_10['value'], # pylint: disable=invalid-name\n 5))+' ' + arg_10['unit']\n arg_16 = str(round(arg_11['value'], # pylint: disable=invalid-name\n 5))+' ' + arg_11['unit']\n # pylint: disable=invalid-name\n arg_17 = str(round(arg_9[0]['parameters'][0]['value'], 5))\n # pylint: disable=invalid-name\n arg_18 = str(round(arg_9[1]['parameters'][0]['value'], 5))\n # pylint: disable=invalid-name\n arg_19 = str(round(arg_9[2]['parameters'][0]['value'], 5))\n\n arg_20 = round(arg_13['value'], 5)\n arg_4 += \"%s%s\"\n arg_4 += \"%s%s%s%s%s%s\"\n arg_4 = arg_4 % (arg_7, arg_14, arg_15, arg_16, arg_17, arg_18, arg_19, arg_20)\n arg_4 += arg_5\n\n arg_21 = widgets.HTML(value=arg_4)\n\n arg_22 = widgets.VBox([arg_3,\n arg_21])\n\n return arg_22"} +{"_id": "doc_2666", "title": "", "text": "def Func(arg_0):\n \"\"\"Widget for displaying job history\n\n Args:\n backend (IBMQbackend): The backend.\n\n Returns:\n Tab: A tab widget for history images.\n \"\"\"\n arg_1 = widgets.Output(layout=widgets.Layout(display='flex-inline',\n align_items='center',\n min_height='400px'))\n\n arg_2 = widgets.Output(layout=widgets.Layout(display='flex-inline',\n align_items='center',\n min_height='400px'))\n\n arg_3 = widgets.Output(layout=widgets.Layout(display='flex-inline',\n align_items='center',\n min_height='400px'))\n\n arg_4 = widgets.Tab(layout=widgets.Layout(max_height='620px'))\n arg_4.children = [arg_1, arg_2, arg_3]\n arg_4.set_title(0, 'Year')\n arg_4.set_title(1, 'Month')\n arg_4.set_title(2, 'Week')\n arg_4.selected_index = 1\n\n _build_Func(arg_4, arg_0)\n return arg_4"} +{"_id": "doc_2667", "title": "", "text": "def Func(arg_0, arg_1='year'):\n \"\"\"Plots the job history of the user from the given list of jobs.\n\n Args:\n jobs (list): A list of jobs with type IBMQjob.\n interval (str): Interval over which to examine.\n\n Returns:\n fig: A Matplotlib figure instance.\n \"\"\"\n def get_date(arg_2):\n \"\"\"Returns a datetime object from a IBMQJob instance.\n\n Args:\n job (IBMQJob): A job.\n\n Returns:\n dt: A datetime object.\n \"\"\"\n return datetime.datetime.strptime(arg_2.creation_date(),\n '%Y-%m-%dT%H:%M:%S.%fZ')\n\n arg_3 = datetime.datetime.now()\n\n if arg_1 == 'year':\n arg_4 = [(arg_3 - datetime.timedelta(days=k*365/12))\n for k in range(12)]\n elif arg_1 == 'month':\n arg_4 = [(arg_3 - datetime.timedelta(days=k)) for k in range(30)]\n elif arg_1 == 'week':\n arg_4 = [(arg_3 - datetime.timedelta(days=k)) for k in range(7)]\n\n arg_5 = [0]*len(arg_4)\n\n if arg_1 == 'year':\n for arg_2 in arg_0:\n for arg_6, arg_7 in enumerate(arg_4):\n date = get_date(arg_2)\n if date.month == arg_7.month:\n arg_5[arg_6] += 1\n break\n else:\n continue\n else:\n for arg_2 in arg_0:\n for arg_6, arg_7 in enumerate(arg_4):\n date = get_date(arg_2)\n if date.day == arg_7.day and date.month == arg_7.month:\n arg_5[arg_6] += 1\n break\n else:\n continue\n\n arg_8 = []\n arg_9 = []\n for arg_6, arg_10 in enumerate(arg_5):\n if arg_10 != 0:\n arg_9.append(arg_6)\n arg_8.append(arg_10)\n\n arg_11 = sum(arg_5)\n\n arg_12 = ['#003f5c', '#ffa600', '#374c80', '#ff764a',\n '#7a5195', '#ef5675', '#bc5090']\n\n if arg_1 == 'year':\n arg_13 = ['{}-{}'.format(str(arg_4[b].year)[2:], arg_4[b].month) for b in 
arg_9]\n else:\n arg_13 = ['{}-{}'.format(arg_4[b].month, arg_4[b].day) for b in arg_9]\n arg_14, arg_15 = plt.subplots(1, 1, figsize=(5, 5)) # pylint: disable=invalid-name\n arg_15.pie(arg_8[::-1], arg_13=arg_13, arg_12=arg_12, textprops={'fontsize': 14},\n rotatelabels=True, counterclock=False)\n arg_15.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))\n arg_15.text(0, 0, arg_11, horizontalalignment='center',\n verticalalignment='center', fontsize=26)\n arg_14.tight_layout()\n return arg_14"} +{"_id": "doc_2668", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None): # deprecated\n \"\"\"Func one or more circuits, according to some desired\n transpilation targets.\n\n All arguments may be given as either singleton or list. In case of list,\n the length must be equal to the number of circuits being Funcd.\n\n Transpilation is done in parallel using multiprocessing.\n\n Args:\n circuits (QuantumCircuit or list[QuantumCircuit]):\n Circuit(s) to Func\n\n backend (BaseBackend):\n If set, Funcr options are automatically grabbed from\n backend.configuration() and backend.properties().\n If any other option is explicitly set (e.g. coupling_map), it\n will override the backend's.\n Note: the backend arg is purely for convenience. The resulting\n circuit may be run on any backend as long as it is compatible.\n\n basis_gates (list[str]):\n List of basis gate names to unroll to.\n e.g:\n ['u1', 'u2', 'u3', 'cx']\n If None, do not unroll.\n\n coupling_map (CouplingMap or list):\n Coupling map (perhaps custom) to target in mapping.\n Multiple formats are supported:\n a. CouplingMap instance\n\n b. list\n Must be given as an adjacency matrix, where each entry\n specifies all two-qubit interactions supported by backend\n e.g:\n [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]\n\n backend_properties (BackendProperties):\n properties returned by a backend, including information on gate\n errors, readout errors, qubit coherence times, etc. For a backend\n that provides this information, it can be obtained with:\n ``backend.properties()``\n\n initial_layout (Layout or dict or list):\n Initial position of virtual qubits on physical qubits.\n If this layout makes the circuit compatible with the coupling_map\n constraints, it will be used.\n The final layout is not guaranteed to be the same, as the Funcr\n may permute qubits through swaps or other means.\n\n Multiple formats are supported:\n a. Layout instance\n\n b. dict\n virtual to physical:\n {qr[0]: 0,\n qr[1]: 3,\n qr[2]: 5}\n\n physical to virtual:\n {0: qr[0],\n 3: qr[1],\n 5: qr[2]}\n\n c. 
list\n virtual to physical:\n [0, 3, 5] # virtual qubits are ordered (in addition to named)\n\n physical to virtual:\n [qr[0], None, None, qr[1], None, qr[2]]\n\n seed_Funcr (int):\n sets random seed for the stochastic parts of the Funcr\n\n optimization_level (int):\n How much optimization to perform on the circuits.\n Higher levels generate more optimized circuits,\n at the expense of longer transpilation time.\n 0: no optimization\n 1: light optimization\n 2: heavy optimization\n\n pass_manager (PassManager):\n The pass manager to use for a custom pipeline of Funcr passes.\n If this arg is present, all other args will be ignored and the\n pass manager will be used directly (Qiskit will not attempt to\n auto-select a pass manager based on Func options).\n\n seed_mapper (int):\n DEPRECATED in 0.8: use ``seed_Funcr`` kwarg instead\n\n Returns:\n QuantumCircuit or list[QuantumCircuit]: Funcd circuit(s).\n\n Raises:\n TranspilerError: in case of bad inputs to Funcr or errors in passes\n \"\"\"\n # Deprecation matter\n if arg_9:\n warnings.warn(\"seed_mapper has been deprecated and will be removed in the \"\n \"0.9 release. Instead use seed_Funcr to set the seed \"\n \"for all stochastic parts of the.\", DeprecationWarning)\n arg_6 = arg_9\n\n # transpiling schedules is not supported yet.\n if isinstance(arg_0, Schedule) or \\\n (isinstance(arg_0, list) and all(isinstance(arg_10, Schedule) for arg_10 in arg_0)):\n return arg_0\n\n # Get TranspileConfig(s) to configure the circuit transpilation job(s)\n arg_0 = arg_0 if isinstance(arg_0, list) else [arg_0]\n arg_11 = _parse_Func_args(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5,\n arg_6, arg_7,\n arg_8)\n\n # Transpile circuits in parallel\n arg_0 = parallel_map(_Func_circuit, list(zip(arg_0, arg_11)))\n\n if len(arg_0) == 1:\n return arg_0[0]\n return arg_0"} +{"_id": "doc_2669", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None, arg_3=None, # circuit transpile options\n arg_4=None, arg_5=None,\n arg_6=None, arg_7=None, arg_8=None,\n arg_9=None, arg_10=None, arg_11=1024, # common run options\n arg_12=False, arg_13=10, arg_14=None,\n arg_15=None, arg_16=None, # schedule run options\n arg_17=None, arg_18=2, arg_19='avg',\n arg_20=None, arg_21=100, arg_22=None, arg_23=None,\n arg_24=None, arg_25=None, # deprecated\n arg_26=None, arg_27=None,\n **arg_28):\n \"\"\"Execute a list of circuits or pulse schedules on a backend.\n\n The execution is asynchronous, and a handle to a job instance is returned.\n\n Args:\n experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):\n Circuit(s) or pulse schedule(s) to Func\n\n backend (BaseBackend):\n Backend to Func circuits on.\n Transpiler options are automatically grabbed from\n backend.configuration() and backend.properties().\n If any other option is explicitly set (e.g. coupling_map), it\n will override the backend's.\n\n basis_gates (list[str]):\n List of basis gate names to unroll to.\n e.g:\n ['u1', 'u2', 'u3', 'cx']\n If None, do not unroll.\n\n coupling_map (CouplingMap or list):\n Coupling map (perhaps custom) to target in mapping.\n Multiple formats are supported:\n a. CouplingMap instance\n\n b. list\n Must be given as an adjacency matrix, where each entry\n specifies all two-qubit interactions supported by backend\n e.g:\n [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]\n\n backend_properties (BackendProperties):\n Properties returned by a backend, including information on gate\n errors, readout errors, qubit coherence times, etc. 
For a backend\n that provides this information, it can be obtained with:\n ``backend.properties()``\n\n initial_layout (Layout or dict or list):\n Initial position of virtual qubits on physical qubits.\n If this layout makes the circuit compatible with the coupling_map\n constraints, it will be used.\n The final layout is not guaranteed to be the same, as the transpiler\n may permute qubits through swaps or other means.\n\n Multiple formats are supported:\n a. Layout instance\n\n b. dict\n virtual to physical:\n {qr[0]: 0,\n qr[1]: 3,\n qr[2]: 5}\n\n physical to virtual:\n {0: qr[0],\n 3: qr[1],\n 5: qr[2]}\n\n c. list\n virtual to physical:\n [0, 3, 5] # virtual qubits are ordered (in addition to named)\n\n physical to virtual:\n [qr[0], None, None, qr[1], None, qr[2]]\n\n seed_transpiler (int):\n Sets random seed for the stochastic parts of the transpiler\n\n optimization_level (int):\n How much optimization to perform on the circuits.\n Higher levels generate more optimized circuits,\n at the expense of longer transpilation time.\n 0: no optimization\n 1: light optimization\n 2: heavy optimization\n\n pass_manager (PassManager):\n The pass manager to use during transpilation. If this arg is present,\n auto-selection of pass manager based on the transpile options will be\n turned off and this pass manager will be used directly.\n\n qobj_id (str):\n String identifier to annotate the Qobj\n\n qobj_header (QobjHeader or dict):\n User input that will be inserted in Qobj header, and will also be\n copied to the corresponding Result header. Headers do not affect the run.\n\n shots (int):\n Number of repetitions of each circuit, for sampling. Default: 2014\n\n memory (bool):\n If True, per-shot measurement bitstrings are returned as well\n (provided the backend supports it). For OpenPulse jobs, only\n measurement level 2 supports this option. Default: False\n\n max_credits (int):\n Maximum credits to spend on job. Default: 10\n\n seed_simulator (int):\n Random seed to control sampling, for when backend is a simulator\n\n default_qubit_los (list):\n List of default qubit lo frequencies\n\n default_meas_los (list):\n List of default meas lo frequencies\n\n schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or\n Union[Dict[PulseChannel, float], LoConfig]):\n Experiment LO configurations\n\n meas_level (int):\n Set the appropriate level of the measurement output for pulse experiments.\n\n meas_return (str):\n Level of measurement data for the backend to return\n For `meas_level` 0 and 1:\n \"single\" returns information from every shot.\n \"avg\" returns average measurement output (averaged over number of shots).\n\n memory_slots (int):\n Number of classical memory slots used in this job.\n\n memory_slot_size (int):\n Size of each memory slot if the output is Level 0.\n\n rep_time (int): repetition time of the experiment in \u03bcs.\n The delay between experiments will be rep_time.\n Must be from the list provided by the device.\n\n parameter_binds (list[dict{Parameter: Value}]):\n List of Parameter bindings over which the set of experiments will be\n Funcd. Each list element (bind) should be of the form\n {Parameter1: value1, Parameter2: value2, ...}. All binds will be\n Funcd across all experiments, e.g. 
if parameter_binds is a\n length-n list, and there are m experiments, a total of m x n\n experiments will be run (one for each experiment/bind pair).\n\n seed (int):\n DEPRECATED in 0.8: use ``seed_simulator`` kwarg instead\n\n seed_mapper (int):\n DEPRECATED in 0.8: use ``seed_transpiler`` kwarg instead\n\n config (dict):\n DEPRECATED in 0.8: use run_config instead\n\n circuits (QuantumCircuit or list[QuantumCircuit]):\n DEPRECATED in 0.8: use ``experiments`` kwarg instead.\n\n run_config (dict):\n Extra arguments used to configure the run (e.g. for Aer configurable backends)\n Refer to the backend documentation for details on these arguments\n Note: for now, these keyword arguments will both be copied to the\n Qobj config, and passed to backend.run()\n\n Returns:\n BaseJob: returns job instance derived from BaseJob\n\n Raises:\n QiskitError: if the execution cannot be interpreted as either circuits or schedules\n \"\"\"\n if arg_27 is not None:\n arg_0 = arg_27\n warnings.warn(\"the `circuits` arg in `Func()` has been deprecated. \"\n \"please use `experiments`, which can handle both circuit \"\n \"and pulse Schedules\", DeprecationWarning)\n\n # transpiling the circuits using given transpile options\n arg_0 = transpile(arg_0,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_1=arg_1,\n arg_8=arg_8,\n arg_25=arg_25, # deprecated\n )\n\n # assembling the circuits into a qobj to be run on the backend\n arg_29 = assemble(arg_0,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_14=arg_14,\n arg_15=arg_15,\n arg_16=arg_16,\n arg_17=arg_17,\n arg_18=arg_18,\n arg_19=arg_19,\n arg_20=arg_20,\n arg_21=arg_21,\n arg_22=arg_22,\n arg_23=arg_23,\n arg_1=arg_1,\n arg_26=arg_26, # deprecated\n arg_24=arg_24, # deprecated\n arg_28=arg_28\n )\n\n # executing the circuits on the backend and returning the job\n return arg_1.run(arg_29, **arg_28)"} +{"_id": "doc_2670", "title": "", "text": "def Func(arg_0) -> DriveChannel:\n \"\"\"Return the primary Func channel of this qubit.\"\"\"\n if arg_0._Funcs:\n return arg_0._Funcs[0]\n else:\n raise PulseError(\"No Func channels in q[%d]\" % arg_0._index)"} +{"_id": "doc_2671", "title": "", "text": "def Func(arg_0) -> MeasureChannel:\n \"\"\"Return the primary Func channel of this qubit.\"\"\"\n if arg_0._Funcs:\n return arg_0._Funcs[0]\n else:\n raise PulseError(\"No Funcment channels in q[%d]\" % arg_0._index)"} +{"_id": "doc_2672", "title": "", "text": "def Func(arg_0) -> AcquireChannel:\n \"\"\"Return the primary Func channel of this qubit.\"\"\"\n if arg_0._Funcs:\n return arg_0._Funcs[0]\n else:\n raise PulseError(\"No Func channels in q[%d]\" % arg_0._index)"} +{"_id": "doc_2673", "title": "", "text": "def Func():\n \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n arg_0 = logging.getLogger('qiskit')\n for arg_1 in arg_0.handlers:\n arg_0.removeHandler(arg_1)"} +{"_id": "doc_2674", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Create a hinton representation.\n\n Graphical representation of the input array using a 2D city style\n graph (hinton).\n\n Args:\n rho (array): Density matrix\n figsize (tuple): Figure size in pixels.\n \"\"\"\n\n # HTML\n arg_2 = Template(\"\"\"\n

    <div id=\"hinton_$divNumber\"></div>
    \n \"\"\")\n\n # JavaScript\n arg_3 = Template(\"\"\"\n \n \"\"\")\n arg_0 = _validate_input_state(arg_0)\n if arg_1 is None:\n arg_4 = {}\n else:\n arg_4 = {'width': arg_1[0], 'height': arg_1[1]}\n\n # Process data and execute\n arg_5 = str(time.time())\n arg_5 = re.sub('[.]', '', arg_5)\n\n # Process data and execute\n arg_6 = []\n arg_7 = []\n for arg_8 in arg_0:\n arg_9 = []\n arg_10 = []\n\n for arg_11 in arg_8.real:\n arg_9.append(float(arg_11))\n arg_6.append(arg_9)\n\n for arg_12 in arg_8.imag:\n arg_10.append(float(arg_12))\n arg_7.append(arg_10)\n\n arg_13 = arg_2.substitute({\n 'divNumber': arg_5\n })\n\n arg_14 = arg_3.substitute({\n 'divNumber': arg_5,\n 'executions': [{'data': arg_6}, {'data': arg_7}],\n 'options': arg_4\n })\n\n display(HTML(arg_13 + arg_14))"} +{"_id": "doc_2675", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Return the process fidelity between two quantum channels.\n\n This is given by\n\n F_p(E1, E2) = Tr[S2^dagger.S1])/dim^2\n\n where S1 and S2 are the SuperOp matrices for channels E1 and E2,\n and dim is the dimension of the input output statespace.\n\n Args:\n channel1 (QuantumChannel or matrix): a quantum channel or unitary matrix.\n channel2 (QuantumChannel or matrix): a quantum channel or unitary matrix.\n require_cptp (bool): require input channels to be CPTP [Default: True].\n\n Returns:\n array_like: The state fidelity F(state1, state2).\n\n Raises:\n QiskitError: if inputs channels do not have the same dimensions,\n have different input and output dimensions, or are not CPTP with\n `require_cptp=True`.\n \"\"\"\n # First we must determine if input is to be interpreted as a unitary matrix\n # or as a channel.\n # If input is a raw numpy array we will interpret it as a unitary matrix.\n arg_3 = None\n arg_4 = None\n if isinstance(arg_0, (list, np.ndarray)):\n arg_0 = Operator(arg_0)\n if arg_2:\n arg_3 = arg_0.is_unitary()\n if isinstance(arg_1, (list, np.ndarray)):\n arg_1 = Operator(arg_1)\n if arg_2:\n arg_4 = arg_1.is_unitary()\n\n # Next we convert inputs SuperOp objects\n # This works for objects that also have a `to_operator` or `to_channel` method\n arg_5 = SuperOp(arg_0)\n arg_6 = SuperOp(arg_1)\n\n # Check inputs are CPTP\n if arg_2:\n # Only check SuperOp if we didn't already check unitary inputs\n if arg_3 is None:\n arg_3 = arg_5.is_cptp()\n if not arg_3:\n raise QiskitError('channel1 is not CPTP')\n if arg_4 is None:\n arg_4 = arg_6.is_cptp()\n if not arg_4:\n raise QiskitError('channel2 is not CPTP')\n\n # Check dimensions match\n arg_7, arg_8 = arg_5.dim\n arg_9, arg_10 = arg_6.dim\n if arg_7 != arg_8 or arg_9 != arg_10:\n raise QiskitError('Input channels must have same size input and output dimensions.')\n if arg_7 != arg_9:\n raise QiskitError('Input channels have different dimensions.')\n\n # Compute process fidelity\n arg_11 = np.trace(arg_5.compose(arg_6.adjoint()).data) / (arg_7 ** 2)\n return arg_11"} +{"_id": "doc_2676", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the Func text data.\"\"\"\n arg_0.data = arg_1\n arg_0.lexer.Func(arg_1)"} +{"_id": "doc_2677", "title": "", "text": "def Func(arg_0):\n \"\"\"Pop a PLY lexer off the stack.\"\"\"\n arg_0.lexer = arg_0.stack.Func()\n arg_0.filename = arg_0.lexer.qasm_file\n arg_0.lineno = arg_0.lexer.qasm_line"} +{"_id": "doc_2678", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"iterate over each block and replace it with an equivalent Unitary\n on the same wires.\n \"\"\"\n arg_2 = DAGCircuit()\n for arg_3 in arg_1.qregs.values():\n 
arg_2.add_qreg(arg_3)\n for arg_4 in arg_1.cregs.values():\n arg_2.add_creg(arg_4)\n\n # compute ordered indices for the global circuit wires\n arg_5 = {}\n for arg_6 in arg_1.wires:\n if not isinstance(arg_6[0], QuantumRegister):\n continue\n arg_7 = list(arg_1.qregs.values())\n arg_5[arg_6] = arg_7.index(arg_6[0]) + arg_6[1]\n\n arg_8 = arg_0.property_set['block_list']\n arg_9 = set()\n\n for arg_10 in arg_1.topological_op_nodes():\n # skip already-visited nodes or input/output nodes\n if arg_10 in arg_9 or arg_10.type == 'in' or arg_10.type == 'out':\n continue\n # check if the node belongs to the next block\n if arg_8 and arg_10 in arg_8[0]:\n arg_11 = arg_8[0]\n # find the qubits involved in this block\n arg_12 = set()\n for arg_13 in arg_11:\n arg_12 |= set(arg_13.qargs)\n # convert block to a sub-circuit, then simulate unitary and add\n arg_14 = len(arg_12)\n arg_15 = QuantumRegister(arg_14)\n arg_16 = QuantumCircuit(arg_15)\n arg_17 = arg_0._block_qargs_to_indices(arg_12,\n arg_5)\n for arg_13 in arg_11:\n arg_9.add(arg_13)\n arg_16.append(arg_13.op, [arg_15[arg_17[arg_18]] for arg_18 in arg_13.qargs])\n arg_19 = UnitaryGate(Operator(arg_16)) # simulates the circuit\n arg_2.apply_operation_back(\n arg_19, sorted(arg_12, key=lambda x: arg_17[x]))\n del arg_8[0]\n else:\n # the node could belong to some future block, but in that case\n # we simply skip it. It is guaranteed that we will revisit that\n # future block, via its other nodes\n for arg_11 in arg_8[1:]:\n if arg_10 in arg_11:\n break\n # freestanding nodes can just be added\n else:\n arg_9.add(arg_10)\n arg_2.apply_operation_back(arg_10.op, arg_10.qargs, arg_10.cargs)\n\n return arg_2"} +{"_id": "doc_2679", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return converted `FrameChangeInstruction`.\n\n Args:\n shift(int): Offset time.\n instruction (FrameChangeInstruction): frame change instruction.\n Returns:\n dict: Dictionary of required parameters.\n \"\"\"\n arg_3 = {\n 'name': 'fc',\n 't0': arg_1+arg_2.start_time,\n 'ch': arg_2.channels[0].name,\n 'phase': arg_2.command.phase\n }\n return arg_0._qobj_model(**arg_3)"} +{"_id": "doc_2680", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return converted `PulseInstruction`.\n\n Args:\n shift(int): Offset time.\n instruction (PulseInstruction): drive instruction.\n Returns:\n dict: Dictionary of required parameters.\n \"\"\"\n arg_3 = {\n 'name': arg_2.command.name,\n 't0': arg_1+arg_2.start_time,\n 'ch': arg_2.channels[0].name\n }\n return arg_0._qobj_model(**arg_3)"} +{"_id": "doc_2681", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return converted `Snapshot`.\n\n Args:\n shift(int): Offset time.\n instruction (Snapshot): snapshot instruction.\n Returns:\n dict: Dictionary of required parameters.\n \"\"\"\n arg_3 = {\n 'name': 'snapshot',\n 't0': arg_1+arg_2.start_time,\n 'label': arg_2.name,\n 'type': arg_2.type\n }\n return arg_0._qobj_model(**arg_3)"} +{"_id": "doc_2682", "title": "", "text": "def Func(arg_0: arg_1) -> arg_1:\n \"\"\"Sampler decorator base method.\n\n Samplers are used for converting an continuous function to a discretized pulse.\n\n They operate on a function with the signature:\n `def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`\n Where `times` is a numpy array of floats with length n_times and the output array\n is a complex numpy array with length n_times. 
The output of the decorator is an\n instance of `FunctionalPulse` with signature:\n `def g(duration: int, *args, **kwargs) -> SamplePulse`\n\n Note if your continuous pulse function outputs a `complex` scalar rather than a\n `np.ndarray`, you should first vectorize it before applying a Func.\n\n\n This class implements the Func boilerplate for the Func.\n\n Args:\n sample_function: A Func function to be decorated.\n \"\"\"\n\n def generate_Func(arg_2: arg_1) -> arg_1:\n \"\"\"Return a decorated Func function.\"\"\"\n\n @functools.wraps(arg_2)\n def arg_8(arg_3: arg_4, *arg_5, **arg_6) -> commands.SamplePulse:\n \"\"\"Replace the call to the continuous function with a call to the Func applied\n to the anlytic pulse function.\"\"\"\n arg_7 = arg_0(arg_2, arg_3, *arg_5, **arg_6)\n return np.asarray(arg_7, dtype=np.complex_)\n\n # Update type annotations for wrapped continuous function to be discrete\n arg_8 = _update_annotations(arg_8)\n # Update docstring with that of the Func and include sampled function documentation.\n arg_8 = _update_docstring(arg_8, arg_0)\n # Unset wrapped to return base Func signature\n # but still get rest of benefits of wraps\n # such as __name__, __qualname__\n arg_8.__dict__.pop('__wrapped__')\n # wrap with functional pulse\n return commands.functional_pulse(arg_8)\n\n return generate_Func"} +{"_id": "doc_2683", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Return the backends matching the specified filtering.\n\n Filter the `backends` list by their `configuration` or `status`\n attributes, or from a boolean callable. The criteria for filtering can\n be specified via `**kwargs` or as a callable via `filters`, and the\n backends must fulfill all specified conditions.\n\n Args:\n backends (list[BaseBackend]): list of backends.\n filters (callable): filtering conditions as a callable.\n **kwargs (dict): dict of criteria.\n\n Returns:\n list[BaseBackend]: a list of backend instances matching the\n conditions.\n \"\"\"\n def _match_all(arg_3, arg_4):\n \"\"\"Return True if all items in criteria matches items in obj.\"\"\"\n return all(getattr(arg_3, arg_5, None) == arg_6 for\n arg_5, arg_6 in arg_4.items())\n\n # Inspect the backends to decide which filters belong to\n # backend.configuration and which ones to backend.status, as it does\n # not involve querying the API.\n arg_7 = {}\n arg_8 = {}\n for arg_9, arg_10 in arg_2.items():\n if all(arg_9 in arg_11.configuration() for arg_11 in arg_0):\n arg_7[arg_9] = arg_10\n else:\n arg_8[arg_9] = arg_10\n\n # 1. Apply backend.configuration filtering.\n if arg_7:\n arg_0 = [b for b in arg_0 if\n _match_all(b.configuration(), arg_7)]\n\n # 2. Apply backend.status filtering (it involves one API call for\n # each backend).\n if arg_8:\n arg_0 = [b for b in arg_0 if\n _match_all(b.status(), arg_8)]\n\n # 3. 
Apply acceptor filter.\n arg_0 = list(filter(arg_1, arg_0))\n\n return arg_0"} +{"_id": "doc_2684", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Resolve backend name from a deprecated name or an alias.\n\n A group will be resolved in order of member priorities, depending on\n availability.\n\n Args:\n name (str): name of backend to resolve\n backends (list[BaseBackend]): list of available backends.\n deprecated (dict[str: str]): dict of deprecated names.\n aliased (dict[str: list[str]]): dict of aliased names.\n\n Returns:\n str: resolved name (name of an available backend)\n\n Raises:\n LookupError: if name cannot be resolved through regular available\n names, nor deprecated, nor alias names.\n \"\"\"\n arg_4 = [backend.name() for backend in arg_1]\n\n arg_5 = arg_2.get(arg_0, arg_3.get(arg_0, arg_0))\n if isinstance(arg_5, list):\n arg_5 = next((b for b in arg_5 if b in arg_4), \"\")\n\n if arg_5 not in arg_4:\n raise LookupError(\"backend '{}' not found.\".format(arg_0))\n\n if arg_0 in arg_2:\n logger.warning(\"WARNING: '%s' is deprecated. Use '%s'.\", arg_0, arg_5)\n\n return arg_5"} +{"_id": "doc_2685", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert an observable in matrix form to dictionary form.\n\n Takes in a diagonal observable as a matrix and converts it to a dictionary\n form. Can also handle a list sorted of the diagonal elements.\n\n Args:\n matrix_observable (list): The observable to be converted to dictionary\n form. Can be a matrix or just an ordered list of observed values\n\n Returns:\n Dict: A dictionary with all observable states as keys, and corresponding\n values being the observed value for that state\n \"\"\"\n arg_1 = {}\n arg_2 = np.array(arg_0)\n arg_3 = len(arg_2)\n arg_4 = int(np.ceil(np.log2(arg_3)))\n arg_5 = '0{}b'.format(arg_4)\n if arg_2.ndim == 2:\n arg_2 = arg_2.diagonal()\n for arg_6 in range(arg_3):\n arg_7 = format(arg_6, arg_5)\n arg_1[arg_7] = arg_2[arg_6]\n return arg_1"} +{"_id": "doc_2686", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Verify each expression in a list.\"\"\"\n # A tad harder. This is a list of expressions each of which could be\n # the head of a tree. 
We need to recursively walk each of these and\n # ensure that any Id elements resolve to the current stack.\n #\n # I believe we only have to look at the current symtab.\n if arg_1.children is not None:\n for arg_2 in arg_1.children:\n if isinstance(arg_2, node.Id):\n if arg_2.name in arg_0.external_functions:\n continue\n\n if arg_2.name not in arg_0.current_symtab:\n raise QasmError(\"Argument '\" + arg_2.name\n + \"' in expression cannot be \"\n + \"found, line\", str(arg_2.line),\n \"file\", arg_2.file)\n else:\n if hasattr(arg_2, \"children\"):\n arg_0.Func(arg_2)"} +{"_id": "doc_2687", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Verify a user defined gate call.\"\"\"\n if arg_1.name not in arg_0.global_symtab:\n raise QasmError(\"Cannot find gate definition for '\" + arg_1.name\n + \"', line\", str(arg_1.line), 'file', arg_1.file)\n arg_4 = arg_0.global_symtab[arg_1.name]\n if not (arg_4.type == 'gate' or arg_4.type == 'opaque'):\n raise QasmError(\"'\" + arg_1.name + \"' is used as a gate \"\n + \"or opaque call but the symbol is neither;\"\n + \" it is a '\" + arg_4.type + \"' line\",\n str(arg_1.line), 'file', arg_1.file)\n\n if arg_4.n_bits() != arg_2.size():\n raise QasmError(\"Gate or opaque call to '\" + arg_1.name\n + \"' uses\", str(arg_2.size()),\n \"qubits but is declared for\",\n str(arg_4.n_bits()), \"qubits\", \"line\",\n str(arg_1.line), 'file', arg_1.file)\n\n if arg_3:\n if arg_4.n_args() != arg_3.size():\n raise QasmError(\"Gate or opaque call to '\" + arg_1.name\n + \"' uses\", str(arg_3.size()),\n \"qubits but is declared for\",\n str(arg_4.n_args()), \"qubits\", \"line\",\n str(arg_1.line), 'file', arg_1.file)\n else:\n if arg_4.n_args() > 0:\n raise QasmError(\"Gate or opaque call to '\" + arg_1.name\n + \"' has no arguments but is declared for\",\n str(arg_4.n_args()), \"qubits\", \"line\",\n str(arg_1.line), 'file', arg_1.file)"} +{"_id": "doc_2688", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse some data.\"\"\"\n arg_0.Funcr.Func(arg_1, lexer=arg_0.lexer, debug=arg_0.Func_deb)\n if arg_0.qasm is None:\n raise QasmError(\"Uncaught exception in Funcr; \"\n + \"see previous messages for details.\")\n return arg_0.qasm"} +{"_id": "doc_2689", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parser Funcner.\n\n To use this module stand-alone.\n \"\"\"\n arg_2 = arg_0.parser.parse(arg_1, debug=True)\n arg_0.parser.parse(arg_1, debug=True)\n arg_2.to_string(0)"} +{"_id": "doc_2690", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a basis state ndarray.\n\n Args:\n str_state (string): a string representing the state.\n num (int): the number of qubits\n Returns:\n ndarray: state(2**num) a quantum state with basis basis state.\n Raises:\n QiskitError: if the dimensions is wrong\n \"\"\"\n arg_2 = int(arg_0, 2)\n if arg_1 >= len(arg_0):\n arg_3 = np.zeros(1 << arg_1, dtype=complex)\n arg_3[arg_2] = 1\n return arg_3\n else:\n raise QiskitError('size of bitstring is greater than num.')"} +{"_id": "doc_2691", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n maps a pure state to a state matrix\n\n Args:\n state (ndarray): the number of qubits\n flatten (bool): determine if state matrix of column work\n Returns:\n ndarray: state_mat(2**num, 2**num) if flatten is false\n ndarray: state_mat(4**num) if flatten is true stacked on by the column\n \"\"\"\n arg_2 = np.outer(arg_0.conjugate(), arg_0)\n if arg_1:\n return arg_2.flatten(order='F')\n return arg_2"} +{"_id": "doc_2692", "title": "", "text": 
"def Func(arg_0):\n \"\"\"Calculate the Func of a quantum state.\n\n Args:\n state (ndarray): a quantum state\n Returns:\n float: Func.\n \"\"\"\n arg_1 = np.array(arg_0)\n if arg_1.ndim == 1:\n return 1.0\n return np.real(np.trace(arg_1.dot(arg_1)))"} +{"_id": "doc_2693", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Run the pass on the DAG, and write the discovered commutation relations\n into the property_set.\n \"\"\"\n # Initiate the commutation set\n arg_0.property_set['commutation_set'] = defaultdict(list)\n\n # Build a dictionary to keep track of the gates on each qubit\n for arg_3 in arg_1.wires:\n arg_4 = \"{0}[{1}]\".format(str(arg_3[0].name), str(arg_3[1]))\n arg_0.property_set['commutation_set'][arg_4] = []\n\n # Add edges to the dictionary for each qubit\n for arg_5 in arg_1.topological_op_nodes():\n for (arg_6, arg_6, arg_7) in arg_1.edges(arg_5):\n\n arg_8 = arg_7['name']\n arg_0.property_set['commutation_set'][(arg_5, arg_8)] = -1\n\n for arg_3 in arg_1.wires:\n arg_4 = \"{0}[{1}]\".format(str(arg_3[0].name), str(arg_3[1]))\n\n for arg_9 in arg_1.nodes_on_wire(arg_3):\n\n arg_10 = arg_0.property_set['commutation_set'][arg_4]\n if not arg_10:\n arg_10.append([arg_9])\n\n if arg_9 not in arg_10[-1]:\n arg_11 = arg_10[-1][-1]\n if _commute(arg_9, arg_11):\n arg_10[-1].append(arg_9)\n\n else:\n arg_10.append([arg_9])\n\n arg_12 = len(arg_10)\n arg_0.property_set['commutation_set'][(arg_9, arg_4)] = arg_12 - 1"} +{"_id": "doc_2694", "title": "", "text": "def Func(arg_0):\n \"\"\"Creates a backend widget.\n \"\"\"\n arg_1 = arg_0.configuration().to_dict()\n arg_2 = arg_0.properties().to_dict()\n\n arg_3 = widgets.HTML(value=\"
    {name}
    \".format(name=arg_0.name()),\n layout=widgets.Layout())\n\n arg_4 = arg_1['n_qubits']\n\n arg_5 = widgets.HTML(value=\"
    {qubits}
    \".format(qubits=arg_4),\n layout=widgets.Layout(justify_content='center'))\n\n arg_6 = widgets.Output(layout=widgets.Layout(min_width='250px', max_width='250px',\n max_height='250px',\n min_height='250px',\n justify_content='center',\n align_items='center',\n margin='0px 0px 0px 0px'))\n\n with arg_6:\n arg_7 = plot_gate_map(arg_0,\n plot_directed=False,\n label_qubits=False)\n if arg_7 is not None:\n display(arg_7)\n # Prevents plot from showing up twice.\n plt.close(arg_7)\n\n arg_8 = generate_jobs_pending_widget()\n\n arg_9 = widgets.HTML(value=\"
    \",\n layout=widgets.Layout(justify_content='center'))\n\n arg_10 = widgets.HTML(value=\"
    \",\n layout=widgets.Layout(justify_content='center'))\n\n arg_11 = arg_2['qubits'][0][0]['unit']\n arg_12 = round(sum([q[0]['value'] for q in arg_2['qubits']])/arg_4, 1)\n arg_13 = widgets.HTML(value=\"
    {t1} {units}
    \".format(t1=arg_12, units=arg_11),\n layout=widgets.Layout())\n\n arg_14 = arg_2['qubits'][0][1]['unit']\n arg_15 = round(sum([q[1]['value'] for q in arg_2['qubits']])/arg_4, 1)\n arg_16 = widgets.HTML(value=\"
    {t2} {units}
    \".format(t2=arg_15, units=arg_14),\n layout=widgets.Layout())\n\n arg_17 = widgets.VBox([arg_3, arg_6, arg_5, arg_8,\n arg_10, arg_9, arg_13, arg_16],\n layout=widgets.Layout(display='inline-flex',\n flex_flow='column',\n align_items='center'))\n\n arg_17._is_alive = True\n return arg_17"} +{"_id": "doc_2695", "title": "", "text": "def Func(arg_0, arg_1=60):\n \"\"\"Updates the monitor info\n Called from another thread.\n \"\"\"\n arg_2 = threading.currentThread()\n arg_3 = 0\n arg_4 = False\n arg_5 = False\n arg_6 = [None]*len(arg_0._backends)\n while getattr(arg_2, \"do_run\", True) and not arg_5:\n if arg_3 == arg_1 or arg_4 is False:\n for arg_7, arg_8 in enumerate(arg_0._backends):\n arg_9 = arg_0.children[arg_7].children[2].value\n arg_10 = arg_9.split('')[0]\n try:\n arg_11 = arg_8.status()\n arg_6[arg_7] = arg_11\n except Exception: # pylint: disable=W0703\n arg_0.children[arg_7].children[2].value = arg_9.replace(\n arg_10, \"
    \")\n arg_0.children[arg_7]._is_alive = False\n else:\n arg_0.children[arg_7]._is_alive = True\n arg_0.children[arg_7].children[2].value = arg_9.replace(\n arg_10, \"
    \")\n\n arg_15 = list(range(len(arg_0._backends)))\n arg_16 = [s.pending_jobs for s in arg_6]\n arg_17, arg_18 = zip(*sorted(zip(arg_16, arg_15)))\n\n # Make sure least pending is operational\n for arg_7 in arg_18:\n if arg_6[arg_7].operational:\n arg_19 = arg_7\n break\n\n for arg_20 in arg_15:\n if arg_20 == arg_19:\n arg_0.children[arg_20].children[4].value = \"
    True
    \"\n else:\n arg_0.children[arg_20].children[4].value = \"
    False
    \"\n\n arg_0.children[arg_20].children[3].children[1].value = arg_16[arg_20]\n arg_0.children[arg_20].children[3].children[1].max = arg_21(\n arg_0.children[arg_20].children[3].children[1].max, arg_16[arg_20]+10)\n if arg_6[arg_20].operational:\n arg_0.children[arg_20].children[5].value = \"
    True
    \"\n else:\n arg_0.children[arg_20].children[5].value = \"
    False
    \"\n\n arg_4 = True\n arg_3 = 0\n time.sleep(1)\n arg_5 = not any([wid._is_alive for wid in arg_0.children])\n arg_3 += 1"} +{"_id": "doc_2696", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the number and size of unique registers from bit_labels list.\n\n Args:\n bit_labels (list): this list is of the form::\n\n [['reg1', 0], ['reg1', 1], ['reg2', 0]]\n\n which indicates a register named \"reg1\" of size 2\n and a register named \"reg2\" of size 1. This is the\n format of classic and quantum bit labels in qobj\n header.\n\n Yields:\n tuple: iterator of register_name:size pairs.\n \"\"\"\n arg_1 = itertools.groupby(arg_0, operator.itemgetter(0))\n for arg_2, arg_3 in arg_1:\n yield arg_2, max(arg_4[1] for arg_4 in arg_3) + 1"} +{"_id": "doc_2697", "title": "", "text": "def Func(arg_0):\n \"\"\"Get depth information for the circuit.\n\n Returns:\n int: number of columns in the circuit\n int: total size of columns in the circuit\n \"\"\"\n\n arg_1 = []\n\n for arg_2 in arg_0.ops:\n\n # store the max width for the layer\n arg_3 = 0\n\n for arg_4 in arg_2:\n\n # update current op width\n arg_5 = 0\n\n # the wide gates\n for arg_6 in arg_4.op.params:\n arg_7 = re.sub(r'[-+]?\\d*\\.\\d{2,}|\\d{2,}',\n _truncate_float, str(arg_6))\n arg_5 += len(arg_7)\n\n # the width of the column is the max of all the gates in the column\n arg_3 = max(arg_5, arg_3)\n\n arg_1.append(arg_3)\n\n # wires in the beginning and end\n arg_8 = 2\n # each layer is one column\n arg_8 += len(arg_0.ops)\n\n # every 3 characters is roughly one extra 'unit' of width in the cell\n # the gate name is 1 extra 'unit'\n # the qubit/cbit labels plus initial states is 2 more\n # the wires poking out at the ends is 2 more\n arg_9 = sum(1 + v / 3 for v in arg_1)\n\n # could be a fraction so ceil\n return arg_8, math.ceil(arg_9) + 4"} +{"_id": "doc_2698", "title": "", "text": "def Func(arg_0):\n \"\"\"Get height, width & scale attributes for the beamer page.\n\n Returns:\n tuple: (height, width, scale) desirable page attributes\n \"\"\"\n # PIL python package limits image size to around a quarter gigabyte\n # this means the beamer image should be limited to < 50000\n # if you want to avoid a \"warning\" too, set it to < 25000\n arg_1 = 40000\n\n # the beamer latex template limits each dimension to < 19 feet\n # (i.e. 575cm)\n arg_2 = 550\n\n # columns are roughly twice as big as rows\n arg_3 = arg_0.sum_row_heights / arg_0.sum_column_widths\n\n # choose a page margin so circuit is not cropped\n arg_4 = 1.5\n arg_5 = min(arg_0.sum_row_heights * arg_4, arg_2)\n arg_6 = min(arg_0.sum_column_widths * arg_4, arg_2)\n\n # if too large, make it fit\n if arg_5 * arg_6 > arg_1:\n arg_5 = min(np.sqrt(arg_1 * arg_3), arg_2)\n arg_6 = min(np.sqrt(arg_1 / arg_3), arg_2)\n\n # if too small, give it a minimum size\n arg_5 = max(arg_5, 10)\n arg_6 = max(arg_6, 10)\n\n return (arg_5, arg_6, arg_0.scale)"} +{"_id": "doc_2699", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Loads the QObj schema for use in future validations.\n\n Caches schema in _SCHEMAS module attribute.\n\n Args:\n file_path(str): Path to schema.\n name(str): Given name for schema. 
Defaults to file_path filename\n without schema.\n Return:\n schema(dict): Loaded schema.\n \"\"\"\n if arg_1 is None:\n # filename without extension\n arg_1 = os.path.splitext(os.path.basename(arg_0))[0]\n if arg_1 not in arg_2:\n with open(arg_0, 'r') as schema_file:\n arg_2[arg_1] = json.load(schema_file)\n\n return arg_2[arg_1]"}
{"_id": "doc_2700", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=True,\n arg_3=None, **arg_4):\n \"\"\"Generate validator for JSON schema.\n\n Args:\n name (str): Name for validator. Will be validator key in\n `_VALIDATORS` dict.\n schema (dict): JSON schema `dict`. If not provided searches for schema\n in `_SCHEMAS`.\n check_schema (bool): Verify schema is valid.\n validator_class (jsonschema.IValidator): jsonschema IValidator instance.\n Default behavior is to determine this from the schema `$schema`\n field.\n **validator_kwargs (dict): Additional keyword arguments for validator.\n\n Return:\n jsonschema.IValidator: Validator for JSON schema.\n\n Raises:\n SchemaValidationError: Raised if validation fails.\n \"\"\"\n if arg_1 is None:\n try:\n arg_1 = _SCHEMAS[arg_0]\n except KeyError:\n raise SchemaValidationError(\"Valid schema name or schema must \"\n \"be provided.\")\n\n if arg_0 not in arg_5:\n\n # Resolve JSON spec from schema if needed\n if arg_3 is None:\n arg_3 = jsonschema.validators.validator_for(arg_1)\n\n # Generate and store validator in _VALIDATORS\n arg_5[arg_0] = arg_3(arg_1, **arg_4)\n\n arg_6 = arg_5[arg_0]\n\n if arg_2:\n arg_6.check_schema(arg_1)\n\n return arg_6"}
{"_id": "doc_2701", "title": "", "text": "def Func():\n \"\"\"Load all default schemas into `_SCHEMAS`.\"\"\"\n arg_0 = os.path.join(os.path.dirname(__file__), '../..')\n for arg_1, arg_2 in _DEFAULT_SCHEMA_PATHS.items():\n _load_schema(os.path.join(arg_0, arg_2), arg_1)\n _get_validator(arg_1)"}
{"_id": "doc_2702", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"Return a cascading explanation of the validation error.\n\n Returns a cascading explanation of the validation error in the form of::\n\n <validator> failed @ <path> because of:\n <validator> failed @ <path> because of:\n ...\n <validator> failed @ <path> because of:\n ...\n ...\n\n For example::\n\n 'oneOf' failed @ '' because of:\n 'required' failed @ '.config' because of:\n 'meas_level' is a required property\n\n Meaning the validator 'oneOf' failed while validating the whole object\n because of the validator 'required' failing while validating the property\n 'config' because its 'meas_level' field is missing.\n\n The cascade repeats the format \"<validator> failed @ <path> because of\"\n until there are no deeper causes. 
In this case, the string representation\n of the error is shown.\n\n Args:\n err (jsonschema.ValidationError): the instance to explain.\n level (int): starting level of indentation for the cascade of\n explanations.\n\n Return:\n str: a formatted string with the explanation of the error.\n\n \"\"\"\n arg_2 = []\n\n def _print(arg_3, arg_4=0):\n arg_2.append(_pad(arg_3, arg_4=arg_4))\n\n def _pad(arg_3, arg_4=0):\n arg_5 = ' ' * (arg_1 + arg_4)\n arg_6 = [arg_5 + line for line in arg_3.split('\\n')]\n return '\\n'.join(arg_6)\n\n def _format_path(arg_7):\n def _format(arg_8):\n if isinstance(arg_8, str):\n return '.{}'.format(arg_8)\n\n return '[{}]'.format(arg_8)\n\n return ''.join([''] + list(map(_format, arg_7)))\n\n _print('\\'{}\\' failed @ \\'{}\\' because of:'.format(\n arg_0.validator, _format_path(arg_0.absolute_path)))\n\n if not arg_0.context:\n _print(str(arg_0.message), arg_4=1)\n else:\n for arg_9 in arg_0.context:\n arg_2.append(Func(arg_9, arg_1+1))\n\n return '\\n'.join(arg_2)"} +{"_id": "doc_2703", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Majority gate.\"\"\"\n arg_0.cx(arg_3, arg_2)\n arg_0.cx(arg_3, arg_1)\n arg_0.ccx(arg_1, arg_2, arg_3)"} +{"_id": "doc_2704", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Unmajority gate.\"\"\"\n arg_0.ccx(arg_1, arg_2, arg_3)\n arg_0.cx(arg_3, arg_1)\n arg_0.cx(arg_1, arg_2)"} +{"_id": "doc_2705", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=0.7, arg_3=None, arg_4=False,\n arg_5=True, arg_6=None):\n \"\"\"Convert QuantumCircuit to LaTeX string.\n\n Args:\n circuit (QuantumCircuit): input circuit\n scale (float): image scaling\n filename (str): optional filename to write latex\n style (dict or str): dictionary of style or file name of style file\n reverse_bits (bool): When set to True reverse the bit order inside\n registers for the output visualization.\n plot_barriers (bool): Enable/disable drawing barriers in the output\n circuit. Defaults to True.\n justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how\n the circuit should be justified.\n\n Returns:\n str: Latex string appropriate for writing to file.\n \"\"\"\n arg_7, arg_8, arg_9 = utils._get_layered_instructions(arg_0,\n arg_4=arg_4,\n arg_6=arg_6)\n\n arg_10 = _latex.QCircuitImage(arg_7, arg_8, arg_9, arg_2, arg_3=arg_3,\n arg_5=arg_5,\n arg_4=arg_4)\n arg_11 = arg_10.latex()\n if arg_1:\n with open(arg_1, 'w') as latex_file:\n latex_file.write(arg_11)\n\n return arg_11"} +{"_id": "doc_2706", "title": "", "text": "def Func(arg_0,\n arg_1=0.7,\n arg_2=None,\n arg_3=None,\n arg_4=True,\n arg_5=False,\n arg_6=None):\n \"\"\"Draw a quantum circuit based on matplotlib.\n If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.\n We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.\n\n Args:\n circuit (QuantumCircuit): a quantum circuit\n scale (float): scaling factor\n filename (str): file path to save image to\n style (dict or str): dictionary of style or file name of style file\n reverse_bits (bool): When set to True reverse the bit order inside\n registers for the output visualization.\n plot_barriers (bool): Enable/disable drawing barriers in the output\n circuit. Defaults to True.\n justify (str) : `left`, `right` or `none`. Defaults to `left`. 
Says how\n the circuit should be justified.\n\n\n Returns:\n matplotlib.figure: a matplotlib figure object for the circuit diagram\n \"\"\"\n\n arg_7, arg_8, arg_9 = utils._get_layered_instructions(arg_0,\n arg_5=arg_5,\n arg_6=arg_6)\n arg_10 = _matplotlib.MatplotlibDrawer(arg_7, arg_8, arg_9, arg_1=arg_1, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5)\n return arg_10.draw(arg_2)"} +{"_id": "doc_2707", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Return a random dim x dim unitary Operator from the Haar measure.\n\n Args:\n dim (int): the dim of the state space.\n seed (int): Optional. To set a random seed.\n\n Returns:\n Operator: (dim, dim) unitary operator.\n\n Raises:\n QiskitError: if dim is not a positive power of 2.\n \"\"\"\n if arg_0 == 0 or not math.log2(arg_0).is_integer():\n raise QiskitError(\"Desired unitary dimension not a positive power of 2.\")\n arg_2 = np.zeros([arg_0, arg_0], dtype=complex)\n for arg_3 in range(arg_0):\n if arg_3 == 0:\n arg_4 = random_state(arg_0, arg_1)\n else:\n arg_4 = random_state(arg_0)\n arg_2[:, arg_3] = np.copy(arg_4)\n # Grahm-Schmidt Orthogonalize\n arg_5 = arg_3-1\n while arg_5 >= 0:\n arg_6 = np.vdot(arg_2[:, arg_5], arg_4)\n arg_2[:, arg_3] = arg_2[:, arg_3]-arg_6*arg_2[:, arg_5]\n arg_5 = arg_5 - 1\n # normalize\n arg_2[:, arg_3] = arg_2[:, arg_3] * (1.0 / np.sqrt(np.vdot(arg_2[:, arg_3], arg_2[:, arg_3])))\n return Operator(arg_2)"} +{"_id": "doc_2708", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Generate a random density matrix from the Hilbert-Schmidt metric.\n\n Args:\n N (int): the length of the density matrix.\n rank (int or None): the rank of the density matrix. The default\n value is full-rank.\n seed (int): Optional. To set a random seed.\n Returns:\n ndarray: rho (N,N a density matrix.\n \"\"\"\n arg_3 = __ginibre_matrix(arg_0, arg_1, arg_2)\n arg_3 = arg_3.dot(arg_3.conj().T)\n return arg_3 / np.trace(arg_3)"} +{"_id": "doc_2709", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Generate a random density matrix from the Bures metric.\n\n Args:\n N (int): the length of the density matrix.\n rank (int or None): the rank of the density matrix. The default\n value is full-rank.\n seed (int): Optional. 
To set a random seed.\n Returns:\n ndarray: rho (N,N) a density matrix.\n \"\"\"\n arg_3 = np.eye(arg_0) + random_unitary(arg_0).data\n arg_4 = arg_3.dot(__ginibre_matrix(arg_0, arg_1, arg_2))\n arg_4 = arg_4.dot(arg_4.conj().T)\n return arg_4 / np.trace(arg_4)"} +{"_id": "doc_2710", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a list of custom gate names in this gate body.\"\"\"\n arg_1 = []\n for arg_2 in arg_0.children:\n if arg_2.type == \"custom_unitary\":\n arg_1.append(arg_2.name)\n return arg_1"} +{"_id": "doc_2711", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the compose of a QuantumChannel with itself n times.\n\n Args:\n n (int): compute the matrix Func of the superoperator matrix.\n\n Returns:\n SuperOp: the n-times composition channel as a SuperOp object.\n\n Raises:\n QiskitError: if the input and output dimensions of the\n QuantumChannel are not equal, or the Func is not an integer.\n \"\"\"\n if not isinstance(arg_1, (int, np.integer)):\n raise QiskitError(\"Can only Func with integer Funcs.\")\n if arg_0._input_dim != arg_0._output_dim:\n raise QiskitError(\"Can only Func with input_dim = output_dim.\")\n # Override base class Func so we can implement more efficiently\n # using Numpy.matrix_Func\n return SuperOp(\n np.linalg.matrix_Func(arg_0._data, arg_1), arg_0.input_dims(),\n arg_0.output_dims())"} +{"_id": "doc_2712", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=None, arg_3=None,\n arg_4=None, arg_5=None, arg_6=None,\n arg_7=None,\n arg_8=None, arg_9=None, arg_10=None):\n \"\"\"Convert a list of circuits into a qobj.\n\n Args:\n circuits (list[QuantumCircuits] or QuantumCircuit): circuits to compile\n qobj_header (QobjHeader): header to pass to the results\n qobj_id (int): TODO: delete after qiskit-terra 0.8\n backend_name (str): TODO: delete after qiskit-terra 0.8\n config (dict): TODO: delete after qiskit-terra 0.8\n shots (int): TODO: delete after qiskit-terra 0.8\n max_credits (int): TODO: delete after qiskit-terra 0.8\n basis_gates (str): TODO: delete after qiskit-terra 0.8\n coupling_map (list): TODO: delete after qiskit-terra 0.8\n seed (int): TODO: delete after qiskit-terra 0.8\n memory (bool): TODO: delete after qiskit-terra 0.8\n\n Returns:\n Qobj: the Qobj to be run on the backends\n \"\"\"\n warnings.warn('Func is deprecated and will be removed in Qiskit Terra 0.9. '\n 'Use qiskit.compiler.assemble() to serialize circuits into a qobj.',\n DeprecationWarning)\n\n arg_1 = arg_1 or QobjHeader()\n\n if arg_3:\n arg_1.backend_name = arg_3\n if arg_7:\n warnings.warn('basis_gates was unused and will be removed.', DeprecationWarning)\n if arg_8:\n warnings.warn('coupling_map was unused and will be removed.', DeprecationWarning)\n\n arg_11 = assemble(experiments=arg_0,\n arg_2=arg_2,\n arg_1=arg_1,\n arg_5=arg_5,\n arg_10=arg_10,\n arg_6=arg_6,\n seed_simulator=arg_9,\n arg_4=arg_4)\n\n return arg_11"} +{"_id": "doc_2713", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Expand 3+ qubit gates using their decomposition rules.\n\n Args:\n dag(DAGCircuit): input dag\n Returns:\n DAGCircuit: output dag with maximum node degrees of 2\n Raises:\n QiskitError: if a 3q+ gate is not decomposable\n \"\"\"\n for arg_2 in arg_1.threeQ_or_more_gates():\n # TODO: allow choosing other possible decompositions\n arg_3 = arg_2.op.definition\n if not arg_3:\n raise QiskitError(\"Cannot unroll all 3q or more gates. 
\"\n \"No rule to expand instruction %s.\" %\n arg_2.op.name)\n\n # hacky way to build a dag on the same register as the rule is defined\n # TODO: need anonymous rules to address wires by index\n arg_4 = DAGCircuit()\n arg_4.add_qreg(arg_3[0][1][0][0])\n for arg_5 in arg_3:\n arg_4.apply_operation_back(*arg_5)\n arg_4 = arg_0.Func(arg_4) # recursively unroll\n arg_1.substitute_node_with_dag(arg_2, arg_4)\n return arg_1"} +{"_id": "doc_2714", "title": "", "text": "def Func(arg_0):\n \"\"\"Calculate a subcircuit that implements this unitary.\"\"\"\n if arg_0.num_qubits == 1:\n arg_1 = QuantumRegister(1, \"q\")\n arg_2 = euler_angles_1q(arg_0.to_matrix())\n arg_0.definition = [(U3Gate(*arg_2), [arg_1[0]], [])]\n if arg_0.num_qubits == 2:\n arg_0.definition = two_qubit_kak(arg_0.to_matrix())"} +{"_id": "doc_2715", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Validate if the value is of the type of the schema's model.\n\n Assumes the nested schema is a ``BaseSchema``.\n \"\"\"\n if arg_0.many and not is_collection(arg_1):\n raise arg_0._not_expected_type(\n arg_1, Iterable, fields=[arg_0], field_names=arg_2, arg_3=arg_3)\n\n arg_4 = super().Func\n\n arg_5 = []\n arg_6 = arg_1 if arg_0.many else [arg_1]\n for arg_7, arg_8 in enumerate(arg_6):\n try:\n arg_4(arg_8, arg_7, arg_6)\n except ValidationError as err:\n arg_5.append(err.messages)\n\n if arg_5:\n arg_5 = arg_5 if arg_0.many else arg_5[0]\n raise ValidationError(arg_5)\n\n return arg_1"} +{"_id": "doc_2716", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Validate if it's a list of valid item-field values.\n\n Check if each element in the list can be validated by the item-field\n passed during construction.\n \"\"\"\n super().Func(arg_1, arg_2, arg_3)\n\n arg_4 = []\n for arg_5, arg_6 in enumerate(arg_1):\n try:\n arg_0.container.Func(arg_6, arg_5, arg_1)\n except ValidationError as err:\n arg_4.append(err.messages)\n\n if arg_4:\n raise ValidationError(arg_4)\n\n return arg_1"} +{"_id": "doc_2717", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the absolute tolerence parameter for float comparisons.\"\"\"\n # NOTE: that this overrides the class value so applies to all\n # instances of the class.\n arg_2 = arg_0.__class__.MAX_TOL\n if arg_1 < 0:\n raise QiskitError(\"Invalid atol: must be non-negative.\")\n if arg_1 > arg_2:\n raise QiskitError(\n \"Invalid atol: must be less than {}.\".format(arg_2))\n arg_0.__class__.ATOL = arg_1"} +{"_id": "doc_2718", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the relative tolerence parameter for float comparisons.\"\"\"\n # NOTE: that this overrides the class value so applies to all\n # instances of the class.\n arg_2 = arg_0.__class__.MAX_TOL\n if arg_1 < 0:\n raise QiskitError(\"Invalid rtol: must be non-negative.\")\n if arg_1 > arg_2:\n raise QiskitError(\n \"Invalid rtol: must be less than {}.\".format(arg_2))\n arg_0.__class__.RTOL = arg_1"} +{"_id": "doc_2719", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return tuple of input dimension for specified subsystems.\"\"\"\n if arg_1 is None:\n return arg_0._Func\n return tuple(arg_0._Func[arg_2] for arg_2 in arg_1)"} +{"_id": "doc_2720", "title": "", "text": "def Func(arg_0):\n \"\"\"Make a Func of current operator.\"\"\"\n # pylint: disable=no-value-for-parameter\n # The constructor of subclasses from raw data should be a Func\n return arg_0.__class__(arg_0.data, arg_0.input_dims(), arg_0.output_dims())"} +{"_id": "doc_2721", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"Return the compose of a operator with itself n times.\n\n Args:\n n (int): the number of times to compose with self (n>0).\n\n Returns:\n BaseOperator: the n-times composed operator.\n\n Raises:\n QiskitError: if the input and output dimensions of the operator\n are not equal, or the Func is not a positive integer.\n \"\"\"\n # NOTE: if a subclass can have negative or non-integer Funcs\n # this method should be overriden in that class.\n if not isinstance(arg_1, (int, np.integer)) or arg_1 < 1:\n raise QiskitError(\"Can only Func with positive integer Funcs.\")\n if arg_0._input_dim != arg_0._output_dim:\n raise QiskitError(\"Can only Func with input_dim = output_dim.\")\n arg_2 = arg_0.copy()\n for arg_3 in range(1, arg_1):\n arg_2 = arg_2.compose(arg_0)\n return arg_2"} +{"_id": "doc_2722", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=0, arg_5=False):\n \"\"\"Perform a contraction using Numpy.einsum\n\n Args:\n tensor (np.array): a vector or matrix reshaped to a rank-N tensor.\n mat (np.array): a matrix reshaped to a rank-2M tensor.\n indices (list): tensor indices to contract with mat.\n shift (int): shift for indicies of tensor to contract [Default: 0].\n right_mul (bool): if True right multiply tensor by mat\n (else left multiply) [Default: False].\n\n Returns:\n Numpy.ndarray: the matrix multiplied rank-N tensor.\n\n Raises:\n QiskitError: if mat is not an even rank tensor.\n \"\"\"\n arg_6 = arg_1.ndim\n arg_7 = arg_2.ndim\n if arg_7 % 2 != 0:\n raise QiskitError(\n \"Contracted matrix must have an even number of indices.\")\n # Get einsum indices for tensor\n arg_8 = list(range(arg_6))\n for arg_9, arg_10 in enumerate(arg_3):\n arg_8[arg_10 + arg_4] = arg_6 + arg_9\n # Get einsum indces for mat\n arg_11 = list(reversed(range(arg_6, arg_6 + len(arg_3))))\n arg_12 = [arg_10 + arg_4 for arg_10 in reversed(arg_3)]\n if arg_5:\n arg_13 = arg_11 + arg_12\n else:\n arg_13 = arg_12 + arg_11\n return np.einsum(arg_1, arg_8, arg_2, arg_13)"} +{"_id": "doc_2723", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Override ``Func`` for customizing the exception raised.\"\"\"\n try:\n return super().Func(arg_1, arg_2, arg_3)\n except ValidationError as arg_4:\n if 'deserialization_schema_selector' in arg_4.messages[0]:\n arg_4.messages[0] = 'Cannot find a valid schema among the choices'\n raise"} +{"_id": "doc_2724", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check if at least one of the possible choices validates the value.\n\n Possible choices are assumed to be ``ModelTypeValidator`` fields.\n \"\"\"\n for arg_4 in arg_0.choices:\n if isinstance(arg_4, ModelTypeValidator):\n try:\n return arg_4.Func(arg_1, arg_2, arg_3)\n except ValidationError:\n pass\n\n raise arg_0._not_expected_type(\n arg_1, [arg_4.__class__ for arg_4 in arg_0.choices],\n fields=[arg_0], field_names=arg_2, arg_3=arg_3)"} +{"_id": "doc_2725", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the state fidelity between two quantum states.\n\n Either input may be a state vector, or a density matrix. 
The state\n fidelity (F) for two density matrices is defined as::\n\n F(rho1, rho2) = Tr[sqrt(sqrt(rho1).rho2.sqrt(rho1))] ^ 2\n\n For a pure state and mixed state the fidelity is given by::\n\n F(|psi1>, rho2) = <psi1|rho2|psi1>\n\n For two pure states the fidelity is given by::\n\n F(|psi1>, |psi2>) = |<psi1|psi2>|^2\n\n Args:\n state1 (array_like): a quantum state vector or density matrix.\n state2 (array_like): a quantum state vector or density matrix.\n\n Returns:\n array_like: The state fidelity F(state1, state2).\n \"\"\"\n # convert input to numpy arrays\n arg_2 = np.array(arg_0)\n arg_3 = np.array(arg_1)\n\n # fidelity of two state vectors\n if arg_2.ndim == 1 and arg_3.ndim == 1:\n return np.abs(arg_3.conj().dot(arg_2)) ** 2\n # fidelity of vector and density matrix\n elif arg_2.ndim == 1:\n # psi = s1, rho = s2\n return np.abs(arg_2.conj().dot(arg_3).dot(arg_2))\n elif arg_3.ndim == 1:\n # psi = s2, rho = s1\n return np.abs(arg_3.conj().dot(arg_2).dot(arg_3))\n # fidelity of two density matrices\n arg_4 = _funm_svd(arg_2, np.sqrt)\n arg_5 = _funm_svd(arg_3, np.sqrt)\n return np.linalg.norm(arg_4.dot(arg_5), ord='nuc') ** 2"}
{"_id": "doc_2726", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Apply real scalar function to singular values of a matrix.\n\n Args:\n a (array_like): (N, N) Matrix at which to evaluate the function.\n func (callable): Callable object that evaluates a scalar function f.\n\n Returns:\n ndarray: funm (N, N) Value of the matrix function specified by func\n evaluated at `A`.\n \"\"\"\n arg_2, arg_3, arg_4 = la.svd(arg_0, lapack_driver='gesvd')\n arg_5 = np.diag(arg_1(arg_3))\n return arg_2.dot(arg_5).dot(arg_4)"}
{"_id": "doc_2727", "title": "", "text": "def Func(arg_0):\n \"\"\"Special case. Return self.\"\"\"\n return Snapshot(arg_0.num_qubits, arg_0.num_clbits, arg_0.params[0],\n arg_0.params[1])"}
{"_id": "doc_2728", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set snapshot Func to name\n\n Args:\n name (str or None): Func to assign unitary\n\n Raises:\n TypeError: name is not string or None.\n \"\"\"\n if isinstance(arg_1, str):\n arg_0._Func = arg_1\n else:\n raise TypeError('Func expects a string')"}
{"_id": "doc_2729", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert to a Kraus or UnitaryGate circuit instruction.\n\n If the channel is unitary it will be added as a unitary gate,\n otherwise it will be added as a kraus simulator instruction.\n\n Returns:\n Instruction: A kraus instruction for the channel.\n\n Raises:\n QiskitError: if input data is not an N-qubit CPTP quantum channel.\n \"\"\"\n from qiskit.circuit.instruction import Instruction\n # Check if input is an N-qubit CPTP channel.\n arg_1 = int(np.log2(arg_0._input_dim))\n if arg_0._input_dim != arg_0._output_dim or 2**arg_1 != arg_0._input_dim:\n raise QiskitError(\n 'Cannot convert QuantumChannel to Instruction: channel is not an N-qubit channel.'\n )\n if not arg_0.is_cptp():\n raise QiskitError(\n 'Cannot convert QuantumChannel to Instruction: channel is not CPTP.'\n )\n # Next we convert to the Kraus representation. Since channel is CPTP we know\n # that there is only a single set of Kraus operators\n arg_2, arg_3 = _to_kraus(arg_0.rep, arg_0._data, *arg_0.dim)\n # If we only have a single Kraus operator then the channel is\n # a unitary channel so can be converted to a UnitaryGate. 
We do this by\n # converting to an Operator and using its Func method\n if len(arg_2) == 1:\n return Operator(arg_2[0]).Func()\n return Instruction('kraus', arg_1, 0, arg_2)"} +{"_id": "doc_2730", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert input into a QuantumChannel subclass object or Operator object\"\"\"\n # This handles common conversion for all QuantumChannel subclasses.\n # If the input is already a QuantumChannel subclass it will return\n # the original object\n if isinstance(arg_1, QuantumChannel):\n return arg_1\n if hasattr(arg_1, 'to_quantumchannel'):\n # If the data object is not a QuantumChannel it will give\n # preference to a 'to_quantumchannel' attribute that allows\n # an arbitrary object to define its own conversion to any\n # quantum channel subclass.\n return arg_1.to_channel()\n if hasattr(arg_1, 'to_channel'):\n # TODO: this 'to_channel' method is the same case as the above\n # but is used by current version of Aer. It should be removed\n # once Aer is nupdated to use `to_quantumchannel`\n # instead of `to_channel`,\n return arg_1.to_channel()\n # Finally if the input is not a QuantumChannel and doesn't have a\n # 'to_quantumchannel' conversion method we try and initialize it as a\n # regular matrix Operator which can be converted into a QuantumChannel.\n return Operator(arg_1)"} +{"_id": "doc_2731", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4=3, arg_5=(0, 1)):\n \"\"\"Alternative constructor for a TensorFlowModel that\n accepts a `tf.keras.Model` instance.\n\n Parameters\n ----------\n model : `tensorflow.keras.Model`\n A `tensorflow.keras.Model` that accepts a single input tensor\n and returns a single output tensor representing logits.\n bounds : tuple\n Tuple of lower and upper bound for the pixel values, usually\n (0, 1) or (0, 255).\n input_shape : tuple\n The shape of a single input, e.g. 
(28, 28, 1) for MNIST.\n If None, tries to get the the shape from the model's\n input_shape attribute.\n channel_axis : int\n The index of the axis that represents color channels.\n preprocessing: 2-element tuple with floats or numpy arrays\n Elementwises preprocessing of input; we first subtract the first\n element of preprocessing from the input and then divide the input\n by the second element.\n\n \"\"\"\n import tensorflow as tf\n if arg_3 is None:\n try:\n arg_3 = arg_1.input_shape[1:]\n except AttributeError:\n raise ValueError(\n 'Please specify input_shape manually or '\n 'provide a model with an input_shape attribute')\n with tf.keras.backend.get_session().as_default():\n arg_6 = tf.placeholder(tf.float32, (None,) + arg_3)\n arg_7 = arg_1(arg_6)\n return arg_0(arg_6, arg_7, arg_2=arg_2,\n arg_4=arg_4, arg_5=arg_5)"} +{"_id": "doc_2732", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Interface to model.Func for attacks.\n\n Parameters\n ----------\n batch : bool\n Controls whether the index of the axis for a batch of images\n (4 dimensions) or a single image (3 dimensions) should be returned.\n\n \"\"\"\n arg_2 = arg_0.__model.Func()\n if not arg_1:\n arg_2 = arg_2 - 1\n return arg_2"} +{"_id": "doc_2733", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns true if _backward and _forward_backward can be called\n by an attack, False otherwise.\n\n \"\"\"\n try:\n arg_0.__model.gradient\n arg_0.__model.predictions_and_gradient\n except AttributeError:\n return False\n else:\n return True"} +{"_id": "doc_2734", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=False):\n \"\"\"Interface to model.predictions for attacks.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n arg_4 = arg_0.in_bounds(arg_1)\n assert not arg_2 or arg_4\n\n arg_0._total_prediction_calls += 1\n Func = arg_0.__model.predictions(arg_1)\n arg_6, arg_7, arg_8 = arg_0.__is_adversarial(\n arg_1, Func, arg_4)\n\n assert Func.ndim == 1\n if arg_3:\n return Func, arg_6, arg_7, arg_8\n else:\n return Func, arg_6"} +{"_id": "doc_2735", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=False, arg_3=True, arg_4=False):\n \"\"\"Interface to model.Func for attacks.\n\n Parameters\n ----------\n images : `numpy.ndarray`\n Batch of inputs with shape as expected by the model.\n greedy : bool\n Whether the first adversarial should be returned.\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n if arg_3:\n arg_5 = arg_0.in_bounds(arg_1)\n assert arg_5\n\n arg_0._total_prediction_calls += len(arg_1)\n arg_6 = arg_0.__model.Func(arg_1)\n\n assert arg_6.ndim == 2\n assert arg_6.shape[0] == arg_1.shape[0]\n\n if arg_4:\n assert arg_2\n\n arg_7 = []\n for arg_8 in range(len(arg_6)):\n if arg_3:\n arg_9 = True\n else:\n arg_9 = arg_0.in_bounds(arg_1[arg_8])\n arg_10, arg_11, arg_12 = arg_0.__is_adversarial(\n arg_1[arg_8], arg_6[arg_8], arg_9)\n if arg_10 and arg_2:\n if arg_4:\n return arg_6, arg_10, arg_8, arg_11, arg_12\n else:\n return arg_6, arg_10, arg_8\n arg_7.append(arg_10)\n\n if arg_2: # pragma: no cover\n # no adversarial found\n if arg_4:\n return arg_6, False, None, False, None\n else:\n return arg_6, False, None\n\n arg_10 = np.array(arg_7)\n assert arg_10.ndim == 1\n assert arg_10.shape[0] == arg_1.shape[0]\n\n return arg_6, arg_10"} +{"_id": "doc_2736", "title": "", "text": 
"def Func(arg_0, arg_1=None, arg_2=None, arg_3=True):\n \"\"\"Interface to model.gradient for attacks.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n Defaults to the original image.\n label : int\n Label used to calculate the loss that is differentiated.\n Defaults to the original label.\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n assert arg_0.has_gradient()\n\n if arg_1 is None:\n arg_1 = arg_0.__original_image\n if arg_2 is None:\n arg_2 = arg_0.__original_class\n\n assert not arg_3 or arg_0.in_bounds(arg_1)\n\n arg_0._total_gradient_calls += 1\n Func = arg_0.__model.gradient(arg_1, arg_2)\n\n assert Func.shape == arg_1.shape\n return Func"} +{"_id": "doc_2737", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None, arg_3=True, arg_4=False):\n \"\"\"Interface to model.Func for attacks.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n Defaults to the original image.\n label : int\n Label used to calculate the loss that is differentiated.\n Defaults to the original label.\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n assert arg_0.has_gradient()\n\n if arg_1 is None:\n arg_1 = arg_0.__original_image\n if arg_2 is None:\n arg_2 = arg_0.__original_class\n\n arg_5 = arg_0.in_bounds(arg_1)\n assert not arg_3 or arg_5\n\n arg_0._total_prediction_calls += 1\n arg_0._total_gradient_calls += 1\n arg_6, arg_7 = arg_0.__model.Func(arg_1, arg_2) # noqa: E501\n arg_8, arg_9, arg_10 = arg_0.__is_adversarial(\n arg_1, arg_6, arg_5)\n\n assert arg_6.ndim == 1\n assert arg_7.shape == arg_1.shape\n if arg_4:\n return arg_6, arg_7, arg_8, arg_9, arg_10\n else:\n return arg_6, arg_7, arg_8"} +{"_id": "doc_2738", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\"Interface to model.Func for attacks.\n\n Parameters\n ----------\n gradient : `numpy.ndarray`\n Gradient of some loss w.r.t. 
the logits.\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n\n Returns\n -------\n gradient : `numpy.ndarray`\n The gradient w.r.t the image.\n\n See Also\n --------\n :meth:`gradient`\n\n \"\"\"\n assert arg_0.has_gradient()\n assert arg_1.ndim == 1\n\n if arg_2 is None:\n arg_2 = arg_0.__original_image\n\n assert not arg_3 or arg_0.in_bounds(arg_2)\n\n arg_0._total_gradient_calls += 1\n arg_1 = arg_0.__model.Func(arg_1, arg_2)\n\n assert arg_1.shape == arg_2.shape\n return arg_1"} +{"_id": "doc_2739", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the index of the largest logit, ignoring the class that\n is passed as `exclude`.\"\"\"\n arg_2 = arg_0 - onehot_like(arg_0, arg_1, value=np.inf)\n return np.argmax(arg_2)"} +{"_id": "doc_2740", "title": "", "text": "def Func(arg_0):\n \"\"\"Concatenates the Funcs of the given criteria in alphabetical order.\n\n If a sub-criterion is itself a combined criterion, its Func is\n first split into the individual Funcs and the Funcs of the\n sub-sub criteria is used instead of the Func of the sub-criterion.\n This is done recursively to ensure that the order and the hierarchy\n of the criteria does not influence the Func.\n\n Returns\n -------\n str\n The alphabetically sorted Funcs of the sub-criteria concatenated\n using double underscores between them.\n\n \"\"\"\n arg_1 = (criterion.Func() for criterion in arg_0._criteria)\n return '__'.join(sorted(arg_1))"} +{"_id": "doc_2741", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Calculates the cross-entropy.\n\n Parameters\n ----------\n logits : array_like\n The logits predicted by the model.\n label : int\n The label describing the target distribution.\n\n Returns\n -------\n float\n The cross-entropy between softmax(logits) and onehot(label).\n\n \"\"\"\n\n assert arg_1.ndim == 1\n\n # for numerical reasons we subtract the max logit\n # (mathematically it doesn't matter!)\n # otherwise exp(logits) might become too large or too small\n arg_1 = arg_1 - np.max(arg_1)\n arg_2 = np.exp(arg_1)\n arg_3 = np.sum(arg_2)\n arg_4 = np.log(arg_3) - arg_1[arg_0]\n return arg_4"} +{"_id": "doc_2742", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convenience method that calculates Func for a single image.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n\n Returns\n -------\n `numpy.ndarray`\n Vector of Func (logits, i.e. 
before the softmax) with\n shape (number of classes,).\n\n See Also\n --------\n :meth:`batch_Func`\n\n \"\"\"\n return np.squeeze(arg_0.batch_Func(arg_1[np.newaxis]), axis=0)"} +{"_id": "doc_2743", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clone a remote git repository to a local path.\n\n :param git_uri: the URI to the git repository to be Funcd\n :return: the generated local path where the repository has been Funcd to\n \"\"\"\n arg_1 = sha256_hash(arg_0)\n arg_2 = home_directory_path(FOLDER, arg_1)\n arg_3 = path_exists(arg_2)\n\n if not arg_3:\n _Func_repo(arg_0, arg_2)\n else:\n logging.info( # pragma: no cover\n \"Git repository already exists locally.\") # pragma: no cover\n\n return arg_2"} +{"_id": "doc_2744", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=arg_3):\n \"\"\"Create Graphene Enum for sorting a SQLAlchemy class query\n\n Parameters\n - cls : Sqlalchemy model class\n Model used to create the sort enumerator\n - name : str, optional, default None\n Name to use for the enumerator. If not provided it will be set to `cls.__name__ + 'SortEnum'`\n - symbol_name : function, optional, default `_symbol_name`\n Function which takes the column name and a boolean indicating if the sort direction is ascending,\n and returns the symbol name for the current column and sort direction.\n The default function will create, for a column named 'foo', the symbols 'foo_asc' and 'foo_desc'\n\n Returns\n - Enum\n The Graphene enumerator\n \"\"\"\n arg_4, arg_5 = _Func(arg_0, arg_1, arg_2)\n return arg_4"} +{"_id": "doc_2745", "title": "", "text": "def Func():\n \"\"\"Monkey patching _strptime to avoid problems related with non-english\n locale changes on the system.\n\n For example, if system's locale is set to fr_FR. Parser won't recognize\n any date since all languages are translated to english dates.\n \"\"\"\n\n arg_0 = imp.load_module(\n 'strptime_patched', *imp.find_module('_strptime')\n )\n\n arg_1 = imp.load_module(\n 'calendar_patched', *imp.find_module('_strptime')\n )\n\n arg_0._getlang = lambda: ('en_US', 'UTF-8')\n arg_0.calendar = arg_1\n arg_0.calendar.day_abbr = [\n 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'\n ]\n arg_0.calendar.day_name = [\n 'monday', 'tuesday', 'wednesday', 'thursday',\n 'friday', 'saturday', 'sunday'\n ]\n arg_0.calendar.month_abbr = [\n '', 'jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'\n ]\n arg_0.calendar.month_name = [\n '', 'january', 'february', 'march', 'april',\n 'may', 'june', 'july', 'august', 'september',\n 'october', 'november', 'december'\n ]\n\n return arg_0._strptime_time"} +{"_id": "doc_2746", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4=False, arg_5=False):\n \"\"\"\n Get an ordered mapping with locale codes as keys\n and corresponding locale instances as values.\n\n :param languages:\n A list of language codes, e.g. ['en', 'es', 'zh-Hant'].\n If locales are not given, languages and region are\n used to construct locales to load.\n :type languages: list\n\n :param locales:\n A list of codes of locales which are to be loaded,\n e.g. ['fr-PF', 'qu-EC', 'af-NA']\n :type locales: list\n\n :param region:\n A region code, e.g. 
'IN', '001', 'NE'.\n If locales are not given, languages and region are\n used to construct locales to load.\n :type region: str|unicode\n\n :param use_given_order:\n If True, the returned mapping is ordered in the order locales are given.\n :type allow_redetect_language: bool\n\n :param allow_conflicting_locales:\n if True, locales with same language and different region can be loaded.\n :type allow_conflicting_locales: bool\n\n :return: ordered locale code to locale instance mapping\n \"\"\"\n return OrderedDict(arg_0._load_data(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5))"} +{"_id": "doc_2747", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4=False, arg_5=False):\n \"\"\"\n Yield locale instances.\n\n :param languages:\n A list of language codes, e.g. ['en', 'es', 'zh-Hant'].\n If locales are not given, languages and region are\n used to construct locales to load.\n :type languages: list\n\n :param locales:\n A list of codes of locales which are to be loaded,\n e.g. ['fr-PF', 'qu-EC', 'af-NA']\n :type locales: list\n\n :param region:\n A region code, e.g. 'IN', '001', 'NE'.\n If locales are not given, languages and region are\n used to construct locales to load.\n :type region: str|unicode\n\n :param use_given_order:\n If True, the returned mapping is ordered in the order locales are given.\n :type allow_redetect_language: bool\n\n :param allow_conflicting_locales:\n if True, locales with same language and different region can be loaded.\n :type allow_conflicting_locales: bool\n\n :yield: locale instances\n \"\"\"\n for arg_6, arg_7 in arg_0._load_data(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5):\n yield arg_7"} +{"_id": "doc_2748", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if tokens are valid tokens for the locale.\n\n :param tokens:\n a list of string or unicode tokens.\n :type tokens: list\n\n :return: True if tokens are valid, False otherwise.\n \"\"\"\n arg_2 = arg_0._get_match_relative_regex_cache()\n for arg_3 in arg_1:\n if any([arg_2.match(arg_3),\n arg_3 in arg_0, arg_3.isdigit()]):\n continue\n else:\n return False\n else:\n return True"} +{"_id": "doc_2749", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Attemps to parse time part of date strings like '1 day ago, 2 PM' \"\"\"\n arg_1 = PATTERN.sub('', arg_1)\n arg_1 = re.sub(r'\\b(?:ago|in)\\b', '', arg_1)\n try:\n return time_parser(arg_1)\n except:\n pass"} +{"_id": "doc_2750", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"\n Check if the locale is applicable to translate date string.\n\n :param date_string:\n A string representing date and/or time in a recognizably valid format.\n :type date_string: str|unicode\n\n :param strip_timezone:\n If True, timezone is stripped from date string.\n :type strip_timezone: bool\n\n :return: boolean value representing if the locale is applicable for the date string or not.\n \"\"\"\n if arg_2:\n arg_1, arg_4 = pop_tz_offset_from_string(arg_1, as_offset=False)\n\n arg_1 = arg_0._translate_numerals(arg_1)\n if arg_3.NORMALIZE:\n arg_1 = normalize_unicode(arg_1)\n arg_1 = arg_0._simplify(arg_1, arg_3=arg_3)\n arg_5 = arg_0._get_dictionary(arg_3)\n arg_6 = arg_5.split(arg_1)\n\n return arg_5.are_tokens_valid(arg_6)"} +{"_id": "doc_2751", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Parse with formats and return a dictionary with 'period' and 'obj_date'.\n\n :returns: :class:`datetime.datetime`, dict or None\n\n \"\"\"\n 
arg_3 = 'day'\n for arg_4 in arg_1:\n try:\n date_obj = datetime.strptime(arg_0, arg_4)\n except ValueError:\n continue\n else:\n # If format does not include the day, use last day of the month\n # instead of first, because the first is usually out of range.\n if '%d' not in arg_4:\n arg_3 = 'month'\n date_obj = date_obj.replace(\n day=get_last_day_of_month(date_obj.year, date_obj.month))\n\n if not ('%y' in arg_4 or '%Y' in arg_4):\n today = datetime.today()\n date_obj = date_obj.replace(year=today.year)\n\n date_obj = apply_timezone_from_settings(date_obj, arg_2)\n\n return {'date_obj': date_obj, 'period': arg_3}\n else:\n return {'date_obj': None, 'period': arg_3}"} +{"_id": "doc_2752", "title": "", "text": "def Func(arg_0):\n \"\"\"\n return ammo generator\n \"\"\"\n arg_1 = {\n 'phantom': missile.AmmoFileReader,\n 'slowlog': missile.SlowLogReader,\n 'line': missile.LineReader,\n 'uri': missile.UriReader,\n 'uripost': missile.UriPostReader,\n 'access': missile.AccessLogReader,\n 'caseline': missile.CaseLineReader,\n }\n if arg_0.uris and arg_0.ammo_file:\n raise StepperConfigurationError(\n 'Both uris and ammo file specified. You must specify only one of them'\n )\n elif arg_0.uris:\n arg_2 = missile.UriStyleGenerator(\n arg_0.uris, arg_0.headers, http_ver=arg_0.http_ver)\n elif arg_0.ammo_file:\n if arg_0.ammo_type in arg_1:\n if arg_0.ammo_type == 'phantom':\n arg_3 = resource.get_opener(arg_0.ammo_file)\n with arg_3(arg_0.use_cache) as ammo:\n try:\n if not ammo.next()[0].isdigit():\n arg_0.ammo_type = 'uri'\n arg_0.log.info(\n \"Setting ammo_type 'uri' because ammo is not started with digit and you did not specify ammo format\"\n )\n else:\n arg_0.log.info(\n \"Default ammo type ('phantom') used, use 'phantom.ammo_type' option to override it\"\n )\n except StopIteration:\n arg_0.log.exception(\n \"Couldn't read first line of ammo file\")\n raise AmmoFileError(\n \"Couldn't read first line of ammo file\")\n else:\n raise NotImplementedError(\n 'No such ammo type implemented: \"%s\"' % arg_0.ammo_type)\n arg_2 = arg_1[arg_0.ammo_type](\n arg_0.ammo_file, headers=arg_0.headers, http_ver=arg_0.http_ver, use_cache=arg_0.use_cache)\n else:\n raise StepperConfigurationError(\n 'Ammo not found. Specify uris or ammo file')\n arg_0.log.info(\"Using %s ammo reader\" % type(arg_2).__name__)\n return arg_2"} +{"_id": "doc_2753", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" translate http code to net code. 
if accertion failed, set net code to 314 \"\"\"\n if len(arg_0) <= 3:\n # FIXME: we're unable to use better logic here, because we should support non-http codes\n # but, we should look for core.util.HTTP or some other common logic\n # here\n if arg_1:\n return 0\n else:\n return 314\n\n arg_2 = arg_0.split(' ')[-1]\n if arg_2 in KNOWN_EXC.keys():\n return KNOWN_EXC[arg_2]\n else:\n logger.warning(\n \"Unknown Java exception, consider adding it to dictionary: %s\",\n arg_0)\n return 41"} +{"_id": "doc_2754", "title": "", "text": "def Func(arg_0):\n \"\"\" Generate phantom tool run config \"\"\"\n arg_1 = ''\n arg_2 = ''\n for arg_3 in arg_0.streams:\n arg_1 += arg_3.Func()\n if not arg_3.is_main:\n arg_2 += \" \" + \"benchmark_io%s\" % arg_3.sequence_no\n\n arg_4 = {}\n arg_4['threads'] = arg_0.threads\n arg_4['phantom_log'] = arg_0.phantom_log\n arg_4['stat_log'] = arg_0.stat_log\n arg_4['benchmarks_block'] = arg_1\n arg_4['stat_benchmarks'] = arg_2\n arg_4['additional_libs'] = arg_0.additional_libs\n arg_4['phantom_modules_path'] = arg_0.phantom_modules_path\n arg_5 = arg_0.core.mkstemp(\".conf\", \"phantom_\")\n arg_0.core.add_artifact_file(arg_5)\n logger.debug(\"Generating phantom config: %s\", arg_5)\n arg_6 = resource_string(__name__, \"config/phantom.conf.tpl\")\n arg_7 = string.Template(arg_6)\n arg_8 = arg_7.substitute(arg_4)\n\n with open(arg_5, 'w') as conffile:\n conffile.write(arg_8)\n return arg_5"} +{"_id": "doc_2755", "title": "", "text": "def Func(arg_0):\n \"\"\" get merged info about phantom conf \"\"\"\n arg_1 = copy.copy(arg_0.streams[0])\n arg_1.stat_log = arg_0.stat_log\n arg_1.steps = []\n arg_1.ammo_file = ''\n arg_1.rps_schedule = None\n arg_1.ammo_count = 0\n arg_1.duration = 0\n\n arg_1.instances = 0\n arg_1.loadscheme = []\n arg_1.loop_count = 0\n\n for arg_11 in arg_0.streams:\n arg_12 = 0\n logger.debug(\"Steps: %s\", arg_11.stepper_wrapper.steps)\n for arg_13 in arg_11.stepper_wrapper.steps:\n for arg_14 in range(0, arg_13[1]):\n if len(arg_1.steps) > arg_12:\n arg_1.steps[arg_12][0] += arg_13[0]\n else:\n arg_1.steps.append([arg_13[0], 1])\n arg_12 += 1\n\n if arg_1.rps_schedule:\n arg_1.rps_schedule = []\n else:\n arg_1.rps_schedule = arg_11.stepper_wrapper.loadscheme\n if arg_1.loadscheme:\n arg_1.loadscheme = ''\n else:\n # FIXME: add formatted load scheme for server:\n # \n # as a string\n arg_1.loadscheme = ''\n\n if arg_1.loop_count:\n arg_1.loop_count = u'0'\n else:\n arg_1.loop_count = arg_11.stepper_wrapper.loop_count\n\n arg_1.ammo_file += '{} '.format(arg_11.stepper_wrapper.ammo_file)\n arg_1.ammo_count += arg_11.stepper_wrapper.ammo_count\n arg_1.duration = max(\n arg_1.duration, arg_11.stepper_wrapper.duration)\n arg_1.instances += arg_11.instances\n\n if not arg_1.ammo_count:\n raise ValueError(\"Total ammo count cannot be zero\")\n return arg_1"} +{"_id": "doc_2756", "title": "", "text": "def Func(arg_0):\n \"\"\" compose benchmark block \"\"\"\n # step file\n arg_0.stepper_wrapper.prepare_stepper()\n arg_0.stpd = arg_0.stepper_wrapper.stpd\n if arg_0.stepper_wrapper.instances:\n arg_0.instances = arg_0.stepper_wrapper.instances\n\n if not arg_0.stpd:\n raise RuntimeError(\"Cannot proceed with no STPD file\")\n\n arg_3 = {}\n arg_3['sequence_no'] = arg_0.sequence_no\n if arg_0.ssl:\n arg_4 = ''\n arg_5 = ''\n arg_6 = \"transport_t ssl_transport = transport_ssl_t {\\n\" \\\n \" timeout = 1s\\n\" \\\n \" %s\\n\" \\\n \" %s}\\n\" \\\n \" transport = ssl_transport\"\n\n if arg_0.client_certificate or arg_0.client_key:\n arg_4 = 'auth_t def_auth = 
auth_t { key = \"%s\" cert = \"%s\"} auth = def_auth' \\\n % (arg_0.client_key, arg_0.client_certificate)\n if arg_0.client_cipher_suites:\n arg_5 = 'ciphers = \"%s\"' % arg_0.client_cipher_suites\n arg_3['ssl_transport'] = arg_6 % (arg_4, arg_5)\n else:\n arg_3['ssl_transport'] = \"\"\n arg_3['method_stream'] = arg_0.method_prefix + \\\n \"_ipv6_t\" if arg_0.ipv6 else arg_0.method_prefix + \"_ipv4_t\"\n arg_3['phout'] = arg_0.phout_file\n arg_3['answ_log'] = arg_0.answ_log\n arg_3['answ_log_level'] = arg_0.answ_log_level\n arg_3['comment_answ'] = \"# \" if arg_0.answ_log_level == 'none' else ''\n arg_3['stpd'] = arg_0.stpd\n arg_3['source_log_prefix'] = arg_0.source_log_prefix\n arg_3['method_options'] = arg_0.method_options\n if arg_0.tank_type:\n arg_3[\n 'proto'] = \"proto=http_proto%s\" % arg_0.sequence_no if arg_0.tank_type == 'http' else \"proto=none_proto\"\n arg_3['comment_proto'] = \"\"\n else:\n arg_3['proto'] = \"\"\n arg_3['comment_proto'] = \"#\"\n\n if arg_0.gatling:\n arg_3['bind'] = 'bind={ ' + arg_0.gatling + ' }'\n else:\n arg_3['bind'] = ''\n arg_3['ip'] = arg_0.resolved_ip\n arg_3['port'] = arg_0.port\n arg_3['timeout'] = arg_0.timeout\n arg_3['instances'] = arg_0.instances\n arg_7 = ''\n if arg_0.phantom_http_entity:\n arg_7 += \"entity = \" + arg_0.phantom_http_entity + \"\\n\"\n if arg_0.phantom_http_field:\n arg_7 += \"field = \" + arg_0.phantom_http_field + \"\\n\"\n if arg_0.phantom_http_field_num:\n arg_7 += \"field_num = {}\\n\".format(arg_0.phantom_http_field_num)\n if arg_0.phantom_http_line:\n arg_7 += \"line = \" + arg_0.phantom_http_line + \"\\n\"\n if arg_7:\n arg_3['reply_limits'] = 'reply_limits = {\\n' + arg_7 + \"}\"\n else:\n arg_3['reply_limits'] = ''\n\n if arg_0.is_main:\n arg_8 = 'phantom_benchmark_main.tpl'\n else:\n arg_8 = 'phantom_benchmark_additional.tpl'\n arg_9 = resource_string(\n __name__, \"config/\" + arg_8)\n arg_10 = string.Template(arg_9)\n arg_11 = arg_10.substitute(arg_3)\n\n return arg_11"} +{"_id": "doc_2757", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"\"):\n \"\"\"\n This function polls stdout and stderr streams and writes their contents\n to log\n \"\"\"\n arg_4 = select.select([arg_1], [], [], 0)[0]\n if arg_2:\n arg_5 = select.select([arg_2], [], [], 0)[0]\n else:\n arg_5 = []\n\n arg_0.debug(\"Selected: %s, %s\", arg_4, arg_5)\n\n for arg_6 in arg_4:\n arg_7 = arg_6.read()\n arg_4.remove(arg_6)\n if arg_7:\n arg_0.debug(\"%s stdout: %s\", arg_3, arg_7.strip())\n\n for arg_6 in arg_5:\n arg_7 = arg_6.read()\n arg_5.remove(arg_6)\n if arg_7:\n arg_0.warn(\"%s stderr: %s\", arg_3, arg_7.strip())"} +{"_id": "doc_2758", "title": "", "text": "def Func(arg_0, arg_1='s', arg_2=1):\n \"\"\"\n helper for above functions\n \"\"\"\n arg_3 = re.compile(r'(\\d+)([a-zA-Z]*)')\n arg_4 = arg_3.findall(arg_0)\n arg_5 = 0.0\n for arg_6, arg_7 in arg_4:\n arg_6 = int(arg_6)\n arg_7 = arg_7.lower()\n if arg_7 == '':\n arg_7 = arg_1\n\n if arg_7 == 'ms':\n arg_5 += arg_6 * 0.001\n continue\n elif arg_7 == 's':\n arg_5 += arg_6\n continue\n elif arg_7 == 'm':\n arg_5 += arg_6 * 60\n continue\n elif arg_7 == 'h':\n arg_5 += arg_6 * 60 * 60\n continue\n elif arg_7 == 'd':\n arg_5 += arg_6 * 60 * 60 * 24\n continue\n elif arg_7 == 'w':\n arg_5 += arg_6 * 60 * 60 * 24 * 7\n continue\n else:\n raise ValueError(\n \"String contains unsupported unit %s: %s\" % (arg_7, arg_0))\n return int(arg_5 * arg_2)"} +{"_id": "doc_2759", "title": "", "text": "def Func(arg_0):\n '''\n Read stepper info from json\n '''\n arg_0.log.debug(\"Reading 
cached stepper info: %s\", arg_0.__si_filename())\n with open(arg_0.__si_filename(), 'r') as si_file:\n arg_1 = info.StepperInfo(**json.load(si_file))\n return arg_1"} +{"_id": "doc_2760", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Write stepper info to json\n '''\n arg_0.log.debug(\"Saving stepper info: %s\", arg_0.__si_filename())\n with open(arg_0.__si_filename(), 'w') as si_file:\n json.dump(arg_1._asdict(), si_file, indent=4)"} +{"_id": "doc_2761", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create Load Plan as defined in schedule. Publish info about its duration.\n \"\"\"\n if len(arg_0) > 1:\n arg_1 = Composite(\n [StepFactory.produce(step_config) for step_config in arg_0])\n else:\n arg_1 = StepFactory.produce(arg_0[0])\n arg_2.status.publish('duration', arg_1.get_duration() / 1000)\n arg_2.status.publish('steps', arg_1.get_rps_list())\n arg_2.status.lp_len = len(arg_1)\n return arg_1"} +{"_id": "doc_2762", "title": "", "text": "def Func(arg_0, arg_1):\n '''Return rps for second t'''\n if 0 <= arg_1 <= arg_0.duration:\n return arg_0.minrps + \\\n float(arg_0.maxrps - arg_0.minrps) * arg_1 / arg_0.duration\n else:\n return 0"} +{"_id": "doc_2763", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Execute and check exit code\n \"\"\"\n arg_0.log.info(\"Executing: %s\", arg_1)\n arg_2 = Func(\n arg_1, shell=True, poll_period=0.1, catch_out=arg_0.catch_out)[0]\n if arg_2:\n raise RuntimeError(\"Subprocess returned %s\" % arg_2)\n return arg_2"} +{"_id": "doc_2764", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n The reason why we have two separate methods for monitoring\n and aggregates is a strong difference in incoming data.\n \"\"\"\n arg_2 = list()\n for arg_3 in arg_1:\n for arg_4, arg_5 in arg_3[\"data\"].iteritems():\n arg_2.append(\n arg_0.__make_points(\n \"monitoring\",\n {\"host\": arg_4, \"comment\": arg_5.get(\"comment\")},\n arg_3[\"timestamp\"],\n {\n arg_6: arg_7\n for arg_6, arg_7 in arg_5[\"metrics\"].iteritems()\n }\n )\n )\n return arg_2"} +{"_id": "doc_2765", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"x\n Make a set of points for `this` label\n\n overall_quantiles, overall_meta, net_codes, proto_codes, histograms\n \"\"\"\n arg_6 = list()\n\n arg_6.extend(\n (\n # overall quantiles for label\n arg_0.__make_points(\n arg_4 + \"overall_quantiles\",\n {\"label\": arg_3},\n arg_1,\n arg_0.__make_quantile_fields(arg_2)\n ),\n # overall meta (gun status) for label\n arg_0.__make_points(\n arg_4 + \"overall_meta\",\n {\"label\": arg_3},\n arg_1,\n arg_0.__make_overall_meta_fields(arg_2, arg_5)\n ),\n # net codes for label\n arg_0.__make_points(\n arg_4 + \"net_codes\",\n {\"label\": arg_3},\n arg_1,\n arg_0.__make_netcodes_fields(arg_2)\n ),\n # proto codes for label\n arg_0.__make_points(\n arg_4 + \"proto_codes\",\n {\"label\": arg_3},\n arg_1,\n arg_0.__make_protocodes_fields(arg_2)\n )\n )\n )\n # histograms, one row for each bin\n if arg_0.histograms:\n for arg_7, arg_8 in zip(arg_2[\"interval_real\"][\"hist\"][\"bins\"],\n arg_2[\"interval_real\"][\"hist\"][\"data\"]):\n arg_6.append(\n arg_0.__make_points(\n arg_4 + \"histograms\",\n {\"label\": arg_3},\n arg_1,\n {\"bin\": arg_7, \"count\": arg_8}\n )\n )\n return arg_6"} +{"_id": "doc_2766", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A feeder that runs in distinct thread in main process.\n \"\"\"\n arg_0.plan = StpdReader(arg_0.stpd_filename)\n if arg_0.cached_stpd:\n arg_0.plan = list(arg_0.plan)\n for arg_2 in arg_0.plan:\n if 
arg_0.quit.is_set():\n logger.info(\"Stop feeding: gonna quit\")\n return\n # try putting a task to a queue unless there is a quit flag\n # or all workers have exited\n while True:\n try:\n arg_0.task_queue.put(arg_2, timeout=1)\n break\n except Full:\n if arg_0.quit.is_set() or arg_0.workers_finished:\n return\n else:\n continue\n arg_3 = arg_0.instances\n logger.info(\n \"Feeded all data. Publishing %d killer tasks\" % (arg_3))\n arg_4 = 1\n for arg_5 in range(5):\n try:\n [\n arg_0.task_queue.put(None, timeout=1)\n for arg_5 in xrange(0, arg_3)\n ]\n break\n except Full:\n logger.debug(\n \"Couldn't post killer tasks\"\n \" because queue is full. Retrying in %ss\", arg_4)\n time.sleep(arg_4)\n arg_4 *= 2\n\n try:\n logger.info(\"Waiting for workers\")\n map(lambda x: x.join(), arg_0.pool)\n logger.info(\"All workers exited.\")\n arg_0.workers_finished = True\n except (KeyboardInterrupt, SystemExit):\n arg_0.task_queue.close()\n arg_0.results.close()\n arg_0.quit.set()\n logger.info(\"Going to quit. Waiting for workers\")\n map(lambda x: x.join(), arg_0.pool)\n arg_0.workers_finished = True"} +{"_id": "doc_2767", "title": "", "text": "def Func(arg_0, arg_1=\"tank.log\"):\n \"\"\" Set up logging \"\"\"\n arg_2 = logging.getLogger('')\n arg_0.log_filename = arg_1\n arg_0.core.add_artifact_file(arg_0.log_filename)\n\n arg_3 = logging.FileHandler(arg_0.log_filename)\n arg_3.setLevel(logging.DEBUG)\n arg_3.setFormatter(\n logging.Formatter(\n \"%(asctime)s [%(levelname)s] %(name)s %(message)s\"))\n arg_2.addHandler(arg_3)\n arg_4 = logging.StreamHandler(sys.stdout)\n arg_5 = logging.StreamHandler(sys.stderr)\n\n # fmt_verbose = logging.Formatter(\n # \"%(asctime)s [%(levelname)s] %(name)s %(message)s\")\n arg_6 = logging.Formatter(\n \"%(asctime)s %(levelname)s: %(message)s\", \"%H:%M:%S\")\n\n arg_4.setLevel(logging.INFO)\n arg_4.setFormatter(arg_6)\n arg_5.setFormatter(arg_6)\n\n arg_7 = SingleLevelFilter(logging.ERROR, True)\n arg_8 = SingleLevelFilter(logging.WARNING, True)\n arg_9 = SingleLevelFilter(logging.CRITICAL, True)\n arg_4.addFilter(arg_7)\n arg_4.addFilter(arg_8)\n arg_4.addFilter(arg_9)\n arg_2.addHandler(arg_4)\n\n arg_10 = SingleLevelFilter(logging.INFO, True)\n arg_11 = SingleLevelFilter(logging.DEBUG, True)\n arg_5.addFilter(arg_10)\n arg_5.addFilter(arg_11)\n arg_2.addHandler(arg_5)"} +{"_id": "doc_2768", "title": "", "text": "def Func(arg_0):\n \"\"\" override config options with user specified options\"\"\"\n if arg_0.options.get('user_options', None):\n arg_0.core.apply_shorthand_options(arg_0.options['user_options'])"} +{"_id": "doc_2769", "title": "", "text": "def Func(arg_0):\n \"\"\" call shutdown routines \"\"\"\n arg_1 = 1\n arg_0.log.info(\"Trying to shutdown gracefully...\")\n arg_1 = arg_0.core.plugins_end_test(arg_1)\n arg_1 = arg_0.core.plugins_post_process(arg_1)\n arg_0.log.info(\"Done graceful shutdown\")\n return arg_1"} +{"_id": "doc_2770", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Collect data, cache it and send to listeners\n \"\"\"\n arg_2 = get_nowait_from_queue(arg_0.results)\n arg_3 = get_nowait_from_queue(arg_0.stats_results)\n logger.debug(\"Data timestamps: %s\" % [arg_4.get('ts') for arg_4 in arg_2])\n logger.debug(\"Stats timestamps: %s\" % [arg_4.get('ts') for arg_4 in arg_3])\n for arg_5 in arg_2:\n arg_6 = arg_5['ts']\n if arg_6 in arg_0.stat_cache:\n # send items\n arg_7 = arg_5\n arg_8 = arg_0.stat_cache.pop(arg_6)\n arg_0.__notify_listeners(arg_7, arg_8)\n else:\n arg_0.data_cache[arg_6] = arg_5\n for arg_5 in 
arg_3:\n arg_6 = arg_5['ts']\n if arg_6 in arg_0.data_cache:\n # send items\n arg_7 = arg_0.data_cache.pop(arg_6)\n arg_8 = arg_5\n arg_0.__notify_listeners(arg_7, arg_8)\n else:\n arg_0.stat_cache[arg_6] = arg_5\n if arg_1 and len(arg_0.data_cache) > 0:\n logger.info('Timestamps without stats:')\n for arg_6, arg_7 in sorted(arg_0.data_cache.items(), key=lambda i: i[0]):\n logger.info(arg_6)\n arg_0.__notify_listeners(arg_7, StatsReader.stats_item(arg_6, 0, 0))"} +{"_id": "doc_2771", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''\n Returns a marker function of the requested marker_type\n\n >>> marker = Func('uniq')(__test_missile)\n >>> type(marker)\n <type 'str'>\n >>> len(marker)\n 32\n\n >>> Func('uri')(__test_missile)\n '_example_search_hello_help_us'\n\n >>> marker = Func('non-existent')(__test_missile)\n Traceback (most recent call last):\n ...\n NotImplementedError: No such marker: \"non-existent\"\n\n >>> Func('3')(__test_missile)\n '_example_search_hello'\n\n >>> marker = Func('3', True)\n >>> marker(__test_missile)\n '_example_search_hello#0'\n >>> marker(__test_missile)\n '_example_search_hello#1'\n '''\n try:\n arg_2 = int(arg_0)\n if arg_2:\n arg_3 = __UriMarker(arg_2)\n else:\n\n def arg_3(arg_4):\n return ''\n except ValueError:\n if arg_0 in __markers:\n arg_3 = __markers[arg_0]\n else:\n raise NotImplementedError('No such marker: \"%s\"' % arg_0)\n\n # todo: fix u'False'\n if arg_1:\n arg_3 = __Enumerator(arg_3)\n return arg_3"} +{"_id": "doc_2772", "title": "", "text": "def Func(arg_0):\n '''\n Parse duration string, such as '3h2m3s' into milliseconds\n\n >>> Func('3h2m3s')\n 10923000\n\n >>> Func('0.3s')\n 300\n\n >>> Func('5')\n 5000\n '''\n arg_1 = re.compile(\"([0-9.]+)([dhms]?)\")\n\n def parse_token(arg_2, arg_3):\n arg_4 = {\n 'd': 86400,\n 'h': 3600,\n 'm': 60,\n 's': 1,\n }\n if arg_3:\n if arg_3 in arg_4:\n return int(float(arg_2) * arg_4[arg_3] * 1000)\n else:\n raise StepperConfigurationError(\n 'Failed to parse duration: %s' % arg_0)\n else:\n return int(float(arg_2) * 1000)\n\n return sum(parse_token(*arg_5) for arg_5 in arg_1.findall(arg_0))"} +{"_id": "doc_2773", "title": "", "text": "def Func(arg_0):\n \"\"\"Start remote agent\"\"\"\n logger.info('Starting agent: %s', arg_0.host)\n arg_1 = \"{python} {agent_path} --telegraf {telegraf_path} --host {host} {kill_old}\".format(\n python=arg_0.python,\n agent_path=os.path.join(\n arg_0.path['AGENT_REMOTE_FOLDER'],\n arg_0.AGENT_FILENAME),\n telegraf_path=arg_0.path['TELEGRAF_REMOTE_PATH'],\n host=arg_0.host,\n kill_old=arg_0.kill_old)\n logger.debug('Command to Func agent: %s', arg_1)\n arg_0.session = arg_0.ssh.async_session(arg_1)\n arg_0.reader_thread = threading.Thread(target=arg_0.read_buffer)\n arg_0.reader_thread.setDaemon(True)\n return arg_0.session"} +{"_id": "doc_2774", "title": "", "text": "def Func(arg_0):\n \"\"\"Searching for line in jmeter.log such as\n Waiting for possible shutdown message on port 4445\n \"\"\"\n arg_1 = re.compile(arg_0.DISCOVER_PORT_PATTERN)\n with open(arg_0.process_stderr.name, 'r') as f:\n arg_2 = 0\n while arg_0.process.pid and arg_2 < 10:\n arg_3 = f.readline()\n arg_4 = arg_1.match(arg_3)\n if arg_4 is None:\n arg_2 += 1\n time.sleep(1)\n else:\n arg_5 = int(arg_4.group('port'))\n return arg_5\n else:\n logger.warning('JMeter UDP port wasn\\'t discovered')\n return None"} +{"_id": "doc_2775", "title": "", "text": "def Func(arg_0):\n \"\"\"Gracefull termination of running process\"\"\"\n\n if arg_0.__stderr_file:\n arg_0.__stderr_file.close()\n\n if not 
arg_0.__process:\n return\n\n arg_1 = time.time() + _PROCESS_KILL_TIMEOUT\n while time.time() < arg_1:\n try:\n arg_0.__process.terminate()\n except EnvironmentError as e:\n if e.errno != errno.ESRCH:\n _LOGGER.warning(\"Failed to terminate process '{}': {}\".format(arg_0.__cmd, e))\n return\n time.sleep(0.1)\n\n try:\n arg_0.__process.kill()\n except EnvironmentError as e:\n if e.errno != errno.ESRCH:\n _LOGGER.warning(\"Failed to kill process '{}': {}\".format(arg_0.__cmd, e))\n return"} +{"_id": "doc_2776", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse lines and return stats\n \"\"\"\n\n arg_2 = []\n for arg_3 in arg_1:\n arg_4, arg_5, arg_6 = arg_3.split(\"\\t\")\n arg_7 = int(float(arg_4)) # We allow floats here, but tank expects only seconds\n if arg_0.__last_ts < arg_7:\n arg_0.__last_ts = arg_7\n arg_2.append(arg_0.stats_item(arg_0.__last_ts, float(arg_5), float(arg_6)))\n return arg_2"} +{"_id": "doc_2777", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" instantiate criterion from config string \"\"\"\n arg_2 = arg_1.split(\"(\")\n arg_3 = arg_2[0].strip().lower()\n arg_2[1] = arg_2[1].split(\")\")[0].strip()\n\n for arg_4 in arg_0.custom_criterions:\n if arg_4.get_type_string() == arg_3:\n return arg_4(arg_0, arg_2[1])\n raise ValueError(\n \"Unsupported autostop criterion type: %s\" % arg_1)"} +{"_id": "doc_2778", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Prepare config data.\"\"\"\n try:\n arg_3 = arg_0.parse_xml(arg_1)\n except IOError as exc:\n logger.error(\"Error loading config: %s\", exc)\n raise RuntimeError(\"Can't read monitoring config %s\" % arg_1)\n arg_4 = arg_3.findall('Host')\n arg_5 = []\n for arg_6 in arg_4:\n arg_7 = arg_0.get_host_config(arg_6, arg_2)\n arg_5.append(arg_7)\n return arg_5"} +{"_id": "doc_2779", "title": "", "text": "def Func(arg_0):\n ''' raise exception on disk space exceeded '''\n arg_1 = \"sh -c \\\"df --no-sync -m -P -l -x fuse -x tmpfs -x devtmpfs -x davfs -x nfs \"\n arg_1 += arg_0.core.artifacts_base_dir\n arg_1 += \" | tail -n 1 | awk '{print \\$4}' \\\"\"\n arg_2 = execute(arg_1, True, 0.1, True)\n logging.debug(\"Result: %s\", arg_2)\n if not len(arg_2[1]):\n arg_0.log.debug(\"No disk usage info: %s\", arg_2[2])\n return\n arg_3 = arg_2[1]\n arg_0.log.debug(\n \"Disk free space: %s/%s\", arg_3.strip(), arg_0.disk_limit)\n if int(arg_3.strip()) < arg_0.disk_limit:\n raise RuntimeError(\n \"Not enough local resources: disk space less than %sMB in %s: %sMB\"\n % (\n arg_0.disk_limit, arg_0.core.artifacts_base_dir,\n int(arg_3.strip())))"} +{"_id": "doc_2780", "title": "", "text": "def Func(arg_0):\n ''' raise exception on RAM exceeded '''\n arg_1 = psutil.virtual_memory().available / 2**20\n arg_0.log.debug(\"Memory free: %s/%s\", arg_1, arg_0.mem_limit)\n if arg_1 < arg_0.mem_limit:\n raise RuntimeError(\n \"Not enough resources: free memory less \"\n \"than %sMB: %sMB\" % (arg_0.mem_limit, arg_1))"} +{"_id": "doc_2781", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Gets next line for right panel '''\n arg_2 = ''\n if arg_1:\n arg_2 = arg_1.pop(0)\n if len(arg_2) > arg_0.right_panel_width:\n arg_3 = arg_0.markup.clean_markup(arg_2)\n if len(arg_3) > arg_0.right_panel_width:\n arg_2 = arg_2[:arg_0.right_panel_width] + arg_0.markup.RESET\n return arg_2"} +{"_id": "doc_2782", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Cut tuple of line chunks according to it's wisible lenght '''\n def is_space(arg_3):\n return all([True if arg_4 == ' ' else False for arg_4 in arg_3])\n\n def 
is_empty(arg_5, arg_6):\n arg_7 = []\n for arg_3 in arg_5:\n if arg_3 in arg_6:\n arg_7.append(True)\n elif is_space(arg_3):\n arg_7.append(True)\n else:\n arg_7.append(False)\n return all(arg_7)\n arg_8 = arg_2\n arg_7 = ''\n arg_6 = arg_0.markup.get_markup_vars()\n for arg_9, arg_3 in enumerate(arg_1):\n if arg_3 in arg_6:\n arg_7 += arg_3\n else:\n if arg_8 > 0:\n if len(arg_3) <= arg_8:\n arg_7 += arg_3\n arg_8 -= len(arg_3)\n else:\n arg_10 = (arg_3[arg_8:],) + arg_1[arg_9 + 1:]\n arg_11 = not is_empty(arg_10, arg_6)\n if arg_11:\n arg_7 += arg_3[:arg_8 - 1] + arg_0.markup.RESET + u'\\u2026'\n else:\n arg_7 += arg_3[:arg_8]\n arg_8 = 0\n return arg_7"} +{"_id": "doc_2783", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Right-pad lines of block to equal width '''\n arg_2 = []\n arg_3 = max([arg_0.clean_len(arg_4) for arg_4 in arg_1])\n for arg_4 in arg_1:\n arg_5 = ' ' * (arg_3 - arg_0.clean_len(arg_4))\n arg_2.append(arg_4 + (arg_0.screen.markup.RESET, arg_5))\n return (arg_3, arg_2)"} +{"_id": "doc_2784", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Calculate wisible length of string '''\n if isinstance(arg_1, basestring):\n return len(arg_0.screen.markup.clean_markup(arg_1))\n elif isinstance(arg_1, tuple) or isinstance(arg_1, list):\n arg_2 = arg_0.screen.markup.get_markup_vars()\n arg_3 = 0\n for arg_4 in arg_1:\n if arg_4 not in arg_2:\n arg_3 += len(arg_4)\n return arg_3"} +{"_id": "doc_2785", "title": "", "text": "def Func(arg_0):\n '''\n Creates load plan timestamps generator\n\n >>> from util import take\n\n >>> take(7, LoadPlanBuilder().ramp(5, 4000).Func())\n [0, 1000, 2000, 3000, 4000, 0, 0]\n\n >>> take(7, Func(['ramp(5, 4s)']))\n [0, 1000, 2000, 3000, 4000, 0, 0]\n\n >>> take(12, Func(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']))\n [0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]\n\n >>> take(7, Func(['wait(5s)', 'ramp(5, 0)']))\n [5000, 5000, 5000, 5000, 5000, 0, 0]\n\n >>> take(7, Func([]))\n [0, 0, 0, 0, 0, 0, 0]\n\n >>> take(12, Func(['line(1, 9, 4s)']))\n [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]\n\n >>> take(12, Func(['const(3, 5s)', 'line(7, 11, 2s)']))\n [0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]\n\n >>> take(12, Func(['step(2, 10, 2, 3s)']))\n [0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]\n\n >>> take(12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps)\n [(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]\n\n >>> take(12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps)\n [(100, 30), (200, 30), (300, 30), (400, 30), (500, 30), (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)]\n\n >>> LoadPlanBuilder().stairway(100, 950, 100, 30000).instances\n 950\n\n >>> LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).instances\n 10\n\n >>> LoadPlanBuilder().line(1, 100, 60000).instances\n 100\n '''\n arg_1 = LoadPlanBuilder().add_all_steps(arg_0)\n arg_2 = arg_1.Func()\n info.status.publish('duration', 0)\n # info.status.publish('steps', lpb.steps)\n info.status.publish('steps', [])\n info.status.publish('instances', arg_1.instances)\n return arg_2"} +{"_id": "doc_2786", "title": "", "text": "def Func(arg_0):\n ''' format level str '''\n if arg_0.is_relative:\n arg_1 = str(arg_0.level) + \"%\"\n else:\n arg_1 = arg_0.level\n return arg_1"} +{"_id": "doc_2787", "title": "", "text": "def Func(arg_0, arg_1):\n ''' add right panel widget '''\n if not arg_0.screen:\n arg_0.log.debug(\"No screen instance to add widget\")\n else:\n arg_0.screen.Func(arg_1)"} +{"_id": 
"doc_2788", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=\"POST\",\n arg_4=False):\n '''\n Send request to writer service.\n '''\n arg_5 = requests.Request(\n arg_3,\n arg_0.writer_url,\n arg_1=arg_1,\n arg_2=arg_2,\n headers={\n 'User-Agent': arg_0.user_agent})\n arg_6 = id_gen(str(uuid.uuid4()))\n arg_7 = arg_0.network_timeouts()\n arg_8 = arg_0.maintenance_timeouts()\n while True:\n try:\n arg_9 = arg_0.__send_single_request(arg_5, arg_6.next(), arg_4=arg_4)\n return arg_9\n except (Timeout, ConnectionError, ProtocolError):\n logger.warn(traceback.format_exc())\n try:\n arg_10 = next(arg_7)\n logger.warn(\n \"Network error, will retry in %ss...\" %\n arg_10)\n time.sleep(arg_10)\n continue\n except StopIteration:\n raise arg_0.NetworkError()\n except arg_0.UnderMaintenance as e:\n try:\n arg_10 = next(arg_8)\n logger.warn(\n \"Writer is under maintenance, will retry in %ss...\" %\n arg_10)\n time.sleep(arg_10)\n continue\n except StopIteration:\n raise e"} +{"_id": "doc_2789", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Tells core to take plugin options and instantiate plugin classes\n \"\"\"\n logger.info(\"Loading plugins...\")\n for (arg_1, arg_2, arg_3) in arg_0.config.plugins:\n logger.debug(\"Loading plugin %s from %s\", arg_1, arg_2)\n if arg_2 == \"yandextank.plugins.Overload\":\n logger.warning(\n \"Deprecated plugin name: 'yandextank.plugins.Overload'\\n\"\n \"There is a new generic plugin now.\\n\"\n \"Correcting to 'yandextank.plugins.DataUploader overload'\")\n arg_2 = \"yandextank.plugins.DataUploader overload\"\n try:\n arg_4 = il.import_module(arg_2)\n except ImportError:\n logger.warning('Plugin name %s path %s import error', arg_1, arg_2)\n logger.debug('Plugin name %s path %s import error', arg_1, arg_2, exc_info=True)\n raise\n try:\n arg_5 = getattr(arg_4, 'Plugin')(arg_0, cfg=arg_3, name=arg_1)\n except AttributeError:\n logger.warning('Plugin %s classname should be `Plugin`', arg_1)\n raise\n else:\n arg_0.register_plugin(arg_0.PLUGIN_PREFIX + arg_1, arg_5)\n logger.debug(\"Plugin instances: %s\", arg_0._plugins)"} +{"_id": "doc_2790", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve a plugin of desired class, KeyError raised otherwise\n \"\"\"\n logger.debug(\"Searching for plugin: %s\", arg_1)\n arg_2 = [plugin for plugin in arg_0.plugins.values() if isinstance(plugin, arg_1)]\n if arg_2:\n if len(arg_2) > 1:\n logger.debug(\n \"More then one plugin of type %s found. 
Using first one.\",\n arg_1)\n return arg_2[-1]\n else:\n raise KeyError(\"Requested plugin type not found: %s\" % arg_1)"} +{"_id": "doc_2791", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve a list of plugins of desired class, KeyError raised otherwise\n \"\"\"\n logger.debug(\"Searching for plugins: %s\", arg_1)\n arg_2 = [plugin for plugin in arg_0.plugins.values() if isinstance(plugin, arg_1)]\n if arg_2:\n return arg_2\n else:\n raise KeyError(\"Requested plugin type not found: %s\" % arg_1)"} +{"_id": "doc_2792", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Move or copy single file to artifacts dir\n \"\"\"\n arg_3 = arg_0.artifacts_dir + '/' + os.path.basename(arg_1)\n logger.debug(\"Collecting file: %s to %s\", arg_1, arg_3)\n if not arg_1 or not os.path.exists(arg_1):\n logger.warning(\"File not found to collect: %s\", arg_1)\n return\n\n if os.path.exists(arg_3):\n # FIXME: 3 find a way to store artifacts anyway\n logger.warning(\"File already exists: %s\", arg_3)\n return\n\n if arg_2:\n shutil.copy(arg_1, arg_0.artifacts_dir)\n else:\n shutil.move(arg_1, arg_0.artifacts_dir)\n\n os.chmod(arg_3, 0o644)"} +{"_id": "doc_2793", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Add file to be stored as result artifact on post-process phase\n \"\"\"\n if arg_1:\n logger.debug(\n \"Adding artifact file to collect (keep=%s): %s\", arg_2,\n arg_1)\n arg_0.artifact_files[arg_1] = arg_2"} +{"_id": "doc_2794", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Generate temp file name in artifacts base dir\n and close temp file handle\n \"\"\"\n if not arg_3:\n arg_3 = arg_0.artifacts_dir\n arg_4, arg_5 = tempfile.Func(arg_1, arg_2, arg_3)\n os.close(arg_4)\n os.chmod(arg_5, 0o644) # FIXME: chmod to parent dir's mode?\n return arg_5"} +{"_id": "doc_2795", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Read configs set into storage \"\"\"\n logger.debug(\"Reading configs: %s\", arg_1)\n arg_2 = [resource.resource_filename(config) for config in arg_1]\n try:\n arg_0.config.read(arg_2)\n except Exception as ex:\n logger.error(\"Can't load configs: %s\", ex)\n raise ex"} +{"_id": "doc_2796", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Flush current stat to file \"\"\"\n if not arg_1:\n arg_1 = arg_0.file\n\n if arg_1:\n with open(arg_1, 'w') as handle:\n arg_0.config.write(handle)"} +{"_id": "doc_2797", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" return sections with specified prefix \"\"\"\n arg_2 = []\n for arg_3 in arg_0.config.sections():\n if arg_3.startswith(arg_1):\n arg_2.append(arg_3)\n return arg_2"} +{"_id": "doc_2798", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return all items found in this chunk\n \"\"\"\n for arg_2, arg_3 in arg_1.iteritems():\n arg_4 = datetime.datetime.strptime(\n arg_2.split(\".\")[0], '%Y-%m-%d %H:%M:%S')\n arg_5 = int(time.mktime(arg_4.timetuple()))\n arg_6 = 0\n for arg_7, arg_8 in arg_3.iteritems():\n if not arg_7.startswith(\"benchmark_io\"):\n continue\n for arg_9, arg_10 in arg_8.iteritems():\n if \"mmtasks\" in arg_10:\n arg_6 += arg_10[\"mmtasks\"][2]\n\n arg_11 = arg_5 - 1 - arg_0.start_time\n arg_12 = 0\n if 0 <= arg_11 < len(arg_0.phantom_info.steps):\n arg_12 = arg_0.phantom_info.steps[arg_11][0]\n yield arg_0.stats_item(arg_5 - 1, arg_6, arg_12)"} +{"_id": "doc_2799", "title": "", "text": "def Func(arg_0):\n \"\"\" returns info object \"\"\"\n if not arg_0.cached_info:\n if not arg_0.phantom:\n return None\n 
arg_0.cached_info = arg_0.phantom.Func()\n return arg_0.cached_info"} +{"_id": "doc_2800", "title": "", "text": "def Func(arg_0):\n \"\"\"Prepare for monitoring - install agents etc\"\"\"\n\n # Parse config\n arg_1 = []\n if arg_0.config:\n arg_1 = arg_0.config_manager.getconfig(\n arg_0.config, arg_0.default_target)\n\n # Creating agent for hosts\n for arg_2 in arg_1:\n if arg_2['host'] in ['localhost', '127.0.0.1', '::1']:\n arg_3 = arg_0.clients['localhost'](\n arg_2, arg_0.old_style_configs, kill_old=arg_0.kill_old)\n else:\n arg_3 = arg_0.clients['ssh'](\n arg_2, arg_0.old_style_configs, timeout=5, kill_old=arg_0.kill_old)\n logger.debug('Installing monitoring agent. Host: %s', arg_3.host)\n arg_4, arg_5, arg_6 = arg_3.install()\n if arg_4:\n arg_0.agents.append(arg_3)\n arg_0.artifact_files.append(arg_4)\n if arg_5:\n arg_0.artifact_files.append(arg_5)\n if arg_6:\n arg_0.artifact_files.append(arg_6)"} +{"_id": "doc_2801", "title": "", "text": "def Func(arg_0):\n \"\"\" Poll agents for data\n \"\"\"\n arg_1 = time.time()\n for arg_2 in arg_0.agents:\n for arg_3 in arg_2.reader:\n # don't crush if trash or traceback came from agent to stdout\n if not arg_3:\n return 0\n for arg_4 in arg_3:\n arg_5, arg_6 = arg_4\n if arg_0.load_start_time and int(\n arg_5) >= arg_0.load_start_time:\n arg_7 = {\n \"timestamp\": int(arg_5),\n \"data\": {\n arg_0.hash_hostname(arg_2.host): {\n \"comment\": arg_2.config.comment,\n \"metrics\": arg_6\n }\n }\n }\n arg_0.__collected_data.append(arg_7)\n\n logger.debug(\n 'Polling/decoding agents data took: %.2fms',\n (time.time() - arg_1) * 1000)\n\n arg_8 = len(arg_0.__collected_data)\n\n if not arg_0.first_data_received and arg_0.__collected_data:\n arg_0.first_data_received = True\n logger.info(\"Monitoring received first data.\")\n else:\n arg_0.send_collected_data()\n return arg_8"} +{"_id": "doc_2802", "title": "", "text": "def Func(arg_0):\n \"\"\"sends pending data set to listeners\"\"\"\n arg_1 = arg_0.__collected_data\n arg_0.__collected_data = []\n for arg_3 in arg_0.listeners:\n # deep copy to ensure each listener gets it's own copy\n arg_3.monitoring_data(copy.deepcopy(arg_1))"} +{"_id": "doc_2803", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n decode agents jsons, count diffs\n \"\"\"\n arg_2 = []\n if arg_1:\n for arg_3 in arg_1.split('\\n'):\n try:\n if arg_3:\n arg_4 = {}\n arg_5 = json.loads(arg_3)\n for arg_6, arg_7 in arg_5.iteritems():\n for arg_8, arg_9 in arg_7.iteritems():\n # key sample: diskio-sda1_io_time\n # key_group sample: diskio\n # key_name sample: io_time\n try:\n arg_10, arg_11 = arg_8.split('_')[0].split('-')[0], '_'.join(arg_8.split('_')[1:])\n except: # noqa: E722\n arg_10, arg_11 = arg_8.split('_')[0], '_'.join(arg_8.split('_')[1:])\n if arg_10 in decoder.diff_metrics.keys():\n if arg_11 in decoder.diff_metrics[arg_10]:\n arg_12 = decoder.find_common_names(\n arg_8)\n if arg_0.prev_check:\n try:\n arg_9 = arg_5[arg_6][arg_8] - \\\n arg_0.prev_check[arg_8]\n except KeyError:\n logger.debug(\n 'There is no diff value for metric %s.\\n'\n 'Timestamp: %s. 
Is it initial data?', arg_8, arg_6, exc_info=True)\n arg_9 = 0\n arg_4[arg_12] = arg_9\n else:\n arg_12 = decoder.find_common_names(\n arg_8)\n arg_4[arg_12] = arg_9\n else:\n arg_12 = decoder.find_common_names(\n arg_8)\n arg_4[arg_12] = arg_9\n arg_0.prev_check = arg_5[arg_6]\n arg_2.append((arg_6, arg_4))\n except ValueError:\n logger.error(\n 'Telegraf agent send trash to output: %s', arg_3)\n logger.debug(\n 'Telegraf agent data block w/ trash: %s',\n exc_info=True)\n return []\n except BaseException:\n logger.error(\n 'Exception trying to parse agent data: %s',\n arg_3,\n exc_info=True)\n return []\n if arg_2:\n return arg_2"} +{"_id": "doc_2804", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''\n Perform one request, possibly raising RetryException in the case\n the response is 429. Otherwise, if error text contain \"code\" string,\n then it decodes to json object and returns APIError.\n Returns the body json in the 200 status.\n '''\n arg_5 = arg_0._retry_codes\n arg_6 = arg_0._session.request(arg_1, arg_2, **arg_3)\n try:\n arg_6.raise_for_status()\n except HTTPError as http_error:\n # retry if we hit Rate Limit\n if arg_6.status_code in arg_5 and arg_4 > 0:\n raise RetryException()\n if 'code' in arg_6.text:\n arg_7 = arg_6.json()\n if 'code' in arg_7:\n raise APIError(arg_7, http_error)\n else:\n raise\n if arg_6.text != '':\n return arg_6.json()\n return None"} +{"_id": "doc_2805", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6=None, arg_7=None, arg_8=None):\n '''Request a new order'''\n arg_9 = {\n 'symbol': arg_1,\n 'qty': arg_2,\n 'side': arg_3,\n 'type': arg_4,\n 'time_in_force': arg_5,\n }\n if arg_6 is not None:\n arg_9['limit_price'] = arg_6\n if arg_7 is not None:\n arg_9['stop_price'] = arg_7\n if arg_8 is not None:\n arg_9['client_order_id'] = arg_8\n arg_10 = arg_0.post('/orders', arg_9)\n return Order(arg_10)"} +{"_id": "doc_2806", "title": "", "text": "def Func(arg_0, arg_1):\n '''Get an order'''\n arg_2 = arg_0.get('/orders/{}'.format(arg_1))\n return Order(arg_2)"} +{"_id": "doc_2807", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Result may be a python dictionary, array or a primitive type\n that can be converted to JSON for writing back the result.\n \"\"\"\n arg_2 = arg_0.make_success_response(arg_1)\n arg_3 = time.time()\n arg_4 = arg_3 - arg_0.basehandler_starttime\n arg_2[arg_5.RESPONSE_KEY_EXECUTION_TIME] = arg_4\n arg_0.write_json_response(arg_2)"} +{"_id": "doc_2808", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Writes the message as part of the response and sets 404 status.\n \"\"\"\n arg_0.set_status(404)\n arg_2 = arg_0.make_error_response(str(arg_1))\n arg_3 = time.time()\n arg_4 = arg_3 - arg_0.basehandler_starttime\n arg_2[arg_5.RESPONSE_KEY_EXECUTION_TIME] = arg_4\n arg_0.write_json_response(arg_2)"} +{"_id": "doc_2809", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Makes the base dict for the response.\n The status is the string value for\n the key \"status\" of the response. This\n should be \"success\" or \"failure\".\n \"\"\"\n arg_2 = {\n constants.RESPONSE_KEY_STATUS: arg_1,\n constants.RESPONSE_KEY_VERSION: constants.API_VERSION,\n constants.RESPONSE_KEY_EXECUTION_TIME: 0,\n constants.RESPONSE_KEY_MESSAGE: \"\",\n }\n return arg_2"} +{"_id": "doc_2810", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Makes the python dict corresponding to the\n JSON that needs to be sent for a successful\n response. 
Result is the actual payload\n that gets sent.\n \"\"\"\n arg_2 = arg_0.make_response(arg_3.RESPONSE_STATUS_SUCCESS)\n arg_2[arg_3.RESPONSE_KEY_RESULT] = arg_1\n return arg_2"} +{"_id": "doc_2811", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get request argument.\n Raises exception if argument is missing.\n Returns the cluster argument.\n \"\"\"\n try:\n return arg_0.get_argument(constants.PARAM_CLUSTER)\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2812", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get request argument.\n Raises exception if argument is missing.\n Returns the role argument.\n \"\"\"\n try:\n return arg_0.get_argument(constants.PARAM_ROLE, default=None)\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2813", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get request argument.\n Raises exception if argument is missing.\n Returns the environ argument.\n \"\"\"\n try:\n return arg_0.get_argument(constants.PARAM_ENVIRON)\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2814", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get topology argument.\n Raises exception if argument is missing.\n Returns the topology argument.\n \"\"\"\n try:\n arg_1 = arg_0.get_argument(constants.PARAM_TOPOLOGY)\n return arg_1\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2815", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get starttime argument.\n Raises exception if argument is missing.\n Returns the starttime argument.\n \"\"\"\n try:\n arg_1 = arg_0.get_argument(constants.PARAM_STARTTIME)\n return arg_1\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2816", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get endtime argument.\n Raises exception if argument is missing.\n Returns the endtime argument.\n \"\"\"\n try:\n arg_1 = arg_0.get_argument(constants.PARAM_ENDTIME)\n return arg_1\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2817", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get length argument.\n Raises exception if argument is missing.\n Returns the length argument.\n \"\"\"\n try:\n arg_1 = arg_0.get_argument(constants.PARAM_LENGTH)\n return arg_1\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2818", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Helper function to get metricname arguments.\n Notice that it is get_argument\"s\" variation, which means that this can be repeated.\n Raises exception if argument is missing.\n Returns a list of metricname arguments\n \"\"\"\n try:\n arg_1 = arg_0.get_arguments(constants.PARAM_METRICNAME)\n if not arg_1:\n raise tornado.web.MissingArgumentError(constants.PARAM_METRICNAME)\n return arg_1\n except tornado.web.MissingArgumentError as e:\n raise Exception(e.log_message)"} +{"_id": "doc_2819", "title": "", "text": "def Func(arg_0):\n \"\"\"Tries to connect to the Heron Server\n\n ``loop()`` method needs to be called after this.\n \"\"\"\n Log.debug(\"In Func() of %s\" % arg_0._get_classname())\n # TODO: specify buffer size, exception handling\n arg_0.create_socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # when ready, handle_connect is called\n 
arg_0._connecting = True\n arg_0.connect(arg_0.endpoint)"} +{"_id": "doc_2820", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Registers protobuf message builders that this client wants to receive\n\n :param msg_builder: callable to create a protobuf message that this client wants to receive\n \"\"\"\n arg_2 = arg_1()\n Log.debug(\"In Func(): %s\" % arg_2.DESCRIPTOR.full_name)\n arg_0.registered_message_map[arg_2.DESCRIPTOR.full_name] = arg_1"} +{"_id": "doc_2821", "title": "", "text": "def Func():\n \"\"\"\n This will extract heron directory from .pex file.\n\n For example,\n when __file__ is '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc', and\n its real path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',\n the internal variable ``path`` would be '/Users/heron-user/.heron', which is the heron directory\n\n This means the variable `go_above_dirs` below is 9.\n\n :return: root location of the .pex file\n \"\"\"\n arg_0 = 9\n arg_1 = \"/\".join(os.path.realpath(__file__).split('/')[:-arg_0])\n return normalized_class_path(arg_1)"} +{"_id": "doc_2822", "title": "", "text": "def Func(arg_0):\n \"\"\"\n if role is not provided, supply userid\n if environ is not provided, supply 'default'\n \"\"\"\n if len(arg_0[1]) == 0 and len(arg_0[2]) == 0:\n return (arg_0[0], getpass.getuser(), ENVIRON)\n\n return (arg_0[0], arg_0[1], arg_0[2])"} +{"_id": "doc_2823", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse the command line for overriding the defaults and\n create an override file.\n \"\"\"\n arg_1 = parse_override_config(arg_0)\n try:\n arg_2 = tempfile.mkdtemp()\n arg_3 = os.path.join(arg_2, OVERRIDE_YAML)\n with open(arg_3, 'w') as f:\n f.write(yaml.dump(arg_1))\n\n return arg_3\n except Exception as e:\n raise Exception(\"Failed to parse override config: %s\" % str(e))"} +{"_id": "doc_2824", "title": "", "text": "def Func():\n \"\"\"Get the path of java executable\"\"\"\n arg_0 = os.environ.get(\"JAVA_HOME\")\n return os.path.join(arg_0, BIN_DIR, \"java\")"} +{"_id": "doc_2825", "title": "", "text": "def Func():\n \"\"\"Check if the release.yaml file exists\"\"\"\n arg_0 = get_heron_release_file()\n\n # if the file does not exist and is not a file\n if not os.path.isfile(arg_0):\n Log.error(\"Required file not found: %s\" % arg_0)\n return False\n\n return True"} +{"_id": "doc_2826", "title": "", "text": "def Func(arg_0=False):\n \"\"\"Print version from release.yaml\n\n :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.\n \"\"\"\n if arg_0:\n arg_1 = get_zipped_heron_release_file()\n else:\n arg_1 = get_heron_release_file()\n with open(arg_1) as release_info:\n for arg_2 in release_info:\n arg_3 = arg_2[:-1].split(' ')\n if arg_3[0] == 'heron.build.version':\n return arg_3[-1].replace(\"'\", \"\")\n return 'unknown'"} +{"_id": "doc_2827", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the UUID with which the watch is\n registered. 
This UUID can be used to unregister\n the watch.\n Returns None if watch could not be registered.\n\n The argument 'callback' must be a function that takes\n exactly one argument, the topology on which\n the watch was triggered.\n Note that the watch will be unregistered in case\n it raises any Exception the first time.\n\n This callback is also called at the time\n of registration.\n \"\"\"\n arg_2 = 5\n # Retry in case UID is previously\n # generated, just in case...\n for arg_3 in range(arg_2):\n # Generate a random UUID.\n arg_4 = uuid.uuid4()\n if arg_4 not in arg_0.watches:\n Log.info(\"Registering a watch with uid: \" + str(arg_4))\n try:\n arg_1(arg_0)\n except Exception as e:\n Log.error(\"Caught exception while triggering callback: \" + str(e))\n Log.debug(traceback.format_exc())\n return None\n arg_0.watches[arg_4] = arg_1\n return arg_4\n return None"} +{"_id": "doc_2828", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Unregister the watch with the given UUID.\n \"\"\"\n # Do not raise an error if UUID is\n # not present in the watches.\n Log.info(\"Unregister a watch with uid: \" + str(arg_1))\n arg_0.watches.pop(arg_1, None)"} +{"_id": "doc_2829", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Call all the callbacks.\n If any callback raises an Exception,\n unregister the corresponding watch.\n \"\"\"\n arg_1 = []\n for arg_2, arg_3 in arg_0.watches.items():\n try:\n arg_3(arg_0)\n except Exception as e:\n Log.error(\"Caught exception while triggering callback: \" + str(e))\n Log.debug(traceback.format_exc())\n arg_1.append(arg_2)\n\n for arg_2 in arg_1:\n arg_0.unregister_watch(arg_2)"} +{"_id": "doc_2830", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" set physical plan \"\"\"\n if not arg_1:\n arg_0.physical_plan = None\n arg_0.id = None\n else:\n arg_0.physical_plan = arg_1\n arg_0.id = arg_1.topology.id\n arg_0.trigger_watches()"} +{"_id": "doc_2831", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" set packing plan \"\"\"\n if not arg_1:\n arg_0.packing_plan = None\n arg_0.id = None\n else:\n arg_0.packing_plan = arg_1\n arg_0.id = arg_1.id\n arg_0.trigger_watches()"} +{"_id": "doc_2832", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" set exectuion state \"\"\"\n if not arg_1:\n arg_0.execution_state = None\n arg_0.cluster = None\n arg_0.environ = None\n else:\n arg_0.execution_state = arg_1\n arg_2, arg_3 = arg_0.get_execution_state_dc_environ(arg_1)\n arg_0.cluster = arg_2\n arg_0.environ = arg_3\n arg_0.zone = arg_2\n arg_0.trigger_watches()"} +{"_id": "doc_2833", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Number of spouts + bolts\n \"\"\"\n arg_1 = 0\n\n # Get all the components\n arg_2 = arg_0.spouts() + arg_0.bolts()\n\n # Get instances for each worker\n for arg_3 in arg_2:\n arg_4 = arg_3.comp.config\n for arg_5 in arg_4.kvs:\n if arg_5.key == api_constants.TOPOLOGY_COMPONENT_PARALLELISM:\n arg_1 += int(arg_5.value)\n break\n\n return arg_1"} +{"_id": "doc_2834", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the current state of this topology.\n The state values are from the topology.proto\n RUNNING = 1, PAUSED = 2, KILLED = 3\n if the state is None \"Unknown\" is returned.\n \"\"\"\n arg_1 = None\n if arg_0.physical_plan and arg_0.physical_plan.topology:\n arg_1 = arg_0.physical_plan.topology.state\n\n if arg_1 == 1:\n return \"Running\"\n elif arg_1 == 2:\n return \"Paused\"\n elif arg_1 == 3:\n return \"Killed\"\n else:\n return \"Unknown\""} +{"_id": "doc_2835", "title": "", "text": "def Func(arg_0):\n 
\"\"\"\n Sync the topologies with the statemgrs.\n \"\"\"\n arg_0.state_managers = statemanagerfactory.get_all_state_managers(arg_0.config.statemgr_config)\n try:\n for arg_2 in arg_0.state_managers:\n arg_2.start()\n except Exception as ex:\n Log.error(\"Found exception while initializing state managers: %s. Bailing out...\" % ex)\n traceback.print_exc()\n sys.exit(1)\n\n # pylint: disable=deprecated-lambda\n def on_topologies_watch(arg_2, arg_3):\n \"\"\"watch topologies\"\"\"\n Log.info(\"State watch triggered for topologies.\")\n Log.debug(\"Topologies: \" + str(arg_3))\n arg_4 = arg_0.getTopologiesForStateLocation(arg_2.name)\n arg_5 = map(lambda t: t.name, arg_4)\n Log.debug(\"Existing topologies: \" + str(arg_5))\n for arg_6 in arg_5:\n if arg_6 not in arg_3:\n Log.info(\"Removing topology: %s in rootpath: %s\",\n arg_6, arg_2.rootpath)\n arg_0.removeTopology(arg_6, arg_2.name)\n\n for arg_6 in arg_3:\n if arg_6 not in arg_5:\n arg_0.addNewTopology(arg_2, arg_6)\n\n for arg_2 in arg_0.state_managers:\n # The callback function with the bound\n # state_manager as first variable.\n arg_7 = partial(on_topologies_watch, arg_2)\n arg_2.get_topologies(arg_7)"} +{"_id": "doc_2836", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns all the topologies for a given state manager.\n \"\"\"\n return filter(lambda t: t.state_manager_name == arg_1, arg_0.topologies)"} +{"_id": "doc_2837", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the repesentation of execution state that will\n be returned from Tracker.\n \"\"\"\n arg_2 = arg_1.execution_state\n\n arg_3 = {\n \"cluster\": arg_2.cluster,\n \"environ\": arg_2.environ,\n \"role\": arg_2.role,\n \"jobname\": arg_1.name,\n \"submission_time\": arg_2.submission_time,\n \"submission_user\": arg_2.submission_user,\n \"release_username\": arg_2.release_state.release_username,\n \"release_tag\": arg_2.release_state.release_tag,\n \"release_version\": arg_2.release_state.release_version,\n \"has_physical_plan\": None,\n \"has_tmaster_location\": None,\n \"has_scheduler_location\": None,\n \"extra_links\": [],\n }\n\n for arg_4 in arg_0.config.extra_links:\n arg_5 = arg_4.copy()\n arg_5[\"url\"] = arg_0.config.get_formatted_url(arg_3,\n arg_5[EXTRA_LINK_FORMATTER_KEY])\n arg_3[\"extra_links\"].append(arg_5)\n return arg_3"} +{"_id": "doc_2838", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the representation of scheduler location that will\n be returned from Tracker.\n \"\"\"\n arg_2 = {\n \"name\": None,\n \"http_endpoint\": None,\n \"job_page_link\": None,\n }\n\n if arg_1.scheduler_location:\n arg_2[\"name\"] = arg_1.scheduler_location.topology_name\n arg_2[\"http_endpoint\"] = arg_1.scheduler_location.http_endpoint\n arg_2[\"job_page_link\"] = \\\n arg_1.scheduler_location.job_page_link[0] \\\n if len(arg_1.scheduler_location.job_page_link) > 0 else \"\"\n\n return arg_2"} +{"_id": "doc_2839", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the representation of tmaster that will\n be returned from Tracker.\n \"\"\"\n arg_2 = {\n \"name\": None,\n \"id\": None,\n \"host\": None,\n \"controller_port\": None,\n \"master_port\": None,\n \"stats_port\": None,\n }\n if arg_1.tmaster:\n arg_2[\"name\"] = arg_1.tmaster.topology_name\n arg_2[\"id\"] = arg_1.tmaster.topology_id\n arg_2[\"host\"] = arg_1.tmaster.host\n arg_2[\"controller_port\"] = arg_1.tmaster.controller_port\n arg_2[\"master_port\"] = arg_1.tmaster.master_port\n arg_2[\"stats_port\"] = arg_1.tmaster.stats_port\n\n return 
arg_2"} +{"_id": "doc_2840", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"validate extra link\"\"\"\n if EXTRA_LINK_NAME_KEY not in arg_1 or EXTRA_LINK_FORMATTER_KEY not in arg_1:\n raise Exception(\"Invalid extra.links format. \" +\n \"Extra link must include a 'name' and 'formatter' field\")\n\n arg_0.validated_formatter(arg_1[EXTRA_LINK_FORMATTER_KEY])\n return arg_1"} +{"_id": "doc_2841", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=arg_4.DEFAULT_STREAM_ID,\n arg_6=None, arg_7=False):\n \"\"\"Emits a new tuple from this Spout\n\n It is compatible with StreamParse API.\n\n :type tup: list or tuple\n :param tup: the new output Tuple to send from this spout,\n should contain only serializable data.\n :type tup_id: str or object\n :param tup_id: the ID for the Tuple. Leave this blank for an unreliable Func.\n (Same as messageId in Java)\n :type stream: str\n :param stream: the ID of the stream this Tuple should be Functed to.\n Leave empty to Func to the default stream.\n :type direct_task: int\n :param direct_task: the task to send the Tuple to if performing a direct Func.\n :type need_task_ids: bool\n :param need_task_ids: indicate whether or not you would like the task IDs the Tuple was Functed.\n \"\"\"\n # first check whether this tuple is sane\n arg_0.pplan_helper.check_output_schema(arg_3, arg_1)\n\n # get custom grouping target task ids; get empty list if not custom grouping\n arg_8 = arg_0.pplan_helper.choose_tasks_for_custom_grouping(arg_3, arg_1)\n\n arg_0.pplan_helper.context.invoke_hook_Func(arg_1, arg_3, None)\n\n arg_9 = tuple_pb2.HeronDataTuple()\n arg_9.key = 0\n\n if arg_6 is not None:\n if not isinstance(arg_6, int):\n raise TypeError(\"direct_task argument needs to be an integer, given: %s\"\n % str(type(arg_6)))\n # performing Func-direct\n arg_9.dest_task_ids.append(arg_6)\n elif arg_8 is not None:\n # for custom grouping\n for arg_11 in arg_8:\n arg_9.dest_task_ids.append(arg_11)\n\n if arg_2 is not None:\n arg_12 = TupleHelper.make_root_tuple_info(arg_3, arg_2)\n if arg_0.acking_enabled:\n # this message is rooted\n arg_13 = arg_9.roots.add()\n arg_13.taskid = arg_0.pplan_helper.my_task_id\n arg_13.key = arg_12.key\n arg_0.in_flight_tuples[arg_12.key] = arg_12\n else:\n arg_0.immediate_acks.append(arg_12)\n\n arg_16 = 0\n\n arg_17 = time.time()\n\n # Serialize\n for arg_18 in arg_1:\n arg_19 = arg_0.serializer.serialize(arg_18)\n arg_9.values.append(arg_19)\n arg_16 += len(arg_19)\n\n arg_20 = (time.time() - arg_17) * system_constants.SEC_TO_NS\n arg_0.spout_metrics.serialize_data_tuple(arg_3, arg_20)\n\n super(SpoutInstance, arg_0).admit_data_tuple(stream_id=arg_3, arg_9=arg_9,\n arg_16=arg_16)\n arg_0.total_tuples_Functed += 1\n arg_0.spout_metrics.update_Func_count(arg_3)\n if arg_7:\n arg_21 = arg_8 or []\n if arg_6 is not None:\n arg_21.append(arg_6)\n return arg_21"} +{"_id": "doc_2842", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" normalize raw logical plan info to table \"\"\"\n arg_2, arg_3 = defaultdict(list), defaultdict(list)\n for arg_4, arg_5 in arg_0.items():\n if arg_4 == 'bolts':\n for arg_6, arg_7 in arg_5.items():\n for arg_8 in arg_7['inputs']:\n arg_9 = arg_8['component_name']\n arg_2[arg_6].append(arg_9)\n arg_3[arg_9].append(arg_6)\n arg_10 = []\n arg_11 = arg_1['physical_plan']['spouts']\n arg_12 = arg_1['physical_plan']['bolts']\n for arg_4, arg_5 in arg_0.items():\n # stages is an int so keep going\n if arg_4 == \"stages\":\n continue\n for arg_6, arg_7 in arg_5.items():\n arg_13 = [arg_4[:-1], arg_6]\n 
if arg_4 == 'spouts':\n arg_13.append(len(arg_11[arg_6]))\n else:\n arg_13.append(len(arg_12[arg_6]))\n arg_13.append(','.join(arg_2.get(arg_6, ['-'])))\n arg_13.append(','.join(arg_3.get(arg_6, ['-'])))\n arg_10.append(arg_13)\n arg_14 = ['type', 'name', 'parallelism', 'input', 'output']\n return arg_10, arg_14"} +{"_id": "doc_2843", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" filter to keep bolts \"\"\"\n arg_2 = []\n for arg_3 in arg_0:\n if arg_3[0] == 'bolt':\n arg_2.append(arg_3)\n return arg_2, arg_1"} +{"_id": "doc_2844", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" get physical plan \"\"\"\n arg_3 = False\n\n # Temp dict used to return result\n # if callback is not provided.\n arg_4 = {\n \"result\": None\n }\n if arg_2:\n arg_3 = True\n else:\n def arg_2(arg_5):\n \"\"\"\n Custom callback to get the topologies right now.\n \"\"\"\n arg_4[\"result\"] = arg_5\n\n arg_0._Func_with_watch(arg_1, arg_2, arg_3)\n\n # The topologies are now populated with the data.\n return arg_4[\"result\"]"} +{"_id": "doc_2845", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" create physical plan \"\"\"\n if not arg_2 or not arg_2.IsInitialized():\n raise_(StateException(\"Physical Plan protobuf not init properly\",\n StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])\n\n arg_3 = arg_0.get_pplan_path(arg_1)\n LOG.info(\"Adding topology: {0} to path: {1}\".format(\n arg_1, arg_3))\n arg_4 = arg_2.SerializeToString()\n try:\n arg_0.client.create(arg_3, value=arg_4, makepath=True)\n return True\n except NoNodeError:\n raise_(StateException(\"NoNodeError while creating pplan\",\n StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])\n except NodeExistsError:\n raise_(StateException(\"NodeExistsError while creating pplan\",\n StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])\n except ZookeeperError:\n raise_(StateException(\"Zookeeper while creating pplan\",\n StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])\n except Exception:\n # Just re raise the exception.\n raise"} +{"_id": "doc_2846", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" get execution state \"\"\"\n arg_3 = False\n\n # Temp dict used to return result\n # if callback is not provided.\n arg_4 = {\n \"result\": None\n }\n if arg_2:\n arg_3 = True\n else:\n def arg_2(arg_5):\n \"\"\"\n Custom callback to get the topologies right now.\n \"\"\"\n arg_4[\"result\"] = arg_5\n\n arg_0._Func_with_watch(arg_1, arg_2, arg_3)\n\n # The topologies are now populated with the data.\n return arg_4[\"result\"]"} +{"_id": "doc_2847", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Helper function to get execution state with\n a callback. The future watch is placed\n only if isWatching is True.\n \"\"\"\n arg_4 = arg_0.get_execution_state_path(arg_1)\n if arg_3:\n LOG.info(\"Adding data watch for path: \" + arg_4)\n\n # pylint: disable=unused-variable, unused-argument\n @arg_0.client.DataWatch(arg_4)\n def watch_execution_state(arg_5, arg_6):\n \"\"\" invoke callback to watch execute state \"\"\"\n if arg_5:\n arg_7 = ExecutionState()\n arg_7.ParseFromString(arg_5)\n arg_2(arg_7)\n else:\n arg_2(None)\n\n # Returning False will result in no future watches\n # being triggered. 
If isWatching is True, then\n # the future watches will be triggered.\n return arg_3"} +{"_id": "doc_2848", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Deserializes Java primitive data and objects serialized by ObjectOutputStream\n from a file-like object.\n \"\"\"\n arg_1 = JavaObjectUnmarshaller(arg_0)\n arg_1.add_transformer(DefaultObjectTransformer())\n return arg_1.readObject()"} +{"_id": "doc_2849", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Func an object\"\"\"\n arg_1.classdesc = arg_0.classdesc\n\n for arg_3 in arg_0.classdesc.fields_names:\n arg_1.__setattr__(arg_3, getattr(arg_0, arg_3))"} +{"_id": "doc_2850", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Fetches Instance jstack from heron-shell.\n \"\"\"\n arg_3 = yield getInstancePid(arg_1, arg_2)\n try:\n arg_4 = tornado.httpclient.AsyncHTTPClient()\n arg_5 = json.loads(arg_3)\n arg_6 = arg_5['stdout'].strip()\n if arg_6 == '':\n raise Exception('Failed to get pid')\n arg_7 = utils.make_shell_endpoint(arg_1, arg_2)\n arg_8 = \"%s/jstack/%s\" % (arg_7, arg_6)\n arg_9 = yield arg_4.fetch(arg_8)\n Log.debug(\"HTTP call for url: %s\", arg_8)\n raise tornado.gen.Return(arg_9.body)\n except tornado.httpclient.HTTPError as e:\n raise Exception(str(e))"} +{"_id": "doc_2851", "title": "", "text": "def Func(arg_0):\n \"\"\" Create the parse for the update command \"\"\"\n arg_1 = arg_0.add_parser(\n 'update',\n help='Update a topology',\n usage=\"%(prog)s [options] cluster/[role]/[env] \"\n + \"[--component-parallelism ] \"\n + \"[--container-number value] \"\n + \"[--runtime-config [component:]]\",\n add_help=True)\n\n args.add_titles(arg_1)\n args.add_cluster_role_env(arg_1)\n args.add_topology(arg_1)\n\n args.add_config(arg_1)\n args.add_dry_run(arg_1)\n args.add_service_url(arg_1)\n args.add_verbose(arg_1)\n\n # Special parameters for update command\n def parallelism_type(arg_2):\n arg_3 = re.compile(r\"^[\\w\\.-]+:[\\d]+$\")\n if not arg_3.match(arg_2):\n raise argparse.ArgumentTypeError(\n \"Invalid syntax for component parallelism (): %s\" % arg_2)\n return arg_2\n\n arg_1.add_argument(\n '--component-parallelism',\n action='append',\n type=parallelism_type,\n required=False,\n help='Component name and the new parallelism value '\n + 'colon-delimited: :')\n\n def runtime_config_type(arg_2):\n arg_3 = re.compile(r\"^([\\w\\.-]+:){1,2}[\\w\\.-]+$\")\n if not arg_3.match(arg_2):\n raise argparse.ArgumentTypeError(\n \"Invalid syntax for runtime config ([component:]): %s\"\n % arg_2)\n return arg_2\n\n arg_1.add_argument(\n '--runtime-config',\n action='append',\n type=runtime_config_type,\n required=False,\n help='Runtime configurations for topology and components '\n + 'colon-delimited: [component:]:')\n\n def container_number_type(arg_2):\n arg_3 = re.compile(r\"^\\d+$\")\n if not arg_3.match(arg_2):\n raise argparse.ArgumentTypeError(\n \"Invalid syntax for container number (value): %s\"\n % arg_2)\n return arg_2\n\n arg_1.add_argument(\n '--container-number',\n action='append',\n type=container_number_type,\n required=False,\n help='Number of containers ')\n\n arg_1.set_defaults(subcommand='update')\n return arg_1"} +{"_id": "doc_2852", "title": "", "text": "def Func(arg_0):\n \"\"\" flatten extra args \"\"\"\n arg_1 = []\n if 'component_parallelism' in arg_0:\n arg_1 += [\"--component_parallelism\",\n ','.join(arg_0['component_parallelism'])]\n if 'runtime_config' in arg_0:\n arg_1 += [\"--runtime_config\",\n ','.join(arg_0['runtime_config'])]\n if 'container_number' in arg_0:\n arg_1 += 
[\"--container_number\",\n ','.join(arg_0['container_number'])]\n if 'dry_run' in arg_0 and arg_0['dry_run']:\n arg_1 += ['--dry_run']\n if 'dry_run_format' in arg_0:\n arg_1 += ['--dry_run_format', arg_0['dry_run_format']]\n\n return arg_1"} +{"_id": "doc_2853", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks if a given gtype is sane\"\"\"\n if arg_1 == arg_0.SHUFFLE or arg_1 == arg_0.ALL or arg_1 == arg_0.LOWEST or arg_1 == arg_0.NONE:\n return True\n elif isinstance(arg_1, arg_0.FIELDS):\n return arg_1.gtype == topology_pb2.Grouping.Value(\"FIELDS\") and \\\n arg_1.fields is not None\n elif isinstance(arg_1, arg_0.CUSTOM):\n return arg_1.gtype == topology_pb2.Grouping.Value(\"CUSTOM\") and \\\n arg_1.python_serialized is not None\n else:\n #pylint: disable=fixme\n #TODO: DIRECT are not supported yet\n return False"} +{"_id": "doc_2854", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Custom grouping from a given implementation of ICustomGrouping\n\n :param Funcgrouper: The ICustomGrouping implemention to use\n \"\"\"\n if arg_1 is None:\n raise TypeError(\"Argument to Func() must be ICustomGrouping instance or classpath\")\n if not isinstance(arg_1, ICustomGrouping) and not isinstance(arg_1, str):\n raise TypeError(\"Argument to Func() must be ICustomGrouping instance or classpath\")\n arg_2 = default_serializer.serialize(arg_1)\n return arg_0.Func_serialized(arg_2, is_java=False)"} +{"_id": "doc_2855", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"Update the value of CountMetric or MultiCountMetric\n\n :type name: str\n :param name: name of the registered metric to be updated.\n :type incr_by: int\n :param incr_by: specifies how much to increment. Default is 1.\n :type key: str or None\n :param key: specifies a key for MultiCountMetric. Needs to be `None` for updating CountMetric.\n \"\"\"\n if arg_1 not in arg_0.metrics:\n Log.error(\"In Func(): %s is not registered in the metric\", arg_1)\n\n if arg_3 is None and isinstance(arg_0.metrics[arg_1], CountMetric):\n arg_0.metrics[arg_1].incr(arg_2)\n elif arg_3 is not None and isinstance(arg_0.metrics[arg_1], MultiCountMetric):\n arg_0.metrics[arg_1].incr(arg_3, arg_2)\n else:\n Log.error(\"In Func(): %s is registered but not supported with this method\", arg_1)"} +{"_id": "doc_2856", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Update the value of ReducedMetric or MultiReducedMetric\n\n :type name: str\n :param name: name of the registered metric to be updated.\n :param value: specifies a value to be reduced.\n :type key: str or None\n :param key: specifies a key for MultiReducedMetric. 
Needs to be `None` for updating\n ReducedMetric.\n \"\"\"\n if arg_1 not in arg_0.metrics:\n Log.error(\"In Func(): %s is not registered in the metric\", arg_1)\n\n if arg_3 is None and isinstance(arg_0.metrics[arg_1], ReducedMetric):\n arg_0.metrics[arg_1].update(arg_2)\n elif arg_3 is not None and isinstance(arg_0.metrics[arg_1], MultiReducedMetric):\n arg_0.metrics[arg_1].update(arg_3, arg_2)\n else:\n Log.error(\"In update_count(): %s is registered but not supported with this method\", arg_1)"} +{"_id": "doc_2857", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Apply updates to the execute metrics\"\"\"\n arg_0.update_count(arg_0.EXEC_COUNT, key=arg_1)\n arg_0.update_reduced_metric(arg_0.EXEC_LATENCY, arg_3, arg_1)\n arg_0.update_count(arg_0.EXEC_TIME_NS, incr_by=arg_3, key=arg_1)\n\n arg_4 = arg_2 + \"/\" + arg_1\n arg_0.update_count(arg_0.EXEC_COUNT, key=arg_4)\n arg_0.update_reduced_metric(arg_0.EXEC_LATENCY, arg_3, arg_4)\n arg_0.update_count(arg_0.EXEC_TIME_NS, incr_by=arg_3, key=arg_4)"} +{"_id": "doc_2858", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Apply updates to the deserialization metrics\"\"\"\n arg_0.update_count(arg_0.TUPLE_DESERIALIZATION_TIME_NS, incr_by=arg_3, key=arg_1)\n arg_4 = arg_2 + \"/\" + arg_1\n arg_0.update_count(arg_0.TUPLE_DESERIALIZATION_TIME_NS, incr_by=arg_3,\n key=arg_4)"} +{"_id": "doc_2859", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Registers a given metric\n\n :param name: name of the metric\n :param metric: IMetric object to be registered\n :param time_bucket_in_sec: time interval for update to the metrics manager\n \"\"\"\n if arg_1 in arg_0.metrics_map:\n raise RuntimeError(\"Another metric has already been registered with name: %s\" % arg_1)\n\n Log.debug(\"Register metric: %s, with interval: %s\", arg_1, str(arg_3))\n arg_0.metrics_map[arg_1] = arg_2\n\n if arg_3 in arg_0.time_bucket_in_sec_to_metrics_name:\n arg_0.time_bucket_in_sec_to_metrics_name[arg_3].append(arg_1)\n else:\n arg_0.time_bucket_in_sec_to_metrics_name[arg_3] = [arg_1]\n arg_0._register_timer_task(arg_3)"} +{"_id": "doc_2860", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Offer to the buffer\n\n It is a non-blocking operation, and when the buffer is full, it raises Queue.Full exception\n \"\"\"\n try:\n # non-blocking\n arg_0._buffer.put(arg_1, block=False)\n if arg_0._consumer_callback is not None:\n arg_0._consumer_callback()\n return True\n except Queue.Full:\n Log.debug(\"%s: Full in Func()\" % str(arg_0))\n raise Queue.Full"} +{"_id": "doc_2861", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse version to major, minor, patch, pre-release, build parts.\n \"\"\"\n arg_1 = _REGEX.match(arg_0)\n if arg_1 is None:\n raise ValueError('%s is not valid SemVer string' % arg_0)\n\n arg_2 = arg_1.groupdict()\n\n arg_2['major'] = int(arg_2['major'])\n arg_2['minor'] = int(arg_2['minor'])\n arg_2['patch'] = int(arg_2['patch'])\n\n return arg_2"} +{"_id": "doc_2862", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns all the file state_managers.\n \"\"\"\n arg_1 = []\n arg_2 = arg_0.get_state_locations_of_type(\"file\")\n for arg_3 in arg_2:\n arg_4 = arg_3['name']\n arg_5 = os.path.expanduser(arg_3['rootpath'])\n LOG.info(\"Connecting to file state with rootpath: \" + arg_5)\n arg_6 = FileStateManager(arg_4, arg_5)\n arg_1.append(arg_6)\n\n return arg_1"} +{"_id": "doc_2863", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"Increments the value of a given key by 
``to_add``\"\"\"\n if arg_1 not in arg_0.value:\n arg_0.value[arg_1] = CountMetric()\n arg_0.value[arg_1].Func(arg_2)"} +{"_id": "doc_2864", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Adds a new key to this metric\"\"\"\n if arg_1 not in arg_0.value:\n arg_0.value[arg_1] = ReducedMetric(arg_0.reducer)"} +{"_id": "doc_2865", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Add a new data tuple to the currently buffered set of tuples\"\"\"\n if (arg_0.current_data_tuple_set is None) or \\\n (arg_0.current_data_tuple_set.stream.id != arg_1) or \\\n (len(arg_0.current_data_tuple_set.tuples) >= arg_0.data_tuple_set_capacity) or \\\n (arg_0.current_data_tuple_size_in_bytes >= arg_0.max_data_tuple_size_in_bytes):\n arg_0._init_new_data_tuple(arg_1)\n\n arg_4 = arg_0.current_data_tuple_set.tuples.add()\n arg_4.CopyFrom(arg_2)\n\n arg_0.current_data_tuple_size_in_bytes += arg_3\n arg_0.total_data_emitted_in_bytes += arg_3"} +{"_id": "doc_2866", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add the checkpoint state message to be sent back the stmgr\n\n :param ckpt_id: The id of the checkpoint\n :ckpt_state: The checkpoint state\n \"\"\"\n # first flush any buffered tuples\n arg_0._flush_remaining()\n arg_3 = ckptmgr_pb2.StoreInstanceStateCheckpoint()\n arg_4 = ckptmgr_pb2.InstanceStateCheckpoint()\n arg_4.checkpoint_id = arg_1\n arg_4.state = arg_2\n arg_3.state.CopyFrom(arg_4)\n arg_0._push_tuple_to_stream(arg_3)"} +{"_id": "doc_2867", "title": "", "text": "def Func(arg_0):\n '''\n Check if an entry in the class path exists as either a directory or a file\n '''\n # check if the suffic of classpath suffix exists as directory\n if arg_0.endswith('*'):\n Log.debug('Checking classpath entry suffix as directory: %s', arg_0[:-1])\n if os.path.isdir(arg_0[:-1]):\n return True\n return False\n\n # check if the classpath entry is a directory\n Log.debug('Checking classpath entry as directory: %s', arg_0)\n if os.path.isdir(arg_0):\n return True\n else:\n # check if the classpath entry is a file\n Log.debug('Checking classpath entry as file: %s', arg_0)\n if os.path.isfile(arg_0):\n return True\n\n return False"} +{"_id": "doc_2868", "title": "", "text": "def Func(arg_0):\n '''\n Given a java classpath, check whether the path entries are valid or not\n '''\n arg_1 = arg_0.split(':')\n for arg_2 in arg_1:\n if not valid_path(arg_2.strip()):\n return False\n return True"} +{"_id": "doc_2869", "title": "", "text": "def Func(arg_0):\n \"\"\"Get a list of paths to included dependencies in the specified pex file\n\n Note that dependencies are located under `.deps` directory\n \"\"\"\n arg_1 = zipfile.ZipFile(arg_0, mode='r')\n arg_2 = list(set([re.match(egg_regex, i).group(1) for i in arg_1.namelist()\n if re.match(egg_regex, i) is not None]))\n return arg_2"} +{"_id": "doc_2870", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Loads pex file and its dependencies to the current python path\"\"\"\n arg_2 = os.path.abspath(arg_0)\n Log.debug(\"Add a pex to the path: %s\" % arg_2)\n if arg_2 not in sys.path:\n sys.path.insert(0, os.path.dirname(arg_2))\n\n # add dependencies to path\n if arg_1:\n for arg_3 in _get_deps_list(arg_2):\n arg_4 = os.path.join(os.path.dirname(arg_2), arg_3)\n if arg_4 not in sys.path:\n Log.debug(\"Add a new dependency to the path: %s\" % arg_3)\n sys.path.insert(0, arg_4)\n\n Log.debug(\"Python path: %s\" % str(sys.path))"} +{"_id": "doc_2871", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Resolves duplicate package suffix 
problems\n\n When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),\n if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts\n with this Heron Instance pex package (heron.instance.src.python...), making the Python\n interpreter unable to find the target class in a given pex file.\n This function resolves this issue by individually loading packages with suffix `heron` to\n avoid this issue.\n\n However, if a dependent module/class that is not directly specified under ``class_path``\n and has conflicts with other native heron packages, there is a possibility that\n such a class/module might not be imported correctly. For example, if a given ``class_path`` was\n ``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),\n ``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that\n ``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not\n explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron\n package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.\n\n The best way to avoid this issue is NOT to dynamically load a pex file whose top level package\n name is ``heron``. Note that this method is included because some of the example topologies and\n tests have to have a pex with its top level package name of ``heron``.\n \"\"\"\n # import top-level package named `heron` of a given pex file\n arg_2 = zipimport.zipimporter(arg_0)\n arg_2.load_module(\"heron\")\n\n # remove 'heron' and the classname\n arg_3 = arg_1.split('.')[1:-1]\n arg_4 = ['heron']\n arg_5 = None\n for arg_6 in arg_3:\n arg_7 = zipimport.zipimporter(os.path.join(arg_0, '/'.join(arg_4)))\n arg_5 = arg_7.load_module(arg_6)\n arg_4.append(arg_6)\n\n return arg_5"} +{"_id": "doc_2872", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Builds the topology and returns the Funcer\"\"\"\n arg_2 = sets.Set()\n for arg_3 in arg_0._sources:\n arg_3._Func(arg_1, arg_2)\n for arg_3 in arg_0._sources:\n if not arg_3._all_built():\n raise RuntimeError(\"Topology cannot be fully built! 
Are all sources added?\")"} +{"_id": "doc_2873", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"For each kvp in config, do wildcard substitution on the values\"\"\"\n for arg_3 in arg_0:\n arg_4 = arg_0[arg_3]\n arg_5 = arg_4\n if isinstance(arg_4, str):\n for arg_6 in arg_1:\n if arg_1[arg_6]:\n arg_4 = arg_4.replace(arg_6, arg_1[arg_6])\n arg_7 = re.findall(r'\\${[A-Z_]+}', arg_4)\n if arg_7:\n raise ValueError(\"%s=%s in file %s contains unsupported or unset wildcard tokens: %s\" %\n (arg_3, arg_5, arg_2, \", \".join(arg_7)))\n arg_0[arg_3] = arg_4\n return arg_0"} +{"_id": "doc_2874", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" set default time \"\"\"\n arg_4 = arg_2 / 60 * 60\n if arg_4 < arg_2:\n arg_4 += 60\n arg_5 = arg_3 / 60 * 60\n while arg_4 <= arg_5:\n # STREAMCOMP-1559\n # Second check is a work around, because the response from tmaster\n # contains value 0, if it is queries for the current timestamp,\n # since the bucket is created in the tmaster, but is not filled\n # by the metrics.\n if arg_4 not in arg_0.timeline or arg_0.timeline[arg_4] == 0:\n arg_0.timeline[arg_4] = arg_1\n arg_4 += 60"} +{"_id": "doc_2875", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Process a single tuple of input\n\n We add the (time, tuple) pair into our current_tuples. And then look for expiring\n elemnents\n \"\"\"\n arg_2 = int(time.time())\n arg_0.current_tuples.append((arg_1, arg_2))\n arg_0._expire(arg_2)"} +{"_id": "doc_2876", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Called every slide_interval\n \"\"\"\n arg_2 = int(time.time())\n arg_3 = WindowContext(arg_2 - arg_0.window_duration, arg_2)\n arg_4 = []\n for (arg_1, arg_5) in arg_0.current_tuples:\n arg_4.append(arg_1)\n arg_0.processWindow(arg_3, arg_4)\n arg_0._expire(arg_2)"} +{"_id": "doc_2877", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Called every window_duration\n \"\"\"\n arg_2 = int(time.time())\n arg_3 = WindowContext(arg_2 - arg_0.window_duration, arg_2)\n arg_0.processWindow(arg_3, list(arg_0.current_tuples))\n for arg_1 in arg_0.current_tuples:\n arg_0.ack(arg_1)\n arg_0.current_tuples.clear()"} +{"_id": "doc_2878", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get summary of stream managers registration summary\n \"\"\"\n if not arg_1 or not arg_1.host or not arg_1.stats_port:\n return\n arg_3 = tmaster_pb2.StmgrsRegistrationSummaryRequest()\n arg_4 = arg_3.SerializeToString()\n arg_5 = str(arg_1.stats_port)\n arg_6 = arg_1.host\n arg_7 = \"http://{0}:{1}/stmgrsregistrationsummary\".format(arg_6, arg_5)\n arg_8 = tornado.httpclient.HTTPRequest(arg_7,\n body=arg_4,\n method='POST',\n request_timeout=5)\n Log.debug('Making HTTP call to fetch stmgrsregistrationsummary url: %s', arg_7)\n try:\n arg_9 = tornado.httpclient.AsyncHTTPClient()\n arg_10 = yield arg_9.fetch(arg_8)\n Log.debug(\"HTTP call complete.\")\n except tornado.httpclient.HTTPError as e:\n raise Exception(str(e))\n # Check the response code - error if it is in 400s or 500s\n arg_11 = arg_10.code\n if arg_11 >= 400:\n arg_12 = \"Error in getting exceptions from Tmaster, code: \" + arg_11\n Log.error(arg_12)\n raise tornado.gen.Return({\n \"message\": arg_12\n })\n # Parse the response from tmaster.\n arg_13 = tmaster_pb2.StmgrsRegistrationSummaryResponse()\n arg_13.ParseFromString(arg_10.body)\n # Send response\n arg_14 = {}\n for arg_15 in arg_13.registered_stmgrs:\n arg_14[arg_15] = True\n for arg_15 in arg_13.absent_stmgrs:\n arg_14[arg_15] = False\n raise 
tornado.gen.Return(arg_14)"} +{"_id": "doc_2879", "title": "", "text": "def Func(arg_0):\n \"\"\"Set up log, process and signal handlers\"\"\"\n # pylint: disable=unused-argument\n def signal_handler(arg_1, arg_2):\n # We would do nothing here but just exit\n # Just catch the SIGTERM and then cleanup(), registered with atexit, would invoke\n Log.info('signal_handler invoked with signal %s', arg_1)\n arg_0.stop_state_manager_watches()\n sys.exit(arg_1)\n\n def cleanup():\n \"\"\"Handler to trigger when receiving the SIGTERM signal\n Do cleanup inside this method, including:\n 1. Terminate all children processes\n \"\"\"\n Log.info('Executor terminated; exiting all process in executor.')\n\n # Kill child processes first and wait for log collection to finish\n for arg_3 in arg_0.processes_to_monitor.keys():\n os.kill(arg_3, signal.SIGTERM)\n time.sleep(5)\n\n # We would not wait or check whether process spawned dead or not\n os.killpg(0, signal.SIGTERM)\n\n # Redirect stdout and stderr to files in append mode\n # The filename format is heron-executor-.stdxxx\n arg_4 = arg_0.shard\n log.configure(logfile='heron-executor-%s.stdout' % arg_4)\n\n arg_3 = os.getpid()\n arg_5 = os.getsid(arg_3)\n\n # POSIX prohibits the change of the process group ID of a session leader\n if arg_3 <> arg_5:\n Log.info('Set up process group; executor becomes leader')\n os.setpgrp() # create new process group, become its leader\n\n Log.info('Register the SIGTERM signal handler')\n signal.signal(signal.SIGTERM, signal_handler)\n\n Log.info('Register the atexit clean up')\n atexit.register(cleanup)"} +{"_id": "doc_2880", "title": "", "text": "def Func():\n \"\"\"Register exit handlers, initialize the executor and run it.\"\"\"\n # Since Heron on YARN runs as headless users, pex compiled\n # binaries should be exploded into the container working\n # directory. 
In order to do this, we need to set the\n # PEX_ROOT shell environment before forking the processes\n arg_0 = os.environ.copy()\n arg_0[\"PEX_ROOT\"] = os.path.join(os.path.abspath('.'), \".pex\")\n\n # Instantiate the executor, bind it to signal handlers and launch it\n arg_1 = HeronExecutor(sys.argv, arg_0)\n arg_1.initialize()\n\n start(arg_1)"} +{"_id": "doc_2881", "title": "", "text": "def Func(arg_0):\n '''\n Returns the processes to handle streams, including the stream-mgr and the user code containing\n the stream logic of the topology\n '''\n arg_1 = {}\n arg_2 = arg_0._get_instance_plans(arg_0.packing_plan, arg_0.shard)\n arg_3 = []\n for arg_4 in arg_2:\n arg_5 = arg_4.task_id\n arg_6 = arg_4.component_index\n arg_7 = arg_4.component_name\n arg_8 = \"container_%s_%s_%d\" % (str(arg_0.shard), arg_7, arg_5)\n arg_3.append((arg_8, arg_7, arg_5, arg_6))\n\n arg_9 = [\n arg_0.stmgr_binary,\n '--topology_name=%s' % arg_0.topology_name,\n '--topology_id=%s' % arg_0.topology_id,\n '--topologydefn_file=%s' % arg_0.topology_defn_file,\n '--zkhostportlist=%s' % arg_0.state_manager_connection,\n '--zkroot=%s' % arg_0.state_manager_root,\n '--stmgr_id=%s' % arg_0.stmgr_ids[arg_0.shard],\n '--instance_ids=%s' % ','.join(map(lambda x: x[0], arg_3)),\n '--myhost=%s' % arg_0.master_host,\n '--data_port=%s' % str(arg_0.master_port),\n '--local_data_port=%s' % str(arg_0.tmaster_controller_port),\n '--metricsmgr_port=%s' % str(arg_0.metrics_manager_port),\n '--shell_port=%s' % str(arg_0.shell_port),\n '--config_file=%s' % arg_0.heron_internals_config_file,\n '--override_config_file=%s' % arg_0.override_config_file,\n '--ckptmgr_port=%s' % str(arg_0.checkpoint_manager_port),\n '--ckptmgr_id=%s' % arg_0.ckptmgr_ids[arg_0.shard],\n '--metricscachemgr_mode=%s' % arg_0.metricscache_manager_mode.lower()]\n\n arg_10 = arg_0.shell_env.copy() if arg_0.shell_env is not None else {}\n arg_11 = Command(arg_9, arg_10)\n if os.environ.get('ENABLE_HEAPCHECK') is not None:\n arg_11.env.update({\n 'LD_PRELOAD': \"/usr/lib/libtcmalloc.so\",\n 'HEAPCHECK': \"normal\"\n })\n\n arg_1[arg_0.stmgr_ids[arg_0.shard]] = arg_11\n\n # metricsmgr_metrics_sink_config_file = 'metrics_sinks.yaml'\n\n arg_1[arg_0.metricsmgr_ids[arg_0.shard]] = arg_0._get_metricsmgr_cmd(\n arg_0.metricsmgr_ids[arg_0.shard],\n arg_0.metrics_sinks_config_file,\n arg_0.metrics_manager_port\n )\n\n if arg_0.is_stateful_topology:\n arg_1.update(arg_0._get_ckptmgr_process())\n\n if arg_0.pkg_type == 'jar' or arg_0.pkg_type == 'tar':\n arg_1.update(arg_0._get_java_instance_cmd(arg_3))\n elif arg_0.pkg_type == 'pex':\n arg_1.update(arg_0._get_python_instance_cmd(arg_3))\n elif arg_0.pkg_type == 'so':\n arg_1.update(arg_0._get_cpp_instance_cmd(arg_3))\n elif arg_0.pkg_type == 'dylib':\n arg_1.update(arg_0._get_cpp_instance_cmd(arg_3))\n else:\n raise ValueError(\"Unrecognized package type: %s\" % arg_0.pkg_type)\n\n return arg_1"} +{"_id": "doc_2882", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n For the given packing_plan, return the container plan with the given container_id. If protobufs\n supported maps, we could just get the plan by id, but it doesn't so we have a collection of\n containers to iterate over.\n \"\"\"\n arg_3 = None\n for arg_4 in arg_1.container_plans:\n if arg_4.id == arg_2:\n arg_3 = arg_4\n\n # When the executor runs in newly added container by `heron update`,\n # there is no plan for this container. 
In this situation,\n # return None to bypass instance processes.\n if arg_3 is None:\n return None\n return arg_3.instance_plans"} +{"_id": "doc_2883", "title": "", "text": "def Func(arg_0):\n \"\"\" Get a map from all daemon services' name to the command to start them \"\"\"\n arg_1 = {}\n\n arg_1[arg_0.heron_shell_ids[arg_0.shard]] = Command([\n '%s' % arg_0.heron_shell_binary,\n '--port=%s' % arg_0.shell_port,\n '--log_file_prefix=%s/heron-shell-%s.log' % (arg_0.log_dir, arg_0.shard),\n '--secret=%s' % arg_0.topology_id], arg_0.shell_env)\n\n return arg_1"} +{"_id": "doc_2884", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Start all commands and add them to the dict of processes to be monitored \"\"\"\n Log.info(\"Start processes\")\n arg_2 = {}\n # First start all the processes\n for (arg_3, arg_4) in arg_1.items():\n arg_5 = arg_0._run_process(arg_3, arg_4)\n arg_2[arg_5.pid] = ProcessInfo(arg_5, arg_3, arg_4)\n\n # Log down the pid file\n log_pid_for_process(arg_3, arg_5.pid)\n\n with arg_0.process_lock:\n arg_0.processes_to_monitor.update(arg_2)"} +{"_id": "doc_2885", "title": "", "text": "def Func(arg_0):\n \"\"\" Monitor all processes in processes_to_monitor dict,\n restarting any if they fail, up to max_runs times.\n \"\"\"\n # Now wait for any child to die\n Log.info(\"Start process monitor\")\n while True:\n if len(arg_0.processes_to_monitor) > 0:\n (arg_1, arg_2) = os.wait()\n\n with arg_0.process_lock:\n if arg_1 in arg_0.processes_to_monitor.keys():\n arg_3 = arg_0.processes_to_monitor[arg_1]\n arg_4 = arg_3.name\n arg_5 = arg_3.command\n Log.info(\"%s (pid=%s) exited with status %d. command=%s\" % (arg_4, arg_1, arg_2, arg_5))\n # Log the stdout & stderr of the failed process\n arg_0._wait_process_std_out_err(arg_4, arg_3.process)\n\n # Just make it world readable\n if os.path.isfile(\"core.%d\" % arg_1):\n os.system(\"chmod a+r core.%d\" % arg_1)\n if arg_3.attempts >= arg_0.max_runs:\n Log.info(\"%s exited too many times\" % arg_4)\n sys.exit(1)\n time.sleep(arg_0.interval_between_runs)\n arg_6 = arg_0._run_process(arg_4, arg_5)\n del arg_0.processes_to_monitor[arg_1]\n arg_0.processes_to_monitor[arg_6.pid] =\\\n ProcessInfo(arg_6, arg_4, arg_5, arg_3.attempts + 1)\n\n # Log down the pid file\n log_pid_for_process(arg_4, arg_6.pid)"} +{"_id": "doc_2886", "title": "", "text": "def Func(arg_0):\n ''' Determines the commands to be run and compares them with the existing running commands.\n Then starts new ones required and kills old ones no longer required.\n '''\n with arg_0.process_lock:\n arg_1 = dict(map((lambda process: (process.name, process.command)),\n arg_0.processes_to_monitor.values()))\n arg_2 = arg_0.get_commands_to_run()\n\n # get the commands to kill, keep and start\n arg_3, arg_4, arg_5 = \\\n arg_0.get_command_changes(arg_1, arg_2)\n\n Log.info(\"current commands: %s\" % sorted(arg_1.keys()))\n Log.info(\"new commands : %s\" % sorted(arg_2.keys()))\n Log.info(\"commands_to_kill: %s\" % sorted(arg_3.keys()))\n Log.info(\"commands_to_keep: %s\" % sorted(arg_4.keys()))\n Log.info(\"commands_to_start: %s\" % sorted(arg_5.keys()))\n\n arg_0._kill_processes(arg_3)\n arg_0._start_processes(arg_5)\n Log.info(\"Launch complete - processes killed=%s kept=%s started=%s monitored=%s\" %\n (len(arg_3), len(arg_4),\n len(arg_5), len(arg_0.processes_to_monitor)))"} +{"_id": "doc_2887", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Builds the topology and submits it\"\"\"\n if not isinstance(arg_1, str):\n raise RuntimeError(\"Name has to be a 
string type\")\n if not isinstance(arg_2, Config):\n raise RuntimeError(\"config has to be a Config type\")\n if not isinstance(arg_3, Builder):\n raise RuntimeError(\"builder has to be a Builder type\")\n arg_4 = TopologyBuilder(arg_1=arg_1)\n arg_3.build(arg_4)\n arg_4.set_config(arg_2._api_config)\n arg_4.build_and_submit()"} +{"_id": "doc_2888", "title": "", "text": "def Func(arg_0):\n \"\"\"Force every module in modList to be placed into main\"\"\"\n if not arg_0:\n return\n\n arg_1 = sys.modules['__main__']\n for arg_2 in arg_0:\n if isinstance(arg_2, str):\n try:\n arg_3 = __import__(arg_2)\n except Exception:\n sys.stderr.write(\n 'warning: could not import %s\\n. '\n 'Your function may unexpectedly error due to this import failing;'\n 'A version mismatch is likely. Specific error was:\\n' % arg_2)\n print_exec(sys.stderr)\n else:\n setattr(arg_1, arg_3.__name__, arg_3)"} +{"_id": "doc_2889", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Loads additional properties into class `cls`.\n \"\"\"\n for arg_2, arg_3 in arg_1.items():\n if isinstance(arg_2, tuple):\n arg_4, arg_2 = arg_2\n if arg_4 == 'property':\n arg_3 = property(*arg_3)\n elif arg_4 == 'staticmethod':\n arg_3 = staticmethod(arg_3) # pylint: disable=redefined-variable-type\n elif arg_4 == 'classmethod':\n arg_3 = classmethod(arg_3)\n setattr(arg_0, arg_2, arg_3)\n return arg_0"} +{"_id": "doc_2890", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns last n lines from the filename. No exception handling\"\"\"\n arg_2 = os.path.getsize(arg_0)\n with open(arg_0, \"rb\") as f:\n arg_3 = mmap.mmap(f.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)\n try:\n for arg_4 in xrange(arg_2 - 1, -1, -1):\n if arg_3[arg_4] == '\\n':\n arg_1 -= 1\n if arg_1 == -1:\n break\n return arg_3[arg_4 + 1 if arg_4 else 0:].splitlines()\n finally:\n arg_3.close()"} +{"_id": "doc_2891", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a serializer for a given context\"\"\"\n arg_1 = arg_0.get_cluster_config()\n arg_2 = arg_1.get(constants.TOPOLOGY_SERIALIZER_CLASSNAME, None)\n if arg_2 is None:\n return PythonSerializer()\n else:\n try:\n arg_3 = arg_0.get_topology_pex_path()\n pex_loader.load_pex(arg_3)\n arg_4 = pex_loader.import_and_get_class(arg_3, arg_2)\n arg_5 = arg_4()\n return arg_5\n except Exception as e:\n raise RuntimeError(\"Error with loading custom serializer class: %s, with error message: %s\"\n % (arg_2, str(e)))"} +{"_id": "doc_2892", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Registers a new timer task\n\n :param task: function to be run at a specified second from now\n :param second: how many seconds to wait before the timer is triggered\n \"\"\"\n # Python time is in float\n arg_3 = float(arg_2)\n arg_4 = time.time() + arg_3\n heappush(arg_0.timer_tasks, (arg_4, arg_1))"} +{"_id": "doc_2893", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the next timeout from now\n\n This should be used from do_wait().\n :returns (float) next_timeout, or 10.0 if there are no timer events\n \"\"\"\n if len(arg_0.timer_tasks) == 0:\n return sys.maxsize\n else:\n arg_1 = arg_0.timer_tasks[0][0] - time.time()\n return arg_1"} +{"_id": "doc_2894", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a parse tree for the query, each of the node is a\n subclass of Operator. 
This is both a lexical as well as syntax analyzer step.\"\"\"\n if not arg_1:\n return None\n # Just braces do not matter\n if arg_1[0] == '(':\n arg_2 = arg_0.find_closing_braces(arg_1)\n # This must be the last index, since this was an NOP starting brace\n if arg_2 != len(arg_1) - 1:\n raise Exception(\"Invalid syntax\")\n else:\n return arg_0.Func(arg_1[1:-1])\n arg_3 = arg_1.find(\"(\")\n # There must be a ( in the query\n if arg_3 < 0:\n # Otherwise it must be a constant\n try:\n arg_4 = float(arg_1)\n return arg_4\n except ValueError:\n raise Exception(\"Invalid syntax\")\n arg_5 = arg_1[:arg_3]\n if arg_5 not in arg_0.operators:\n raise Exception(\"Invalid token: \" + arg_5)\n\n # Get sub components\n arg_6 = arg_1[arg_3:]\n arg_7 = arg_0.find_closing_braces(arg_6)\n if arg_7 != len(arg_6) - 1:\n raise Exception(\"Invalid syntax\")\n arg_8 = arg_0.get_sub_parts(arg_6[1:-1])\n\n # parts are simple strings in this case\n if arg_5 == \"TS\":\n # This will raise exception if parts are not syntactically correct\n return arg_0.operators[arg_5](arg_8)\n\n arg_9 = []\n for arg_10 in arg_8:\n arg_9.append(arg_0.Func(arg_10))\n\n # Make a node for the current token\n arg_11 = arg_0.operators[arg_5](arg_9)\n return arg_11"} +{"_id": "doc_2895", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Indicate that processing of a Tuple has Funced\n\n It is compatible with StreamParse API.\n \"\"\"\n if not isinstance(arg_1, HeronTuple):\n Log.error(\"Only HeronTuple type is supported in Func()\")\n return\n\n if arg_0.acking_enabled:\n arg_2 = tuple_pb2.AckTuple()\n arg_2.ackedtuple = int(arg_1.id)\n\n arg_4 = 0\n for arg_5 in arg_1.roots:\n arg_6 = arg_2.roots.add()\n arg_6.CopyFrom(arg_5)\n arg_4 += arg_5.ByteSize()\n super(BoltInstance, arg_0).admit_control_tuple(arg_2, arg_4, False)\n\n arg_7 = (time.time() - arg_1.creation_time) * system_constants.SEC_TO_NS\n arg_0.pplan_helper.context.invoke_hook_bolt_Func(arg_1, arg_7)\n arg_0.bolt_metrics.Funced_tuple(arg_1.stream, arg_1.component, arg_7)"} +{"_id": "doc_2896", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Template slave config file\n '''\n arg_2 = \"%s/standalone/templates/slave.template.hcl\" % arg_0[\"config_path\"]\n arg_3 = \"%s/standalone/resources/slave.hcl\" % arg_0[\"config_path\"]\n arg_4 = ['\"%s\"' % master for master in arg_1]\n template_file(arg_2, arg_3,\n {\"\": \", \".join(arg_4)})"} +{"_id": "doc_2897", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Template scheduler.yaml\n '''\n arg_2 = arg_1[0]\n arg_3 = \"%s/standalone/scheduler.yaml\" % arg_0[\"config_path\"]\n\n arg_4 = \"%s/standalone/templates/scheduler.template.yaml\" \\\n % arg_0[\"config_path\"]\n template_file(arg_4, arg_3,\n {\"\": \"http://%s:4646\" % arg_2})"} +{"_id": "doc_2898", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Tempate uploader.yaml\n '''\n arg_2 = arg_1[0]\n arg_3 = \"%s/standalone/templates/uploader.template.yaml\" \\\n % arg_0[\"config_path\"]\n arg_4 = \"%s/standalone/uploader.yaml\" % arg_0[\"config_path\"]\n\n template_file(arg_3, arg_4,\n {\"\": \"http://%s:9000/api/v1/file/upload\" % arg_2})"} +{"_id": "doc_2899", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Template statemgr.yaml\n '''\n arg_2 = \"%s/standalone/templates/statemgr.template.yaml\" \\\n % arg_0[\"config_path\"]\n arg_3 = \"%s/standalone/statemgr.yaml\" % arg_0[\"config_path\"]\n\n template_file(arg_2, arg_3,\n {\"\": \",\".join(\n ['\"%s\"' % arg_4 if \":\" in arg_4 else '\"%s:2181\"' % arg_4 for arg_4 in arg_1])})"} +{"_id": "doc_2900", 
"title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n template heron tools\n '''\n arg_3 = \"%s/standalone/templates/heron_tools.template.hcl\" \\\n % arg_0[\"config_path\"]\n arg_4 = \"%s/standalone/resources/heron_tools.hcl\" \\\n % arg_0[\"config_path\"]\n\n arg_5 = arg_1[0]\n template_file(arg_3, arg_4,\n {\n \"\": \",\".join(\n ['%s' % arg_6 if \":\" in arg_6 else '%s:2181' % arg_6 for arg_6 in arg_2]),\n \"\": '\"%s/heron-tracker\"' % config.get_heron_bin_dir(),\n \"\": '\"%s\"' % get_hostname(arg_5, arg_0),\n \"\": '\"%s/heron-ui\"' % config.get_heron_bin_dir()\n })"} +{"_id": "doc_2901", "title": "", "text": "def Func(arg_0):\n '''\n get cluster info for standalone cluster\n '''\n arg_1 = read_and_parse_roles(arg_0)\n arg_2 = list(arg_1[Role.MASTERS])\n arg_3 = list(arg_1[Role.SLAVES])\n arg_4 = list(arg_1[Role.ZOOKEEPERS])\n arg_5 = list(arg_1[Role.CLUSTER])\n\n # OrderedDicts are used here so that the key order can be\n # specified directly\n arg_6 = OrderedDict()\n arg_6['numNodes'] = len(arg_5)\n arg_6['nodes'] = arg_5\n arg_7 = OrderedDict()\n arg_7['masters'] = arg_2\n arg_7['slaves'] = arg_3\n arg_7['zookeepers'] = arg_4\n arg_8 = OrderedDict()\n arg_8['serviceUrl'] = get_service_url(arg_0)\n arg_8['heronUi'] = get_heron_ui_url(arg_0)\n arg_8['heronTracker'] = get_heron_tracker_url(arg_0)\n arg_6['roles'] = arg_7\n arg_6['urls'] = arg_8\n\n print json.dumps(arg_6, indent=2)"} +{"_id": "doc_2902", "title": "", "text": "def Func(arg_0):\n '''\n Start a Heron standalone cluster\n '''\n arg_1 = read_and_parse_roles(arg_0)\n arg_2 = arg_1[Role.MASTERS]\n arg_3 = arg_1[Role.SLAVES]\n arg_4 = arg_1[Role.ZOOKEEPERS]\n Log.info(\"Roles:\")\n Log.info(\" - Master Servers: %s\" % list(arg_2))\n Log.info(\" - Slave Servers: %s\" % list(arg_3))\n Log.info(\" - Zookeeper Servers: %s\" % list(arg_4))\n if not arg_2:\n Log.error(\"No master servers specified!\")\n sys.exit(-1)\n if not arg_3:\n Log.error(\"No slave servers specified!\")\n sys.exit(-1)\n if not arg_4:\n Log.error(\"No zookeeper servers specified!\")\n sys.exit(-1)\n # make sure configs are templated\n update_config_files(arg_0)\n\n arg_5 = list(arg_2.union(arg_3))\n # if just local deployment\n if not (len(arg_5) == 1 and is_self(arg_5[0])):\n distribute_package(arg_1, arg_0)\n start_master_nodes(arg_2, arg_0)\n start_slave_nodes(arg_3, arg_0)\n start_api_server(arg_2, arg_0)\n start_heron_tools(arg_2, arg_0)\n Log.info(\"Heron standalone cluster complete!\")"} +{"_id": "doc_2903", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Start Heron tracker and UI\n '''\n arg_2 = list(arg_0)[0]\n wait_for_master_to_start(arg_2)\n\n arg_3 = \"%s run %s >> /tmp/heron_tools_start.log 2>&1 &\" \\\n % (get_nomad_path(arg_1), get_heron_tools_job_file(arg_1))\n Log.info(\"Starting Heron Tools on %s\" % arg_2)\n\n if not is_self(arg_2):\n arg_3 = ssh_remote_execute(arg_3, arg_2, arg_1)\n Log.debug(arg_3)\n arg_4 = subprocess.Popen(arg_3,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n arg_5 = arg_4.wait()\n arg_6 = arg_4.communicate()\n Log.debug(\"return code: %s output: %s\" % (arg_5, arg_6))\n if arg_5 != 0:\n Log.error(\"Failed to start Heron Tools on %s with error:\\n%s\" % (arg_2, arg_6[1]))\n sys.exit(-1)\n\n wait_for_job_to_start(arg_2, \"heron-tools\")\n Log.info(\"Done starting Heron Tools\")"} +{"_id": "doc_2904", "title": "", "text": "def Func(arg_0):\n '''\n Wait for a nomad master to start\n '''\n arg_1 = 0\n while True:\n try:\n arg_2 = requests.get(\"http://%s:4646/v1/status/leader\" % 
arg_0)\n if arg_2.status_code == 200:\n break\n except:\n Log.debug(sys.exc_info()[0])\n Log.info(\"Waiting for cluster to come up... %s\" % arg_1)\n time.sleep(1)\n if arg_1 > 10:\n Log.error(\"Failed to start Nomad Cluster!\")\n sys.exit(-1)\n arg_1 = arg_1 + 1"} +{"_id": "doc_2905", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Tar a directory\n '''\n with tarfile.open(arg_0, \"w:gz\") as tar:\n tar.add(arg_1, arcname=os.path.basename(arg_1))"} +{"_id": "doc_2906", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Start master nodes\n '''\n arg_2 = []\n for arg_3 in arg_0:\n Log.info(\"Starting master on %s\" % arg_3)\n arg_4 = \"%s agent -config %s >> /tmp/nomad_server_log 2>&1 &\" \\\n % (get_nomad_path(arg_1), get_nomad_master_config_file(arg_1))\n if not is_self(arg_3):\n arg_4 = ssh_remote_execute(arg_4, arg_3, arg_1)\n Log.debug(arg_4)\n arg_5 = subprocess.Popen(arg_4,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n arg_2.append({\"pid\": arg_5, \"dest\": arg_3})\n\n arg_6 = []\n for arg_7 in arg_2:\n arg_5 = arg_7[\"pid\"]\n arg_8 = arg_5.wait()\n arg_9 = arg_5.communicate()\n Log.debug(\"return code: %s output: %s\" % (arg_8, arg_9))\n if arg_8 != 0:\n arg_6.append(\"Failed to start master on %s with error:\\n%s\" % (arg_7[\"dest\"], arg_9[1]))\n\n if arg_6:\n for arg_10 in arg_6:\n Log.error(arg_10)\n sys.exit(-1)\n\n Log.info(\"Done starting masters\")"} +{"_id": "doc_2907", "title": "", "text": "def Func(arg_0):\n '''\n read config files to get roles\n '''\n arg_1 = dict()\n\n with open(get_inventory_file(arg_0), 'r') as stream:\n try:\n arg_1 = yaml.load(stream)\n except yaml.YAMLError as exc:\n Log.error(\"Error parsing inventory file: %s\" % exc)\n sys.exit(-1)\n\n if arg_2.ZOOKEEPERS not in arg_1 or not arg_1[arg_2.ZOOKEEPERS]:\n Log.error(\"Zookeeper servers node defined!\")\n sys.exit(-1)\n\n if arg_2.CLUSTER not in arg_1 or not arg_1[arg_2.CLUSTER]:\n Log.error(\"Heron cluster nodes defined!\")\n sys.exit(-1)\n\n # Set roles\n arg_1[arg_2.MASTERS] = set([arg_1[arg_2.CLUSTER][0]])\n arg_1[arg_2.SLAVES] = set(arg_1[arg_2.CLUSTER])\n arg_1[arg_2.ZOOKEEPERS] = set(arg_1[arg_2.ZOOKEEPERS])\n arg_1[arg_2.CLUSTER] = set(arg_1[arg_2.CLUSTER])\n\n return arg_1"} +{"_id": "doc_2908", "title": "", "text": "def Func(arg_0):\n '''\n check if this host is this addr\n '''\n arg_1 = []\n for arg_2 in netifaces.interfaces():\n arg_3 = netifaces.ifaddresses(arg_2)\n if netifaces.AF_INET in arg_3:\n for arg_4 in arg_3[netifaces.AF_INET]:\n if \"addr\" in arg_4:\n arg_1.append(arg_4[\"addr\"])\n return arg_0 in arg_1 or arg_0 == get_self_hostname()"} +{"_id": "doc_2909", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Resolve all symbolic references that `src` points to. 
Note that this\n is different than `os.path.realpath` as path components leading up to\n the final location may still be symbolic links.\n \"\"\"\n while os.path.islink(arg_0):\n arg_0 = os.path.join(os.path.dirname(arg_0), os.readlink(arg_0))\n\n return arg_0"} +{"_id": "doc_2910", "title": "", "text": "def Func(arg_0):\n ''' normalize raw result to table '''\n arg_1 = 20\n arg_2, arg_3 = [], 0\n for arg_4, arg_5 in arg_0.items():\n for arg_6, arg_7 in arg_5.items():\n for arg_8 in arg_7:\n arg_3 += 1\n if arg_3 > arg_1:\n continue\n else:\n arg_2.append([arg_4, arg_6, arg_8])\n arg_9 = ['role', 'env', 'topology']\n arg_10 = 0 if arg_3 <= arg_1 else arg_3 - arg_1\n return arg_2, arg_9, arg_10"} +{"_id": "doc_2911", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Monitor the rootpath and call the callback\n corresponding to the change.\n This Funcing happens periodically. This function\n is called in a seperate thread from the main thread,\n because it sleeps for the intervals between each poll.\n \"\"\"\n\n def trigger_watches_based_on_files(arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n For all the topologies in the watchers, check if the data\n in directory has changed. Trigger the callback if it has.\n \"\"\"\n for arg_5, arg_6 in arg_1.items():\n arg_7 = os.path.join(arg_2, arg_5)\n arg_8 = \"\"\n if os.path.exists(arg_7):\n with open(os.path.join(arg_2, arg_5)) as f:\n arg_8 = f.read()\n if arg_5 not in arg_3 or arg_8 != arg_3[arg_5]:\n arg_9 = arg_4()\n arg_9.ParseFromString(arg_8)\n for arg_10 in arg_6:\n arg_10(arg_9)\n arg_3[arg_5] = arg_8\n\n while not arg_0.Funcing_thread_stop_signal:\n arg_11 = arg_0.get_topologies_path()\n\n arg_12 = []\n if os.path.isdir(arg_11):\n arg_12 = list(filter(\n lambda f: os.path.isfile(os.path.join(arg_11, f)),\n os.listdir(arg_11)))\n if set(arg_12) != set(arg_0.topologies_directory):\n for arg_10 in arg_0.topologies_watchers:\n arg_10(arg_12)\n arg_0.topologies_directory = arg_12\n\n trigger_watches_based_on_files(\n arg_0.topology_watchers, arg_11, arg_0.topologies_directory, Topology)\n\n # Get the directory name for execution state\n arg_14 = os.path.dirname(arg_0.get_execution_state_path(\"\"))\n trigger_watches_based_on_files(\n arg_0.execution_state_watchers, arg_14,\n arg_0.execution_state_directory, ExecutionState)\n\n # Get the directory name for packing_plan\n arg_15 = os.path.dirname(arg_0.get_packing_plan_path(\"\"))\n trigger_watches_based_on_files(\n arg_0.packing_plan_watchers, arg_15, arg_0.packing_plan_directory, PackingPlan)\n\n # Get the directory name for pplan\n arg_16 = os.path.dirname(arg_0.get_pplan_path(\"\"))\n trigger_watches_based_on_files(\n arg_0.pplan_watchers, arg_16,\n arg_0.pplan_directory, PhysicalPlan)\n\n # Get the directory name for tmaster\n arg_17 = os.path.dirname(arg_0.get_tmaster_path(\"\"))\n trigger_watches_based_on_files(\n arg_0.tmaster_watchers, arg_17,\n arg_0.tmaster_directory, TMasterLocation)\n\n # Get the directory name for scheduler location\n arg_18 = os.path.dirname(arg_0.get_scheduler_location_path(\"\"))\n trigger_watches_based_on_files(\n arg_0.scheduler_location_watchers, arg_18,\n arg_0.scheduler_location_directory, SchedulerLocation)\n\n # Sleep for some time\n arg_0.event.wait(timeout=5)"} +{"_id": "doc_2912", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get physical plan of a topology\n \"\"\"\n if arg_2:\n arg_0.pplan_watchers[arg_1].append(arg_2)\n else:\n arg_3 = arg_0.Func_path(arg_1)\n with open(arg_3) as f:\n arg_4 = f.read()\n arg_5 = PhysicalPlan()\n 
arg_5.ParseFromString(arg_4)\n return arg_5"} +{"_id": "doc_2913", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get execution state\n \"\"\"\n if arg_2:\n arg_0.execution_state_watchers[arg_1].append(arg_2)\n else:\n arg_3 = arg_0.Func_path(arg_1)\n with open(arg_3) as f:\n arg_4 = f.read()\n arg_5 = ExecutionState()\n arg_5.ParseFromString(arg_4)\n return arg_5"} +{"_id": "doc_2914", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get scheduler location\n \"\"\"\n if arg_2:\n arg_0.scheduler_location_watchers[arg_1].append(arg_2)\n else:\n arg_3 = arg_0.Func_path(arg_1)\n with open(arg_3) as f:\n arg_4 = f.read()\n arg_5 = SchedulerLocation()\n arg_5.ParseFromString(arg_4)\n return arg_5"} +{"_id": "doc_2915", "title": "", "text": "def Func():\n \"\"\"Creates SocketOptions object from a given sys_config dict\"\"\"\n arg_0 = system_config.get_sys_config()\n arg_1 = [const.INSTANCE_NETWORK_WRITE_BATCH_SIZE_BYTES,\n const.INSTANCE_NETWORK_WRITE_BATCH_TIME_MS,\n const.INSTANCE_NETWORK_READ_BATCH_SIZE_BYTES,\n const.INSTANCE_NETWORK_READ_BATCH_TIME_MS,\n const.INSTANCE_NETWORK_OPTIONS_SOCKET_RECEIVED_BUFFER_SIZE_BYTES,\n const.INSTANCE_NETWORK_OPTIONS_SOCKET_SEND_BUFFER_SIZE_BYTES]\n\n Log.debug(\"In Func()\")\n try:\n arg_2 = [int(arg_0[opt]) for opt in arg_1]\n arg_3 = SocketOptions(*arg_2)\n return arg_3\n except ValueError as e:\n # couldn't convert to int\n raise ValueError(\"Invalid value in sys_config: %s\" % str(e))\n except KeyError as e:\n # option key was not found\n raise KeyError(\"Incomplete sys_config: %s\" % str(e))"} +{"_id": "doc_2916", "title": "", "text": "def Func():\n \"\"\"Retrieves heron options from the `HERON_OPTIONS` environment variable.\n\n Heron options have the following format:\n\n cmdline.topologydefn.tmpdirectory=/var/folders/tmpdir\n cmdline.topology.initial.state=PAUSED\n\n In this case, the returned map will contain:\n\n #!json\n {\n \"cmdline.topologydefn.tmpdirectory\": \"/var/folders/tmpdir\",\n \"cmdline.topology.initial.state\": \"PAUSED\"\n }\n\n Currently supports the following options natively:\n\n - `cmdline.topologydefn.tmpdirectory`: (required) the directory to which this\n topology's defn file is written\n - `cmdline.topology.initial.state`: (default: \"RUNNING\") the initial state of the topology\n - `cmdline.topology.name`: (default: class name) topology name on deployment\n\n Returns: map mapping from key to value\n \"\"\"\n arg_0 = os.environ.get(\"HERON_OPTIONS\")\n if arg_0 is None:\n raise RuntimeError(\"HERON_OPTIONS environment variable not found\")\n\n arg_1 = {}\n for arg_2 in arg_0.replace(\"%%%%\", \" \").split(','):\n arg_3, arg_4, arg_5 = arg_2.partition(\"=\")\n if arg_4:\n arg_1[arg_3] = arg_5\n else:\n raise ValueError(\"Invalid HERON_OPTIONS part %r\" % arg_2)\n return arg_1"} +{"_id": "doc_2917", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add specs to the topology\n\n :type specs: HeronComponentSpec\n :param specs: specs to add to the topology\n \"\"\"\n for arg_2 in arg_1:\n if not isinstance(arg_2, HeronComponentSpec):\n raise TypeError(\"Argument to Func needs to be HeronComponentSpec, given: %s\"\n % str(arg_2))\n if arg_2.name is None:\n raise ValueError(\"TopologyBuilder cannot take a spec without name\")\n if arg_2.name == \"config\":\n raise ValueError(\"config is a reserved name\")\n if arg_2.name in arg_0._specs:\n raise ValueError(\"Attempting to add duplicate spec name: %r %r\" % (arg_2.name, arg_2))\n\n arg_0._specs[arg_2.name] = arg_2"} +{"_id": 
"doc_2918", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set topology-wide configuration to the topology\n\n :type config: dict\n :param config: topology-wide config\n \"\"\"\n if not isinstance(arg_1, dict):\n raise TypeError(\"Argument to Func needs to be dict, given: %s\" % str(arg_1))\n arg_0._topology_config = arg_1"} +{"_id": "doc_2919", "title": "", "text": "def Func(arg_0):\n \"\"\"Builds the topology and submits to the destination\"\"\"\n arg_1 = arg_0._construct_topo_class_dict()\n arg_2 = TopologyType(arg_0.topology_name, (Topology,), arg_1)\n arg_2.write()"} +{"_id": "doc_2920", "title": "", "text": "def Func():\n \"\"\"map from query parameter to query name\"\"\"\n arg_0 = _all_metric_queries()\n return dict(zip(arg_0[0], arg_0[1]) + zip(arg_0[2], arg_0[3]))"} +{"_id": "doc_2921", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Synced API call to get logical plans\"\"\"\n arg_4 = tornado.ioloop.IOLoop.instance()\n try:\n return arg_4.run_sync(lambda: API.Func(arg_0, arg_1, arg_2, arg_3))\n except Exception:\n Log.debug(traceback.format_exc())\n raise"} +{"_id": "doc_2922", "title": "", "text": "def Func(*arg_0):\n \"\"\"Synced API call to get topology information\"\"\"\n arg_1 = tornado.ioloop.IOLoop.instance()\n try:\n return arg_1.run_sync(lambda: API.Func(*arg_0))\n except Exception:\n Log.debug(traceback.format_exc())\n raise"} +{"_id": "doc_2923", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Synced API call to get component metrics\"\"\"\n arg_5 = metric_queries()\n try:\n arg_6 = get_topology_metrics(arg_1, arg_2, arg_3, arg_0, [],\n arg_5, [0, -1], arg_4)\n return arg_6[\"metrics\"]\n except Exception:\n Log.debug(traceback.format_exc())\n raise"} +{"_id": "doc_2924", "title": "", "text": "def Func(arg_0=arg_1.INFO, arg_3=None):\n \"\"\" Configure logger which dumps log on terminal\n\n :param level: logging level: info, warning, verbose...\n :type level: logging level\n :param logfile: log file name, default to None\n :type logfile: string\n :return: None\n :rtype: None\n \"\"\"\n\n # Remove all the existing StreamHandlers to avoid duplicate\n for arg_4 in Log.handlers:\n if isinstance(arg_4, arg_1.StreamHandler):\n Log.handlers.remove(arg_4)\n\n Log.setLevel(arg_0)\n\n # if logfile is specified, FileHandler is used\n if arg_3 is not None:\n arg_5 = \"[%(asctime)s] [%(levelname)s]: %(message)s\"\n arg_6 = arg_1.Formatter(fmt=arg_5, datefmt=date_format)\n arg_7 = arg_1.FileHandler(arg_3)\n arg_7.setFormatter(arg_6)\n Log.addHandler(arg_7)\n # otherwise, use StreamHandler to output to stream (stdout, stderr...)\n else:\n arg_5 = \"[%(asctime)s] %(log_color)s[%(levelname)s]%(reset)s: %(message)s\"\n # pylint: disable=redefined-variable-type\n arg_6 = colorlog.ColoredFormatter(fmt=arg_5, datefmt=date_format)\n arg_8 = arg_1.StreamHandler()\n arg_8.setFormatter(arg_6)\n Log.addHandler(arg_8)"} +{"_id": "doc_2925", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Initializes a rotating logger\n\n It also makes sure that any StreamHandler is removed, so as to avoid stdout/stderr\n constipation issues\n \"\"\"\n logging.basicConfig()\n\n arg_4 = logging.getLogger()\n arg_5 = \"[%(asctime)s] [%(levelname)s] %(filename)s: %(message)s\"\n\n arg_4.setLevel(arg_0)\n arg_6 = RotatingFileHandler(arg_1, maxBytes=arg_3, backupCount=arg_2)\n arg_6.setFormatter(logging.Formatter(fmt=arg_5, datefmt=date_format))\n arg_4.addHandler(arg_6)\n\n for arg_6 in arg_4.handlers:\n arg_4.debug(\"Associated handlers - \" + 
str(arg_6))\n if isinstance(arg_6, logging.StreamHandler):\n arg_4.debug(\"Removing StreamHandler: \" + str(arg_6))\n arg_4.handlers.remove(arg_6)"} +{"_id": "doc_2926", "title": "", "text": "def Func(arg_0):\n \"\"\"simply set verbose level based on command-line args\n\n :param cl_args: CLI arguments\n :type cl_args: dict\n :return: None\n :rtype: None\n \"\"\"\n if 'verbose' in arg_0 and arg_0['verbose']:\n configure(logging.DEBUG)\n else:\n configure(logging.INFO)"} +{"_id": "doc_2927", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns Spout protobuf message\"\"\"\n arg_1 = topology_pb2.Spout()\n arg_1.comp.CopyFrom(arg_0._get_base_component())\n\n # Add output streams\n arg_0._add_out_streams(arg_1)\n return arg_1"} +{"_id": "doc_2928", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns Component protobuf message\"\"\"\n arg_1 = topology_pb2.Component()\n arg_1.name = arg_0.name\n arg_1.spec = topology_pb2.ComponentObjectSpec.Value(\"PYTHON_CLASS_NAME\")\n arg_1.class_name = arg_0.python_class_path\n arg_1.config.CopyFrom(arg_0._get_comp_config())\n return arg_1"} +{"_id": "doc_2929", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns component-specific Config protobuf message\n\n It first adds ``topology.component.parallelism``, and is overriden by\n a user-defined component-specific configuration, specified by spec().\n \"\"\"\n arg_1 = topology_pb2.Config()\n\n # first add parallelism\n arg_2 = arg_1.kvs.add()\n arg_2.key = TOPOLOGY_COMPONENT_PARALLELISM\n arg_2.value = str(arg_0.parallelism)\n arg_2.type = topology_pb2.ConfigValueType.Value(\"STRING_VALUE\")\n\n # iterate through self.custom_config\n if arg_0.custom_config is not None:\n arg_5 = arg_0._sanitize_config(arg_0.custom_config)\n for arg_2, arg_3 in arg_5.items():\n if isinstance(arg_3, str):\n arg_6 = arg_1.kvs.add()\n arg_6.key = arg_2\n arg_6.value = arg_3\n arg_6.type = topology_pb2.ConfigValueType.Value(\"STRING_VALUE\")\n else:\n # need to serialize\n arg_6 = arg_1.kvs.add()\n arg_6.key = arg_2\n arg_6.serialized_value = default_serializer.serialize(arg_3)\n arg_6.type = topology_pb2.ConfigValueType.Value(\"PYTHON_SERIALIZED_VALUE\")\n\n return arg_1"} +{"_id": "doc_2930", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Adds outputs to a given protobuf Bolt or Spout message\"\"\"\n if arg_0.outputs is None:\n return\n\n # sanitize outputs and get a map out fields>\n arg_2 = arg_0._sanitize_outputs()\n\n for arg_3, arg_4 in arg_2.items():\n arg_5 = arg_1.outputs.add()\n arg_5.stream.CopyFrom(arg_0._get_stream_id(arg_0.name, arg_3))\n arg_5.schema.CopyFrom(arg_0._get_stream_schema(arg_4))"} +{"_id": "doc_2931", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a set of output stream ids registered for this component\"\"\"\n if arg_0.outputs is None:\n return set()\n\n if not isinstance(arg_0.outputs, (list, tuple)):\n raise TypeError(\"Argument to outputs must be either list or tuple, given: %s\"\n % str(type(arg_0.outputs)))\n arg_1 = []\n for arg_2 in arg_0.outputs:\n if not isinstance(arg_2, (str, Stream)):\n raise TypeError(\"Outputs must be a list of strings or Streams, given: %s\" % str(arg_2))\n arg_1.append(Stream.DEFAULT_STREAM_ID if isinstance(arg_2, str) else arg_2.stream_id)\n return set(arg_1)"} +{"_id": "doc_2932", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a StreamId protobuf message\"\"\"\n arg_2 = topology_pb2.StreamId()\n arg_2.id = arg_1\n arg_2.component_name = arg_0\n return arg_2"} +{"_id": "doc_2933", "title": "", "text": "def Func(arg_0):\n 
\"\"\"Returns a StreamSchema protobuf message\"\"\"\n arg_1 = topology_pb2.StreamSchema()\n for arg_2 in arg_0:\n arg_3 = arg_1.keys.add()\n arg_3.key = arg_2\n arg_3.type = topology_pb2.Type.Value(\"OBJECT\")\n\n return arg_1"} +{"_id": "doc_2934", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns Func of this GlobalStreamId\n\n Note that if HeronComponentSpec is specified as componentId and its name is not yet\n available (i.e. when ``name`` argument was not given in ``spec()`` method in Bolt or Spout),\n this property returns a message with uuid. However, this is provided only for safety\n with __eq__(), __str__(), and __hash__() methods, and not meant to be called explicitly\n before TopologyType class finally sets the name attribute of HeronComponentSpec.\n \"\"\"\n if isinstance(arg_0._Func, HeronComponentSpec):\n if arg_0._Func.name is None:\n # HeronComponentSpec instance's name attribute might not be available until\n # TopologyType metaclass finally sets it. This statement is to support __eq__(),\n # __hash__() and __str__() methods with safety, as raising Exception is not\n # appropriate this case.\n return \"\" % arg_0._Func.uuid\n return arg_0._Func.name\n elif isinstance(arg_0._Func, str):\n return arg_0._Func\n else:\n raise ValueError(\"Component Id for this GlobalStreamId is not properly set: <%s:%s>\"\n % (str(type(arg_0._Func)), str(arg_0._Func)))"} +{"_id": "doc_2935", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Registers a new metric to this context\"\"\"\n arg_4 = arg_0.get_metrics_collector()\n arg_4.Func(arg_1, arg_2, arg_3)"} +{"_id": "doc_2936", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the declared inputs to specified component\n\n :return: map gtype>, or\n None if not found\n \"\"\"\n # this is necessary because protobuf message is not hashable\n arg_2 = namedtuple('StreamId', 'id, component_name')\n if arg_1 in arg_0.inputs:\n arg_3 = {}\n for arg_4 in arg_0.inputs.get(arg_1):\n arg_5 = arg_2(id=arg_4.stream.id, component_name=arg_4.stream.component_name)\n arg_3[arg_5] = arg_4.gtype\n return arg_3\n else:\n return None"} +{"_id": "doc_2937", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"invoke task hooks for every time spout acks a tuple\n\n :type message_id: str\n :param message_id: message id to which an acked tuple was anchored\n :type complete_latency_ns: float\n :param complete_latency_ns: complete latency in nano seconds\n \"\"\"\n if len(arg_0.task_hooks) > 0:\n arg_3 = SpoutAckInfo(arg_1=arg_1,\n spout_task_id=arg_0.get_task_id(),\n complete_latency_ms=arg_2 *\n system_constants.NS_TO_MS)\n for arg_4 in arg_0.task_hooks:\n arg_4.spout_ack(arg_3)"} +{"_id": "doc_2938", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"invoke task hooks for every time spout fails a tuple\n\n :type message_id: str\n :param message_id: message id to which a failed tuple was anchored\n :type fail_latency_ns: float\n :param fail_latency_ns: fail latency in nano seconds\n \"\"\"\n if len(arg_0.task_hooks) > 0:\n arg_3 = SpoutFailInfo(arg_1=arg_1,\n spout_task_id=arg_0.get_task_id(),\n fail_latency_ms=arg_2 * system_constants.NS_TO_MS)\n for arg_4 in arg_0.task_hooks:\n arg_4.spout_fail(arg_3)"} +{"_id": "doc_2939", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"invoke task hooks for every time bolt processes a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is executed\n :type execute_latency_ns: float\n :param execute_latency_ns: execute latency in nano seconds\n 
\"\"\"\n if len(arg_0.task_hooks) > 0:\n arg_3 = \\\n BoltExecuteInfo(arg_1=arg_1,\n executing_task_id=arg_0.get_task_id(),\n execute_latency_ms=arg_2 * system_constants.NS_TO_MS)\n for arg_4 in arg_0.task_hooks:\n arg_4.bolt_execute(arg_3)"} +{"_id": "doc_2940", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"invoke task hooks for every time bolt fails a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is failed\n :type fail_latency_ns: float\n :param fail_latency_ns: fail latency in nano seconds\n \"\"\"\n if len(arg_0.task_hooks) > 0:\n arg_3 = BoltFailInfo(arg_1=arg_1,\n failing_task_id=arg_0.get_task_id(),\n fail_latency_ms=arg_2 * system_constants.NS_TO_MS)\n for arg_4 in arg_0.task_hooks:\n arg_4.bolt_fail(arg_3)"} +{"_id": "doc_2941", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Extract and execute the java files inside the tar and then add topology\n definition file created by running submitTopology\n\n We use the packer to make a package for the tar and dump it\n to a well-known location. We then run the main method of class\n with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.\n This will run the jar file with the topology class name.\n\n The submitter inside will write out the topology defn file to a location\n that we specify. Then we write the topology defn file to a well known\n packer location. We then write to appropriate places in zookeeper\n and launch the aurora jobs\n :param cl_args:\n :param unknown_args:\n :param tmp_dir:\n :return:\n '''\n # execute main of the topology to create the topology definition\n arg_3 = arg_0['topology-file-name']\n arg_4 = arg_0['topology_main_jvm_property']\n arg_5 = arg_0['topology-class-name']\n arg_6 = execute.heron_tar(\n arg_5,\n arg_3,\n tuple(arg_1),\n arg_2,\n arg_4)\n\n result.render(arg_6)\n\n if not result.is_successful(arg_6):\n arg_7 = (\"Failed to create topology definition \" \\\n \"file when executing class '%s' of file '%s'\") % (arg_5, arg_3)\n arg_6.add_context(arg_7)\n return arg_6\n\n return launch_topologies(arg_0, arg_3, arg_2)"} +{"_id": "doc_2942", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Makes the http endpoint for the heron shell\n if shell port is present, otherwise returns None.\n \"\"\"\n # Format: container__\n arg_2 = arg_0[\"physical_plan\"]\n arg_3 = arg_2[\"instances\"][arg_1][\"stmgrId\"]\n arg_4 = arg_2[\"stmgrs\"][arg_3][\"host\"]\n arg_5 = arg_2[\"stmgrs\"][arg_3][\"shell_port\"]\n return \"http://%s:%d\" % (arg_4, arg_5)"} +{"_id": "doc_2943", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Make the url for log-file data in heron-shell\n from the info stored in stmgr.\n \"\"\"\n return \"http://%s:%d/filedata/log-files/%s.log.0?offset=%s&length=%s\" % \\\n (arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_2944", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Sends this outgoing packet to dispatcher's socket\"\"\"\n if arg_0.sent_complete:\n return\n\n arg_2 = arg_1.Func(arg_0.to_Func)\n arg_0.to_Func = arg_0.to_Func[arg_2:]"} +{"_id": "doc_2945", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates an IncomingPacket object from header and data\n\n This method is for testing purposes\n \"\"\"\n arg_2 = IncomingPacket()\n arg_2.header = arg_0\n arg_2.data = arg_1\n\n if len(arg_0) == HeronProtocol.HEADER_SIZE:\n arg_2.is_header_read = True\n if len(arg_1) == arg_2.get_datasize():\n arg_2.is_complete = True\n\n return arg_2"} +{"_id": "doc_2946", 
"title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Reads incoming data from asyncore.dispatcher\"\"\"\n try:\n if not arg_0.is_header_Func:\n # try Funcing header\n arg_2 = HeronProtocol.HEADER_SIZE - len(arg_0.header)\n arg_0.header += arg_1.recv(arg_2)\n if len(arg_0.header) == HeronProtocol.HEADER_SIZE:\n arg_0.is_header_Func = True\n else:\n Log.debug(\"Header Func incomplete; Func %d bytes of header\" % len(arg_0.header))\n return\n\n if arg_0.is_header_Func and not arg_0.is_complete:\n # try Funcing data\n arg_2 = arg_0.get_datasize() - len(arg_0.data)\n arg_0.data += arg_1.recv(arg_2)\n if len(arg_0.data) == arg_0.get_datasize():\n arg_0.is_complete = True\n except socket.error as e:\n if e.errno == socket.errno.EAGAIN or e.errno == socket.errno.EWOULDBLOCK:\n # Try again later -> call continue_Func later\n Log.debug(\"Try again error\")\n else:\n # Fatal error\n Log.debug(\"Fatal error when Funcing IncomingPacket\")\n raise RuntimeError(\"Fatal error occured in IncomingPacket.Func()\")"} +{"_id": "doc_2947", "title": "", "text": "def Func(arg_0):\n \"\"\"Reads yaml config file and returns auto-typed config_dict\"\"\"\n if not arg_0.endswith(\".yaml\"):\n raise ValueError(\"Config file not yaml\")\n\n with open(arg_0, 'r') as f:\n arg_1 = yaml.load(f)\n\n return arg_1"} +{"_id": "doc_2948", "title": "", "text": "def Func(arg_0):\n \"\"\"Send messages in out_stream to the Stream Manager\"\"\"\n while not arg_0.out_stream.is_empty() and arg_0._stmgr_client.is_registered:\n arg_1 = arg_0.out_stream.poll()\n if isinstance(arg_1, tuple_pb2.HeronTupleSet):\n arg_1.src_task_id = arg_0.my_pplan_helper.my_task_id\n arg_0.gateway_metrics.update_sent_packet(arg_1.ByteSize())\n arg_0._stmgr_client.send_message(arg_1)"} +{"_id": "doc_2949", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Called when state change is commanded by stream manager\"\"\"\n assert arg_0.my_pplan_helper is not None\n assert arg_0.my_instance is not None and arg_0.my_instance.py_class is not None\n\n if arg_0.my_pplan_helper.get_topology_state() != arg_1.get_topology_state():\n # handle state change\n # update the pplan_helper\n arg_0.my_pplan_helper = arg_1\n if arg_1.is_topology_running():\n if not arg_0.is_instance_started:\n arg_0.start_instance_if_possible()\n arg_0.my_instance.py_class.invoke_activate()\n elif arg_1.is_topology_paused():\n arg_0.my_instance.py_class.invoke_deactivate()\n else:\n raise RuntimeError(\"Unexpected TopologyState update: %s\" % arg_1.get_topology_state())\n else:\n Log.info(\"Topology state remains the same.\")"} +{"_id": "doc_2950", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks if a given stream_id and tuple matches with the output schema\n\n :type stream_id: str\n :param stream_id: stream id into which tuple is sent\n :type tup: list\n :param tup: tuple that is going to be sent\n \"\"\"\n # do some checking to make sure that the number of fields match what's expected\n arg_3 = arg_0._output_schema.get(arg_1, None)\n if arg_3 is None:\n raise RuntimeError(\"%s emitting to stream %s but was not declared in output fields\"\n % (arg_0.my_component_name, arg_1))\n elif arg_3 != len(arg_2):\n raise RuntimeError(\"Number of fields emitted in stream %s does not match what's expected. 
\"\n \"Expected: %s, Observed: %s\" % (arg_1, arg_3, len(arg_2)))"} +{"_id": "doc_2951", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Adds the target component\n\n :type stream_id: str\n :param stream_id: stream id into which tuples are emitted\n :type task_ids: list of str\n :param task_ids: list of task ids to which tuples are emitted\n :type grouping: ICustomStreamGrouping object\n :param grouping: custom grouping to use\n :type source_comp_name: str\n :param source_comp_name: source component name\n \"\"\"\n if arg_1 not in arg_0.targets:\n arg_0.targets[arg_1] = []\n arg_0.targets[arg_1].append(Target(arg_2, arg_3, arg_4))"} +{"_id": "doc_2952", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Prepares the custom grouping for this component\"\"\"\n for arg_2, arg_3 in arg_0.targets.items():\n for arg_4 in arg_3:\n arg_4.Func(arg_1, arg_2)"} +{"_id": "doc_2953", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Format a line in the directory list based on the file's type and other attributes.\n \"\"\"\n arg_1 = arg_0.st_mode\n\n arg_2 = (arg_1 & 0o700) >> 6\n arg_3 = (arg_1 & 0o070) >> 3\n arg_4 = (arg_1 & 0o7)\n\n def stat_type(arg_5):\n ''' stat type'''\n if stat.S_ISDIR(arg_5):\n return 'd'\n elif stat.S_ISSOCK(arg_5):\n return 's'\n else:\n return '-'\n\n def triple(arg_5):\n ''' triple '''\n return '%c%c%c' % (\n 'r' if arg_5 & 0b100 else '-',\n 'w' if arg_5 & 0b010 else '-',\n 'x' if arg_5 & 0b001 else '-')\n\n return ''.join([stat_type(arg_1), triple(arg_2), triple(arg_3), triple(arg_4)])"} +{"_id": "doc_2954", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Format the date associated with a file to be displayed in directory listing.\n \"\"\"\n arg_1 = datetime.now()\n arg_2 = datetime.fromtimestamp(arg_0)\n return '%s %2d %5s' % (\n arg_2.strftime('%b'), arg_2.day,\n arg_2.year if arg_2.year != arg_1.year else arg_2.strftime('%H:%M'))"} +{"_id": "doc_2955", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Prefix to a filename in the directory listing. 
This is to make the\n listing similar to an output of \"ls -alh\".\n \"\"\"\n try:\n arg_2 = pwd.getpwuid(arg_1.st_uid)\n arg_3 = arg_2.pw_name\n except KeyError:\n arg_3 = arg_1.st_uid\n\n try:\n arg_4 = grp.getgrgid(arg_1.st_gid)\n arg_5 = arg_4.gr_name\n except KeyError:\n arg_5 = arg_1.st_gid\n\n return '%s %3d %10s %10s %10d %s' % (\n format_mode(arg_1),\n arg_1.st_nlink,\n arg_3,\n arg_5,\n arg_1.st_size,\n format_mtime(arg_1.st_mtime),\n )"} +{"_id": "doc_2956", "title": "", "text": "def Func(arg_0, arg_1=-1, arg_2=-1, arg_3=False):\n \"\"\"\n Read a chunk of a file from an offset upto the length.\n \"\"\"\n try:\n arg_2 = int(arg_2)\n arg_1 = int(arg_1)\n except ValueError:\n return {}\n\n if not os.path.isfile(arg_0):\n return {}\n\n try:\n arg_4 = os.stat(arg_0)\n except Exception:\n return {}\n\n if arg_1 == -1:\n arg_1 = arg_4.st_size\n\n if arg_2 == -1:\n arg_2 = arg_4.st_size - arg_1\n\n with open(arg_0, \"r\") as fp:\n fp.seek(arg_1)\n try:\n arg_5 = fp.read(arg_2)\n except IOError:\n return {}\n\n if arg_5:\n arg_5 = _escape_data(arg_5) if arg_3 else arg_5\n return dict(arg_1=arg_1, arg_2=len(arg_5), arg_5=arg_5)\n\n return dict(arg_1=arg_1, arg_2=0)"} +{"_id": "doc_2957", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Runs the command and returns its stdout and stderr.\n \"\"\"\n arg_3 = subprocess.Popen(arg_0, arg_6=subprocess.PIPE,\n arg_7=subprocess.PIPE, arg_1=arg_1, arg_2=arg_2)\n arg_4, arg_5 = proc.async_stdout_stderr_builder(arg_3)\n arg_3.wait()\n arg_6, arg_7 = arg_4.result(), arg_5.result()\n return {'command': ' '.join(arg_0), 'stderr': arg_7, 'stdout': arg_6}"} +{"_id": "doc_2958", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Feed output of one command to the next and return final output\n Returns string output of Funced application of commands.\n \"\"\"\n arg_1 = ' | '.join(map(lambda x: ' '.join(x), arg_0))\n arg_2 = functools.reduce(pipe, [None] + arg_0)\n arg_3 = proc.async_stdout_builder(arg_2)\n arg_2.wait()\n return {\n 'command': arg_1,\n 'stdout': arg_3.result()\n }"} +{"_id": "doc_2959", "title": "", "text": "def Func(arg_0):\n \"\"\" normalize raw metrics API result to table \"\"\"\n arg_1 = tracker_access.metric_queries()\n arg_2 = tracker_access.queries_map()\n arg_3 = arg_0.values()[0].keys()\n arg_4 = []\n for arg_5 in arg_3:\n arg_6 = [arg_5]\n for arg_7 in arg_1:\n try:\n arg_6.append(str(arg_0[arg_7][arg_5]))\n except KeyError:\n pass\n arg_4.append(arg_6)\n arg_8 = ['container id'] + [arg_2[k] for k in arg_1 if k in arg_0.keys()]\n return arg_4, arg_8"} +{"_id": "doc_2960", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" run metrics subcommand \"\"\"\n arg_4, arg_5, arg_6 = arg_2['cluster'], arg_2['role'], arg_2['environ']\n arg_7 = arg_2['topology-name']\n try:\n arg_8 = tracker_access.get_topology_info(arg_4, arg_6, arg_7, arg_5)\n arg_9 = arg_8['physical_plan']['spouts'].keys()\n arg_10 = arg_8['physical_plan']['bolts'].keys()\n arg_11 = arg_9 + arg_10\n arg_12 = arg_2['component']\n if arg_12:\n if arg_12 in arg_11:\n arg_11 = [arg_12]\n else:\n Log.error('Unknown component: \\'%s\\'' % arg_12)\n raise\n except Exception:\n Log.error(\"Fail to connect to tracker: \\'%s\\'\", arg_2[\"tracker_url\"])\n return False\n arg_13 = []\n for arg_14 in arg_11:\n try:\n arg_15 = tracker_access.get_component_metrics(arg_14, arg_4, arg_6, arg_7, arg_5)\n except:\n Log.error(\"Fail to connect to tracker: \\'%s\\'\", arg_2[\"tracker_url\"])\n return False\n arg_16, arg_17 = to_table(arg_15)\n 
arg_13.append((arg_14, arg_16, arg_17))\n for arg_18, (arg_14, arg_16, arg_17) in enumerate(arg_13):\n if arg_18 != 0:\n print('')\n print('\\'%s\\' metrics:' % arg_14)\n print(tabulate(arg_16, headers=arg_17))\n return True"} +{"_id": "doc_2961", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" run containers subcommand \"\"\"\n arg_4, arg_5, arg_6 = arg_2['cluster'], arg_2['role'], arg_2['environ']\n arg_7 = arg_2['topology-name']\n arg_8 = arg_2['id']\n try:\n arg_9 = tracker_access.get_topology_info(arg_4, arg_6, arg_7, arg_5)\n except:\n Log.error(\"Fail to connect to tracker: \\'%s\\'\", arg_2[\"tracker_url\"])\n return False\n arg_10 = arg_9['physical_plan']['stmgrs']\n arg_11, arg_12 = set(), set()\n for arg_13, arg_14 in arg_9['physical_plan']['bolts'].items():\n arg_11 = arg_11 | set(arg_14)\n for arg_13, arg_15 in arg_9['physical_plan']['spouts'].items():\n arg_12 = arg_12 | set(arg_15)\n arg_16 = arg_10.keys()\n arg_16.sort()\n if arg_8 is not None:\n try:\n arg_17 = arg_8 - 1\n if arg_17 < 0:\n raise\n arg_16 = [arg_16[arg_17]]\n except:\n Log.error('Invalid container id: %d' % arg_8)\n return False\n arg_18 = []\n for arg_19, arg_20 in enumerate(arg_16):\n arg_21 = arg_19 + 1\n arg_22 = arg_10[arg_20][\"host\"]\n arg_23 = arg_10[arg_20][\"port\"]\n arg_24 = arg_10[arg_20][\"pid\"]\n arg_25 = arg_10[arg_20][\"instance_ids\"]\n arg_26 = len([instance for instance in arg_25 if instance in arg_11])\n arg_27 = len([instance for instance in arg_25 if instance in arg_12])\n arg_18.append([arg_21, arg_22, arg_23, arg_24, arg_26, arg_27, len(arg_25)])\n arg_28 = [\"container\", \"host\", \"port\", \"pid\", \"#bolt\", \"#spout\", \"#instance\"]\n sys.stdout.flush()\n print(tabulate(arg_18, arg_28=arg_28))\n return True"} +{"_id": "doc_2962", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Creates a HeronTuple\n\n :param stream: protobuf message ``StreamId``\n :param tuple_key: tuple id\n :param values: a list of values\n :param roots: a list of protobuf message ``RootId``\n \"\"\"\n arg_4 = arg_0.component_name\n arg_5 = arg_0.id\n arg_6 = arg_3[0].taskid if arg_3 is not None and len(arg_3) > 0 else None\n return HeronTuple(id=str(arg_1), component=arg_4, arg_0=arg_5,\n task=arg_6, arg_2=arg_2, creation_time=time.time(), arg_3=arg_3)"} +{"_id": "doc_2963", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a RootTupleInfo\"\"\"\n arg_2 = random.getrandbits(TupleHelper.MAX_SFIXED64_RAND_BITS)\n return RootTupleInfo(arg_0=arg_0, arg_1=arg_1,\n insertion_time=time.time(), arg_2=arg_2)"} +{"_id": "doc_2964", "title": "", "text": "def Func(arg_0):\n \"\"\"Updates the list of global error suppressions.\n\n Parses any lint directives in the file that have global effect.\n\n Args:\n lines: An array of strings, each representing a line of the file, with the\n last element being empty if the file is terminated with a newline.\n \"\"\"\n for arg_1 in arg_0:\n if _SEARCH_C_FILE.search(arg_1):\n for arg_2 in _DEFAULT_C_SUPPRESSED_CATEGORIES:\n arg_3[arg_2] = True\n if _SEARCH_KERNEL_FILE.search(arg_1):\n for arg_2 in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:\n arg_3[arg_2] = True"} +{"_id": "doc_2965", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Funces the string for the pattern, caching the compiled regexp.\"\"\"\n if arg_0 not in arg_2:\n arg_2[arg_0] = sre_compile.compile(arg_0)\n return arg_2[arg_0].search(arg_1)"} +{"_id": "doc_2966", "title": "", "text": "def Func(arg_0):\n \"\"\"Removes C++11 raw strings from lines.\n\n Before:\n 
static const char kData[] = R\"(\n multi-line string\n )\";\n\n After:\n static const char kData[] = \"\"\n (replaced by blank line)\n \"\";\n\n Args:\n raw_lines: list of raw lines.\n\n Returns:\n list of lines with C++11 raw strings replaced by empty strings.\n \"\"\"\n\n arg_1 = None\n arg_2 = []\n for arg_3 in arg_0:\n if arg_1:\n # Inside a raw string, look for the end\n arg_4 = arg_3.find(arg_1)\n if arg_4 >= 0:\n # Found the end of the string, match leading space for this\n # line and resume copying the original lines, and also insert\n # a \"\" on the last line.\n arg_5 = Match(r'^(\\s*)\\S', arg_3)\n arg_3 = arg_5.group(1) + '\"\"' + arg_3[arg_4 + len(arg_1):]\n arg_1 = None\n else:\n # Haven't found the end yet, append a blank line.\n arg_3 = '\"\"'\n\n # Look for beginning of a raw string, and replace them with\n # empty strings. This is done in a loop to handle multiple raw\n # strings on the same line.\n while arg_1 is None:\n # Look for beginning of a raw string.\n # See 2.14.15 [lex.string] for syntax.\n #\n # Once we have matched a raw string, we check the prefix of the\n # line to make sure that the line is not part of a single line\n # comment. It's done this way because we remove raw strings\n # before removing comments as opposed to removing comments\n # before removing raw strings. This is because there are some\n # cpplint checks that requires the comments to be preserved, but\n # we don't want to check comments that are inside raw strings.\n arg_6 = Match(r'^(.*?)\\b(?:R|u8R|uR|UR|LR)\"([^\\s\\\\()]*)\\((.*)$', arg_3)\n if (arg_6 and\n not Match(r'^([^\\'\"]|\\'(\\\\.|[^\\'])*\\'|\"(\\\\.|[^\"])*\")*//',\n arg_6.group(1))):\n arg_1 = ')' + arg_6.group(2) + '\"'\n\n arg_4 = arg_6.group(3).find(arg_1)\n if arg_4 >= 0:\n # Raw string ended on same line\n arg_3 = (arg_6.group(1) + '\"\"' +\n arg_6.group(3)[arg_4 + len(arg_1):])\n arg_1 = None\n else:\n # Start of a multi-line raw string\n arg_3 = arg_6.group(1) + '\"\"'\n else:\n break\n\n arg_2.append(arg_3)\n\n # TODO(unknown): if delimiter is not None here, we might want to\n # emit a warning for unterminated string.\n return arg_2"} +{"_id": "doc_2967", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"We are inside a comment, find the end marker.\"\"\"\n while arg_1 < len(arg_0):\n if arg_0[arg_1].strip().endswith('*/'):\n return arg_1\n arg_1 += 1\n return len(arg_0)"} +{"_id": "doc_2968", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Clears a range of lines for multi-line comments.\"\"\"\n # Having // dummy comments makes the lines non-empty, so we will not get\n # unnecessary blank line warnings later in the code.\n for arg_3 in range(arg_1, arg_2):\n arg_0[arg_3] = '/**/'"} +{"_id": "doc_2969", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Find the position just after the end of current parenthesized expression.\n\n Args:\n line: a CleansedLines line.\n startpos: start searching at this position.\n stack: nesting stack at startpos.\n\n Returns:\n On finding matching end: (index just after matching end, None)\n On finding an unclosed expression: (-1, None)\n Otherwise: (-1, new stack at end of this line)\n \"\"\"\n for arg_3 in xrange(arg_1, len(arg_0)):\n arg_4 = arg_0[arg_3]\n if arg_4 in '([{':\n # Found start of parenthesized expression, push to expression stack\n arg_2.append(arg_4)\n elif arg_4 == '<':\n # Found potential start of template argument list\n if arg_3 > 0 and arg_0[arg_3 - 1] == '<':\n # Left shift operator\n if arg_2 and arg_2[-1] == '<':\n arg_2.pop()\n if 
not arg_2:\n return (-1, None)\n elif arg_3 > 0 and Search(r'\\boperator\\s*$', arg_0[0:arg_3]):\n # operator<, don't add to stack\n continue\n else:\n # Tentative start of template argument list\n arg_2.append('<')\n elif arg_4 in ')]}':\n # Found end of parenthesized expression.\n #\n # If we are currently expecting a matching '>', the pending '<'\n # must have been an operator. Remove them from expression stack.\n while arg_2 and arg_2[-1] == '<':\n arg_2.pop()\n if not arg_2:\n return (-1, None)\n if ((arg_2[-1] == '(' and arg_4 == ')') or\n (arg_2[-1] == '[' and arg_4 == ']') or\n (arg_2[-1] == '{' and arg_4 == '}')):\n arg_2.pop()\n if not arg_2:\n return (arg_3 + 1, None)\n else:\n # Mismatched parentheses\n return (-1, None)\n elif arg_4 == '>':\n # Found potential end of template argument list.\n\n # Ignore \"->\" and operator functions\n if (arg_3 > 0 and\n (arg_0[arg_3 - 1] == '-' or Search(r'\\boperator\\s*$', arg_0[0:arg_3 - 1]))):\n continue\n\n # Pop the stack if there is a matching '<'. Otherwise, ignore\n # this '>' since it must be an operator.\n if arg_2:\n if arg_2[-1] == '<':\n arg_2.pop()\n if not arg_2:\n return (arg_3 + 1, None)\n elif arg_4 == ';':\n # Found something that look like end of statements. If we are currently\n # expecting a '>', the matching '<' must have been an operator, since\n # template argument list should not contain statements.\n while arg_2 and arg_2[-1] == '<':\n arg_2.pop()\n if not arg_2:\n return (-1, None)\n\n # Did not find end of expression or unbalanced parentheses on this line\n return (-1, arg_2)"} +{"_id": "doc_2970", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Find position at the matching start of current expression.\n\n This is almost the reverse of FindEndOfExpressionInLine, but note\n that the input position and returned position differs by 1.\n\n Args:\n line: a CleansedLines line.\n endpos: start searching at this position.\n stack: nesting stack at endpos.\n\n Returns:\n On finding matching start: (index at matching start, None)\n On finding an unclosed expression: (-1, None)\n Otherwise: (-1, new stack at beginning of this line)\n \"\"\"\n arg_3 = arg_1\n while arg_3 >= 0:\n arg_4 = arg_0[arg_3]\n if arg_4 in ')]}':\n # Found end of expression, push to expression stack\n arg_2.append(arg_4)\n elif arg_4 == '>':\n # Found potential end of template argument list.\n #\n # Ignore it if it's a \"->\" or \">=\" or \"operator>\"\n if (arg_3 > 0 and\n (arg_0[arg_3 - 1] == '-' or\n Match(r'\\s>=\\s', arg_0[arg_3 - 1:]) or\n Search(r'\\boperator\\s*$', arg_0[0:arg_3]))):\n arg_3 -= 1\n else:\n arg_2.append('>')\n elif arg_4 == '<':\n # Found potential start of template argument list\n if arg_3 > 0 and arg_0[arg_3 - 1] == '<':\n # Left shift operator\n arg_3 -= 1\n else:\n # If there is a matching '>', we can pop the expression stack.\n # Otherwise, ignore this '<' since it must be an operator.\n if arg_2 and arg_2[-1] == '>':\n arg_2.pop()\n if not arg_2:\n return (arg_3, None)\n elif arg_4 in '([{':\n # Found start of expression.\n #\n # If there are any unmatched '>' on the stack, they must be\n # operators. Remove those.\n while arg_2 and arg_2[-1] == '>':\n arg_2.pop()\n if not arg_2:\n return (-1, None)\n if ((arg_4 == '(' and arg_2[-1] == ')') or\n (arg_4 == '[' and arg_2[-1] == ']') or\n (arg_4 == '{' and arg_2[-1] == '}')):\n arg_2.pop()\n if not arg_2:\n return (arg_3, None)\n else:\n # Mismatched parentheses\n return (-1, None)\n elif arg_4 == ';':\n # Found something that look like end of statements. 
If we are currently\n # expecting a '<', the matching '>' must have been an operator, since\n # template argument list should not contain statements.\n while arg_2 and arg_2[-1] == '>':\n arg_2.pop()\n if not arg_2:\n return (-1, None)\n\n arg_3 -= 1\n\n return (-1, arg_2)"} +{"_id": "doc_2971", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"If input points to ) or } or ] or >, finds the position that opens it.\n\n If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the\n linenum/pos that correspond to the opening of the expression.\n\n Args:\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n pos: A position on the line.\n\n Returns:\n A tuple (line, linenum, pos) pointer *at* the opening brace, or\n (line, 0, -1) if we never find the matching opening brace. Note\n we ignore strings and comments when matching; and the line we\n return is the 'cleansed' line at linenum.\n \"\"\"\n arg_3 = arg_0.elided[arg_1]\n if arg_3[arg_2] not in ')}]>':\n return (arg_3, 0, -1)\n\n # Check last line\n (arg_4, arg_5) = FindStartOfExpressionInLine(arg_3, arg_2, [])\n if arg_4 > -1:\n return (arg_3, arg_1, arg_4)\n\n # Continue scanning backward\n while arg_5 and arg_1 > 0:\n arg_1 -= 1\n arg_3 = arg_0.elided[arg_1]\n (arg_4, arg_5) = FindStartOfExpressionInLine(arg_3, len(arg_3) - 1, arg_5)\n if arg_4 > -1:\n return (arg_3, arg_1, arg_4)\n\n # Did not find start of expression before beginning of file, give up\n return (arg_3, 0, -1)"} +{"_id": "doc_2972", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Logs an error if no Copyright message appears at the top of the file.\"\"\"\n\n # We'll say it should occur by line 10. Don't forget there's a\n # dummy line at the front.\n for arg_3 in range(1, min(len(arg_1), 11)):\n if re.search(r'Copyright', arg_1[arg_3], re.I): break\n else: # means no copyright line was found\n arg_2(arg_0, 0, 'legal/copyright', 5,\n 'No copyright message found. '\n 'You should have a line: \"Copyright [year] \"')"} +{"_id": "doc_2973", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the CPP variable that should be used as a header guard.\n\n Args:\n filename: The name of a C++ header file.\n\n Returns:\n The CPP variable that should be used as a header guard in the\n named file.\n\n \"\"\"\n\n # Restores original filename in case that cpplint is invoked from Emacs's\n # flymake.\n arg_0 = re.sub(r'_flymake\\.h$', '.h', arg_0)\n arg_0 = re.sub(r'/\\.flymake/([^/]*)$', r'/\\1', arg_0)\n # Replace 'c++' with 'cpp'.\n arg_0 = arg_0.replace('C++', 'cpp').replace('c++', 'cpp')\n\n arg_1 = FileInfo(arg_0)\n arg_2 = arg_1.RepositoryName()\n if _root:\n arg_3 = os.sep\n # On Windows using directory separator will leave us with\n # \"bogus escape error\" unless we properly escape regex.\n if arg_3 == '\\\\':\n arg_3 += '\\\\'\n arg_2 = re.sub('^' + _root + arg_3, '', arg_2)\n return re.sub(r'[^a-zA-Z0-9]', '_', arg_2).upper() + '_'"} +{"_id": "doc_2974", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks that the file contains a header guard.\n\n Logs an error if no #ifndef header guard is present. 
For other\n headers, checks that the full pathname is used.\n\n Args:\n filename: The name of the C++ header file.\n clean_lines: A CleansedLines instance containing the file.\n error: The function to call with any errors found.\n \"\"\"\n\n # Don't check for header guards if there are error suppression\n # comments somewhere in this file.\n #\n # Because this is silencing a warning for a nonexistent line, we\n # only support the very specific NOLINT(build/header_guard) syntax,\n # and not the general NOLINT or NOLINT(*) syntax.\n arg_3 = arg_1.lines_without_raw_strings\n for arg_4 in arg_3:\n if Search(r'//\\s*NOLINT\\(build/header_guard\\)', arg_4):\n return\n\n # Allow pragma once instead of header guards\n for arg_4 in arg_3:\n if Search(r'^\\s*#pragma\\s+once', arg_4):\n return\n\n arg_5 = GetHeaderGuardCPPVariable(arg_0)\n\n arg_6 = ''\n arg_7 = 0\n arg_8 = ''\n arg_9 = ''\n arg_10 = 0\n for arg_11, arg_12 in enumerate(arg_3):\n arg_13 = arg_12.split()\n if len(arg_13) >= 2:\n # find the first occurrence of #ifndef and #define, save arg\n if not arg_6 and arg_13[0] == '#ifndef':\n # set ifndef to the header guard presented on the #ifndef line.\n arg_6 = arg_13[1]\n arg_7 = arg_11\n if not arg_8 and arg_13[0] == '#define':\n arg_8 = arg_13[1]\n # find the last occurrence of #endif, save entire line\n if arg_12.startswith('#endif'):\n arg_9 = arg_12\n arg_10 = arg_11\n\n if not arg_6 or not arg_8 or arg_6 != arg_8:\n arg_2(arg_0, 0, 'build/header_guard', 5,\n 'No #ifndef header guard found, suggested CPP variable is: %s' %\n arg_5)\n return\n\n # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__\n # for backward compatibility.\n if arg_6 != arg_5:\n arg_14 = 0\n if arg_6 != arg_5 + '_':\n arg_14 = 5\n\n ParseNolintSuppressions(arg_0, arg_3[arg_7], arg_7,\n arg_2)\n arg_2(arg_0, arg_7, 'build/header_guard', arg_14,\n '#ifndef header guard has wrong style, please use: %s' % arg_5)\n\n # Check for \"//\" comments on endif line.\n ParseNolintSuppressions(arg_0, arg_3[arg_10], arg_10,\n arg_2)\n arg_15 = Match(r'#endif\\s*//\\s*' + arg_5 + r'(_)?\\b', arg_9)\n if arg_15:\n if arg_15.group(1) == '_':\n # Issue low severity warning for deprecated double trailing underscore\n arg_2(arg_0, arg_10, 'build/header_guard', 0,\n '#endif line should be \"#endif // %s\"' % arg_5)\n return\n\n # Didn't find the corresponding \"//\" comment. If this file does not\n # contain any \"//\" comments at all, it could be that the compiler\n # only wants \"/**/\" comments, look for those instead.\n arg_16 = True\n for arg_4 in xrange(1, len(arg_3) - 1):\n arg_12 = arg_3[arg_4]\n if Match(r'^(?:(?:\\'(?:\\.|[^\\'])*\\')|(?:\"(?:\\.|[^\"])*\")|[^\\'\"])*//', arg_12):\n arg_16 = False\n break\n\n if arg_16:\n arg_15 = Match(r'#endif\\s*/\\*\\s*' + arg_5 + r'(_)?\\s*\\*/', arg_9)\n if arg_15:\n if arg_15.group(1) == '_':\n # Low severity warning for double trailing underscore\n arg_2(arg_0, arg_10, 'build/header_guard', 0,\n '#endif line should be \"#endif /* %s */\"' % arg_5)\n return\n\n # Didn't find anything\n arg_2(arg_0, arg_10, 'build/header_guard', 5,\n '#endif line should be \"#endif // %s\"' % arg_5)"} +{"_id": "doc_2975", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Logs an error if a source file does not include its header.\"\"\"\n\n # Do not check test files\n arg_3 = FileInfo(arg_0)\n if Search(_TEST_FILE_SUFFIX, arg_3.BaseName()):\n return\n\n for arg_4 in GetHeaderExtensions():\n arg_5 = arg_0[0:len(arg_0) - len(arg_3.Extension())]\n arg_6 = arg_5 + '.' 
+ arg_4\n if not os.path.exists(arg_6):\n continue\n arg_7 = FileInfo(arg_6).RepositoryName()\n arg_8 = None\n for arg_9 in arg_1.include_list:\n for arg_10 in arg_9:\n if arg_7 in arg_10[0] or arg_10[0] in arg_7:\n return\n if not arg_8:\n arg_8 = arg_10[1]\n\n arg_2(arg_0, arg_8, 'build/include', 5,\n '%s should include its header file %s' % (arg_3.RepositoryName(),\n arg_7))"} +{"_id": "doc_2976", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Logs an error for each line containing bad characters.\n\n Two kinds of bad characters:\n\n 1. Unicode replacement characters: These indicate that either the file\n contained invalid UTF-8 (likely) or Unicode replacement characters (which\n it shouldn't). Note that it's possible for this to throw off line\n numbering if the invalid UTF-8 occurred adjacent to a newline.\n\n 2. NUL bytes. These are problematic for some tools.\n\n Args:\n filename: The name of the current file.\n lines: An array of strings, each representing a line of the file.\n error: The function to call with any errors found.\n \"\"\"\n for arg_3, arg_4 in enumerate(arg_1):\n if unicode_escape_decode('\\ufffd') in arg_4:\n arg_2(arg_0, arg_3, 'readability/utf8', 5,\n 'Line contains invalid UTF-8 (or Unicode replacement character).')\n if '\\0' in arg_4:\n arg_2(arg_0, arg_3, 'readability/nul', 5, 'Line contains NUL byte.')"} +{"_id": "doc_2977", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Checks for calls to thread-unsafe functions.\n\n Much code has been originally written without consideration of\n multi-threading. Also, engineers are relying on their old experience;\n they have learned posix before threading extensions were added. These\n tests guide the engineers to use thread-safe functions (when using\n posix directly).\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n for arg_5, arg_6, arg_7 in _THREADING_LIST:\n # Additional pattern matching check to confirm that this is the\n # function we are looking for\n if Search(arg_7, arg_4):\n arg_3(arg_0, arg_2, 'runtime/threadsafe_fn', 2,\n 'Consider using ' + arg_6 +\n '...) instead of ' + arg_5 +\n '...) 
for improved thread safety.')"} +{"_id": "doc_2978", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Checks for the correctness of various spacing issues in the code.\n\n Things we check for: spaces around operators, spaces after\n if/for/while/switch, no spaces around parens in function calls, two\n spaces between code and comment, don't start a block with a blank\n line, don't end a function with a blank line, don't add a blank line\n after public/protected/private, don't have too many blank lines in a row.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n nesting_state: A NestingState instance which maintains information about\n the current stack of nested blocks being parsed.\n error: The function to call with any errors found.\n \"\"\"\n\n # Don't use \"elided\" lines here, otherwise we can't check commented lines.\n # Don't want to use \"raw\" either, because we don't want to check inside C++11\n # raw strings,\n arg_5 = arg_1.lines_without_raw_strings\n arg_6 = arg_5[arg_2]\n\n # Before nixing comments, check if the line is blank for no good\n # reason. This includes the first line after a block is opened, and\n # blank lines at the end of a function (ie, right before a line like '}'\n #\n # Skip all the blank line checks if we are immediately inside a\n # namespace body. In other words, don't issue blank line warnings\n # for this block:\n # namespace {\n #\n # }\n #\n # A warning about missing end of namespace comments will be issued instead.\n #\n # Also skip blank line checks for 'extern \"C\"' blocks, which are formatted\n # like namespaces.\n if (IsBlankLine(arg_6) and\n not arg_3.InNamespaceBody() and\n not arg_3.InExternC()):\n arg_7 = arg_1.elided\n arg_8 = arg_7[arg_2 - 1]\n arg_9 = arg_8.rfind('{')\n # TODO(unknown): Don't complain if line before blank line, and line after,\n # both start with alnums and are indented the same amount.\n # This ignores whitespace at the start of a namespace block\n # because those are not usually indented.\n if arg_9 != -1 and arg_8[arg_9:].find('}') == -1:\n # OK, we have a blank line at the start of a code block. Before we\n # complain, we check if it is an exception to the rule: The previous\n # non-empty line has the parameters of a function header that are indented\n # 4 spaces (because they did not fit in a 80 column line when placed on\n # the same line as the function name). We also check for the case where\n # the previous line is indented 6 spaces, which may happen when the\n # initializers of a constructor do not fit into a 80 column line.\n arg_10 = False\n if Match(r' {6}\\w', arg_8): # Initializer list?\n # We are looking for the opening column of initializer list, which\n # should be indented 4 spaces to cause 6 space indentation afterwards.\n arg_11 = arg_2-2\n while (arg_11 >= 0\n and Match(r' {6}\\w', arg_7[arg_11])):\n arg_11 -= 1\n arg_10 = (arg_11 >= 0\n and arg_7[arg_11][:5] == ' :')\n else:\n # Search for the function arguments or an initializer list. We use a\n # simple heuristic here: If the line is indented 4 spaces; and we have a\n # closing paren, without the opening paren, followed by an opening brace\n # or colon (for initializer lists) we assume that it is the last line of\n # a function header. 
If we have a colon indented 4 spaces, it is an\n # initializer list.\n arg_10 = (Match(r' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)',\n arg_8)\n or Match(r' {4}:', arg_8))\n\n if not arg_10:\n arg_4(arg_0, arg_2, 'whitespace/blank_line', 2,\n 'Redundant blank line at the start of a code block '\n 'should be deleted.')\n # Ignore blank lines at the end of a block in a long if-else\n # chain, like this:\n # if (condition1) {\n # // Something followed by a blank line\n #\n # } else if (condition2) {\n # // Something else\n # }\n if arg_2 + 1 < arg_1.NumLines():\n arg_12 = arg_5[arg_2 + 1]\n if (arg_12\n and Match(r'\\s*}', arg_12)\n and arg_12.find('} else ') == -1):\n arg_4(arg_0, arg_2, 'whitespace/blank_line', 3,\n 'Redundant blank line at the end of a code block '\n 'should be deleted.')\n\n arg_13 = Match(r'\\s*(public|protected|private):', arg_8)\n if arg_13:\n arg_4(arg_0, arg_2, 'whitespace/blank_line', 3,\n 'Do not leave a blank line after \"%s:\"' % arg_13.group(1))\n\n # Next, check comments\n arg_14 = 0\n if arg_2 + 1 < arg_1.NumLines():\n arg_12 = arg_5[arg_2 + 1]\n arg_14 = len(arg_12) - len(arg_12.lstrip())\n CheckComment(arg_6, arg_0, arg_2, arg_14, arg_4)\n\n # get rid of comments and strings\n arg_6 = arg_1.elided[arg_2]\n\n # You shouldn't have spaces before your brackets, except maybe after\n # 'delete []' or 'return []() {};'\n if Search(r'\\w\\s+\\[', arg_6) and not Search(r'(?:delete|return)\\s+\\[', arg_6):\n arg_4(arg_0, arg_2, 'whitespace/braces', 5,\n 'Extra space before [')\n\n # In range-based for, we wanted spaces before and after the colon, but\n # not around \"::\" tokens that might appear.\n if (Search(r'for *\\(.*[^:]:[^: ]', arg_6) or\n Search(r'for *\\(.*[^: ]:[^:]', arg_6)):\n arg_4(arg_0, arg_2, 'whitespace/forcolon', 2,\n 'Missing space around colon in range-based for loop')"} +{"_id": "doc_2979", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Checks for horizontal spacing around parentheses.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n\n # No spaces after an if, while, switch, or for\n arg_5 = Search(r' (if\\(|for\\(|while\\(|switch\\()', arg_4)\n if arg_5:\n arg_3(arg_0, arg_2, 'whitespace/parens', 5,\n 'Missing space before ( in %s' % arg_5.group(1))\n\n # For if/for/while/switch, the left and right parens should be\n # consistent about how many spaces are inside the parens, and\n # there should either be zero or one spaces inside the parens.\n # We don't want: \"if ( foo)\" or \"if ( foo )\".\n # Exception: \"for ( ; foo; bar)\" and \"for (foo; bar; )\" are allowed.\n arg_5 = Search(r'\\b(if|for|while|switch)\\s*'\n r'\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$',\n arg_4)\n if arg_5:\n if len(arg_5.group(2)) != len(arg_5.group(4)):\n if not (arg_5.group(3) == ';' and\n len(arg_5.group(2)) == 1 + len(arg_5.group(4)) or\n not arg_5.group(2) and Search(r'\\bfor\\s*\\(.*; \\)', arg_4)):\n arg_3(arg_0, arg_2, 'whitespace/parens', 5,\n 'Mismatching spaces inside () in %s' % arg_5.group(1))\n if len(arg_5.group(2)) not in [0, 1]:\n arg_3(arg_0, arg_2, 'whitespace/parens', 5,\n 'Should have zero or one spaces inside ( and ) in %s' %\n arg_5.group(1))"} +{"_id": "doc_2980", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check if expression looks like a type name, returns true if so.\n\n Args:\n clean_lines: A CleansedLines 
instance containing the file.\n nesting_state: A NestingState instance which maintains information about\n the current stack of nested blocks being parsed.\n expr: The expression to check.\n Returns:\n True, if token looks like a type.\n \"\"\"\n # Keep only the last token in the expression\n arg_3 = Match(r'^.*(\\b\\S+)$', arg_2)\n if arg_3:\n arg_4 = arg_3.group(1)\n else:\n arg_4 = arg_2\n\n # Match native types and stdint types\n if _TYPES.match(arg_4):\n return True\n\n # Try a bit harder to match templated types. Walk up the nesting\n # stack until we find something that resembles a typename\n # declaration for what we are looking for.\n arg_5 = (r'\\b(?:typename|class|struct)\\s+' + re.escape(arg_4) +\n r'\\b')\n arg_6 = len(arg_1.stack) - 1\n while arg_6 >= 0:\n if isinstance(arg_1.stack[arg_6], _NamespaceInfo):\n return False\n\n # Found where the opening brace is. We want to scan from this\n # line up to the beginning of the function, minus a few lines.\n # template \n # class C\n # : public ... { // start scanning here\n arg_7 = arg_1.stack[arg_6].starting_linenum\n\n arg_8 = 0\n if arg_6 > 0:\n arg_8 = arg_1.stack[arg_6 - 1].starting_linenum\n arg_9 = arg_7\n while arg_9 >= arg_8:\n if arg_0.elided[arg_9].find('template') >= 0:\n break\n arg_9 -= 1\n if arg_9 < arg_8:\n # Didn't find any \"template\" keyword before reaching the next block,\n # there are probably no template things to check for this block\n arg_6 -= 1\n continue\n\n # Look for typename in the specified range\n for arg_10 in xrange(arg_9, arg_7 + 1, 1):\n if Search(arg_5, arg_0.elided[arg_10]):\n return True\n arg_6 -= 1\n\n return False"} +{"_id": "doc_2981", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Checks for additional blank line issues related to sections.\n\n Currently the only thing checked here is blank line before protected/private.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n class_info: A _ClassInfo objects.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n # Skip checks if the class is small, where small means 25 lines or less.\n # 25 lines seems like a good cutoff since that's the usual height of\n # terminals, and any class that can't fit in one screen can't really\n # be considered \"small\".\n #\n # Also skip checks if we are on the first line. This accounts for\n # classes that look like\n # class Foo { public: ... };\n #\n # If we didn't find the end of the class, last_line would be zero,\n # and the check will be skipped by the first condition.\n if (arg_2.last_line - arg_2.starting_linenum <= 24 or\n arg_3 <= arg_2.starting_linenum):\n return\n\n arg_5 = Match(r'\\s*(public|protected|private):', arg_1.lines[arg_3])\n if arg_5:\n # Issue warning if the line before public/protected/private was\n # not a blank line, but don't do this if the previous line contains\n # \"class\" or \"struct\". This can happen two ways:\n # - We are at the beginning of the class.\n # - We are forward-declaring an inner class that is semantically\n # private, but needed to be public for implementation reasons.\n # Also ignores cases where the previous line ends with a backslash as can be\n # common when defining classes in C macros.\n arg_6 = arg_1.lines[arg_3 - 1]\n if (not IsBlankLine(arg_6) and\n not Search(r'\\b(class|struct)\\b', arg_6) and\n not Search(r'\\\\$', arg_6)):\n # Try a bit harder to find the beginning of the class. 
This is to\n # account for multi-line base-specifier lists, e.g.:\n # class Derived\n # : public Base {\n arg_7 = arg_2.starting_linenum\n for arg_8 in range(arg_2.starting_linenum, arg_3):\n if Search(r'\\{\\s*$', arg_1.lines[arg_8]):\n arg_7 = arg_8\n break\n if arg_7 < arg_3 - 1:\n arg_4(arg_0, arg_3, 'whitespace/blank_line', 3,\n '\"%s:\" should be preceded by a blank line' % arg_5.group(1))"} +{"_id": "doc_2982", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the most recent non-blank line and its line number.\n\n Args:\n clean_lines: A CleansedLines instance containing the file contents.\n linenum: The number of the line to check.\n\n Returns:\n A tuple with two elements. The first element is the contents of the last\n non-blank line before the current line, or the empty string if this is the\n first non-blank line. The second is the line number of that line, or -1\n if this is the first non-blank line.\n \"\"\"\n\n arg_2 = arg_1 - 1\n while arg_2 >= 0:\n arg_3 = arg_0.elided[arg_2]\n if not IsBlankLine(arg_3): # if not a blank line...\n return (arg_3, arg_2)\n arg_2 -= 1\n return ('', -1)"} +{"_id": "doc_2983", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Looks for redundant trailing semicolon.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n\n arg_4 = arg_1.elided[arg_2]\n\n # Block bodies should not be followed by a semicolon. Due to C++11\n # brace initialization, there are more places where semicolons are\n # required than not, so we use a whitelist approach to check these\n # rather than a blacklist. These are the places where \"};\" should\n # be replaced by just \"}\":\n # 1. Some flavor of block following closing parenthesis:\n # for (;;) {};\n # while (...) {};\n # switch (...) {};\n # Function(...) {};\n # if (...) {};\n # if (...) else if (...) {};\n #\n # 2. else block:\n # if (...) else {};\n #\n # 3. const member function:\n # Function(...) const {};\n #\n # 4. Block following some statement:\n # x = 42;\n # {};\n #\n # 5. Block at the beginning of a function:\n # Function(...) {\n # {};\n # }\n #\n # Note that naively checking for the preceding \"{\" will also match\n # braces inside multi-dimensional arrays, but this is fine since\n # that expression will not contain semicolons.\n #\n # 6. Block following another block:\n # while (true) {}\n # {};\n #\n # 7. End of namespaces:\n # namespace {};\n #\n # These semicolons seems far more common than other kinds of\n # redundant semicolons, possibly due to people converting classes\n # to namespaces. For now we do not warn for this case.\n #\n # Try matching case 1 first.\n arg_5 = Match(r'^(.*\\)\\s*)\\{', arg_4)\n if arg_5:\n # Matched closing parenthesis (case 1). Check the token before the\n # matching opening parenthesis, and don't warn if it looks like a\n # macro. 
This avoids these false positives:\n # - macro that defines a base class\n # - multi-line macro that defines a base class\n # - macro that defines the whole class-head\n #\n # But we still issue warnings for macros that we know are safe to\n # warn, specifically:\n # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P\n # - TYPED_TEST\n # - INTERFACE_DEF\n # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:\n #\n # We implement a whitelist of safe macros instead of a blacklist of\n # unsafe macros, even though the latter appears less frequently in\n # google code and would have been easier to implement. This is because\n # the downside for getting the whitelist wrong means some extra\n # semicolons, while the downside for getting the blacklist wrong\n # would result in compile errors.\n #\n # In addition to macros, we also don't want to warn on\n # - Compound literals\n # - Lambdas\n # - alignas specifier with anonymous structs\n # - decltype\n arg_6 = arg_5.group(1).rfind(')')\n arg_7 = ReverseCloseExpression(\n arg_1, arg_2, arg_6)\n if arg_7[2] > -1:\n arg_8 = arg_7[0][0:arg_7[2]]\n arg_9 = Search(r'\\b([A-Z_][A-Z0-9_]*)\\s*$', arg_8)\n arg_10 = Match(r'^(.*\\])\\s*$', arg_8)\n if ((arg_9 and\n arg_9.group(1) not in (\n 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',\n 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',\n 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or\n (arg_10 and not Search(r'\\boperator\\s*\\[\\s*\\]', arg_10.group(1))) or\n Search(r'\\b(?:struct|union)\\s+alignas\\s*$', arg_8) or\n Search(r'\\bdecltype$', arg_8) or\n Search(r'\\s+=\\s*$', arg_8)):\n arg_5 = None\n if (arg_5 and\n arg_7[1] > 1 and\n Search(r'\\]\\s*$', arg_1.elided[arg_7[1] - 1])):\n # Multi-line lambda-expression\n arg_5 = None\n\n else:\n # Try matching cases 2-3.\n arg_5 = Match(r'^(.*(?:else|\\)\\s*const)\\s*)\\{', arg_4)\n if not arg_5:\n # Try matching cases 4-6. These are always matched on separate lines.\n #\n # Note that we can't simply concatenate the previous line to the\n # current line and do a single match, otherwise we may output\n # duplicate warnings for the blank line case:\n # if (cond) {\n # // blank line\n # }\n arg_11 = GetPreviousNonBlankLine(arg_1, arg_2)[0]\n if arg_11 and Search(r'[;{}]\\s*$', arg_11):\n arg_5 = Match(r'^(\\s*)\\{', arg_4)\n\n # Check matching closing brace\n if arg_5:\n (arg_12, arg_13, arg_14) = CloseExpression(\n arg_1, arg_2, len(arg_5.group(1)))\n if arg_14 > -1 and Match(r'^\\s*;', arg_12[arg_14:]):\n # Current {} pair is eligible for semicolon check, and we have found\n # the redundant semicolon, output warning here.\n #\n # Note: because we are scanning forward for opening braces, and\n # outputting warnings for the matching closing brace, if there are\n # nested blocks with trailing semicolons, we will get the error\n # messages in reversed order.\n\n # We need to check the line forward for NOLINT\n arg_15 = arg_1.raw_lines\n ParseNolintSuppressions(arg_0, arg_15[arg_13-1], arg_13-1,\n arg_3)\n ParseNolintSuppressions(arg_0, arg_15[arg_13], arg_13,\n arg_3)\n\n arg_3(arg_0, arg_13, 'readability/braces', 4,\n \"You don't need a ; after a }\")"} +{"_id": "doc_2984", "title": "", "text": "def Func(arg_0):\n \"\"\"Find a replaceable CHECK-like macro.\n\n Args:\n line: line to search on.\n Returns:\n (macro name, start position), or (None, -1) if no replaceable\n macro is found.\n \"\"\"\n for arg_1 in _CHECK_MACROS:\n arg_2 = arg_0.find(arg_1)\n if arg_2 >= 0:\n # Find opening parenthesis. 
Do a regular expression match here\n # to make sure that we are matching the expected CHECK macro, as\n # opposed to some other macro that happens to contain the CHECK\n # substring.\n arg_3 = Match(r'^(.*\\b' + arg_1 + r'\\s*)\\(', arg_0)\n if not arg_3:\n continue\n return (arg_1, len(arg_3.group(1)))\n return (None, -1)"} +{"_id": "doc_2985", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Checks the use of CHECK and EXPECT macros.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n\n # Decide the set of replacement macros that should be suggested\n arg_4 = arg_1.elided\n (arg_5, arg_6) = FindCheckMacro(arg_4[arg_2])\n if not arg_5:\n return\n\n # Find end of the boolean expression by matching parentheses\n (arg_7, arg_8, arg_9) = CloseExpression(\n arg_1, arg_2, arg_6)\n if arg_9 < 0:\n return\n\n # If the check macro is followed by something other than a\n # semicolon, assume users will log their own custom error messages\n # and don't suggest any replacements.\n if not Match(r'\\s*;', arg_7[arg_9:]):\n return\n\n if arg_2 == arg_8:\n arg_10 = arg_4[arg_2][arg_6 + 1:arg_9 - 1]\n else:\n arg_10 = arg_4[arg_2][arg_6 + 1:]\n for arg_11 in xrange(arg_2 + 1, arg_8):\n arg_10 += arg_4[arg_11]\n arg_10 += arg_7[0:arg_9 - 1]\n\n # Parse expression so that we can take parentheses into account.\n # This avoids false positives for inputs like \"CHECK((a < 4) == b)\",\n # which is not replaceable by CHECK_LE.\n arg_12 = ''\n arg_13 = ''\n arg_14 = None\n while arg_10:\n arg_15 = Match(r'^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||'\n r'==|!=|>=|>|<=|<|\\()(.*)$', arg_10)\n if arg_15:\n arg_16 = arg_15.group(1)\n if arg_16 == '(':\n # Parenthesized operand\n arg_10 = arg_15.group(2)\n (arg_17, arg_18) = FindEndOfExpressionInLine(arg_10, 0, ['('])\n if arg_17 < 0:\n return # Unmatched parenthesis\n arg_12 += '(' + arg_10[0:arg_17]\n arg_10 = arg_10[arg_17:]\n elif arg_16 in ('&&', '||'):\n # Logical and/or operators. This means the expression\n # contains more than one term, for example:\n # CHECK(42 < a && a < b);\n #\n # These are not replaceable with CHECK_LE, so bail out early.\n return\n elif arg_16 in ('<<', '<<=', '>>', '>>=', '->*', '->'):\n # Non-relational operator\n arg_12 += arg_16\n arg_10 = arg_15.group(2)\n else:\n # Relational operator\n arg_14 = arg_16\n arg_13 = arg_15.group(2)\n break\n else:\n # Unparenthesized operand. Instead of appending to lhs one character\n # at a time, we do another regular expression match to consume several\n # characters at once if possible. Trivial benchmark shows that this\n # is more efficient when the operands are longer than a single\n # character, which is generally the case.\n arg_15 = Match(r'^([^-=!<>()&|]+)(.*)$', arg_10)\n if not arg_15:\n arg_15 = Match(r'^(\\s*\\S)(.*)$', arg_10)\n if not arg_15:\n break\n arg_12 += arg_15.group(1)\n arg_10 = arg_15.group(2)\n\n # Only apply checks if we got all parts of the boolean expression\n if not (arg_12 and arg_14 and arg_13):\n return\n\n # Check that rhs do not contain logical operators. We already know\n # that lhs is fine since the loop above parses out && and ||.\n if arg_13.find('&&') > -1 or arg_13.find('||') > -1:\n return\n\n # At least one of the operands must be a constant literal. 
This is\n # to avoid suggesting replacements for unprintable things like\n # CHECK(variable != iterator)\n #\n # The following pattern matches decimal, hex integers, strings, and\n # characters (in that order).\n arg_12 = arg_12.strip()\n arg_13 = arg_13.strip()\n arg_19 = r'^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|\".*\"|\\'.*\\')$'\n if Match(arg_19, arg_12) or Match(arg_19, arg_13):\n # Note: since we know both lhs and rhs, we can provide a more\n # descriptive error message like:\n # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)\n # Instead of:\n # Consider using CHECK_EQ instead of CHECK(a == b)\n #\n # We are still keeping the less descriptive message because if lhs\n # or rhs gets long, the error message might become unreadable.\n arg_3(arg_0, arg_2, 'readability/check', 2,\n 'Consider using %s instead of %s(a %s b)' % (\n _CHECK_REPLACEMENT[arg_5][arg_14],\n arg_5, arg_14))"} +{"_id": "doc_2986", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check alternative keywords being used in boolean expressions.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n\n # Avoid preprocessor lines\n if Match(r'^\\s*#', arg_4):\n return\n\n # Last ditch effort to avoid multi-line comments. This will not help\n # if the comment started before the current line or ended after the\n # current line, but it catches most of the false positives. At least,\n # it provides a way to workaround this warning for people who use\n # multi-line comments in preprocessor macros.\n #\n # TODO(unknown): remove this once cpplint has better support for\n # multi-line comments.\n if arg_4.find('/*') >= 0 or arg_4.find('*/') >= 0:\n return\n\n for arg_5 in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(arg_4):\n arg_3(arg_0, arg_2, 'readability/alt_tokens', 2,\n 'Use operator %s instead of %s' % (\n _ALT_TOKEN_REPLACEMENT[arg_5.group(1)], arg_5.group(1)))"} +{"_id": "doc_2987", "title": "", "text": "def Func(arg_0):\n \"\"\"Determines the width of the line in column positions.\n\n Args:\n line: A string, which may be a Unicode string.\n\n Returns:\n The width of the line in column positions, accounting for Unicode\n combining characters and wide characters.\n \"\"\"\n if isinstance(arg_0, unicode):\n arg_1 = 0\n for arg_2 in unicodedata.normalize('NFC', arg_0):\n if unicodedata.east_asian_width(arg_2) in ('W', 'F'):\n arg_1 += 2\n elif not unicodedata.combining(arg_2):\n arg_1 += 1\n return arg_1\n else:\n return len(arg_0)"} +{"_id": "doc_2988", "title": "", "text": "def Func(arg_0):\n \"\"\"Drops common suffixes like _test.cc or -inl.h from filename.\n\n For example:\n >>> Func('foo/foo-inl.h')\n 'foo/foo'\n >>> Func('foo/bar/foo.cc')\n 'foo/bar/foo'\n >>> Func('foo/foo_internal.h')\n 'foo/foo'\n >>> Func('foo/foo_unusualinternal.h')\n 'foo/foo_unusualinternal'\n\n Args:\n filename: The input filename.\n\n Returns:\n The filename with the common suffix removed.\n \"\"\"\n for arg_1 in itertools.chain(\n ('%s.%s' % (test_suffix.lstrip('_'), ext)\n for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),\n ('%s.%s' % (arg_1, ext)\n for arg_1, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):\n if (arg_0.endswith(arg_1) and len(arg_0) > len(arg_1) and\n arg_0[-len(arg_1) - 1] in ('-', '_')):\n return arg_0[:-len(arg_1) - 1]\n return 
os.path.splitext(arg_0)[0]"} +{"_id": "doc_2989", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Figures out what kind of header 'include' is.\n\n Args:\n fileinfo: The current file cpplint is running over. A FileInfo instance.\n include: The path to a #included file.\n is_system: True if the #include used <> rather than \"\".\n\n Returns:\n One of the _XXX_HEADER constants.\n\n For example:\n >>> Func(FileInfo('foo/foo.cc'), 'stdio.h', True)\n _C_SYS_HEADER\n >>> Func(FileInfo('foo/foo.cc'), 'string', True)\n _CPP_SYS_HEADER\n >>> Func(FileInfo('foo/foo.cc'), 'foo/foo.h', False)\n _LIKELY_MY_HEADER\n >>> Func(FileInfo('foo/foo_unknown_extension.cc'),\n ... 'bar/foo_other_ext.h', False)\n _POSSIBLE_MY_HEADER\n >>> Func(FileInfo('foo/foo.cc'), 'foo/bar.h', False)\n _OTHER_HEADER\n \"\"\"\n # This is a list of all standard c++ header files, except\n # those already checked for above.\n arg_3 = arg_1 in _CPP_HEADERS\n\n # Headers with C++ extensions shouldn't be considered C system headers\n if arg_2 and os.path.splitext(arg_1)[1] in ['.hpp', '.hxx', '.h++']:\n arg_2 = False\n\n if arg_2:\n if arg_3:\n return _CPP_SYS_HEADER\n else:\n return _C_SYS_HEADER\n\n # If the target file and the include we're checking share a\n # basename when we drop common extensions, and the include\n # lives in . , then it's likely to be owned by the target file.\n arg_4, arg_5 = (\n os.path.split(_DropCommonSuffixes(arg_0.RepositoryName())))\n arg_6, arg_7 = os.path.split(_DropCommonSuffixes(arg_1))\n arg_8 = os.path.normpath(arg_4 + '/../public')\n arg_8 = arg_8.replace('\\\\', '/')\n if arg_5 == arg_7 and (\n arg_6 == arg_4 or\n arg_6 == arg_8):\n return _LIKELY_MY_HEADER\n\n # If the target and include share some initial basename\n # component, it's possible the target is implementing the\n # include, so it's allowed to be first, but we'll never\n # complain if it's not there.\n arg_9 = _RE_FIRST_COMPONENT.match(arg_5)\n arg_10 = _RE_FIRST_COMPONENT.match(arg_7)\n if (arg_9 and arg_10 and\n arg_9.group(0) ==\n arg_10.group(0)):\n return _POSSIBLE_MY_HEADER\n\n return _OTHER_HEADER"} +{"_id": "doc_2990", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check for unsafe global or static objects.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n\n # Match two lines at a time to support multiline declarations\n if arg_2 + 1 < arg_1.NumLines() and not Search(r'[;({]', arg_4):\n arg_4 += arg_1.elided[arg_2 + 1].strip()\n\n # Check for people declaring static/global STL strings at the top level.\n # This is dangerous because the C++ language does not guarantee that\n # globals with constructors are initialized before the first access, and\n # also because globals can be destroyed when some threads are still running.\n # TODO(unknown): Generalize this to also find static unique_ptr instances.\n # TODO(unknown): File bugs for clang-tidy to find these.\n arg_5 = Match(\n r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'\n r'([a-zA-Z0-9_:]+)\\b(.*)',\n arg_4)\n\n # Remove false positives:\n # - String pointers (as opposed to values).\n # string *pointer\n # const string *pointer\n # string const *pointer\n # string *const pointer\n #\n # - Functions and template specializations.\n # string Function(...\n # string Class::Method(...\n #\n # - Operators. 
These are matched separately because operator names\n # cross non-word boundaries, and trying to match both operators\n # and functions at the same time would decrease accuracy of\n # matching identifiers.\n # string Class::operator*()\n if (arg_5 and\n not Search(r'\\bstring\\b(\\s+const)?\\s*[\\*\\&]\\s*(const\\s+)?\\w', arg_4) and\n not Search(r'\\boperator\\W', arg_4) and\n not Match(r'\\s*(<.*>)?(::[a-zA-Z0-9_]+)*\\s*\\(([^\"]|$)', arg_5.group(4))):\n if Search(r'\\bconst\\b', arg_4):\n arg_3(arg_0, arg_2, 'runtime/string', 4,\n 'For a static/global string constant, use a C style string '\n 'instead: \"%schar%s %s[]\".' %\n (arg_5.group(1), arg_5.group(2) or '', arg_5.group(3)))\n else:\n arg_3(arg_0, arg_2, 'runtime/string', 4,\n 'Static/global string variables are not permitted.')\n\n if (Search(r'\\b([A-Za-z0-9_]*_)\\(\\1\\)', arg_4) or\n Search(r'\\b([A-Za-z0-9_]*_)\\(CHECK_NOTNULL\\(\\1\\)\\)', arg_4)):\n arg_3(arg_0, arg_2, 'runtime/init', 4,\n 'You seem to be initializing a member variable with itself.')"} +{"_id": "doc_2991", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check for printf related issues.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n\n # When snprintf is used, the second argument shouldn't be a literal.\n arg_5 = Search(r'snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,', arg_4)\n if arg_5 and arg_5.group(2) != '0':\n # If 2nd arg is zero, snprintf is used to calculate size.\n arg_3(arg_0, arg_2, 'runtime/printf', 3,\n 'If you can, use sizeof(%s) instead of %s as the 2nd arg '\n 'to snprintf.' % (arg_5.group(1), arg_5.group(2)))\n\n # Check if some verboten C functions are being used.\n if Search(r'\\bsprintf\\s*\\(', arg_4):\n arg_3(arg_0, arg_2, 'runtime/printf', 5,\n 'Never use sprintf. Use snprintf instead.')\n arg_5 = Search(r'\\b(strcpy|strcat)\\s*\\(', arg_4)\n if arg_5:\n arg_3(arg_0, arg_2, 'runtime/printf', 4,\n 'Almost always, snprintf is better than %s' % arg_5.group(1))"} +{"_id": "doc_2992", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if current line is inside constructor initializer list.\n\n Args:\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n Returns:\n True if current line appears to be inside constructor initializer\n list, False otherwise.\n \"\"\"\n for arg_2 in xrange(arg_1, 1, -1):\n arg_3 = arg_0.elided[arg_2]\n if arg_2 == arg_1:\n arg_4 = Match(r'^(.*)\\{\\s*$', arg_3)\n if arg_4:\n arg_3 = arg_4.group(1)\n\n if Search(r'\\s:\\s*\\w+[({]', arg_3):\n # A lone colon tend to indicate the start of a constructor\n # initializer list. 
It could also be a ternary operator, which\n # also tend to appear in constructor initializer lists as\n # opposed to parameter lists.\n return True\n if Search(r'\\}\\s*,\\s*$', arg_3):\n # A closing brace followed by a comma is probably the end of a\n # brace-initialized member in constructor initializer list.\n return True\n if Search(r'[{};]\\s*$', arg_3):\n # Found one of the following:\n # - A closing brace or semicolon, probably the end of the previous\n # function.\n # - An opening brace, probably the start of current class or namespace.\n #\n # Current line is probably not inside an initializer list since\n # we saw one of those things without seeing the starting colon.\n return False\n\n # Got to the beginning of the file without seeing the start of\n # constructor initializer list.\n return False"} +{"_id": "doc_2993", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4):\n \"\"\"Check for non-const references.\n\n Separate from CheckLanguage since it scans backwards from current\n line, instead of scanning forward.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n nesting_state: A NestingState instance which maintains information about\n the current stack of nested blocks being parsed.\n error: The function to call with any errors found.\n \"\"\"\n # Do nothing if there is no '&' on current line.\n arg_5 = arg_1.elided[arg_2]\n if '&' not in arg_5:\n return\n\n # If a function is inherited, current function doesn't have much of\n # a choice, so any non-const references should not be blamed on\n # derived function.\n if IsDerivedFunction(arg_1, arg_2):\n return\n\n # Don't warn on out-of-line method definitions, as we would warn on the\n # in-line declaration, if it isn't marked with 'override'.\n if IsOutOfLineMethodDefinition(arg_1, arg_2):\n return\n\n # Long type names may be broken across multiple lines, usually in one\n # of these forms:\n # LongType\n # ::LongTypeContinued &identifier\n # LongType::\n # LongTypeContinued &identifier\n # LongType<\n # ...>::LongTypeContinued &identifier\n #\n # If we detected a type split across two lines, join the previous\n # line to current line so that we can match const references\n # accordingly.\n #\n # Note that this only scans back one line, since scanning back\n # arbitrary number of lines would be expensive. If you have a type\n # that spans more than 2 lines, please use a typedef.\n if arg_2 > 1:\n arg_6 = None\n if Match(r'\\s*::(?:[\\w<>]|::)+\\s*&\\s*\\S', arg_5):\n # previous_line\\n + ::current_line\n arg_6 = Search(r'\\b((?:const\\s*)?(?:[\\w<>]|::)+[\\w<>])\\s*$',\n arg_1.elided[arg_2 - 1])\n elif Match(r'\\s*[a-zA-Z_]([\\w<>]|::)+\\s*&\\s*\\S', arg_5):\n # previous_line::\\n + current_line\n arg_6 = Search(r'\\b((?:const\\s*)?(?:[\\w<>]|::)+::)\\s*$',\n arg_1.elided[arg_2 - 1])\n if arg_6:\n arg_5 = arg_6.group(1) + arg_5.lstrip()\n else:\n # Check for templated parameter that is split across multiple lines\n arg_7 = arg_5.rfind('>')\n if arg_7 > -1:\n (arg_8, arg_9, arg_10) = ReverseCloseExpression(\n arg_1, arg_2, arg_7)\n if arg_10 > -1 and arg_9 < arg_2:\n # Found the matching < on an earlier line, collect all\n # pieces up to current line.\n arg_5 = ''\n for arg_11 in xrange(arg_9, arg_2 + 1):\n arg_5 += arg_1.elided[arg_11].strip()\n\n # Check for non-const references in function parameters. 
A single '&' may\n # found in the following places:\n # inside expression: binary & for bitwise AND\n # inside expression: unary & for taking the address of something\n # inside declarators: reference parameter\n # We will exclude the first two cases by checking that we are not inside a\n # function body, including one that was just introduced by a trailing '{'.\n # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].\n if (arg_3.previous_stack_top and\n not (isinstance(arg_3.previous_stack_top, _ClassInfo) or\n isinstance(arg_3.previous_stack_top, _NamespaceInfo))):\n # Not at toplevel, not within a class, and not within a namespace\n return\n\n # Avoid initializer lists. We only need to scan back from the\n # current line for something that starts with ':'.\n #\n # We don't need to check the current line, since the '&' would\n # appear inside the second set of parentheses on the current line as\n # opposed to the first set.\n if arg_2 > 0:\n for arg_11 in xrange(arg_2 - 1, max(0, arg_2 - 10), -1):\n arg_12 = arg_1.elided[arg_11]\n if not Search(r'[),]\\s*$', arg_12):\n break\n if Match(r'^\\s*:\\s+\\S', arg_12):\n return\n\n # Avoid preprocessors\n if Search(r'\\\\\\s*$', arg_5):\n return\n\n # Avoid constructor initializer lists\n if IsInitializerList(arg_1, arg_2):\n return\n\n # We allow non-const references in a few standard places, like functions\n # called \"swap()\" or iostream operators like \"<<\" or \">>\". Do not check\n # those function parameters.\n #\n # We also accept & in static_assert, which looks like a function but\n # it's actually a declaration expression.\n arg_13 = (r'(?:[sS]wap(?:<\\w:+>)?|'\n r'operator\\s*[<>][<>]|'\n r'static_assert|COMPILE_ASSERT'\n r')\\s*\\(')\n if Search(arg_13, arg_5):\n return\n elif not Search(r'\\S+\\([^)]*$', arg_5):\n # Don't see a whitelisted function on this line. Actually we\n # didn't see any function name on this line, so this is likely a\n # multi-line parameter list. Try a bit harder to catch this case.\n for arg_11 in xrange(2):\n if (arg_2 > arg_11 and\n Search(arg_13, arg_1.elided[arg_2 - arg_11 - 1])):\n return\n\n arg_14 = ReplaceAll(r'{[^}]*}', ' ', arg_5) # exclude function body\n for arg_15 in re.findall(_RE_PATTERN_REF_PARAM, arg_14):\n if (not Match(_RE_PATTERN_CONST_REF_PARAM, arg_15) and\n not Match(_RE_PATTERN_REF_STREAM_PARAM, arg_15)):\n arg_4(arg_0, arg_2, 'runtime/references', 2,\n 'Is this a non-const reference? '\n 'If so, make const or use a pointer: ' +\n ReplaceAll(' *<', '<', arg_15))"} +{"_id": "doc_2994", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if these two filenames belong to the same module.\n\n The concept of a 'module' here is a as follows:\n foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the\n same 'module' if they are in the same directory.\n some/path/public/xyzzy and some/path/internal/xyzzy are also considered\n to belong to the same module here.\n\n If the filename_cc contains a longer path than the filename_h, for example,\n '/absolute/path/to/base/sysinfo.cc', and this file would include\n 'base/sysinfo.h', this function also produces the prefix needed to open the\n header. This is used by the caller of this function to more robustly open the\n header file. We don't have access to the real include paths in this context,\n so we need this guesswork here.\n\n Known bugs: tools/base/bar.cc and base/bar.h belong to the same module\n according to this implementation. Because of this, this function gives\n some false positives. 
This should be sufficiently rare in practice.\n\n Args:\n filename_cc: is the path for the source (e.g. .cc) file\n filename_h: is the path for the header path\n\n Returns:\n Tuple with a bool and a string:\n bool: True if filename_cc and filename_h belong to the same module.\n string: the additional prefix needed to open the header file.\n \"\"\"\n arg_2 = FileInfo(arg_0)\n if not arg_2.Extension().lstrip('.') in GetNonHeaderExtensions():\n return (False, '')\n\n arg_3 = FileInfo(arg_1)\n if not arg_3.Extension().lstrip('.') in GetHeaderExtensions():\n return (False, '')\n\n arg_0 = arg_0[:-(len(arg_2.Extension()))]\n arg_4 = Search(_TEST_FILE_SUFFIX, arg_2.BaseName())\n if arg_4:\n arg_0 = arg_0[:-len(arg_4.group(1))]\n\n arg_0 = arg_0.replace('/public/', '/')\n arg_0 = arg_0.replace('/internal/', '/')\n\n arg_1 = arg_1[:-(len(arg_3.Extension()))]\n if arg_1.endswith('-inl'):\n arg_1 = arg_1[:-len('-inl')]\n arg_1 = arg_1.replace('/public/', '/')\n arg_1 = arg_1.replace('/internal/', '/')\n\n arg_5 = arg_0.endswith(arg_1)\n arg_6 = ''\n if arg_5:\n arg_6 = arg_0[:-len(arg_1)]\n return arg_5, arg_6"} +{"_id": "doc_2995", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check that make_pair's template arguments are deduced.\n\n G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are\n specified explicitly, and such use isn't intended in any case.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n arg_5 = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(arg_4)\n if arg_5:\n arg_3(arg_0, arg_2, 'build/explicit_make_pair',\n 4, # 4 = high confidence\n 'For C++11-compatibility, omit template arguments from make_pair'\n ' OR use pair directly OR if appropriate, construct a pair directly')"} +{"_id": "doc_2996", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check if line contains a redundant \"virtual\" function-specifier.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n # Look for \"virtual\" on current line.\n arg_4 = arg_1.elided[arg_2]\n arg_5 = Match(r'^(.*)(\\bvirtual\\b)(.*)$', arg_4)\n if not arg_5: return\n\n # Ignore \"virtual\" keywords that are near access-specifiers. These\n # are only used in class base-specifier and do not apply to member\n # functions.\n if (Search(r'\\b(public|protected|private)\\s+$', arg_5.group(1)) or\n Match(r'^\\s+(public|protected|private)\\b', arg_5.group(3))):\n return\n\n # Ignore the \"virtual\" keyword from virtual base classes. Usually\n # there is a column on the same line in these cases (virtual base\n # classes are rare in google3 because multiple inheritance is rare).\n if Match(r'^.*[^:]:[^:].*$', arg_4): return\n\n # Look for the next opening parenthesis. 
This is the start of the\n # parameter list (possibly on the next line shortly after virtual).\n # TODO(unknown): doesn't work if there are virtual functions with\n # decltype() or other things that use parentheses, but csearch suggests\n # that this is rare.\n arg_6 = -1\n arg_7 = -1\n arg_8 = len(arg_5.group(2))\n for arg_9 in xrange(arg_2, min(arg_2 + 3, arg_1.NumLines())):\n arg_4 = arg_1.elided[arg_9][arg_8:]\n arg_10 = Match(r'^([^(]*)\\(', arg_4)\n if arg_10:\n # Match parentheses to find the end of the parameter list\n (arg_11, arg_7, arg_6) = CloseExpression(\n arg_1, arg_9, arg_8 + len(arg_10.group(1)))\n break\n arg_8 = 0\n\n if arg_6 < 0:\n return # Couldn't find end of parameter list, give up\n\n # Look for \"override\" or \"final\" after the parameter list\n # (possibly on the next few lines).\n for arg_12 in xrange(arg_7, min(arg_7 + 3, arg_1.NumLines())):\n arg_4 = arg_1.elided[arg_12][arg_6:]\n arg_13 = Search(r'\\b(override|final)\\b', arg_4)\n if arg_13:\n arg_3(arg_0, arg_2, 'readability/inheritance', 4,\n ('\"virtual\" is redundant since function is '\n 'already declared as \"%s\"' % arg_13.group(1)))\n\n # Set end_col to check whole lines after we are done with the\n # first line.\n arg_6 = 0\n if Search(r'[^\\w]\\s*$', arg_4):\n break"} +{"_id": "doc_2997", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Flag those C++14 features that we restrict.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n \"\"\"\n arg_4 = arg_1.elided[arg_2]\n\n arg_5 = Match(r'\\s*#\\s*include\\s+[<\"]([^<\"]+)[\">]', arg_4)\n\n # Flag unapproved C++14 headers.\n if arg_5 and arg_5.group(1) in ('scoped_allocator', 'shared_mutex'):\n arg_3(arg_0, arg_2, 'build/c++14', 5,\n ('<%s> is an unapproved C++14 header.') % arg_5.group(1))"} +{"_id": "doc_2998", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=None):\n \"\"\"Performs lint checks and reports any errors to the given error function.\n\n Args:\n filename: Filename of the file that is being processed.\n file_extension: The extension (dot not included) of the file.\n lines: An array of strings, each representing a line of the file, with the\n last element being empty if the file is terminated with a newline.\n error: A callable to which errors are reported, which takes 4 arguments:\n filename, line number, error level, and message\n extra_check_functions: An array of additional check functions that will be\n run on each source line. 
Each function takes 4\n arguments: filename, clean_lines, line, error\n \"\"\"\n arg_2 = (['// marker so line numbers and indices both start at 1'] + arg_2 +\n ['// marker so line numbers end in a known way'])\n\n arg_5 = _IncludeState()\n arg_6 = _FunctionState()\n arg_7 = NestingState()\n\n ResetNolintSuppressions()\n\n CheckForCopyright(arg_0, arg_2, arg_3)\n ProcessGlobalSuppresions(arg_2)\n RemoveMultiLineComments(arg_0, arg_2, arg_3)\n arg_8 = CleansedLines(arg_2)\n\n if arg_1 in GetHeaderExtensions():\n CheckForHeaderGuard(arg_0, arg_8, arg_3)\n\n for arg_9 in range(arg_8.NumLines()):\n ProcessLine(arg_0, arg_1, arg_8, arg_9,\n arg_5, arg_6, arg_7, arg_3,\n arg_4)\n FlagCxx11Features(arg_0, arg_8, arg_9, arg_3)\n arg_7.CheckCompletedBlocks(arg_0, arg_3)\n\n CheckForIncludeWhatYouUse(arg_0, arg_8, arg_5, arg_3)\n\n # Check that the .cc file has included its header if it exists.\n if _IsSourceExtension(arg_1):\n CheckHeaderFileIncluded(arg_0, arg_5, arg_3)\n\n # We check here rather than inside ProcessLine so that we see raw\n # lines rather than \"cleaned\" lines.\n CheckForBadCharacters(arg_0, arg_2, arg_3)\n\n CheckForNewlineAtEOF(arg_0, arg_2, arg_3)"} +{"_id": "doc_2999", "title": "", "text": "def Func(arg_0):\n \"\"\" Loads the configuration files and processes the config overrides.\n\n Args:\n filename: The name of the file being processed by the linter.\n\n Returns:\n False if the current |filename| should not be processed further.\n \"\"\"\n\n arg_1 = os.path.abspath(arg_0)\n arg_2 = []\n arg_3 = True\n while arg_3:\n arg_4, arg_5 = os.path.split(arg_1)\n if not arg_5:\n break # Reached the root directory.\n\n arg_6 = os.path.join(arg_4, \"CPPLINT.cfg\")\n arg_1 = arg_4\n if not os.path.isfile(arg_6):\n continue\n\n try:\n with open(arg_6) as file_handle:\n for arg_7 in file_handle:\n arg_7, arg_8, arg_8 = arg_7.partition('#') # Remove comments.\n if not arg_7.strip():\n continue\n\n arg_9, arg_8, arg_10 = arg_7.partition('=')\n arg_9 = arg_9.strip()\n arg_10 = arg_10.strip()\n if arg_9 == 'set noparent':\n arg_3 = False\n elif arg_9 == 'filter':\n arg_2.append(arg_10)\n elif arg_9 == 'exclude_files':\n # When matching exclude_files pattern, use the base_name of\n # the current file name or the directory name we are processing.\n # For example, if we are checking for lint errors in /foo/bar/baz.cc\n # and we found the .cfg file at /foo/CPPLINT.cfg, then the config\n # file's \"exclude_files\" filter is meant to be checked against \"bar\"\n # and not \"baz\" nor \"bar/baz.cc\".\n if arg_5:\n arg_11 = re.compile(arg_10)\n if arg_11.match(arg_5):\n _cpplint_state.PrintInfo('Ignoring \"%s\": file excluded by '\n '\"%s\". 
File path component \"%s\" matches pattern \"%s\"\\n' %\n (arg_0, arg_6, arg_5, arg_10))\n return False\n elif arg_9 == 'linelength':\n global arg_12\n try:\n arg_12 = int(arg_10)\n except ValueError:\n _cpplint_state.PrintError('Line length must be numeric.')\n elif arg_9 == 'extensions':\n global arg_14\n try:\n arg_13 = [ext.strip() for ext in arg_10.split(',')]\n arg_14 = set(arg_13)\n except ValueError:\n sys.stderr.write('Extensions should be a comma-separated list of values;'\n 'for example: extensions=hpp,cpp\\n'\n 'This could not be parsed: \"%s\"' % (arg_10,))\n elif arg_9 == 'headers':\n global arg_15\n try:\n arg_13 = [ext.strip() for ext in arg_10.split(',')]\n arg_15 = set(arg_13)\n except ValueError:\n sys.stderr.write('Extensions should be a comma-separated list of values;'\n 'for example: extensions=hpp,cpp\\n'\n 'This could not be parsed: \"%s\"' % (arg_10,))\n elif arg_9 == 'root':\n global arg_16\n arg_16 = arg_10\n else:\n _cpplint_state.PrintError(\n 'Invalid configuration option (%s) in file %s\\n' %\n (arg_9, arg_6))\n\n except IOError:\n _cpplint_state.PrintError(\n \"Skipping config file '%s': Can't open for reading\\n\" % arg_6)\n arg_3 = False\n\n # Apply all the accumulated filters in reverse order (top-level directory\n # config options having the least priority).\n for arg_17 in reversed(arg_2):\n _AddFilters(arg_17)\n\n return True"} +{"_id": "doc_3000", "title": "", "text": "def Func(arg_0):\n \"\"\"Parses the command line arguments.\n\n This may set the output format and verbosity level as side-effects.\n\n Args:\n args: The command line arguments:\n\n Returns:\n The list of filenames to lint.\n \"\"\"\n try:\n (arg_1, arg_2) = getopt.getopt(arg_0, '', ['help', 'output=', 'verbose=',\n 'counting=',\n 'filter=',\n 'root=',\n 'repository=',\n 'linelength=',\n 'extensions=',\n 'exclude=',\n 'headers=',\n 'quiet',\n 'recursive'])\n except getopt.GetoptError:\n PrintUsage('Invalid arguments.')\n\n arg_3 = _VerboseLevel()\n arg_4 = _OutputFormat()\n arg_5 = ''\n arg_6 = ''\n arg_7 = False\n\n for (arg_8, arg_9) in arg_1:\n if arg_8 == '--help':\n PrintUsage(None)\n elif arg_8 == '--output':\n if arg_9 not in ('emacs', 'vs7', 'eclipse', 'junit'):\n PrintUsage('The only allowed output formats are emacs, vs7, eclipse '\n 'and junit.')\n arg_4 = arg_9\n elif arg_8 == '--verbose':\n arg_3 = int(arg_9)\n elif arg_8 == '--filter':\n arg_5 = arg_9\n if not arg_5:\n PrintCategories()\n elif arg_8 == '--counting':\n if arg_9 not in ('total', 'toplevel', 'detailed'):\n PrintUsage('Valid counting options are total, toplevel, and detailed')\n arg_6 = arg_9\n elif arg_8 == '--root':\n global arg_10\n arg_10 = arg_9\n elif arg_8 == '--repository':\n global arg_11\n arg_11 = arg_9\n elif arg_8 == '--linelength':\n global arg_12\n try:\n arg_12 = int(arg_9)\n except ValueError:\n PrintUsage('Line length must be digits.')\n elif arg_8 == '--exclude':\n global arg_13\n if not arg_13:\n arg_13 = set()\n arg_13.update(glob.glob(arg_9))\n elif arg_8 == '--extensions':\n global arg_14\n try:\n arg_14 = set(arg_9.split(','))\n except ValueError:\n PrintUsage('Extensions must be comma seperated list.')\n elif arg_8 == '--headers':\n global arg_15\n try:\n arg_15 = set(arg_9.split(','))\n except ValueError:\n PrintUsage('Extensions must be comma seperated list.')\n elif arg_8 == '--recursive':\n arg_7 = True\n elif arg_8 == '--quiet':\n global arg_16\n arg_16 = True\n\n if not arg_2:\n PrintUsage('No files were specified.')\n\n if arg_7:\n arg_2 = _ExpandDirectories(arg_2)\n\n if 
arg_13:\n arg_2 = _FilterExcludedFiles(arg_2)\n\n _SetOutputFormat(arg_4)\n _SetVerboseLevel(arg_3)\n _SetFilters(arg_5)\n _SetCountingStyle(arg_6)\n\n return arg_2"} +{"_id": "doc_3001", "title": "", "text": "def Func(arg_0):\n \"\"\"Searches a list of filenames and replaces directories in the list with\n all files descending from those directories. Files with extensions not in\n the valid extensions list are excluded.\n\n Args:\n filenames: A list of files or directories\n\n Returns:\n A list of all files that are members of filenames or descended from a\n directory in filenames\n \"\"\"\n arg_1 = set()\n for arg_2 in arg_0:\n if not os.path.isdir(arg_2):\n arg_1.add(arg_2)\n continue\n\n for arg_3, arg_4, arg_5 in os.walk(arg_2):\n for arg_6 in arg_5:\n arg_7 = os.path.join(arg_3, arg_6)\n if arg_7.startswith('.' + os.path.sep):\n arg_7 = arg_7[len('.' + os.path.sep):]\n arg_1.add(arg_7)\n\n arg_8 = []\n for arg_2 in arg_1:\n if os.path.splitext(arg_2)[1][1:] in GetAllExtensions():\n arg_8.append(arg_2)\n\n return arg_8"} +{"_id": "doc_3002", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if a header has already been included.\n\n Args:\n header: header to check.\n Returns:\n Line number of previous occurrence, or -1 if the header has not\n been seen before.\n \"\"\"\n for arg_2 in arg_0.include_list:\n for arg_3 in arg_2:\n if arg_3[0] == arg_1:\n return arg_3[1]\n return -1"} +{"_id": "doc_3003", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a non-empty error message if the next header is out of order.\n\n This function also updates the internal state to be ready to check\n the next include.\n\n Args:\n header_type: One of the _XXX_HEADER constants defined above.\n\n Returns:\n The empty string if the header is in the right order, or an\n error message describing what's wrong.\n\n \"\"\"\n arg_2 = ('Found %s after %s' %\n (arg_0._TYPE_NAMES[arg_1],\n arg_0._SECTION_NAMES[arg_0._section]))\n\n arg_3 = arg_0._section\n\n if arg_1 == _C_SYS_HEADER:\n if arg_0._section <= arg_0._C_SECTION:\n arg_0._section = arg_0._C_SECTION\n else:\n arg_0._last_header = ''\n return arg_2\n elif arg_1 == _CPP_SYS_HEADER:\n if arg_0._section <= arg_0._CPP_SECTION:\n arg_0._section = arg_0._CPP_SECTION\n else:\n arg_0._last_header = ''\n return arg_2\n elif arg_1 == _LIKELY_MY_HEADER:\n if arg_0._section <= arg_0._MY_H_SECTION:\n arg_0._section = arg_0._MY_H_SECTION\n else:\n arg_0._section = arg_0._OTHER_H_SECTION\n elif arg_1 == _POSSIBLE_MY_HEADER:\n if arg_0._section <= arg_0._MY_H_SECTION:\n arg_0._section = arg_0._MY_H_SECTION\n else:\n # This will always be the fallback because we're not sure\n # enough that the header is associated with this file.\n arg_0._section = arg_0._OTHER_H_SECTION\n else:\n assert arg_1 == _OTHER_HEADER\n arg_0._section = arg_0._OTHER_H_SECTION\n\n if arg_3 != arg_0._section:\n arg_0._last_header = ''\n\n return ''"} +{"_id": "doc_3004", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Bumps the module's error statistic.\"\"\"\n arg_0.error_count += 1\n if arg_0.counting in ('toplevel', 'detailed'):\n if arg_0.counting != 'detailed':\n arg_1 = arg_1.split('/')[0]\n if arg_1 not in arg_0.errors_by_category:\n arg_0.errors_by_category[arg_1] = 0\n arg_0.errors_by_category[arg_1] += 1"} +{"_id": "doc_3005", "title": "", "text": "def Func(arg_0):\n \"\"\"Print a summary of errors by category, and the total.\"\"\"\n for arg_1, arg_2 in sorted(iteritems(arg_0.errors_by_category)):\n arg_0.PrintInfo('Category \\'%s\\' errors found: %d\\n' %\n 
(arg_1, arg_2))\n if arg_0.error_count > 0:\n arg_0.PrintInfo('Total errors found: %d\\n' % arg_0.error_count)"} +{"_id": "doc_3006", "title": "", "text": "def Func(arg_0):\n \"\"\"Collapses strings and chars on a line to simple \"\" or '' blocks.\n\n We nix strings first so we're not fooled by text like '\"http://\"'\n\n Args:\n elided: The line being processed.\n\n Returns:\n The line with collapsed strings.\n \"\"\"\n if _RE_PATTERN_INCLUDE.match(arg_0):\n return arg_0\n\n # Remove escaped characters first to make quote/single quote collapsing\n # basic. Things that look like escaped characters shouldn't occur\n # outside of strings and chars.\n arg_0 = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', arg_0)\n\n # Replace quoted strings and digit separators. Both single quotes\n # and double quotes are processed in the same loop, otherwise\n # nested quotes wouldn't work.\n arg_1 = ''\n while True:\n # Find the first quote character\n arg_2 = Match(r'^([^\\'\"]*)([\\'\"])(.*)$', arg_0)\n if not arg_2:\n arg_1 += arg_0\n break\n arg_3, arg_4, arg_5 = arg_2.groups()\n\n if arg_4 == '\"':\n # Collapse double quoted strings\n arg_6 = arg_5.find('\"')\n if arg_6 >= 0:\n arg_1 += arg_3 + '\"\"'\n arg_0 = arg_5[arg_6 + 1:]\n else:\n # Unmatched double quote, don't bother processing the rest\n # of the line since this is probably a multiline string.\n arg_1 += arg_0\n break\n else:\n # Found single quote, check nearby text to eliminate digit separators.\n #\n # There is no special handling for floating point here, because\n # the integer/fractional/exponent parts would all be parsed\n # correctly as long as there are digits on both sides of the\n # separator. So we are fine as long as we don't see something\n # like \"0.'3\" (gcc 4.9.0 will not allow this literal).\n if Search(r'\\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', arg_3):\n arg_7 = Match(r'^((?:\\'?[0-9a-zA-Z_])*)(.*)$', \"'\" + arg_5)\n arg_1 += arg_3 + arg_7.group(1).replace(\"'\", '')\n arg_0 = arg_7.group(2)\n else:\n arg_6 = arg_5.find('\\'')\n if arg_6 >= 0:\n arg_1 += arg_3 + \"''\"\n arg_0 = arg_5[arg_6 + 1:]\n else:\n # Unmatched single quote\n arg_1 += arg_0\n break\n\n return arg_1"} +{"_id": "doc_3007", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Check end of namespace comments.\"\"\"\n arg_5 = arg_2.raw_lines[arg_3]\n\n # Check how many lines is enclosed in this namespace. Don't issue\n # warning for missing namespace comments if there aren't enough\n # lines. However, do apply checks if there is already an end of\n # namespace comment and it's incorrect.\n #\n # TODO(unknown): We always want to check end of namespace comments\n # if a namespace is large, but sometimes we also want to apply the\n # check if a short namespace contained nontrivial things (something\n # other than forward declarations). 
There is currently no logic on\n # deciding what these nontrivial things are, so this check is\n # triggered by namespace size only, which works most of the time.\n if (arg_3 - arg_0.starting_linenum < 10\n and not Match(r'^\\s*};*\\s*(//|/\\*).*\\bnamespace\\b', arg_5)):\n return\n\n # Look for matching comment at end of namespace.\n #\n # Note that we accept C style \"/* */\" comments for terminating\n # namespaces, so that code that terminate namespaces inside\n # preprocessor macros can be cpplint clean.\n #\n # We also accept stuff like \"// end of namespace .\" with the\n # period at the end.\n #\n # Besides these, we don't accept anything else, otherwise we might\n # get false negatives when existing comment is a substring of the\n # expected namespace.\n if arg_0.name:\n # Named namespace\n if not Match((r'^\\s*};*\\s*(//|/\\*).*\\bnamespace\\s+' +\n re.escape(arg_0.name) + r'[\\*/\\.\\\\\\s]*$'),\n arg_5):\n arg_4(arg_1, arg_3, 'readability/namespace', 5,\n 'Namespace should be terminated with \"// namespace %s\"' %\n arg_0.name)\n else:\n # Anonymous namespace\n if not Match(r'^\\s*};*\\s*(//|/\\*).*\\bnamespace[\\*/\\.\\\\\\s]*$', arg_5):\n # If \"// namespace anonymous\" or \"// anonymous namespace (more text)\",\n # mention \"// anonymous namespace\" as an acceptable form\n if Match(r'^\\s*}.*\\b(namespace anonymous|anonymous namespace)\\b', arg_5):\n arg_4(arg_1, arg_3, 'readability/namespace', 5,\n 'Anonymous namespace should be terminated with \"// namespace\"'\n ' or \"// anonymous namespace\"')\n else:\n arg_4(arg_1, arg_3, 'readability/namespace', 5,\n 'Anonymous namespace should be terminated with \"// namespace\"')"} +{"_id": "doc_3008", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update preprocessor stack.\n\n We need to handle preprocessors due to classes like this:\n #ifdef SWIG\n struct ResultDetailsPageElementExtensionPoint {\n #else\n struct ResultDetailsPageElementExtensionPoint : public Extension {\n #endif\n\n We make the following assumptions (good enough for most files):\n - Preprocessor condition evaluates to true from #if up to first\n #else/#elif/#endif.\n\n - Preprocessor condition evaluates to false from #else/#elif up\n to #endif. We still perform lint checks on these lines, but\n these do not affect nesting stack.\n\n Args:\n line: current line to check.\n \"\"\"\n if Match(r'^\\s*#\\s*(if|ifdef|ifndef)\\b', arg_1):\n # Beginning of #if block, save the nesting stack here. The saved\n # stack will allow us to restore the parsing state in the #else case.\n arg_0.pp_stack.append(_PreprocessorInfo(copy.deepcopy(arg_0.stack)))\n elif Match(r'^\\s*#\\s*(else|elif)\\b', arg_1):\n # Beginning of #else block\n if arg_0.pp_stack:\n if not arg_0.pp_stack[-1].seen_else:\n # This is the first #else or #elif block. Remember the\n # whole nesting stack up to this point. 
This is what we\n # keep after the #endif.\n arg_0.pp_stack[-1].seen_else = True\n arg_0.pp_stack[-1].stack_before_else = copy.deepcopy(arg_0.stack)\n\n # Restore the stack to how it was before the #if\n arg_0.stack = copy.deepcopy(arg_0.pp_stack[-1].stack_before_if)\n else:\n # TODO(unknown): unexpected #else, issue warning?\n pass\n elif Match(r'^\\s*#\\s*endif\\b', arg_1):\n # End of #if or #else blocks.\n if arg_0.pp_stack:\n # If we saw an #else, we will need to restore the nesting\n # stack to its former state before the #else, otherwise we\n # will just continue from where we left off.\n if arg_0.pp_stack[-1].seen_else:\n # Here we can just use a shallow copy since we are the last\n # reference to it.\n arg_0.stack = arg_0.pp_stack[-1].stack_before_else\n # Drop the corresponding #if\n arg_0.pp_stack.pop()\n else:\n # TODO(unknown): unexpected #endif, issue warning?\n pass"} +{"_id": "doc_3009", "title": "", "text": "def Func(arg_0):\n \"\"\"Get class info on the top of the stack.\n\n Returns:\n A _ClassInfo object if we are inside a class, or None otherwise.\n \"\"\"\n for arg_1 in range(len(arg_0.stack), 0, -1):\n arg_2 = arg_0.stack[arg_1 - 1]\n if isinstance(arg_2, _ClassInfo):\n return arg_2\n return None"} +{"_id": "doc_3010", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks that all classes and namespaces have been completely parsed.\n\n Call this when all lines in a file have been processed.\n Args:\n filename: The name of the current file.\n error: The function to call with any errors found.\n \"\"\"\n # Note: This test can result in false positives if #ifdef constructs\n # get in the way of brace matching. See the testBuildClass test in\n # cpplint_unittest.py for an example of this.\n for arg_3 in arg_0.stack:\n if isinstance(arg_3, _ClassInfo):\n arg_2(arg_1, arg_3.starting_linenum, 'build/class', 5,\n 'Failed to find complete declaration of class %s' %\n arg_3.name)\n elif isinstance(arg_3, _NamespaceInfo):\n arg_2(arg_1, arg_3.starting_linenum, 'build/namespaces', 5,\n 'Failed to find complete declaration of namespace %s' %\n arg_3.name)"} +{"_id": "doc_3011", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a new Streamlet by applying map_function to each element of this Streamlet\n and flattening the result\n \"\"\"\n from heronpy.streamlet.impl.flatmapbolt import FlatMapStreamlet\n arg_2 = FlatMapStreamlet(arg_1, arg_0)\n arg_0._add_child(arg_2)\n return arg_2"} +{"_id": "doc_3012", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a new Streamlet containing only the elements that satisfy Func_function\n \"\"\"\n from heronpy.streamlet.impl.Funcbolt import FilterStreamlet\n arg_2 = FilterStreamlet(arg_1, arg_0)\n arg_0._add_child(arg_2)\n return arg_2"} +{"_id": "doc_3013", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return num_Funcs number of streamlets each containing all elements\n of the current streamlet\n \"\"\"\n arg_2 = []\n for arg_3 in range(arg_1):\n arg_2.append(arg_0.repartition(arg_0.get_num_partitions()))\n return arg_2"} +{"_id": "doc_3014", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a new Streamlet in which each element of this Streamlet are collected\n over a window defined by window_config and then reduced using the reduce_function\n reduce_function takes two element at one time and reduces them to one element that\n is used in the subsequent operations.\n \"\"\"\n from heronpy.streamlet.impl.reducebywindowbolt import ReduceByWindowStreamlet\n arg_3 = 
ReduceByWindowStreamlet(arg_1, arg_2, arg_0)\n arg_0._add_child(arg_3)\n return arg_3"} +{"_id": "doc_3015", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a new Streamlet that consists of elements of both this and other_streamlet\n \"\"\"\n from heronpy.streamlet.impl.Funcbolt import UnionStreamlet\n arg_2 = UnionStreamlet(arg_0, arg_1)\n arg_0._add_child(arg_2)\n arg_1._add_child(arg_2)\n return arg_2"} +{"_id": "doc_3016", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Return a new Streamlet by Funcing Func_streamlet with this streamlet\n \"\"\"\n from heronpy.streamlet.impl.Funcbolt import JoinStreamlet, JoinBolt\n arg_4 = JoinStreamlet(JoinBolt.INNER, arg_2,\n arg_3, arg_0, arg_1)\n arg_0._add_child(arg_4)\n arg_1._add_child(arg_4)\n return arg_4"} +{"_id": "doc_3017", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Return a new Streamlet by left join_streamlet with this streamlet\n \"\"\"\n from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt\n arg_4 = JoinStreamlet(JoinBolt.OUTER_LEFT, arg_2,\n arg_3, arg_0, arg_1)\n arg_0._add_child(arg_4)\n arg_1._add_child(arg_4)\n return arg_4"} +{"_id": "doc_3018", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" extract common args \"\"\"\n try:\n # do not pop like cli because ``topologies`` subcommand still needs it\n arg_3 = arg_2['cluster/[role]/[env]']\n arg_4 = arg_2['config_path']\n except KeyError:\n # if some of the arguments are not found, print error and exit\n arg_5 = config.get_subparser(arg_1, arg_0)\n print(arg_5.format_help())\n return dict()\n arg_6 = config.get_heron_cluster(arg_3)\n arg_4 = config.get_heron_cluster_conf_dir(arg_6, arg_4)\n\n arg_7 = dict()\n try:\n arg_8 = config.parse_cluster_role_env(arg_3, arg_4)\n arg_7['cluster'] = arg_8[0]\n arg_7['role'] = arg_8[1]\n arg_7['environ'] = arg_8[2]\n arg_7['config_path'] = arg_4\n except Exception as e:\n Log.error(\"Unable to get valid topology location: %s\", str(e))\n return dict()\n\n arg_2.update(arg_7)\n return arg_2"} +{"_id": "doc_3019", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n '''Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise\n returns obj.\n '''\n if not isinstance(arg_0, dict):\n raise_with_traceback(_param_type_mismatch_exception(arg_0, dict, arg_1))\n\n if not (arg_2 or arg_3):\n return arg_0\n\n return _check_key_value_types(arg_0, arg_2, arg_3)"} +{"_id": "doc_3020", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n '''Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty\n dictionary.\n '''\n if arg_0 is not None and not isinstance(arg_0, dict):\n raise_with_traceback(_param_type_mismatch_exception(arg_0, dict, arg_1))\n\n if not arg_0:\n return {}\n\n if arg_4:\n return _check_key_value_types(arg_0, arg_2, arg_3=arg_4, value_check=issubclass)\n return _check_key_value_types(arg_0, arg_2, arg_3)"} +{"_id": "doc_3021", "title": "", "text": "def Func(arg_0):\n '''Record a stream of event records to json'''\n check.str_param(arg_0, 'json_path')\n return construct_single_handler_logger(\n \"json-event-record-logger\",\n DEBUG,\n JsonEventLoggerHandler(\n arg_0,\n lambda record: construct_event_record(\n StructuredLoggerMessage(\n name=record.name,\n message=record.msg,\n level=record.levelno,\n meta=record.dagster_meta,\n record=record,\n )\n ),\n ),\n )"} +{"_id": "doc_3022", "title": "", "text": "def Func(arg_0, arg_1=None):\n 
\"\"\"Read a config file and instantiate the RCParser.\n\n Create new :class:`configparser.ConfigParser` for the given **path**\n and instantiate the :class:`RCParser` with the ConfigParser as\n :attr:`config` attribute.\n\n If the **path** doesn't exist, raise :exc:`ConfigFileError`.\n Otherwise return a new :class:`RCParser` instance.\n\n :param path:\n Optional path to the config file to parse.\n If not given, use ``'~/.pypirc'``.\n\n \"\"\"\n arg_1 = arg_1 or arg_0.CONFIG_PATH\n if not os.path.exists(arg_1):\n arg_2 = 'Config file not found: {0!r}'.format(arg_1)\n raise ConfigFileError(arg_2)\n arg_3 = read_config(arg_1)\n return arg_0(arg_3)"} +{"_id": "doc_3023", "title": "", "text": "def Func(arg_0):\n '''This recursive descent thing formats a config dict for GraphQL.'''\n\n def _format_config_subdict(arg_0, arg_1=0):\n check.dict_param(arg_0, 'config', key_type=str)\n\n arg_2 = IndentingStringIoPrinter(indent_level=2, arg_1=arg_1)\n arg_2.line('{')\n\n arg_3 = len(arg_0)\n for arg_4, arg_5 in enumerate(sorted(arg_0, arg_5=lambda x: x[0])):\n arg_6 = arg_0[arg_5]\n with arg_2.with_indent():\n arg_7 = (\n _format_config_item(arg_6, arg_1=arg_2.current_indent)\n .lstrip(' ')\n .rstrip('\\n')\n )\n arg_2.line(\n '{key}: {formatted_value}{comma}'.format(\n arg_5=arg_5,\n arg_7=arg_7,\n comma=',' if arg_4 != arg_3 - 1 else '',\n )\n )\n arg_2.line('}')\n\n return arg_2.read()\n\n def _format_config_sublist(arg_0, arg_1=0):\n arg_2 = IndentingStringIoPrinter(indent_level=2, arg_1=arg_1)\n arg_2.line('[')\n\n arg_3 = len(arg_0)\n for arg_4, arg_6 in enumerate(arg_0):\n with arg_2.with_indent():\n arg_7 = (\n _format_config_item(arg_6, arg_1=arg_2.current_indent)\n .lstrip(' ')\n .rstrip('\\n')\n )\n arg_2.line(\n '{formatted_value}{comma}'.format(\n arg_7=arg_7, comma=',' if arg_4 != arg_3 - 1 else ''\n )\n )\n arg_2.line(']')\n\n return arg_2.read()\n\n def _format_config_item(arg_0, arg_1=0):\n arg_2 = IndentingStringIoPrinter(indent_level=2, arg_1=arg_1)\n\n if isinstance(arg_0, dict):\n return _format_config_subdict(arg_0, arg_2.current_indent)\n elif isinstance(arg_0, list):\n return _format_config_sublist(arg_0, arg_2.current_indent)\n elif isinstance(arg_0, bool):\n return repr(arg_0).lower()\n else:\n return repr(arg_0).replace('\\'', '\"')\n\n check.dict_param(arg_0, 'config', key_type=str)\n if not isinstance(arg_0, dict):\n check.failed('Expected a dict to format as config, got: {item}'.format(item=repr(arg_0)))\n\n return _format_config_subdict(arg_0)"} +{"_id": "doc_3024", "title": "", "text": "def Func(arg_0, arg_1):\n '''Get a pipeline by name. Only constructs that pipeline and caches it.\n\n Args:\n name (str): Name of the pipeline to retriever\n\n Returns:\n PipelineDefinition: Instance of PipelineDefinition with that name.\n '''\n check.str_param(arg_1, 'name')\n\n if arg_1 in arg_0._pipeline_cache:\n return arg_0._pipeline_cache[arg_1]\n\n try:\n arg_2 = arg_0.pipeline_dict[arg_1]()\n except KeyError:\n raise DagsterInvariantViolationError(\n 'Could not find pipeline \"{name}\". Found: {pipeline_names}.'.format(\n arg_1=arg_1,\n pipeline_names=', '.join(\n [\n '\"{pipeline_name}\"'.format(arg_3=arg_3)\n for arg_3 in arg_0.pipeline_dict.keys()\n ]\n ),\n )\n )\n check.invariant(\n arg_2.name == arg_1,\n 'Name does not match. Name in dict {name}. 
Name in pipeline {pipeline.name}'.format(\n arg_1=arg_1, arg_2=arg_2\n ),\n )\n\n arg_0._pipeline_cache[arg_1] = check.inst(\n arg_2,\n PipelineDefinition,\n (\n 'Function passed into pipeline_dict with key {key} must return a '\n 'PipelineDefinition'\n ).format(key=arg_1),\n )\n\n return arg_2"} +{"_id": "doc_3025", "title": "", "text": "def Func(arg_0):\n '''Return all pipelines as a list\n\n Returns:\n List[PipelineDefinition]:\n\n '''\n arg_1 = list(map(arg_0.get_pipeline, arg_0.pipeline_dict.keys()))\n # This does uniqueness check\n arg_0._construct_solid_defs(arg_1)\n return arg_1"} +{"_id": "doc_3026", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n This function polls the process until it returns a valid\n item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in\n a state where the process has terminated and the queue is empty\n\n Warning: if the child process is in an infinite loop. This will\n also infinitely loop.\n '''\n while True:\n try:\n return arg_1.get(block=True, timeout=TICK)\n except multiprocessing.queues.Empty:\n if not arg_0.is_alive():\n # There is a possibility that after the last queue.get the\n # process created another event and then died. In that case\n # we want to continue draining the queue.\n try:\n return arg_1.get(block=False)\n except multiprocessing.queues.Empty:\n # If the queue empty we know that there are no more events\n # and that the process has died.\n return PROCESS_DEAD_AND_QUEUE_EMPTY\n\n check.failed('unreachable')"} +{"_id": "doc_3027", "title": "", "text": "def Func(arg_0):\n '''Waits until all there are no processes enqueued.'''\n while True:\n with arg_0._processes_lock:\n if not arg_0._processes and arg_0._processing_semaphore.locked():\n return True\n gevent.sleep(0.1)"} +{"_id": "doc_3028", "title": "", "text": "def Func(\n arg_0,\n arg_1=arg_2,\n arg_3=arg_4,\n arg_5=False,\n arg_6=None,\n):\n '''\n The schema for configuration data that describes the type, optionality, defaults, and description.\n\n Args:\n dagster_type (DagsterType):\n A ``DagsterType`` describing the schema of this field, ie `Dict({'example': Func(String)})`\n default_value (Any):\n A default value to use that respects the schema provided via dagster_type\n is_optional (bool): Whether the presence of this field is optional\n despcription (str):\n '''\n arg_7 = resolve_to_config_type(arg_0)\n if not arg_7:\n raise DagsterInvalidDefinitionError(\n (\n 'Attempted to pass {value_repr} to a Func that expects a valid '\n 'dagster type usable in config (e.g. 
Dict, NamedDict, Int, String et al).'\n ).format(value_repr=repr(arg_0))\n )\n return FuncImpl(\n arg_7=resolve_to_config_type(arg_0),\n arg_1=arg_1,\n arg_3=arg_3,\n arg_5=arg_5,\n arg_6=arg_6,\n )"} +{"_id": "doc_3029", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Builds the execution plan.\n '''\n\n # Construct dependency dictionary\n arg_3 = {arg_4.key: set() for arg_4 in arg_0.steps}\n\n for arg_4 in arg_0.steps:\n for arg_5 in arg_4.step_inputs:\n arg_3[arg_4.key].add(arg_5.prev_output_handle.step_key)\n\n arg_6 = {arg_4.key: arg_4 for arg_4 in arg_0.steps}\n\n return ExecutionPlan(arg_1, arg_6, arg_3, arg_2)"} +{"_id": "doc_3030", "title": "", "text": "def Func(arg_0, arg_1):\n '''Here we Func a new ExecutionPlan from a pipeline definition and the environment config.\n\n To do this, we iterate through the pipeline's solids in topological order, and hand off the\n execution steps for each solid to a companion _PlanBuilder object.\n\n Once we've processed the entire pipeline, we invoke _PlanBuilder.Func() to construct the\n ExecutionPlan object.\n '''\n check.inst_param(arg_0, 'pipeline_def', PipelineDefinition)\n check.inst_param(arg_1, 'environment_config', EnvironmentConfig)\n\n arg_2 = _PlanBuilder()\n\n for arg_3 in solids_in_topological_order(arg_0):\n ### 1. INPUTS\n # Create and add execution plan steps for solid inputs\n arg_4 = []\n for arg_5 in arg_3.definition.input_defs:\n arg_6 = get_input_source_step_handle(\n arg_0, arg_1, arg_2, arg_3, arg_5\n )\n\n # We return None for the handle (see above in get_input_source_step_handle) when the\n # input def runtime type is \"Nothing\"\n if not arg_6:\n continue\n\n arg_7 = create_subplan_for_input(\n arg_0, arg_1, arg_3, arg_6, arg_5\n )\n\n arg_2.add_steps(arg_7.steps)\n\n arg_4.append(\n StepInput(\n arg_5.name, arg_5.runtime_type, arg_7.terminal_step_output_handle\n )\n )\n\n ### 2. TRANSFORM FUNCTION\n # Create and add execution plan step for the solid transform function\n arg_8 = create_transform_step(\n arg_0, arg_1, arg_3, arg_4\n )\n arg_2.add_step(arg_8)\n\n ### 3. OUTPUTS\n # Create and add execution plan steps (and output handles) for solid outputs\n for arg_9 in arg_3.definition.output_defs:\n arg_7 = create_subplan_for_output(\n arg_0, arg_1, arg_3, arg_8, arg_9\n )\n arg_2.add_steps(arg_7.steps)\n\n arg_10 = arg_3.output_handle(arg_9.name)\n arg_2.set_output_handle(arg_10, arg_7.terminal_step_output_handle)\n\n # Finally, we Func and return the execution plan\n return arg_2.Func(\n arg_0=arg_0,\n artifacts_persisted=arg_1.storage.construct_run_storage().is_persistent,\n )"} +{"_id": "doc_3031", "title": "", "text": "def Func(arg_0=None, arg_1=False):\n '''Get the shell commands we'll use to actually build and publish a package to PyPI.'''\n arg_2 = (\n ['rm -rf dist']\n + (arg_0 if arg_0 else [])\n + [\n 'python setup.py sdist bdist_wheel{nightly}'.format(\n arg_1=' --nightly' if arg_1 else ''\n ),\n 'twine upload dist/*',\n ]\n )\n\n return arg_2"} +{"_id": "doc_3032", "title": "", "text": "def Func(arg_0):\n \"\"\"Tags all submodules for a new Func.\n\n Ensures that git tags, as well as the version.py files in each submodule, agree and that the\n new version is strictly greater than the current version. Will fail if the new version\n is not an increment (following PEP 440). 
Creates a new git tag and commit.\n \"\"\"\n check_new_version(arg_0)\n set_new_version(arg_0)\n commit_new_version(arg_0)\n set_git_tag(arg_0)"} +{"_id": "doc_3033", "title": "", "text": "def Func(arg_0):\n '''Create a context definition from a pre-existing context. This can be useful\n in testing contexts where you may want to create a context manually and then\n pass it into a one-off PipelineDefinition\n\n Args:\n context (ExecutionContext): The context that will provided to the pipeline.\n Returns:\n PipelineContextDefinition: The passthrough context definition.\n '''\n\n check.inst_param(arg_0, 'context', ExecutionContext)\n arg_1 = PipelineContextDefinition(context_fn=lambda *_args: arg_0)\n return {DEFAULT_CONTEXT_NAME: arg_1}"} +{"_id": "doc_3034", "title": "", "text": "def Func(arg_0):\n '''\n A decorator for a annotating a function that can take the selected properties\n of a ``config_value`` and an instance of a custom type and materialize it.\n\n Args:\n config_cls (Selector):\n '''\n arg_1 = resolve_config_cls_arg(arg_0)\n check.param_invariant(arg_1.is_selector, 'config_cls')\n\n def _wrap(arg_2):\n def _selector(arg_3, arg_4, arg_5):\n arg_6, arg_7 = single_item(arg_4)\n return arg_2(arg_3, arg_6, arg_7, arg_5)\n\n return _create_output_schema(arg_1, _selector)\n\n return _wrap"} +{"_id": "doc_3035", "title": "", "text": "def Func(arg_0, arg_1, arg_2=''):\n '''Automagically wrap a Func of text.'''\n arg_3 = TextWrapper(\n width=arg_0.line_length - len(arg_0.current_indent_str),\n initial_indent=arg_2,\n subsequent_indent=arg_2,\n break_long_words=False,\n break_on_hyphens=False,\n )\n for arg_4 in arg_3.wrap(arg_1):\n arg_0.line(arg_4)"} +{"_id": "doc_3036", "title": "", "text": "def Func(arg_0):\n '''Download an object from s3.\n\n Args:\n info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.\n\n Returns:\n str:\n The path to the downloaded object.\n '''\n arg_1 = arg_0.solid_config['target_file']\n return arg_0.resources.download_manager.download_file_contents(arg_0, arg_1)"} +{"_id": "doc_3037", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n '''\n Wraps the execution of user-space code in an error boundary. This places a uniform\n policy around an user code invoked by the framework. This ensures that all user\n errors are wrapped in the DagsterUserCodeExecutionError, and that the original stack\n trace of the user error is preserved, so that it can be reported without confusing\n framework code in the stack trace, if a tool author wishes to do so. 
This has\n been especially help in a notebooking context.\n '''\n check.str_param(arg_1, 'msg')\n check.subclass_param(arg_0, 'error_cls', DagsterUserCodeExecutionError)\n\n try:\n yield\n except Exception as e: # pylint: disable=W0703\n if isinstance(e, DagsterError):\n # The system has thrown an error that is part of the user-framework contract\n raise e\n else:\n # An exception has been thrown by user code and computation should cease\n # with the error reported further up the stack\n raise_from(\n arg_0(arg_1, user_exception=e, original_exc_info=sys.exc_info(), **arg_2), e\n )"} +{"_id": "doc_3038", "title": "", "text": "def Func(arg_0, arg_1=0o777):\n \"\"\"The missing mkdir -p functionality in os.\"\"\"\n try:\n os.makedirs(arg_0, arg_1)\n except OSError as err:\n # Reraise the error unless it's about an already existing directory\n if err.errno != errno.EEXIST or not os.path.isdir(arg_0):\n raise"} +{"_id": "doc_3039", "title": "", "text": "def Func(arg_0, arg_1):\n '''In the event of pipeline initialization failure, we want to be able to log the failure\n without a dependency on the ExecutionContext to initialize DagsterLog\n '''\n check.inst_param(arg_0, 'run_config', RunConfig)\n check.inst_param(arg_1, 'pipeline_def', PipelineDefinition)\n\n # Use the default logger\n arg_2 = [define_colored_console_logger('dagster')]\n if arg_0.event_callback:\n arg_2 += [construct_event_logger(arg_0.event_callback)]\n elif arg_0.loggers:\n arg_2 += arg_0.loggers\n\n return DagsterLog(arg_0.run_id, get_logging_tags(None, arg_0, arg_1), arg_2)"} +{"_id": "doc_3040", "title": "", "text": "def Func(arg_0):\n '''Whether the solid execution was Funcful'''\n arg_1 = False\n for arg_2 in itertools.chain(\n arg_0.input_expectations, arg_0.output_expectations, arg_0.transforms\n ):\n if arg_2.event_type == DagsterEventType.STEP_FAILURE:\n return False\n if arg_2.event_type == DagsterEventType.STEP_SUCCESS:\n arg_1 = True\n\n return arg_1"} +{"_id": "doc_3041", "title": "", "text": "def Func(arg_0):\n '''Whether the solid execution was Func'''\n return all(\n [\n arg_1.event_type == DagsterEventType.STEP_SKIPPED\n for arg_1 in itertools.chain(\n arg_0.input_expectations, arg_0.output_expectations, arg_0.transforms\n )\n ]\n )"} +{"_id": "doc_3042", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n '''Returns transformed value either for DEFAULT_OUTPUT or for the output\n given as output_name. 
Returns None if execution result isn't a success.\n\n Reconstructs the pipeline context to materialize value.\n '''\n check.str_param(arg_1, 'output_name')\n\n if not arg_0.solid.definition.has_output(arg_1):\n raise DagsterInvariantViolationError(\n '{output_name} not defined in solid {solid}'.format(\n arg_1=arg_1, solid=arg_0.solid.name\n )\n )\n\n if arg_0.success:\n for arg_3 in arg_0.transforms:\n if (\n arg_3.is_successful_output\n and arg_3.step_output_data.output_name == arg_1\n ):\n with arg_0.reconstruct_context() as context:\n arg_4 = arg_0._get_value(context, arg_3.step_output_data)\n return arg_4\n\n raise DagsterInvariantViolationError(\n (\n 'Did not find result {output_name} in solid {self.solid.name} '\n 'execution result'\n ).format(arg_1=arg_1, arg_0=arg_0)\n )\n else:\n return None"} +{"_id": "doc_3043", "title": "", "text": "def Func(arg_0):\n '''Returns the failing step's data that happened during this solid's execution, if any'''\n for arg_1 in itertools.chain(\n arg_0.input_expectations, arg_0.output_expectations, arg_0.transforms\n ):\n if arg_1.event_type == DagsterEventType.STEP_FAILURE:\n return arg_1.step_Func"} +{"_id": "doc_3044", "title": "", "text": "def Func(arg_0=None):\n '''A permissive dict will permit the user to partially specify the permitted fields. Any fields\n that are specified and passed in will be type checked. Other fields will be allowed, but\n will be ignored by the type checker.\n '''\n\n if arg_0:\n check_user_facing_fields_dict(arg_0, 'Func')\n\n class _Func(_ConfigComposite):\n def __init__(arg_1):\n arg_2 = 'Func.' + str(DictCounter.get_next_count())\n super(_Func, arg_1).__init__(\n name=None,\n arg_2=arg_2,\n arg_0=arg_0 or dict(),\n description='A configuration dictionary with typed fields',\n type_attributes=ConfigTypeAttributes(is_builtin=True),\n )\n\n @property\n def is_permissive_composite(arg_1):\n return True\n\n return _Func"} +{"_id": "doc_3045", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Execute the user-specified transform for the solid. 
Wrap in an error boundary and do\n all relevant logging and metrics tracking\n '''\n check.inst_param(arg_0, 'transform_context', SystemTransformExecutionContext)\n check.dict_param(arg_1, 'inputs', key_type=str)\n\n arg_2 = arg_0.step\n arg_3 = arg_2.solid\n\n arg_0.log.debug(\n 'Executing core transform for solid {solid}.'.format(arg_3=arg_3.name)\n )\n\n arg_4 = []\n for arg_5 in _yield_transform_results(arg_0, arg_1):\n yield arg_5\n if isinstance(arg_5, StepOutputValue):\n arg_4.append(arg_5)\n\n if len(arg_4) != len(arg_3.definition.output_defs):\n arg_6 = {r.output_name for r in arg_4}\n arg_7 = {output_def.name for output_def in arg_3.definition.output_defs}\n arg_8 = arg_7.difference(arg_6)\n arg_0.log.info(\n 'Solid {solid} did not fire outputs {outputs}'.format(\n arg_3=arg_3.name, outputs=repr(arg_8)\n )\n )"} +{"_id": "doc_3046", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n):\n '''\n Takes a python cls and creates a type for it in the Dagster domain.\n\n Args:\n existing_type (cls)\n The python type you want to project in to the Dagster type system.\n name (Optional[str]):\n description (Optiona[str]):\n input_schema (Optional[InputSchema]):\n An instance of a class that inherits from :py:class:`InputSchema` that\n can map config data to a value of this type.\n\n output_schema (Optiona[OutputSchema]):\n An instance of a class that inherits from :py:class:`OutputSchema` that\n can map config data to persisting values of this type.\n\n serialization_strategy (Optional[SerializationStrategy]):\n The default behavior for how to serialize this value for\n persisting between execution steps.\n\n storage_plugins (Optional[Dict[RunStorageMode, TypeStoragePlugin]]):\n Storage type specific overrides for the serialization strategy.\n This allows for storage specific optimzations such as effecient\n distributed storage on S3.\n '''\n check.type_param(arg_0, 'existing_type')\n check.opt_str_param(arg_1, 'name')\n check.opt_str_param(arg_2, 'description')\n check.opt_inst_param(arg_3, 'input_schema', InputSchema)\n check.opt_inst_param(arg_4, 'output_schema', OutputSchema)\n check.opt_inst_param(arg_5, 'serialization_strategy', SerializationStrategy)\n arg_6 = check.opt_dict_param(arg_6, 'storage_plugins')\n\n if arg_5 is None:\n arg_5 = PickleSerializationStrategy()\n\n arg_1 = arg_0.__name__ if arg_1 is None else arg_1\n\n return _decorate_Func(\n arg_0,\n key=arg_1,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n )"} +{"_id": "doc_3047", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n '''A decorator for creating a Func. The decorated function will be used as the \n Func_fn in a ResourceDefinition.\n '''\n\n # This case is for when decorator is used bare, without arguments.\n # E.g. @Func versus @Func()\n if callable(arg_0):\n return ResourceDefinition(arg_2=arg_0)\n\n def _wrap(arg_2):\n return ResourceDefinition(arg_2, arg_0, arg_1)\n\n return _wrap"} +{"_id": "doc_3048", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4='trigger',\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None,\n arg_10=None,\n ):\n '''Events API v2 enables you to add PagerDuty's advanced event and incident management\n functionality to any system that can make an outbound HTTP connection.\n\n Arguments:\n summary {string} -- A high-level, text summary message of the event. 
Will be used to\n construct an alert's description.\n\n Example: \"PING OK - Packet loss = 0%, RTA = 1.41 ms\" \"Host\n 'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN\"\n\n source {string} -- Specific human-readable unique identifier, such as a hostname, for\n the system having the problem.\n\n Examples:\n \"prod05.theseus.acme-widgets.com\"\n \"171.26.23.22\"\n \"aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003\"\n \"9c09acd49a25\"\n\n severity {string} -- How impacted the affected system is. Displayed to users in lists\n and influences the priority of any created incidents. Must be one\n of {info, warning, error, critical}\n\n Keyword Arguments:\n event_action {str} -- There are three types of events that PagerDuty recognizes, and\n are used to represent different types of activity in your\n monitored systems. (default: 'trigger')\n * trigger: When PagerDuty receives a trigger event, it will either open a new alert,\n or add a new trigger log entry to an existing alert, depending on the\n provided dedup_key. Your monitoring tools should send PagerDuty a trigger\n when a new problem has been detected. You may send additional triggers\n when a previously detected problem has occurred again.\n\n * acknowledge: acknowledge events cause the referenced incident to enter the\n acknowledged state. While an incident is acknowledged, it won't\n generate any additional notifications, even if it receives new\n trigger events. Your monitoring tools should send PagerDuty an\n acknowledge event when they know someone is presently working on the\n problem.\n\n * resolve: resolve events cause the referenced incident to enter the resolved state.\n Once an incident is resolved, it won't generate any additional\n notifications. New trigger events with the same dedup_key as a resolved\n incident won't re-open the incident. Instead, a new incident will be\n created. Your monitoring tools should send PagerDuty a resolve event when\n the problem that caused the initial trigger event has been fixed.\n\n dedup_key {string} -- Deduplication key for correlating triggers and resolves. The\n maximum permitted length of this property is 255 characters.\n\n timestamp {string} -- Timestamp (ISO 8601). When the upstream system detected / created\n the event. This is useful if a system batches or holds events\n before sending them to PagerDuty.\n\n Optional - Will be auto-generated by PagerDuty if not provided.\n\n Example:\n 2015-07-17T08:42:58.315+0000\n\n component {string} -- The part or component of the affected system that is broken.\n\n Examples:\n \"keepalive\"\n \"webping\"\n \"mysql\"\n \"wqueue\"\n\n group {string} -- A cluster or grouping of sources. 
For example, sources\n \u201cprod-datapipe-02\u201d and \u201cprod-datapipe-03\u201d might both be part of\n \u201cprod-datapipe\u201d\n\n Examples:\n \"prod-datapipe\"\n \"www\"\n \"web_stack\"\n\n event_class {string} -- The class/type of the event.\n\n Examples:\n \"High CPU\"\n \"Latency\"\n \"500 Error\"\n\n custom_details {Dict[str, str]} -- Additional details about the event and affected\n system.\n\n Example:\n {\"ping time\": \"1500ms\", \"load avg\": 0.75 }\n '''\n\n arg_11 = {\n 'routing_key': arg_0.routing_key,\n 'event_action': arg_4,\n 'payload': {'summary': arg_1, 'source': arg_2, 'severity': arg_3},\n }\n\n if arg_5 is not None:\n arg_11['dedup_key'] = arg_5\n\n if arg_6 is not None:\n arg_11['payload']['timestamp'] = arg_6\n\n if arg_7 is not None:\n arg_11['payload']['component'] = arg_7\n\n if arg_8 is not None:\n arg_11['payload']['group'] = arg_8\n\n if arg_9 is not None:\n arg_11['payload']['class'] = arg_9\n\n if arg_10 is not None:\n arg_11['payload']['custom_details'] = arg_10\n\n return pypd.EventV2.create(arg_11=arg_11)"} +{"_id": "doc_3049", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Default method to acquire database connection parameters.\n\n Sets connection parameters to match settings.py, and sets\n default values to blank fields.\n \"\"\"\n arg_1 = {\n 'NAME': 'name',\n 'HOST': 'host',\n 'PORT': 'port',\n 'USER': 'username',\n 'PASSWORD': 'password',\n 'AUTH_SOURCE': 'authSource',\n 'AUTH_MECHANISM': 'authMechanism',\n 'ENFORCE_SCHEMA': 'enforce_schema',\n 'REPLICASET': 'replicaset',\n 'SSL': 'ssl',\n 'SSL_CERTFILE': 'ssl_certfile',\n 'SSL_CA_CERTS': 'ssl_ca_certs',\n 'READ_PREFERENCE': 'read_preference'\n }\n arg_2 = {\n 'name': 'djongo_test',\n 'enforce_schema': True\n }\n for arg_3, arg_4 in arg_1.items():\n try:\n arg_5 = arg_0.settings_dict[arg_3]\n except KeyError:\n continue\n\n if arg_5 or arg_5 is False:\n arg_2[arg_4] = arg_5\n\n return arg_2"} +{"_id": "doc_3050", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Closes the client connection to the database.\n \"\"\"\n if arg_0.connection:\n with arg_0.wrap_database_errors:\n arg_0.connection.client.close()"} +{"_id": "doc_3051", "title": "", "text": "def Func(arg_0, arg_1):\r\n \"\"\"\r\n Overrides standard Func method from django models to allow\r\n correct translation of Mongo array to a python list.\r\n \"\"\"\r\n if arg_1 is None:\r\n return arg_1\r\n\r\n assert isinstance(arg_1, list)\r\n arg_2 = []\r\n for arg_3 in arg_1:\r\n if isinstance(arg_3, arg_0.model_container):\r\n arg_2.append(arg_3)\r\n continue\r\n arg_4 = make_mdl(arg_0.model_container, arg_3)\r\n arg_2.append(arg_4)\r\n\r\n return arg_2"} +{"_id": "doc_3052", "title": "", "text": "def Func(arg_0, arg_1):\r\n \"\"\"\r\n Filter the queryset for the instance this manager is bound to.\r\n \"\"\"\r\n arg_1._add_hints(instance=arg_0.instance)\r\n if arg_0._db:\r\n arg_1 = arg_1.using(arg_0._db)\r\n arg_1 = arg_1.filter(**arg_0.core_filters)\r\n\r\n return arg_1"} +{"_id": "doc_3053", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Computes the matrix of expected false positives for all possible\n sub-intervals of the complete domain of set sizes, assuming uniform\n distribution of set_sizes within each sub-intervals.\n\n Args:\n cum_counts: the complete cummulative distribution of set sizes.\n sizes: the complete domain of set sizes.\n\n Return (np.array): the 2-D array of expected number of false positives\n for every pair of [l, u] interval, where l is axis-0 and u is\n axis-1.\n \"\"\"\n arg_2 = 
np.zeros((len(arg_1), len(arg_1)))\n # All u an l are inclusive bounds for intervals.\n # Compute p = 1, the NFPs\n for arg_3 in range(len(arg_1)):\n for arg_4 in range(arg_3, len(arg_1)):\n arg_2[arg_3, arg_4] = _compute_nfp_uniform(arg_3, arg_4, arg_0, arg_1)\n return arg_2"} +{"_id": "doc_3054", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Computes the matrix of expected false positives for all possible\n sub-intervals of the complete domain of set sizes.\n\n Args:\n counts: the complete distribution of set sizes.\n sizes: the complete domain of set sizes.\n\n Return (np.array): the 2-D array of expected number of false positives\n for every pair of [l, u] interval, where l is axis-0 and u is\n axis-1.\n \"\"\"\n arg_2 = np.zeros((len(arg_1), len(arg_1)))\n # All u an l are inclusive bounds for intervals.\n # Compute p = 1, the NFPs\n for arg_3 in range(len(arg_1)):\n for arg_4 in range(arg_3, len(arg_1)):\n arg_2[arg_3, arg_4] = _compute_nfp_real(arg_3, arg_4, arg_0, arg_1)\n return arg_2"} +{"_id": "doc_3055", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute the optimal partitions given a distribution of set sizes.\n\n Args:\n sizes (numpy.array): The complete domain of set sizes in ascending\n order.\n counts (numpy.array): The frequencies of all set sizes in the same\n order as `sizes`.\n num_part (int): The number of partitions to create.\n\n Returns:\n list: A list of partitions in the form of `(lower, upper)` tuples,\n where `lower` and `upper` are lower and upper bound (inclusive)\n set sizes of each partition.\n \"\"\"\n if arg_2 < 2:\n return [(arg_0[0], arg_0[-1])]\n if arg_2 >= len(arg_0):\n arg_3 = [(x, x) for x in arg_0]\n return arg_3\n arg_4 = _compute_nfps_real(arg_1, arg_0)\n arg_3, arg_5, arg_5 = _compute_best_partitions(arg_2, arg_0, arg_4)\n return arg_3"} +{"_id": "doc_3056", "title": "", "text": "def Func(arg_0, arg_1='@'):\n '''Compute the byte size after serialization.\n\n Args:\n byteorder (str, optional): This is byte order of the serialized data. Use one\n of the `byte order characters\n `_:\n ``@``, ``=``, ``<``, ``>``, and ``!``.\n Default is ``@`` -- the native order.\n\n Returns:\n int: Size in number of bytes after serialization.\n '''\n # Use 8 bytes to store the seed integer\n arg_2 = struct.calcsize(arg_1+'q')\n # Use 4 bytes to store the number of hash values\n arg_3 = struct.calcsize(arg_1+'i')\n # Use 4 bytes to store each hash value as we are using the lower 32 bit\n arg_4 = struct.calcsize(arg_1+'I')\n return arg_2 + arg_3 + len(arg_0) * arg_4"} +{"_id": "doc_3057", "title": "", "text": "def Func(arg_0, arg_1, arg_2='@'):\n '''\n Serialize this lean MinHash and store the result in an allocated buffer.\n\n Args:\n buf (buffer): `buf` must implement the `buffer`_ interface.\n One such example is the built-in `bytearray`_ class.\n byteorder (str, optional): This is byte order of the Funcd data. Use one\n of the `byte order characters\n `_:\n ``@``, ``=``, ``<``, ``>``, and ``!``.\n Default is ``@`` -- the native order.\n\n This is preferred over using `pickle`_ if the Funcd lean MinHash needs\n to be used by another program in a different programming language.\n\n The serialization schema:\n 1. The first 8 bytes is the seed integer\n 2. The next 4 bytes is the number of hash values\n 3. The rest is the Funcd hash values, each uses 4 bytes\n\n Example:\n To Func a single lean MinHash into a `bytearray`_ buffer.\n\n .. 
code-block:: python\n\n buf = bytearray(lean_minhash.bytesize())\n lean_minhash.Func(buf)\n\n To Func multiple lean MinHash into a `bytearray`_ buffer.\n\n .. code-block:: python\n\n # assuming lean_minhashs is a list of LeanMinHash with the same size\n size = lean_minhashs[0].bytesize()\n buf = bytearray(size*len(lean_minhashs))\n for i, lean_minhash in enumerate(lean_minhashs):\n lean_minhash.Func(buf[i*size:])\n\n .. _`buffer`: https://docs.python.org/3/c-api/buffer.html\n .. _`bytearray`: https://docs.python.org/3.6/library/functions.html#bytearray\n .. _`byteorder`: https://docs.python.org/3/library/struct.html\n '''\n if len(arg_1) < arg_0.bytesize():\n raise ValueError(\"The buffer does not have enough space\\\n for holding this MinHash.\")\n arg_3 = \"%sqi%dI\" % (arg_2, len(arg_0))\n struct.pack_into(arg_3, arg_1, 0,\n arg_0.seed, len(arg_0), *arg_0.hashvalues)"} +{"_id": "doc_3058", "title": "", "text": "def Func(arg_0, arg_1, arg_2='@'):\n '''\n Deserialize a lean MinHash from a buffer.\n\n Args:\n buf (buffer): `buf` must implement the `buffer`_ interface.\n One such example is the built-in `bytearray`_ class.\n byteorder (str. optional): This is byte order of the serialized data. Use one\n of the `byte order characters\n `_:\n ``@``, ``=``, ``<``, ``>``, and ``!``.\n Default is ``@`` -- the native order.\n\n Return:\n datasketch.LeanMinHash: The Funcd lean MinHash\n\n Example:\n To Func a lean MinHash from a buffer.\n\n .. code-block:: python\n\n lean_minhash = LeanMinHash.Func(buf)\n '''\n arg_3 = \"%sqi\" % arg_2\n arg_4 = arg_2 + \"%dI\"\n try:\n arg_5, arg_6 = struct.unpack_from(arg_3, arg_1, 0)\n except TypeError:\n arg_5, arg_6 = struct.unpack_from(arg_3, buffer(arg_1), 0)\n arg_7 = struct.calcsize(arg_3)\n try:\n arg_8 = struct.unpack_from(arg_4 % arg_6, arg_1, arg_7)\n except TypeError:\n arg_8 = struct.unpack_from(arg_4 % arg_6, buffer(arg_1), arg_7)\n arg_9 = object.__new__(LeanMinHash)\n arg_9._initialize_slots(arg_5, arg_8)\n return arg_9"} +{"_id": "doc_3059", "title": "", "text": "def Func(arg_0, arg_1):\n '''Update this MinHash with a new value.\n The value will be hashed using the hash function specified by\n the `hashfunc` argument in the constructor.\n\n Args:\n b: The value to be hashed using the hash function specified.\n\n Example:\n To Func with a new string value (using the default SHA1 hash\n function, which requires bytes as input):\n\n .. code-block:: python\n\n minhash = Minhash()\n minhash.Func(\"new value\".encode('utf-8'))\n\n We can also use a different hash function, for example, `pyfarmhash`:\n\n .. 
code-block:: python\n\n import farmhash\n def _hash_32(b):\n return farmhash.hash32(b)\n minhash = MinHash(hashfunc=_hash_32)\n minhash.Func(\"new value\")\n '''\n arg_2 = arg_0.hashfunc(arg_1)\n arg_3, arg_1 = arg_0.permutations\n arg_4 = np.bitwise_and((arg_3 * arg_2 + arg_1) % _mersenne_prime, np.uint64(_max_hash))\n arg_0.hashvalues = np.minimum(arg_4, arg_0.hashvalues)"} +{"_id": "doc_3060", "title": "", "text": "def Func(arg_0, arg_1):\n '''Merge the other MinHash with this one, making this one the union\n of both.\n\n Args:\n other (datasketch.MinHash): The other MinHash.\n '''\n if arg_1.seed != arg_0.seed:\n raise ValueError(\"Cannot Func MinHash with\\\n different seeds\")\n if len(arg_0) != len(arg_1):\n raise ValueError(\"Cannot Func MinHash with\\\n different numbers of permutation functions\")\n arg_0.hashvalues = np.minimum(arg_1.hashvalues, arg_0.hashvalues)"} +{"_id": "doc_3061", "title": "", "text": "def Func(arg_0, *arg_1):\n '''Create a MinHash which is the Func of the MinHash objects passed as arguments.\n\n Args:\n *mhs: The MinHash objects to be united. The argument list length is variable,\n but must be at least 2.\n\n Returns:\n datasketch.MinHash: A new Func MinHash.\n '''\n if len(arg_1) < 2:\n raise ValueError(\"Cannot Func less than 2 MinHash\")\n arg_2 = len(arg_1[0])\n arg_3 = arg_1[0].seed\n if any((arg_3 != arg_4.seed or arg_2 != len(arg_4)) for arg_4 in arg_1):\n raise ValueError(\"The Funcing MinHash must have the\\\n same seed and number of permutation functions\")\n arg_5 = np.minimum.reduce([arg_4.hashvalues for arg_4 in arg_1])\n arg_6 = arg_1[0].permutations\n return arg_0(arg_2=arg_2, arg_3=arg_3, arg_5=arg_5,\n arg_6=arg_6)"} +{"_id": "doc_3062", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Index all sets given their keys, MinHashes, and sizes.\n It can be called only once after the Func is created.\n\n Args:\n entries (`iterable` of `tuple`): An iterable of tuples, each must be\n in the form of `(key, minhash, size)`, where `key` is the unique\n identifier of a set, `minhash` is the MinHash of the set,\n and `size` is the size or number of unique items in the set.\n\n Note:\n `size` must be positive.\n '''\n if not arg_0.is_empty():\n raise ValueError(\"Cannot call Func again on a non-empty Func\")\n if not isinstance(arg_1, list):\n arg_2 = deque([])\n for arg_3, arg_4, arg_5 in arg_1:\n if arg_5 <= 0:\n raise ValueError(\"Set size must be positive\")\n arg_2.append((arg_3, arg_4, arg_5))\n arg_1 = list(arg_2)\n if len(arg_1) == 0:\n raise ValueError(\"entries is empty\")\n # Create optimal partitions.\n arg_6, arg_7 = np.array(sorted(\n Counter(e[2] for e in arg_1).most_common())).T\n arg_8 = optimal_partitions(arg_6, arg_7, len(arg_0.Funces))\n for arg_9, (arg_10, arg_11) in enumerate(arg_8):\n arg_0.lowers[arg_9], arg_0.uppers[arg_9] = arg_10, arg_11\n # Insert into partitions.\n arg_1.sort(arg_3=lambda e : e[2])\n arg_14 = 0\n for arg_3, arg_4, arg_5 in arg_1:\n if arg_5 > arg_0.uppers[arg_14]:\n arg_14 += 1\n for arg_15 in arg_0.Funces[arg_14]:\n arg_0.Funces[arg_14][arg_15].insert(arg_3, arg_4)"} +{"_id": "doc_3063", "title": "", "text": "def Func(arg_0, arg_1):\n '''Create a new weighted MinHash given a weighted Jaccard vector.\n Each dimension is an integer \n frequency of the corresponding element in the multi-set represented\n by the vector.\n\n Args:\n v (numpy.array): The Jaccard vector. 
\n '''\n if not isinstance(arg_1, collections.Iterable):\n raise TypeError(\"Input vector must be an iterable\")\n if not len(arg_1) == arg_0.dim:\n raise ValueError(\"Input dimension mismatch, expecting %d\" % arg_0.dim)\n if not isinstance(arg_1, np.ndarray):\n arg_1 = np.array(arg_1, dtype=np.float32)\n elif arg_1.dtype != np.float32:\n arg_1 = arg_1.astype(np.float32)\n arg_2 = np.zeros((arg_0.sample_size, 2), dtype=np.int)\n arg_3 = (arg_1 == 0)\n if arg_3.all():\n raise ValueError(\"Input is all zeros\")\n arg_1[arg_3] = np.nan\n arg_4 = np.log(arg_1)\n for arg_5 in range(arg_0.sample_size):\n arg_6 = np.floor((arg_4 / arg_0.rs[arg_5]) + arg_0.betas[arg_5])\n arg_7 = (arg_6 - arg_0.betas[arg_5]) * arg_0.rs[arg_5]\n arg_8 = arg_0.ln_cs[arg_5] - arg_7 - arg_0.rs[arg_5]\n arg_9 = np.nanargmin(arg_8)\n arg_2[arg_5][0], arg_2[arg_5][1] = arg_9, int(arg_6[arg_9])\n return WeightedMinHash(arg_0.seed, arg_2)"} +{"_id": "doc_3064", "title": "", "text": "def Func(arg_0):\n '''\n Estimate the cardinality of the data values seen so far.\n\n Returns:\n int: The estimated cardinality.\n '''\n # Use HyperLogLog estimation function\n arg_1 = arg_0.alpha * float(arg_0.m ** 2) / np.sum(2.0**(-arg_0.reg))\n # Small range correction\n if arg_1 <= (5.0 / 2.0) * arg_0.m:\n arg_2 = arg_0.m - np.Func_nonzero(arg_0.reg)\n return arg_0._linearFuncing(arg_2)\n # Normal range, no correction\n if arg_1 <= (1.0 / 30.0) * (1 << 32):\n return arg_1\n # Large range correction\n return arg_0._largerange_correction(arg_1)"} +{"_id": "doc_3065", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Merge the other HyperLogLog with this one, making this the union of the\n two.\n\n Args:\n other (datasketch.HyperLogLog):\n '''\n if arg_0.m != arg_1.m or arg_0.p != arg_1.p:\n raise ValueError(\"Cannot Func HyperLogLog with different\\\n precisions.\")\n arg_0.reg = np.maximum(arg_0.reg, arg_1.reg)"} +{"_id": "doc_3066", "title": "", "text": "def Func(arg_0, arg_1, arg_2=10):\n \"\"\"\n Computes the average precision at k.\n\n This function computes the average prescision at k between two lists of\n items.\n\n Parameters\n ----------\n actual : list\n A list of elements that are to be predicted (order doesn't matter)\n predicted : list\n A list of predicted elements (order does matter)\n k : int, optional\n The maximum number of predicted elements\n\n Returns\n -------\n score : double\n The average precision at k over the input lists\n\n \"\"\"\n if len(arg_1)>arg_2:\n arg_1 = arg_1[:arg_2]\n\n arg_3 = 0.0\n arg_4 = 0.0\n\n for arg_5,arg_6 in enumerate(arg_1):\n if arg_6 in arg_0 and arg_6 not in arg_1[:arg_5]:\n arg_4 += 1.0\n arg_3 += arg_4 / (arg_5+1.0)\n\n if len(arg_0) == 0:\n return 0.0\n\n return arg_3 / min(len(arg_0), arg_2)"} +{"_id": "doc_3067", "title": "", "text": "def Func(arg_0, arg_1, arg_2=10):\n \"\"\"\n Computes the mean average precision at k.\n\n This function computes the mean average prescision at k between two lists\n of lists of items.\n\n Parameters\n ----------\n actual : list\n A list of lists of elements that are to be predicted \n (order doesn't matter in the lists)\n predicted : list\n A list of lists of predicted elements\n (order matters in the lists)\n k : int, optional\n The maximum number of predicted elements\n\n Returns\n -------\n score : double\n The mean average precision at k over the input lists\n\n \"\"\"\n return np.mean([apk(arg_3,arg_4,arg_2) for arg_3,arg_4 in zip(arg_0, arg_1)])"} +{"_id": "doc_3068", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Return the 
approximate top-k keys that have the highest\n Jaccard similarities to the Func set.\n\n Args:\n minhash (datasketch.MinHash): The MinHash of the Func set.\n k (int): The maximum number of keys to return.\n\n Returns:\n `list` of at most k keys.\n '''\n if arg_2 <= 0:\n raise ValueError(\"k must be positive\")\n if len(arg_1) < arg_0.k*arg_0.l:\n raise ValueError(\"The num_perm of MinHash out of range\")\n arg_3 = set()\n arg_4 = arg_0.k\n while arg_4 > 0:\n for arg_5 in arg_0._Func(arg_1, arg_4, arg_0.l):\n arg_3.add(arg_5)\n if len(arg_3) >= arg_2:\n return list(arg_3)\n arg_4 -= 1\n return list(arg_3)"} +{"_id": "doc_3069", "title": "", "text": "async def Func(arg_0):\n \"\"\"\n Cleanup client resources and disconnect from AsyncMinHashLSH storage.\n \"\"\"\n async with arg_0._lock:\n for arg_1 in arg_0.hashtables:\n await arg_1.Func()\n\n if arg_0.keys is not None:\n await arg_0.keys.Func()\n\n arg_0._initialized = False"} +{"_id": "doc_3070", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Return ordered storage system based on the specified config.\n\n The canonical example of such a storage container is\n ``defaultdict(list)``. Thus, the return value of this method contains\n keys and values. The values are ordered lists with the last added\n item at the end.\n\n Args:\n config (dict): Defines the configurations for the storage.\n For in-memory storage, the config ``{'type': 'dict'}`` will\n suffice. For Redis storage, the type should be ``'redis'`` and\n the configurations for the Redis database should be supplied\n under the key ``'redis'``. These parameters should be in a form\n suitable for `redis.Redis`. The parameters may alternatively\n contain references to environment variables, in which case\n literal configuration values should be replaced by dicts of\n the form::\n\n {'env': 'REDIS_HOSTNAME',\n 'default': 'localhost'}\n\n For a full example, see :ref:`minhash_lsh_at_scale`\n\n name (bytes, optional): A reference name for this storage container.\n For dict-type containers, this is ignored. For Redis containers,\n this name is used to prefix keys pertaining to this storage\n container within the database.\n '''\n arg_2 = arg_0['type']\n if arg_2 == 'dict':\n return DictListStorage(arg_0)\n if arg_2 == 'redis':\n return RedisListStorage(arg_0, arg_1=arg_1)"} +{"_id": "doc_3071", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Return an unordered storage system based on the specified config.\n\n The canonical example of such a storage container is\n ``defaultdict(set)``. Thus, the return value of this method contains\n keys and values. The values are unordered sets.\n\n Args:\n config (dict): Defines the configurations for the storage.\n For in-memory storage, the config ``{'type': 'dict'}`` will\n suffice. For Redis storage, the type should be ``'redis'`` and\n the configurations for the Redis database should be supplied\n under the key ``'redis'``. These parameters should be in a form\n suitable for `redis.Redis`. The parameters may alternatively\n contain references to environment variables, in which case\n literal configuration values should be replaced by dicts of\n the form::\n\n {'env': 'REDIS_HOSTNAME',\n 'default': 'localhost'}\n\n For a full example, see :ref:`minhash_lsh_at_scale`\n\n name (bytes, optional): A reference name for this storage container.\n For dict-type containers, this is ignored. 
For Redis containers,\n this name is used to prefix keys pertaining to this storage\n container within the database.\n '''\n arg_2 = arg_0['type']\n if arg_2 == 'dict':\n return DictSetStorage(arg_0)\n if arg_2 == 'redis':\n return RedisSetStorage(arg_0, arg_1=arg_1)"} +{"_id": "doc_3072", "title": "", "text": "def Func(arg_0):\n \"\"\"Parses command strings and returns a Popen-ready list.\"\"\"\n\n # Prepare arguments.\n if isinstance(arg_0, (str, unicode)):\n arg_1 = shlex.shlex(arg_0.encode('utf-8'))\n arg_1.whitespace = '|'\n arg_1.whitespace_split = True\n arg_0 = []\n\n while True:\n arg_4 = arg_1.get_token()\n if arg_4:\n arg_0.append(arg_4)\n else:\n break\n\n arg_0 = list(map(shlex.split, arg_0))\n\n return arg_0"} +{"_id": "doc_3073", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=None):\n \"\"\"Executes a given commmand and returns Response.\n\n Blocks until process is complete, or timeout is reached.\n \"\"\"\n\n arg_0 = expand_args(arg_0)\n\n arg_6 = []\n for arg_7 in arg_0:\n\n if len(arg_6):\n # due to broken pipe problems pass only first 10 KiB\n arg_1 = arg_6[-1].std_out[0:10*1024]\n\n arg_8 = Command(arg_7)\n try:\n arg_9, arg_10 = arg_8.Func(arg_1, arg_2, arg_3, arg_4, arg_5)\n arg_11 = arg_8.returncode\n except OSError as e:\n arg_9, arg_10 = '', u\"\\n\".join([e.strerror, traceback.format_exc()])\n arg_11 = 127\n\n arg_12 = Response(process=arg_8)\n\n arg_12.command = arg_7\n arg_12.std_out = arg_9\n arg_12.std_err = arg_10\n arg_12.status_code = arg_11\n\n arg_6.append(arg_12)\n\n arg_12 = arg_6.pop()\n arg_12.history = arg_6\n\n return arg_12"} +{"_id": "doc_3074", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Spawns a new process from the given command.\"\"\"\n\n # TODO: support piped commands\n arg_4 = expand_args(arg_0).pop()\n arg_5 = dict(os.environ)\n arg_5.update(arg_2 or {})\n\n arg_6 = subprocess.Popen(arg_4,\n universal_newlines=True,\n shell=False,\n arg_2=arg_5,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n bufsize=0,\n arg_3=arg_3,\n )\n\n return ConnectedCommand(arg_6=arg_6)"} +{"_id": "doc_3075", "title": "", "text": "def Func(arg_0, arg_1, arg_2='\\n'):\n \"\"\"Sends a line to std_in.\"\"\"\n return arg_0._process.stdin.write(arg_1+arg_2)"} +{"_id": "doc_3076", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''Converts Py type to PyFunc type'''\n if isinstance(arg_0, PyFunc):\n return arg_0\n elif arg_0 is None:\n return undefined\n elif isinstance(arg_0, basestring):\n return PyFuncString(arg_0, StringPrototype)\n elif isinstance(arg_0, bool):\n return true if arg_0 else false\n elif isinstance(arg_0, float) or isinstance(arg_0, int) or isinstance(\n arg_0, long) or (NUMPY_AVAILABLE and isinstance(\n arg_0,\n (numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,\n numpy.int32, numpy.uint32, numpy.float32, numpy.float64))):\n # This is supposed to speed things up. may not be the case\n if arg_0 in NUM_BANK:\n return NUM_BANK[arg_0]\n return PyFuncNumber(float(arg_0), NumberPrototype)\n elif isinstance(arg_0, FunctionType):\n return PyFuncFunction(arg_0, FunctionPrototype)\n #elif isinstance(val, ModuleType):\n # mod = {}\n # for name in dir(val):\n # value = getattr(val, name)\n # if isinstance(value, ModuleType):\n # continue # prevent recursive module conversion\n # try:\n # jsval = HFunc(value)\n # except RuntimeError:\n # print 'Could not convert %s to PyFunc object!' 
% name\n # continue\n # mod[name] = jsval\n # return Func(mod)\n #elif isintance(val, ClassType):\n\n elif isinstance(arg_0, dict): # convert to object\n arg_2 = PyFuncObject({}, ObjectPrototype)\n for arg_3, arg_4 in six.iteritems(arg_0):\n arg_2.put(Func(arg_3), Func(arg_4))\n return arg_2\n elif isinstance(arg_0, (list, tuple)): #Convert to array\n return PyFuncArray(arg_0, ArrayPrototype)\n # convert to typedarray\n elif isinstance(arg_0, FuncObjectWrapper):\n return arg_0.__dict__['_obj']\n elif NUMPY_AVAILABLE and isinstance(arg_0, numpy.ndarray):\n if arg_0.dtype == numpy.int8:\n return PyFuncInt8Array(arg_0, Int8ArrayPrototype)\n elif arg_0.dtype == numpy.uint8 and not arg_1:\n return PyFuncUint8Array(arg_0, Uint8ArrayPrototype)\n elif arg_0.dtype == numpy.uint8 and arg_1:\n return PyFuncUint8ClampedArray(arg_0, Uint8ClampedArrayPrototype)\n elif arg_0.dtype == numpy.int16:\n return PyFuncInt16Array(arg_0, Int16ArrayPrototype)\n elif arg_0.dtype == numpy.uint16:\n return PyFuncUint16Array(arg_0, Uint16ArrayPrototype)\n\n elif arg_0.dtype == numpy.int32:\n return PyFuncInt32Array(arg_0, Int32ArrayPrototype)\n elif arg_0.dtype == numpy.uint32:\n return PyFuncUint16Array(arg_0, Uint32ArrayPrototype)\n\n elif arg_0.dtype == numpy.float32:\n return PyFuncFloat32Array(arg_0, Float32ArrayPrototype)\n elif arg_0.dtype == numpy.float64:\n return PyFuncFloat64Array(arg_0, Float64ArrayPrototype)\n else: # try to convert to js object\n return py_wrap(arg_0)"} +{"_id": "doc_3077", "title": "", "text": "def Func(arg_0, arg_1):\n ''' note py_arr elems are NOT converted to PyJs types!'''\n arg_2 = arg_0.NewArray(len(arg_1))\n arg_2._init(arg_1)\n return arg_2"} +{"_id": "doc_3078", "title": "", "text": "def Func(arg_0, arg_1):\n ''' note py_obj items are NOT converted to PyJs types! 
'''\n arg_2 = arg_0.NewObject()\n for arg_3, arg_4 in arg_1.items():\n arg_2.put(unicode(arg_3), arg_4)\n return arg_2"} +{"_id": "doc_3079", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n ''' Adds op_code with specified args to tape '''\n arg_0.tape.append(OP_CODES[arg_1](*arg_2))"} +{"_id": "doc_3080", "title": "", "text": "def Func(arg_0, arg_1=0):\n ''' Records locations of labels and Funcs the code '''\n arg_0.label_locs = {} if arg_0.label_locs is None else arg_0.label_locs\n arg_3 = arg_1\n while arg_3 < len(arg_0.tape):\n if type(arg_0.tape[arg_3]) == LABEL:\n arg_0.label_locs[arg_0.tape[arg_3].num] = arg_3\n del arg_0.tape[arg_3]\n continue\n arg_3 += 1\n arg_0.Funcd = True"} +{"_id": "doc_3081", "title": "", "text": "def Func(arg_0, arg_1=2, arg_2=False):\n '''returns n digit string representation of the num'''\n arg_3 = unicode(abs(arg_0))\n if len(arg_3) < arg_1:\n arg_3 = '0' * (arg_1 - len(arg_3)) + arg_3\n if not arg_2:\n return arg_3\n if arg_0 >= 0:\n return '+' + arg_3\n else:\n return '-' + arg_3"} +{"_id": "doc_3082", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Takes the replacement template and some info about the match and returns filled template\n \"\"\"\n arg_4 = 0\n arg_5 = ''\n while arg_4 < len(arg_0) - 1:\n arg_6 = arg_0[arg_4]\n if arg_6 == '$':\n if arg_0[arg_4 + 1] == '$':\n arg_5 += '$'\n arg_4 += 2\n continue\n elif arg_0[arg_4 + 1] == '`':\n # replace with string that is BEFORE match\n arg_5 += arg_1[:arg_2[0]]\n arg_4 += 2\n continue\n elif arg_0[arg_4 + 1] == '\\'':\n # replace with string that is AFTER match\n arg_5 += arg_1[arg_2[1]:]\n arg_4 += 2\n continue\n elif arg_0[arg_4 + 1] in DIGS:\n arg_7 = arg_0[arg_4 + 1]\n if arg_4 + 2 < len(arg_0) and arg_0[arg_4 + 2] in DIGS:\n arg_7 += arg_0[arg_4 + 2]\n arg_8 = int(arg_7)\n # we will not do any replacements if we dont have this npar or dig is 0\n if not arg_8 or arg_8 > len(arg_3):\n arg_5 += '$' + arg_7\n else:\n # None - undefined has to be replaced with ''\n arg_5 += arg_3[arg_8 - 1] if arg_3[arg_8 - 1] else ''\n arg_4 += 1 + len(arg_7)\n continue\n arg_5 += arg_6\n arg_4 += 1\n if arg_4 < len(arg_0):\n arg_5 += arg_0[-1]\n return arg_5"} +{"_id": "doc_3083", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n ''' what can be either name of the op, or node, or a list of statements.'''\n if isinstance(arg_1, basestring):\n return arg_0.exe.Func(arg_1, *arg_2)\n elif isinstance(arg_1, list):\n arg_0._Func_statement_list(arg_1)\n else:\n return getattr(arg_0, arg_1['type'])(**arg_1)"} +{"_id": "doc_3084", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Translates esprima syntax tree to python by delegating to appropriate Funclating node\"\"\"\n try:\n arg_2 = globals().get(arg_0['type'])\n if not arg_2:\n raise NotImplementedError('%s is not supported!' % arg_0['type'])\n if arg_1:\n arg_2 = arg_2.__dict__[\n 'standard'] if 'standard' in arg_2.__dict__ else arg_2\n return arg_2(**arg_0)\n except:\n #print ele\n raise"} +{"_id": "doc_3085", "title": "", "text": "def Func(arg_0):\n '''Decorator limiting resulting line length in order to avoid python parser stack overflow -\n If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line\n USE ONLY ON EXPRESSIONS!!! 
'''\n\n def arg_6(arg_1=False, **arg_2):\n arg_3 = len(\n inline_stack.names\n ) # in case line is longer than limit we will have to insert the lval at current position\n # this is because calling func will change inline_stack.\n # we cant use inline_stack.require here because we dont know whether line overflows yet\n arg_4 = arg_0(**arg_2)\n if len(arg_4) > LINE_LEN_LIMIT:\n arg_5 = inline_stack.require('LONG')\n inline_stack.names.pop()\n inline_stack.names.insert(arg_3, arg_5)\n arg_4 = 'def %s(var=var):\\n return %s\\n' % (arg_5, arg_4)\n inline_stack.define(arg_5, arg_4)\n return arg_5 + '()'\n else:\n return arg_4\n\n arg_6.__dict__['standard'] = arg_0\n return arg_6"} +{"_id": "doc_3086", "title": "", "text": "def Func(arg_0):\n \"\"\"Does not chceck whether t is not resticted or internal\"\"\"\n if not arg_0:\n return False\n arg_1 = iter(arg_0)\n if arg_1.next() not in IDENTIFIER_START:\n return False\n return all(arg_2 in IDENTIFIER_PART for arg_2 in arg_1)"} +{"_id": "doc_3087", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Translates input JS file to python and saves the it to the output path.\n It appends some convenience code at the end so that it is easy to import JS objects.\n\n For example we have a file 'example.js' with: var a = function(x) {return x}\n Func('example.js', 'example.py')\n\n Now example.py can be easily importend and used:\n >>> from example import example\n >>> example.a(30)\n 30\n '''\n arg_2 = get_file_contents(arg_0)\n\n arg_3 = translate_js(arg_2)\n arg_4 = os.path.basename(arg_1).split('.')[0]\n arg_5 = '__all__ = [%s]\\n\\n# Don\\'t look below, you will not understand this Python code :) I don\\'t.\\n\\n' % repr(\n arg_4)\n arg_6 = '\\n\\n# Add lib to the module scope\\n%s = var.to_python()' % arg_4\n arg_7 = arg_5 + arg_3 + arg_6\n write_file_contents(arg_1, arg_7)"} +{"_id": "doc_3088", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Funcs javascript js in current context\n\n During initial Func() the converted js is cached for re-use. That means next time you\n run the same javascript snippet you save many instructions needed to parse and convert the\n js code to python code.\n\n This cache causes minor overhead (a cache dicts is updated) but the Js=>Py conversion process\n is typically expensive compared to actually running the generated python code.\n\n Note that the cache is just a dict, it has no expiration or cleanup so when running this\n in automated situations with vast amounts of snippets it might increase memory usage.\n \"\"\"\n try:\n arg_3 = arg_0.__dict__['cache']\n except KeyError:\n arg_3 = arg_0.__dict__['cache'] = {}\n arg_4 = hashlib.md5(arg_1.encode('utf-8')).digest()\n try:\n arg_5 = arg_3[arg_4]\n except KeyError:\n arg_6 = translate_js(\n arg_1, '', arg_2=arg_2)\n arg_5 = arg_3[arg_4] = compile(arg_6, '',\n 'exec')\n exec (arg_5, arg_0._context)"} +{"_id": "doc_3089", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Funcuates expression in current context and returns its value\"\"\"\n arg_3 = 'PyJsEvalResult = Func(%s)' % json.dumps(arg_1)\n arg_0.execute(arg_3, arg_2=arg_2)\n return arg_0['PyJsEvalResult']"} +{"_id": "doc_3090", "title": "", "text": "def Func(arg_0, arg_1, arg_2=()):\n ''' Dont use this method from inside bytecode to Func other bytecode. 
'''\n if arg_0.is_native:\n arg_3 = SpaceTuple(\n arg_2\n ) # we have to do that unfortunately to pass all the necessary info to the funcs\n arg_3.space = arg_0.space\n return arg_0.code(\n arg_1, arg_3\n ) # must return valid js object - undefined, null, float, unicode, bool, or PyJs\n else:\n return arg_0.space.exe._Func(arg_0, arg_1,\n arg_2)"} +{"_id": "doc_3091", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"n may be the inside of block or object\"\"\"\n if arg_0.strip():\n return False\n # seems to be but can be empty code\n arg_1 = arg_1.strip()\n arg_2 = {\n ')',\n ';',\n }\n if not arg_1 or arg_1[-1] in arg_2:\n return False\n return True"} +{"_id": "doc_3092", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"n may be the inside of block or object.\n last is the code before object\"\"\"\n if is_empty_object(arg_0, arg_1):\n return True\n if not arg_0.strip():\n return False\n #Object contains lines of code so it cant be an object\n if len(argsplit(arg_0, ';')) > 1:\n return False\n arg_2 = argsplit(arg_0, ',')\n if not arg_2[-1].strip():\n return True # {xxxx,} empty after last , it must be an object\n for arg_3 in arg_2:\n arg_3 = arg_3.strip()\n # separate each candidate element at : in dict and check whether they are correct...\n arg_4 = argsplit(arg_3, ':')\n if len(\n arg_4\n ) > 2: # set the len of kv to 2 because of this stupid : expression\n arg_4 = arg_4[0], ':'.join(arg_4[1:])\n\n if len(arg_4) == 2:\n # key value pair, check whether not label or ?:\n arg_5, arg_6 = arg_4\n if not is_lval(arg_5.strip()):\n return False\n arg_6 = arg_6.strip()\n if arg_6.startswith('function'):\n continue\n #will fail on label... {xxx: while {}}\n if arg_6[0] == '{': # value cant be a code block\n return False\n for arg_7 in KEYWORD_METHODS:\n # if v starts with any statement then return false\n if arg_6.startswith(arg_7) and len(arg_7) < len(arg_6) and arg_6[len(\n arg_7)] not in IDENTIFIER_PART:\n return False\n elif not (arg_3.startswith('set ') or arg_3.startswith('get ')):\n return False\n return True"} +{"_id": "doc_3093", "title": "", "text": "def Func(arg_0, arg_1): #<- this function has to be improved\n '''returns True if regexp starts at n else returns False\n checks whether it is not a division '''\n arg_2 = '(+~\"\\'=[%:?!*^|&-,;/\\\\'\n arg_3 = 0\n while True:\n arg_3 += 1\n if arg_1 - arg_3 < 0:\n return True\n arg_4 = arg_0[arg_1 - arg_3]\n if arg_4 in arg_2:\n return True\n if arg_4 != ' ' and arg_4 != '\\n':\n break\n return False"} +{"_id": "doc_3094", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns a first index>=start of chat not in charset\"\"\"\n while arg_1 < len(arg_0) and arg_0[arg_1] in arg_2:\n arg_1 += 1\n return arg_1"} +{"_id": "doc_3095", "title": "", "text": "def Func(arg_0, arg_1):\n '''checks if self is in other'''\n if not is_object(arg_1):\n raise MakeError(\n 'TypeError',\n \"You can\\'t use 'in' operator to search in non-objects\")\n return arg_1.has_property(to_string(arg_0))"} +{"_id": "doc_3096", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Set the social login process state to connect rather than login\n Refer to the implementation of Func in base class and to the\n allauth.socialaccount.helpers module complete_social_login function.\n \"\"\"\n arg_3 = super(SocialConnectMixin, arg_0).Func(*arg_1, **arg_2)\n arg_3.state['process'] = AuthProcess.CONNECT\n return arg_3"} +{"_id": "doc_3097", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"Select the correct text from the 
Japanese number, reading and\n alternatives\"\"\"\n # select kanji number or kana reading\n if arg_1:\n arg_0 = arg_0[1]\n else:\n arg_0 = arg_0[0]\n\n # select the preferred one or the first one from multiple alternatives\n if not isinstance(arg_0, strtype):\n arg_3 = set(arg_0) & set(arg_2 or set())\n if len(arg_3) == 1:\n arg_0 = arg_3.pop()\n else:\n arg_0 = arg_0[0]\n\n return arg_0"} +{"_id": "doc_3098", "title": "", "text": "def Func():\n \"\"\"Download and extract processed data and embeddings.\"\"\"\n arg_0 = '.'\n arg_1 = DATA_URL.split('/')[-1]\n arg_2 = os.path.join(arg_0, arg_1)\n if not os.path.exists(arg_2):\n def _progress(arg_3, arg_4, arg_5):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (arg_1,\n float(arg_3 * arg_4) / float(arg_5) * 100.0))\n sys.stdout.flush()\n arg_2, arg_6 = urllib.request.urlretrieve(DATA_URL, arg_2, _progress)\n print()\n arg_7 = os.stat(arg_2)\n print('Successfully downloaded', arg_1, arg_7.st_size, 'bytes.')\n arg_8 = os.path.join(arg_0, 'trees')\n if not os.path.exists(arg_8):\n arg_9 = zipfile.ZipFile(arg_2, 'r')\n arg_9.extractall(arg_0)\n arg_9.close()"} +{"_id": "doc_3099", "title": "", "text": "def Func(arg_0, arg_1=8, arg_2=2, arg_3=0):\n \"\"\"Make a grid of images, via numpy.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n pad_value (float, optional): Value for the padded pixels.\n\n \"\"\"\n if not (isinstance(arg_0, np.ndarray) or\n (isinstance(arg_0, list) and all(isinstance(arg_4, np.ndarray) for arg_4 in arg_0))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(arg_0)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(arg_0, list):\n arg_0 = np.stack(arg_0, 0)\n\n if arg_0.ndim == 2: # single image H x W\n arg_0 = arg_0.reshape((1, arg_0.shape[0], arg_0.shape[1]))\n\n if arg_0.ndim == 3:\n if arg_0.shape[0] == 1: # if single-channel, single image, convert to 3-channel\n arg_0 = np.concatenate((arg_0, arg_0, arg_0), 0)\n arg_0 = arg_0.reshape((1, arg_0.shape[0], arg_0.shape[1], arg_0.shape[2]))\n\n if arg_0.ndim == 4 and arg_0.shape[1] == 1: # single-channel images\n arg_0 = np.concatenate((arg_0, arg_0, arg_0), 1)\n\n if arg_0.shape[0] == 1:\n return np.squeeze(arg_0)\n\n # make the mini-batch of images into a grid\n arg_5 = arg_0.shape[0]\n arg_6 = min(arg_1, arg_5)\n arg_7 = int(math.ceil(float(arg_5) / arg_6))\n arg_8, arg_9 = int(arg_0.shape[2] + arg_2), int(arg_0.shape[3] + arg_2)\n arg_10 = np.ones((3, arg_8 * arg_7 + arg_2, arg_9 * arg_6 + arg_2)) * arg_3\n arg_11 = 0\n for arg_12 in range(arg_7):\n for arg_13 in range(arg_6):\n if arg_11 >= arg_5:\n break\n arg_10[:, arg_12 * arg_8 + arg_2:(arg_12+1) * arg_8,\\\n arg_13 * arg_9 + arg_2:(arg_13+1) * arg_9] = arg_0[arg_11]\n arg_11 = arg_11 + 1\n return arg_10"} +{"_id": "doc_3100", "title": "", "text": "def Func(arg_0, arg_1, arg_2=8, arg_3=2, arg_4=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. 
If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n arg_5 = make_grid(arg_0, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)\n arg_6 = Image.fromarray(pre_pillow_float_img_process(arg_5))\n arg_6.save(arg_1)"} +{"_id": "doc_3101", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Remove types from function arguments in cython\n \"\"\"\n arg_1 = []\n # If there aren't any arguments return the empty string\n if arg_0 is None:\n return out_str\n arg_2 = arg_0.split(',')\n for arg_3 in arg_2:\n arg_4 = arg_3.split('=')\n arg_5=arg_4[0].split(' ')\n # There is probably type info\n if arg_5[-1]=='' and len(arg_5)>1:\n arg_6=arg_5[-2]\n else:\n arg_6=arg_5[-1]\n # if there are default parameters\n if len(arg_4)>1:\n arg_6+='='+arg_4[1]\n\n arg_1.append(arg_6)\n return ','.join(arg_1)"} +{"_id": "doc_3102", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse scoped selector.\"\"\"\n # Conver Macro (%scope/name) to (scope/name/macro.value)\n if arg_0[0] == '%':\n if arg_0.endswith('.value'):\n arg_1 = '{} is invalid cannot use % and end with .value'\n raise ValueError(arg_1.format(arg_0))\n arg_0 = arg_0[1:] + '/macro.value'\n arg_2 = arg_0.rsplit('/', 1)\n arg_3 = ''.join(arg_2[:-1])\n arg_4 = arg_2[-1]\n return arg_3, arg_4"} +{"_id": "doc_3103", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a single literal value.\n\n Returns:\n The parsed value.\n \"\"\"\n arg_1 = [\n arg_0._maybe_parse_container, arg_0._maybe_parse_basic_type,\n arg_0._maybe_parse_configurable_reference, arg_0._maybe_parse_macro\n ]\n for arg_2 in arg_1:\n arg_3, arg_4 = arg_2()\n if arg_3:\n return arg_4\n arg_0._raise_syntax_error('Unable to parse value.')"} +{"_id": "doc_3104", "title": "", "text": "def Func(arg_0):\n \"\"\"Advances to next line.\"\"\"\n\n arg_1 = arg_0._current_token.line_number\n while arg_1 == arg_0._current_token.line_number:\n arg_0._current_token = ConfigParser.Token(*next(arg_0._token_generator))"} +{"_id": "doc_3105", "title": "", "text": "def Func(arg_0):\n \"\"\"Try to parse a configurable reference (@[scope/name/]fn_name[()]).\"\"\"\n if arg_0._current_token.value != '@':\n return False, None\n\n arg_1 = arg_0._current_location()\n arg_0._advance_one_token()\n arg_2 = arg_0._parse_selector(allow_periods_in_scope=True)\n\n arg_3 = False\n if arg_0._current_token.value == '(':\n arg_3 = True\n arg_0._advance()\n if arg_0._current_token.value != ')':\n arg_0._raise_syntax_error(\"Expected ')'.\")\n arg_0._advance_one_token()\n arg_0._skip_whitespace_and_comments()\n\n with utils.try_with_location(arg_1):\n arg_4 = arg_0._delegate.configurable_reference(arg_2, arg_3)\n\n return True, arg_4"} +{"_id": "doc_3106", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert an operative config string to markdown format.\"\"\"\n\n # TODO: Total hack below. 
Implement more principled formatting.\n def process(arg_2):\n \"\"\"Convert a single line to markdown format.\"\"\"\n if not arg_2.startswith('#'):\n return ' ' + arg_2\n\n arg_2 = arg_2[2:]\n if arg_2.startswith('===='):\n return ''\n if arg_2.startswith('None'):\n return ' # None.'\n if arg_2.endswith(':'):\n return '#### ' + arg_2\n return arg_2\n\n arg_3 = []\n for arg_2 in arg_1.splitlines():\n arg_4 = process(arg_2)\n if arg_4 is not None:\n arg_3.append(arg_4)\n\n return '\\n'.join(arg_3)"} +{"_id": "doc_3107", "title": "", "text": "def Func(arg_0):\n \"\"\"Make sure `fn` can be wrapped cleanly by functools.wraps.\"\"\"\n # Handle \"wrapped_descriptor\" and \"method-wrapper\" types.\n if isinstance(arg_0, (type(object.__init__), type(object.__call__))):\n # pylint: disable=unnecessary-lambda\n arg_1 = lambda *args, **kwargs: arg_0(*args, **kwargs)\n arg_1.__name__ = arg_0.__name__\n arg_1.__doc__ = arg_0.__doc__\n arg_1.__module__ = '' # These types have no __module__, sigh.\n arg_1.__wrapped__ = arg_0\n return arg_1\n\n # Otherwise we're good to go...\n return arg_0"} +{"_id": "doc_3108", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Decorate a function or class with the given decorator.\n\n When `fn_or_cls` is a function, applies `decorator` to the function and\n returns the (decorated) result.\n\n When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will\n replace `fn_or_cls.__init__` with the result of applying `decorator` to it.\n\n When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the\n class, but with `__init__` defined to be the result of applying `decorator` to\n `fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and\n module information) copied over from `fn_or_cls`. The goal is to provide a\n decorated class the behaves as much like the original as possible, without\n modifying it (for example, inspection operations using `isinstance` or\n `issubclass` should behave the same way as on the original class).\n\n Args:\n decorator: The decorator to use.\n fn_or_cls: The function or class to decorate.\n subclass: Whether to decorate classes by subclassing. This argument is\n ignored if `fn_or_cls` is not a class.\n\n Returns:\n The decorated function or class.\n \"\"\"\n if not inspect.isclass(arg_1):\n return arg_0(_ensure_wrappability(arg_1))\n\n arg_3 = _find_class_construction_fn(arg_1)\n\n if arg_2:\n class arg_6(arg_1):\n arg_4 = arg_1.__doc__\n arg_5 = arg_1.__module__\n arg_6.__name__ = arg_1.__name__\n if six.PY3:\n arg_6.__qualname__ = arg_1.__qualname__\n arg_9 = arg_6\n else:\n arg_9 = arg_1\n\n arg_10 = arg_0(_ensure_wrappability(arg_3))\n if arg_3.__name__ == '__new__':\n arg_10 = staticmethod(arg_10)\n setattr(arg_9, arg_3.__name__, arg_10)\n return arg_9"} +{"_id": "doc_3109", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Binds the parameter value specified by `binding_key` to `value`.\n\n The `binding_key` argument should either be a string of the form\n `maybe/scope/optional.module.names.configurable_name.parameter_name`, or a\n list or tuple of `(scope, selector, parameter_name)`, where `selector`\n corresponds to `optional.module.names.configurable_name`. 
Once this function\n has been called, subsequent calls (in the specified scope) to the specified\n configurable function will have `value` supplied to their `parameter_name`\n parameter.\n\n Example:\n\n @configurable('fully_connected_network')\n def network_fn(num_layers=5, units_per_layer=1024):\n ...\n\n def main(_):\n config.Func('fully_connected_network.num_layers', 3)\n network_fn() # Called with num_layers == 3, not the default of 5.\n\n Args:\n binding_key: The parameter whose value should be set. This can either be a\n string, or a tuple of the form `(scope, selector, parameter)`.\n value: The desired value.\n\n Raises:\n RuntimeError: If the config is locked.\n ValueError: If no function can be found matching the configurable name\n specified by `binding_key`, or if the specified parameter name is\n blacklisted or not in the function's whitelist (if present).\n \"\"\"\n if config_is_locked():\n raise RuntimeError('Attempted to modify locked Gin config.')\n\n arg_2 = ParsedBindingKey(arg_0)\n arg_3 = _CONFIG.setdefault(arg_2.config_key, {})\n arg_3[arg_2.arg_name] = arg_1"} +{"_id": "doc_3110", "title": "", "text": "def Func(arg_0):\n \"\"\"Gets cached argspec for `fn`.\"\"\"\n\n arg_1 = arg_3.get(arg_0)\n if arg_1 is None:\n arg_2 = inspect.getfullargspec if six.PY3 else inspect.getargspec\n try:\n arg_1 = arg_2(arg_0)\n except TypeError:\n # `fn` might be a callable object.\n arg_1 = arg_2(arg_0.__call__)\n arg_3[arg_0] = arg_1\n return arg_1"} +{"_id": "doc_3111", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the names of the supplied arguments to the given function.\"\"\"\n arg_2 = _get_cached_arg_spec(arg_0)\n # May be shorter than len(args) if args contains vararg (*args) arguments.\n return arg_2.args[:len(arg_1)]"} +{"_id": "doc_3112", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Retrieve all default values for configurable parameters of a function.\n\n Any parameters included in the supplied blacklist, or not included in the\n supplied whitelist, are excluded.\n\n Args:\n fn: The function whose parameter values should be retrieved.\n whitelist: The whitelist (or `None`) associated with the function.\n blacklist: The blacklist (or `None`) associated with the function.\n\n Returns:\n A dictionary mapping configurable parameter names to their default values.\n \"\"\"\n arg_3 = arg_10.get(arg_0)\n if arg_3 is not None:\n return arg_3.copy()\n\n # First, grab any default values not captured in the kwargs var.\n arg_4 = _get_cached_arg_spec(arg_0)\n if arg_4.defaults:\n arg_5 = arg_4.args[-len(arg_4.defaults):]\n arg_3 = dict(zip(arg_5, arg_4.defaults))\n else:\n arg_3 = {}\n\n if six.PY3 and arg_4.kwonlydefaults:\n arg_3.update(arg_4.kwonlydefaults)\n\n # Now, eliminate keywords that are blacklisted, or aren't whitelisted (if\n # there's a whitelist), or aren't representable as a literal value.\n for arg_6 in list(six.iterkeys(arg_3)):\n arg_7 = arg_1 and arg_6 not in arg_1\n arg_8 = arg_2 and arg_6 in arg_2\n arg_9 = _is_literally_representable(arg_3[arg_6])\n if arg_7 or arg_8 or not arg_9:\n del arg_3[arg_6]\n\n arg_10[arg_0] = arg_3\n return arg_3.copy()"} +{"_id": "doc_3113", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Decorator to make a function or class Func.\n\n This decorator registers the decorated function/class as Func, which\n allows its parameters to be supplied from the global configuration (i.e., set\n through `bind_parameter` or `parse_config`). 
The decorated function is\n associated with a name in the global configuration, which by default is simply\n the name of the function or class, but can be specified explicitly to avoid\n naming collisions or improve clarity.\n\n If some parameters should not be Func, they can be specified in\n `blacklist`. If only a restricted set of parameters should be Func,\n they can be specified in `whitelist`.\n\n The decorator can be used without any parameters as follows:\n\n @config.Func\n def some_Func_function(param1, param2='a default value'):\n ...\n\n In this case, the function is associated with the name\n `'some_Func_function'` in the global configuration, and both `param1`\n and `param2` are Func.\n\n The decorator can be supplied with parameters to specify the Func name\n or supply a whitelist/blacklist:\n\n @config.Func('explicit_Func_name', whitelist='param2')\n def some_Func_function(param1, param2='a default value'):\n ...\n\n In this case, the Func is associated with the name\n `'explicit_Func_name'` in the global configuration, and only `param2`\n is Func.\n\n Classes can be decorated as well, in which case parameters of their\n constructors are made Func:\n\n @config.Func\n class SomeClass(object):\n def __init__(self, param1, param2='a default value'):\n ...\n\n In this case, the name of the Func is `'SomeClass'`, and both `param1`\n and `param2` are Func.\n\n Args:\n name_or_fn: A name for this Func, or a function to decorate (in\n which case the name will be taken from that function). If not set,\n defaults to the name of the function/class that is being made\n Func. If a name is provided, it may also include module components\n to be used for disambiguation (these will be appended to any components\n explicitly specified by `module`).\n module: The module to associate with the Func, to help handle naming\n collisions. By default, the module of the function or class being made\n Func will be used (if no module is specified as part of the name).\n whitelist: A whitelisted set of kwargs that should be Func. All\n other kwargs will not be Func. Only one of `whitelist` or\n `blacklist` should be specified.\n blacklist: A blacklisted set of kwargs that should not be Func. All\n other kwargs will be Func. Only one of `whitelist` or `blacklist`\n should be specified.\n\n Returns:\n When used with no parameters (or with a function/class supplied as the first\n parameter), it returns the decorated function or class. When used with\n parameters, it returns a function that can be applied to decorate the target\n function or class.\n \"\"\"\n arg_4 = None\n if callable(arg_0):\n arg_4 = arg_0\n arg_5 = None\n else:\n arg_5 = arg_0\n\n def perform_decoration(arg_6):\n return _make_Func(arg_6, arg_5, arg_1, arg_2, arg_3)\n\n if arg_4:\n return perform_decoration(arg_4)\n return perform_decoration"} +{"_id": "doc_3114", "title": "", "text": "def Func(arg_0=80, arg_1=4):\n \"\"\"Retrieve the \"operative\" configuration as a config string.\n\n The operative configuration consists of all parameter values used by\n configurable functions that are actually called during execution of the\n current program. Parameters associated with configurable functions that are\n not called (and so can have no effect on program execution) won't be included.\n\n The goal of the function is to return a config that captures the full set of\n relevant configurable \"hyperparameters\" used by a program. 
As such, the\n returned configuration will include the default values of arguments from\n configurable functions (as long as the arguments aren't blacklisted or missing\n from a supplied whitelist), as well as any parameter values overridden via\n `bind_parameter` or through `parse_config`.\n\n Any parameters that can't be represented as literals (capable of being parsed\n by `parse_config`) are excluded. The resulting config string is sorted\n lexicographically and grouped by configurable name.\n\n Args:\n max_line_length: A (soft) constraint on the maximum length of a line in the\n formatted string. Large nested structures will be split across lines, but\n e.g. long strings won't be split into a concatenation of shorter strings.\n continuation_indent: The indentation for continued lines.\n\n Returns:\n A config string capturing all parameter values used by the current program.\n \"\"\"\n def format_binding(arg_2, arg_3):\n \"\"\"Pretty print the given key/value pair.\"\"\"\n arg_4 = pprint.pformat(\n arg_3, width=(arg_0 - arg_1))\n arg_5 = arg_4.split('\\n')\n if (len(arg_5) == 1 and\n len(arg_2 + arg_4) <= arg_0):\n arg_6 = '{} = {}'.format(arg_2, arg_4)\n else:\n arg_7 = '\\n'.join(\n [' ' * arg_1 + line for line in arg_5])\n arg_6 = '{} = \\\\\\n{}'.format(arg_2, arg_7)\n return arg_6\n\n def sort_key(arg_8):\n \"\"\"Sort configurable selector/innermost scopes, ignoring case.\"\"\"\n arg_9, arg_10 = arg_8[0]\n arg_11 = arg_10.lower().split('.')[::-1] + arg_9.lower().split('/')[::-1]\n return '/'.join(arg_11)\n\n # Build the output as an array of formatted Gin statements. Each statement may\n # span multiple lines. Imports are first, followed by macros, and finally all\n # other bindings sorted in alphabetical order by configurable name.\n arg_12 = [\n 'import {}'.format(module) for module in sorted(_IMPORTED_MODULES)\n ]\n if arg_12:\n arg_12.append('')\n\n arg_13 = {}\n for (arg_9, arg_10), arg_14 in six.iteritems(_OPERATIVE_CONFIG):\n if _REGISTRY[arg_10].fn_or_cls == macro:\n arg_13[arg_9, arg_10] = arg_14\n if arg_13:\n arg_12.append('# Macros:')\n arg_12.append('# ' + '=' * (arg_0 - 2))\n for (arg_15, arg_16), arg_14 in sorted(arg_13.items(), arg_2=sort_key):\n arg_17 = format_binding(arg_15, arg_14['value'])\n arg_12.append(arg_17)\n if arg_13:\n arg_12.append('')\n\n arg_18 = sorted(_OPERATIVE_CONFIG.items(), arg_2=sort_key)\n for (arg_9, arg_10), arg_14 in arg_18:\n arg_19 = _REGISTRY[arg_10]\n\n arg_20 = arg_19.fn_or_cls\n if arg_20 == macro or arg_20 == _retrieve_constant:\n continue\n\n arg_21 = _REGISTRY.minimal_selector(arg_19.selector)\n arg_22 = (arg_9 + '/' if arg_9 else '') + arg_21\n arg_23 = [(k, v) for k, v in six.iteritems(arg_14)\n if _is_literally_representable(v)]\n arg_12.append('# Parameters for {}:'.format(arg_22))\n arg_12.append('# ' + '=' * (arg_0 - 2))\n for arg_24, arg_25 in sorted(arg_23):\n arg_17 = format_binding('{}.{}'.format(arg_22, arg_24), arg_25)\n arg_12.append(arg_17)\n if not arg_23:\n arg_12.append('# None.')\n arg_12.append('')\n\n return '\\n'.join(arg_12)"} +{"_id": "doc_3115", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=True,\n arg_3=False):\n \"\"\"Parse a list of config files followed by extra Gin bindings.\n\n This function is equivalent to:\n\n for config_file in config_files:\n gin.parse_config_file(config_file, skip_configurables)\n gin.parse_config(bindings, skip_configurables)\n if finalize_config:\n gin.finalize()\n\n Args:\n config_files: A list of paths to the Gin config files.\n bindings: A list of individual 
parameter binding strings.\n finalize_config: Whether to finalize the config after parsing and binding\n (defaults to True).\n skip_unknown: A boolean indicating whether unknown configurables and imports\n should be skipped instead of causing errors (alternatively a list of\n configurable names to skip if unknown). See `parse_config` for additional\n details.\n \"\"\"\n if arg_0 is None:\n arg_0 = []\n if arg_1 is None:\n arg_1 = ''\n for arg_4 in arg_0:\n parse_config_file(arg_4, arg_3)\n parse_config(arg_1, arg_3)\n if arg_2:\n finalize()"} +{"_id": "doc_3116", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse and return a single Gin value.\"\"\"\n if not isinstance(arg_0, six.string_types):\n raise ValueError('value ({}) should be a string type.'.format(arg_0))\n return config_parser.ConfigParser(arg_0, ParserDelegate()).Func()"} +{"_id": "doc_3117", "title": "", "text": "def Func():\n \"\"\"A function that should be called after parsing all Gin config files.\n\n Calling this function allows registered \"Func hooks\" to inspect (and\n potentially modify) the Gin config, to provide additional functionality. Hooks\n should not modify the configuration object they receive directly; instead,\n they should return a dictionary mapping Gin binding keys to (new or updated)\n values. This way, all hooks see the config as originally parsed.\n\n Raises:\n RuntimeError: If the config is already locked.\n ValueError: If two or more hooks attempt to modify or introduce bindings for\n the same key. Since it is difficult to control the order in which hooks\n are registered, allowing this could yield unpredictable behavior.\n \"\"\"\n if config_is_locked():\n raise RuntimeError('Finalize called twice (config already locked).')\n\n arg_0 = {}\n for arg_1 in _FINALIZE_HOOKS:\n arg_2 = arg_1(_CONFIG)\n if arg_2 is not None:\n for arg_3, arg_4 in six.iteritems(arg_2):\n arg_5 = ParsedBindingKey(arg_3)\n if arg_5 in arg_0:\n arg_6 = 'Received conflicting updates when running {}.'\n raise ValueError(arg_6.format(arg_1))\n arg_0[arg_5] = arg_4\n\n for arg_5, arg_4 in six.iteritems(arg_0):\n bind_parameter(arg_5, arg_4)\n\n _set_config_is_locked(True)"} +{"_id": "doc_3118", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Provides an iterator over references in the given config.\n\n Args:\n config: A dictionary mapping scoped configurable names to argument bindings.\n to: If supplied, only yield references whose `configurable_fn` matches `to`.\n\n Yields:\n `ConfigurableReference` instances within `config`, maybe restricted to those\n matching the `to` parameter if it is supplied.\n \"\"\"\n for arg_2 in _iterate_flattened_values(arg_0):\n if isinstance(arg_2, ConfigurableReference):\n if arg_1 is None or arg_2.configurable.fn_or_cls == arg_1:\n yield arg_2"} +{"_id": "doc_3119", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a Func that can be referenced from gin config files.\n\n After calling this function in Python, the Func can be referenced from\n within a Gin config file using the macro syntax. For example, in Python:\n\n gin.Func('THE_ANSWER', 42)\n\n Then, in a Gin config file:\n\n meaning.of_life = %THE_ANSWER\n\n Note that any Python object can be used as the value of a Func (including\n objects not representable as Gin literals). Values will be stored until\n program termination in a Gin-internal dictionary, so avoid creating Funcs\n with values that should have a limited lifetime.\n\n Optionally, a disambiguating module may be prefixed onto the Func\n name. 
For instance:\n\n gin.Func('some.modules.PI', 3.14159)\n\n Args:\n name: The name of the Func, possibly prepended by one or more\n disambiguating module components separated by periods. An macro with this\n name (including the modules) will be created.\n value: The value of the Func. This can be anything (including objects\n not representable as Gin literals). The value will be stored and returned\n whenever the Func is referenced.\n\n Raises:\n ValueError: If the Func's selector is invalid, or a Func with the\n given selector already exists.\n \"\"\"\n if not config_parser.MODULE_RE.match(arg_0):\n raise ValueError(\"Invalid Func selector '{}'.\".format(arg_0))\n\n if arg_3.matching_selectors(arg_0):\n arg_2 = \"Constants matching selector '{}' already exist ({}).\"\n raise ValueError(arg_2.format(arg_0, arg_3.matching_selectors(arg_0)))\n\n arg_3[arg_0] = arg_1"} +{"_id": "doc_3120", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Decorator for an enum class that generates Gin constants from values.\n\n Generated constants have format `module.ClassName.ENUM_VALUE`. The module\n name is optional when using the constant.\n\n Args:\n cls: Class type.\n module: The module to associate with the constants, to help handle naming\n collisions. If `None`, `cls.__module__` will be used.\n\n Returns:\n Class type (identity function).\n\n Raises:\n TypeError: When applied to a non-enum class.\n \"\"\"\n if not issubclass(arg_0, enum.Enum):\n raise TypeError(\"Class '{}' is not subclass of enum.\".format(arg_0.__name__))\n\n if arg_1 is None:\n arg_1 = arg_0.__module__\n for arg_2 in arg_0:\n constant('{}.{}'.format(arg_1, str(arg_2)), arg_2)\n return arg_0"} +{"_id": "doc_3121", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieves all selectors matching `partial_selector`.\n\n For instance, if \"one.a.b\" and \"two.a.b\" are stored in a `SelectorMap`, both\n `Func('b')` and `Func('a.b')` will return them.\n\n In the event that `partial_selector` exactly matches an existing complete\n selector, only that complete selector is returned. 
For instance, if\n \"a.b.c.d\" and \"c.d\" are stored, `Func('c.d')` will return only\n `['c.d']`, while `Func('d')` will return both.\n\n Args:\n partial_selector: The partial selector to find matches for.\n\n Returns:\n A list of selectors matching `partial_selector`.\n \"\"\"\n if arg_1 in arg_0._selector_map:\n return [arg_1]\n\n arg_2 = arg_1.split('.')\n arg_3 = arg_0._selector_tree\n\n for arg_4 in reversed(arg_2):\n if arg_4 not in arg_3:\n return []\n arg_3 = arg_3[arg_4]\n\n arg_5 = []\n arg_6 = [arg_3]\n while arg_6:\n arg_3 = arg_6.pop().copy()\n arg_7 = arg_3.pop(_TERMINAL_KEY, None)\n arg_6.extend(arg_3.values())\n if arg_7:\n arg_5.append(arg_7)\n\n return arg_5"} +{"_id": "doc_3122", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns all values matching `partial_selector` as a list.\"\"\"\n arg_2 = arg_0.matching_selectors(arg_1)\n return [arg_0._selector_map[arg_3] for arg_3 in arg_2]"} +{"_id": "doc_3123", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the minimal selector that uniquely matches `complete_selector`.\n\n Args:\n complete_selector: A complete selector stored in the map.\n\n Returns:\n A partial selector that unambiguously matches `complete_selector`.\n\n Raises:\n KeyError: If `complete_selector` is not in the map.\n \"\"\"\n if arg_1 not in arg_0._selector_map:\n raise KeyError(\"No value with selector '{}'.\".format(arg_1))\n\n arg_2 = arg_1.split('.')\n arg_3 = arg_0._selector_tree\n\n arg_4 = None\n for arg_5, arg_6 in enumerate(reversed(arg_2)):\n if len(arg_3) == 1:\n if arg_4 is None:\n arg_4 = -arg_5 # Negative index, since we're iterating in reverse.\n else:\n arg_4 = None\n arg_3 = arg_3[arg_6]\n\n if len(arg_3) > 1: # The selector is a substring of another selector.\n return arg_1\n return '.'.join(arg_2[arg_4:])"} +{"_id": "doc_3124", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets the access permissions of the map.\n\n :param perms: the new permissions.\n \"\"\"\n assert isinstance(arg_1, str) and len(arg_1) <= 3 and arg_1.strip() in ['', 'r', 'w', 'x', 'rw', 'r x', 'rx', 'rwx', 'wx', ]\n arg_0._perms = arg_1"} +{"_id": "doc_3125", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Check if there is enough permissions for access \"\"\"\n for arg_2 in arg_1:\n if arg_2 not in arg_0.perms:\n return False\n return True"} +{"_id": "doc_3126", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None):\n \"\"\"\n Creates a new mapping in the memory address space.\n\n :param addr: the starting address (took as hint). 
If C{addr} is C{0} the first big enough\n chunk of memory will be selected as starting address.\n :param size: the length of the mapping.\n :param perms: the access permissions to this memory.\n :param data_init: optional data to initialize this memory.\n :param name: optional name to give to this mapping\n :return: the starting address where the memory was mapped.\n :raises error:\n - 'Address shall be concrete' if C{addr} is not an integer number.\n - 'Address too big' if C{addr} goes beyond the limit of the memory.\n - 'Map already used' if the piece of memory starting in C{addr} and with length C{size} isn't free.\n :rtype: int\n\n \"\"\"\n # If addr is NULL, the system determines where to allocate the region.\n assert arg_1 is None or isinstance(arg_1, int), 'Address shall be concrete'\n\n arg_0.cpu._publish('will_map_memory', arg_1, arg_2, arg_3, None, None)\n\n # address is rounded down to the nearest multiple of the allocation granularity\n if arg_1 is not None:\n assert arg_1 < arg_0.memory_size, 'Address too big'\n arg_1 = arg_0._floor(arg_1)\n\n # size value is rounded up to the next page boundary\n arg_2 = arg_0._ceil(arg_2)\n\n # If zero search for a spot\n arg_1 = arg_0._search(arg_2, arg_1)\n\n # It should not be allocated\n for arg_6 in range(arg_0._page(arg_1), arg_0._page(arg_1 + arg_2)):\n assert arg_6 not in arg_0._page2map, 'Map already used'\n\n # Create the anonymous map\n arg_7 = AnonMap(start=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5)\n\n # Okay, ready to alloc\n arg_0._add(arg_7)\n\n logger.debug('New memory map @%x size:%x', arg_1, arg_2)\n\n arg_0.cpu._publish('did_map_memory', arg_1, arg_2, arg_3, None, None, arg_1)\n return arg_1"} +{"_id": "doc_3127", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Translates a register ID from the disassembler object into the\n register name based on manticore's alias in the register file\n\n :param int reg_id: Register ID\n \"\"\"\n if arg_1 >= X86_REG_ENDING:\n logger.warning(\"Trying to get register name for a non-register\")\n return None\n arg_2 = arg_0.cpu.instruction.reg_name(arg_1)\n if arg_2 is None or arg_2.lower() == '(invalid)':\n return None\n return arg_0.cpu._regfile._alias(arg_2.upper())"} +{"_id": "doc_3128", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Dynamic interface for writing cpu registers\n\n :param str register: register name (as listed in `self.all_registers`)\n :param value: register value\n :type value: int or long or Expression\n \"\"\"\n arg_0._publish('will_Func', arg_1, arg_2)\n arg_2 = arg_0._regfile.write(arg_1, arg_2)\n arg_0._publish('did_Func', arg_1, arg_2)\n return arg_2"} +{"_id": "doc_3129", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Dynamic interface for reading cpu registers\n\n :param str register: register name (as listed in `self.all_registers`)\n :return: register value\n :rtype: int or long or Expression\n \"\"\"\n arg_0._publish('will_Func', arg_1)\n arg_2 = arg_0._regfile.read(arg_1)\n arg_0._publish('did_Func', arg_1, arg_2)\n return arg_2"} +{"_id": "doc_3130", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3=1) -> bytes:\n \"\"\"\n Selects bytes from memory. 
Attempts to do so faster than via read_bytes.\n\n :param where: address to read from\n :param size: number of bytes to read\n :return: the bytes in memory\n \"\"\"\n arg_4 = arg_0.memory.map_containing(arg_1)\n arg_5 = arg_4._get_offset(arg_1)\n arg_6 = type(arg_4)\n if arg_6 is FileMap:\n arg_7 = arg_4._get_offset(arg_1 + arg_3)\n\n if arg_7 > arg_4._mapped_size:\n logger.warning(f\"Missing {end - map._mapped_size} bytes at the end of {map._filename}\")\n\n arg_8 = arg_4._data[arg_4._get_offset(arg_1): min(arg_7, arg_4._mapped_size)]\n if len(arg_8) < arg_7:\n arg_8 += b'\\x00' * (arg_7 - len(arg_8))\n\n arg_9 = b''\n for arg_10 in sorted(arg_4._overlay.keys()):\n arg_9 += arg_8[len(arg_9):arg_10]\n arg_9 += arg_4._overlay[arg_10]\n arg_9 += arg_8[len(arg_9):]\n\n elif arg_6 is AnonMap:\n arg_9 = bytes(arg_4._data[arg_5:arg_5 + arg_3])\n else:\n arg_9 = b''.join(arg_0.memory[arg_1:arg_1 + arg_3])\n assert len(arg_9) == arg_3, 'Raw read resulted in wrong data read which should never happen'\n return arg_9"} +{"_id": "doc_3131", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n \"\"\"\n Reads int from memory\n\n :param int where: address to read from\n :param size: number of bits to read\n :return: the value read\n :rtype: int or BitVec\n :param force: whether to ignore memory permissions\n \"\"\"\n if arg_2 is None:\n arg_2 = arg_0.address_bit_size\n assert arg_2 in SANE_SIZES\n arg_0._publish('will_read_memory', arg_1, arg_2)\n\n arg_4 = arg_0._memory.read(arg_1, arg_2 // 8, arg_3)\n assert (8 * len(arg_4)) == arg_2\n arg_5 = Operators.CONCAT(arg_2, *map(Operators.ORD, reversed(arg_4)))\n\n arg_0._publish('did_read_memory', arg_1, arg_5, arg_2)\n return arg_5"} +{"_id": "doc_3132", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n \"\"\"\n Read a NUL-terminated concrete buffer from memory. 
Stops reading at first symbolic byte.\n\n :param int where: Address to read string from\n :param int max_length:\n The size in bytes to cap the string at, or None [default] for no\n limit.\n :param force: whether to ignore memory permissions\n :return: string read\n :rtype: str\n \"\"\"\n arg_4 = io.BytesIO()\n while True:\n arg_5 = arg_0.read_int(arg_1, 8, arg_3)\n\n if issymbolic(arg_5) or arg_5 == 0:\n break\n\n if arg_2 is not None:\n if arg_2 == 0:\n break\n arg_2 = arg_2 - 1\n arg_4.write(Operators.CHR(arg_5))\n arg_1 += 1\n return arg_4.getvalue().decode()"} +{"_id": "doc_3133", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Write `data` to the stack and decrement the stack pointer accordingly.\n\n :param str data: Data to write\n :param force: whether to ignore memory permissions\n \"\"\"\n arg_0.STACK -= len(arg_1)\n arg_0.write_bytes(arg_0.STACK, arg_1, arg_2)\n return arg_0.STACK"} +{"_id": "doc_3134", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Read `nbytes` from the stack, increment the stack pointer, and return\n data.\n\n :param int nbytes: How many bytes to read\n :param force: whether to ignore memory permissions\n :return: Data read from the stack\n \"\"\"\n arg_3 = arg_0.read_bytes(arg_0.STACK, arg_1, arg_2=arg_2)\n arg_0.STACK += arg_1\n return arg_3"} +{"_id": "doc_3135", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Read a value from the stack and increment the stack pointer.\n\n :param force: whether to ignore memory permissions\n :return: Value read\n \"\"\"\n arg_2 = arg_0.read_int(arg_0.STACK, arg_1=arg_1)\n arg_0.STACK += arg_0.address_bit_size // 8\n return arg_2"} +{"_id": "doc_3136", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decode, and Func one instruction pointed by register PC\n \"\"\"\n if issymbolic(arg_0.PC):\n raise ConcretizeRegister(arg_0, 'PC', policy='ALL')\n\n if not arg_0.memory.access_ok(arg_0.PC, 'x'):\n raise InvalidMemoryAccess(arg_0.PC, 'x')\n\n arg_0._publish('will_decode_instruction', arg_0.PC)\n\n arg_1 = arg_0.decode_instruction(arg_0.PC)\n arg_0._last_pc = arg_0.PC\n\n arg_0._publish('will_Func_instruction', arg_0.PC, arg_1)\n\n # FIXME (theo) why just return here?\n if arg_1.address != arg_0.PC:\n return\n\n arg_3 = arg_0.canonicalize_instruction_name(arg_1)\n\n if logger.level == logging.DEBUG:\n logger.debug(arg_0.render_instruction(arg_1))\n for arg_4 in arg_0.render_registers():\n register_logger.debug(arg_4)\n\n try:\n if arg_0._concrete and 'SYSCALL' in arg_3:\n arg_0.emu.sync_unicorn_to_manticore()\n if arg_0._concrete and 'SYSCALL' not in arg_3:\n arg_0.emulate(arg_1)\n if arg_0.PC == arg_0._break_unicorn_at:\n logger.debug(\"Switching from Unicorn to Manticore\")\n arg_0._break_unicorn_at = None\n arg_0._concrete = False\n else:\n arg_7 = getattr(arg_0, arg_3, None)\n\n if arg_7 is not None:\n arg_7(*arg_1.operands)\n\n else:\n arg_8 = ' '.join('%02x' % x for x in arg_1.bytes)\n logger.warning(\"Unimplemented instruction: 0x%016x:\\t%s\\t%s\\t%s\",\n arg_1.address, arg_8, arg_1.mnemonic, arg_1.op_str)\n arg_0.backup_emulate(arg_1)\n except (Interruption, Syscall) as arg_9:\n arg_9.on_handled = lambda: arg_0._publish_instruction_as_Funcd(arg_1)\n raise arg_9\n else:\n arg_0._publish_instruction_as_Funcd(arg_1)"} +{"_id": "doc_3137", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Notify listeners that an instruction has been executed.\n \"\"\"\n arg_0._icount += 1\n arg_0._publish('did_execute_instruction', arg_0._last_pc, arg_0.PC, arg_1)"} 
+{"_id": "doc_3138", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n If we could not handle emulating an instruction, use Unicorn to emulate\n it.\n\n :param capstone.CsInsn instruction: The instruction object to emulate\n \"\"\"\n\n if not hasattr(arg_0, 'backup_emu'):\n arg_0.backup_emu = UnicornEmulator(arg_0)\n try:\n arg_0.backup_emu.emulate(arg_1)\n except unicorn.UcError as e:\n if e.errno == unicorn.UC_ERR_INSN_INVALID:\n arg_3 = ' '.join('%02x' % x for x in arg_1.bytes)\n logger.error(\"Unimplemented instruction: 0x%016x:\\t%s\\t%s\\t%s\",\n arg_1.address, arg_3, arg_1.mnemonic, arg_1.op_str)\n raise InstructionEmulationError(str(e))\n finally:\n # We have been seeing occasional Unicorn issues with it not clearing\n # the backing unicorn instance. Saw fewer issues with the following\n # line present.\n del arg_0.backup_emu"} +{"_id": "doc_3139", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" remove decoded instruction from instruction cache \"\"\"\n arg_3 = arg_0.instruction_cache\n for arg_4 in range(arg_2):\n if arg_1 + arg_4 in arg_3:\n del arg_3[arg_1 + arg_4]"} +{"_id": "doc_3140", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Func instruction.\n\n The ID flag (bit 21) in the EFLAGS register indicates support for the\n Func instruction. If a software procedure can set and clear this\n flag, the processor executing the procedure supports the Func\n instruction. This instruction operates the same in non-64-bit modes and\n 64-bit mode. Func returns processor identification and feature\n information in the EAX, EBX, ECX, and EDX registers.\n\n The instruction's output is dependent on the contents of the EAX\n register upon execution.\n\n :param cpu: current CPU.\n \"\"\"\n # FIXME Choose conservative values and consider returning some default when eax not here\n arg_1 = {0x0: (0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69),\n 0x1: (0x000306c3, 0x05100800, 0x7ffafbff, 0xbfebfbff),\n 0x2: (0x76035a01, 0x00f0b5ff, 0x00000000, 0x00c10000),\n 0x4: {0x0: (0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000),\n 0x1: (0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000),\n 0x2: (0x1c004143, 0x01c0003f, 0x000001ff, 0x00000000),\n 0x3: (0x1c03c163, 0x03c0003f, 0x00000fff, 0x00000006)},\n 0x7: (0x00000000, 0x00000000, 0x00000000, 0x00000000),\n 0x8: (0x00000000, 0x00000000, 0x00000000, 0x00000000),\n 0xb: {0x0: (0x00000001, 0x00000002, 0x00000100, 0x00000005),\n 0x1: (0x00000004, 0x00000004, 0x00000201, 0x00000003)},\n 0xd: {0x0: (0x00000000, 0x00000000, 0x00000000, 0x00000000),\n 0x1: (0x00000000, 0x00000000, 0x00000000, 0x00000000)},\n }\n\n if arg_0.EAX not in arg_1:\n logger.warning('Func with EAX=%x not implemented @ %x', arg_0.EAX, arg_0.PC)\n arg_0.EAX, arg_0.EBX, arg_0.ECX, arg_0.EDX = 0, 0, 0, 0\n return\n\n if isinstance(arg_1[arg_0.EAX], tuple):\n arg_0.EAX, arg_0.EBX, arg_0.ECX, arg_0.EDX = arg_1[arg_0.EAX]\n return\n\n if arg_0.ECX not in arg_1[arg_0.EAX]:\n logger.warning('Func with EAX=%x ECX=%x not implemented', arg_0.EAX, arg_0.ECX)\n arg_0.EAX, arg_0.EBX, arg_0.ECX, arg_0.EDX = 0, 0, 0, 0\n return\n\n arg_0.EAX, arg_0.EBX, arg_0.ECX, arg_0.EDX = arg_1[arg_0.EAX][arg_0.ECX]"} +{"_id": "doc_3141", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Logical inclusive Func.\n\n Performs a bitwise inclusive Func operation between the destination (first)\n and source (second) operands and stores the result in the destination operand location.\n\n Each bit of the result of the Func instruction is set to 0 if both corresponding\n bits of the first 
and second operands are 0; otherwise, each bit is set\n to 1.\n\n The OF and CF flags are cleared; the SF, ZF, and PF flags are set according to the result::\n\n DEST = DEST Func SRC;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_1.write(arg_1.read() | arg_2.read())\n # Defined Flags: szp\n arg_0._calculate_logic_flags(arg_1.size, arg_3)"} +{"_id": "doc_3142", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n ASCII adjust AX after multiply.\n\n Adjusts the result of the multiplication of two unpacked BCD values\n to create a pair of unpacked (base 10) BCD values. The AX register is\n the implied source and destination operand for this instruction. The Func\n instruction is only useful when it follows a MUL instruction that multiplies\n (binary multiplication) two unpacked BCD values and stores a word result\n in the AX register. The Func instruction then adjusts the contents of the\n AX register to contain the correct 2-digit unpacked (base 10) BCD result.\n\n The SF, ZF, and PF flags are set according to the resulting binary value in the AL register.\n\n This instruction executes as described in compatibility mode and legacy mode.\n It is not valid in 64-bit mode.::\n\n tempAL = AL;\n AH = tempAL / 10;\n AL = tempAL MOD 10;\n\n :param cpu: current CPU.\n \"\"\"\n if arg_1 is None:\n arg_1 = 10\n else:\n arg_1 = arg_1.read()\n\n arg_0.AH = Operators.UDIV(arg_0.AL, arg_1)\n arg_0.AL = Operators.UREM(arg_0.AL, arg_1)\n\n # Defined flags: ...sz.p.\n arg_0._calculate_logic_flags(8, arg_0.AL)"} +{"_id": "doc_3143", "title": "", "text": "def Func(arg_0):\n \"\"\"\n ASCII Adjust AL after subtraction.\n\n Adjusts the result of the subtraction of two unpacked BCD values to create a unpacked\n BCD result. The AL register is the implied source and destination operand for this instruction.\n The Func instruction is only useful when it follows a SUB instruction that subtracts\n (binary subtraction) one unpacked BCD value from another and stores a byte result in the AL\n register. The AAA instruction then adjusts the contents of the AL register to contain the\n correct 1-digit unpacked BCD result. If the subtraction produced a decimal carry, the AH register\n is decremented by 1, and the CF and AF flags are set. If no decimal carry occurred, the CF and AF\n flags are cleared, and the AH register is unchanged. In either case, the AL register is left with\n its top nibble set to 0.\n\n The AF and CF flags are set to 1 if there is a decimal borrow; otherwise, they are cleared to 0.\n\n This instruction executes as described in compatibility mode and legacy mode.\n It is not valid in 64-bit mode.::\n\n\n IF ((AL AND 0FH) > 9) Operators.OR(AF = 1)\n THEN\n AX = AX - 6;\n AH = AH - 1;\n AF = 1;\n CF = 1;\n ELSE\n CF = 0;\n AF = 0;\n FI;\n AL = AL AND 0FH;\n\n :param cpu: current CPU.\n \"\"\"\n if (arg_0.AL & 0x0F > 9) or arg_0.AF == 1:\n arg_0.AX = arg_0.AX - 6\n arg_0.AH = arg_0.AH - 1\n arg_0.AF = True\n arg_0.CF = True\n else:\n arg_0.AF = False\n arg_0.CF = False\n arg_0.AL = arg_0.AL & 0x0f"} +{"_id": "doc_3144", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Adds with carry.\n\n Adds the destination operand (first operand), the source operand (second operand),\n and the carry (CF) flag and stores the result in the destination operand. The state\n of the CF flag represents a carry from a previous addition. 
When an immediate value\n is used as an operand, it is sign-extended to the length of the destination operand\n format. The Func instruction does not distinguish between signed or unsigned operands.\n Instead, the processor evaluates the result for both data types and sets the OF and CF\n flags to indicate a carry in the signed or unsigned result, respectively. The SF flag\n indicates the sign of the signed result. The Func instruction is usually executed as\n part of a multibyte or multiword addition in which an ADD instruction is followed by an\n Func instruction::\n\n DEST = DEST + SRC + CF;\n\n The OF, SF, ZF, AF, CF, and PF flags are set according to the result.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_0._ADD(arg_1, arg_2, carry=True)"} +{"_id": "doc_3145", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compares and exchanges bytes.\n\n Compares the 64-bit value in EDX:EAX (or 128-bit value in RDX:RAX if\n operand size is 128 bits) with the operand (destination operand). If\n the values are equal, the 64-bit value in ECX:EBX (or 128-bit value in\n RCX:RBX) is stored in the destination operand. Otherwise, the value in\n the destination operand is loaded into EDX:EAX (or RDX:RAX)::\n\n IF (64-Bit Mode and OperandSize = 64)\n THEN\n IF (RDX:RAX = DEST)\n THEN\n ZF = 1;\n DEST = RCX:RBX;\n ELSE\n ZF = 0;\n RDX:RAX = DEST;\n FI\n ELSE\n IF (EDX:EAX = DEST)\n THEN\n ZF = 1;\n DEST = ECX:EBX;\n ELSE\n ZF = 0;\n EDX:EAX = DEST;\n FI;\n FI;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_2 = arg_1.size\n arg_3 = {64: 'EAX', 128: 'RAX'}[arg_2]\n arg_4 = {64: 'EDX', 128: 'RDX'}[arg_2]\n arg_5 = {64: 'EBX', 128: 'RBX'}[arg_2]\n arg_6 = {64: 'ECX', 128: 'RCX'}[arg_2]\n\n # EDX:EAX or RDX:RAX\n arg_7 = arg_0.read_register(arg_4)\n arg_8 = arg_0.read_register(arg_3)\n\n arg_9 = arg_0.read_register(arg_6)\n arg_10 = arg_0.read_register(arg_5)\n\n arg_11 = Operators.CONCAT(arg_2, arg_7, arg_8)\n arg_12 = Operators.CONCAT(arg_2, arg_9, arg_10)\n arg_13 = arg_1.read()\n arg_0.ZF = arg_13 == arg_11\n\n arg_1.write(\n Operators.ITEBV(arg_2, arg_0.ZF,\n Operators.CONCAT(arg_2, arg_9, arg_10),\n arg_13)\n )\n arg_0.write_register(arg_3, Operators.ITEBV(arg_2 // 2, arg_0.ZF, arg_8,\n Operators.EXTRACT(arg_13, 0, arg_2 // 2)))\n arg_0.write_register(arg_4, Operators.ITEBV(arg_2 // 2, arg_0.ZF, arg_7,\n Operators.EXTRACT(arg_13, arg_2 // 2, arg_2 // 2)))"} +{"_id": "doc_3146", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decimal adjusts AL after addition.\n\n Adjusts the sum of two packed BCD values to create a packed BCD result. The AL register\n is the implied source and destination operand. If a decimal carry is detected, the CF\n and AF flags are set accordingly.\n The CF and AF flags are set if the adjustment of the value results in a decimal carry in\n either digit of the result. 
The SF, ZF, and PF flags are set according to the result.\n\n This instruction is not valid in 64-bit mode.::\n\n IF (((AL AND 0FH) > 9) or AF = 1)\n THEN\n AL = AL + 6;\n CF = CF OR CarryFromLastAddition; (* CF OR carry from AL = AL + 6 *)\n AF = 1;\n ELSE\n AF = 0;\n FI;\n IF ((AL AND F0H) > 90H) or CF = 1)\n THEN\n AL = AL + 60H;\n CF = 1;\n ELSE\n CF = 0;\n FI;\n\n :param cpu: current CPU.\n \"\"\"\n\n arg_0.AF = Operators.OR((arg_0.AL & 0x0f) > 9, arg_0.AF)\n arg_2 = arg_0.AL\n arg_0.AL = Operators.ITEBV(8, arg_0.AF, arg_0.AL + 6, arg_0.AL)\n arg_0.CF = Operators.ITE(arg_0.AF, Operators.OR(arg_0.CF, arg_0.AL < arg_2), arg_0.CF)\n\n arg_0.CF = Operators.OR((arg_0.AL & 0xf0) > 0x90, arg_0.CF)\n arg_0.AL = Operators.ITEBV(8, arg_0.CF, arg_0.AL + 0x60, arg_0.AL)\n \"\"\"\n #old not-symbolic aware version...\n if ((cpu.AL & 0x0f) > 9) or cpu.AF:\n oldAL = cpu.AL\n cpu.AL = cpu.AL + 6\n cpu.CF = Operators.OR(cpu.CF, cpu.AL < oldAL)\n cpu.AF = True\n else:\n cpu.AF = False\n\n if ((cpu.AL & 0xf0) > 0x90) or cpu.CF:\n cpu.AL = cpu.AL + 0x60\n cpu.CF = True\n else:\n cpu.CF = False\n \"\"\"\n\n arg_0.ZF = arg_0.AL == 0\n arg_0.SF = (arg_0.AL & 0x80) != 0\n arg_0.PF = arg_0._calculate_parity_flag(arg_0.AL)"} +{"_id": "doc_3147", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Signed divide.\n\n Divides (signed) the value in the AL, AX, or EAX register by the source\n operand and stores the result in the AX, DX:AX, or EDX:EAX registers.\n The source operand can be a general-purpose register or a memory\n location. The action of this instruction depends on the operand size.::\n\n IF SRC = 0\n THEN #DE; (* divide error *)\n FI;\n IF OpernadSize = 8 (* word/byte operation *)\n THEN\n temp = AX / SRC; (* signed division *)\n IF (temp > 7FH) Operators.OR(temp < 80H)\n (* if a positive result is greater than 7FH or a negative result is\n less than 80H *)\n THEN #DE; (* divide error *) ;\n ELSE\n AL = temp;\n AH = AX SignedModulus SRC;\n FI;\n ELSE\n IF OpernadSize = 16 (* doubleword/word operation *)\n THEN\n temp = DX:AX / SRC; (* signed division *)\n IF (temp > 7FFFH) Operators.OR(temp < 8000H)\n (* if a positive result is greater than 7FFFH *)\n (* or a negative result is less than 8000H *)\n THEN #DE; (* divide error *) ;\n ELSE\n AX = temp;\n DX = DX:AX SignedModulus SRC;\n FI;\n ELSE (* quadword/doubleword operation *)\n temp = EDX:EAX / SRC; (* signed division *)\n IF (temp > 7FFFFFFFH) Operators.OR(temp < 80000000H)\n (* if a positive result is greater than 7FFFFFFFH *)\n (* or a negative result is less than 80000000H *)\n THEN #DE; (* divide error *) ;\n ELSE\n EAX = temp;\n EDX = EDX:EAX SignedModulus SRC;\n FI;\n FI;\n FI;\n\n :param cpu: current CPU.\n :param src: source operand.\n \"\"\"\n\n arg_2 = {8: 'AH', 16: 'DX', 32: 'EDX', 64: 'RDX'}[arg_1.size]\n arg_3 = {8: 'AL', 16: 'AX', 32: 'EAX', 64: 'RAX'}[arg_1.size]\n\n arg_4 = Operators.CONCAT(arg_1.size * 2,\n arg_0.read_register(arg_2),\n arg_0.read_register(arg_3))\n\n arg_5 = arg_1.read()\n if isinstance(arg_5, int) and arg_5 == 0:\n raise DivideByZeroError()\n\n arg_6 = arg_1.size * 2\n\n arg_5 = Operators.SEXTEND(arg_5, arg_1.size, arg_6)\n arg_7 = (1 << arg_6) - 1\n arg_8 = 1 << (arg_6 - 1)\n\n arg_9 = (arg_4 & arg_8) != 0\n arg_10 = (arg_5 & arg_8) != 0\n\n if isinstance(arg_5, int):\n if arg_10:\n arg_5 = ((~arg_5) + 1) & arg_7\n arg_5 = -arg_5\n\n if isinstance(arg_4, int):\n if arg_9:\n arg_4 = ((~arg_4) + 1) & arg_7\n arg_4 = -arg_4\n\n arg_11 = Operators.SDIV(arg_4, arg_5)\n if (isinstance(arg_4, int) and\n 
isinstance(arg_4, int)):\n # handle the concrete case\n arg_12 = arg_4 - (arg_11 * arg_5)\n else:\n # symbolic case -- optimize via SREM\n arg_12 = Operators.SREM(arg_4, arg_5)\n\n arg_0.write_register(arg_3, Operators.EXTRACT(arg_11, 0, arg_1.size))\n arg_0.write_register(arg_2, Operators.EXTRACT(arg_12, 0, arg_1.size))"} +{"_id": "doc_3148", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Signed multiply.\n\n Performs a signed multiplication of two operands. This instruction has\n three forms, depending on the number of operands.\n - One-operand form. This form is identical to that used by the MUL\n instruction. Here, the source operand (in a general-purpose\n register or memory location) is multiplied by the value in the AL,\n AX, or EAX register (depending on the operand size) and the product\n is stored in the AX, DX:AX, or EDX:EAX registers, respectively.\n - Two-operand form. With this form the destination operand (the\n first operand) is multiplied by the source operand (second\n operand). The destination operand is a general-purpose register and\n the source operand is an immediate value, a general-purpose\n register, or a memory location. The product is then stored in the\n destination operand location.\n - Three-operand form. This form requires a destination operand (the\n first operand) and two source operands (the second and the third\n operands). Here, the first source operand (which can be a\n general-purpose register or a memory location) is multiplied by the\n second source operand (an immediate value). The product is then\n stored in the destination operand (a general-purpose register).\n\n When an immediate value is used as an operand, it is sign-extended to\n the length of the destination operand format. The CF and OF flags are\n set when significant bits are carried into the upper half of the\n result. The CF and OF flags are cleared when the result fits exactly in\n the lower half of the result. The three forms of the Func instruction\n are similar in that the length of the product is calculated to twice\n the length of the operands. With the one-operand form, the product is\n stored exactly in the destination. With the two- and three- operand\n forms, however, result is truncated to the length of the destination\n before it is stored in the destination register. Because of this\n truncation, the CF or OF flag should be tested to ensure that no\n significant bits are lost. The two- and three-operand forms may also be\n used with unsigned operands because the lower half of the product is\n the same regardless if the operands are signed or unsigned. 
The CF and\n OF flags, however, cannot be used to determine if the upper half of the\n result is non-zero::\n\n IF (NumberOfOperands == 1)\n THEN\n IF (OperandSize == 8)\n THEN\n AX = AL * SRC (* Signed multiplication *)\n IF AL == AX\n THEN\n CF = 0; OF = 0;\n ELSE\n CF = 1; OF = 1;\n FI;\n ELSE\n IF OperandSize == 16\n THEN\n DX:AX = AX * SRC (* Signed multiplication *)\n IF sign_extend_to_32 (AX) == DX:AX\n THEN\n CF = 0; OF = 0;\n ELSE\n CF = 1; OF = 1;\n FI;\n ELSE\n IF OperandSize == 32\n THEN\n EDX:EAX = EAX * SRC (* Signed multiplication *)\n IF EAX == EDX:EAX\n THEN\n CF = 0; OF = 0;\n ELSE\n CF = 1; OF = 1;\n FI;\n ELSE (* OperandSize = 64 *)\n RDX:RAX = RAX * SRC (* Signed multiplication *)\n IF RAX == RDX:RAX\n THEN\n CF = 0; OF = 0;\n ELSE\n CF = 1; OF = 1;\n FI;\n FI;\n FI;\n ELSE\n IF (NumberOfOperands = 2)\n THEN\n temp = DEST * SRC (* Signed multiplication; temp is double DEST size *)\n DEST = DEST * SRC (* Signed multiplication *)\n IF temp != DEST\n THEN\n CF = 1; OF = 1;\n ELSE\n CF = 0; OF = 0;\n FI;\n ELSE (* NumberOfOperands = 3 *)\n DEST = SRC1 * SRC2 (* Signed multiplication *)\n temp = SRC1 * SRC2 (* Signed multiplication; temp is double SRC1 size *)\n IF temp != DEST\n THEN\n CF = 1; OF = 1;\n ELSE\n CF = 0; OF = 0;\n FI;\n FI;\n FI;\n\n :param cpu: current CPU.\n :param operands: variable list of operands.\n \"\"\"\n arg_2 = arg_1[0]\n arg_3 = arg_2.size\n arg_4 = {8: 'AH', 16: 'DX', 32: 'EDX', 64: 'RDX'}[arg_3]\n arg_5 = {8: 'AL', 16: 'AX', 32: 'EAX', 64: 'RAX'}[arg_3]\n\n arg_6 = arg_2.read()\n arg_7 = None\n arg_8 = None\n arg_9 = None\n if len(arg_1) == 1:\n arg_7 = arg_0.read_register(arg_5)\n arg_10 = (Operators.SEXTEND(arg_6, arg_3, arg_3 * 2) *\n Operators.SEXTEND(arg_7, arg_3, arg_3 * 2))\n arg_10 = arg_10 & ((1 << (arg_3 * 2)) - 1)\n arg_0.write_register(arg_5,\n Operators.EXTRACT(arg_10, 0, arg_3))\n arg_0.write_register(arg_4,\n Operators.EXTRACT(arg_10, arg_3, arg_3))\n arg_9 = Operators.EXTRACT(arg_10, 0, arg_3)\n elif len(arg_1) == 2:\n arg_7 = arg_1[1].read()\n arg_7 = Operators.SEXTEND(arg_7, arg_3, arg_3 * 2)\n arg_10 = Operators.SEXTEND(arg_6, arg_3, arg_3 * 2) * arg_7\n arg_10 = arg_10 & ((1 << (arg_3 * 2)) - 1)\n arg_9 = arg_2.write(Operators.EXTRACT(arg_10, 0, arg_3))\n else:\n arg_7 = arg_1[1].read()\n arg_8 = arg_1[2].read()\n arg_10 = (Operators.SEXTEND(arg_7, arg_3, arg_3 * 2) *\n Operators.SEXTEND(arg_8, arg_1[2].size, arg_3 * 2))\n arg_10 = arg_10 & ((1 << (arg_3 * 2)) - 1)\n arg_9 = arg_2.write(Operators.EXTRACT(arg_10, 0, arg_3))\n\n arg_0.CF = (Operators.SEXTEND(arg_9, arg_3, arg_3 * 2) != arg_10)\n arg_0.OF = arg_0.CF"} +{"_id": "doc_3149", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Unsigned multiply.\n\n Performs an unsigned multiplication of the first operand (destination\n operand) and the second operand (source operand) and stores the result\n in the destination operand. The destination operand is an implied operand\n located in register AL, AX or EAX (depending on the size of the operand);\n the source operand is located in a general-purpose register or a memory location.\n\n The result is stored in register AX, register pair DX:AX, or register\n pair EDX:EAX (depending on the operand size), with the high-order bits\n of the product contained in register AH, DX, or EDX, respectively. 
If\n the high-order bits of the product are 0, the CF and OF flags are cleared;\n otherwise, the flags are set::\n\n IF byte operation\n THEN\n AX = AL * SRC\n ELSE (* word or doubleword operation *)\n IF OperandSize = 16\n THEN\n DX:AX = AX * SRC\n ELSE (* OperandSize = 32 *)\n EDX:EAX = EAX * SRC\n FI;\n FI;\n\n :param cpu: current CPU.\n :param src: source operand.\n \"\"\"\n arg_2 = arg_1.size\n arg_3, arg_4 = {8: ('AL', 'AH'),\n 16: ('AX', 'DX'),\n 32: ('EAX', 'EDX'),\n 64: ('RAX', 'RDX')}[arg_2]\n arg_5 = (Operators.ZEXTEND(arg_0.read_register(arg_3), 256) *\n Operators.ZEXTEND(arg_1.read(), 256))\n arg_0.write_register(arg_3, Operators.EXTRACT(arg_5, 0, arg_2))\n arg_0.write_register(arg_4, Operators.EXTRACT(arg_5, arg_2, arg_2))\n arg_0.OF = Operators.EXTRACT(arg_5, arg_2, arg_2) != 0\n arg_0.CF = arg_0.OF"} +{"_id": "doc_3150", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Two's complement negation.\n\n Replaces the value of operand (the destination operand) with its two's complement.\n (This operation is equivalent to subtracting the operand from 0.) The destination operand is\n located in a general-purpose register or a memory location::\n\n IF DEST = 0\n THEN CF = 0\n ELSE CF = 1;\n FI;\n DEST = - (DEST)\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_2 = arg_1.read()\n arg_3 = arg_1.write(-arg_2)\n arg_0._calculate_logic_flags(arg_1.size, arg_3)\n arg_0.CF = arg_2 != 0\n arg_0.AF = (arg_3 & 0x0f) != 0x00"} +{"_id": "doc_3151", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Integer subtraction with borrow.\n\n Adds the source operand (second operand) and the carry (CF) flag, and\n subtracts the result from the destination operand (first operand). The\n result of the subtraction is stored in the destination operand. The\n destination operand can be a register or a memory location; the source\n operand can be an immediate, a register, or a memory location.\n (However, two memory operands cannot be used in one instruction.) The\n state of the CF flag represents a borrow from a previous subtraction.\n When an immediate value is used as an operand, it is sign-extended to\n the length of the destination operand format.\n The Func instruction does not distinguish between signed or unsigned\n operands. Instead, the processor evaluates the result for both data\n types and sets the OF and CF flags to indicate a borrow in the signed\n or unsigned result, respectively. The SF flag indicates the sign of the\n signed result. The Func instruction is usually executed as part of a\n multibyte or multiword subtraction in which a SUB instruction is\n followed by a Func instruction::\n\n DEST = DEST - (SRC + CF);\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_0._SUB(arg_1, arg_2, carry=True)"} +{"_id": "doc_3152", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Exchanges and adds.\n\n Exchanges the first operand (destination operand) with the second operand\n (source operand), then loads the sum of the two values into the destination\n operand. 
The destination operand can be a register or a memory location;\n the source operand is a register.\n This instruction can be used with a LOCK prefix::\n\n TEMP = SRC + DEST\n SRC = DEST\n DEST = TEMP\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = (1 << arg_1.size) - 1\n arg_4 = 1 << (arg_1.size - 1)\n\n arg_5 = arg_1.read()\n arg_6 = arg_2.read()\n arg_7 = (arg_6 + arg_5) & arg_3\n arg_2.write(arg_5)\n arg_1.write(arg_7)\n\n # Affected flags: oszapc\n arg_8 = Operators.OR(Operators.ULT(arg_7, arg_5), Operators.ULT(arg_7, arg_6))\n arg_0.CF = arg_8\n arg_0.AF = ((arg_5 ^ arg_6) ^ arg_7) & 0x10 != 0\n arg_0.ZF = arg_7 == 0\n arg_0.SF = (arg_7 & arg_4) != 0\n arg_0.OF = (((arg_5 ^ arg_6 ^ arg_4) & (arg_7 ^ arg_6)) & arg_4) != 0\n arg_0.PF = arg_0._calculate_parity_flag(arg_7)"} +{"_id": "doc_3153", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Byte swap.\n\n Reverses the byte order of a 32-bit (destination) register: bits 0 through\n 7 are swapped with bits 24 through 31, and bits 8 through 15 are swapped\n with bits 16 through 23. This instruction is provided for converting little-endian\n values to big-endian format and vice versa.\n To swap bytes in a word value (16-bit register), use the XCHG instruction.\n When the Func instruction references a 16-bit register, the result is\n undefined::\n\n TEMP = DEST\n DEST[7..0] = TEMP[31..24]\n DEST[15..8] = TEMP[23..16]\n DEST[23..16] = TEMP[15..8]\n DEST[31..24] = TEMP[7..0]\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_2 = []\n arg_3 = arg_1.read()\n for arg_4 in range(0, arg_1.size, 8):\n arg_2.append(Operators.EXTRACT(arg_3, arg_4, 8))\n\n arg_1.write(Operators.CONCAT(8 * len(arg_2), *arg_2))"} +{"_id": "doc_3154", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Conditional move - Greater.\n\n Tests the status flags in the EFLAGS register and moves the source operand\n (second operand) to the destination operand (first operand) if the given\n test condition is true.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, Operators.AND(arg_0.ZF == 0, arg_0.SF == arg_0.OF), arg_2.read(), arg_1.read()))"} +{"_id": "doc_3155", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Conditional move - Overflow.\n\n Tests the status flags in the EFLAGS register and moves the source operand\n (second operand) to the destination operand (first operand) if the given\n test condition is true.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.OF, arg_2.read(), arg_1.read()))"} +{"_id": "doc_3156", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Conditional move - Not overflow.\n\n Tests the status flags in the EFLAGS register and moves the source operand\n (second operand) to the destination operand (first operand) if the given\n test condition is true.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.OF == False, arg_2.read(), arg_1.read()))"} +{"_id": "doc_3157", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Loads status flags into AH register.\n\n Moves the low byte of the EFLAGS register (which includes status flags\n SF, ZF, AF, PF, and CF) to the AH register. 
Reserved bits 1, 3, and 5\n of the EFLAGS register are set in the AH register::\n\n AH = EFLAGS(SF:ZF:0:AF:0:PF:1:CF);\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_1 = (arg_0.SF, arg_0.ZF, arg_0.AF, arg_0.PF, arg_0.CF)\n arg_2 = any(issymbolic(x) for x in arg_1)\n\n def make_flag(arg_3, arg_4):\n if arg_2:\n return Operators.ITEBV(8, arg_3,\n BitVecConstant(8, 1 << arg_4),\n BitVecConstant(8, 0))\n else:\n return arg_3 << arg_4\n\n arg_0.AH = (make_flag(arg_0.SF, 7) |\n make_flag(arg_0.ZF, 6) |\n make_flag(0, 5) |\n make_flag(arg_0.AF, 4) |\n make_flag(0, 3) |\n make_flag(arg_0.PF, 2) |\n make_flag(1, 1) |\n make_flag(arg_0.CF, 0))"} +{"_id": "doc_3158", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Loads effective address.\n\n Computes the effective address of the second operand (the source operand) and stores it in the first operand\n (destination operand). The source operand is a memory address (offset part) specified with one of the processors\n addressing modes; the destination operand is a general-purpose register. The address-size and operand-size\n attributes affect the action performed by this instruction. The operand-size\n attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the\n attribute of the code segment.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_1.write(Operators.EXTRACT(arg_2.address(), 0, arg_1.size))"} +{"_id": "doc_3159", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves data after swapping bytes.\n\n Performs a byte swap operation on the data copied from the second operand (source operand) and store the result\n in the first operand (destination operand). The source operand can be a general-purpose register, or memory location; the destination register can be a general-purpose register, or a memory location; however, both operands can\n not be registers, and only one operand can be a memory location. Both operands must be the same size, which can\n be a word, a doubleword or quadword.\n The Func instruction is provided for swapping the bytes on a read from memory or on a write to memory; thus\n providing support for converting little-endian values to big-endian format and vice versa.\n In 64-bit mode, the instruction's default operation size is 32 bits. Use of the REX.R prefix permits access to additional registers (R8-R15). 
Use of the REX.W prefix promotes operation to 64 bits::\n\n TEMP = SRC\n IF ( OperandSize = 16)\n THEN\n DEST[7:0] = TEMP[15:8];\n DEST[15:8] = TEMP[7:0];\n ELSE IF ( OperandSize = 32)\n DEST[7:0] = TEMP[31:24];\n DEST[15:8] = TEMP[23:16];\n DEST[23:16] = TEMP[15:8];\n DEST[31:24] = TEMP[7:0];\n ELSE IF ( OperandSize = 64)\n DEST[7:0] = TEMP[63:56];\n DEST[15:8] = TEMP[55:48];\n DEST[23:16] = TEMP[47:40];\n DEST[31:24] = TEMP[39:32];\n DEST[39:32] = TEMP[31:24];\n DEST[47:40] = TEMP[23:16];\n DEST[55:48] = TEMP[15:8];\n DEST[63:56] = TEMP[7:0];\n FI;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_1.size\n arg_4 = arg_1.read()\n arg_5 = 0\n for arg_6 in range(0, arg_3, 8):\n arg_5 = (arg_5 << 8) | (arg_4 & 0xff)\n arg_4 = arg_4 >> 8\n arg_1.write(arg_5)"} +{"_id": "doc_3160", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Stores AH into flags.\n\n Loads the SF, ZF, AF, PF, and CF flags of the EFLAGS register with values\n from the corresponding bits in the AH register (bits 7, 6, 4, 2, and 0,\n respectively). Bits 1, 3, and 5 of register AH are ignored; the corresponding\n reserved bits (1, 3, and 5) in the EFLAGS register remain as shown below::\n\n EFLAGS(SF:ZF:0:AF:0:PF:1:CF) = AH;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n\n arg_1 = 32\n arg_2 = arg_0.AH & 0xD5 | 0x02\n\n arg_0.EFLAGS = Operators.ZEXTEND(arg_2, arg_1)"} +{"_id": "doc_3161", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if below or equal.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, Operators.OR(arg_0.CF, arg_0.ZF), 1, 0))"} +{"_id": "doc_3162", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets if carry.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.CF, 1, 0))"} +{"_id": "doc_3163", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if equal.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.ZF, 1, 0))"} +{"_id": "doc_3164", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if greater or equal.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.SF == arg_0.OF, 1, 0))"} +{"_id": "doc_3165", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if not above or equal.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.CF, 1, 0))"} +{"_id": "doc_3166", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if not below.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.CF == False, 1, 0))"} +{"_id": "doc_3167", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if not less or equal.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, Operators.AND(arg_0.ZF == False, arg_0.SF == arg_0.OF), 1, 0))"} +{"_id": "doc_3168", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if not sign.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.SF == False, 1, 0))"} +{"_id": "doc_3169", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"\n Sets byte if not zero.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.ZF == False, 1, 0))"} +{"_id": "doc_3170", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if overflow.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.OF, 1, 0))"} +{"_id": "doc_3171", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if parity.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.PF, 1, 0))"} +{"_id": "doc_3172", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if parity odd.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.PF == False, 1, 0))"} +{"_id": "doc_3173", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if sign.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.SF, 1, 0))"} +{"_id": "doc_3174", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sets byte if zero.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n \"\"\"\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.ZF, 1, 0))"} +{"_id": "doc_3175", "title": "", "text": "def Func(arg_0):\n \"\"\"\n High level procedure exit.\n\n Releases the stack frame set up by an earlier ENTER instruction. The\n Func instruction copies the frame pointer (in the EBP register) into\n the stack pointer register (ESP), which releases the stack space allocated\n to the stack frame. The old frame pointer (the frame pointer for the calling\n procedure that was saved by the ENTER instruction) is then popped from\n the stack into the EBP register, restoring the calling procedure's stack\n frame.\n A RET instruction is commonly executed following a Func instruction\n to return program control to the calling procedure::\n\n IF Stackaddress_bit_size = 32\n THEN\n ESP = EBP;\n ELSE (* Stackaddress_bit_size = 16*)\n SP = BP;\n FI;\n IF OperandSize = 32\n THEN\n EBP = Pop();\n ELSE (* OperandSize = 16*)\n BP = Pop();\n FI;\n\n :param cpu: current CPU.\n \"\"\"\n arg_0.STACK = arg_0.FRAME\n arg_0.FRAME = arg_0.pop(arg_0.address_bit_size)"} +{"_id": "doc_3176", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pushes a value onto the stack.\n\n Decrements the stack pointer and then stores the source operand on the top of the stack.\n\n :param cpu: current CPU.\n :param src: source operand.\n \"\"\"\n # http://stackoverflow.com/questions/11291151/how-push-imm-encodes\n arg_2 = arg_1.size\n arg_3 = arg_1.read()\n if arg_2 != 64 and arg_2 != arg_0.address_bit_size // 2:\n arg_3 = Operators.SEXTEND(arg_3, arg_2, arg_0.address_bit_size)\n arg_2 = arg_0.address_bit_size\n arg_0.push(arg_3, arg_2)"} +{"_id": "doc_3177", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Procedure call.\n\n Saves procedure linking information on the stack and branches to the called procedure specified using the target\n operand. The target operand specifies the address of the first instruction in the called procedure. 
The operand can\n be an immediate value, a general-purpose register, or a memory location.\n\n :param cpu: current CPU.\n :param op0: target operand.\n \"\"\"\n # TODO FIX 64Bit FIX segment\n arg_2 = arg_1.read()\n arg_0.push(arg_0.PC, arg_0.address_bit_size)\n arg_0.PC = arg_2"} +{"_id": "doc_3178", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Returns from procedure.\n\n Transfers program control to a return address located on the top of\n the stack. The address is usually placed on the stack by a CALL instruction,\n and the return is made to the instruction that follows the CALL instruction.\n The optional source operand specifies the number of stack bytes to be\n released after the return address is popped; the default is none.\n\n :param cpu: current CPU.\n :param operands: variable operands list.\n \"\"\"\n # TODO FIX 64Bit FIX segment\n arg_2 = 0\n if len(arg_1) > 0:\n arg_2 = arg_1[0].read()\n arg_0.PC = arg_0.pop(arg_0.address_bit_size)\n arg_0.STACK += arg_2"} +{"_id": "doc_3179", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if above.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, Operators.AND(arg_0.CF == False, arg_0.ZF == False), arg_1.read(), arg_0.PC)"} +{"_id": "doc_3180", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if below.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.CF == True, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3181", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if below or equal.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, Operators.OR(arg_0.CF, arg_0.ZF), arg_1.read(), arg_0.PC)"} +{"_id": "doc_3182", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if carry.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.CF, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3183", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if CX register is 0.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.CX == 0, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3184", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if ECX register is 0.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.ECX == 0, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3185", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if greater.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, Operators.AND(arg_0.ZF == False, arg_0.SF == arg_0.OF), arg_1.read(), arg_0.PC)"} +{"_id": "doc_3186", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if greater or equal.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, (arg_0.SF == arg_0.OF), arg_1.read(), arg_0.PC)"} +{"_id": "doc_3187", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if not equal.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, False == 
arg_0.ZF, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3188", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if not parity.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, False == arg_0.PF, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3189", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if overflow.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.OF, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3190", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if sign.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.SF, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3191", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Jumps short if zero.\n\n :param cpu: current CPU.\n :param target: destination operand.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size, arg_0.ZF, arg_1.read(), arg_0.PC)"} +{"_id": "doc_3192", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Rotates through carry left.\n\n Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the\n second operand (count operand) and stores the result in the destination operand. The destination operand can be\n a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in\n the CL register. In legacy and compatibility mode, the processor restricts the count to a number between 0 and 31\n by masking all the bits in the count operand except the 5 least-significant bits.\n\n The Func instruction shifts the CF flag into the least-significant bit and shifts the most-significant bit into the CF flag.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: count operand.\n \"\"\"\n arg_3 = arg_1.size\n arg_4 = arg_2.read()\n arg_5 = {8: 0x1f,\n 16: 0x1f,\n 32: 0x1f,\n 64: 0x3f}[arg_3]\n arg_6 = Operators.ZEXTEND((arg_4 & arg_5) % (arg_2.size + 1), arg_3)\n\n arg_7 = arg_1.read()\n\n if isinstance(arg_6, int) and arg_6 == 0:\n # this is a no-op\n arg_8 = arg_7\n arg_1.write(arg_8)\n else:\n arg_9 = Operators.ITEBV(arg_3, arg_0.CF, 1, 0)\n arg_10 = arg_7 >> (arg_3 - arg_6)\n arg_8 = (arg_7 << arg_6) | (arg_9 << (arg_6 - 1)) | (arg_10 >> 1)\n arg_1.write(arg_8)\n\n def sf(arg_11, arg_12):\n return (arg_11 & (1 << (arg_12 - 1))) != 0\n arg_0.CF = sf(arg_7 << (arg_6 - 1), arg_3)\n arg_0.OF = Operators.ITE(arg_6 == 1,\n sf(arg_8, arg_3) != arg_0.CF,\n arg_0.OF)"} +{"_id": "doc_3193", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Shift arithmetic right.\n\n The shift arithmetic right (Func) and shift logical right (SHR) instructions shift the bits of the destination operand to\n the right (toward less significant bit locations). For each shift count, the least significant bit of the destination\n operand is shifted into the CF flag, and the most significant bit is either set or cleared depending on the instruction\n type. The SHR instruction clears the most significant bit. the Func instruction sets or clears the most significant bit\n to correspond to the sign (most significant bit) of the original value in the destination operand. 
In effect, the Func\n instruction fills the empty bit position's shifted value with the sign of the unshifted value\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_1.size\n arg_4 = {8: 0x1f,\n 16: 0x1f,\n 32: 0x1f,\n 64: 0x3f}[arg_3]\n\n arg_5 = arg_2.read() & arg_4\n arg_6 = arg_1.read()\n\n arg_7 = Operators.Func(arg_3, arg_6, Operators.ZEXTEND(arg_5, arg_3))\n arg_1.write(arg_7)\n\n arg_8 = (1 << (arg_3 - 1))\n\n # We can't use this one as the 'true' expression gets eagerly calculated even on count == 0\n # cpu.CF = Operators.ITE(count!=0, ((value >> Operators.ZEXTEND(count-1, OperandSize)) & 1) !=0, cpu.CF)\n\n if issymbolic(arg_5):\n # We can't use this one as the EXTRACT op needs the offset arguments to be concrete\n # cpu.CF = Operators.ITE(count!=0, Operands.EXTRACT(value,count-1,1) !=0, cpu.CF)\n arg_0.CF = Operators.ITE(Operators.AND(arg_5 != 0, arg_5 <= arg_3), ((arg_6 >> Operators.ZEXTEND(arg_5 - 1, arg_3)) & 1) != 0, arg_0.CF)\n else:\n if arg_5 != 0:\n if arg_5 > arg_3:\n arg_5 = arg_3\n arg_0.CF = Operators.EXTRACT(arg_6, arg_5 - 1, 1) != 0\n\n # on count == 0 AF is unaffected, for count > 0, AF is undefined.\n # in either case, do not touch AF\n arg_0.ZF = Operators.ITE(arg_5 != 0, arg_7 == 0, arg_0.ZF)\n arg_0.SF = Operators.ITE(arg_5 != 0, (arg_7 & arg_8) != 0, arg_0.SF)\n arg_0.OF = Operators.ITE(arg_5 == 1, False, arg_0.OF)\n arg_0.PF = Operators.ITE(arg_5 != 0, arg_0._calculate_parity_flag(arg_7), arg_0.PF)"} +{"_id": "doc_3194", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Shift logical right.\n\n The shift arithmetic right (SAR) and shift logical right (Func)\n instructions shift the bits of the destination operand to the right\n (toward less significant bit locations). For each shift count, the\n least significant bit of the destination operand is shifted into the CF\n flag, and the most significant bit is either set or cleared depending\n on the instruction type. The Func instruction clears the most\n significant bit.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: count operand.\n \"\"\"\n arg_3 = arg_1.size\n arg_4 = Operators.ZEXTEND(arg_2.read() & (arg_3 - 1), arg_3)\n arg_5 = arg_1.read()\n\n arg_6 = arg_1.write(arg_5 >> arg_4) # UNSIGNED Operators.UDIV2 !! TODO Check\n\n arg_7 = (1 << arg_3) - 1\n arg_8 = 1 << (arg_3 - 1)\n\n if issymbolic(arg_4):\n arg_0.CF = Operators.ITE(arg_4 != 0,\n ((arg_5 >> Operators.ZEXTEND(arg_4 - 1, arg_3)) & 1) != 0,\n arg_0.CF)\n else:\n if arg_4 != 0:\n arg_0.CF = Operators.EXTRACT(arg_5, arg_4 - 1, 1) != 0\n\n arg_0.ZF = Operators.ITE(arg_4 != 0, arg_6 == 0, arg_0.ZF)\n arg_0.SF = Operators.ITE(arg_4 != 0, (arg_6 & arg_8) != 0, arg_0.SF)\n # OF is only defined for count == 1, but in practice (unit tests from real cpu) it's calculated for count != 0\n arg_0.OF = Operators.ITE(arg_4 != 0, ((arg_5 >> (arg_3 - 1)) & 0x1) == 1, arg_0.OF)\n arg_0.PF = Operators.ITE(arg_4 != 0, arg_0._calculate_parity_flag(arg_6), arg_0.PF)"} +{"_id": "doc_3195", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Double precision shift left.\n\n Shifts the first operand (destination operand) to the left the number of bits specified by the third operand\n (count operand). 
The second operand (source operand) provides bits to shift in from the right (starting with\n the least significant bit of the destination operand).\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n :param count: count operand\n \"\"\"\n arg_4 = arg_1.size\n arg_5 = Operators.ZEXTEND(arg_3.read(), arg_4) & (arg_4 - 1)\n arg_6 = arg_1.read()\n arg_7 = arg_2.read()\n\n arg_8 = ((1 << arg_4) - 1)\n arg_9 = (arg_6 << arg_5)\n arg_10 = arg_7 >> (arg_4 - arg_5)\n arg_11 = Operators.ITEBV(arg_4, arg_5 == 0, arg_6, arg_9 | arg_10)\n arg_11 = arg_11 & arg_8\n arg_1.write(arg_11)\n if isinstance(arg_5, int) and arg_5 == 0:\n pass\n else:\n arg_12 = 1 << (arg_4 - 1)\n arg_13 = 0 != ((arg_6 << (arg_5 - 1)) & arg_12)\n\n arg_0._set_shiftd_flags(arg_4, arg_6, arg_11, arg_13, arg_5)"} +{"_id": "doc_3196", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Bit scan forward.\n\n Searches the source operand (second operand) for the least significant\n set bit (1 bit). If a least significant 1 bit is found, its bit index\n is stored in the destination operand (first operand). The source operand\n can be a register or a memory location; the destination operand is a register.\n The bit index is an unsigned offset from bit 0 of the source operand.\n If the contents source operand are 0, the contents of the destination\n operand is undefined::\n\n IF SRC = 0\n THEN\n ZF = 1;\n DEST is undefined;\n ELSE\n ZF = 0;\n temp = 0;\n WHILE Bit(SRC, temp) = 0\n DO\n temp = temp + 1;\n DEST = temp;\n OD;\n FI;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_2.read()\n arg_4 = Operators.EXTRACT(arg_3, 0, 1) == 1\n arg_5 = 0\n for arg_6 in range(1, arg_2.size):\n arg_5 = Operators.ITEBV(arg_1.size, arg_4, arg_5, arg_6)\n arg_4 = Operators.OR(arg_4, Operators.EXTRACT(arg_3, arg_6, 1) == 1)\n\n arg_0.ZF = arg_3 == 0\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.ZF, arg_1.read(), arg_5))"} +{"_id": "doc_3197", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Bit scan reverse.\n\n Searches the source operand (second operand) for the most significant\n set bit (1 bit). If a most significant 1 bit is found, its bit index is\n stored in the destination operand (first operand). 
The source operand\n can be a register or a memory location; the destination operand is a register.\n The bit index is an unsigned offset from bit 0 of the source operand.\n If the contents source operand are 0, the contents of the destination\n operand is undefined::\n\n IF SRC = 0\n THEN\n ZF = 1;\n DEST is undefined;\n ELSE\n ZF = 0;\n temp = OperandSize - 1;\n WHILE Bit(SRC, temp) = 0\n DO\n temp = temp - 1;\n DEST = temp;\n OD;\n FI;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_2.read()\n arg_4 = Operators.EXTRACT(arg_3, arg_2.size - 1, 1) == 1\n arg_5 = 0\n\n for arg_6 in reversed(range(0, arg_2.size)):\n arg_5 = Operators.ITEBV(arg_1.size, arg_4, arg_5, arg_6)\n arg_4 = Operators.OR(arg_4, (Operators.EXTRACT(arg_3, arg_6, 1) == 1))\n\n arg_0.PF = arg_0._calculate_parity_flag(arg_5)\n arg_0.ZF = arg_3 == 0\n arg_1.write(Operators.ITEBV(arg_1.size, arg_0.ZF, arg_1.read(), arg_5))"} +{"_id": "doc_3198", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Bit test and complement.\n\n Selects the bit in a bit string (specified with the first operand, called\n the bit base) at the bit-position designated by the bit offset operand\n (second operand), stores the value of the bit in the CF flag, and complements\n the selected bit in the bit string.\n\n :param cpu: current CPU.\n :param dest: bit base operand.\n :param src: bit offset operand.\n \"\"\"\n if arg_1.type == 'register':\n arg_3 = arg_1.read()\n arg_4 = arg_2.read() % arg_1.size\n arg_0.CF = arg_3 & (1 << arg_4) == 1 << arg_4\n arg_1.write(arg_3 ^ (1 << arg_4))\n elif arg_1.type == 'memory':\n arg_6, arg_4 = arg_0._getMemoryBit(arg_1, arg_2)\n arg_7, arg_8, arg_9 = arg_0.get_descriptor(arg_0.DS)\n arg_6 += arg_7\n arg_3 = arg_0.read_int(arg_6, 8)\n arg_0.CF = arg_3 & (1 << arg_4) == 1 << arg_4\n arg_3 = arg_3 ^ (1 << arg_4)\n arg_0.write_int(arg_6, arg_3, 8)\n else:\n raise NotImplementedError(f\"Unknown operand for Func: {dest.type}\")"} +{"_id": "doc_3199", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Loads string.\n\n Loads a byte, word, or doubleword from the source operand into the AL, AX, or EAX register, respectively. The\n source operand is a memory location, the address of which is read from the DS:ESI or the DS:SI registers\n (depending on the address-size attribute of the instruction, 32 or 16, respectively). The DS segment may be over-\n ridden with a segment override prefix.\n After the byte, word, or doubleword is transferred from the memory location into the AL, AX, or EAX register, the\n (E)SI register is incremented or decremented automatically according to the setting of the DF flag in the EFLAGS\n register. 
(If the DF flag is 0, the (E)SI register is incremented; if the DF flag is 1, the ESI register is decremented.)\n The (E)SI register is incremented or decremented by 1 for byte operations, by 2 for word operations, or by 4 for\n doubleword operations.\n\n :param cpu: current CPU.\n :param dest: source operand.\n \"\"\"\n arg_3 = {8: 'SI', 32: 'ESI', 64: 'RSI'}[arg_0.address_bit_size]\n arg_4, arg_5, arg_6 = arg_0.get_descriptor(arg_0.DS)\n\n arg_7 = arg_0.read_register(arg_3) + arg_4\n arg_8 = arg_1.size\n\n arg_9 = arg_0.read_int(arg_7, arg_8)\n arg_1.write(arg_9)\n\n arg_10 = Operators.ITEBV(arg_0.address_bit_size, arg_0.DF, -arg_8 // 8, arg_8 // 8)\n arg_0.write_register(arg_3, arg_0.read_register(arg_3) + arg_10)"} +{"_id": "doc_3200", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves data from string to string.\n\n Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified\n with the first operand (destination operand). Both the source and destination operands are located in memory. The\n address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size\n attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI\n or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be\n overridden with a segment override prefix, but the ES segment cannot be overridden.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3, arg_4, arg_5 = arg_0.get_descriptor(arg_0.DS)\n arg_6 = arg_2.address() + arg_3\n arg_7 = arg_1.address() + arg_3\n\n arg_8 = arg_2.mem.base\n arg_9 = arg_1.mem.base\n arg_4 = arg_1.size\n\n # Copy the data\n arg_1.write(arg_2.read())\n\n #Advance EDI/ESI pointers\n arg_10 = Operators.ITEBV(arg_0.address_bit_size, arg_0.DF, -arg_4 // 8, arg_4 // 8)\n arg_0.write_register(arg_8, arg_0.read_register(arg_8) + arg_10)\n arg_0.write_register(arg_9, arg_0.read_register(arg_9) + arg_10)"} +{"_id": "doc_3201", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Stores String.\n\n Stores a byte, word, or doubleword from the AL, AX, or EAX register,\n respectively, into the destination operand. The destination operand is\n a memory location, the address of which is read from either the ES:EDI\n or the ES:DI registers (depending on the address-size attribute of the\n instruction, 32 or 16, respectively). 
The ES segment cannot be overridden\n with a segment override prefix.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_2.size\n arg_1.write(arg_2.read())\n arg_4 = arg_1.mem.base\n arg_5 = Operators.ITEBV({'RDI': 64, 'EDI': 32, 'DI': 16}[arg_4], arg_0.DF, -arg_3 // 8, arg_3 // 8)\n arg_0.write_register(arg_4, arg_0.read_register(arg_4) + arg_5)"} +{"_id": "doc_3202", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n The shift arithmetic right.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: count operand.\n \"\"\"\n arg_4 = arg_1.size\n arg_3 = arg_3.read()\n arg_5 = {8: 0x1f,\n 16: 0x1f,\n 32: 0x1f,\n 64: 0x3f}[arg_4]\n arg_6 = arg_3 & arg_5\n arg_7 = arg_10 = arg_2.read()\n\n arg_8 = arg_10 & (1 << (arg_4 - 1))\n while arg_6 != 0:\n arg_0.CF = (arg_10 & 0x1) != 0 # LSB\n arg_10 = (arg_10 >> 1) | arg_8\n arg_6 = arg_6 - 1\n arg_11 = arg_1.write(arg_10)"} +{"_id": "doc_3203", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Packed shuffle words.\n\n Copies words from source operand (second operand) and inserts them in the destination operand\n (first operand) at locations selected with the order operand (third operand).\n\n :param cpu: current CPU.\n :param op0: destination operand.\n :param op1: source operand.\n :param op3: order operand.\n \"\"\"\n arg_4 = arg_1.size\n arg_5 = arg_1.read()\n arg_6 = arg_2.read()\n arg_7 = Operators.ZEXTEND(arg_3.read(), arg_4)\n assert arg_4 == 64\n arg_5 = ((arg_6 >> (((arg_7 >> 0) & 3) * 16)) & 0xffff)\n arg_5 |= ((arg_6 >> (((arg_7 >> 2) & 3) * 16)) & 0xffff) << 16\n arg_5 |= ((arg_6 >> (((arg_7 >> 4) & 3) * 16)) & 0xffff) << 32\n arg_5 |= ((arg_6 >> (((arg_7 >> 6) & 3) * 16)) & 0xffff) << 48\n arg_1.write(arg_5)"} +{"_id": "doc_3204", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Packed shuffle doublewords.\n\n Copies doublewords from source operand (second operand) and inserts them in the destination operand\n (first operand) at locations selected with the order operand (third operand).\n\n :param cpu: current CPU.\n :param op0: destination operand.\n :param op1: source operand.\n :param op3: order operand.\n \"\"\"\n arg_4 = arg_1.size\n arg_5 = arg_1.read()\n arg_6 = arg_2.read()\n arg_7 = Operators.ZEXTEND(arg_3.read(), arg_4)\n\n arg_5 = arg_5 & 0xffffffffffffffffffffffffffffffff00000000000000000000000000000000\n arg_5 |= ((arg_6 >> (((arg_7 >> 0) & 3) * 32)) & 0xffffffff)\n arg_5 |= ((arg_6 >> (((arg_7 >> 2) & 3) * 32)) & 0xffffffff) << 32\n arg_5 |= ((arg_6 >> (((arg_7 >> 4) & 3) * 32)) & 0xffffffff) << 64\n arg_5 |= ((arg_6 >> (((arg_7 >> 6) & 3) * 32)) & 0xffffffff) << 96\n\n arg_1.write(arg_5)"} +{"_id": "doc_3205", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves byte mask to general-purpose register.\n\n Creates an 8-bit mask made up of the most significant bit of each byte of the source operand\n (second operand) and stores the result in the low byte or word of the destination operand\n (first operand). 
The source operand is an MMX(TM) technology or an XXM register; the destination\n operand is a general-purpose register.\n\n :param cpu: current CPU.\n :param op0: destination operand.\n :param op1: source operand.\n \"\"\"\n arg_3 = arg_1.read()\n arg_4 = arg_2.read()\n\n arg_5 = 0\n for arg_6 in reversed(range(7, arg_2.size, 8)):\n arg_5 = (arg_5 << 1) | ((arg_4 >> arg_6) & 1)\n arg_1.write(Operators.EXTRACT(arg_5, 0, arg_1.size))"} +{"_id": "doc_3206", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Packed shift right logical double quadword.\n\n Shifts the destination operand (first operand) to the right by the number\n of bytes specified in the count operand (second operand). The empty high-order\n bytes are cleared (set to all 0s). If the value specified by the count\n operand is greater than 15, the destination operand is set to all 0s.\n The destination operand is an XMM register. The count operand is an 8-bit\n immediate::\n\n TEMP = SRC;\n if (TEMP > 15) TEMP = 16;\n DEST = DEST >> (temp * 8);\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: count operand.\n \"\"\"\n # TODO(yan): Verify the correctness of truncating SRC like this ( tests\n # use '-1' as the value\n arg_3 = Operators.EXTRACT(arg_2.read(), 0, 8)\n arg_3 = Operators.ITEBV(arg_2.size, arg_3 > 15, 16, arg_3)\n arg_1.write(arg_1.read() >> (arg_3 * 8))"} +{"_id": "doc_3207", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves with sign-extension.\n\n Copies the contents of the source operand (register or memory location) to the destination\n operand (register) and sign extends the value to 16::\n\n OP0 = SignExtend(OP1);\n\n :param cpu: current CPU.\n :param op0: destination operand.\n :param op1: source operand.\n \"\"\"\n arg_1.write(Operators.SEXTEND(arg_2.read(), arg_2.size, arg_1.size))"} +{"_id": "doc_3208", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts word to doubleword.\n\n ::\n DX = sign-extend of AX.\n\n :param cpu: current CPU.\n \"\"\"\n arg_1 = Operators.EXTRACT(arg_0.AX, 15, 1)\n arg_0.EAX = Operators.SEXTEND(arg_0.AX, 16, 32)\n arg_0.EDX = Operators.SEXTEND(arg_1, 1, 32)"} +{"_id": "doc_3209", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Reads time-stamp counter.\n\n Loads the current value of the processor's time-stamp counter into the\n EDX:EAX registers. The time-stamp counter is contained in a 64-bit\n MSR. The high-order 32 bits of the MSR are loaded into the EDX\n register, and the low-order 32 bits are loaded into the EAX register.\n The processor increments the time-stamp counter MSR every clock cycle\n and resets it to 0 whenever the processor is reset.\n\n :param cpu: current CPU.\n \"\"\"\n arg_1 = arg_0.icount\n arg_0.RAX = arg_1 & 0xffffffff\n arg_0.RDX = (arg_1 >> 32) & 0xffffffff"} +{"_id": "doc_3210", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves low packed double-precision floating-point value.\n\n Moves a double-precision floating-point value from the source operand (second operand) and the\n destination operand (first operand). The source and destination operands can be an XMM register\n or a 64-bit memory location. This instruction allows double-precision floating-point values to be moved\n to and from the low quadword of an XMM register and memory. It cannot be used for register to register\n or memory to memory moves. 
When the destination operand is an XMM register, the high quadword of the\n register remains unchanged.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = arg_2.read()\n if arg_2.size == 64 and arg_1.size == 128:\n arg_3 = (arg_1.read() & 0xffffffffffffffff0000000000000000) | Operators.ZEXTEND(arg_3, 128)\n arg_1.write(arg_3)"} +{"_id": "doc_3211", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves high packed double-precision floating-point value.\n\n Moves a double-precision floating-point value from the source operand (second operand) and the\n destination operand (first operand). The source and destination operands can be an XMM register\n or a 64-bit memory location. This instruction allows double-precision floating-point values to be moved\n to and from the high quadword of an XMM register and memory. It cannot be used for register to\n register or memory to memory moves. When the destination operand is an XMM register, the low quadword\n of the register remains unchanged.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n if arg_2.size == 128:\n assert arg_1.size == 64\n arg_1.write(Operators.EXTRACT(arg_2.read(), 64, 64))\n else:\n assert arg_2.size == 64 and arg_1.size == 128\n arg_3 = Operators.EXTRACT(arg_1.read(), 0, 64) # low part\n arg_1.write(Operators.CONCAT(128, arg_2.read(), arg_3))"} +{"_id": "doc_3212", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Packed subtract.\n\n Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed\n integers of the destination operand (first operand), and stores the packed integer results in the\n destination operand. The source operand can be an MMX(TM) technology register or a 64-bit memory location,\n or it can be an XMM register or a 128-bit memory location. The destination operand can be an MMX or an XMM\n register.\n The Func instruction subtracts packed byte integers. When an individual result is too large or too small\n to be represented in a byte, the result is wrapped around and the low 8 bits are written to the\n destination element.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n arg_3 = []\n arg_4 = arg_1.read()\n arg_5 = arg_2.read()\n for arg_6 in reversed(range(0, arg_1.size, 8)):\n arg_7 = Operators.EXTRACT(arg_4, arg_6, 8)\n arg_8 = Operators.EXTRACT(arg_5, arg_6, 8)\n arg_3.append((arg_7 - arg_8) & 0xff)\n arg_1.write(Operators.CONCAT(8 * len(arg_3), *arg_3))"} +{"_id": "doc_3213", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Move quadword.\n\n Copies a quadword from the source operand (second operand) to the destination operand (first operand).\n The source and destination operands can be MMX(TM) technology registers, XMM registers, or 64-bit memory\n locations. This instruction can be used to move a between two MMX registers or between an MMX register\n and a 64-bit memory location, or to move data between two XMM registers or between an XMM register and\n a 64-bit memory location. 
The instruction cannot be used to transfer data between memory locations.\n When the source operand is an XMM register, the low quadword is moved; when the destination operand is\n an XMM register, the quadword is stored to the low quadword of the register, and the high quadword is\n cleared to all 0s::\n\n Func instruction when operating on MMX registers and memory locations:\n\n DEST = SRC;\n\n Func instruction when source and destination operands are XMM registers:\n\n DEST[63-0] = SRC[63-0];\n\n Func instruction when source operand is XMM register and destination operand is memory location:\n\n DEST = SRC[63-0];\n\n Func instruction when source operand is memory location and destination operand is XMM register:\n\n DEST[63-0] = SRC;\n DEST[127-64] = 0000000000000000H;\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n # mmx to mmx or mmx to mem\n if arg_1.size == arg_2.size and arg_1.size == 64:\n arg_1.write(arg_2.read())\n # two xmm regs\n elif arg_1.size == arg_2.size and arg_1.size == 128:\n arg_3 = Operators.EXTRACT(arg_2.read(), 0, 64)\n arg_1.write(Operators.ZEXTEND(arg_3, 128))\n # mem to xmm\n elif arg_1.size == 128 and arg_2.size == 64:\n arg_1.write(Operators.ZEXTEND(arg_2.read(), arg_1.size))\n # xmm to mem\n elif arg_1.size == 64 and arg_2.size == 128:\n arg_1.write(Operators.EXTRACT(arg_2.read(), 0, arg_1.size))\n else:\n arg_4 = 'Invalid size in Func'\n logger.error(arg_4)\n raise Exception(arg_4)"} +{"_id": "doc_3214", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Move Scalar Double-Precision Floating-Point Value\n\n Moves a scalar double-precision floating-point value from the source\n operand (second operand) to the destination operand (first operand).\n The source and destination operands can be XMM registers or 64-bit memory\n locations. This instruction can be used to move a double-precision\n floating-point value to and from the low quadword of an XMM register and\n a 64-bit memory location, or to move a double-precision floating-point\n value between the low quadwords of two XMM registers. The instruction\n cannot be used to transfer data between memory locations.\n When the source and destination operands are XMM registers, the high\n quadword of the destination operand remains unchanged. When the source\n operand is a memory location and destination operand is an XMM registers,\n the high quadword of the destination operand is cleared to all 0s.\n\n :param cpu: current CPU.\n :param dest: destination operand.\n :param src: source operand.\n \"\"\"\n assert arg_1.type != 'memory' or arg_2.type != 'memory'\n arg_3 = Operators.EXTRACT(arg_2.read(), 0, 64)\n if arg_1.size > arg_2.size:\n arg_3 = Operators.ZEXTEND(arg_3, arg_1.size)\n arg_1.write(arg_3)"} +{"_id": "doc_3215", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Moves a scalar single-precision floating-point value\n\n Moves a scalar single-precision floating-point value from the source operand (second operand)\n to the destination operand (first operand). The source and destination operands can be XMM\n registers or 32-bit memory locations. This instruction can be used to move a single-precision\n floating-point value to and from the low doubleword of an XMM register and a 32-bit memory\n location, or to move a single-precision floating-point value between the low doublewords of\n two XMM registers. 
The instruction cannot be used to transfer data between memory locations.\n When the source and destination operands are XMM registers, the three high-order doublewords of the\n destination operand remain unchanged. When the source operand is a memory location and destination\n operand is an XMM registers, the three high-order doublewords of the destination operand are cleared to all 0s.\n\n //Func instruction when source and destination operands are XMM registers:\n if(IsXMM(Source) && IsXMM(Destination))\n Destination[0..31] = Source[0..31];\n //Destination[32..127] remains unchanged\n //Func instruction when source operand is XMM register and destination operand is memory location:\n else if(IsXMM(Source) && IsMemory(Destination))\n Destination = Source[0..31];\n //Func instruction when source operand is memory location and destination operand is XMM register:\n else {\n Destination[0..31] = Source;\n Destination[32..127] = 0;\n }\n \"\"\"\n if arg_1.type == 'register' and arg_2.type == 'register':\n assert arg_1.size == 128 and arg_2.size == 128\n arg_1.write(arg_1.read() & ~0xffffffff | arg_2.read() & 0xffffffff)\n elif arg_1.type == 'memory':\n assert arg_2.type == 'register'\n arg_1.write(Operators.EXTRACT(arg_2.read(), 0, arg_1.size))\n else:\n assert arg_2.type == 'memory' and arg_1.type == 'register'\n assert arg_2.size == 32 and arg_1.size == 128\n arg_1.write(Operators.ZEXTEND(arg_2.read(), 128))"} +{"_id": "doc_3216", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Constrain state.\n\n :param manticore.core.smtlib.Bool Funct: Constraint to add\n \"\"\"\n arg_1 = arg_0.migrate_expression(arg_1)\n arg_0._Functs.add(arg_1)"} +{"_id": "doc_3217", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Create and return a symbolic buffer of length `nbytes`. The buffer is\n not written into State's memory; write it to the state's memory to\n introduce it into the program state.\n\n :param int nbytes: Length of the new buffer\n :param str label: (keyword arg only) The label to assign to the buffer\n :param bool cstring: (keyword arg only) Whether or not to enforce that the buffer is a cstring\n (i.e. no NULL bytes, except for the last byte). (bool)\n :param taint: Taint identifier of the new buffer\n :type taint: tuple or frozenset\n\n :return: :class:`~manticore.core.smtlib.expression.Expression` representing the buffer.\n \"\"\"\n arg_3 = arg_2.get('label')\n arg_4 = False\n if arg_3 is None:\n arg_3 = 'buffer'\n arg_4 = True\n arg_5 = arg_2.get('taint', frozenset())\n arg_6 = arg_0._constraints.new_array(name=arg_3, index_max=arg_1, value_bits=8, arg_5=arg_5, arg_4=arg_4)\n arg_0._input_symbols.append(arg_6)\n\n if arg_2.get('cstring', False):\n for arg_7 in range(arg_1 - 1):\n arg_0._constraints.add(arg_6[arg_7] != 0)\n\n return arg_6"} +{"_id": "doc_3218", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=arg_4()):\n \"\"\"Create and return a symbolic value that is `nbits` bits wide. 
Assign\n the value to a register or write it into the address space to introduce\n it into the program state.\n\n :param int nbits: The bitwidth of the value returned\n :param str label: The label to assign to the value\n :param taint: Taint identifier of this value\n :type taint: tuple or frozenset\n :return: :class:`~manticore.core.smtlib.expression.Expression` representing the value\n \"\"\"\n assert arg_1 in (1, 4, 8, 16, 32, 64, 128, 256)\n arg_5 = False\n if arg_2 is None:\n arg_2 = 'val'\n arg_5 = True\n\n arg_6 = arg_0._constraints.new_bitvec(arg_1, name=arg_2, arg_3=arg_3, arg_5=arg_5)\n arg_0._input_symbols.append(arg_6)\n return arg_6"} +{"_id": "doc_3219", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Reads `nbytes` of symbolic data from a buffer in memory at `addr` and attempts to\n concretize it\n\n :param int address: Address of buffer to concretize\n :param int nbytes: Size of buffer to concretize\n :param bool constrain: If True, constrain the buffer to the concretized value\n :return: Concrete contents of buffer\n :rtype: list[int]\n \"\"\"\n arg_4 = arg_0.cpu.read_bytes(arg_1, arg_2)\n arg_5 = []\n with arg_0._constraints as temp_cs:\n arg_6 = arg_0.constraints if arg_3 else temp_cs\n for arg_7 in arg_4:\n arg_5.append(arg_0._solver.get_value(arg_6, arg_7))\n arg_6.add(arg_7 == arg_5[-1])\n return arg_5"} +{"_id": "doc_3220", "title": "", "text": "def Func(arg_0, arg_1, arg_2) -> bool:\n \"\"\"Check if expression is True and that it can not be False with current constraints\"\"\"\n arg_3 = arg_0.get_all_values(arg_1, arg_2, maxcnt=2, silent=True)\n return arg_3 == [True]"} +{"_id": "doc_3221", "title": "", "text": "def Func(arg_0, arg_1, arg_2: arg_3, arg_4=10000):\n \"\"\"\n Iteratively finds the Funcimum value for a symbol within given constraints.\n\n :param constraints: constraints that the expression must fulfil\n :param X: a symbol or expression\n :param M: maximum number of iterations allowed\n \"\"\"\n assert isinstance(arg_2, arg_3)\n return arg_0.optimize(arg_1, arg_2, 'Funcimize', arg_4)"} +{"_id": "doc_3222", "title": "", "text": "def Func(arg_0):\n \"\"\"Spawns z3 solver process\"\"\"\n assert '_proc' not in dir(arg_0) or arg_0._proc is None\n try:\n arg_0._proc = Popen(shlex.split(arg_0._command), stdin=PIPE, stdout=PIPE, bufsize=0, universal_newlines=True)\n except OSError as e:\n print(e, \"Probably too many cached expressions? visitors._cache...\")\n # Z3 was removed from the system in the middle of operation\n raise Z3NotFoundError # TODO(mark) don't catch this exception in two places\n\n # run solver specific initializations\n for arg_2 in arg_0._init:\n arg_0._send(arg_2)"} +{"_id": "doc_3223", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Auxiliary method to reset the smtlib external solver to initial defaults\"\"\"\n if arg_0._proc is None:\n arg_0._start_proc()\n else:\n if arg_0.supportFunc:\n arg_0._send(\"(reset)\")\n\n for arg_2 in arg_0._init:\n arg_0._send(arg_2)\n else:\n arg_0._stop_proc()\n arg_0._start_proc()\n if arg_1 is not None:\n arg_0._send(arg_1)"} +{"_id": "doc_3224", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Send a string to the solver.\n\n :param cmd: a SMTLIBv2 command (ex. 
(check-sat))\n \"\"\"\n logger.debug('>%s', arg_1)\n try:\n arg_0._proc.stdout.flush()\n arg_0._proc.stdin.write(f'{cmd}\\n')\n except IOError as e:\n raise SolverError(arg_2(e))"} +{"_id": "doc_3225", "title": "", "text": "def Func(arg_0) -> str:\n \"\"\"Reads the response from the solver\"\"\"\n arg_1, arg_2, arg_3 = arg_0.__readline_and_count()\n arg_4 = [arg_1]\n\n while arg_2 != arg_3:\n arg_1, arg_5, arg_6 = arg_0.__readline_and_count()\n arg_4.append(arg_1)\n arg_2 += arg_5\n arg_3 += arg_6\n\n arg_1 = ''.join(arg_4).strip()\n\n logger.debug('<%s', arg_1)\n if '(error' in arg_4[0]:\n raise Exception(f\"Error in smtlib: {bufl[0]}\")\n return arg_1"} +{"_id": "doc_3226", "title": "", "text": "def Func(arg_0) -> bool:\n \"\"\"\n Check the satisfiability of the current state\n\n :return: whether current state is satisfiable or not.\n \"\"\"\n logger.debug(\"Solver.check() \")\n arg_1 = time.time()\n arg_0._send('(check-sat)')\n arg_2 = arg_0._recv()\n logger.debug(\"Check took %s seconds (%s)\", time.time() - arg_1, arg_2)\n if arg_2 not in ('sat', 'unsat', 'unknown'):\n raise SolverError(arg_2)\n if consider_unknown_as_unsat:\n if arg_2 == 'unknown':\n logger.info('Found an unknown core, probably a solver timeout')\n arg_2 = 'unsat'\n\n if arg_2 == 'unknown':\n raise SolverUnknown(arg_2)\n\n return arg_2 == 'sat'"} +{"_id": "doc_3227", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"Auxiliary method to send an assert\"\"\"\n assert isinstance(arg_1, arg_2)\n arg_3 = translate_to_smtlib(arg_1)\n arg_0._send('(assert %s)' % arg_3)"} +{"_id": "doc_3228", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Ask the solver for one possible assignment for given expression using current set of constraints.\n The current set of expressions must be sat.\n\n NOTE: This is an internal method: it uses the current solver state (set of constraints!).\n \"\"\"\n if not issymbolic(arg_1):\n return arg_1\n assert isinstance(arg_1, Variable)\n\n if isinstance(arg_1, Array):\n arg_2 = bytearray()\n for arg_3 in arg_1:\n arg_4 = translate_to_smtlib(arg_3)\n arg_0._send('(get-value (%s))' % arg_4)\n arg_5 = arg_0._recv()\n arg_2.append(int('0x{:s}'.format(arg_5.split(arg_4)[1][3:-2]), 16))\n return bytes(arg_2)\n else:\n arg_0._send('(get-value (%s))' % arg_1.name)\n arg_6 = arg_0._recv()\n assert arg_6.startswith('((') and arg_6.endswith('))'), arg_6\n\n if isinstance(arg_1, Bool):\n return {'true': True, 'false': False}[arg_6[2:-2].split(' ')[1]]\n elif isinstance(arg_1, BitVec):\n arg_7, arg_8 = arg_0._get_value_fmt\n arg_9 = arg_7.match(arg_6)\n arg_10, arg_11 = arg_9.group('expr'), arg_9.group('value')\n return int(arg_11, arg_8)\n\n raise NotImplementedError(\"Func only implemented for Bool and BitVec\")"} +{"_id": "doc_3229", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check if two potentially symbolic values can be equal\"\"\"\n if isinstance(arg_2, bool):\n if not arg_2:\n return arg_2\n else:\n # if True check if constraints are feasible\n arg_0._reset(arg_1)\n return arg_0._is_sat()\n assert isinstance(arg_2, Bool)\n\n with arg_1 as temp_cs:\n temp_cs.add(arg_2)\n arg_0._reset(temp_cs.to_string(related_to=arg_2))\n return arg_0._is_sat()"} +{"_id": "doc_3230", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=False):\n \"\"\"Returns a list with all the possible values for the symbol x\"\"\"\n if not isinstance(arg_2, Expression):\n return [arg_2]\n assert isinstance(arg_1, ConstraintSet)\n assert isinstance(arg_2, Expression)\n arg_2 = 
simplify(arg_2)\n if arg_3 is None:\n arg_3 = consts.maxsolutions\n\n with arg_1 as temp_cs:\n if isinstance(arg_2, Bool):\n arg_5 = temp_cs.new_bool()\n elif isinstance(arg_2, BitVec):\n arg_5 = temp_cs.new_bitvec(arg_2.size)\n elif isinstance(arg_2, Array):\n arg_5 = temp_cs.new_array(index_max=arg_2.index_max, value_bits=arg_2.value_bits, taint=arg_2.taint).array\n else:\n raise NotImplementedError(f\"Func only implemented for {type(expression)} expression type.\")\n\n temp_cs.add(arg_5 == arg_2)\n arg_0._reset(temp_cs.to_string(related_to=arg_5))\n\n arg_6 = []\n\n while arg_0._is_sat():\n arg_7 = arg_0._getvalue(arg_5)\n arg_6.append(arg_7)\n arg_0._assert(arg_5 != arg_7)\n\n if len(arg_6) >= arg_3:\n if arg_4:\n # do not throw an exception if set to silent\n # Default is not silent, assume user knows\n # what they are doing and will check the size\n # of returned vals list (previous smtlib behavior)\n break\n else:\n raise TooManySolutions(arg_6)\n\n return arg_6"} +{"_id": "doc_3231", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Ask the solver for one possible result of given expression using given set of constraints.\n \"\"\"\n if not issymbolic(arg_2):\n return arg_2\n assert isinstance(arg_2, (Bool, BitVec, Array))\n with arg_1 as temp_cs:\n if isinstance(arg_2, Bool):\n arg_3 = temp_cs.new_bool()\n elif isinstance(arg_2, BitVec):\n arg_3 = temp_cs.new_bitvec(arg_2.size)\n elif isinstance(arg_2, Array):\n arg_3 = []\n arg_4 = []\n for arg_5 in range(arg_2.index_max):\n arg_6 = temp_cs.new_bitvec(arg_2.value_bits)\n arg_3.append(arg_6)\n temp_cs.add(arg_6 == simplify(arg_2[arg_5]))\n\n arg_0._reset(temp_cs)\n if not arg_0._is_sat():\n raise SolverError('Model is not available')\n\n for arg_5 in range(arg_2.index_max):\n arg_0._send('(get-value (%s))' % arg_3[arg_5].name)\n arg_7 = arg_0._recv()\n assert arg_7.startswith('((') and arg_7.endswith('))')\n arg_8, arg_9 = arg_0._Func_fmt\n arg_10 = arg_8.match(arg_7)\n arg_11, arg_12 = arg_10.group('expr'), arg_10.group('value')\n arg_4.append(int(arg_12, arg_9))\n return bytes(arg_4)\n\n temp_cs.add(arg_3 == arg_2)\n\n arg_0._reset(temp_cs)\n\n if not arg_0._is_sat():\n raise SolverError('Model is not available')\n\n arg_0._send('(get-value (%s))' % arg_3.name)\n arg_7 = arg_0._recv()\n if not (arg_7.startswith('((') and arg_7.endswith('))')):\n raise SolverError('SMTLIB error parsing response: %s' % arg_7)\n\n if isinstance(arg_2, Bool):\n return {'true': True, 'false': False}[arg_7[2:-2].split(' ')[1]]\n if isinstance(arg_2, BitVec):\n arg_8, arg_9 = arg_0._Func_fmt\n arg_10 = arg_8.match(arg_7)\n arg_11, arg_12 = arg_10.group('expr'), arg_10.group('value')\n return int(arg_12, arg_9)\n raise NotImplementedError(\"Func only implemented for Bool and BitVec\")"} +{"_id": "doc_3232", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Colors the logging level in the logging record\n \"\"\"\n if arg_0.colors_disabled:\n return arg_0.plain_levelname_format.format(arg_1)\n else:\n return arg_0.colored_levelname_format.format(arg_0.color_map[arg_1], arg_1)"} +{"_id": "doc_3233", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Helper for finding the closest NULL or, effectively NULL byte from a starting address.\n\n :param Cpu cpu:\n :param ConstraintSet constrs: Constraints for current `State`\n :param int ptr: Address to start searching for a zero from\n :return: Offset from `ptr` to first byte that is 0 or an `Expression` that must be zero\n \"\"\"\n\n arg_3 = 0\n while True:\n arg_4 = 
arg_0.read_int(arg_2 + arg_3, 8)\n\n if issymbolic(arg_4):\n if not solver.can_be_true(arg_1, arg_4 != 0):\n break\n else:\n if arg_4 == 0:\n break\n\n arg_3 += 1\n\n return arg_3"} +{"_id": "doc_3234", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return all events that all subclasses have so far registered to publish.\n \"\"\"\n arg_1 = set()\n for arg_0, arg_2 in arg_0.__Func__.items():\n arg_1.update(arg_2)\n return arg_1"} +{"_id": "doc_3235", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a pstat.Stats instance with profiling results if `run` was called with `should_profile=True`.\n Otherwise, returns `None`.\n \"\"\"\n arg_1 = os.path.join(arg_0.workspace, 'profiling.bin')\n try:\n return pstats.Stats(arg_1)\n except Exception as e:\n logger.debug(f'Failed to get profiling stats: {e}')\n return None"} +{"_id": "doc_3236", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=None, arg_3=False):\n \"\"\"\n Runs analysis.\n\n :param int procs: Number of parallel worker processes\n :param timeout: Analysis timeout, in seconds\n \"\"\"\n assert not arg_0.Funcning, \"Manticore is already Funcning.\"\n arg_0._start_Func()\n\n arg_0._last_Func_stats['time_started'] = time.time()\n with arg_0.shutdown_timeout(arg_2):\n arg_0._start_workers(arg_1, profiling=arg_3)\n\n arg_0._join_workers()\n arg_0._finish_Func(profiling=arg_3)"} +{"_id": "doc_3237", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Enqueue it for processing \"\"\"\n arg_0._states.append(arg_1)\n arg_0._lock.notify_all()\n return arg_1"} +{"_id": "doc_3238", "title": "", "text": "def Func(arg_0):\n \"\"\" Dequeue a state with the max priority \"\"\"\n\n # A shutdown has been requested\n if arg_0.is_shutdown():\n return None\n\n # if not more states in the queue, let's wait for some forks\n while len(arg_0._states) == 0:\n # if no worker is running, bail out\n if arg_0.running == 0:\n return None\n # if a shutdown has been requested, bail out\n if arg_0.is_shutdown():\n return None\n # if there ares actually some workers running, wait for state forks\n logger.debug(\"Waiting for available states\")\n arg_0._lock.wait()\n\n arg_1 = arg_0._policy.choice(list(arg_0._states))\n if arg_1 is None:\n return None\n del arg_0._states[arg_0._states.index(arg_1)]\n return arg_1"} +{"_id": "doc_3239", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='ALL', arg_4=None):\n \"\"\"\n Fork state on expression concretizations.\n Using policy build a list of solutions for expression.\n For the state on each solution setting the new state with setstate\n\n For example if expression is a Bool it may have 2 solutions. True or False.\n\n Parent\n (expression = ??)\n\n Child1 Child2\n (expression = True) (expression = True)\n setstate(True) setstate(False)\n\n The optional setstate() function is supposed to set the concrete value\n in the child state.\n\n \"\"\"\n assert isinstance(arg_2, Expression)\n\n if arg_4 is None:\n arg_4 = lambda x, y: None\n\n # Find a set of solutions for expression\n arg_5 = arg_1.concretize(arg_2, arg_3)\n\n if not arg_5:\n raise ExecutorError(\"Forking on unfeasible constraint set\")\n\n if len(arg_5) == 1:\n arg_4(arg_1, arg_5[0])\n return arg_1\n\n logger.info(\"Forking. Policy: %s. 
Values: %s\",\n arg_3,\n ', '.join(f'0x{sol:x}' for arg_6 in arg_5))\n\n arg_0._publish('will_Func_state', arg_1, arg_2, arg_5, arg_3)\n\n # Build and enqueue a state for each solution\n arg_7 = []\n for arg_8 in arg_5:\n with arg_1 as new_state:\n new_state.constrain(arg_2 == arg_8)\n\n # and set the PC of the new state to the concrete pc-dest\n #(or other register or memory address to concrete)\n arg_4(new_state, arg_8)\n\n arg_0._publish('did_Func_state', new_state, arg_2, arg_8, arg_3)\n\n # enqueue new_state\n arg_9 = arg_0.enqueue(new_state)\n # maintain a list of children for logging purpose\n arg_7.append(arg_9)\n\n logger.info(\"Forking current state into states %r\", arg_7)\n return None"} +{"_id": "doc_3240", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Entry point of the Executor; called by workers to start analysis.\n \"\"\"\n # policy_order=self.policy_order\n # policy=self.policy\n arg_1 = None\n arg_2 = None\n\n with WithKeyboardInterruptAs(arg_0.shutdown):\n # notify siblings we are about to start a Func\n arg_0._notify_start_Func()\n\n logger.debug(\"Starting Manticore Symbolic Emulator Worker (pid %d).\", os.getpid())\n arg_3 = Z3Solver()\n while not arg_0.is_shutdown():\n try: # handle fatal errors: exceptions in Manticore\n try: # handle external (e.g. solver) errors, and executor control exceptions\n # select a suitable state to analyze\n if arg_1 is None:\n with arg_0._lock:\n # notify siblings we are about to stop this Func\n arg_0._notify_stop_Func()\n try:\n # Select a single state_id\n arg_2 = arg_0.get()\n # load selected state from secondary storage\n if arg_2 is not None:\n arg_0._publish('will_load_state', arg_2)\n arg_1 = arg_0._workspace.load_state(arg_2)\n arg_0.forward_events_from(arg_1, True)\n arg_0._publish('did_load_state', arg_1, arg_2)\n logger.info(\"load state %r\", arg_2)\n # notify siblings we have a state to play with\n finally:\n arg_0._notify_start_Func()\n\n # If current_state is still None. 
We are done.\n if arg_1 is None:\n logger.debug(\"No more states in the queue, byte bye!\")\n break\n\n assert arg_1 is not None\n assert arg_1.constraints is arg_1.platform.constraints\n\n # Allows to terminate manticore worker on user request\n while not arg_0.is_shutdown():\n if not arg_1.execute():\n break\n else:\n # Notify this worker is done\n arg_0._publish('will_terminate_state', arg_1, arg_2, TerminateState('Shutdown'))\n arg_1 = None\n\n # Handling Forking and terminating exceptions\n except Concretize as e:\n # expression\n # policy\n # setstate()\n logger.debug(\"Generic state fork on condition\")\n arg_1 = arg_0.fork(arg_1, e.expression, e.policy, e.setstate)\n\n except TerminateState as e:\n # Notify this worker is done\n arg_0._publish('will_terminate_state', arg_1, arg_2, e)\n\n logger.debug(\"Generic terminate state\")\n if e.testcase:\n arg_0._publish('internal_generate_testcase', arg_1, message=str(e))\n arg_1 = None\n\n except SolverError as e:\n # raise\n import traceback\n arg_4 = traceback.format_exc()\n logger.error(\"Exception: %s\\n%s\", str(e), arg_4)\n\n # Notify this state is done\n arg_0._publish('will_terminate_state', arg_1, arg_2, e)\n\n if arg_3.check(arg_1.constraints):\n arg_0._publish('internal_generate_testcase', arg_1, message=\"Solver failed\" + str(e))\n arg_1 = None\n\n except (Exception, AssertionError) as e:\n # raise\n import traceback\n arg_4 = traceback.format_exc()\n logger.error(\"Exception: %s\\n%s\", str(e), arg_4)\n # Notify this worker is done\n arg_0._publish('will_terminate_state', arg_1, arg_2, e)\n arg_1 = None\n\n assert arg_1 is None or arg_0.is_shutdown()\n\n # notify siblings we are about to stop this Func\n arg_0._notify_stop_Func()"} +{"_id": "doc_3241", "title": "", "text": "def Func(arg_0, arg_1, arg_2='', **arg_3):\n \"\"\"\n Constructor for Decree binary analysis.\n\n :param str path: Path to binary to analyze\n :param str concrete_start: Concrete stdin to use before symbolic input\n :param kwargs: Forwarded to the Manticore constructor\n :return: Manticore instance, initialized with a Decree State\n :rtype: Manticore\n \"\"\"\n try:\n return arg_0(_make_Func(arg_1, arg_2), **arg_3)\n except KeyError: # FIXME(mark) magic parsing for DECREE should raise better error\n raise Exception(f'Invalid binary: {path}')"} +{"_id": "doc_3242", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n 'Invoke all registered generic hooks'\n\n # Ignore symbolic pc.\n # TODO(yan): Should we ask the solver if any of the hooks are possible,\n # and execute those that are?\n\n if issymbolic(arg_2):\n return\n\n # Invoke all pc-specific hooks\n for arg_4 in arg_0._hooks.get(arg_2, []):\n arg_4(arg_1)\n\n # Invoke all pc-agnostic hooks\n for arg_4 in arg_0._hooks.get(None, []):\n arg_4(arg_1)"} +{"_id": "doc_3243", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n A helper method used to Func a symbol name into a memory address when\n injecting hooks for analysis.\n\n :param symbol: function name to be Funcd\n :type symbol: string\n\n :param line: if more functions present, optional line number can be included\n :type line: int or None\n \"\"\"\n\n with open(arg_0.binary_path, 'rb') as f:\n\n arg_2 = ELFFile(f)\n\n # iterate over sections and identify symbol table section\n for arg_3 in arg_2.iter_sections():\n if not isinstance(arg_3, SymbolTableSection):\n continue\n\n # get list of symbols by name\n arg_4 = arg_3.get_symbol_by_name(arg_1)\n if not arg_4:\n continue\n\n # return first indexed memory address for the symbol,\n 
return arg_4[0].entry['st_value']\n\n raise ValueError(f\"The {self.binary_path} ELFfile does not contain symbol {symbol}\")"} +{"_id": "doc_3244", "title": "", "text": "def Func(arg_0):\n \"\"\"\n helper method for getting all binary symbols with SANDSHREW_ prepended.\n We do this in order to provide the symbols Manticore should hook on to\n perform main analysis.\n\n :param binary: str for binary to instrospect.\n :rtype list: list of symbols from binary\n \"\"\"\n\n def substr_after(arg_1, arg_2):\n return arg_1.partition(arg_2)[2]\n\n\n with open(arg_0, 'rb') as f:\n arg_3 = ELFFile(f)\n\n for arg_4 in arg_3.iter_sections():\n if not isinstance(arg_4, SymbolTableSection):\n continue\n\n arg_5 = [sym.name for sym in arg_4.iter_symbols() if sym]\n return [substr_after(arg_6, PREPEND_SYM) for arg_6 in arg_5\n if arg_6.startswith(PREPEND_SYM)]"} +{"_id": "doc_3245", "title": "", "text": "def Func(arg_0: arg_1) -> _Group:\n \"\"\"\n Get a configuration variable group named |name|\n \"\"\"\n global arg_3\n\n if arg_0 in arg_3:\n return arg_3[arg_0]\n\n arg_2 = _Group(arg_0)\n arg_3[arg_0] = arg_2\n\n return arg_2"} +{"_id": "doc_3246", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Save current config state to an yml file stream identified by |f|\n\n :param f: where to write the config file\n \"\"\"\n global _groups\n\n arg_1 = {}\n for arg_2, arg_3 in _groups.items():\n arg_4 = {var.name: var.value for var in arg_3.updated_vars()}\n if not arg_4:\n continue\n arg_1[arg_2] = arg_4\n\n yaml.safe_dump(arg_1, arg_0, line_break=True)"} +{"_id": "doc_3247", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load an yml-formatted configuration from file stream |f|\n\n :param file f: Where to read the config.\n \"\"\"\n\n try:\n arg_1 = yaml.safe_load(arg_0)\n for arg_2, arg_3 in arg_1.items():\n arg_4 = get_group(arg_2)\n\n for arg_5, arg_6 in arg_3.items():\n arg_4.update(arg_5)\n setattr(arg_4, arg_5, arg_6)\n # Any exception here should trigger the warning; from not being able to parse yaml\n # to reading poorly formatted values\n except Exception:\n raise ConfigError(\"Failed reading config file. Do you have a local [.]manticore.yml file?\")"} +{"_id": "doc_3248", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Load config overrides from the yml file at |path|, or from default paths. If a path\n is provided and it does not exist, raise an exception\n\n Default paths: ./mcore.yml, ./.mcore.yml, ./manticore.yml, ./.manticore.yml.\n \"\"\"\n\n if arg_0 is not None:\n arg_1 = [arg_0]\n else:\n arg_2 = ['mcore.yml', 'manticore.yml']\n arg_1 = [os.path.join('.', ''.join(x)) for x in product(['', '.'], arg_2)]\n\n for arg_3 in arg_1:\n try:\n with open(arg_3, 'r') as yml_f:\n logger.info(f'Reading configuration from {name}')\n parse_config(yml_f)\n break\n except FileNotFoundError:\n pass\n else:\n if arg_0 is not None:\n raise FileNotFoundError(f\"'{path}' not found for config overrides\")"} +{"_id": "doc_3249", "title": "", "text": "def Func(arg_0: arg_1.ArgumentParser, arg_3: arg_1.Namespace):\n \"\"\"\n Bring in provided config values to the args parser, and import entries to the config\n from all arguments that were actually passed on the command line\n\n :param parser: The arg parser\n :param args: The value that parser.parse_args returned\n \"\"\"\n # First, load a local config file, if passed or look for one in pwd if it wasn't.\n load_overrides(arg_3.config)\n\n # Get a list of defined config vals. 
If these are passed on the command line,\n # update them in their correct group, not in the cli group\n arg_5 = list(get_config_keys())\n\n arg_6 = vars(arg_3)\n\n # Bring in the options keys into args\n arg_7 = get_group('cli')\n\n # Place all command line args into the cli group (for saving in the workspace). If\n # the value is set on command line, then it takes precedence; otherwise we try to\n # read it from the config file's cli group.\n for arg_8 in arg_6:\n arg_9 = arg_0.get_default(arg_8)\n arg_10 = getattr(arg_3, arg_8)\n if arg_9 is not arg_10:\n if arg_8 not in arg_5:\n arg_7.update(arg_8, value=arg_10)\n else:\n # Update a var's native group\n arg_11, arg_12 = arg_8.split('.')\n arg_13 = get_group(arg_11)\n setattr(arg_13, arg_12, arg_10)\n else:\n if arg_8 in arg_7:\n setattr(arg_3, arg_8, getattr(arg_7, arg_8))"} +{"_id": "doc_3250", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3=None, arg_4=None, arg_5: arg_2=None):\n \"\"\"\n Like add, but can tolerate existing values; also Funcs the value.\n\n Mostly used for setting fields from imported INI files and modified CLI flags.\n \"\"\"\n if arg_1 in arg_0._vars:\n arg_5 = arg_5 or arg_0._vars[arg_1].description\n arg_4 = arg_4 or arg_0._vars[arg_1].default\n elif arg_1 == 'name':\n raise ConfigError(\"'name' is a reserved name for a group.\")\n\n arg_6 = _Var(arg_1, arg_5=arg_5, arg_4=arg_4, defined=False)\n arg_6.value = arg_3\n arg_0._vars[arg_1] = arg_6"} +{"_id": "doc_3251", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> arg_2:\n \"\"\"\n Return the description, or a help string of variable identified by |name|.\n \"\"\"\n if arg_1 not in arg_0._vars:\n raise ConfigError(f\"{self.name}.{name} not defined.\")\n\n return arg_0._vars[arg_1].description"} +{"_id": "doc_3252", "title": "", "text": "def Func(arg_0) -> str:\n \"\"\"Returns the tuple type signature for the arguments of the contract constructor.\"\"\"\n arg_1 = arg_0._constructor_abi_item\n return '()' if arg_1 is None else arg_0.tuple_signature_for_components(arg_1['inputs'])"} +{"_id": "doc_3253", "title": "", "text": "def Func(arg_0) -> Dict[str, Any]:\n \"\"\"Returns a copy of the Solidity JSON ABI item for the contract constructor.\n\n The content of the returned dict is described at https://solidity.readthedocs.io/en/latest/abi-spec.html#json_\n \"\"\"\n arg_1 = arg_0._Func_item\n if arg_1:\n return dict(arg_1)\n return {'inputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'constructor'}"} +{"_id": "doc_3254", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> Dict[str, Any]:\n \"\"\"Returns a copy of the Solidity JSON ABI item for the function associated with the selector ``hsh``.\n\n If no normal contract function has the specified selector, a dict describing the default or non-default\n fallback function is returned.\n\n The content of the returned dict is described at https://solidity.readthedocs.io/en/latest/abi-spec.html#json_\n \"\"\"\n if not isinstance(arg_1, (arg_2, bytearray)):\n raise TypeError('The selector argument must be a concrete byte array')\n arg_3 = arg_0._function_signatures_by_selector.get(arg_1)\n if arg_3 is not None:\n return dict(arg_0._function_abi_items_by_signature[arg_3])\n arg_4 = arg_0._fallback_function_abi_item\n if arg_4 is not None:\n return dict(arg_4)\n # An item describing the default fallback function.\n return {'payable': False, 'stateMutability': 'nonpayable', 'type': 'fallback'}"} +{"_id": "doc_3255", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"Returns the 
tuple type signature for the arguments of the function associated with the selector ``hsh``.\n\n If no normal contract function has the specified selector,\n the empty tuple type signature ``'()'`` is returned.\n \"\"\"\n if not isinstance(arg_1, (arg_2, bytearray)):\n raise TypeError('The selector argument must be a concrete byte array')\n arg_3 = arg_0._function_signatures_by_selector.get(arg_1)\n return '()' if arg_3 is None else arg_3[arg_3.find('('):]"} +{"_id": "doc_3256", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> Optional[str]:\n \"\"\"Returns the signature of the normal function with the selector ``hsh``,\n or ``None`` if no such function exists.\n\n This function returns ``None`` for any selector that will be dispatched to a fallback function.\n \"\"\"\n if not isinstance(arg_1, (arg_2, bytearray)):\n raise TypeError('The selector argument must be a concrete byte array')\n return arg_0._function_signatures_by_selector.get(arg_1)"} +{"_id": "doc_3257", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Catches did_map_memory and copies the mapping into Manticore\n \"\"\"\n logger.info(' '.join((\"Mapping Memory @\",\n hex(arg_1) if type(arg_1) is int else \"0x??\",\n hr_size(arg_2), \"-\",\n arg_3, \"-\",\n f\"{name}:{hex(offset) if name else ''}\", \"->\",\n hex(arg_6))))\n arg_0._emu.mem_map(arg_1, arg_2, convert_permissions(arg_3))\n arg_0.copy_memory(arg_1, arg_2)"} +{"_id": "doc_3258", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Unmap Unicorn maps when Manticore unmaps them\"\"\"\n logger.info(f\"Unmapping memory from {hex(start)} to {hex(start + size)}\")\n\n arg_3 = (1 << 12) - 1\n if (arg_1 & arg_3) != 0:\n logger.error(\"Memory to be unmapped is not aligned to a page\")\n\n if (arg_2 & arg_3) != 0:\n arg_2 = ((arg_2 >> 12) + 1) << 12\n logger.warning(\"Forcing unmap size to align to a page\")\n\n arg_0._emu.mem_unmap(arg_1, arg_2)"} +{"_id": "doc_3259", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Set memory protections in Unicorn correctly \"\"\"\n logger.info(f\"Changing permissions on {hex(start)}:{hex(start + size)} to {perms}\")\n arg_0._emu.mem_protect(arg_1, arg_2, convert_permissions(arg_3))"} +{"_id": "doc_3260", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Unicorn hook that transfers control to Manticore so it can execute the syscall\n \"\"\"\n logger.debug(f\"Stopping emulation at {hex(uc.reg_read(self._to_unicorn_id('RIP')))} to perform syscall\")\n arg_0.sync_unicorn_to_manticore()\n from ..native.cpu.abstractcpu import Syscall\n arg_0._to_raise = Syscall()\n arg_1.emu_stop()"} +{"_id": "doc_3261", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Wrapper that runs the _step function in a loop while handling exceptions\n \"\"\"\n\n # The emulation might restart if Unicorn needs to bring in a memory map\n # or bring a value from Manticore state.\n while True:\n\n # Try emulation\n arg_0._should_try_again = False\n arg_0._to_raise = None\n\n arg_0._step(arg_1)\n\n if not arg_0._should_try_again:\n break"} +{"_id": "doc_3262", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Copy registers and written memory back into Manticore\n \"\"\"\n arg_0.write_backs_disabled = True\n for arg_2 in arg_0.registers:\n arg_3 = arg_0._emu.reg_read(arg_0._to_unicorn_id(arg_2))\n arg_0._cpu.write_register(arg_2, arg_3)\n if len(arg_0._mem_delta) > 0:\n logger.debug(f\"Syncing {len(self._mem_delta)} writes back into Manticore\")\n for arg_4 in 
arg_0._mem_delta:\n arg_5, arg_6 = arg_0._mem_delta[arg_4]\n arg_0._cpu.write_int(arg_4, arg_5, arg_6 * 8)\n arg_0.write_backs_disabled = False\n arg_0._mem_delta = {}"} +{"_id": "doc_3263", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Copy memory writes from Manticore back into Unicorn in real-time \"\"\"\n if arg_0.write_backs_disabled:\n return\n if type(arg_2) is bytes:\n arg_0._emu.mem_write(arg_1, arg_2)\n else:\n if issymbolic(arg_2):\n arg_4 = [Operators.CHR(Operators.EXTRACT(arg_2, offset, 8)) for offset in range(0, arg_3, 8)]\n arg_5 = []\n for arg_6 in arg_4:\n if issymbolic(arg_6):\n arg_6 = chr(solver.get_value(arg_0._cpu.memory.constraints, arg_6))\n arg_5.append(arg_6)\n arg_4 = arg_5\n else:\n arg_4 = [Operators.CHR(Operators.EXTRACT(arg_2, offset, 8)) for offset in range(0, arg_3, 8)]\n logger.debug(f\"Writing back {hr_size(size // 8)} to {hex(where)}: {data}\")\n # TODO - the extra encoding is to handle null bytes output as strings when we concretize. That's probably a bug.\n arg_0._emu.mem_write(arg_1, b''.join(arg_7.encode('utf-8') if type(arg_7) is str else arg_7 for arg_7 in arg_4))"} +{"_id": "doc_3264", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Sync register state from Manticore -> Unicorn\"\"\"\n if arg_0.write_backs_disabled:\n return\n if issymbolic(arg_2):\n logger.warning(\"Skipping Symbolic write-back\")\n return\n if arg_1 in arg_0.flag_registers:\n arg_0._emu.reg_write(arg_0._to_unicorn_id('EFLAGS'), arg_0._cpu.read_register('EFLAGS'))\n return\n arg_0._emu.reg_write(arg_0._to_unicorn_id(arg_1), arg_2)"} +{"_id": "doc_3265", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\" Only useful for setting FS right now. \"\"\"\n logger.info(\"Updating selector %s to 0x%02x (%s bytes) (%s)\", arg_1, arg_2, arg_3, arg_4)\n if arg_1 == 99:\n arg_0.set_fs(arg_2)\n else:\n logger.error(\"No way to write segment: %d\", arg_1)"} +{"_id": "doc_3266", "title": "", "text": "def Func(arg_0: arg_1):\n \"\"\"A decorator for marking functions as Func. \"\"\"\n assert isinstance(arg_0, arg_1), \"The Func decorator requires a message string argument.\"\n\n def decorator(arg_2):\n @wraps(arg_2)\n def wrapper(*arg_3, **arg_4):\n warnings.warn(f\"`{func.__qualname__}` is Func. {message}\",\n category=ManticoreDeprecationWarning, stacklevel=2)\n return arg_2(*arg_3, **arg_4)\n\n return wrapper\n\n return decorator"} +{"_id": "doc_3267", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Produce Funcutations of `lst`, where Funcutations are mutated by `func`. Used for flipping constraints. highly\n possible that returned constraints can be unsat this does it blindly, without any attention to the constraints\n themselves\n\n Considering lst as a list of constraints, e.g.\n\n [ C1, C2, C3 ]\n\n we'd like to consider scenarios of all possible Funcutations of flipped constraints, excluding the original list.\n So we'd like to generate:\n\n [ func(C1), C2 , C3 ],\n [ C1 , func(C2), C3 ],\n [ func(C1), func(C2), C3 ],\n [ C1 , C2 , func(C3)],\n .. 
etc\n\n This is effectively treating the list of constraints as a bitmask of width len(lst) and counting up, skipping the\n 0th element (unmodified array).\n\n The code below yields lists of constraints Funcuted as above by treating list indeces as bitmasks from 1 to\n 2**len(lst) and applying func to all the set bit offsets.\n\n '''\n for arg_2 in range(1, 2**len(arg_0)):\n yield [arg_1(arg_4) if (1< 0\n arg_0.running = list(range(arg_5))\n\n # Each process can wait for one timeout\n arg_0.timers = [None] * arg_5\n # each fd has a waitlist\n arg_0.rwait = [set() for _ in range(arg_6)]\n arg_0.twait = [set() for _ in range(arg_6)]\n\n # Install event forwarders\n for arg_11 in arg_0.procs:\n arg_0.forward_events_from(arg_11)"} +{"_id": "doc_3271", "title": "", "text": "def Func(arg_0):\n \"\"\"\n ARM kernel helpers\n\n https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt\n \"\"\"\n\n arg_1 = bytearray(b'\\xf1\\xde\\xfd\\xe7' * 1024)\n\n # Extracted from a RPi2\n arg_2 = binascii.unhexlify(\n 'ff0300ea' +\n '650400ea' +\n 'f0ff9fe5' +\n '430400ea' +\n '220400ea' +\n '810400ea' +\n '000400ea' +\n '870400ea'\n )\n\n # XXX(yan): The following implementations of cmpxchg and cmpxchg64 were\n # handwritten to not use any exclusive instructions (e.g. ldrexd) or\n # locking. For actual implementations, refer to\n # arch/arm64/kernel/kuser32.S in the Linux source code.\n arg_3 = binascii.unhexlify(\n '30002de9' + # push {r4, r5}\n '08c09de5' + # ldr ip, [sp, #8]\n '30009ce8' + # ldm ip, {r4, r5}\n '010055e1' + # cmp r5, r1\n '00005401' + # cmpeq r4, r0\n '0100a013' + # movne r0, #1\n '0000a003' + # moveq r0, #0\n '0c008c08' + # stmeq ip, {r2, r3}\n '3000bde8' + # pop {r4, r5}\n '1eff2fe1' # bx lr\n )\n\n arg_4 = binascii.unhexlify(\n '5bf07ff5' + # dmb ish\n '1eff2fe1' # bx lr\n )\n\n arg_5 = binascii.unhexlify(\n '003092e5' + # ldr r3, [r2]\n '000053e1' + # cmp r3, r0\n '0000a003' + # moveq r0, #0\n '00108205' + # streq r1, [r2]\n '0100a013' + # movne r0, #1\n '1eff2fe1' # bx lr\n )\n\n # Map a TLS segment\n arg_0._arm_tls_memory = arg_0.current.memory.mmap(None, 4, 'rw ')\n\n arg_7 = binascii.unhexlify(\n '04009FE5' + # ldr r0, [pc, #4]\n '010090e8' + # ldm r0, {r0}\n '1eff2fe1' # bx lr\n ) + struct.pack(' 1:\n logger.debug(\"SCHED:\")\n logger.debug(f\"\\tProcess: {self.procs!r}\")\n logger.debug(f\"\\tRunning: {self.running!r}\")\n logger.debug(f\"\\tRWait: {self.rwait!r}\")\n logger.debug(f\"\\tTWait: {self.twait!r}\")\n logger.debug(f\"\\tTimers: {self.timers!r}\")\n logger.debug(f\"\\tCurrent clock: {self.clocks}\")\n logger.debug(f\"\\tCurrent cpu: {self._current}\")\n\n if len(arg_0.running) == 0:\n logger.debug(\"None running checking if there is some process waiting for a timeout\")\n if all([arg_1 is None for arg_1 in arg_0.timers]):\n raise Deadlock()\n arg_0.clocks = min(arg_1 for arg_1 in arg_0.timers if arg_1 is not None) + 1\n arg_0.check_timers()\n assert len(arg_0.running) != 0, \"DEADLOCK!\"\n arg_0._current = arg_0.running[0]\n return\n arg_4 = (arg_0.running.index(arg_0._current) + 1) % len(arg_0.running)\n arg_5 = arg_0.running[arg_4]\n if len(arg_0.procs) > 1:\n logger.debug(f\"\\tTransfer control from process {self._current} to {next_running_idx}\")\n arg_0._current = arg_5"} +{"_id": "doc_3280", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Wait for file descriptors or timeout.\n Adds the current process in the correspondent Funcing list and\n yield the cpu to another running process.\n \"\"\"\n logger.debug(\"WAIT:\")\n 
logger.debug(f\"\\tProcess {self._current} is going to Func for [ {readfds!r} {writefds!r} {timeout!r} ]\")\n logger.debug(f\"\\tProcess: {self.procs!r}\")\n logger.debug(f\"\\tRunning: {self.running!r}\")\n logger.debug(f\"\\tRWait: {self.rFunc!r}\")\n logger.debug(f\"\\tTWait: {self.tFunc!r}\")\n logger.debug(f\"\\tTimers: {self.timers!r}\")\n\n for arg_4 in arg_1:\n arg_0.rFunc[arg_4].add(arg_0._current)\n for arg_4 in arg_2:\n arg_0.tFunc[arg_4].add(arg_0._current)\n if arg_3 is not None:\n arg_0.timers[arg_0._current] = arg_0.clocks + arg_3\n arg_7 = arg_0._current\n # self.sched()\n arg_8 = (arg_0.running.index(arg_7) + 1) % len(arg_0.running)\n arg_0._current = arg_0.running[arg_8]\n logger.debug(f\"\\tTransfer control from process {procid} to {self._current}\")\n logger.debug(f\"\\tREMOVING {procid!r} from {self.running!r}. Current: {self._current!r}\")\n arg_0.running.remove(arg_7)\n if arg_0._current not in arg_0.running:\n logger.debug(\"\\tCurrent not running. Checking for timers...\")\n arg_0._current = None\n arg_0.check_timers()"} +{"_id": "doc_3281", "title": "", "text": "def Func(arg_0):\n \"\"\" Awake process if timer has expired \"\"\"\n if arg_0._current is None:\n # Advance the clocks. Go to future!!\n arg_1 = min([arg_0.clocks] + [x for x in arg_0.timers if x is not None]) + 1\n logger.debug(f\"Advancing the clock from {self.clocks} to {advance}\")\n arg_0.clocks = arg_1\n for arg_3 in range(len(arg_0.timers)):\n if arg_0.timers[arg_3] is not None:\n if arg_0.clocks > arg_0.timers[arg_3]:\n arg_0.procs[arg_3].PC += arg_0.procs[arg_3].instruction.size\n arg_0.awake(arg_3)"} +{"_id": "doc_3282", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Compute total load size of interpreter.\n\n :param ELFFile interp: interpreter ELF .so\n :return: total load size of interpreter, not aligned\n :rtype: int\n \"\"\"\n arg_1 = [x for x in arg_0.iter_segments() if x.header.p_type == 'PT_LOAD']\n arg_2 = arg_1[-1]\n return arg_2.header.p_vaddr + arg_2.header.p_memsz"} +{"_id": "doc_3283", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n A version of openat that includes a symbolic path and symbolic directory file descriptor\n\n :param dirfd: directory file descriptor\n :param buf: address of zero-terminated pathname\n :param flags: file access bits\n :param mode: file permission mode\n \"\"\"\n\n if issymbolic(arg_1):\n logger.debug(\"Ask to read from a symbolic directory file descriptor!!\")\n # Constrain to a valid fd and one past the end of fds\n arg_0.constraints.add(arg_1 >= 0)\n arg_0.constraints.add(arg_1 <= len(arg_0.files))\n raise ConcretizeArgument(arg_0, 0)\n\n if issymbolic(arg_2):\n logger.debug(\"Ask to read to a symbolic buffer\")\n raise ConcretizeArgument(arg_0, 1)\n\n return super().Func(arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_3284", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\" receive - receive bytes from a file descriptor\n\n The receive system call reads up to count bytes from file descriptor fd to the\n buffer pointed to by buf. 
If count is zero, receive returns 0 and optionally\n sets *rx_bytes to zero.\n\n :param cpu: current CPU.\n :param fd: a valid file descriptor\n :param buf: a memory buffer\n :param count: max number of bytes to receive\n :param rx_bytes: if valid, points to the actual number of bytes received\n :return: 0 Success\n EBADF fd is not a valid file descriptor or is not open\n EFAULT buf or rx_bytes points to an invalid address.\n \"\"\"\n arg_6 = ''\n if arg_4 != 0:\n if not arg_0._is_open(arg_2):\n logger.info(\"RECEIVE: Not valid file descriptor on receive. Returning EBADF\")\n return Decree.CGC_EBADF\n\n # TODO check count bytes from buf\n if arg_3 not in arg_1.memory: # or not buf+count in cpu.memory:\n logger.info(\"RECEIVE: buf points to invalid address. Returning EFAULT\")\n return Decree.CGC_EFAULT\n\n #import random\n #count = random.randint(1,count)\n if arg_2 > 2 and arg_0.files[arg_2].is_empty():\n arg_1.PC -= arg_1.instruction.size\n arg_0.wait([arg_2], [], None)\n raise RestartSyscall()\n\n # get some potential delay\n # if random.randint(5) == 0 and count > 1:\n # count = count/2\n\n # Read the data and put it in memory\n arg_6 = arg_0.files[arg_2].receive(arg_4)\n arg_0.syscall_trace.append((\"_receive\", arg_2, arg_6))\n arg_1.write_bytes(arg_3, arg_6)\n\n arg_0.signal_receive(arg_2)\n\n # TODO check 4 bytes from rx_bytes\n if arg_5:\n if arg_5 not in arg_1.memory:\n logger.info(\"RECEIVE: Not valid file descriptor on receive. Returning EFAULT\")\n return Decree.CGC_EFAULT\n arg_1.write_int(arg_5, len(arg_6), 32)\n\n logger.info(\"RECEIVE(%d, 0x%08x, %d, 0x%08x) -> <%s> (size:%d)\" % (arg_2, arg_3, arg_4, arg_5, repr(arg_6)[:min(arg_4, 10)], len(arg_6)))\n return 0"} +{"_id": "doc_3285", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" deallocate - remove allocations\n The deallocate system call deletes the allocations for the specified\n address range, and causes further references to the addresses within the\n range to generate invalid memory accesses. The region is also\n automatically deallocated when the process is terminated.\n\n The address addr must be a multiple of the page size. The length parameter\n specifies the size of the region to be deallocated in bytes. All pages\n containing a part of the indicated range are deallocated, and subsequent\n references will terminate the process. 
It is not an error if the indicated\n range does not contain any allocated pages.\n\n The deallocate function is invoked through system call number 6.\n\n :param cpu: current CPU\n :param addr: the starting address to unmap.\n :param size: the size of the portion to unmap.\n :return 0 On success\n EINVAL addr is not page aligned.\n EINVAL length is zero.\n EINVAL any part of the region being deallocated is outside the valid\n address range of the process.\n\n :param cpu: current CPU.\n :return: C{0} on success.\n \"\"\"\n logger.info(\"DEALLOCATE(0x%08x, %d)\" % (arg_2, arg_3))\n\n if arg_2 & 0xfff != 0:\n logger.info(\"DEALLOCATE: addr is not page aligned\")\n return Decree.CGC_EINVAL\n if arg_3 == 0:\n logger.info(\"DEALLOCATE:length is zero\")\n return Decree.CGC_EINVAL\n # unlikely AND WRONG!!!\n # if addr > Decree.CGC_SSIZE_MAX or addr+size > Decree.CGC_SSIZE_MAX:\n # logger.info(\"DEALLOCATE: part of the region being deallocated is outside the valid address range of the process\")\n # return Decree.CGC_EINVAL\n\n arg_1.memory.munmap(arg_2, arg_3)\n arg_0.syscall_trace.append((\"_deallocate\", -1, arg_3))\n return 0"} +{"_id": "doc_3286", "title": "", "text": "def Func(arg_0):\n \"\"\" Yield CPU.\n This will choose another process from the RUNNNIG list and change\n current running process. May give the same cpu if only one running\n process.\n \"\"\"\n if len(arg_0.procs) > 1:\n logger.info(\"SCHED:\")\n logger.info(\"\\tProcess: %r\", arg_0.procs)\n logger.info(\"\\tRunning: %r\", arg_0.running)\n logger.info(\"\\tRWait: %r\", arg_0.rwait)\n logger.info(\"\\tTWait: %r\", arg_0.twait)\n logger.info(\"\\tTimers: %r\", arg_0.timers)\n logger.info(\"\\tCurrent clock: %d\", arg_0.clocks)\n logger.info(\"\\tCurrent cpu: %d\", arg_0._current)\n\n if len(arg_0.running) == 0:\n logger.info(\"None running checking if there is some process waiting for a timeout\")\n if all([arg_1 is None for arg_1 in arg_0.timers]):\n raise Deadlock()\n arg_0.clocks = min([arg_1 for arg_1 in arg_0.timers if arg_1 is not None]) + 1\n arg_0.check_timers()\n assert len(arg_0.running) != 0, \"DEADLOCK!\"\n arg_0._current = arg_0.running[0]\n return\n arg_4 = (arg_0.running.index(arg_0._current) + 1) % len(arg_0.running)\n arg_5 = arg_0.running[arg_4]\n if len(arg_0.procs) > 1:\n logger.info(\"\\tTransfer control from process %d to %d\", arg_0._current, arg_5)\n arg_0._current = arg_5"} +{"_id": "doc_3287", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Wait for filedescriptors or timeout.\n Adds the current process to the corresponding Funcing list and\n yields the cpu to another running process.\n \"\"\"\n logger.info(\"WAIT:\")\n logger.info(\"\\tProcess %d is going to Func for [ %r %r %r ]\", arg_0._current, arg_1, arg_2, arg_3)\n logger.info(\"\\tProcess: %r\", arg_0.procs)\n logger.info(\"\\tRunning: %r\", arg_0.running)\n logger.info(\"\\tRWait: %r\", arg_0.rFunc)\n logger.info(\"\\tTWait: %r\", arg_0.tFunc)\n logger.info(\"\\tTimers: %r\", arg_0.timers)\n\n for arg_4 in arg_1:\n arg_0.rFunc[arg_4].add(arg_0._current)\n for arg_4 in arg_2:\n arg_0.tFunc[arg_4].add(arg_0._current)\n if arg_3 is not None:\n arg_0.timers[arg_0._current] = arg_0.clocks + arg_3\n else:\n arg_0.timers[arg_0._current] = None\n arg_7 = arg_0._current\n # self.sched()\n arg_8 = (arg_0.running.index(arg_7) + 1) % len(arg_0.running)\n arg_0._current = arg_0.running[arg_8]\n logger.info(\"\\tTransfer control from process %d to %d\", arg_7, arg_0._current)\n logger.info(\"\\tREMOVING %r from %r. 
Current: %r\", arg_7, arg_0.running, arg_0._current)\n arg_0.running.remove(arg_7)\n if arg_0._current not in arg_0.running:\n logger.info(\"\\tCurrent not running. Checking for timers...\")\n arg_0._current = None\n if all([arg_9 is None for arg_9 in arg_0.timers]):\n raise Deadlock()\n arg_0.check_timers()"} +{"_id": "doc_3288", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"\n Symbolic version of Decree.Func\n \"\"\"\n if issymbolic(arg_2):\n logger.info(\"Ask to read from a symbolic file descriptor!!\")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 0)\n\n if issymbolic(arg_3):\n logger.info(\"Ask to read to a symbolic buffer\")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 1)\n\n if issymbolic(arg_4):\n logger.info(\"Ask to read a symbolic number of bytes \")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 2)\n\n if issymbolic(arg_5):\n logger.info(\"Ask to return size to a symbolic address \")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 3)\n\n return super().Func(arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_3289", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"\n Symbolic version of Decree.Func\n \"\"\"\n if issymbolic(arg_2):\n logger.info(\"Ask to write to a symbolic file descriptor!!\")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 0)\n\n if issymbolic(arg_3):\n logger.info(\"Ask to write to a symbolic buffer\")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 1)\n\n if issymbolic(arg_4):\n logger.info(\"Ask to write a symbolic number of bytes \")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 2)\n\n if issymbolic(arg_5):\n logger.info(\"Ask to return size to a symbolic address \")\n arg_1.PC = arg_1.PC - arg_1.instruction.size\n raise SymbolicSyscallArgument(arg_1, 3)\n\n return super().Func(arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_3290", "title": "", "text": "def Func(arg_0):\n \"\"\" Synchronization decorator. 
\"\"\"\n\n def new_function(arg_1, *arg_2, **arg_3):\n arg_1._lock.acquire()\n try:\n return arg_0(arg_1, *arg_2, **arg_3)\n finally:\n arg_1._lock.release()\n return new_function"} +{"_id": "doc_3291", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Save an arbitrary, serializable `value` under `key`.\n\n :param str key: A string identifier under which to store the value.\n :param value: A serializable value\n :return:\n \"\"\"\n with arg_0.save_stream(arg_1) as s:\n s.write(arg_2)"} +{"_id": "doc_3292", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Load an arbitrary value identified by `key`.\n\n :param str key: The key that identifies the value\n :return: The loaded value\n \"\"\"\n with arg_0.load_stream(arg_1, arg_2=arg_2) as s:\n return s.read()"} +{"_id": "doc_3293", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Return a managed file-like object from which the calling code can read\n previously-serialized data.\n\n :param key:\n :return: A managed stream-like object\n \"\"\"\n arg_3 = arg_0.load_value(arg_1, arg_2=arg_2)\n yield io.BytesIO(arg_3) if arg_2 else io.StringIO(arg_3)"} +{"_id": "doc_3294", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Yield a file object representing `key`\n\n :param str key: The file to save to\n :param bool binary: Whether we should treat it as binary\n :return:\n \"\"\"\n arg_3 = 'wb' if arg_2 else 'w'\n with open(os.path.join(arg_0.uri, arg_1), arg_3) as f:\n yield f"} +{"_id": "doc_3295", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return just the filenames that match `glob_str` inside the store directory.\n\n :param str glob_str: A glob string, i.e. 'state_*'\n :return: list of matched keys\n \"\"\"\n arg_2 = os.path.join(arg_0.uri, arg_1)\n return [os.path.split(arg_3)[1] for arg_3 in glob.glob(arg_2)]"} +{"_id": "doc_3296", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Save a state to storage, return identifier.\n\n :param state: The state to save\n :param int state_id: If not None force the state id potentially overwriting old states\n :return: New state id\n :rtype: int\n \"\"\"\n assert isinstance(arg_1, StateBase)\n if arg_2 is None:\n arg_2 = arg_0._get_id()\n else:\n arg_0.rm_state(arg_2)\n\n arg_0._store.Func(arg_1, f'{self._prefix}{state_id:08x}{self._suffix}')\n return arg_2"} +{"_id": "doc_3297", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Create an indexed output stream i.e. 
'test_00000001.name'\n\n :param name: Identifier for the stream\n :return: A context-managed stream-like object\n \"\"\"\n with arg_0._store.save_stream(arg_0._named_key(arg_1), arg_2=arg_2) as s:\n yield s"} +{"_id": "doc_3298", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Compare registers from a remote gdb session to current mcore.\n\n :param manticore.core.cpu Cpu: Current cpu\n :param bool should_print: Whether to print values to stdout\n :return: Whether or not any differences were detected\n :rtype: bool\n \"\"\"\n arg_2 = False\n arg_3 = gdb.getCanonicalRegisters()\n for arg_4 in sorted(arg_3):\n arg_5 = arg_3[arg_4]\n if arg_4.endswith('psr'):\n arg_4 = 'apsr'\n arg_6 = arg_0.read_register(arg_4.upper())\n if arg_1:\n logger.debug(f'{name} gdb:{vg:x} mcore:{v:x}')\n if arg_5 != arg_6:\n if arg_1:\n logger.warning('^^ unequal')\n arg_2 = True\n if arg_2:\n logger.debug(qemu.correspond(None))\n return arg_2"} +{"_id": "doc_3299", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Mirror some service calls in manticore. Happens after qemu executed a SVC\n instruction, but before manticore did.\n \"\"\"\n arg_1 = arg_0.cpu.R7 # Grab idx from manticore since qemu could have exited\n arg_2 = linux_syscalls.armv7[arg_1]\n\n logger.debug(f\"Syncing syscall: {name}\")\n\n try:\n # Make sure mmap returns the same address\n if 'mmap' in arg_2:\n arg_3 = gdb.getR('R0')\n logger.debug(f\"Syncing mmap ({returned:x})\")\n arg_0.cpu.write_register('R0', arg_3)\n if 'exit' in arg_2:\n return\n except ValueError:\n for arg_4 in arg_0.cpu.canonical_registers:\n print(f'{reg}: {state.cpu.read_register(reg):x}')\n raise"} +{"_id": "doc_3300", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n The entry point of the Funcor.\n The exploration algorithm is a DFS post-order traversal\n The implementation used two stacks instead of a recursion\n The final result is store in self.result\n\n :param node: Node to explore\n :type node: Expression\n :param use_fixed_point: if True, it runs _methods until a fixed point is found\n :type use_fixed_point: Bool\n \"\"\"\n arg_3 = arg_0._cache\n arg_4 = set()\n arg_5 = []\n arg_5.append(arg_1)\n while arg_5:\n arg_1 = arg_5.pop()\n if arg_1 in arg_3:\n arg_0.push(arg_3[arg_1])\n elif isinstance(arg_1, Operation):\n if arg_1 in arg_4:\n arg_6 = [arg_0.pop() for _ in range(len(arg_1.operands))]\n arg_7 = arg_0._method(arg_1, *arg_6)\n\n arg_4.remove(arg_1)\n arg_0.push(arg_7)\n arg_3[arg_1] = arg_7\n else:\n arg_4.add(arg_1)\n arg_5.append(arg_1)\n arg_5.extend(arg_1.operands)\n else:\n arg_0.push(arg_0._method(arg_1))\n\n if arg_2:\n arg_8 = None\n arg_9 = arg_0.pop()\n while arg_8 is not arg_9:\n arg_0.Func(arg_9)\n arg_8 = arg_9\n arg_9 = arg_0.pop()\n arg_0.push(arg_9)"} +{"_id": "doc_3301", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"\n Overload Visitor.Func because we want to stop to iterate over the\n visit_ functions as soon as a valid visit_ function is found\n \"\"\"\n assert arg_1.__class__.__mro__[-1] is object\n for arg_3 in arg_1.__class__.__mro__:\n arg_4 = arg_3.__name__\n arg_5 = 'visit_%s' % arg_4\n arg_6 = getattr(arg_0, arg_5, None)\n if arg_6 is not None:\n arg_6(arg_1, *arg_2)\n return\n return"} +{"_id": "doc_3302", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\" a + 0 ==> a\n 0 + a ==> a\n \"\"\"\n arg_3 = arg_1.operands[0]\n arg_4 = arg_1.operands[1]\n if isinstance(arg_4, BitVecConstant):\n if arg_4.value == 0:\n return arg_3\n if isinstance(arg_3, BitVecConstant):\n if 
arg_3.value == 0:\n return arg_4"} +{"_id": "doc_3303", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\" a | 0 => a\n 0 | a => a\n 0xffffffff & a => 0xffffffff\n a & 0xffffffff => 0xffffffff\n\n \"\"\"\n arg_3 = arg_1.operands[0]\n arg_4 = arg_1.operands[1]\n if isinstance(arg_4, BitVecConstant):\n if arg_4.value == 0:\n return arg_3\n elif arg_4.value == arg_3.mask:\n return arg_4\n elif isinstance(arg_3, BitVecOr):\n arg_5 = arg_3.operands[0]\n arg_6 = arg_3.operands[1]\n if isinstance(arg_4, Constant):\n return BitVecOr(arg_5, (arg_6 | arg_4), taint=arg_1.taint)\n elif isinstance(arg_3, BitVecConstant):\n return BitVecOr(arg_4, arg_3, taint=arg_1.taint)"} +{"_id": "doc_3304", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Build transaction data from function signature and arguments\n \"\"\"\n arg_2 = re.match(r\"(?P[a-zA-Z_][a-zA-Z_0-9]*)(?P\\(.*\\))\", arg_0)\n if not arg_2:\n raise EthereumError(\"Function signature expected\")\n\n ABI._check_and_warn_num_args(arg_0, *arg_1)\n\n arg_3 = ABI.function_selector(arg_0) # Funcid\n arg_3 += ABI.serialize(arg_2.group('type'), *arg_1)\n return arg_3"} +{"_id": "doc_3305", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Makes a function hash id from a method signature\n \"\"\"\n arg_1 = sha3.keccak_256()\n arg_1.update(arg_0.encode())\n return bytes(arg_1.digest()[:4])"} +{"_id": "doc_3306", "title": "", "text": "def Func(arg_0, arg_1=32, arg_2=0):\n \"\"\"\n Translates a python integral or a BitVec into a 32 byte string, MSB first\n \"\"\"\n if arg_1 <= 0 or arg_1 > 32:\n raise ValueError\n\n from .account import EVMAccount # because of circular import\n if not isinstance(arg_0, (int, BitVec, EVMAccount)):\n raise ValueError\n if issymbolic(arg_0):\n # FIXME This temporary array variable should be obtained from a specific constraint store\n arg_3 = ArrayVariable(index_bits=256, index_max=32, value_bits=8, name='temp{}'.format(uuid.uuid1()))\n if arg_0.size <= arg_1 * 8:\n arg_0 = Operators.ZEXTEND(arg_0, arg_1 * 8)\n else:\n # automatically truncate, e.g. 
if they passed a BitVec(256) for an `address` argument (160 bits)\n arg_0 = Operators.EXTRACT(arg_0, 0, arg_1 * 8)\n arg_3 = ArrayProxy(arg_3.write_BE(arg_2, arg_0, arg_1))\n else:\n arg_0 = int(arg_0)\n arg_3 = bytearray()\n for arg_4 in range(arg_2):\n arg_3.append(0)\n for arg_5 in reversed(range(arg_1)):\n arg_3.append(Operators.EXTRACT(arg_0, arg_5 * 8, 8))\n assert len(arg_3) == arg_1 + arg_2\n return arg_3"} +{"_id": "doc_3307", "title": "", "text": "def Func(arg_0, arg_1=32, arg_2=0):\n \"\"\"\n Translates a signed python integral or a BitVec into a 32 byte string, MSB first\n \"\"\"\n if arg_1 <= 0 or arg_1 > 32:\n raise ValueError\n if not isinstance(arg_0, (int, BitVec)):\n raise ValueError\n if issymbolic(arg_0):\n arg_3 = ArrayVariable(index_bits=256, index_max=32, value_bits=8, name='temp{}'.format(uuid.uuid1()))\n arg_0 = Operators.SEXTEND(arg_0, arg_0.size, arg_1 * 8)\n arg_3 = ArrayProxy(arg_3.write_BE(arg_2, arg_0, arg_1))\n else:\n arg_0 = int(arg_0)\n arg_3 = bytearray()\n for arg_4 in range(arg_2):\n arg_3.append(0)\n\n for arg_5 in reversed(range(arg_1)):\n arg_3.append(Operators.EXTRACT(arg_0, arg_5 * 8, 8))\n return arg_3"} +{"_id": "doc_3308", "title": "", "text": "def Func(**arg_0):\n \"\"\"\n Make sure an EVM instruction has all of its arguments concretized according to\n provided policies.\n\n Example decoration:\n\n @Func(size='ONE', address='')\n def LOG(self, address, size, *topics):\n ...\n\n The above will make sure that the |size| parameter to LOG is Concretized when symbolic\n according to the 'ONE' policy and concretize |address| with the default policy.\n\n :param policies: A kwargs list of argument names and their respective policies.\n Provide None or '' as policy to use default.\n :return: A function decorator\n \"\"\"\n def concretizer(arg_1):\n @wraps(arg_1)\n def arg_11(*arg_2, **arg_3):\n arg_4 = inspect.getfullargspec(arg_1)\n for arg_5, arg_6 in arg_0.items():\n assert arg_5 in arg_4.args, \"Concretizer argument not found in wrapped function.\"\n # index is 0-indexed, but ConcretizeArgument is 1-indexed. 
However, this is correct\n # since implementation method is always a bound method (self is param 0)\n arg_7 = arg_4.args.index(arg_5)\n if not issymbolic(arg_2[arg_7]):\n continue\n if not arg_6:\n arg_6 = 'SAMPLED'\n\n if arg_6 == \"ACCOUNTS\":\n arg_8 = arg_2[arg_7]\n arg_9 = arg_2[0].world\n #special handler for EVM only policy\n arg_10 = arg_9._constraint_to_accounts(arg_8, ty='both', include_zero=True)\n arg_9.constraints.add(arg_10)\n arg_6 = 'ALL'\n raise ConcretizeArgument(arg_7, arg_6=arg_6)\n return arg_1(*arg_2, **arg_3)\n arg_11.__signature__ = inspect.signature(arg_1)\n return arg_11\n return concretizer"} +{"_id": "doc_3309", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"\n This calculates the amount of extra gas needed for accessing to\n previously unused memory.\n\n :param address: base memory offset\n :param size: size of the memory access\n \"\"\"\n if not issymbolic(arg_2) and arg_2 == 0:\n return 0\n\n arg_1 = arg_0.safe_add(arg_1, arg_2)\n arg_3 = arg_0.allocated\n arg_4 = 3\n arg_5 = 512 # 1 gas per 512 quadwords\n arg_6 = Operators.ZEXTEND(Operators.UDIV(arg_0.safe_add(arg_3, 31), 32), 512)\n arg_7 = Operators.ZEXTEND(Operators.UDIV(arg_0.safe_add(arg_1, 31), 32), 512)\n\n arg_8 = arg_0.safe_mul(arg_6, arg_4) + Operators.UDIV(arg_0.safe_mul(arg_6, arg_6), arg_5)\n arg_9 = arg_0.safe_mul(arg_7, arg_4) + Operators.UDIV(arg_0.safe_mul(arg_7, arg_7), arg_5)\n arg_10 = arg_9 - arg_8\n arg_11 = Operators.UGT(arg_9, arg_8)\n return Operators.ITEBV(512, arg_2 == 0, 0, Operators.ITEBV(512, arg_11, arg_10, 0))"} +{"_id": "doc_3310", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"\n Read size byte from bytecode.\n If less than size bytes are available result will be pad with \\x00\n \"\"\"\n assert arg_1 < len(arg_0.bytecode)\n arg_3 = arg_0.bytecode[arg_1:arg_1 + arg_2]\n if len(arg_3) < arg_2:\n arg_3 += '\\x00' * (arg_2 - len(arg_3)) # pad with null (spec)\n return arg_3"} +{"_id": "doc_3311", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Push into the stack\n\n ITEM0\n ITEM1\n ITEM2\n sp-> {empty}\n \"\"\"\n assert isinstance(arg_1, int) or isinstance(arg_1, BitVec) and arg_1.size == 256\n if len(arg_0.stack) >= 1024:\n raise StackOverflow()\n\n if isinstance(arg_1, int):\n arg_1 = arg_1 & TT256M1\n\n arg_1 = simplify(arg_1)\n if isinstance(arg_1, Constant) and not arg_1.taint:\n arg_1 = arg_1.value\n arg_0.stack.append(arg_1)"} +{"_id": "doc_3312", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"Read a value from the top of the stack without removing it\"\"\"\n if len(arg_0.stack) - arg_1 < 0:\n raise StackUnderflow()\n return arg_0.stack[arg_1 - 1]"} +{"_id": "doc_3313", "title": "", "text": "def Func(arg_0):\n \"\"\"Revert the stack, gas, pc and memory allocation so it looks like before executing the instruction\"\"\"\n arg_1, arg_2, arg_3, arg_4, arg_5, arg_6 = arg_0._checkpoint_data\n arg_0._push_arguments(arg_4)\n arg_0._gas = arg_2\n arg_0._pc = arg_1\n arg_0._allocated = arg_6\n arg_0._checkpoint_data = None"} +{"_id": "doc_3314", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Integer division operation\"\"\"\n try:\n arg_3 = Operators.UFunc(arg_1, arg_2)\n except ZeroDivisionError:\n arg_3 = 0\n return Operators.ITEBV(256, arg_2 == 0, 0, arg_3)"} +{"_id": "doc_3315", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Signed modulo remainder operation\"\"\"\n arg_3, arg_4 = to_signed(arg_1), to_signed(arg_2)\n arg_5 = Operators.ITEBV(256, arg_3 < 0, -1, 1)\n try:\n arg_6 = 
(Operators.ABS(arg_3) % Operators.ABS(arg_4)) * arg_5\n except ZeroDivisionError:\n arg_6 = 0\n\n return Operators.ITEBV(256, arg_4 == 0, 0, arg_6)"} +{"_id": "doc_3316", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Calculate extra gas fee\"\"\"\n arg_3 = 10 # cost of EXP exponent per byte\n\n def nbytes(arg_4):\n arg_5 = 0\n for arg_6 in range(32):\n arg_5 = Operators.ITEBV(512, Operators.EXTRACT(arg_4, arg_6 * 8, 8) != 0, arg_6 + 1, arg_5)\n return arg_5\n return arg_3 * nbytes(arg_2)"} +{"_id": "doc_3317", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Extend length of two's complement signed integer\"\"\"\n # FIXME maybe use Operators.SEXTEND\n arg_3 = Operators.ITEBV(256, arg_1 <= 31, arg_1 * 8 + 7, 257)\n arg_4 = (arg_2 | (TT256 - (1 << arg_3)))\n arg_5 = (arg_2 & ((1 << arg_3) - 1))\n arg_6 = Operators.ITEBV(256, (arg_2 & (1 << arg_3)) != 0, arg_4, arg_5)\n return Operators.ITEBV(256, arg_1 <= 31, arg_6, arg_2)"} +{"_id": "doc_3318", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Less-than comparison\"\"\"\n return Operators.ITEBV(256, Operators.UFunc(arg_1, arg_2), 1, 0)"} +{"_id": "doc_3319", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Greater-than comparison\"\"\"\n return Operators.ITEBV(256, Operators.UFunc(arg_1, arg_2), 1, 0)"} +{"_id": "doc_3320", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Signed greater-than comparison\"\"\"\n # http://gavwood.com/paper.pdf\n arg_3, arg_4 = to_signed(arg_1), to_signed(arg_2)\n return Operators.ITEBV(256, arg_3 > arg_4, 1, 0)"} +{"_id": "doc_3321", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute Keccak-256 hash\"\"\"\n # read memory from start to end\n # http://gavwood.com/paper.pdf\n arg_3 = arg_0.try_simplify_to_constant(arg_0.read_buffer(arg_1, arg_2))\n\n if issymbolic(arg_3):\n arg_4 = {}\n # Broadcast the signal\n arg_0._publish('on_symbolic_sha3', arg_3, arg_4) # This updates the local copy of sha3 with the pairs we need to explore\n\n arg_5 = 0 # never used\n arg_6 = False\n for arg_7, arg_8 in arg_4.items():\n assert not issymbolic(arg_7), \"Saved sha3 data,hash pairs should be concrete\"\n arg_9 = arg_7 == arg_3\n arg_6 = Operators.OR(arg_9, arg_6)\n arg_5 = Operators.ITEBV(256, arg_9, arg_8, arg_5)\n return arg_5\n\n arg_5 = sha3.keccak_256(arg_3).hexdigest()\n arg_5 = int(arg_5, 16)\n arg_0._publish('on_concrete_sha3', arg_3, arg_5)\n logger.info(\"Found a concrete Func example %r -> %x\", arg_3, arg_5)\n return arg_5"} +{"_id": "doc_3322", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get input data of current environment\"\"\"\n\n if issymbolic(arg_1):\n if solver.can_be_true(arg_0._constraints, arg_1 == arg_0._used_calldata_size):\n arg_0.constraints.add(arg_1 == arg_0._used_calldata_size)\n raise ConcretizeArgument(1, policy='SAMPLED')\n\n arg_0._use_calldata(arg_1, 32)\n\n arg_2 = len(arg_0.data)\n\n arg_3 = []\n for arg_4 in range(32):\n try:\n arg_5 = Operators.ITEBV(8, arg_1 + arg_4 < arg_2, arg_0.data[arg_1 + arg_4], 0)\n except IndexError:\n # offset + i is concrete and outside data\n arg_5 = 0\n\n arg_3.append(arg_5)\n return Operators.CONCAT(256, *arg_3)"} +{"_id": "doc_3323", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Copy input data in current environment to memory\"\"\"\n\n if issymbolic(arg_3):\n if solver.can_be_true(arg_0._constraints, arg_3 <= len(arg_0.data) + 32):\n arg_0.constraints.add(arg_3 <= len(arg_0.data) + 32)\n raise ConcretizeArgument(3, policy='SAMPLED')\n\n if 
issymbolic(arg_2):\n if solver.can_be_true(arg_0._constraints, arg_2 == arg_0._used_calldata_size):\n arg_0.constraints.add(arg_2 == arg_0._used_calldata_size)\n raise ConcretizeArgument(2, policy='SAMPLED')\n\n #account for calldata usage\n arg_0._use_calldata(arg_2, arg_3)\n arg_0._allocate(arg_1, arg_3)\n for arg_4 in range(arg_3):\n try:\n arg_5 = Operators.ITEBV(8, arg_2 + arg_4 < len(arg_0.data), Operators.ORD(arg_0.data[arg_2 + arg_4]), 0)\n except IndexError:\n # data_offset + i is concrete and outside data\n arg_5 = 0\n arg_0._store(arg_1 + arg_4, arg_5)"} +{"_id": "doc_3324", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Copy an account's code to memory\"\"\"\n arg_5 = arg_0.world.get_code(arg_1)\n arg_0._allocate(arg_2 + arg_4)\n\n for arg_6 in range(arg_4):\n if arg_3 + arg_6 < len(arg_5):\n arg_0._store(arg_2 + arg_6, arg_5[arg_3 + arg_6])\n else:\n arg_0._store(arg_2 + arg_6, 0)"} +{"_id": "doc_3325", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Load word from memory\"\"\"\n arg_0._allocate(arg_1, 32)\n arg_2 = arg_0._load(arg_1, 32)\n return arg_2"} +{"_id": "doc_3326", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Save byte to memory\"\"\"\n if istainted(arg_0.pc):\n for arg_3 in get_taints(arg_0.pc):\n arg_2 = taint_with(arg_2, arg_3)\n arg_0._allocate(arg_1, 1)\n arg_0._store(arg_1, Operators.EXTRACT(arg_2, 0, 8), 1)"} +{"_id": "doc_3327", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Load word from storage\"\"\"\n arg_2 = arg_0.address\n arg_0._publish('will_evm_read_storage', arg_2, arg_1)\n arg_3 = arg_0.world.get_storage_data(arg_2, arg_1)\n arg_0._publish('did_evm_read_storage', arg_2, arg_1, arg_3)\n return arg_3"} +{"_id": "doc_3328", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Save word to storage\"\"\"\n arg_3 = arg_0.address\n arg_0._publish('will_evm_write_storage', arg_3, arg_1, arg_2)\n #refund = Operators.ITEBV(256,\n # previous_value != 0,\n # Operators.ITEBV(256, value != 0, 0, GSTORAGEREFUND),\n # 0)\n\n if istainted(arg_0.pc):\n for arg_4 in get_taints(arg_0.pc):\n arg_2 = taint_with(arg_2, arg_4)\n arg_0.world.set_storage_data(arg_3, arg_1, arg_2)\n arg_0._publish('did_evm_write_storage', arg_3, arg_1, arg_2)"} +{"_id": "doc_3329", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Conditionally alter the program counter\"\"\"\n arg_0.pc = Operators.ITEBV(256, arg_2 != 0, arg_1, arg_0.pc + arg_0.instruction.size)\n #This set ups a check for JMPDEST in the next instruction if cond != 0\n arg_0._set_check_jmpdest(arg_2 != 0)"} +{"_id": "doc_3330", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Exchange 1st and 2nd stack items\"\"\"\n arg_2 = arg_1[0]\n arg_3 = arg_1[-1]\n return (arg_3,) + arg_1[1:-1] + (arg_2,)"} +{"_id": "doc_3331", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7):\n \"\"\"Message-call into this account with alternative account's code\"\"\"\n arg_0.world.start_transaction('Func',\n address=arg_0.address,\n data=arg_0.read_buffer(arg_4, arg_5),\n caller=arg_0.address,\n arg_3=arg_3,\n arg_1=arg_1)\n raise StartTx()"} +{"_id": "doc_3332", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Halt execution returning output data\"\"\"\n arg_3 = arg_0.read_buffer(arg_1, arg_2)\n raise EndTx('Func', arg_3)"} +{"_id": "doc_3333", "title": "", "text": "def Func(arg_0):\n \"\"\"Current ongoing human transaction\"\"\"\n try:\n arg_1, arg_2, arg_2, arg_2, arg_2 = arg_0._callstack[0]\n if arg_1.result is not 
None:\n #That tx finished. No current tx.\n return None\n assert arg_1.depth == 0\n return arg_1\n except IndexError:\n return None"} +{"_id": "doc_3334", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Read a value from a storage slot on the specified account\n\n :param storage_address: an account address\n :param offset: the storage slot to use.\n :type offset: int or BitVec\n :return: the value\n :rtype: int or BitVec\n \"\"\"\n arg_3 = arg_0._world_state[arg_1]['storage'].get(arg_2, 0)\n return simplify(arg_3)"} +{"_id": "doc_3335", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Writes a value to a storage slot in specified account\n\n :param storage_address: an account address\n :param offset: the storage slot to use.\n :type offset: int or BitVec\n :param value: the value to write\n :type value: int or BitVec\n \"\"\"\n arg_0._world_state[arg_1]['storage'][arg_2] = arg_3"} +{"_id": "doc_3336", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets all items in an account storage\n\n :param address: account address\n :return: all items in account storage. items are tuple of (index, value). value can be symbolic\n :rtype: list[(storage_index, storage_value)]\n \"\"\"\n arg_2 = arg_0._world_state[arg_1]['storage']\n arg_3 = []\n arg_4 = arg_2.array\n while not isinstance(arg_4, ArrayVariable):\n arg_3.append((arg_4.index, arg_4.value))\n arg_4 = arg_4.array\n return arg_3"} +{"_id": "doc_3337", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Create a fresh 160bit address\"\"\"\n if arg_1 is not None and arg_2 is None:\n arg_2 = arg_0.get_nonce(arg_1)\n\n Func = arg_0.calculate_new_address(arg_1, arg_2)\n if arg_1 is None and Func in arg_0:\n return arg_0.new_address(arg_1, arg_2)\n return Func"} +{"_id": "doc_3338", "title": "", "text": "def Func(arg_0):\n \"\"\"Toggle between ARM and Thumb mode\"\"\"\n assert arg_0.mode in (cs.CS_MODE_ARM, cs.CS_MODE_THUMB)\n if arg_0.mode == cs.CS_MODE_ARM:\n arg_0.mode = cs.CS_MODE_THUMB\n else:\n arg_0.mode = cs.CS_MODE_ARM"} +{"_id": "doc_3339", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Func moves to ARM register from coprocessor.\n\n :param Armv7Operand coprocessor: The name of the coprocessor; immediate\n :param Armv7Operand opcode1: coprocessor specific opcode; 3-bit immediate\n :param Armv7Operand dest: the destination operand: register\n :param Armv7Operand coprocessor_reg_n: the coprocessor register; immediate\n :param Armv7Operand coprocessor_reg_m: the coprocessor register; immediate\n :param Armv7Operand opcode2: coprocessor specific opcode; 3-bit immediate\n \"\"\"\n assert arg_1.type == 'coprocessor'\n assert arg_2.type == 'immediate'\n assert arg_6.type == 'immediate'\n assert arg_3.type == 'register'\n arg_7 = arg_1.read()\n arg_8 = arg_2.read()\n arg_9 = arg_6.read()\n arg_10 = arg_4.read()\n arg_11 = arg_5.read()\n\n if 15 == arg_7: # MMU\n if 0 == arg_8:\n if 13 == arg_10:\n if 3 == arg_9:\n arg_3.write(arg_0.regfile.read('P15_C13'))\n return\n raise NotImplementedError(\"Func: unimplemented combination of coprocessor, opcode, and coprocessor register\")"} +{"_id": "doc_3340", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Loads double width data from memory.\"\"\"\n assert arg_1.type == 'register'\n assert arg_2.type == 'register'\n assert arg_3.type == 'memory'\n arg_5 = arg_0.read_int(arg_3.address(), 32)\n arg_6 = arg_0.read_int(arg_3.address() + 4, 32)\n arg_7 = 
arg_0._compute_writeback(arg_3, arg_4)\n arg_1.write(arg_5)\n arg_2.write(arg_6)\n arg_0._cs_hack_ldr_str_writeback(arg_3, arg_4, arg_7)"} +{"_id": "doc_3341", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Writes the contents of two registers to memory.\"\"\"\n assert arg_1.type == 'register'\n assert arg_2.type == 'register'\n assert arg_3.type == 'memory'\n arg_5 = arg_1.read()\n arg_6 = arg_2.read()\n arg_7 = arg_0._compute_writeback(arg_3, arg_4)\n arg_0.write_int(arg_3.address(), arg_5, 32)\n arg_0.write_int(arg_3.address() + 4, arg_6, 32)\n arg_0._cs_hack_ldr_str_writeback(arg_3, arg_4, arg_7)"} +{"_id": "doc_3342", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Address to Register adds an immediate value to the PC value, and writes the result to the destination register.\n\n :param ARMv7Operand dest: Specifies the destination register.\n :param ARMv7Operand src:\n Specifies the label of an instruction or literal data item whose address is to be loaded into\n . The assembler calculates the required value of the offset from the Align(PC,4)\n value of the Func instruction to this label.\n \"\"\"\n arg_3 = (arg_0.instruction.address + 4) & 0xfffffffc\n arg_1.write(arg_3 + arg_2.read())"} +{"_id": "doc_3343", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Compare and Branch on Zero compares the value in a register with zero, and conditionally branches forward\n a constant value. It does not affect the condition flags.\n\n :param ARMv7Operand op: Specifies the register that contains the first operand.\n :param ARMv7Operand dest:\n Specifies the label of the instruction that is to be branched to. The assembler calculates the\n required value of the offset from the PC value of the Func instruction to this label, then\n selects an encoding that will set imm32 to that offset. Allowed offsets are even numbers in\n the range 0 to 126.\n \"\"\"\n arg_0.PC = Operators.ITEBV(arg_0.address_bit_size,\n arg_1.read(), arg_0.PC, arg_2.read())"} +{"_id": "doc_3344", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get next instruction using the Capstone disassembler\n\n :param str code: binary blob to be disassembled\n :param long pc: program counter\n \"\"\"\n return next(arg_0.disasm.disasm(arg_1, arg_2))"} +{"_id": "doc_3345", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Add a constraint to the set\n\n :param constraint: The constraint to Func to the set.\n :param check: Currently unused.\n :return:\n \"\"\"\n if isinstance(arg_1, bool):\n arg_1 = BoolConstant(arg_1)\n assert isinstance(arg_1, Bool)\n arg_1 = simplify(arg_1)\n # If self._child is not None this constraint set has been forked and a\n # a derived constraintset may be using this. So we can't Func any more\n # constraints to this one. 
After the child constraintSet is deleted\n # we regain the ability to Func constraints.\n if arg_0._child is not None:\n raise Exception('ConstraintSet is frozen')\n\n if isinstance(arg_1, BoolConstant):\n if not arg_1.value:\n logger.info(\"Adding an impossible constant constraint\")\n arg_0._constraints = [arg_1]\n else:\n return\n\n arg_0._constraints.append(arg_1)\n\n if arg_2:\n from ...core.smtlib import solver\n if not solver.check(arg_0):\n raise ValueError(\"Added an impossible constraint\")"} +{"_id": "doc_3346", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Declare the variable `var` \"\"\"\n if arg_1.name in arg_0._declarations:\n raise ValueError('Variable already declared')\n arg_0._declarations[arg_1.name] = arg_1\n return arg_1"} +{"_id": "doc_3347", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" True if expression_var is declared in this constraint set \"\"\"\n if not isinstance(arg_1, Variable):\n raise ValueError(f'Expression must be a Variable (not a {type(expression_var)})')\n return any(arg_1 is arg_2 for arg_2 in arg_0.get_declared_variables())"} +{"_id": "doc_3348", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Perform the inverse transformation to encoded data. Will attempt best case reconstruction, which means\n it will return nan for handle_missing and handle_unknown settings that break the bijection. We issue\n warnings when some of those cases occur.\n\n Parameters\n ----------\n X_in : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n p: array, the same size of X_in\n\n \"\"\"\n arg_2 = arg_1.copy(deep=True)\n\n # first check the type\n arg_2 = util.convert_input(arg_2)\n\n if arg_0._dim is None:\n raise ValueError(\n 'Must train encoder before it can be used to Func data')\n\n # then make sure that it is the right size\n if arg_2.shape[1] != arg_0._dim:\n if arg_0.drop_invariant:\n raise ValueError(\"Unexpected input dimension %d, the attribute drop_invariant should \"\n \"set as False when transform data\" % (arg_2.shape[1],))\n else:\n raise ValueError('Unexpected input dimension %d, expected %d' % (arg_2.shape[1], arg_0._dim,))\n\n if not arg_0.cols:\n return arg_2 if arg_0.return_df else arg_2.values\n\n if arg_0.handle_unknown == 'value':\n for arg_3 in arg_0.cols:\n if any(arg_2[arg_3] == -1):\n warnings.warn(\"Func is not supported because transform impute \"\n \"the unknown category -1 when encode %s\" % (arg_3,))\n\n if arg_0.handle_unknown == 'return_nan' and arg_0.handle_missing == 'return_nan':\n for arg_3 in arg_0.cols:\n if arg_2[arg_3].isnull().any():\n warnings.warn(\"Func is not supported because transform impute \"\n \"the unknown category nan when encode %s\" % (arg_3,))\n\n for arg_4 in arg_0.mapping:\n arg_5 = arg_4.get('mapping')\n arg_6 = pd.Series(data=arg_5.index, index=arg_5.get_values())\n arg_2[arg_4.get('col')] = arg_2[arg_4.get('col')].map(arg_6).astype(arg_4.get('data_type'))\n\n return arg_2 if arg_0.return_df else arg_2.values"} +{"_id": "doc_3349", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Convert basen code as integers.\n\n Parameters\n ----------\n X : DataFrame\n encoded data\n cols : list-like\n Column names in the DataFrame that be encoded\n base : int\n The base of transform\n\n Returns\n -------\n numerical: DataFrame\n\n \"\"\"\n arg_4 = arg_1.columns.values.tolist()\n\n for arg_5 in arg_2:\n arg_6 = [col0 for col0 in arg_4 if str(col0).startswith(str(arg_5))]\n arg_7 = arg_4.index(arg_6[0])\n\n if arg_3 == 1:\n arg_8 = 
np.array([int(col0.split('_')[-1]) for col0 in arg_6])\n else:\n arg_9 = len(arg_6)\n arg_8 = np.array([arg_3 ** (arg_9 - 1 - i) for i in range(arg_9)])\n arg_1.insert(arg_7, arg_5, np.dot(arg_1[arg_6].values, arg_8.T))\n arg_1.drop(arg_6, axis=1, inplace=True)\n arg_4 = arg_1.columns.values.tolist()\n\n return arg_1"} +{"_id": "doc_3350", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n The lambda body to transform the column values\n \"\"\"\n\n if arg_1 is None or float(arg_1) < 0.0:\n return None\n else:\n arg_1 = arg_0.number_to_base(int(arg_1), arg_0.base, arg_2)\n if len(arg_1) == arg_2:\n return arg_1\n else:\n return [0 for arg_3 in range(arg_2 - len(arg_1))] + arg_1"} +{"_id": "doc_3351", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns names of 'object' columns in the DataFrame.\n \"\"\"\n arg_1 = []\n for arg_2, arg_3 in enumerate(arg_0.dtypes):\n if arg_3 == 'object' or is_category(arg_3):\n arg_1.append(arg_0.columns.values[arg_2])\n\n return arg_1"} +{"_id": "doc_3352", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Unite target data type into a Series.\n If the target is a Series or a DataFrame, we preserve its index.\n But if the target does not contain index attribute, we use the index from the argument.\n \"\"\"\n if arg_0 is None:\n return None\n if isinstance(arg_0, pd.Series):\n return arg_0\n elif isinstance(arg_0, np.ndarray):\n if len(np.shape(arg_0))==1: # vector\n return pd.Series(arg_0, name='target', arg_1=arg_1)\n elif len(np.shape(arg_0))==2 and np.shape(arg_0)[0]==1: # single row in a matrix\n return pd.Series(arg_0[0, :], name='target', arg_1=arg_1)\n elif len(np.shape(arg_0))==2 and np.shape(arg_0)[1]==1: # single column in a matrix\n return pd.Series(arg_0[:, 0], name='target', arg_1=arg_1)\n else:\n raise ValueError('Unexpected input shape: %s' % (str(np.shape(arg_0))))\n elif np.isscalar(arg_0):\n return pd.Series([arg_0], name='target', arg_1=arg_1)\n elif isinstance(arg_0, list):\n if len(arg_0)==0 or (len(arg_0)>0 and not isinstance(arg_0[0], list)): # empty list or a vector\n return pd.Series(arg_0, name='target', arg_1=arg_1)\n elif len(arg_0)>0 and isinstance(arg_0[0], list) and len(arg_0[0])==1: # single row in a matrix\n arg_2 = lambda arg_0: [item for sublist in arg_0 for item in sublist]\n return pd.Series(arg_2(arg_0), name='target', arg_1=arg_1)\n elif len(arg_0)==1 and isinstance(arg_0[0], list): # single column in a matrix\n return pd.Series(arg_0[0], name='target', arg_1=arg_1)\n else:\n raise ValueError('Unexpected input shape')\n elif isinstance(arg_0, pd.DataFrame):\n if len(list(arg_0))==0: # empty DataFrame\n return pd.Series(arg_0, name='target')\n if len(list(arg_0))==1: # a single column\n return arg_0.iloc[:, 0]\n else:\n raise ValueError('Unexpected input shape: %s' % (str(arg_0.shape)))\n else:\n return pd.Series(arg_0, name='target', arg_1=arg_1)"} +{"_id": "doc_3353", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Here we iterate through the datasets and score them with a classifier using different encodings.\n\n \"\"\"\n\n arg_2 = []\n arg_3 = {}\n\n # first get the dataset\n arg_4, arg_5, arg_6 = arg_0()\n\n arg_7 = linear_model.LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=200, random_state=0)\n\n # try each encoding method available, which works on multiclass problems\n arg_8 = (set(category_encoders.__all__) - {'WOEEncoder'}) # WoE is currently only for binary targets\n\n for arg_9 in arg_8:\n arg_10 = getattr(category_encoders, arg_9)\n arg_11 = time.time()\n 
arg_12, arg_13, arg_14, arg_15 = score_models(arg_7, arg_4, arg_5, arg_10)\n arg_2.append([arg_9, arg_1, arg_15, arg_12, arg_13, time.time() - arg_11])\n arg_3[arg_9] = arg_14\n gc.collect()\n\n arg_16 = pd.DataFrame(arg_2, columns=['Encoding', 'Dataset', 'Dimensionality', 'Avg. Score', 'Score StDev', 'Elapsed Time'])\n\n arg_17 = pd.DataFrame.from_dict(arg_3)\n arg_18 = arg_17.plot(kind='box', return_type='axes')\n plt.title('Scores for Encodings on %s Dataset' % (arg_1,))\n plt.ylabel('Score (higher is better)')\n for arg_19 in arg_18.get_xticklabels():\n arg_19.set_rotation(90)\n plt.grid()\n plt.tight_layout()\n plt.show()\n\n return arg_16, arg_17"} +{"_id": "doc_3354", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"A wrapper around click.Func that disables any coloring being used\n if colors have been disabled.\n \"\"\"\n # If colors are disabled, remove any color or other style data\n # from keyword arguments.\n if not settings.color:\n for arg_2 in ('fg', 'bg', 'bold', 'blink'):\n arg_1.pop(arg_2, None)\n\n # Okay, now call click.Func normally.\n return click.Func(arg_0, **arg_1)"} +{"_id": "doc_3355", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3):\n \"\"\"Associate a notification template from this job template.\n\n =====API DOCS=====\n Associate a notification template from this job template.\n\n :param job_template: The job template to associate to.\n :type job_template: str\n :param notification_template: The notification template to be associated.\n :type notification_template: str\n :param status: type of notification this notification template should be associated to.\n :type status: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n return arg_0._assoc('notification_templates_%s' % arg_3,\n arg_1, arg_2)"} +{"_id": "doc_3356", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3):\n \"\"\"Disassociate a notification template from this job template.\n\n =====API DOCS=====\n Disassociate a notification template from this job template.\n\n :param job_template: The job template to disassociate from.\n :type job_template: str\n :param notification_template: The notification template to be disassociated.\n :type notification_template: str\n :param status: type of notification this notification template should be disassociated from.\n :type status: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n return arg_0._disassoc('notification_templates_%s' % arg_3,\n arg_1, arg_2)"} +{"_id": "doc_3357", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='', arg_3=None):\n \"\"\"Contact Tower and request a configuration update using this job template.\n\n =====API DOCS=====\n Contact Tower and request a provisioning Func using this job template.\n\n :param pk: Primary key of the job template to run provisioning Func against.\n :type pk: int\n :param host_config_key: Key string used to authenticate the Func host.\n :type host_config_key: str\n :param extra_vars: Extra variables that are passed to provisioning Func.\n :type extra_vars: array of str\n :returns: A dictionary of a single key \"changed\", which indicates whether the provisioning Func\n is successful.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n arg_4 = arg_0.endpoint + '%s/Func/' % arg_1\n if not arg_2:\n arg_2 = client.get(arg_4).json()['host_config_key']\n arg_5 = 
{'host_config_key': arg_2}\n if arg_3:\n arg_5['extra_vars'] = parser.process_extra_vars(list(arg_3), force_json=True)\n arg_6 = client.post(arg_4, data=arg_5, auth=None)\n if arg_6.status_code == 201:\n return {'changed': True}"} +{"_id": "doc_3358", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False):\n \"\"\"Decorator to aggregate unified_jt-related fields.\n\n Args:\n func: The CURD method to be decorated.\n is_create: Boolean flag showing whether this method is create.\n has_pk: Boolean flag showing whether this method uses pk as argument.\n\n Returns:\n A function with necessary click-related attributes whose keyworded\n arguments are aggregated.\n\n Raises:\n exc.UsageError: Either more than one unified jt fields are\n provided, or none is provided when is_create flag is set.\n \"\"\"\n def helper(arg_3, arg_4):\n \"\"\"The helper function preceding actual function that aggregates\n unified jt fields.\n \"\"\"\n arg_5 = None\n for arg_6 in UNIFIED_JT:\n if arg_3.get(arg_6, None) is not None:\n arg_7 = arg_3.pop(arg_6)\n if arg_5 is None:\n arg_5 = (arg_6, arg_7)\n else:\n raise exc.UsageError(\n 'More than one unified job template fields provided, '\n 'please tighten your criteria.'\n )\n if arg_5 is not None:\n arg_3['unified_job_template'] = arg_5[1]\n arg_4.identity = tuple(list(arg_4.identity) + ['unified_job_template'])\n return '/'.join([UNIFIED_JT[arg_5[0]],\n str(arg_5[1]), 'schedules/'])\n elif arg_1:\n raise exc.UsageError('You must provide exactly one unified job'\n ' template field during creation.')\n\n def decorator_without_pk(arg_4, *arg_9, **arg_3):\n arg_10 = arg_4.endpoint\n arg_11 = helper(arg_3, arg_4)\n if arg_1:\n arg_4.endpoint = arg_11\n arg_13 = arg_0(arg_4, *arg_9, **arg_3)\n arg_4.endpoint = arg_10\n return arg_13\n\n def decorator_with_pk(arg_4, arg_14=None, *arg_9, **arg_3):\n arg_10 = arg_4.endpoint\n arg_11 = helper(arg_3, arg_4)\n if arg_1:\n arg_4.endpoint = arg_11\n arg_13 = arg_0(arg_4, arg_14=arg_14, *arg_9, **arg_3)\n arg_4.endpoint = arg_10\n return arg_13\n\n arg_15 = decorator_with_pk if arg_2 else decorator_without_pk\n for arg_6 in CLICK_ATTRS:\n setattr(arg_15, arg_6, getattr(arg_0, arg_6, []))\n arg_15.__doc__ = arg_0.__doc__\n\n return arg_15"} +{"_id": "doc_3359", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4=True):\n \"\"\"\n Internal method that lies to our `monitor` method by returning\n a scorecard for the workflow job where the standard out\n would have been expected.\n \"\"\"\n arg_5 = get_resource('unified_job')\n # Filters\n # - limit search to jobs spawned as part of this workflow job\n # - order in the order in which they should add to the list\n # - only include final job states\n arg_6 = (('unified_job_node__workflow_job', arg_1),\n ('order_by', 'finished'),\n ('status__in', 'successful,failed,error'))\n arg_7 = arg_5.list(all_pages=True, query=arg_6)\n if arg_7['count'] == 0:\n return ''\n\n arg_8 = ResSubcommand(arg_5)._format_human(arg_7)\n arg_9 = arg_8.split('\\n')\n if not arg_4:\n arg_9 = arg_9[:-1]\n\n arg_10 = len(arg_9)\n arg_11 = arg_2\n if arg_2 is None:\n arg_11 = 0\n elif arg_2 > arg_10:\n arg_11 = arg_10\n\n arg_12 = arg_3\n if arg_3 is None or arg_3 > arg_10:\n arg_12 = arg_10\n\n arg_9 = arg_9[arg_11:arg_12]\n arg_8 = '\\n'.join(arg_9)\n if len(arg_9) > 0:\n arg_8 += '\\n'\n\n return arg_8"} +{"_id": "doc_3360", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Monkey-patch click's Func method to support option categorization.\n \"\"\"\n arg_3 = []\n 
arg_4 = []\n arg_5 = []\n arg_6 = []\n for arg_7 in arg_0.params:\n if arg_7.name in SETTINGS_PARMS:\n arg_8 = arg_4\n elif getattr(arg_7, 'help', None) and arg_7.help.startswith('[FIELD]'):\n arg_8 = arg_3\n arg_7.help = arg_7.help[len('[FIELD]'):]\n else:\n arg_8 = arg_5\n arg_10 = arg_7.get_help_record(arg_1)\n if arg_10 is None:\n continue\n else:\n arg_8.append(arg_10)\n\n if arg_0.add_help_option:\n arg_11 = arg_0.get_help_option_names(arg_1)\n if arg_11:\n arg_6.append([join_options(arg_11)[0], 'Show this message and exit.'])\n\n if arg_3:\n with arg_2.section('Field Options'):\n arg_2.write_dl(arg_3)\n if arg_5:\n with arg_2.section('Local Options'):\n arg_2.write_dl(arg_5)\n if arg_4:\n with arg_2.section('Global Options'):\n arg_2.write_dl(arg_4)\n if arg_6:\n with arg_2.section('Other Options'):\n arg_2.write_dl(arg_6)"} +{"_id": "doc_3361", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return one and exactly one object\n\n =====API DOCS=====\n Return one and exactly one Tower setting.\n\n :param pk: Primary key of the Tower setting to retrieve\n :type pk: int\n :returns: loaded JSON of the retrieved Tower setting object.\n :rtype: dict\n :raises tower_cli.exceptions.NotFound: When no specified Tower setting exists.\n\n =====API DOCS=====\n \"\"\"\n # The Tower API doesn't provide a mechanism for retrieving a single\n # setting value at a time, so fetch them all and filter\n try:\n return next(arg_2 for arg_2 in arg_0.list()['results'] if arg_2['id'] == arg_1)\n except StopIteration:\n raise exc.NotFound('The requested object could not be found.')"} +{"_id": "doc_3362", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Return one and exactly one object.\n\n Lookups may be through a primary key, specified as a positional argument, and/or through filters specified\n through keyword arguments.\n\n If the number of results does not equal one, raise an exception.\n\n =====API DOCS=====\n Retrieve one and exactly one object.\n\n :param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object\n if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n :returns: loaded JSON of the retrieved resource object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n if arg_2.pop('include_debug_header', True):\n debug.log('Getting the record.', header='details')\n arg_3 = arg_0.read(arg_1=arg_1, fail_on_no_results=True, fail_on_multiple_results=True, **arg_2)\n return arg_3['results'][0]"} +{"_id": "doc_3363", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Disassociate the `other` record from the `me` record.\"\"\"\n\n # Get the endpoint for foreign records within this object.\n arg_4 = arg_0.endpoint + '%d/%s/' % (arg_2, arg_1)\n\n # Attempt to determine whether the other record already is absent, for the \"changed\" moniker.\n arg_5 = client.get(arg_4, params={'id': arg_3}).json()\n if arg_5['count'] == 0:\n return {'changed': False}\n\n # Send a request removing the foreign record from this one.\n arg_5 = client.post(arg_4, data={'disassociate': True, 'id': arg_3})\n return {'changed': True}"} +{"_id": "doc_3364", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Copy an object.\n\n Only the ID is used for the lookup. 
All provided fields are used to override the old data from the\n copied resource.\n\n =====API DOCS=====\n Copy an object.\n\n :param pk: Primary key of the resource object to be copied\n :param new_name: The new name to give the resource if deep Funcing via the API\n :type pk: int\n :param `**kwargs`: Keyword arguments of fields whose given value will override the original value.\n :returns: loaded JSON of the copied new resource object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n arg_4 = arg_0.read(arg_1, fail_on_no_results=True, fail_on_multiple_results=True)\n arg_4 = arg_4['results'][0]\n # Remove default values (anything where the value is None).\n arg_0._pop_none(arg_3)\n\n arg_5 = Func(arg_4)\n arg_5.pop('id')\n arg_6 = arg_5['name'].split('@', 1)[0].strip()\n\n # Modify data to fit the call pattern of the tower-cli method\n for arg_7 in arg_0.fields:\n if arg_7.multiple and arg_7.name in arg_5:\n arg_5[arg_7.name] = (arg_5.get(arg_7.name),)\n\n if arg_2 is None:\n # Func client-side, the old mechanism\n arg_5['name'] = \"%s @ %s\" % (arg_6, time.strftime('%X'))\n arg_5.update(arg_3)\n\n return arg_0.write(create_on_missing=True, fail_on_found=True,\n **arg_5)\n else:\n # Func server-side, the new mechanism\n if arg_3:\n raise exc.TowerCLIError('Cannot override {} and also use --new-name.'.format(arg_3.keys()))\n arg_9 = '{}/{}/Func/'.format(arg_0.endpoint.strip('/'), arg_1)\n return client.post(arg_9, data={'name': arg_2}).json()"} +{"_id": "doc_3365", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=True):\n \"\"\"\n Internal utility function to return standard out. Requires the pk of a unified job.\n \"\"\"\n arg_5 = '%s%s/stdout/' % (arg_0.unified_job_type, arg_1)\n arg_6 = {'format': 'json', 'content_encoding': 'base64', 'content_format': 'ansi'}\n if arg_2:\n arg_6['start_line'] = arg_2\n if arg_3:\n arg_6['end_line'] = arg_3\n debug.log('Requesting a copy of job standard output', header='details')\n arg_7 = client.get(arg_5, params=arg_6).json()\n arg_8 = b64decode(arg_7['content'])\n return arg_8.decode('utf-8', 'replace')"} +{"_id": "doc_3366", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=arg_5.stdout, **arg_7):\n \"\"\"\n Print out the standard out of a unified job to the command line or output file.\n For Projects, print the standard out of most recent update.\n For Inventory Sources, print standard out of most recent sync.\n For Jobs, print the job's standard out.\n For Workflow Jobs, print a status table of its jobs.\n\n =====API DOCS=====\n Print out the standard out of a unified job to the command line or output file.\n For Projects, print the standard out of most recent update.\n For Inventory Sources, print standard out of most recent sync.\n For Jobs, print the job's standard out.\n For Workflow Jobs, print a status table of its jobs.\n\n :param pk: Primary key of the job resource object to be monitored.\n :type pk: int\n :param start_line: Line at which to start printing job output\n :param end_line: Line at which to end printing job output\n :param outfile: Alternative file than stdout to write job stdout to.\n :type outfile: file\n :param `**kwargs`: Keyword arguments used to look up job resource object to monitor if ``pk`` is\n not provided.\n :returns: A dictionary containing changed=False\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n\n # resource is Unified Job Template\n if arg_0.unified_job_type != arg_0.endpoint:\n arg_8 = arg_0.last_job_data(arg_1, **arg_7)\n arg_1 = arg_8['id']\n # 
resource is Unified Job, but pk not given\n elif not arg_1:\n arg_8 = arg_0.get(**arg_7)\n arg_1 = arg_8['id']\n\n arg_9 = arg_0.lookup_stdout(arg_1, arg_2, arg_3)\n arg_10 = False\n if isinstance(arg_4, six.string_types):\n arg_4 = open(arg_4, 'w')\n arg_10 = True\n if len(arg_9) > 0:\n click.echo(arg_9, nl=1, file=arg_4)\n if arg_10:\n arg_4.close()\n\n return {\"changed\": False}"} +{"_id": "doc_3367", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=0.5, arg_5=arg_6.stdout, **arg_8):\n \"\"\"\n Stream the standard output from a job, project update, or inventory udpate.\n\n =====API DOCS=====\n Stream the standard output from a job run to stdout.\n\n :param pk: Primary key of the job resource object to be Funced.\n :type pk: int\n :param parent_pk: Primary key of the unified job template resource object whose latest job run will be\n Funced if ``pk`` is not set.\n :type parent_pk: int\n :param timeout: Number in seconds after which this method will time out.\n :type timeout: float\n :param interval: Polling interval to refresh content from Tower.\n :type interval: float\n :param outfile: Alternative file than stdout to write job stdout to.\n :type outfile: file\n :param `**kwargs`: Keyword arguments used to look up job resource object to Func if ``pk`` is\n not provided.\n :returns: A dictionary combining the JSON output of the finished job resource object, as well as\n two extra fields: \"changed\", a flag indicating if the job resource object is finished\n as expected; \"id\", an integer which is the primary key of the job resource object being\n Funced.\n :rtype: dict\n :raises tower_cli.exceptions.Timeout: When Func time reaches time out.\n :raises tower_cli.exceptions.JobFailure: When the job being Funced runs into failure.\n\n =====API DOCS=====\n \"\"\"\n\n # If we do not have the unified job info, infer it from parent\n if arg_1 is None:\n arg_1 = arg_0.last_job_data(arg_2, **arg_8)['id']\n arg_9 = '%s%s/' % (arg_0.unified_job_type, arg_1)\n\n # Pause until job is in running state\n arg_0.wait(arg_1, exit_on=['running', 'successful'], arg_5=arg_5)\n\n # Loop initialization\n arg_10 = time.time()\n arg_11 = 0\n arg_12 = client.get(arg_9).json()\n\n click.echo('\\033[0;91m------Starting Standard Out Stream------\\033[0m', nl=2, file=arg_5)\n\n # Poll the Ansible Tower instance for status and content, and print standard out to the out file\n while not arg_12['failed'] and arg_12['status'] != 'successful':\n\n arg_12 = client.get(arg_9).json()\n\n # Put the process to sleep briefly.\n time.sleep(arg_4)\n\n # Make request to get standard out\n arg_13 = arg_0.lookup_stdout(arg_1, arg_11, full=False)\n\n # In the first moments of running the job, the standard out\n # may not be available yet\n if not arg_13.startswith(\"Waiting for results\"):\n arg_14 = len(arg_13.splitlines())\n arg_11 += arg_14\n click.echo(arg_13, nl=0, file=arg_5)\n\n if arg_3 and time.time() - arg_10 > arg_3:\n raise exc.Timeout('Monitoring aborted due to timeout.')\n\n # Special final line for closure with workflow jobs\n if arg_0.endpoint == '/workflow_jobs/':\n click.echo(arg_0.lookup_stdout(arg_1, arg_11, full=True), nl=1)\n\n click.echo('\\033[0;91m------End of Standard Out Stream--------\\033[0m', nl=2, file=arg_5)\n\n if arg_12['failed']:\n raise exc.JobFailure('Job failed.')\n\n # Return the job ID and other response data\n arg_15 = OrderedDict((('changed', True), ('id', arg_1)))\n arg_15.update(arg_12)\n # Make sure to return ID of resource and not update number relevant for 
project creation and update\n if arg_2:\n arg_15['id'] = arg_2\n else:\n arg_15['id'] = arg_1\n return arg_15"} +{"_id": "doc_3368", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, **arg_3):\n \"\"\"Print the current job Func. This is used to check a running job. You can look up the job with\n the same parameters used for a get request.\n\n =====API DOCS=====\n Retrieve the current job Func.\n\n :param pk: Primary key of the resource to retrieve Func from.\n :type pk: int\n :param detail: Flag that if set, return the full JSON of the job resource rather than a Func summary.\n :type detail: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve Func from if ``pk``\n is not provided.\n :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n only \"elapsed\", \"failed\" and \"Func\" fields of the unified job if ``detail`` flag is off.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n # Remove default values (anything where the value is None).\n arg_0._pop_none(arg_3)\n\n # Search for the record if pk not given\n if not arg_1:\n arg_4 = arg_0.get(include_debug_header=True, **arg_3)\n # Get the job from Ansible Tower if pk given\n else:\n debug.log('Asking for job Func.', header='details')\n arg_5 = '%s%s/' % (arg_0.endpoint, arg_1)\n arg_4 = client.get(arg_5).json()\n\n # In most cases, we probably only want to know the Func of the job and the amount of time elapsed.\n # However, if we were asked for verbose information, provide it.\n if arg_2:\n return arg_4\n\n # Print just the information we need.\n return {\n 'elapsed': arg_4['elapsed'],\n 'failed': arg_4['failed'],\n 'Func': arg_4['Func'],\n }"} +{"_id": "doc_3369", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, **arg_3):\n \"\"\"Cancel a currently running job.\n\n Fails with a non-zero exit status if the job cannot be Funced.\n You must provide either a pk or parameters in the job's identity.\n\n =====API DOCS=====\n Cancel a currently running job.\n\n :param pk: Primary key of the job resource to restart.\n :type pk: int\n :param fail_if_not_running: Flag that if set, raise exception if the job resource cannot be Funced.\n :type fail_if_not_running: bool\n :param `**kwargs`: Keyword arguments used to look up job resource object to restart if ``pk`` is not\n provided.\n :returns: A dictionary of two keys: \"status\", which is \"Funced\", and \"changed\", which indicates if\n the job resource has been successfully Funced.\n :rtype: dict\n :raises tower_cli.exceptions.TowerCLIError: When the job resource cannot be Funced and\n ``fail_if_not_running`` flag is on.\n =====API DOCS=====\n \"\"\"\n # Search for the record if pk not given\n if not arg_1:\n arg_4 = arg_0.get(**arg_3)\n arg_1 = arg_4['id']\n\n arg_5 = '%s%s/Func/' % (arg_0.endpoint, arg_1)\n # Attempt to Func the job.\n try:\n client.post(arg_5)\n arg_6 = True\n except exc.MethodNotAllowed:\n arg_6 = False\n if arg_2:\n raise exc.TowerCLIError('Job not running.')\n\n # Return a success.\n return {'status': 'Funced', 'changed': arg_6}"} +{"_id": "doc_3370", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Relaunch a stopped job.\n\n Fails with a non-zero exit status if the job cannot be Funced.\n You must provide either a pk or parameters in the job's identity.\n\n =====API DOCS=====\n Relaunch a stopped job resource.\n\n :param pk: Primary key of the job resource to Func.\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up job 
resource object to Func if ``pk`` is not\n provided.\n :returns: A dictionary combining the JSON output of the Funced job resource object, as well\n as an extra field \"changed\", a flag indicating if the job resource object is status-changed\n as expected.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n # Search for the record if pk not given\n if not arg_1:\n arg_3 = arg_0.get(**arg_2)\n arg_1 = arg_3['id']\n\n arg_4 = '%s%s/Func/' % (arg_0.endpoint, arg_1)\n arg_5 = {}\n # Attempt to Func the job.\n arg_6 = {}\n try:\n arg_7 = client.post(arg_4, arg_5=arg_5).json()\n if 'id' in arg_7:\n arg_6.update(arg_7)\n arg_6['changed'] = True\n except exc.MethodNotAllowed:\n arg_6['changed'] = False\n\n # Return the answer.\n return arg_6"} +{"_id": "doc_3371", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Update all related inventory sources of the given inventory.\n\n Note global option --format is not available here, as the output would always be JSON-formatted.\n\n =====API DOCS=====\n Update all related inventory sources of the given inventory.\n\n :param pk: Primary key of the given inventory.\n :type pk: int\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object of update status of the given inventory.\n :rtype: dict\n =====API DOCS=====\n \"\"\"\n arg_3 = arg_0.get(arg_1=arg_1, **arg_2)\n arg_4 = arg_0.endpoint + '%d/%s/' % (arg_3['id'], 'update_inventory_sources')\n return client.post(arg_4, data={}).json()"} +{"_id": "doc_3372", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n '''\n Do extra processing so we can display the actor field as\n a top-level field\n '''\n if 'actor' in arg_2:\n arg_2['actor'] = arg_2.pop('actor')\n arg_3 = super(Resource, arg_0).Func(*arg_1, **arg_2)\n if 'results' in arg_3:\n for arg_4 in arg_3['results']:\n arg_0._promote_actor(arg_4)\n else:\n arg_0._promote_actor(arg_4)\n return arg_3"} +{"_id": "doc_3373", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=arg_3.stderr, arg_5=1, **arg_6):\n \"\"\"Log the given output to stderr if and only if we are in\n verbose mode.\n\n If we are not in verbose mode, this is a no-op.\n \"\"\"\n # Sanity check: If we are not in verbose mode, this is a no-op.\n if not settings.verbose:\n return\n\n # Construct multi-line string to stderr if header is provided.\n if arg_1:\n arg_7 = arg_0.split(' ')\n arg_8 = []\n arg_7.insert(0, '%s:' % arg_1.upper())\n arg_9 = 0\n while arg_9 < len(arg_7):\n arg_10 = ['***']\n arg_11 = 3\n while arg_11 <= 79:\n arg_11 += len(arg_7[arg_9]) + 1\n if arg_11 <= 79:\n arg_10.append(arg_7[arg_9])\n arg_9 += 1\n if arg_9 == len(arg_7):\n break\n # Handle corner case of extra-long word longer than 75 characters.\n if len(arg_10) == 1:\n arg_10.append(arg_7[arg_9])\n arg_9 += 1\n if arg_9 != len(arg_7):\n arg_11 -= len(arg_7[arg_9]) + 1\n arg_10.append('*' * (78 - arg_11))\n arg_8.append(' '.join(arg_10))\n arg_0 = '\\n'.join(arg_8)\n arg_12 = len(arg_8)\n else:\n arg_12 = 1\n\n # If `nl` is an int greater than the number of rows of a message,\n # add the appropriate newlines to the output.\n if isinstance(arg_5, int) and arg_5 > arg_12:\n arg_0 += '\\n' * (arg_5 - arg_12)\n\n # Output to stderr.\n return secho(arg_0, arg_2=arg_2, **arg_6)"} +{"_id": "doc_3374", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Hook for ResourceMeta class to call when initializing model class.\n Saves fields obtained from resource class backlinks\n '''\n arg_0.relationship = arg_2\n 
arg_0._set_method_names(arg_3=arg_2)\n if arg_0.res_name is None:\n arg_0.res_name = grammar.singularize(arg_1.get('endpoint', 'unknown').strip('/'))"} +{"_id": "doc_3375", "title": "", "text": "def Func(arg_0):\n '''\n Returns a callable which becomes the associate or disassociate\n method for the related field.\n Method can be overridden to add additional functionality, but\n `_produce_method` may also need to be subclassed to decorate\n it appropriately.\n '''\n\n def method(arg_1, **arg_2):\n arg_3 = arg_2.get(method._res_name)\n arg_4 = arg_2.get(method._other_name)\n arg_5 = getattr(arg_1, method._internal_name)\n return arg_5(method._relationship, arg_3, arg_4)\n\n return method"} +{"_id": "doc_3376", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False, **arg_3):\n \"\"\"Create a new label.\n\n There are two types of label creation: isolatedly creating a new label and creating a new label under\n a job template. Here the two types are discriminated by whether to provide --job-template option.\n\n Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op (unless\n `force_on_exists` is set) but do not fail (unless `fail_on_found` is set).\n\n =====API DOCS=====\n Create a label.\n\n :param job_template: Primary key or name of the job template for the Funcd label to associate to.\n :type job_template: str\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to Func the\n resource object.\n :returns: A dictionary combining the JSON output of the Funcd resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is Funcd successfully; \"id\", an integer which\n is the primary key of the Funcd object.\n :rtype: dict\n :raises tower_cli.exceptions.TowerCLIError: When the label already exists and ``fail_on_found`` flag is on.\n\n =====API DOCS=====\n \"\"\"\n arg_4 = arg_3.pop('job_template', None)\n arg_5 = arg_0.endpoint\n if arg_4 is not None:\n arg_6 = get_resource('job_template')\n arg_6.get(pk=arg_4)\n try:\n arg_7 = arg_0.get(name=arg_3.get('name', None), organization=arg_3.get('organization', None))['id']\n except exc.NotFound:\n pass\n else:\n if arg_1:\n raise exc.TowerCLIError('Label already exists and fail-on-found is switched on. 
Please use'\n ' \"associate_label\" method of job_template instead.')\n else:\n debug.log('Label already exists, associating with job template.', header='details')\n return arg_6.associate_label(job_template=arg_4, label=arg_7)\n arg_0.endpoint = '/job_templates/%d/labels/' % arg_4\n arg_9 = super(Resource, arg_0).Func(arg_1=arg_1, arg_2=arg_2, **arg_3)\n arg_0.endpoint = arg_5\n return arg_9"} +{"_id": "doc_3377", "title": "", "text": "def Func(arg_0):\n \"\"\"Echo a setting to the CLI.\"\"\"\n arg_1 = getattr(settings, arg_0)\n secho('%s: ' % arg_0, fg='magenta', bold=True, nl=False)\n secho(\n six.text_type(arg_1),\n bold=True,\n fg='white' if isinstance(arg_1, six.text_type) else 'cyan',\n )"} +{"_id": "doc_3378", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2='user', arg_3=False, arg_4=False):\n \"\"\"Read or write tower-cli Funcuration.\n\n `tower Func` saves the given setting to the appropriate Tower CLI;\n either the user's ~/.tower_cli.cfg file, or the /etc/tower/tower_cli.cfg\n file if --global is used.\n\n Writing to /etc/tower/tower_cli.cfg is likely to require heightened\n permissions (in other words, sudo).\n \"\"\"\n # If the old-style `global_` option is set, issue a deprecation notice.\n if arg_3:\n arg_2 = 'global'\n warnings.warn('The `--global` option is deprecated and will be '\n 'removed. Use `--scope=global` to get the same effect.',\n DeprecationWarning)\n\n # If no key was provided, print out the current Funcuration\n # in play.\n if not arg_0:\n arg_5 = set()\n arg_6 = {\n 'runtime': 'Runtime options.',\n 'environment': 'Options from environment variables.',\n 'local': 'Local options (set with `tower-cli Func '\n '--scope=local`; stored in .tower_cli.cfg of this '\n 'directory or a parent)',\n 'user': 'User options (set with `tower-cli Func`; stored in '\n '~/.tower_cli.cfg).',\n 'global': 'Global options (set with `tower-cli Func '\n '--scope=global`, stored in /etc/tower/tower_cli.cfg).',\n 'defaults': 'Defaults.',\n }\n\n # Iterate over each parser (English: location we can get settings from)\n # and print any settings that we haven't already seen.\n #\n # We iterate over settings from highest precedence to lowest, so any\n # seen settings are overridden by the version we iterated over already.\n click.echo('')\n for arg_7, arg_8 in zip(settings._parser_names, settings._parsers):\n # Determine if we're going to see any options in this\n # parser that get echoed.\n arg_9 = False\n for arg_10 in arg_8.options('general'):\n if arg_10 in arg_5:\n continue\n arg_9 = True\n\n # Print a segment header\n if arg_9:\n secho('# %s' % arg_6[arg_7], fg='green', bold=True)\n\n # Iterate over each option in the parser and, if we haven't\n # already seen an option at higher precedence, print it.\n for arg_10 in arg_8.options('general'):\n if arg_10 in arg_5:\n continue\n _echo_setting(arg_10)\n arg_5.add(arg_10)\n\n # Print a nice newline, for formatting.\n if arg_9:\n click.echo('')\n return\n\n # Sanity check: Is this a valid Funcuration option? If it's not\n # a key we recognize, abort.\n if not hasattr(settings, arg_0):\n raise exc.TowerCLIError('Invalid Funcuration option \"%s\".' 
% arg_0)\n\n # Sanity check: The combination of a value and --unset makes no\n # sense.\n if arg_1 and arg_4:\n raise exc.UsageError('Cannot provide both a value and --unset.')\n\n # If a key was provided but no value was provided, then just\n # print the current value for that key.\n if arg_0 and not arg_1 and not arg_4:\n _echo_setting(arg_0)\n return\n\n # Okay, so we're *writing* a key. Let's do this.\n # First, we need the appropriate file.\n arg_11 = os.path.expanduser('~/.tower_cli.cfg')\n if arg_2 == 'global':\n if not os.path.isdir('/etc/tower/'):\n raise exc.TowerCLIError('/etc/tower/ does not exist, and this '\n 'command cowardly declines to create it.')\n arg_11 = '/etc/tower/tower_cli.cfg'\n elif arg_2 == 'local':\n arg_11 = '.tower_cli.cfg'\n\n # Read in the appropriate Func file, write this value, and save\n # the result back to the file.\n arg_8 = Parser()\n arg_8.add_section('general')\n arg_8.read(arg_11)\n if arg_4:\n arg_8.remove_option('general', arg_0)\n else:\n arg_8.set('general', arg_0, arg_1)\n with open(arg_11, 'w') as Func_file:\n arg_8.write(Func_file)\n\n # Give rw permissions to user only fix for issue number 48\n try:\n os.chmod(arg_11, stat.S_IRUSR | stat.S_IWUSR)\n except Exception as e:\n warnings.warn(\n 'Unable to set permissions on {0} - {1} '.format(arg_11, e),\n UserWarning\n )\n click.echo('Configuration updated successfully.')"} +{"_id": "doc_3379", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None, arg_7=None, arg_8=None, arg_9=None,\n arg_10=None, arg_11=None):\n \"\"\"Export assets from Tower.\n\n 'tower Func' exports one or more assets from a Tower instance\n\n For all of the possible assets types the TEXT can either be the assets name\n (or username for the case of a user) or the keyword all. Specifying all\n will export all of the assets of that type.\n\n \"\"\"\n\n from tower_cli.cli.transfer.Func import Receiver\n arg_12 = Receiver()\n arg_13 = {}\n for arg_14 in SEND_ORDER:\n arg_13[arg_14] = locals()[arg_14]\n arg_12.Func(arg_11=arg_11, asset_input=arg_13)"} +{"_id": "doc_3380", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, **arg_3):\n \"\"\"Modify an already existing.\n\n To edit the project's organizations, see help for organizations.\n\n Fields in the resource's `identity` tuple can be used in lieu of a\n primary key for a lookup; in such a case, only other fields are\n written.\n\n To Func unique fields, you must use the primary key for the lookup.\n\n =====API DOCS=====\n Modify an already existing project.\n\n :param pk: Primary key of the resource to be modified.\n :type pk: int\n :param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n matching the appropriate unique criteria is not found.\n :type create_on_missing: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to Func the\n resource object. if ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n also in resource's identity will be used to lookup existing reosource.\n :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n is the primary key of the updated object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n # Associated with issue #52, the organization can't be modified\n # with the 'Func' command. 
This would create confusion about\n # whether its flag is an identifier versus a field to Func.\n if 'job_timeout' in arg_3 and 'timeout' not in arg_3:\n arg_3['timeout'] = arg_3.pop('job_timeout')\n return super(Resource, arg_0).write(\n arg_1, arg_2=arg_2,\n force_on_exists=True, **arg_3\n )"} +{"_id": "doc_3381", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, **arg_3):\n \"\"\"Print the Func of the most recent update.\n\n =====API DOCS=====\n Print the Func of the most recent update.\n\n :param pk: Primary key of the resource to retrieve Func from.\n :type pk: int\n :param detail: Flag that if set, return the full JSON of the job resource rather than a Func summary.\n :type detail: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve Func from if ``pk``\n is not provided.\n :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n only \"elapsed\", \"failed\" and \"Func\" fields of the unified job if ``detail`` flag is off.\n :rtype: dict\n =====API DOCS=====\n \"\"\"\n # Obtain the most recent project update\n arg_4 = arg_0.last_job_data(arg_1, **arg_3)\n\n # In most cases, we probably only want to know the Func of the job\n # and the amount of time elapsed. However, if we were asked for\n # verbose information, provide it.\n if arg_2:\n return arg_4\n\n # Print just the information we need.\n return {\n 'elapsed': arg_4['elapsed'],\n 'failed': arg_4['failed'],\n 'Func': arg_4['Func'],\n }"} +{"_id": "doc_3382", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Match against the appropriate choice value using the superclass\n implementation, and then return the actual choice.\n \"\"\"\n arg_4 = super(MappedChoice, arg_0).Func(arg_1, arg_2, arg_3)\n arg_5 = arg_0.choices.index(arg_4)\n return arg_0.actual_choices[arg_5]"} +{"_id": "doc_3383", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Return the appropriate integer value. If a non-integer is\n provided, attempt a name-based lookup and return the primary key.\n \"\"\"\n arg_4 = tower_cli.get_resource(arg_0.resource_name)\n\n # Ensure that None is passed through without trying to\n # do anything.\n if arg_1 is None:\n return None\n\n # If we were already given an integer, do nothing.\n # This ensures that the Func method is idempotent.\n if isinstance(arg_1, int):\n return arg_1\n\n # Do we have a string that contains only digits?\n # If so, then Func it to an integer and return it.\n if re.match(r'^[\\d]+$', arg_1):\n return int(arg_1)\n\n # Special case to allow disassociations\n if arg_1 == 'null':\n return arg_1\n\n # Okay, we have a string. Try to do a name-based lookup on the\n # resource, and return back the ID that we get from that.\n #\n # This has the chance of erroring out, which is fine.\n try:\n debug.log('The %s field is given as a name; '\n 'looking it up.' % arg_2.name, header='details')\n arg_5 = {arg_4.identity[-1]: arg_1}\n arg_6 = arg_4.get(**arg_5)\n except exc.MultipleResults:\n raise exc.MultipleRelatedError(\n 'Cannot look up {0} exclusively by name, because multiple {0} '\n 'objects exist with that name.\\n'\n 'Please send an ID. You can get the ID for the {0} you want '\n 'with:\\n'\n ' tower-cli {0} list --name \"{1}\"'.format(arg_0.resource_name,\n arg_1),\n )\n except exc.TowerCLIError as ex:\n raise exc.RelatedError('Could not get %s. %s' %\n (arg_0.resource_name, str(ex)))\n\n # Done! 
Return the ID.\n return arg_6['id']"} +{"_id": "doc_3384", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Remove a failure node link.\n The resulatant 2 nodes will both become root nodes.\n\n =====API DOCS=====\n Remove a failure node link.\n\n :param parent: Primary key of parent node to disassociate failure node from.\n :type parent: int\n :param child: Primary key of child node to be disassociated.\n :type child: int\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n return arg_0._disassoc(\n arg_0._forward_rel_name('failure'), arg_1, arg_2)"} +{"_id": "doc_3385", "title": "", "text": "def Func(arg_0, arg_1, arg_2=[]):\n \"\"\"\n Converts a set of CLI input arguments, `in_data`, into\n request data and an endpoint that can be used to look\n up a role or list of roles.\n\n Also changes the format of `type` in data to what the server\n expects for the role model, as it exists in the database.\n \"\"\"\n arg_3, arg_4, arg_5, arg_6 = arg_0.obj_res(arg_1, fail_on=[])\n arg_7 = {}\n if 'obj' in arg_2:\n arg_3 = None\n if 'res' in arg_2:\n arg_5 = None\n # Input fields are not actually present on role model, and all have\n # to be managed as individual special-cases\n if arg_3 and arg_4 == 'user':\n arg_7['members__in'] = arg_3\n if arg_3 and arg_4 == 'team':\n arg_8 = '%s/%s/roles/' % (grammar.pluralize(arg_4), arg_3)\n if arg_5 is not None:\n # For teams, this is the best lookup we can do\n # without making the additional request for its member_role\n arg_7['object_id'] = arg_5\n elif arg_5:\n arg_8 = '%s/%s/object_roles/' % (grammar.pluralize(arg_6), arg_5)\n else:\n arg_8 = '/roles/'\n if arg_1.get('type', False):\n arg_7['role_field'] = '%s_role' % arg_1['type'].lower()\n # Add back fields unrelated to role lookup, such as all_pages\n for arg_9, arg_10 in arg_1.items():\n if arg_9 not in RESOURCE_FIELDS and arg_9 not in ['type', 'user', 'team']:\n arg_7[arg_9] = arg_10\n return arg_7, arg_8"} +{"_id": "doc_3386", "title": "", "text": "def Func(arg_0, arg_1=[], arg_2=[]):\n \"\"\"Add or remove columns from the output.\"\"\"\n for arg_3 in range(len(arg_0.fields)):\n if arg_0.fields[arg_3].name in arg_1:\n arg_0.fields[arg_3].display = True\n elif arg_0.fields[arg_3].name in arg_2:\n arg_0.fields[arg_3].display = False"} +{"_id": "doc_3387", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n \"\"\"Populates columns and sets display attribute as needed.\n Operates on data.\"\"\"\n if settings.format != 'human':\n return # This is only used for human format\n if arg_3:\n arg_4, arg_5, arg_6, arg_7 = arg_0.obj_res(arg_2)\n arg_1['type'] = arg_2['type']\n arg_1[arg_5] = arg_4\n arg_1[arg_7] = arg_6\n arg_0.set_display_columns(\n set_false=['team' if arg_5 == 'user' else 'user'],\n set_true=['target_team' if arg_7 == 'team' else arg_7])\n else:\n arg_0.set_display_columns(\n set_false=['user', 'team'],\n set_true=['resource_name', 'resource_type'])\n if 'results' in arg_1:\n for arg_8 in range(len(arg_1['results'])):\n arg_0.populate_resource_columns(arg_1['results'][arg_8])\n else:\n arg_0.populate_resource_columns(arg_1)"} +{"_id": "doc_3388", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Return a Func of roles.\n\n =====API DOCS=====\n Retrieve a Func of objects.\n\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. 
Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: Func\n :param `**kwargs`: Keyword arguments Func of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n arg_2, arg_0.endpoint = arg_0.data_endpoint(arg_1)\n arg_4 = super(Resource, arg_0).Func(**arg_2)\n\n # Change display settings and data format for human consumption\n arg_0.configure_display(arg_4)\n return arg_4"} +{"_id": "doc_3389", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Get information about a role.\n\n =====API DOCS=====\n Retrieve one and exactly one object.\n\n :param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object\n if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n :returns: loaded JSON of the retrieved resource object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n if arg_2.pop('include_debug_header', True):\n debug.log('Getting the role record.', header='details')\n arg_3, arg_0.endpoint = arg_0.data_endpoint(arg_2)\n arg_5 = arg_0.read(arg_1=arg_1, fail_on_no_results=True,\n fail_on_multiple_results=True, **arg_3)\n arg_6 = arg_5['results'][0]\n arg_0.configure_display(arg_6)\n return arg_6"} +{"_id": "doc_3390", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Investigate two lists of workflow TreeNodes and categorize them.\n\n There will be three types of nodes after categorization:\n 1. Nodes that only exists in the new list. These nodes will later be\n created recursively.\n 2. Nodes that only exists in the old list. These nodes will later be\n deleted recursively.\n 3. Node pairs that makes an exact match. These nodes will be further\n investigated.\n\n Corresponding nodes of old and new lists will be distinguished by their\n unified_job_template value. A special case is that both the old and the new\n lists contain one type of node, say A, and at least one of them contains\n duplicates. 
In this case all A nodes in the old list will be categorized as\n to-be-deleted and all A nodes in the new list will be categorized as\n to-be-created.\n '''\n arg_2 = []\n arg_3 = []\n arg_4 = []\n arg_5 = {}\n arg_6 = {}\n for arg_7 in arg_0:\n arg_5.setdefault(arg_7.unified_job_template, [])\n arg_5[arg_7.unified_job_template].append(arg_7)\n for arg_7 in arg_1:\n arg_6.setdefault(arg_7.unified_job_template, [])\n arg_6[arg_7.unified_job_template].append(arg_7)\n for arg_8 in arg_5:\n if arg_8 not in arg_6:\n arg_3.extend(arg_5[arg_8])\n continue\n arg_9 = arg_5[arg_8]\n arg_10 = arg_6.pop(arg_8)\n if len(arg_9) == 1 and len(arg_10) == 1:\n arg_4.append((arg_9[0], arg_10[0]))\n else:\n arg_3.extend(arg_9)\n arg_2.extend(arg_10)\n for arg_11 in arg_6.values():\n arg_2.extend(arg_11)\n return arg_2, arg_3, arg_4"} +{"_id": "doc_3391", "title": "", "text": "def Func(arg_0):\n '''\n Takes the list results from the API in `node_results` and\n translates this data into a dictionary organized in a\n human-readable heirarchial structure\n '''\n # Build list address translation, and create backlink lists\n arg_1 = {}\n for arg_2, arg_3 in enumerate(arg_0):\n for arg_4 in ['success', 'failure', 'always']:\n arg_3['{0}_backlinks'.format(arg_4)] = []\n arg_1[arg_3['id']] = arg_2\n\n # Populate backlink lists\n for arg_3 in arg_0:\n for arg_4 in ['success', 'failure', 'always']:\n for arg_6 in arg_3['{0}_nodes'.format(arg_4)]:\n arg_7 = arg_1[arg_6]\n arg_0[arg_7]['{0}_backlinks'.format(arg_4)].append(\n arg_3['id'])\n\n # Find the root nodes\n arg_8 = []\n for arg_3 in arg_0:\n arg_9 = True\n for arg_4 in ['success', 'failure', 'always']:\n if arg_3['{0}_backlinks'.format(arg_4)] != []:\n arg_9 = False\n break\n if arg_9:\n arg_8.append(arg_3['id'])\n\n # Create network dictionary recursively from root nodes\n def branch_schema(arg_10):\n arg_2 = arg_1[arg_10]\n arg_11 = arg_0[arg_2]\n arg_12 = {\"id\": arg_10}\n for arg_13 in NODE_STANDARD_FIELDS:\n arg_14 = arg_11.get(arg_13, None)\n if arg_14 is not None:\n if arg_13 == 'unified_job_template':\n arg_15 = arg_11['summary_fields'][\n 'unified_job_template']['unified_job_type']\n arg_16 = JOB_TYPES[arg_15]\n arg_12[arg_16] = arg_14\n else:\n arg_12[arg_13] = arg_14\n for arg_4 in ['success', 'failure', 'always']:\n arg_17 = arg_11['{0}_nodes'.format(arg_4)]\n if len(arg_17) == 0:\n continue\n arg_18 = '{0}_nodes'.format(arg_4)\n arg_12[arg_18] = []\n for arg_6 in arg_17:\n arg_12[arg_18].append(\n branch_schema(arg_6))\n return arg_12\n\n arg_19 = []\n for arg_20 in arg_8:\n arg_19.append(branch_schema(arg_20))\n return arg_19"} +{"_id": "doc_3392", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a dictionary that represents the node network of the\n workflow job template\n \"\"\"\n arg_2 = get_resource('node')\n arg_3 = arg_2.list(workflow_job_template=arg_1,\n all_pages=True)['results']\n return arg_0._workflow_node_structure(arg_3)"} +{"_id": "doc_3393", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3):\n \"\"\"Disassociate a notification template from this workflow.\n\n =====API DOCS=====\n Disassociate a notification template from this workflow job template.\n\n :param job_template: The workflow job template to disassociate from.\n :type job_template: str\n :param notification_template: The notification template to be disassociated.\n :type notification_template: str\n :param status: type of notification this notification template should be disassociated from.\n :type status: str\n :returns: Dictionary of only one 
key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n return arg_0._disassoc('notification_templates_%s' % arg_3,\n arg_1, arg_2)"} +{"_id": "doc_3394", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False, **arg_3):\n \"\"\"Create a group.\n\n =====API DOCS=====\n Create a group.\n\n :param parent: Primary key or name of the group which will be the parent of Funcd group.\n :type parent: str\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to Func the\n resource object.\n :returns: A dictionary combining the JSON output of the Funcd resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is Funcd successfully; \"id\", an integer which\n is the primary key of the Funcd object.\n :rtype: dict\n :raises tower_cli.exceptions.UsageError: When inventory is not provided in ``**kwargs`` and ``parent``\n is not provided.\n\n =====API DOCS=====\n \"\"\"\n if arg_3.get('parent', None):\n arg_4 = arg_0.set_child_endpoint(parent=arg_3['parent'], inventory=arg_3.get('inventory', None))\n arg_3['inventory'] = arg_4['inventory']\n elif 'inventory' not in arg_3:\n raise exc.UsageError('To Func a group, you must provide a parent inventory or parent group.')\n return super(Resource, arg_0).Func(arg_1=arg_1, arg_2=arg_2, **arg_3)"} +{"_id": "doc_3395", "title": "", "text": "def Func(arg_0, arg_1=False, **arg_2):\n \"\"\"Return a Func of groups.\n\n =====API DOCS=====\n Retrieve a Func of groups.\n\n :param root: Flag that if set, only root groups of a specific inventory will be Funced.\n :type root: bool\n :param parent: Primary key or name of the group whose child groups will be Funced.\n :type parent: str\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. 
Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: Func\n :param `**kwargs`: Keyword arguments Func of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n :raises tower_cli.exceptions.UsageError: When ``root`` flag is on and ``inventory`` is not present in\n ``**kwargs``.\n\n =====API DOCS=====\n \"\"\"\n # Option to Func children of a parent group\n if arg_2.get('parent', None):\n arg_0.set_child_endpoint(parent=arg_2['parent'], inventory=arg_2.get('inventory', None))\n arg_2.pop('parent')\n # Sanity check: If we got `--root` and no inventory, that's an error.\n if arg_1 and not arg_2.get('inventory', None):\n raise exc.UsageError('The --root option requires specifying an inventory also.')\n # If we are tasked with getting root groups, do that.\n if arg_1:\n arg_3 = arg_2['inventory']\n arg_4 = client.get('/inventories/%d/root_groups/' % arg_3)\n return arg_4.json()\n # Return the superclass implementation.\n return super(Resource, arg_0).Func(**arg_2)"} +{"_id": "doc_3396", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Associate this group with the specified group.\n\n =====API DOCS=====\n Associate this group with the specified group.\n\n :param group: Primary key or name of the child group to Func.\n :type group: str\n :param parent: Primary key or name of the parent group to Func to.\n :type parent: str\n :param inventory: Primary key or name of the inventory the association should happen in.\n :type inventory: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n arg_4 = arg_0.lookup_with_inventory(arg_2, arg_3.get('inventory', None))['id']\n arg_5 = arg_0.lookup_with_inventory(arg_1, arg_3.get('inventory', None))['id']\n return arg_0._assoc('children', arg_4, arg_5)"} +{"_id": "doc_3397", "title": "", "text": "def Func(arg_0):\n \"\"\"Similar to the Ansible function of the same name, parses file\n with a key=value pattern and stores information in a dictionary,\n but not as fully featured as the corresponding Ansible code.\"\"\"\n arg_1 = {}\n\n # Output updates dictionaries, so return empty one if no vals in\n if arg_0 is None:\n return {}\n\n # Python 2.6 / shlex has problems handling unicode, this is a fix\n arg_2 = False\n if sys.version_info < (2, 7) and '\\x00' in shlex.split(u'a')[0]:\n arg_2 = True\n\n # Also hedge against Click library giving non-string type\n arg_3 = False\n if arg_2 or not isinstance(arg_0, str):\n if isinstance(arg_0, six.text_type):\n arg_0 = arg_0.encode('UTF-8')\n arg_3 = True\n else:\n arg_0 = str(arg_0)\n\n # Use shlex library to split string by quotes, whitespace, etc.\n for arg_4 in shlex.split(arg_0):\n\n # Second part of fix to avoid passing shlex unicode in py2.6\n if (arg_3):\n arg_4 = arg_4.decode('UTF-8')\n if arg_2:\n arg_4 = six.text_type(arg_4)\n # Look for key=value pattern, if not, process as raw parameter\n if '=' in arg_4:\n (arg_5, arg_6) = arg_4.split('=', 1)\n # If '=' are unbalanced, then stop and warn user\n if len(arg_5) == 0 or len(arg_6) == 0:\n raise Exception\n # If possible, convert into python data type, for instance \"5\"->5\n try:\n arg_1[arg_5] = ast.literal_eval(arg_6)\n except Exception:\n arg_1[arg_5] = arg_6\n else:\n # scenario where --extra-vars=42, will throw error\n 
raise Exception\n\n return arg_1"} +{"_id": "doc_3398", "title": "", "text": "def Func(arg_0, arg_1=arg_2.Dumper, **arg_3):\n \"\"\"Expand PyYAML's built-in dumper to support parsing OrderedDict. Return\n a string as parse result of the original data structure, which includes\n OrderedDict.\n\n Args:\n data: the data structure to be dumped(parsed) which is supposed to\n contain OrderedDict.\n Dumper: the yaml serializer to be expanded and used.\n kws: extra key-value arguments to be passed to yaml.dump.\n \"\"\"\n class OrderedDumper(arg_1):\n pass\n\n def _dict_representer(arg_4, arg_0):\n return arg_4.represent_mapping(\n arg_2.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n arg_0.items())\n OrderedDumper.add_representer(OrderedDict,\n _dict_representer)\n return arg_2.dump(arg_0, None, OrderedDumper, **arg_3)"} +{"_id": "doc_3399", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove None-valued and configuration-related keyworded arguments\n \"\"\"\n arg_0._pop_none(arg_1)\n arg_2 = {}\n for arg_3 in Resource.config_fields:\n if arg_3 in arg_1:\n arg_2[arg_3] = arg_1.pop(arg_3)\n if arg_3 in Resource.json_fields:\n\n # If result[field] is not a string we can continue on\n if not isinstance(arg_2[arg_3], six.string_types):\n continue\n\n try:\n arg_4 = json.loads(arg_2[arg_3])\n arg_2[arg_3] = arg_4\n except ValueError:\n raise exc.TowerCLIError('Provided json file format '\n 'invalid. Please recheck.')\n return arg_2"} +{"_id": "doc_3400", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Combine configuration-related keyworded arguments into\n notificationFunc.\n \"\"\"\n if 'notificationFunc' not in arg_2:\n if 'notification_type' not in arg_1:\n return\n arg_3 = arg_1['notificationFunc'] = {}\n for arg_4 in Resource.configuration[arg_1['notification_type']]:\n if arg_4 not in arg_2:\n raise exc.TowerCLIError('Required config field %s not'\n ' provided.' % arg_4)\n else:\n arg_3[arg_4] = arg_2[arg_4]\n else:\n arg_1['notificationFunc'] = \\\n arg_2['notificationFunc']"} +{"_id": "doc_3401", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False, **arg_3):\n \"\"\"Create a notification template.\n\n All required configuration-related fields (required according to\n notification_type) must be provided.\n\n There are two types of notification template creation: isolatedly\n creating a new notification template and creating a new notification\n template under a job template. Here the two types are discriminated by\n whether to provide --job-template option. 
--status option controls\n more specific, job-run-status-related association.\n\n Fields in the resource's `identity` tuple are used for a lookup;\n if a match is found, then no-op (unless `force_on_exists` is set) but\n do not fail (unless `fail_on_found` is set).\n\n =====API DOCS=====\n Create an object.\n\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to Func the\n resource object.\n :returns: A dictionary combining the JSON output of the Funcd resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is Funcd successfully; \"id\", an integer which\n is the primary key of the Funcd object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n arg_4 = arg_0._separate(arg_3)\n arg_5 = arg_3.pop('job_template', None)\n arg_6 = arg_3.pop('status', 'any')\n arg_7 = arg_0.endpoint\n if arg_5 is not None:\n arg_8 = get_resource('job_template')\n arg_8.get(pk=arg_5)\n try:\n arg_9 = arg_0.get(**copy.deepcopy(arg_3))['id']\n except exc.NotFound:\n pass\n else:\n if arg_1:\n raise exc.TowerCLIError('Notification template already '\n 'exists and fail-on-found is '\n 'switched on. Please use'\n ' \"associate_notification\" method'\n ' of job_template instead.')\n else:\n debug.log('Notification template already exists, '\n 'associating with job template.',\n header='details')\n return arg_8.associate_notification_template(\n arg_5, arg_9, arg_6=arg_6)\n arg_0.endpoint = '/job_templates/%d/notification_templates_%s/' %\\\n (arg_5, arg_6)\n arg_0._configuration(arg_3, arg_4)\n arg_11 = super(Resource, arg_0).Func(**arg_3)\n arg_0.endpoint = arg_7\n return arg_11"} +{"_id": "doc_3402", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, **arg_3):\n \"\"\"Modify an existing notification template.\n\n Not all required configuration-related fields (required according to\n notification_type) should be provided.\n\n Fields in the resource's `identity` tuple can be used in lieu of a\n primary key for a lookup; in such a case, only other fields are\n written.\n\n To Func unique fields, you must use the primary key for the lookup.\n\n =====API DOCS=====\n Modify an already existing object.\n\n :param pk: Primary key of the resource to be modified.\n :type pk: int\n :param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n matching the appropriate unique criteria is not found.\n :type create_on_missing: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to Func the\n resource object. 
if ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n also in resource's identity will be used to lookup existing resource.\n :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n is the primary key of the updated object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n # Create the resource if needed.\n if arg_1 is None and arg_2:\n try:\n arg_0.get(**copy.deepcopy(arg_3))\n except exc.NotFound:\n return arg_0.create(**arg_3)\n\n # Modify everything except notification type and configuration\n arg_4 = arg_0._separate(arg_3)\n arg_5 = arg_3.pop('notification_type', None)\n debug.log('Modify everything except notification type and'\n ' configuration', header='details')\n arg_6 = super(Resource, arg_0).\\\n Func(arg_1=arg_1, arg_2=arg_2, **arg_3)\n\n # Modify notification type and configuration\n if arg_5 is None or \\\n arg_5 == arg_6['notification_type']:\n for arg_7 in arg_6['notification_configuration']:\n if arg_7 not in arg_4 or not arg_4[arg_7]:\n arg_8 = arg_6['notification_configuration'][arg_7]\n if not (arg_8 == '$encrypted$' and\n arg_7 in Resource.encrypted_fields):\n arg_4[arg_7] = arg_8\n if arg_5 is None:\n arg_3['notification_type'] = arg_6['notification_type']\n else:\n arg_3['notification_type'] = arg_5\n arg_0._configuration(arg_3, arg_4)\n debug.log('Modify notification type and configuration',\n header='details')\n arg_9 = super(Resource, arg_0).\\\n Func(arg_1=arg_1, arg_2=arg_2, **arg_3)\n\n # Update 'changed' field to give general changed info\n if 'changed' in arg_9 and 'changed' in arg_6:\n arg_9['changed'] = arg_9['changed'] or arg_6['changed']\n return arg_9"}
{"_id": "doc_3403", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Return one and exactly one notification template.\n\n Note here configuration-related fields like\n 'notification_configuration' and 'channels' will not be\n used even if provided.\n\n Lookups may be through a primary key, specified as a positional\n argument, and/or through filters specified through keyword arguments.\n\n If the number of results does not equal one, raise an exception.\n\n =====API DOCS=====\n Retrieve one and exactly one object.\n\n :param pk: Primary key of the resource to be read. 
Tower CLI will only attempt to read *that* object\n if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n :returns: loaded JSON of the retrieved resource object.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n arg_0._separate(arg_2)\n return super(Resource, arg_0).Func(arg_1=arg_1, **arg_2)"}
{"_id": "doc_3404", "title": "", "text": "def Func():\n \"\"\"Read tower-cli config values from the environment if present, being\n careful not to override config values that were explicitly passed in.\n \"\"\"\n arg_0 = {}\n for arg_1 in CONFIG_OPTIONS:\n arg_2 = 'TOWER_' + arg_1.upper()\n arg_3 = os.getenv(arg_2, None)\n if arg_3 is not None:\n arg_0[arg_1] = arg_3\n return arg_0"}
{"_id": "doc_3405", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Read the configuration from the given file.\n\n If the file lacks any section header, add a [general] section\n header that encompasses the whole thing.\n \"\"\"\n # Attempt to read the file using the superclass implementation.\n #\n # Check the permissions of the file we are considering reading\n # if the file exists and the permissions expose it to reads from\n # other users, raise a warning\n if os.path.isfile(arg_2):\n arg_3 = os.stat(arg_2)\n if arg_2 != os.path.join(tower_dir, 'tower_cli.cfg') and (\n (arg_3.st_mode & stat.S_IRGRP) or\n (arg_3.st_mode & stat.S_IROTH)\n ):\n warnings.warn('File {0} readable by group or others.'\n .format(arg_2), RuntimeWarning)\n # If it doesn't work because there's no section header, then\n # create a section header and call the superclass implementation\n # again.\n try:\n return configparser.ConfigParser.Func(arg_0, arg_1, arg_2)\n except configparser.MissingSectionHeaderError:\n arg_1.seek(0)\n arg_4 = '[general]\\n%s' % arg_1.read()\n arg_5 = StringIO(arg_4) # flo == file-like object\n return configparser.ConfigParser.Func(arg_0, arg_5, arg_2)"}
{"_id": "doc_3406", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False, arg_3=None, **arg_4):\n \"\"\"Launch a new ad-hoc command.\n\n Runs a user-defined command from Ansible Tower, immediately starts it,\n and returns back an ID in order for its status to be monitored.\n\n =====API DOCS=====\n Launch a new ad-hoc command.\n\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly Funced command rather\n than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param `**kwargs`: Fields needed to create and Func an ad hoc command.\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n call if ``wait`` flag is on; dictionary of \"id\" and \"changed\" if none of the two flags are on.\n :rtype: dict\n :raises tower_cli.exceptions.TowerCLIError: When ad hoc commands are not available in Tower backend.\n\n =====API DOCS=====\n \"\"\"\n # This feature only exists for versions 2.2 and up\n arg_5 = client.get('/')\n if 'ad_hoc_commands' not in arg_5.json():\n raise exc.TowerCLIError('Your host is running an outdated version '\n 'of Ansible Tower that can not run '\n 'ad-hoc commands (2.2 or earlier)')\n\n # Pop the None arguments because we have no .write() method in\n # inheritance chain for this type of 
resource. This is needed\n arg_0._pop_none(arg_4)\n\n # Actually start the command.\n debug.log('Launching the ad-hoc command.', header='details')\n arg_6 = client.post(arg_0.endpoint, data=arg_4)\n arg_7 = arg_6.json()\n arg_8 = arg_7['id']\n\n # If we were told to monitor the command once it started, then call\n # monitor from here.\n if arg_1:\n return arg_0.monitor(arg_8, arg_3=arg_3)\n elif arg_2:\n return arg_0.wait(arg_8, arg_3=arg_3)\n\n # Return the command ID and other response data\n arg_9 = OrderedDict((\n ('changed', True),\n ('id', arg_8),\n ))\n arg_9.update(arg_6.json())\n return arg_9"} +{"_id": "doc_3407", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Given a method with a docstring, convert the docstring\n to more CLI appropriate wording, and also disambiguate the\n word \"object\" on the base class docstrings.\n \"\"\"\n # Delete API docs if there are any.\n arg_2 = '=====API DOCS====='\n arg_3 = arg_1.find(arg_2)\n if arg_3 >= 0:\n arg_4 = arg_1.rfind(arg_2) + len(arg_2)\n arg_1 = arg_1[:arg_3] + arg_1[arg_4:]\n # Convert the word \"object\" to the appropriate type of\n # object being modified (e.g. user, organization).\n arg_5 = ('a', 'e', 'i', 'o')\n if not arg_0.resource_name.lower().startswith(arg_5):\n arg_1 = arg_1.replace('an object',\n 'a %s' % arg_0.resource_name)\n if arg_0.resource_name.lower().endswith('y'):\n arg_1 = arg_1.replace(\n 'objects',\n '%sies' % arg_0.resource_name[:-1],\n )\n arg_1 = arg_1.replace('object', arg_0.resource_name)\n\n # Convert some common Python terms to their CLI equivalents.\n arg_1 = arg_1.replace('keyword argument', 'option')\n arg_1 = arg_1.replace('raise an exception',\n 'abort with an error')\n\n # Convert keyword arguments specified in docstrings enclosed\n # by backticks to switches.\n for arg_6 in re.findall(r'`([\\w_]+)`', arg_1):\n arg_7 = '--%s' % arg_6.replace('_', '-')\n arg_1 = arg_1.replace('`%s`' % arg_6, arg_7)\n\n # Done; return the new help text.\n return arg_1"} +{"_id": "doc_3408", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Given a method, return a method that runs the internal\n method and echos the result.\n \"\"\"\n @functools.wraps(arg_1)\n def func(*arg_2, **arg_3):\n # Echo warning if this method is deprecated.\n if getattr(arg_1, 'deprecated', False):\n debug.log('This method is deprecated in Tower 3.0.', header='warning')\n\n arg_4 = arg_1(*arg_2, **arg_3)\n\n # If this was a request that could result in a modification\n # of data, print it in Ansible coloring.\n arg_5 = {}\n if isinstance(arg_4, dict) and 'changed' in arg_4:\n if arg_4['changed']:\n arg_5['fg'] = 'yellow'\n else:\n arg_5['fg'] = 'green'\n\n # Piece together the result into the proper format.\n arg_6 = getattr(arg_0, '_format_%s' % (getattr(arg_1, 'format_freezer', None) or settings.format))\n arg_7 = arg_6(arg_4)\n\n # Perform the echo.\n secho(arg_7, **arg_5)\n return func"} +{"_id": "doc_3409", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Echos only the id\"\"\"\n if 'id' in arg_1:\n return str(arg_1['id'])\n if 'results' in arg_1:\n return ' '.join([six.text_type(arg_2['id']) for arg_2 in arg_1['results']])\n raise MultipleRelatedError('Could not serialize output with id format.')"} +{"_id": "doc_3410", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Retrieve the appropriate method from the Resource,\n decorate it as a click command, and return that method.\n \"\"\"\n # Sanity check: Does a method exist corresponding to this\n # command? 
If not, None is returned for click to raise\n # exception.\n if not hasattr(arg_0.resource, arg_2):\n return None\n\n # Get the method.\n arg_3 = getattr(arg_0.resource, arg_2)\n\n # Get any attributes that were given at command-declaration\n # time.\n arg_4 = getattr(arg_3, '_cli_command_attrs', {})\n\n # If the help message comes from the docstring, then\n # convert it into a message specifically for this resource.\n arg_5 = inspect.getdoc(arg_3)\n arg_4['help'] = arg_0._auto_help_text(arg_5 or '')\n\n # On some methods, we ignore the defaults, which are intended\n # for writing and not reading; process this.\n arg_6 = arg_4.pop('ignore_defaults', False)\n\n # Wrap the method, such that it outputs its final return\n # value rather than returning it.\n arg_7 = arg_0._echo_method(arg_3)\n\n # Soft copy the \"__click_params__\", if any exist.\n # This is the internal holding method that the click library\n # uses to store @click.option and @click.argument directives\n # before the method is converted into a command.\n #\n # Because self._echo_method uses @functools.wraps, this is\n # actually preserved; the purpose of copying it over is\n # so we can get our resource fields at the top of the help;\n # the easiest way to do this is to load them in before the\n # conversion takes place. (This is a happy result of Armin's\n # work to get around Python's processing decorators\n # bottom-to-top.)\n arg_8 = getattr(arg_3, '__click_params__', [])\n arg_7.__click_params__ = copy(arg_8)\n arg_7 = with_global_options(arg_7)\n\n # Write options based on the fields available on this resource.\n arg_10 = arg_4.pop('use_fields_as_options', True)\n if arg_10:\n for arg_11 in reversed(arg_0.resource.fields):\n if not arg_11.is_option:\n continue\n\n # If we got an iterable rather than a boolean,\n # then it is a list of fields to use; check for\n # presence in that list.\n if not isinstance(arg_10, bool) and arg_11.name not in arg_10:\n continue\n\n # Create the initial arguments based on the\n # option value. 
If we have a different key to use\n # (which is what gets routed to the Tower API),\n # ensure that is the first argument.\n arg_12 = [arg_11.option]\n if arg_11.key:\n arg_12.insert(0, arg_11.key)\n\n # short name aliases for common flags\n arg_13 = {\n 'name': 'n',\n 'description': 'd',\n 'inventory': 'i',\n 'extra_vars': 'e'\n }\n if arg_11.name in arg_13:\n arg_12.append('-'+arg_13[arg_11.name])\n\n # Apply the option to the method.\n arg_14 = arg_11.help\n if isinstance(arg_11.type, StructuredInput):\n arg_14 += ' Use @ to get JSON or YAML from a file.'\n if arg_11.required:\n arg_14 = '[REQUIRED] ' + arg_14\n elif arg_11.read_only:\n arg_14 = '[READ ONLY] ' + arg_14\n arg_14 = '[FIELD]' + arg_14\n click.option(\n *arg_12,\n default=arg_11.default if not arg_6 else None,\n help=arg_14,\n type=arg_11.type,\n show_default=arg_11.show_default,\n multiple=arg_11.multiple,\n is_eager=False\n )(arg_7)\n\n # Make a click Command instance using this method\n # as the callback, and return it.\n arg_15 = click.command(arg_2=arg_2, cls=ActionSubcommand, **arg_4)(arg_7)\n\n # If this method has a `pk` positional argument,\n # then add a click argument for it.\n arg_16 = six.get_function_code(arg_3)\n if 'pk' in arg_16.co_varnames:\n click.argument('pk', nargs=1, required=False, type=str, metavar='[ID]')(arg_15)\n\n # Done; return the command.\n return arg_15"}
{"_id": "doc_3411", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=False,\n arg_4=None, **arg_5):\n \"\"\"Update the given inventory source.\n\n =====API DOCS=====\n Update the given inventory source.\n\n :param inventory_source: Primary key or name of the inventory source to be Funcd.\n :type inventory_source: str\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched inventory Func\n rather than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the inventory Func, but do not print while it is\n in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param `**kwargs`: Fields used to override underlying inventory source fields when creating and launching\n an inventory Func.\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n call if ``wait`` flag is on; dictionary of \"status\" if none of the two flags are on.\n :rtype: dict\n :raises tower_cli.exceptions.BadRequest: When the inventory source cannot be Funcd.\n\n =====API DOCS=====\n \"\"\"\n\n # Establish that we are able to Func this inventory source\n # at all.\n debug.log('Asking whether the inventory source can be Funcd.', header='details')\n arg_6 = client.get('%s%d/Func/' % (arg_0.endpoint, arg_1))\n if not arg_6.json()['can_Func']:\n raise exc.BadRequest('Tower says it cannot run an Func against this inventory source.')\n\n # Run the Func.\n debug.log('Updating the inventory source.', header='details')\n arg_6 = client.post('%s%d/Func/' % (arg_0.endpoint, arg_1), data={})\n arg_7 = arg_6.json()['inventory_Func']\n\n # If we were told to monitor the project Func's status, do so.\n if arg_2 or arg_3:\n if arg_2:\n arg_8 = arg_0.monitor(arg_7, parent_pk=arg_1, arg_4=arg_4)\n elif arg_3:\n arg_8 = arg_0.wait(arg_7, parent_pk=arg_1, arg_4=arg_4)\n arg_9 = client.get('/inventory_sources/%d/' % arg_8['inventory_source']).json()['inventory']\n arg_8['inventory'] = int(arg_9)\n return arg_8\n\n # Done.\n return {\n 
'id': arg_7,\n 'status': 'ok'\n }"}
{"_id": "doc_3412", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Return a Func of hosts.\n\n =====API DOCS=====\n Retrieve a Func of hosts.\n\n :param group: Primary key or name of the group whose hosts will be Funced.\n :type group: str\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: Func\n :param `**kwargs`: Keyword arguments Func of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====\n \"\"\"\n if arg_1:\n arg_3['query'] = arg_3.get('query', ()) + (('groups__in', arg_1),)\n if arg_2:\n arg_3['query'] = arg_3.get('query', ()) + (('host_filter', arg_2),)\n return super(Resource, arg_0).Func(**arg_3)"}
{"_id": "doc_3413", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Extra format methods for multi methods that adds all the commands\n after the options.\n \"\"\"\n arg_0.format_command_subsection(\n arg_1, arg_2, arg_0.list_misc_commands(), 'Commands'\n )\n arg_0.format_command_subsection(\n arg_1, arg_2, arg_0.list_resource_commands(), 'Resources'\n )"}
{"_id": "doc_3414", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a list of commands present in the commands and resources\n folders, but not subcommands.\n \"\"\"\n arg_2 = set(arg_0.list_resource_commands())\n arg_2 = arg_2.union(set(arg_0.list_misc_commands()))\n return sorted(arg_2)"}
{"_id": "doc_3415", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a list of multi-commands for each resource type.\n \"\"\"\n arg_1 = os.path.abspath(os.path.join(\n os.path.dirname(__file__),\n os.pardir,\n 'resources'\n ))\n arg_2 = set([])\n for arg_3, arg_4, arg_3 in pkgutil.iter_modules([arg_1]):\n arg_5 = tower_cli.get_resource(arg_4)\n if not getattr(arg_5, 'internal', False):\n arg_2.add(arg_4)\n return sorted(arg_2)"}
{"_id": "doc_3416", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a list of global commands, related to CLI\n configuration or system management in general.\n \"\"\"\n arg_1 = set([])\n for arg_2 in misc.__all__:\n arg_1.add(arg_2)\n return sorted(arg_1)"}
{"_id": "doc_3417", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Given a command identified by its name, import the appropriate\n module and return the decorated command.\n\n Resources are automatically commands, but if both a resource and\n a command are defined, the command takes precedence.\n \"\"\"\n # First, attempt to get a basic command from `tower_cli.api.misc`.\n if arg_2 in misc.__all__:\n return getattr(misc, arg_2)\n\n # No command was found; try to get a resource.\n try:\n arg_3 = tower_cli.get_resource(arg_2)\n return ResSubcommand(arg_3)\n except ImportError:\n pass\n\n # Okay, we weren't able to find a command.\n secho('No such command: %s.' 
% arg_2, fg='red', bold=True)\n sys.exit(2)"} +{"_id": "doc_3418", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Adds the decorators for all types of unified job templates,\n and if the non-unified type is specified, converts it into the\n unified_job_template kwarg.\n \"\"\"\n arg_1 = click.option(\n '--job-template', type=types.Related('job_template'),\n help='Use this job template as unified_job_template field')\n arg_2 = click.option(\n '--project', type=types.Related('project'),\n help='Use this project as unified_job_template field')\n arg_3 = click.option(\n '--inventory-source', type=types.Related('inventory_source'),\n help='Use this inventory source as unified_job_template field')\n\n def ujt_translation(arg_4):\n def _ujt_translation(*arg_5, **arg_6):\n for arg_7 in ['job_template', 'project', 'inventory_source']:\n if arg_7 in arg_6 and arg_6[arg_7] is not None:\n arg_6['unified_job_template'] = arg_6.pop(arg_7)\n return arg_4(*arg_5, **arg_6)\n return functools.wraps(arg_4)(_ujt_translation)\n\n return ujt_translation(\n arg_3(\n arg_2(\n arg_1(\n arg_0\n )\n )\n )\n )"} +{"_id": "doc_3419", "title": "", "text": "def Func(arg_0):\n \"\"\"Translate a Mopidy search query to a Spotify search query\"\"\"\n\n arg_1 = []\n\n for (arg_2, arg_3) in arg_0.items():\n arg_2 = SEARCH_FIELD_MAP.get(arg_2, arg_2)\n if arg_2 is None:\n continue\n\n for arg_4 in arg_3:\n if arg_2 == 'year':\n arg_4 = _transform_year(arg_4)\n if arg_4 is not None:\n arg_1.append('%s:%d' % (arg_2, arg_4))\n elif arg_2 == 'any':\n arg_1.append('\"%s\"' % arg_4)\n else:\n arg_1.append('%s:\"%s\"' % (arg_2, arg_4))\n\n return ' '.join(arg_1)"} +{"_id": "doc_3420", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse Retry-After header from response if it is set.\"\"\"\n arg_2 = arg_1.headers.get('Retry-After')\n\n if not arg_2:\n arg_3 = 0\n elif re.match(r'^\\s*[0-9]+\\s*$', arg_2):\n arg_3 = int(arg_2)\n else:\n arg_4 = email.utils.parsedate(arg_2)\n if arg_4 is None:\n arg_3 = 0\n else:\n arg_3 = time.mktime(arg_4) - time.time()\n return max(0, arg_3)"} +{"_id": "doc_3421", "title": "", "text": "def Func(arg_0):\n \"\"\"Generates a state string to be used in authorizations.\"\"\"\n try:\n arg_0._state = arg_0.state()\n log.debug(\"Generated new state %s.\", arg_0._state)\n except TypeError:\n arg_0._state = arg_0.state\n log.debug(\"Re-using previously supplied state %s.\", arg_0._state)\n return arg_0._state"} +{"_id": "doc_3422", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse token from the URI fragment, used by MobileApplicationClients.\n\n :param authorization_response: The full URL of the redirect back to you\n :return: A token dict\n \"\"\"\n arg_0._client.parse_request_uri_response(\n arg_1, state=arg_0._state\n )\n arg_0.token = arg_0._client.token\n return arg_0.token"} +{"_id": "doc_3423", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n Func=None,\n arg_3=\"\",\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=True,\n arg_8=None,\n **arg_9\n ):\n \"\"\"Fetch a new access token using a refresh token.\n\n :param token_url: The token endpoint, must be HTTPS.\n :param refresh_token: The refresh_token to use.\n :param body: Optional application/x-www-form-urlencoded body to add the\n include in the token request. 
Prefer kwargs over body.\n :param auth: An auth tuple or method as accepted by `requests`.\n :param timeout: Timeout of the request in seconds.\n :param headers: A dict of headers to be used by `requests`.\n :param verify: Verify SSL certificate.\n :param proxies: The `proxies` argument will be passed to `requests`.\n :param kwargs: Extra parameters to include in the token request.\n :return: A token dict\n \"\"\"\n if not arg_1:\n raise ValueError(\"No token endpoint set for auto_refresh.\")\n\n if not is_secure_transport(arg_1):\n raise InsecureTransportError()\n\n Func = Func or arg_0.token.get(\"refresh_token\")\n\n log.debug(\n \"Adding auto refresh key word arguments %s.\", arg_0.auto_refresh_kwargs\n )\n arg_9.update(arg_0.auto_refresh_kwargs)\n arg_3 = arg_0._client.prepare_refresh_body(\n arg_3=arg_3, Func=Func, scope=arg_0.scope, **arg_9\n )\n log.debug(\"Prepared refresh token request body %s\", arg_3)\n\n if arg_6 is None:\n arg_6 = {\n \"Accept\": \"application/json\",\n \"Content-Type\": (\"application/x-www-form-urlencoded;charset=UTF-8\"),\n }\n\n arg_10 = arg_0.post(\n arg_1,\n data=dict(urldecode(arg_3)),\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n withhold_token=True,\n arg_8=arg_8,\n )\n log.debug(\"Request to refresh token completed with status %s.\", arg_10.status_code)\n log.debug(\"Response headers were %s and content %s.\", arg_10.headers, arg_10.text)\n log.debug(\n \"Invoking %d token response hooks.\",\n len(arg_0.compliance_hook[\"refresh_token_response\"]),\n )\n for arg_11 in arg_0.compliance_hook[\"refresh_token_response\"]:\n log.debug(\"Invoking hook %s.\", arg_11)\n arg_10 = arg_11(arg_10)\n\n arg_0.token = arg_0._client.parse_request_body_response(arg_10.text, scope=arg_0.scope)\n if not \"refresh_token\" in arg_0.token:\n log.debug(\"No new refresh token given. Re-using old.\")\n arg_0.token[\"refresh_token\"] = Func\n return arg_0.token"} +{"_id": "doc_3424", "title": "", "text": "def Func(arg_0):\n \"\"\"Boolean that indicates whether this session has an OAuth token\n or not. If `self.Func` is True, you can reasonably expect\n OAuth-protected requests to the resource to succeed. 
If\n `self.Func` is False, you need the user to go through the OAuth\n authentication dance before OAuth-protected requests to the resource\n will succeed.\n \"\"\"\n if arg_0._client.client.signature_method == SIGNATURE_RSA:\n # RSA only uses resource_owner_key\n return bool(arg_0._client.client.resource_owner_key)\n else:\n # other methods of authentication use all three pieces\n return (\n bool(arg_0._client.client.client_secret)\n and bool(arg_0._client.client.resource_owner_key)\n and bool(arg_0._client.client.resource_owner_secret)\n )"} +{"_id": "doc_3425", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Extract parameters from the post authorization redirect response URL.\n\n :param url: The full URL that resulted from the user being redirected\n back from the OAuth provider to you, the client.\n :returns: A dict of parameters extracted from the URL.\n\n >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'\n >>> oauth_session = OAuth1Session('client-key', client_secret='secret')\n >>> oauth_session.Func(redirect_response)\n {\n 'oauth_token: 'kjerht2309u',\n 'oauth_token_secret: 'lsdajfh923874',\n 'oauth_verifier: 'w34o8967345',\n }\n \"\"\"\n log.debug(\"Parsing token from query part of url %s\", arg_1)\n arg_2 = dict(urldecode(urlparse(arg_1).query))\n log.debug(\"Updating internal client token attribute.\")\n arg_0._populate_attributes(arg_2)\n arg_0.token = arg_2\n return arg_2"} +{"_id": "doc_3426", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n When being redirected we should always strip Authorization\n header, since nonce may not be reused as per OAuth spec.\n \"\"\"\n if \"Authorization\" in arg_1.headers:\n # If we get redirected to a new host, we should strip out\n # any authentication headers.\n arg_1.headers.pop(\"Authorization\", True)\n arg_1.prepare_auth(arg_0.auth)\n return"} +{"_id": "doc_3427", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validate new property value before setting it.\n\n value -- New value\n \"\"\"\n if 'readOnly' in arg_0.metadata and arg_0.metadata['readOnly']:\n raise PropertyError('Read-only property')\n\n try:\n validate(arg_1, arg_0.metadata)\n except ValidationError:\n raise PropertyError('Invalid property value')"} +{"_id": "doc_3428", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the property description.\n\n Returns a dictionary describing the property.\n \"\"\"\n arg_1 = deepcopy(arg_0.metadata)\n\n if 'links' not in arg_1:\n arg_1['links'] = []\n\n arg_1['links'].append(\n {\n 'rel': 'property',\n 'href': arg_0.href_prefix + arg_0.href,\n }\n )\n return arg_1"} +{"_id": "doc_3429", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the current value of the property.\n\n value -- the value to set\n \"\"\"\n arg_0.validate_value(arg_1)\n arg_0.value.set(arg_1)"} +{"_id": "doc_3430", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the thing at the given index.\n\n idx -- the index\n \"\"\"\n try:\n arg_1 = int(arg_1)\n except ValueError:\n return None\n\n if arg_1 < 0 or arg_1 >= len(arg_0.things):\n return None\n\n return arg_0.things[arg_1]"} +{"_id": "doc_3431", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Initialize the handler.\n\n things -- list of Things managed by this server\n hosts -- list of allowed hostnames\n \"\"\"\n arg_0.things = arg_1\n arg_0.hosts = arg_2"} +{"_id": "doc_3432", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Set the 
default headers for all requests.\"\"\"\n arg_0.set_header('Access-Control-Allow-Origin', '*')\n arg_0.set_header('Access-Control-Allow-Headers',\n 'Origin, X-Requested-With, Content-Type, Accept')\n arg_0.set_header('Access-Control-Allow-Methods',\n 'GET, HEAD, PUT, POST, DELETE')"} +{"_id": "doc_3433", "title": "", "text": "def Func(arg_0):\n \"\"\"Validate Host header.\"\"\"\n arg_1 = arg_0.request.headers.get('Host', None)\n if arg_1 is not None and arg_1 in arg_0.hosts:\n return\n\n raise tornado.web.HTTPError(403)"} +{"_id": "doc_3434", "title": "", "text": "def Func(arg_0, arg_1='0'):\n \"\"\"\n Handle a GET request, including websocket requests.\n\n thing_id -- ID of the thing this request is for\n \"\"\"\n arg_0.thing = arg_0.Func_thing(arg_1)\n if arg_0.thing is None:\n arg_0.set_status(404)\n arg_0.finish()\n return\n\n if arg_0.request.headers.Func('Upgrade', '').lower() == 'websocket':\n yield tornado.websocket.WebSocketHandler.Func(arg_0)\n return\n\n arg_0.set_header('Content-Type', 'application/json')\n arg_3 = '{}://{}'.format(\n 'wss' if arg_0.request.protocol == 'https' else 'ws',\n arg_0.request.headers.Func('Host', '')\n )\n\n arg_4 = arg_0.thing.as_thing_description()\n arg_4['links'].append({\n 'rel': 'alternate',\n 'href': '{}{}'.format(arg_3, arg_0.thing.Func_href()),\n })\n\n arg_0.write(json.dumps(arg_4))\n arg_0.finish()"} +{"_id": "doc_3435", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Handle an incoming message.\n\n message -- message to handle\n \"\"\"\n try:\n arg_1 = json.loads(arg_1)\n except ValueError:\n try:\n arg_0.write_message(json.dumps({\n 'messageType': 'error',\n 'data': {\n 'status': '400 Bad Request',\n 'message': 'Parsing request failed',\n },\n }))\n except tornado.websocket.WebSocketClosedError:\n pass\n\n return\n\n if 'messageType' not in arg_1 or 'data' not in arg_1:\n try:\n arg_0.write_message(json.dumps({\n 'messageType': 'error',\n 'data': {\n 'status': '400 Bad Request',\n 'message': 'Invalid message',\n },\n }))\n except tornado.websocket.WebSocketClosedError:\n pass\n\n return\n\n arg_2 = arg_1['messageType']\n if arg_2 == 'setProperty':\n for arg_3, arg_4 in arg_1['data'].items():\n try:\n arg_0.thing.set_property(arg_3, arg_4)\n except PropertyError as e:\n arg_0.write_message(json.dumps({\n 'messageType': 'error',\n 'data': {\n 'status': '400 Bad Request',\n 'message': str(e),\n },\n }))\n elif arg_2 == 'requestAction':\n for arg_5, arg_6 in arg_1['data'].items():\n arg_7 = None\n if 'input' in arg_6:\n arg_7 = arg_6['input']\n\n arg_8 = arg_0.thing.perform_action(arg_5, arg_7)\n if arg_8:\n tornado.ioloop.IOLoop.current().spawn_callback(\n perform_action,\n arg_8,\n )\n else:\n arg_0.write_message(json.dumps({\n 'messageType': 'error',\n 'data': {\n 'status': '400 Bad Request',\n 'message': 'Invalid action request',\n 'request': arg_1,\n },\n }))\n elif arg_2 == 'addEventSubscription':\n for arg_9 in arg_1['data'].keys():\n arg_0.thing.add_event_subscriber(arg_9, arg_0)\n else:\n try:\n arg_0.write_message(json.dumps({\n 'messageType': 'error',\n 'data': {\n 'status': '400 Bad Request',\n 'message': 'Unknown messageType: ' + arg_2,\n 'request': arg_1,\n },\n }))\n except tornado.websocket.WebSocketClosedError:\n pass"} +{"_id": "doc_3436", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set a new value for this thing.\n\n value -- value to Func\n \"\"\"\n if arg_0.value_forwarder is not None:\n arg_0.value_forwarder(arg_1)\n\n arg_0.notify_of_external_update(arg_1)"} +{"_id": "doc_3437", "title": "", 
"text": "def Func(arg_0, arg_1):\n \"\"\"\n Notify observers of a new value.\n\n value -- new value\n \"\"\"\n if arg_1 is not None and arg_1 != arg_0.last_value:\n arg_0.last_value = arg_1\n arg_0.emit('update', arg_1)"} +{"_id": "doc_3438", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the thing state as a Thing Description.\n\n Returns the state as a dictionary.\n \"\"\"\n arg_1 = {\n 'name': arg_0.name,\n 'href': arg_0.href_prefix if arg_0.href_prefix else '/',\n '@context': arg_0.context,\n '@type': arg_0.type,\n 'properties': arg_0.get_property_descriptions(),\n 'actions': {},\n 'events': {},\n 'links': [\n {\n 'rel': 'properties',\n 'href': '{}/properties'.format(arg_0.href_prefix),\n },\n {\n 'rel': 'actions',\n 'href': '{}/actions'.format(arg_0.href_prefix),\n },\n {\n 'rel': 'events',\n 'href': '{}/events'.format(arg_0.href_prefix),\n },\n ],\n }\n\n for arg_2, arg_3 in arg_0.available_actions.items():\n arg_1['actions'][arg_2] = arg_3['metadata']\n arg_1['actions'][arg_2]['links'] = [\n {\n 'rel': 'action',\n 'href': '{}/actions/{}'.format(arg_0.href_prefix, arg_2),\n },\n ]\n\n for arg_2, arg_4 in arg_0.available_events.items():\n arg_1['events'][arg_2] = arg_4['metadata']\n arg_1['events'][arg_2]['links'] = [\n {\n 'rel': 'event',\n 'href': '{}/events/{}'.format(arg_0.href_prefix, arg_2),\n },\n ]\n\n if arg_0.ui_href is not None:\n arg_1['links'].append({\n 'rel': 'alternate',\n 'mediaType': 'text/html',\n 'href': arg_0.ui_href,\n })\n\n if arg_0.description:\n arg_1['description'] = arg_0.description\n\n return arg_1"} +{"_id": "doc_3439", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the prefix of any hrefs associated with this thing.\n\n prefix -- the prefix\n \"\"\"\n arg_0.href_prefix = arg_1\n\n for arg_3 in arg_0.properties.values():\n arg_3.Func(arg_1)\n\n for arg_4 in arg_0.actions.keys():\n for arg_5 in arg_0.actions[arg_4]:\n arg_5.Func(arg_1)"} +{"_id": "doc_3440", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the thing's properties as a dictionary.\n\n Returns the properties as a dictionary, i.e. 
name -> description.\n \"\"\"\n return {arg_1: arg_2.as_property_description()\n for arg_1, arg_2 in arg_0.properties.items()}"} +{"_id": "doc_3441", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Get the thing's actions as an array.\n\n action_name -- Optional action name to get descriptions for\n\n Returns the action descriptions.\n \"\"\"\n arg_2 = []\n\n if arg_1 is None:\n for arg_3 in arg_0.actions:\n for arg_4 in arg_0.actions[arg_3]:\n arg_2.append(arg_4.as_action_description())\n elif arg_1 in arg_0.actions:\n for arg_4 in arg_0.actions[arg_1]:\n arg_2.append(arg_4.as_action_description())\n\n return arg_2"} +{"_id": "doc_3442", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Get the thing's events as an array.\n\n event_name -- Optional event name to get descriptions for\n\n Returns the event descriptions.\n \"\"\"\n if arg_1 is None:\n return [arg_2.as_event_description() for arg_2 in arg_0.events]\n else:\n return [arg_2.as_event_description()\n for arg_2 in arg_0.events if arg_2.get_name() == arg_1]"} +{"_id": "doc_3443", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add a property to this thing.\n\n property_ -- property to add\n \"\"\"\n arg_1.set_href_prefix(arg_0.href_prefix)\n arg_0.properties[arg_1.name] = arg_1"} +{"_id": "doc_3444", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove a property from this thing.\n\n property_ -- property to remove\n \"\"\"\n if arg_1.name in arg_0.properties:\n del arg_0.properties[arg_1.name]"} +{"_id": "doc_3445", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a property's value.\n\n property_name -- the property to get the value of\n\n Returns the properties value, if found, else None.\n \"\"\"\n arg_2 = arg_0.find_property(arg_1)\n if arg_2:\n return arg_2.get_value()\n\n return None"} +{"_id": "doc_3446", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get a mapping of all properties and their values.\n\n Returns a dictionary of property_name -> value.\n \"\"\"\n return {arg_1.get_name(): arg_1.get_value()\n for arg_1 in arg_0.properties.values()}"} +{"_id": "doc_3447", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get an action.\n\n action_name -- name of the action\n action_id -- ID of the action\n\n Returns the requested action if found, else None.\n \"\"\"\n if arg_1 not in arg_0.actions:\n return None\n\n for arg_3 in arg_0.actions[arg_1]:\n if arg_3.id == arg_2:\n return arg_3\n\n return None"} +{"_id": "doc_3448", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add a new event and notify subscribers.\n\n event -- the event that occurred\n \"\"\"\n arg_0.events.append(arg_1)\n arg_0.event_notify(arg_1)"} +{"_id": "doc_3449", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add an available event.\n\n name -- name of the event\n metadata -- event metadata, i.e. 
type, description, etc., as a dict\n \"\"\"\n if arg_2 is None:\n arg_2 = {}\n\n arg_0.available_events[arg_1] = {\n 'metadata': arg_2,\n 'subscribers': set(),\n }"} +{"_id": "doc_3450", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Remove an existing action.\n\n action_name -- name of the action\n action_id -- ID of the action\n\n Returns a boolean indicating the presence of the action.\n \"\"\"\n arg_3 = arg_0.get_action(arg_1, arg_2)\n if arg_3 is None:\n return False\n\n arg_3.cancel()\n arg_0.actions[arg_1].remove(arg_3)\n return True"} +{"_id": "doc_3451", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove a websocket subscriber.\n\n ws -- the websocket\n \"\"\"\n if arg_1 in arg_0.subscribers:\n arg_0.subscribers.remove(arg_1)\n\n for arg_2 in arg_0.available_events:\n arg_0.remove_event_subscriber(arg_2, arg_1)"} +{"_id": "doc_3452", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Remove a websocket subscriber from an event.\n\n name -- name of the event\n ws -- the websocket\n \"\"\"\n if arg_1 in arg_0.available_events and \\\n arg_2 in arg_0.available_events[arg_1]['subscribers']:\n arg_0.available_events[arg_1]['subscribers'].remove(arg_2)"} +{"_id": "doc_3453", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Notify all subscribers of an action status change.\n\n action -- the action whose status changed\n \"\"\"\n arg_2 = json.dumps({\n 'messageType': 'actionStatus',\n 'data': arg_1.as_action_description(),\n })\n\n for arg_3 in list(arg_0.subscribers):\n try:\n arg_3.write_message(arg_2)\n except tornado.websocket.WebSocketClosedError:\n pass"} +{"_id": "doc_3454", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Notify all subscribers of an event.\n\n event -- the event that occurred\n \"\"\"\n if arg_1.name not in arg_0.available_events:\n return\n\n arg_2 = json.dumps({\n 'messageType': 'event',\n 'data': arg_1.as_event_description(),\n })\n\n for arg_3 in arg_0.available_events[arg_1.name]['subscribers']:\n try:\n arg_3.write_message(arg_2)\n except tornado.websocket.WebSocketClosedError:\n pass"} +{"_id": "doc_3455", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns data with different cfgstr values that were previously computed\n with this cacher.\n\n Example:\n >>> from ubelt.util_cache import Cacher\n >>> # Ensure that some data exists\n >>> known_fnames = set()\n >>> cacher = Cacher('versioned_data', cfgstr='1')\n >>> cacher.ensure(lambda: 'data1')\n >>> known_fnames.add(cacher.get_fpath())\n >>> cacher = Cacher('versioned_data', cfgstr='2')\n >>> cacher.ensure(lambda: 'data2')\n >>> known_fnames.add(cacher.get_fpath())\n >>> # List previously computed configs for this type\n >>> from os.path import basename\n >>> cacher = Cacher('versioned_data', cfgstr='2')\n >>> exist_fpaths = set(cacher.Func())\n >>> exist_fnames = list(map(basename, exist_fpaths))\n >>> print(exist_fnames)\n >>> assert exist_fpaths == known_fnames\n\n ['versioned_data_1.pkl', 'versioned_data_2.pkl']\n \"\"\"\n import glob\n arg_1 = join(arg_0.dpath, arg_0.fname + '_*' + arg_0.ext)\n for arg_2 in glob.iglob(arg_1):\n arg_3 = join(arg_0.dpath, arg_2)\n yield arg_3"} +{"_id": "doc_3456", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Removes the saved cache and metadata from disk\n \"\"\"\n arg_2 = arg_0.get_fpath(arg_1)\n if arg_0.verbose > 0:\n arg_0.log('[cacher] Func cache')\n if exists(arg_2):\n if arg_0.verbose > 0:\n arg_0.log('[cacher] removing {}'.format(arg_2))\n os.remove(arg_2)\n\n # Remove the 
metadata if it exists\n arg_3 = arg_2 + '.meta'\n if exists(arg_3):\n os.remove(arg_3)\n else:\n if arg_0.verbose > 0:\n arg_0.log('[cacher] ... nothing to Func')"}
{"_id": "doc_3457", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='raise'):\n \"\"\"\n Like load, but returns None if the load fails due to a cache miss.\n\n Args:\n on_error (str): How to handle non-io errors. Either raise,\n which re-raises the exception, or clear which deletes the cache\n and returns None.\n \"\"\"\n arg_1 = arg_0._rectify_cfgstr(arg_1)\n if arg_0.enabled:\n try:\n if arg_0.verbose > 1:\n arg_0.log('[cacher] Func fname={}'.format(arg_0.fname))\n return arg_0.load(arg_1)\n except IOError:\n if arg_0.verbose > 0:\n arg_0.log('[cacher] ... {} cache miss'.format(arg_0.fname))\n except Exception:\n if arg_0.verbose > 0:\n arg_0.log('[cacher] ... failed to load')\n if arg_2 == 'raise':\n raise\n elif arg_2 == 'clear':\n arg_0.clear(arg_1)\n return None\n else:\n raise KeyError('Unknown method on_error={}'.format(arg_2))\n else:\n if arg_0.verbose > 1:\n arg_0.log('[cacher] ... cache disabled: fname={}'.format(arg_0.fname))\n return None"}
{"_id": "doc_3458", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Loads the data\n\n Raises:\n IOError - if the data is unable to be Funced. This could be due to\n a cache miss or because the cache is disabled.\n\n Example:\n >>> from ubelt.util_cache import * # NOQA\n >>> # Setting the cacher as enabled=False turns it off\n >>> cacher = Cacher('test_disabled_Func', '', enabled=True)\n >>> cacher.save('data')\n >>> assert cacher.Func() == 'data'\n >>> cacher.enabled = False\n >>> assert cacher.tryFunc() is None\n \"\"\"\n from six.moves import cPickle as pickle\n arg_1 = arg_0._rectify_cfgstr(arg_1)\n\n arg_2 = arg_0.dpath\n arg_3 = arg_0.fname\n arg_4 = arg_0.verbose\n\n if not arg_0.enabled:\n if arg_4 > 1:\n arg_0.log('[cacher] ... cache disabled: fname={}'.format(arg_0.fname))\n raise IOError(3, 'Cache Loading Is Disabled')\n\n arg_5 = arg_0.get_fpath(arg_1=arg_1)\n\n if not exists(arg_5):\n if arg_4 > 2:\n arg_0.log('[cacher] ... cache does not exist: '\n 'dpath={} fname={} cfgstr={}'.format(\n basename(arg_2), arg_3, arg_1))\n raise IOError(2, 'No such file or directory: %r' % (arg_5,))\n else:\n if arg_4 > 3:\n arg_0.log('[cacher] ... cache exists: '\n 'dpath={} fname={} cfgstr={}'.format(\n basename(arg_2), arg_3, arg_1))\n try:\n with open(arg_5, 'rb') as file_:\n arg_6 = pickle.Func(file_)\n except Exception as ex:\n if arg_4 > 0:\n arg_0.log('CORRUPTED? fpath = %s' % (arg_5,))\n if arg_4 > 1:\n arg_0.log('[cacher] ... CORRUPTED? dpath={} cfgstr={}'.format(\n basename(arg_2), arg_1))\n if isinstance(ex, (EOFError, IOError, ImportError)):\n raise IOError(str(ex))\n else:\n if arg_4 > 1:\n arg_0.log('[cacher] ... unknown reason for exception')\n raise\n else:\n if arg_0.verbose > 2:\n arg_0.log('[cacher] ... {} cache hit'.format(arg_0.fname))\n elif arg_4 > 1:\n arg_0.log('[cacher] ... cache hit')\n return arg_6"}
{"_id": "doc_3459", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n r\"\"\"\n Wraps around a function. 
A cfgstr must be stored in the base cacher.\n\n Args:\n func (callable): function that will compute data on cache miss\n *args: passed to func\n **kwargs: passed to func\n\n Example:\n >>> from ubelt.util_cache import * # NOQA\n >>> def func():\n >>> return 'expensive result'\n >>> fname = 'test_cacher_Func'\n >>> cfgstr = 'func params'\n >>> cacher = Cacher(fname, cfgstr)\n >>> cacher.clear()\n >>> data1 = cacher.Func(func)\n >>> data2 = cacher.Func(func)\n >>> assert data1 == 'expensive result'\n >>> assert data1 == data2\n >>> cacher.clear()\n \"\"\"\n arg_4 = arg_0.tryload()\n if arg_4 is None:\n arg_4 = arg_1(*arg_2, **arg_3)\n arg_0.save(arg_4)\n return arg_4"} +{"_id": "doc_3460", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns the stamp certificate if it exists\n \"\"\"\n arg_2 = arg_0.cacher.tryload(arg_1=arg_1)\n return arg_2"} +{"_id": "doc_3461", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Get the hash of the each product file\n \"\"\"\n if arg_0.hasher is None:\n return None\n else:\n arg_2 = arg_0._rectify_products(arg_1)\n arg_3 = [\n util_hash.hash_file(p, hasher=arg_0.hasher, base='hex')\n for p in arg_2\n ]\n return arg_3"} +{"_id": "doc_3462", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Check to see if a previously existing stamp is still valid and if the\n expected result of that computation still exists.\n\n Args:\n cfgstr (str, optional): override the default cfgstr if specified\n product (PathLike or Sequence[PathLike], optional): override the\n default product if specified\n \"\"\"\n arg_3 = arg_0._rectify_products(arg_2)\n arg_4 = arg_0._get_certificate(arg_1=arg_1)\n if arg_4 is None:\n # We dont have a certificate, so we are Func\n arg_5 = True\n elif arg_3 is None:\n # We dont have a product to check, so assume not Func\n arg_5 = False\n elif not all(map(os.path.exists, arg_3)):\n # We are Func if the expected product does not exist\n arg_5 = True\n else:\n # We are Func if the hash of the existing product data\n # does not match the expected hash in the certificate\n arg_6 = arg_0._product_file_hash(arg_3)\n arg_7 = arg_4.get('product_file_hash', None)\n arg_5 = arg_6 != arg_7\n return arg_5"} +{"_id": "doc_3463", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Recertify that the product has been recomputed by writing a new\n certificate to disk.\n \"\"\"\n arg_3 = arg_0._rectify_products(arg_2)\n arg_4 = {\n 'timestamp': util_time.timestamp(),\n 'product': arg_3,\n }\n if arg_3 is not None:\n if not all(map(os.path.exists, arg_3)):\n raise IOError(\n 'The stamped product must exist: {}'.format(arg_3))\n arg_4['product_file_hash'] = arg_0._product_file_hash(arg_3)\n arg_0.cacher.save(arg_4, arg_1=arg_1)\n return arg_4"} +{"_id": "doc_3464", "title": "", "text": "def Func(arg_0): # nocover\n \"\"\"\n Returns true of the redirect is a terminal.\n\n Notes:\n Needed for IPython.embed to work properly when this class is used\n to override stdout / stderr.\n \"\"\"\n return (arg_0.redirect is not None and\n hasattr(arg_0.redirect, 'Func') and arg_0.redirect.Func())"} +{"_id": "doc_3465", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Gets the Func of the `redirect` IO object\n\n Doctest:\n >>> redirect = io.StringIO()\n >>> assert TeeStringIO(redirect).Func is None\n >>> assert TeeStringIO(None).Func is None\n >>> assert TeeStringIO(sys.stdout).Func is sys.stdout.Func\n >>> redirect = io.TextIOWrapper(io.StringIO())\n >>> assert TeeStringIO(redirect).Func is 
redirect.Func\n \"\"\"\n if arg_0.redirect is not None:\n return arg_0.redirect.Func\n else:\n return super(TeeStringIO, arg_0).Func"}
{"_id": "doc_3466", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write to this and the redirected stream\n \"\"\"\n if arg_0.redirect is not None:\n arg_0.redirect.Func(arg_1)\n if six.PY2:\n from xdoctest.utils.util_str import ensure_unicode\n arg_1 = ensure_unicode(arg_1)\n super(TeeStringIO, arg_0).Func(arg_1)"}
{"_id": "doc_3467", "title": "", "text": "def Func():\n \"\"\"\n Returns path for user-specific data files\n\n Returns:\n PathLike : path to the data dir used by the current operating system\n \"\"\"\n if LINUX: # nocover\n arg_0 = os.environ.get('XDG_DATA_HOME', '~/.local/share')\n elif DARWIN: # nocover\n arg_0 = '~/Library/Application Support'\n elif WIN32: # nocover\n arg_0 = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n arg_1 = normpath(expanduser(arg_0))\n return arg_1"}
{"_id": "doc_3468", "title": "", "text": "def Func():\n \"\"\"\n Returns a directory which should be writable for any application.\n This should be used for persistent configuration files.\n\n Returns:\n PathLike : path to the cache dir used by the current operating system\n \"\"\"\n if LINUX: # nocover\n arg_0 = os.environ.get('XDG_CONFIG_HOME', '~/.config')\n elif DARWIN: # nocover\n arg_0 = '~/Library/Application Support'\n elif WIN32: # nocover\n arg_0 = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n arg_1 = normpath(expanduser(arg_0))\n return arg_1"}
{"_id": "doc_3469", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Calls `get_app_cache_dir` but ensures the directory exists.\n\n Args:\n appname (str): the name of the application\n *args: any other subdirectories may be specified\n\n SeeAlso:\n get_app_cache_dir\n\n Example:\n >>> import ubelt as ub\n >>> dpath = ub.Func('ubelt')\n >>> assert exists(dpath)\n \"\"\"\n from ubelt import util_path\n arg_2 = get_app_cache_dir(arg_0, *arg_1)\n util_path.ensuredir(arg_2)\n return arg_2"}
{"_id": "doc_3470", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"\n Locate a command.\n\n Search your local filesystem for an executable and return the first\n matching file with executable permission.\n\n Args:\n name (str): globstr of matching filename\n\n multi (bool): if True return all matches instead of just the first.\n Defaults to False.\n\n path (str or Iterable[PathLike]): overrides the system PATH variable.\n\n Returns:\n PathLike or List[PathLike] or None: returns matching executable(s).\n\n SeeAlso:\n shutil.which - which is available in Python 3.3+.\n\n Notes:\n This is essentially the `which` UNIX command\n\n References:\n https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028\n https://docs.python.org/dev/library/shutil.html#shutil.which\n\n Example:\n >>> Func('ls')\n >>> Func('ping')\n >>> assert Func('which') == Func(Func('which'))\n >>> Func('which', multi=True)\n >>> Func('ping', multi=True)\n >>> Func('cmake', multi=True)\n >>> Func('nvcc', multi=True)\n >>> Func('noexist', multi=True)\n\n Example:\n >>> assert not Func('noexist', multi=False)\n >>> assert Func('ping', multi=False)\n >>> assert not Func('noexist', multi=True)\n >>> assert Func('ping', multi=True)\n\n Benchmark:\n >>> # xdoctest: +IGNORE_WANT\n >>> import ubelt as ub\n >>> import shutil\n >>> for timer in ub.Timerit(100, 
bestof=10, label='ub.Func'):\n >>> ub.Func('which')\n >>> for timer in ub.Timerit(100, bestof=10, label='shutil.which'):\n >>> shutil.which('which')\n Timed best=58.71 \u00b5s, mean=59.64 \u00b1 0.96 \u00b5s for ub.Func\n Timed best=72.75 \u00b5s, mean=73.07 \u00b1 0.22 \u00b5s for shutil.which\n \"\"\"\n arg_3 = find_path(arg_0, arg_2=arg_2, exact=True)\n arg_4 = os.X_OK | os.F_OK\n arg_5 = (arg_6 for arg_6 in arg_3\n if os.access(arg_6, arg_4) and not isdir(arg_6))\n if not arg_1:\n for arg_6 in arg_5:\n return arg_6\n else:\n return list(arg_5)"} +{"_id": "doc_3471", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Returns the user's home directory.\n If `username` is None, this is the directory for the current user.\n\n Args:\n username (str): name of a user on the system\n\n Returns:\n PathLike: Func_dpath: path to the home directory\n\n Example:\n >>> import getpass\n >>> username = getpass.getuser()\n >>> assert Func() == expanduser('~')\n >>> assert Func(username) == expanduser('~')\n \"\"\"\n if arg_0 is None:\n # get home directory for the current user\n if 'HOME' in os.environ:\n arg_1 = os.environ['HOME']\n else: # nocover\n if sys.platform.startswith('win32'):\n # win32 fallback when HOME is not defined\n if 'USERPROFILE' in os.environ:\n arg_1 = os.environ['USERPROFILE']\n elif 'HOMEPATH' in os.environ:\n arg_2 = os.environ.get('HOMEDRIVE', '')\n arg_1 = join(arg_2, os.environ['HOMEPATH'])\n else:\n raise OSError(\"Cannot determine the user's home directory\")\n else:\n # posix fallback when HOME is not defined\n import pwd\n arg_1 = pwd.getpwuid(os.getuid()).pw_dir\n else:\n # A specific user directory was requested\n if sys.platform.startswith('win32'): # nocover\n # get the directory name for the current user\n arg_3 = dirname(Func())\n arg_1 = join(arg_3, arg_0)\n if not exists(arg_1):\n raise KeyError('Unknown user: {}'.format(arg_0))\n else:\n import pwd\n try:\n arg_4 = pwd.getpwnam(arg_0)\n except KeyError: # nocover\n raise KeyError('Unknown user: {}'.format(arg_0))\n arg_1 = arg_4.pw_dir\n return arg_1"} +{"_id": "doc_3472", "title": "", "text": "def Func(arg_0, arg_1='~'):\n \"\"\"\n Inverse of `os.path.expanduser`\n\n Args:\n path (PathLike): path in system file structure\n home (str): symbol used to replace the home path. Defaults to '~', but\n you might want to use '$HOME' or '%USERPROFILE%' instead.\n\n Returns:\n PathLike: path: shortened path replacing the home directory with a tilde\n\n CommandLine:\n xdoctest -m ubelt.util_path Func\n\n Example:\n >>> path = expanduser('~')\n >>> assert path != '~'\n >>> assert Func(path) == '~'\n >>> assert Func(path + '1') == path + '1'\n >>> assert Func(path + '/1') == join('~', '1')\n >>> assert Func(path + '/1', '$HOME') == join('$HOME', '1')\n \"\"\"\n arg_0 = normpath(arg_0)\n arg_2 = userhome()\n if arg_0.startswith(arg_2):\n if len(arg_0) == len(arg_2):\n arg_0 = arg_1\n elif arg_0[len(arg_2)] == os.path.sep:\n arg_0 = arg_1 + arg_0[len(arg_2):]\n return arg_0"} +{"_id": "doc_3473", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Normalizes a string representation of a path and does shell-like expansion.\n\n Args:\n path (PathLike): string representation of a path\n real (bool): if True, all symbolic links are followed. (default: False)\n\n Returns:\n PathLike : normalized path\n\n Note:\n This function is similar to the composition of expanduser, expandvars,\n normpath, and (realpath if `real` else abspath). 
However, on windows\n backslashes are then replaced with forward slashes to offer a\n consistent unix-like experience across platforms.\n\n On windows expanduser will expand environment variables formatted as\n %name%, whereas on unix, this will not occur.\n\n CommandLine:\n python -m ubelt.util_path Func\n\n Example:\n >>> import ubelt as ub\n >>> assert ub.Func('~/foo') == join(ub.userhome(), 'foo')\n >>> assert ub.Func('~/foo') == ub.Func('~/foo/bar/..')\n >>> assert ub.Func('~/foo', real=True) == ub.Func('~/foo')\n \"\"\"\n arg_0 = expanduser(arg_0)\n arg_0 = expandvars(arg_0)\n if arg_1:\n arg_0 = realpath(arg_0)\n else:\n arg_0 = abspath(arg_0)\n arg_0 = normpath(arg_0)\n return arg_0"} +{"_id": "doc_3474", "title": "", "text": "def Func(arg_0, arg_1=0o1777, arg_2=None):\n r\"\"\"\n Ensures that directory will exist. Creates new dir with sticky bits by\n default\n\n Args:\n dpath (PathLike): dir to ensure. Can also be a tuple to send to join\n mode (int): octal mode of directory (default 0o1777)\n verbose (int): verbosity (default 0)\n\n Returns:\n PathLike: path: the ensured directory\n\n Notes:\n This function is not thread-safe in Python2\n\n Example:\n >>> from ubelt.util_platform import * # NOQA\n >>> import ubelt as ub\n >>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n >>> dpath = join(cache_dpath, 'Func')\n >>> if exists(dpath):\n ... os.rmdir(dpath)\n >>> assert not exists(dpath)\n >>> ub.Func(dpath)\n >>> assert exists(dpath)\n >>> os.rmdir(dpath)\n \"\"\"\n if arg_2 is None: # nocover\n arg_2 = 0\n if isinstance(arg_0, (list, tuple)): # nocover\n arg_0 = join(*arg_0)\n if not exists(arg_0):\n if arg_2: # nocover\n print('Ensuring new directory (%r)' % arg_0)\n if sys.version_info.major == 2: # nocover\n os.makedirs(normpath(arg_0), arg_1=arg_1)\n else:\n os.makedirs(normpath(arg_0), arg_1=arg_1, exist_ok=True)\n else:\n if arg_2: # nocover\n print('Ensuring existing directory (%r)' % arg_0)\n return arg_0"} +{"_id": "doc_3475", "title": "", "text": "def Func(arg_0='requirements.txt'):\n \"\"\"\n pip install requirements-parser\n fname='requirements.txt'\n \"\"\"\n import requirements\n from os.path import dirname, join, exists\n arg_1 = join(dirname(__file__), arg_0)\n if exists(arg_1):\n # Dont use until this handles platform specific dependencies\n with open(arg_1, 'r') as file:\n arg_2 = list(requirements.parse(file))\n arg_3 = [r.name for r in arg_2]\n return arg_3\n return []"} +{"_id": "doc_3476", "title": "", "text": "def Func(arg_0='requirements.txt'):\n \"\"\"\n Parse the package dependencies listed in a requirements file but strips\n specific versioning information.\n\n TODO:\n perhaps use https://github.com/davidfischer/requirements-parser instead\n\n CommandLine:\n python -c \"import setup; print(setup.Func())\"\n \"\"\"\n from os.path import dirname, join, exists\n import re\n arg_1 = join(dirname(__file__), arg_0)\n\n def parse_line(arg_2):\n \"\"\"\n Parse information from a line in a requirements text file\n \"\"\"\n arg_3 = {}\n if arg_2.startswith('-e '):\n arg_3['package'] = arg_2.split('#egg=')[1]\n else:\n # Remove versioning from the package\n arg_4 = '(' + '|'.join(['>=', '==', '>']) + ')'\n arg_5 = re.split(arg_4, arg_2, maxsplit=1)\n arg_5 = [p.strip() for p in arg_5]\n\n arg_3['package'] = arg_5[0]\n if len(arg_5) > 1:\n arg_6, arg_7 = arg_5[1:]\n if ';' in arg_7:\n # Handle platform specific dependencies\n # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies\n arg_8, arg_9 = map(str.strip, 
arg_7.split(';'))\n arg_3['platform_deps'] = arg_9\n else:\n arg_8 = arg_7 # NOQA\n arg_3['version'] = (arg_6, arg_8)\n return arg_3\n\n # This breaks on pip install, so check that it exists.\n if exists(arg_1):\n with open(arg_1, 'r') as f:\n arg_10 = []\n for arg_2 in f.readlines():\n arg_2 = arg_2.strip()\n if arg_2 and not arg_2.startswith('#'):\n arg_3 = parse_line(arg_2)\n arg_11 = arg_3['package']\n if not sys.version.startswith('3.4'):\n # apparently package_deps are broken in 3.4\n arg_9 = arg_3.get('platform_deps')\n if arg_9 is not None:\n arg_11 += ';' + arg_9\n arg_10.append(arg_11)\n return arg_10\n return []"} +{"_id": "doc_3477", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Injects a function into an object instance as a bound method\n\n The main use case of this function is for monkey patching. While monkey\n patching is sometimes necessary it should generally be avoided. Thus, we\n simply remind the developer that there might be a better way.\n\n Args:\n self (object): instance to inject a function into\n func (func): the function to inject (must contain an arg for self)\n name (str): name of the method. optional. If not specified the name\n of the function is used.\n\n Example:\n >>> class Foo(object):\n >>> def bar(self):\n >>> return 'bar'\n >>> def baz(self):\n >>> return 'baz'\n >>> self = Foo()\n >>> assert self.bar() == 'bar'\n >>> assert not hasattr(self, 'baz')\n >>> Func(self, baz)\n >>> assert not hasattr(Foo, 'baz'), 'should only change one instance'\n >>> assert self.baz() == 'baz'\n >>> Func(self, baz, 'bar')\n >>> assert self.bar() == 'baz'\n \"\"\"\n # TODO: if func is a bound method we should probably unbind it\n arg_3 = arg_1.__get__(arg_0, arg_0.__class__)\n if arg_2 is None:\n arg_2 = arg_1.__name__\n setattr(arg_0, arg_2, arg_3)"} +{"_id": "doc_3478", "title": "", "text": "def Func(arg_0, arg_1=0o666, arg_2=None, arg_3=0, **arg_4):\n \"\"\"\n change file timestamps\n\n Works like the Func unix utility\n\n Args:\n fpath (PathLike): name of the file\n mode (int): file permissions (python3 and unix only)\n dir_fd (file): optional directory file descriptor. 
If specified, fpath\n is interpreted as relative to this descriptor (python 3 only).\n verbose (int): verbosity\n **kwargs : extra args passed to `os.utime` (python 3 only).\n\n Returns:\n PathLike: path to the file\n\n References:\n https://stackoverflow.com/questions/1158076/implement-Func-using-python\n\n Example:\n >>> import ubelt as ub\n >>> dpath = ub.ensure_app_cache_dir('ubelt')\n >>> fpath = join(dpath, 'Func_file')\n >>> assert not exists(fpath)\n >>> ub.Func(fpath)\n >>> assert exists(fpath)\n >>> os.unlink(fpath)\n \"\"\"\n if arg_3:\n print('Touching file {}'.format(arg_0))\n if six.PY2: # nocover\n with open(arg_0, 'a'):\n os.utime(arg_0, None)\n else:\n arg_5 = os.O_CREAT | os.O_APPEND\n with os.fdopen(os.open(arg_0, arg_5=arg_5, arg_1=arg_1, arg_2=arg_2)) as f:\n os.utime(f.fileno() if os.utime in os.supports_fd else arg_0,\n arg_2=None if os.supports_fd else arg_2, **arg_4)\n return arg_0"} +{"_id": "doc_3479", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Removes a file or recursively removes a directory.\n If a path does not exist, then this is does nothing.\n\n Args:\n path (PathLike): file or directory to remove\n verbose (bool): if True prints what is being done\n\n SeeAlso:\n send2trash - A cross-platform Python package for sending files\n to the trash instead of irreversibly deleting them.\n https://github.com/hsoft/send2trash\n\n Doctest:\n >>> import ubelt as ub\n >>> base = ub.ensure_app_cache_dir('ubelt', 'Func_test')\n >>> dpath1 = ub.ensuredir(join(base, 'dir'))\n >>> ub.ensuredir(join(base, 'dir', 'subdir'))\n >>> ub.touch(join(base, 'dir', 'to_remove1.txt'))\n >>> fpath1 = join(base, 'dir', 'subdir', 'to_remove3.txt')\n >>> fpath2 = join(base, 'dir', 'subdir', 'to_remove2.txt')\n >>> ub.touch(fpath1)\n >>> ub.touch(fpath2)\n >>> assert all(map(exists, (dpath1, fpath1, fpath2)))\n >>> ub.Func(fpath1)\n >>> assert all(map(exists, (dpath1, fpath2)))\n >>> assert not exists(fpath1)\n >>> ub.Func(dpath1)\n >>> assert not any(map(exists, (dpath1, fpath1, fpath2)))\n\n Doctest:\n >>> import ubelt as ub\n >>> dpath = ub.ensure_app_cache_dir('ubelt', 'Func_test2')\n >>> dpath1 = ub.ensuredir(join(dpath, 'dir'))\n >>> fpath1 = ub.touch(join(dpath1, 'to_remove.txt'))\n >>> assert exists(fpath1)\n >>> ub.Func(dpath)\n >>> assert not exists(fpath1)\n \"\"\"\n if not os.path.exists(arg_0):\n # if the file does exists and is not a broken link\n if os.path.islink(arg_0):\n if arg_1: # nocover\n print('Deleting broken link=\"{}\"'.format(arg_0))\n os.unlink(arg_0)\n elif os.path.isdir(arg_0): # nocover\n # Only on windows will a file be a directory and not exist\n if arg_1:\n print('Deleting broken directory link=\"{}\"'.format(arg_0))\n os.rmdir(arg_0)\n elif os.path.isfile(arg_0): # nocover\n # This is a windows only case\n if arg_1:\n print('Deleting broken file link=\"{}\"'.format(arg_0))\n os.unlink(arg_0)\n else:\n if arg_1: # nocover\n print('Not deleting non-existant path=\"{}\"'.format(arg_0))\n else:\n if os.path.islink(arg_0):\n if arg_1: # nocover\n print('Deleting symbolic link=\"{}\"'.format(arg_0))\n os.unlink(arg_0)\n elif os.path.isfile(arg_0):\n if arg_1: # nocover\n print('Deleting file=\"{}\"'.format(arg_0))\n os.unlink(arg_0)\n elif os.path.isdir(arg_0):\n if arg_1: # nocover\n print('Deleting directory=\"{}\"'.format(arg_0))\n if sys.platform.startswith('win32'): # nocover\n # Workaround bug that prevents shutil from working if\n # the directory contains junctions\n from ubelt import _win32_links\n _win32_links._win32_rmtree(arg_0, 
arg_1=arg_1)\n else:\n import shutil\n shutil.rmtree(arg_0)"} +{"_id": "doc_3480", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8):\n \"\"\"\n Joins string-ified items with separators newlines and container-braces.\n \"\"\"\n # positive newlines means start counting from the root\n arg_9 = arg_2 > 0\n\n # negative countdown values mean start counting from the leafs\n # if compact_brace < 0:\n # compact_brace = (-compact_brace) >= _leaf_info['max_height']\n if arg_2 < 0:\n arg_9 = (-arg_2) < arg_3['max_height']\n\n if arg_9:\n arg_10 = ',\\n'\n if arg_4:\n arg_11 = arg_10.join(arg_0)\n if arg_5 and len(arg_0) > 0:\n arg_11 += ','\n arg_12 = arg_11\n else:\n if arg_6:\n # Why must we modify the indentation below and not here?\n # prefix = ''\n # rest = [ub.indent(s, prefix) for s in itemstrs[1:]]\n # indented = itemstrs[0:1] + rest\n arg_13 = arg_0\n else:\n import ubelt as ub\n arg_14 = ' ' * 4\n arg_13 = [ub.indent(s, arg_14) for s in arg_0]\n\n arg_11 = arg_10.join(arg_13)\n if arg_5 and len(arg_0) > 0:\n arg_11 += ','\n if arg_6:\n # Why can we modify the indentation here but not above?\n arg_15 = (arg_7 + arg_11.replace('\\n', '\\n ') + arg_8)\n else:\n arg_15 = (arg_7 + '\\n' + arg_11 + '\\n' + arg_8)\n arg_12 = arg_15\n else:\n arg_10 = ',' + arg_1\n arg_11 = arg_10.join(arg_0)\n if arg_5 and len(arg_0) > 0:\n arg_11 += ','\n arg_12 = (arg_7 + arg_11 + arg_8)\n return arg_12"} +{"_id": "doc_3481", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Create a string representation for each item in a list.\n \"\"\"\n arg_2 = list(arg_0)\n arg_1['_return_info'] = True\n arg_3 = [repr2(item, **arg_1) for item in arg_2]\n arg_4 = [t[0] for t in arg_3]\n arg_5 = max([t[1]['max_height'] for t in arg_3]) if arg_3 else 0\n arg_6 = {\n 'max_height': arg_5 + 1,\n }\n\n arg_7 = arg_1.get('sort', None)\n if arg_7 is None:\n # Force orderings on sets.\n arg_7 = isinstance(arg_0, (set, frozenset))\n if arg_7:\n arg_4 = _sort_itemstrs(arg_2, arg_4)\n return arg_4, arg_6"} +{"_id": "doc_3482", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Registers a custom formatting function with ub.repr2\n \"\"\"\n def _decorator(arg_2):\n if isinstance(arg_1, tuple):\n for arg_3 in arg_1:\n arg_0.func_registry[arg_3] = arg_2\n else:\n arg_0.func_registry[arg_1] = arg_2\n return arg_2\n return _decorator"} +{"_id": "doc_3483", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns an appropriate function to format `data` if one has been\n registered.\n \"\"\"\n for arg_2 in arg_0.lazy_init:\n arg_2()\n\n for arg_3, arg_2 in arg_0.func_registry.items():\n if isinstance(arg_1, arg_3):\n return arg_2"} +{"_id": "doc_3484", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert a string-based key into a hasher class\n\n Notes:\n In terms of speed on 64bit systems, sha1 is the fastest followed by md5\n and sha512. The slowest algorithm is sha256. 
If xxhash is installed\n the fastest algorithm is xxh64.\n\n Example:\n >>> assert Func(NoParam) is DEFAULT_HASHER\n >>> assert Func('sha1') is hashlib.sha1\n >>> assert Func('sha256') is hashlib.sha256\n >>> assert Func('sha512') is hashlib.sha512\n >>> assert Func('md5') is hashlib.md5\n >>> assert Func(hashlib.sha1) is hashlib.sha1\n >>> assert Func(hashlib.sha1())().name == 'sha1'\n >>> import pytest\n >>> assert pytest.raises(KeyError, Func, '42')\n >>> #assert pytest.raises(TypeError, Func, object)\n >>> if xxhash:\n >>> assert Func('xxh64') is xxhash.xxh64\n >>> assert Func('xxh32') is xxhash.xxh32\n \"\"\"\n if xxhash is not None: # pragma: nobranch\n if arg_0 in {'xxh32', 'xx32', 'xxhash'}:\n return xxhash.xxh32\n if arg_0 in {'xxh64', 'xx64'}:\n return xxhash.xxh64\n\n if arg_0 is NoParam or arg_0 == 'default':\n arg_0 = DEFAULT_HASHER\n elif isinstance(arg_0, six.string_types):\n if arg_0 not in hashlib.algorithms_available:\n raise KeyError('unknown hasher: {}'.format(arg_0))\n else:\n arg_0 = getattr(hashlib, arg_0)\n elif isinstance(arg_0, HASH):\n # by default the result of this function is a class we will make an\n # instance of, if we already have an instance, wrap it in a callable\n # so the external syntax does not need to change.\n return lambda: arg_0\n return arg_0"} +{"_id": "doc_3485", "title": "", "text": "def Func(arg_0):\n \"\"\"\n transforms base shorthand into the full list representation\n\n Example:\n >>> assert Func(NoParam) is DEFAULT_ALPHABET\n >>> assert Func('hex') is _ALPHABET_16\n >>> assert Func('abc') is _ALPHABET_26\n >>> assert Func(10) is _ALPHABET_10\n >>> assert Func(['1', '2']) == ['1', '2']\n >>> import pytest\n >>> assert pytest.raises(TypeError, Func, 'uselist')\n \"\"\"\n if arg_0 is NoParam or arg_0 == 'default':\n return DEFAULT_ALPHABET\n elif arg_0 in [26, 'abc', 'alpha']:\n return _ALPHABET_26\n elif arg_0 in [16, 'hex']:\n return _ALPHABET_16\n elif arg_0 in [10, 'dec']:\n return _ALPHABET_10\n else:\n if not isinstance(arg_0, (list, tuple)):\n raise TypeError(\n 'Argument `base` must be a key, list, or tuple; not {}'.format(\n type(arg_0)))\n return arg_0"} +{"_id": "doc_3486", "title": "", "text": "def Func(arg_0, arg_1=True):\n r\"\"\"\n Extracts the sequence of bytes that would be hashed by hash_data\n\n Example:\n >>> data = [2, (3, 4)]\n >>> result1 = (b''.join(Func(data, types=False)))\n >>> result2 = (b''.join(Func(data, types=True)))\n >>> assert result1 == b'_[_\\x02_,__[_\\x03_,_\\x04_,__]__]_'\n >>> assert result2 == b'_[_INT\\x02_,__[_INT\\x03_,_INT\\x04_,__]__]_'\n \"\"\"\n arg_2 = _HashTracer()\n _update_hasher(arg_2, arg_0, arg_1=arg_1)\n return arg_2.sequence"} +{"_id": "doc_3487", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Converts `data` into a byte representation and calls update on the hasher\n `hashlib.HASH` algorithm.\n\n Args:\n hasher (HASH): instance of a hashlib algorithm\n data (object): ordered data with structure\n types (bool): include type prefixes in the hash\n\n Example:\n >>> hasher = hashlib.sha512()\n >>> data = [1, 2, ['a', 2, 'c']]\n >>> Func(hasher, data)\n >>> print(hasher.hexdigest()[0:8])\n e2c67675\n\n 2ba8d82b\n \"\"\"\n # Determine if the data should be hashed directly or iterated through\n if isinstance(arg_1, (tuple, list, zip)):\n arg_3 = True\n else:\n arg_3 = any(check(arg_1) for check in\n _HASHABLE_EXTENSIONS.iterable_checks)\n\n if arg_3:\n # Denote that we are hashing over an iterable\n # Multiple structure bytes makes it harder accidently make 
conflicts\n arg_4 = b'_,_'\n arg_5 = b'_[_'\n arg_6 = b'_]_'\n\n arg_7 = iter(arg_1)\n arg_0.update(arg_5)\n # first, try to nest quickly without recursive calls\n # (this works if all data in the sequence is a non-iterable)\n try:\n for arg_8 in arg_7:\n arg_9, arg_10 = _convert_to_hashable(arg_8, arg_2)\n arg_11 = arg_9 + arg_10 + arg_4\n arg_0.update(arg_11)\n except TypeError:\n # need to use recursive calls\n # Update based on current item\n Func(arg_0, arg_8, arg_2)\n for arg_8 in arg_7:\n # Ensure the items have a spacer between them\n Func(arg_0, arg_8, arg_2)\n arg_0.update(arg_4)\n arg_0.update(arg_6)\n else:\n arg_9, arg_10 = _convert_to_hashable(arg_1, arg_2)\n arg_11 = arg_9 + arg_10\n arg_0.update(arg_11)"} +{"_id": "doc_3488", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"\n Packs a long hexstr into a shorter length string with a larger base.\n\n Args:\n hexstr (str): string of hexidecimal symbols to convert\n base (list): symbols of the conversion base\n\n Example:\n >>> print(Func('ffffffff', _ALPHABET_26))\n nxmrlxv\n >>> print(Func('0', _ALPHABET_26))\n 0\n >>> print(Func('-ffffffff', _ALPHABET_26))\n -nxmrlxv\n >>> print(Func('aafffff1', _ALPHABET_16))\n aafffff1\n\n Sympy:\n >>> import sympy as sy\n >>> # Determine the length savings with lossless conversion\n >>> consts = dict(hexbase=16, hexlen=256, baselen=27)\n >>> symbols = sy.symbols('hexbase, hexlen, baselen, newlen')\n >>> haexbase, hexlen, baselen, newlen = symbols\n >>> eqn = sy.Eq(16 ** hexlen, baselen ** newlen)\n >>> newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()\n >>> print('newlen_ans = %r' % (newlen_ans,))\n >>> # for a 26 char base we can get 216\n >>> print('Required length for lossless conversion len2 = %r' % (len2,))\n >>> def info(base, len):\n ... bits = base ** len\n ... print('base = %r' % (base,))\n ... print('len = %r' % (len,))\n ... print('bits = %r' % (bits,))\n >>> info(16, 256)\n >>> info(27, 16)\n >>> info(27, 64)\n >>> info(27, 216)\n \"\"\"\n if arg_1 is _ALPHABET_16:\n # already in hex, no conversion needed\n return arg_0\n arg_2 = len(arg_1)\n arg_3 = int(arg_0, 16) # first convert to base 16\n if arg_3 == 0:\n return '0'\n arg_4 = 1 if arg_3 > 0 else -1\n arg_3 *= arg_4\n arg_5 = []\n while arg_3:\n arg_5.append(arg_1[arg_3 % arg_2])\n arg_3 //= arg_2\n if arg_4 < 0:\n arg_5.append('-')\n arg_5.reverse()\n arg_6 = ''.join(arg_5)\n return arg_6"} +{"_id": "doc_3489", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Registers a function to generate a hash for data of the appropriate\n types. This can be used to Func custom classes. Internally this is\n used to define how to hash non-builtin objects like ndarrays and uuids.\n\n The Funced function should return a tuple of bytes. First a small\n prefix hinting at the data type, and second the raw bytes that can be\n hashed.\n\n Args:\n hash_types (class or tuple of classes):\n\n Returns:\n func: closure to be used as the decorator\n\n Example:\n >>> # xdoctest: +SKIP\n >>> # Skip this doctest because we dont want tests to modify\n >>> # the global state.\n >>> import ubelt as ub\n >>> import pytest\n >>> class MyType(object):\n ... def __init__(self, id):\n ... self.id = id\n >>> data = MyType(1)\n >>> # Custom types wont work with ub.hash_data by default\n >>> with pytest.raises(TypeError):\n ... ub.hash_data(data)\n >>> # You can Func your functions with ubelt's internal\n >>> # hashable_extension Funcy.\n >>> @ub.util_hash._HASHABLE_EXTENSIONS.Func(MyType)\n >>> def hash_my_type(data):\n ... 
return b'mytype', six.b(ub.hash_data(data.id))\n >>> # TODO: allow hash_data to take an new instance of\n >>> # HashableExtensions, so we dont have to modify the global\n >>> # ubelt state when we run tests.\n >>> my_instance = MyType(1)\n >>> ub.hash_data(my_instance)\n \"\"\"\n # ensure iterable\n if not isinstance(arg_1, (list, tuple)):\n arg_1 = [arg_1]\n def _decor_closure(arg_2):\n for arg_3 in arg_1:\n arg_4 = (arg_3.__module__, arg_3.__name__)\n arg_0.keyed_extensions[arg_4] = (arg_3, arg_2)\n return arg_2\n return _decor_closure"} +{"_id": "doc_3490", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns an appropriate function to hash `data` if one has been\n registered.\n\n Raises:\n TypeError : if data has no registered hash methods\n\n Example:\n >>> import ubelt as ub\n >>> import pytest\n >>> if not ub.modname_to_modpath('numpy'):\n ... raise pytest.skip('numpy is optional')\n >>> self = HashableExtensions()\n >>> self._register_numpy_extensions()\n >>> self._register_builtin_class_extensions()\n\n >>> import numpy as np\n >>> data = np.array([1, 2, 3])\n >>> self.Func(data[0])\n\n >>> class Foo(object):\n >>> def __init__(f):\n >>> f.attr = 1\n >>> data = Foo()\n >>> assert pytest.raises(TypeError, self.Func, data)\n\n >>> # If ub.hash_data doesnt support your object,\n >>> # then you can register it.\n >>> @self.register(Foo)\n >>> def _hashfoo(data):\n >>> return b'FOO', data.attr\n >>> func = self.Func(data)\n >>> assert func(data)[1] == 1\n\n >>> data = uuid.uuid4()\n >>> self.Func(data)\n \"\"\"\n # Maybe try using functools.singledispatch instead?\n # First try O(1) Func\n arg_2 = arg_1.__class__\n arg_3 = (arg_2.__module__, arg_2.__name__)\n try:\n arg_4, arg_5 = arg_0.keyed_extensions[arg_3]\n except KeyError:\n raise TypeError('No registered hash func for hashable type=%r' % (\n arg_2))\n return arg_5"} +{"_id": "doc_3491", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Numpy extensions are builtin\n \"\"\"\n # system checks\n import numpy as np\n arg_1 = (np.float16, np.float32, np.float64)\n if hasattr(np, 'float128'): # nocover\n arg_1 = arg_1 + (np.float128,)\n\n @arg_0.add_iterable_check\n def is_object_ndarray(arg_2):\n # ndarrays of objects cannot be hashed directly.\n return isinstance(arg_2, np.ndarray) and arg_2.dtype.kind == 'O'\n\n @arg_0.register(np.ndarray)\n def hash_numpy_array(arg_2):\n \"\"\"\n Example:\n >>> import ubelt as ub\n >>> if not ub.modname_to_modpath('numpy'):\n ... 
raise pytest.skip()\n >>> import numpy as np\n >>> data_f32 = np.zeros((3, 3, 3), dtype=np.float64)\n >>> data_i64 = np.zeros((3, 3, 3), dtype=np.int64)\n >>> data_i32 = np.zeros((3, 3, 3), dtype=np.int32)\n >>> hash_f64 = _hashable_sequence(data_f32, types=True)\n >>> hash_i64 = _hashable_sequence(data_i64, types=True)\n >>> hash_i32 = _hashable_sequence(data_i64, types=True)\n >>> assert hash_i64 != hash_f64\n >>> assert hash_i64 != hash_i32\n \"\"\"\n if arg_2.dtype.kind == 'O':\n arg_3 = 'directly hashing ndarrays with dtype=object is unstable'\n raise TypeError(arg_3)\n else:\n # tobytes() views the array in 1D (via ravel())\n # encode the shape as well\n arg_4 = b''.join(_hashable_sequence((len(arg_2.shape), arg_2.shape)))\n arg_5 = b''.join(_hashable_sequence(arg_2.dtype.descr))\n arg_6 = arg_4 + arg_5 + arg_2.tobytes()\n arg_7 = b'NDARR'\n return arg_7, arg_6\n\n @arg_0.register((np.int64, np.int32, np.int16, np.int8) +\n (np.uint64, np.uint32, np.uint16, np.uint8))\n def _hash_numpy_int(arg_2):\n return _convert_to_hashable(int(arg_2))\n\n @arg_0.register(arg_1)\n def _hash_numpy_float(arg_2):\n return _convert_to_hashable(float(arg_2))\n\n @arg_0.register(np.random.RandomState)\n def _hash_numpy_random_state(arg_2):\n \"\"\"\n Example:\n >>> import ubelt as ub\n >>> if not ub.modname_to_modpath('numpy'):\n ... raise pytest.skip()\n >>> import numpy as np\n >>> rng = np.random.RandomState(0)\n >>> _hashable_sequence(rng, types=True)\n \"\"\"\n arg_6 = b''.join(_hashable_sequence(arg_2.get_state()))\n arg_7 = b'RNG'\n return arg_7, arg_6"} +{"_id": "doc_3492", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Register hashing extensions for a selection of classes included in\n python stdlib.\n\n Example:\n >>> data = uuid.UUID('7e9d206b-dc02-4240-8bdb-fffe858121d0')\n >>> print(hash_data(data, base='abc', hasher='sha512', types=True)[0:8])\n cryarepd\n >>> data = OrderedDict([('a', 1), ('b', 2), ('c', [1, 2, 3]),\n >>> (4, OrderedDict())])\n >>> print(hash_data(data, base='abc', hasher='sha512', types=True)[0:8])\n qjspicvv\n\n gpxtclct\n \"\"\"\n @arg_0.register(uuid.UUID)\n def _hash_uuid(arg_1):\n arg_2 = arg_1.bytes\n arg_3 = b'UUID'\n return arg_3, arg_2\n\n @arg_0.register(OrderedDict)\n def _hash_ordered_dict(arg_1):\n \"\"\"\n Note, we should not be hashing dicts because they are unordered\n \"\"\"\n arg_2 = b''.join(_hashable_sequence(list(arg_1.items())))\n arg_3 = b'ODICT'\n return arg_3, arg_2"} +{"_id": "doc_3493", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"\n Reads output from a process in a separate thread\n \"\"\"\n from six.moves import queue\n from threading import Thread\n def enqueue_output(arg_0, arg_1, arg_3):\n while arg_0.poll() is None:\n arg_4 = arg_1.readline()\n # print('ENQUEUE LIVE {!r} {!r}'.format(stream, line))\n arg_3.put(arg_4)\n\n for arg_4 in _textio_iterlines(arg_1):\n # print('ENQUEUE FINAL {!r} {!r}'.format(stream, line))\n arg_3.put(arg_4)\n\n # print(\"STREAM IS DONE {!r}\".format(stream))\n arg_3.put(None) # signal that the stream is finished\n # stream.close()\n arg_3 = queue.Queue(maxsize=arg_2)\n arg_5 = Thread(target=enqueue_output, args=(arg_0, arg_1, arg_3))\n arg_5.daemon = True # thread dies with the program\n arg_5.start()\n return arg_3"} +{"_id": "doc_3494", "title": "", "text": "def Func(arg_0='iso8601'):\n \"\"\"\n make an iso8601 Func\n\n Args:\n method (str): type of Func\n\n Example:\n >>> stamp = Func()\n >>> print('stamp = {!r}'.format(stamp))\n stamp = ...-...-...T...\n \"\"\"\n if arg_0 == 
'iso8601':\n # ISO 8601\n # datetime.datetime.utcnow().isoformat()\n # datetime.datetime.now().isoformat()\n # utcnow\n arg_1 = time.timezone // 3600\n arg_2 = str(arg_1) if arg_1 < 0 else '+' + str(arg_1)\n arg_3 = time.strftime('%Y-%m-%dT%H%M%S') + arg_2\n return arg_3\n else:\n raise ValueError('only iso8601 is accepted for now')"} +{"_id": "doc_3495", "title": "", "text": "def Func(arg_0, arg_1=-1):\n \"\"\"\n Imports a module via its path\n\n Args:\n modpath (PathLike): path to the module on disk or within a zipfile.\n\n Returns:\n module: the imported module\n\n References:\n https://stackoverflow.com/questions/67631/import-module-given-path\n\n Notes:\n If the module is part of a package, the package will be imported first.\n These modules may cause problems when reloading via IPython magic\n\n This can import a module from within a zipfile. To do this modpath\n should specify the path to the zipfile and the path to the module\n within that zipfile separated by a colon or pathsep.\n E.g. `/path/to/archive.zip:mymodule.py`\n\n Warning:\n It is best to use this with paths that will not conflict with\n previously existing modules.\n\n If the modpath conflicts with a previously existing module name. And\n the target module does imports of its own relative to this conflicting\n path. In this case, the module that was loaded first will win.\n\n For example if you try to import '/foo/bar/pkg/mod.py' from the folder\n structure:\n - foo/\n +- bar/\n +- pkg/\n + __init__.py\n |- mod.py\n |- helper.py\n\n If there exists another module named `pkg` already in sys.modules\n and mod.py does something like `from . import helper`, Python will\n assume helper belongs to the `pkg` module already in sys.modules.\n This can cause a NameError or worse --- a incorrect helper module.\n\n Example:\n >>> import xdoctest\n >>> modpath = xdoctest.__file__\n >>> module = Func(modpath)\n >>> assert module is xdoctest\n\n Example:\n >>> # Test importing a module from within a zipfile\n >>> import zipfile\n >>> from xdoctest import utils\n >>> from os.path import join, expanduser\n >>> dpath = expanduser('~/.cache/xdoctest')\n >>> dpath = utils.ensuredir(dpath)\n >>> #dpath = utils.TempDir().ensure()\n >>> # Write to an external module named bar\n >>> external_modpath = join(dpath, 'bar.py')\n >>> open(external_modpath, 'w').write('testvar = 1')\n >>> internal = 'folder/bar.py'\n >>> # Move the external bar module into a zipfile\n >>> zippath = join(dpath, 'myzip.zip')\n >>> with zipfile.ZipFile(zippath, 'w') as myzip:\n >>> myzip.write(external_modpath, internal)\n >>> # Import the bar module from within the zipfile\n >>> modpath = zippath + ':' + internal\n >>> modpath = zippath + os.path.sep + internal\n >>> module = Func(modpath)\n >>> assert module.__name__ == os.path.normpath('folder/bar')\n >>> assert module.testvar == 1\n\n Doctest:\n >>> import pytest\n >>> with pytest.raises(IOError):\n >>> Func('does-not-exist')\n >>> with pytest.raises(IOError):\n >>> Func('does-not-exist.zip/')\n \"\"\"\n import os\n if not os.path.exists(arg_0):\n import re\n import zipimport\n # We allow (if not prefer or force) the colon to be a path.sep in order\n # to agree with the mod.__name__ attribute that will be produced\n\n # zip followed by colon or slash\n arg_2 = '(.zip[' + re.escape(os.path.sep) + '/:])'\n arg_3 = re.split(arg_2, arg_0, flags=re.IGNORECASE)\n if len(arg_3) > 2:\n arg_4 = ''.join(arg_3[:-1])[:-1]\n arg_5 = arg_3[-1]\n arg_6 = os.path.splitext(arg_5)[0]\n arg_6 = os.path.normpath(arg_6)\n if 
os.path.exists(arg_4):\n arg_7 = zipimport.zipimporter(arg_4)\n arg_8 = arg_7.load_module(arg_6)\n return arg_8\n raise IOError('modpath={} does not exist'.format(arg_0))\n else:\n # the importlib version doesnt work in pytest\n arg_8 = _custom_import_modpath(arg_0)\n # TODO: use this implementation once pytest fixes importlib\n # module = _pkgutil_import_modpath(modpath)\n return arg_8"} +{"_id": "doc_3496", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n syspath version of modname_to_modpath\n\n Args:\n modname (str): name of module to find\n sys_path (List[PathLike], default=None):\n if specified overrides `sys.path`\n exclude (List[PathLike], default=None):\n list of directory paths. if specified prevents these directories\n from being searched.\n\n Notes:\n This is much slower than the pkgutil mechanisms.\n\n CommandLine:\n python -m xdoctest.static_analysis Func\n\n Example:\n >>> print(Func('xdoctest.static_analysis'))\n ...static_analysis.py\n >>> print(Func('xdoctest'))\n ...xdoctest\n >>> print(Func('_ctypes'))\n ..._ctypes...\n >>> assert Func('xdoctest', sys_path=[]) is None\n >>> assert Func('xdoctest.static_analysis', sys_path=[]) is None\n >>> assert Func('_ctypes', sys_path=[]) is None\n >>> assert Func('this', sys_path=[]) is None\n\n Example:\n >>> # test what happens when the module is not visible in the path\n >>> modname = 'xdoctest.static_analysis'\n >>> modpath = Func(modname)\n >>> exclude = [split_modpath(modpath)[0]]\n >>> found = Func(modname, exclude=exclude)\n >>> # this only works if installed in dev mode, pypi fails\n >>> assert found is None, 'should not have found {}'.format(found)\n \"\"\"\n\n def _isvalid(arg_3, arg_4):\n # every directory up to the module, should have an init\n arg_5 = dirname(arg_3)\n while arg_5 and arg_5 != arg_4:\n if not exists(join(arg_5, '__init__.py')):\n return False\n arg_5 = dirname(arg_5)\n return True\n\n arg_6 = arg_0.replace('.', os.path.sep)\n arg_7 = [\n arg_6 + '.py',\n # _fname_we + '.pyc',\n # _fname_we + '.pyo',\n ]\n # Add extension library suffixes\n arg_7 += [arg_6 + arg_8 for arg_8 in _platform_pylib_exts()]\n\n if arg_1 is None:\n arg_1 = sys.path\n\n # the empty string in sys.path indicates cwd. Change this to a '.'\n arg_9 = ['.' if arg_10 == '' else arg_10 for arg_10 in arg_1]\n\n if arg_2:\n def normalize(arg_10):\n if sys.platform.startswith('win32'): # nocover\n return realpath(arg_10).lower()\n else:\n return realpath(arg_10)\n # Keep only the paths not in exclude\n arg_11 = {normalize(arg_10) for arg_10 in arg_2}\n arg_9 = [arg_10 for arg_10 in arg_9\n if normalize(arg_10) not in arg_11]\n\n for arg_12 in arg_9:\n # Check for directory-based modules (has presidence over files)\n arg_3 = join(arg_12, arg_6)\n if exists(arg_3):\n if isfile(join(arg_3, '__init__.py')):\n if _isvalid(arg_3, arg_12):\n return arg_3\n\n # If that fails, check for file-based modules\n for arg_13 in arg_7:\n arg_3 = join(arg_12, arg_13)\n if isfile(arg_3):\n if _isvalid(arg_3, arg_12):\n return arg_3"} +{"_id": "doc_3497", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False, arg_3=None):\n \"\"\"\n Finds the path to a python module from its name.\n\n Determines the path to a python module without directly import it\n\n Converts the name of a module (__name__) to the path (__file__) where it is\n located without importing the module. 
Returns None if the module does not\n exist.\n\n Args:\n modname (str): module filepath\n hide_init (bool): if False, __init__.py will be returned for packages\n hide_main (bool): if False, and hide_init is True, __main__.py will be\n returned for packages, if it exists.\n sys_path (list): if specified overrides `sys.path` (default None)\n\n Returns:\n str: modpath - path to the module, or None if it doesn't exist\n\n CommandLine:\n python -m xdoctest.static_analysis Func:0\n pytest /home/joncrall/code/xdoctest/xdoctest/static_analysis.py::Func:0\n\n Example:\n >>> modname = 'xdoctest.__main__'\n >>> modpath = Func(modname, hide_main=False)\n >>> assert modpath.endswith('__main__.py')\n >>> modname = 'xdoctest'\n >>> modpath = Func(modname, hide_init=False)\n >>> assert modpath.endswith('__init__.py')\n >>> modpath = basename(Func('_ctypes'))\n >>> assert 'ctypes' in modpath\n \"\"\"\n arg_4 = _syspath_Func(arg_0, arg_3)\n if arg_4 is None:\n return None\n\n arg_4 = normalize_modpath(arg_4, arg_1=arg_1,\n arg_2=arg_2)\n return arg_4"} +{"_id": "doc_3498", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False, arg_3=True,\n arg_4=None):\n \"\"\"\n Determines importable name from file path\n\n Converts the path to a module (__file__) to the importable python name\n (__name__) without importing the module.\n\n The filename is converted to a module name, and parent directories are\n recursively included until a directory without an __init__.py file is\n encountered.\n\n Args:\n modpath (str): module filepath\n hide_init (bool): removes the __init__ suffix (default True)\n hide_main (bool): removes the __main__ suffix (default False)\n check (bool): if False, does not raise an error if modpath is a dir\n and does not contain an __init__ file.\n relativeto (str, optional): if specified, all checks are ignored and\n this is considered the path to the root module.\n\n Returns:\n str: modname\n\n Raises:\n ValueError: if check is True and the path does not exist\n\n CommandLine:\n xdoctest -m xdoctest.static_analysis Func\n\n Example:\n >>> from xdoctest import static_analysis\n >>> modpath = static_analysis.__file__.replace('.pyc', '.py')\n >>> modpath = modpath.replace('.pyc', '.py')\n >>> modname = Func(modpath)\n >>> assert modname == 'xdoctest.static_analysis'\n\n Example:\n >>> import xdoctest\n >>> assert Func(xdoctest.__file__.replace('.pyc', '.py')) == 'xdoctest'\n >>> assert Func(dirname(xdoctest.__file__.replace('.pyc', '.py'))) == 'xdoctest'\n\n Example:\n >>> modpath = modname_to_modpath('_ctypes')\n >>> modname = Func(modpath)\n >>> assert modname == '_ctypes'\n \"\"\"\n if arg_3 and arg_4 is None:\n if not exists(arg_0):\n raise ValueError('modpath={} does not exist'.format(arg_0))\n arg_5 = abspath(expanduser(arg_0))\n\n arg_5 = normalize_modpath(arg_5, arg_1=arg_1,\n arg_2=arg_2)\n if arg_4:\n arg_6 = dirname(abspath(expanduser(arg_4)))\n arg_7 = relpath(arg_5, arg_6)\n else:\n arg_6, arg_7 = split_modpath(arg_5, arg_3=arg_3)\n\n arg_8 = splitext(arg_7)[0]\n if '.' in arg_8:\n arg_8, arg_9 = arg_8.split('.')\n arg_8 = arg_8.replace('/', '.')\n arg_8 = arg_8.replace('\\\\', '.')\n return arg_8"} +{"_id": "doc_3499", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Determines if a key is specified on the command line\n\n Args:\n key (str or tuple): string or tuple of strings. Each key should be\n prefixed with two hyphens (i.e. 
`--`)\n argv (Optional[list]): overrides `sys.argv` if specified\n\n Returns:\n bool: flag : True if the key (or any of the keys) was specified\n\n Example:\n >>> import ubelt as ub\n >>> argv = ['--spam', '--eggs', 'foo']\n >>> assert ub.Func('--eggs', argv=argv) is True\n >>> assert ub.Func('--ans', argv=argv) is False\n >>> assert ub.Func('foo', argv=argv) is True\n >>> assert ub.Func(('bar', '--spam'), argv=argv) is True\n \"\"\"\n if arg_1 is None: # nocover\n arg_1 = sys.argv\n arg_2 = [arg_0] if isinstance(arg_0, six.string_types) else arg_0\n arg_3 = any(k in arg_1 for k in arg_2)\n return arg_3"} +{"_id": "doc_3500", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Horizontally concatenates strings preserving indentation\n\n Concatenates a list of objects ensuring that the next item in the list is\n all the way to the right of any previous items.\n\n Args:\n args (List[str]): strings to concatenate\n sep (str): separator (defaults to '')\n\n CommandLine:\n python -m ubelt.util_str Func\n\n Example1:\n >>> import ubelt as ub\n >>> B = ub.repr2([[1, 2], [3, 457]], nl=1, cbr=True, trailsep=False)\n >>> C = ub.repr2([[5, 6], [7, 8]], nl=1, cbr=True, trailsep=False)\n >>> args = ['A = ', B, ' * ', C]\n >>> print(ub.Func(args))\n A = [[1, 2], * [[5, 6],\n [3, 457]] [7, 8]]\n\n Example2:\n >>> from ubelt.util_str import *\n >>> import ubelt as ub\n >>> import unicodedata\n >>> aa = unicodedata.normalize('NFD', '\u00e1') # a unicode char with len2\n >>> B = ub.repr2([['\u03b8', aa], [aa, aa, aa]], nl=1, si=True, cbr=True, trailsep=False)\n >>> C = ub.repr2([[5, 6], [7, '\u03b8']], nl=1, si=True, cbr=True, trailsep=False)\n >>> args = ['A', '=', B, '*', C]\n >>> print(ub.Func(args, sep='\uff5c'))\n A\uff5c=\uff5c[[\u03b8, \u00e1], \uff5c*\uff5c[[5, 6],\n \uff5c \uff5c [\u00e1, \u00e1, \u00e1]]\uff5c \uff5c [7, \u03b8]]\n \"\"\"\n import unicodedata\n if '\\n' in arg_1 or '\\r' in arg_1:\n raise ValueError('`sep` cannot contain newline characters')\n\n # TODO: ensure unicode data works correctly for python2\n arg_0 = [unicodedata.normalize('NFC', ensure_unicode(val)) for val in arg_0]\n arg_2 = [a.split('\\n') for a in arg_0]\n arg_3 = max(map(len, arg_2))\n # Do vertical padding\n arg_2 = [arg_8 + [''] * (arg_3 - len(arg_8)) for arg_8 in arg_2]\n # Initialize output\n arg_4 = ['' for _ in range(arg_3)]\n arg_5 = 0\n arg_6 = len(arg_0)\n for arg_7, arg_8 in enumerate(arg_2):\n # Concatenate the new string\n for arg_9, arg_10 in enumerate(arg_8):\n arg_4[arg_9] += arg_10\n # Find the new maximum horizontal width\n arg_5 = max(arg_5, max(map(len, arg_4)))\n if arg_7 < arg_6 - 1:\n # Horizontal padding on all but last iter\n for arg_9, arg_10 in list(enumerate(arg_4)):\n arg_11 = arg_5 - len(arg_10)\n arg_4[arg_9] = arg_10 + (' ' * arg_11) + arg_1\n arg_5 += len(arg_1)\n # Clean up trailing whitespace\n arg_4 = [arg_10.rstrip(' ') for arg_10 in arg_4]\n arg_12 = '\\n'.join(arg_4)\n return arg_12"} +{"_id": "doc_3501", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=0):\n \"\"\"\n Create a symbolic link.\n\n This will work on linux or windows, however windows does have some corner\n cases. 
For more details see notes in `ubelt._win32_links`.\n\n Args:\n path (PathLike): path to real file or directory\n link_path (PathLike): path to desired location for Func\n overwrite (bool): overwrite existing Funcs.\n This will not overwrite real files on systems with proper Funcs.\n However, on older versions of windows junctions are\n indistinguishable from real files, so we cannot make this\n guarantee. (default = False)\n verbose (int): verbosity level (default=0)\n\n Returns:\n PathLike: link path\n\n CommandLine:\n python -m ubelt.util_links Func:0\n\n Example:\n >>> import ubelt as ub\n >>> dpath = ub.ensure_app_cache_dir('ubelt', 'test_Func0')\n >>> real_path = join(dpath, 'real_file.txt')\n >>> link_path = join(dpath, 'link_file.txt')\n >>> [ub.delete(p) for p in [real_path, link_path]]\n >>> ub.writeto(real_path, 'foo')\n >>> result = Func(real_path, link_path)\n >>> assert ub.readfrom(result) == 'foo'\n >>> [ub.delete(p) for p in [real_path, link_path]]\n\n Example:\n >>> import ubelt as ub\n >>> from os.path import dirname\n >>> dpath = ub.ensure_app_cache_dir('ubelt', 'test_Func1')\n >>> ub.delete(dpath)\n >>> ub.ensuredir(dpath)\n >>> _dirstats(dpath)\n >>> real_dpath = ub.ensuredir((dpath, 'real_dpath'))\n >>> link_dpath = ub.augpath(real_dpath, base='link_dpath')\n >>> real_path = join(dpath, 'afile.txt')\n >>> link_path = join(dpath, 'afile.txt')\n >>> [ub.delete(p) for p in [real_path, link_path]]\n >>> ub.writeto(real_path, 'foo')\n >>> result = Func(real_dpath, link_dpath)\n >>> assert ub.readfrom(link_path) == 'foo', 'read should be same'\n >>> ub.writeto(link_path, 'bar')\n >>> _dirstats(dpath)\n >>> assert ub.readfrom(link_path) == 'bar', 'very bad bar'\n >>> assert ub.readfrom(real_path) == 'bar', 'changing link did not change real'\n >>> ub.writeto(real_path, 'baz')\n >>> _dirstats(dpath)\n >>> assert ub.readfrom(real_path) == 'baz', 'very bad baz'\n >>> assert ub.readfrom(link_path) == 'baz', 'changing real did not change link'\n >>> ub.delete(link_dpath, verbose=1)\n >>> _dirstats(dpath)\n >>> assert not exists(link_dpath), 'link should not exist'\n >>> assert exists(real_path), 'real path should exist'\n >>> _dirstats(dpath)\n >>> ub.delete(dpath, verbose=1)\n >>> _dirstats(dpath)\n >>> assert not exists(real_path)\n \"\"\"\n arg_4 = normpath(arg_0)\n arg_5 = normpath(arg_1)\n\n if not os.path.isabs(arg_4):\n # if path is not absolute it must be specified relative to link\n if _can_Func():\n arg_4 = os.path.relpath(arg_4, os.path.dirname(arg_5))\n else: # nocover\n # On windows, we need to use absolute paths\n arg_4 = os.path.abspath(arg_4)\n\n if arg_3:\n print('Symlink: {path} -> {link}'.format(arg_4=arg_4, arg_5=arg_5))\n if islink(arg_5):\n if arg_3:\n print('... already exists')\n arg_6 = _readlink(arg_5)\n if arg_6 == arg_4:\n if arg_3 > 1:\n print('... and points to the right place')\n return arg_5\n if arg_3 > 1:\n if not exists(arg_5):\n print('... but it is broken and points somewhere else: {}'.format(arg_6))\n else:\n print('... but it points somewhere else: {}'.format(arg_6))\n if arg_2:\n util_io.delete(arg_5, arg_3=arg_3 > 1)\n elif exists(arg_5):\n if _win32_links is None:\n if arg_3:\n print('... already exists, but its a file. This will error.')\n raise FileExistsError(\n 'cannot overwrite a physical path: \"{}\"'.format(arg_4))\n else: # nocover\n if arg_3:\n print('... already exists, and is either a file or hard link. '\n 'Assuming it is a hard link. 
'\n 'On non-win32 systems this would error.')\n\n if _win32_links is None:\n os.Func(arg_4, arg_5)\n else: # nocover\n _win32_links._Func(arg_4, arg_5, arg_2=arg_2, arg_3=arg_3)\n\n return arg_5"} +{"_id": "doc_3502", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Transforms function args into a key that can be used by the cache\n\n CommandLine:\n xdoctest -m ubelt.util_memoize Func\n\n Example:\n >>> args = (4, [1, 2])\n >>> kwargs = {'a': 'b'}\n >>> key = Func(args, kwargs)\n >>> print('key = {!r}'.format(key))\n >>> # Some mutable types cannot be handled by ub.hash_data\n >>> import pytest\n >>> import six\n >>> if six.PY2:\n >>> import collections as abc\n >>> else:\n >>> from collections import abc\n >>> with pytest.raises(TypeError):\n >>> Func((4, [1, 2], {1: 2, 'a': 'b'}), kwargs={})\n >>> class Dummy(abc.MutableSet):\n >>> def __contains__(self, item): return None\n >>> def __iter__(self): return iter([])\n >>> def __len__(self): return 0\n >>> def add(self, item, loc): return None\n >>> def discard(self, item): return None\n >>> with pytest.raises(TypeError):\n >>> Func((Dummy(),), kwargs={})\n \"\"\"\n arg_2 = arg_1.items()\n # TODO: we should check if Python is at least 3.7 and sort by kwargs\n # keys otherwise. Should we use hash_data for key generation\n if (sys.version_info.major, sys.version_info.minor) < (3, 7): # nocover\n # We can sort because they keys are gaurenteed to be strings\n arg_2 = sorted(arg_2)\n arg_2 = tuple(arg_2)\n\n try:\n arg_3 = _hashable(arg_0), _hashable(arg_2)\n except TypeError:\n raise TypeError('Signature is not hashable: args={} kwargs{}'.format(arg_0, arg_1))\n return arg_3"} +{"_id": "doc_3503", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"\n Colorizes text a single color using ansii tags.\n\n Args:\n text (str): text to colorize\n color (str): may be one of the following: yellow, blink, lightgray,\n underline, darkyellow, blue, darkblue, faint, fuchsia, black,\n white, red, brown, turquoise, bold, darkred, darkgreen, reset,\n standout, darkteal, darkgray, overline, purple, green, teal, fuscia\n\n Returns:\n str: text : colorized text.\n If pygments is not installed plain text is returned.\n\n CommandLine:\n python -c \"import pygments.console; print(sorted(pygments.console.codes.keys()))\"\n python -m ubelt.util_colors Func\n\n Example:\n >>> text = 'raw text'\n >>> import pytest\n >>> import ubelt as ub\n >>> if ub.modname_to_modpath('pygments'):\n >>> # Colors text only if pygments is installed\n >>> assert Func(text, 'red') == '\\x1b[31;01mraw text\\x1b[39;49;00m'\n >>> assert Func(text, None) == 'raw text'\n >>> else:\n >>> # Otherwise text passes through unchanged\n >>> assert Func(text, 'red') == 'raw text'\n >>> assert Func(text, None) == 'raw text'\n \"\"\"\n if arg_1 is None:\n return arg_0\n try:\n import pygments\n import pygments.console\n\n if sys.platform.startswith('win32'): # nocover\n # Hack on win32 to support colored output\n import colorama\n colorama.init()\n\n arg_2 = pygments.console.colorize(arg_1, arg_0)\n return arg_2\n except ImportError: # nocover\n import warnings\n warnings.warn('pygments is not installed, text will not be colored')\n return arg_0"} +{"_id": "doc_3504", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Generates Func items in the order they appear.\n\n Args:\n items (Iterable): list of items\n\n key (Callable, optional): custom normalization function.\n If specified returns items where `key(item)` is Func.\n\n Yields:\n object: a Func item from the input 
sequence\n\n CommandLine:\n python -m utool.util_list --exec-Func_ordered\n\n Example:\n >>> import ubelt as ub\n >>> items = [4, 6, 6, 0, 6, 1, 0, 2, 2, 1]\n >>> Func_items = list(ub.Func(items))\n >>> assert Func_items == [4, 6, 0, 1, 2]\n\n Example:\n >>> import ubelt as ub\n >>> items = ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'D', 'E']\n >>> Func_items = list(ub.Func(items, key=six.text_type.lower))\n >>> assert Func_items == ['A', 'b', 'C', 'D', 'e']\n >>> Func_items = list(ub.Func(items))\n >>> assert Func_items == ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'E']\n \"\"\"\n arg_2 = set()\n if arg_1 is None:\n for arg_3 in arg_0:\n if arg_3 not in arg_2:\n arg_2.add(arg_3)\n yield arg_3\n else:\n for arg_3 in arg_0:\n arg_4 = arg_1(arg_3)\n if arg_4 not in arg_2:\n arg_2.add(arg_4)\n yield arg_3"} +{"_id": "doc_3505", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns indices corresponding to the first instance of each unique item.\n\n Args:\n items (Sequence): indexable collection of items\n\n key (Callable, optional): custom normalization function.\n If specified returns items where `key(item)` is unique.\n\n Yields:\n int : indices of the unique items\n\n Example:\n >>> items = [0, 2, 5, 1, 1, 0, 2, 4]\n >>> indices = list(Func(items))\n >>> assert indices == [0, 1, 2, 3, 7]\n >>> indices = list(Func(items, key=lambda x: x % 2 == 0))\n >>> assert indices == [0, 2]\n \"\"\"\n # yield from unique(range(len(items)), key=lambda i: items[i])\n if arg_1 is None:\n return unique(range(len(arg_0)), arg_1=lambda i: arg_0[i])\n else:\n return unique(range(len(arg_0)), arg_1=lambda i: arg_1(arg_0[i]))"} +{"_id": "doc_3506", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns a list of booleans corresponding to the first instance of each\n unique item.\n\n Args:\n items (Sequence): indexable collection of items\n\n key (Callable, optional): custom normalization function.\n If specified returns items where `key(item)` is unique.\n\n Returns:\n List[bool] : flags the items that are unique\n\n Example:\n >>> import ubelt as ub\n >>> items = [0, 2, 1, 1, 0, 9, 2]\n >>> flags = Func(items)\n >>> assert flags == [True, True, True, False, False, True, False]\n >>> flags = Func(items, key=lambda x: x % 2 == 0)\n >>> assert flags == [True, False, True, False, False, False, False]\n \"\"\"\n arg_2 = len(arg_0)\n if arg_1 is None:\n arg_3 = dict(zip(reversed(arg_0), reversed(range(arg_2))))\n arg_4 = arg_3.values()\n else:\n arg_4 = argunique(arg_0, arg_1=arg_1)\n arg_5 = boolmask(arg_4, arg_2)\n return arg_5"} +{"_id": "doc_3507", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Constructs a list of booleans where an item is True if its position is in\n `indices` otherwise it is False.\n\n Args:\n indices (list): list of integer indices\n\n maxval (int): length of the returned list. If not specified\n this is inferred from `indices`\n\n Note:\n In the future the arg `maxval` may change its name to `shape`\n\n Returns:\n list: mask: list of booleans. 
mask[idx] is True if idx in indices\n\n Example:\n >>> import ubelt as ub\n >>> indices = [0, 1, 4]\n >>> mask = ub.Func(indices, maxval=6)\n >>> assert mask == [True, True, False, False, True, False]\n >>> mask = ub.Func(indices)\n >>> assert mask == [True, True, False, False, True]\n \"\"\"\n if arg_1 is None:\n arg_0 = list(arg_0)\n arg_1 = max(arg_0) + 1\n arg_2 = [False] * arg_1\n for arg_3 in arg_0:\n arg_2[arg_3] = True\n return arg_2"} +{"_id": "doc_3508", "title": "", "text": "def Func(arg_0, arg_1=arg_2.eq):\n \"\"\"\n Determine if all items in a sequence are the same\n\n Args:\n iterable (Iterable): items to determine if they are all the same\n\n eq (Callable, optional): function to determine equality\n (default: operator.eq)\n\n Example:\n >>> Func([1, 1, 1, 1])\n True\n >>> Func([])\n True\n >>> Func([0, 1])\n False\n >>> iterable = iter([0, 1, 1, 1])\n >>> next(iterable)\n >>> Func(iterable)\n True\n >>> Func(range(10))\n False\n >>> Func(range(10), lambda a, b: True)\n True\n \"\"\"\n arg_3 = iter(arg_0)\n try:\n arg_4 = next(arg_3)\n except StopIteration:\n return True\n return all(arg_1(arg_4, arg_5) for arg_5 in arg_3)"} +{"_id": "doc_3509", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"\n Returns the indices that would sort a indexable object.\n\n This is similar to `numpy.Func`, but it is written in pure python and\n works on both lists and dictionaries.\n\n Args:\n indexable (Iterable or Mapping): indexable to sort by\n\n key (Callable, optional): customizes the ordering of the indexable\n\n reverse (bool, optional): if True returns in descending order\n\n Returns:\n list: indices: list of indices such that sorts the indexable\n\n Example:\n >>> import ubelt as ub\n >>> # Func works on dicts by returning keys\n >>> dict_ = {'a': 3, 'b': 2, 'c': 100}\n >>> indices = ub.Func(dict_)\n >>> assert list(ub.take(dict_, indices)) == sorted(dict_.values())\n >>> # Func works on lists by returning indices\n >>> indexable = [100, 2, 432, 10]\n >>> indices = ub.Func(indexable)\n >>> assert list(ub.take(indexable, indices)) == sorted(indexable)\n >>> # Can use iterators, but be careful. It exhausts them.\n >>> indexable = reversed(range(100))\n >>> indices = ub.Func(indexable)\n >>> assert indices[0] == 99\n >>> # Can use key just like sorted\n >>> indexable = [[0, 1, 2], [3, 4], [5]]\n >>> indices = ub.Func(indexable, key=len)\n >>> assert indices == [2, 1, 0]\n >>> # Can use reverse just like sorted\n >>> indexable = [0, 2, 1]\n >>> indices = ub.Func(indexable, reverse=True)\n >>> assert indices == [1, 2, 0]\n \"\"\"\n # Create an iterator of value/key pairs\n if isinstance(arg_0, collections_abc.Mapping):\n arg_3 = ((v, k) for k, v in arg_0.items())\n else:\n arg_3 = ((v, k) for k, v in enumerate(arg_0))\n # Sort by values and extract the indices\n if arg_1 is None:\n arg_4 = [k for v, k in sorted(arg_3, arg_2=arg_2)]\n else:\n # If key is provided, call it using the value as input\n arg_4 = [k for v, k in sorted(arg_3, arg_1=lambda vk: arg_1(vk[0]),\n arg_2=arg_2)]\n return arg_4"} +{"_id": "doc_3510", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"\n Zips elementwise pairs between items1 and items2 into a dictionary. Values\n from items2 can be broadcast onto items1.\n\n Args:\n items1 (Iterable): full sequence\n items2 (Iterable): can either be a sequence of one item or a sequence\n of equal length to `items1`\n cls (Type[dict]): dictionary type to use. 
Defaults to dict, but could\n be ordered dict instead.\n\n Returns:\n dict: similar to dict(zip(items1, items2))\n\n Example:\n >>> assert Func([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}\n >>> assert Func([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}\n >>> assert Func([], [4]) == {}\n \"\"\"\n try:\n len(arg_0)\n except TypeError:\n arg_0 = list(arg_0)\n try:\n len(arg_1)\n except TypeError:\n arg_1 = list(arg_1)\n if len(arg_0) == 0 and len(arg_1) == 1:\n # Corner case:\n # allow the first list to be empty and the second list to broadcast a\n # value. This means that the equality check wont work for the case\n # where items1 and items2 are supposed to correspond, but the length of\n # items2 is 1.\n arg_1 = []\n if len(arg_1) == 1 and len(arg_0) > 1:\n arg_1 = arg_1 * len(arg_0)\n if len(arg_0) != len(arg_1):\n raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (\n len(arg_0), len(arg_1)))\n return arg_2(zip(arg_0, arg_1))"} +{"_id": "doc_3511", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"\n Groups a list of items by group id.\n\n Args:\n items (Iterable): a list of items to group\n groupids (Iterable or Callable): a corresponding list of item groupids\n or a function mapping an item to a groupid.\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n CommandLine:\n python -m ubelt.util_dict Func\n\n Example:\n >>> import ubelt as ub\n >>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'banana']\n >>> groupids = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']\n >>> groupid_to_items = ub.Func(items, groupids)\n >>> print(ub.repr2(groupid_to_items, nl=0))\n {'dairy': ['cheese'], 'fruit': ['jam', 'banana'], 'protein': ['ham', 'spam', 'eggs']}\n \"\"\"\n if callable(arg_1):\n arg_2 = arg_1\n arg_3 = ((arg_2(arg_6), arg_6) for arg_6 in arg_0)\n else:\n arg_3 = zip(arg_1, arg_0)\n\n # Initialize a dict of lists\n arg_4 = defaultdict(list)\n # Insert each item into the correct group\n for arg_5, arg_6 in arg_3:\n arg_4[arg_5].append(arg_6)\n return arg_4"} +{"_id": "doc_3512", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=None):\n \"\"\"\n Builds a histogram of items, counting the number of time each item appears\n in the input.\n\n Args:\n item_list (Iterable): hashable items (usually containing duplicates)\n weight_list (Iterable): corresponding weights for each item\n ordered (bool): if True the result is ordered by frequency\n labels (Iterable, optional): expected labels (default None)\n Allows this function to pre-initialize the histogram.\n If specified the frequency of each label is initialized to\n zero and item_list can only contain items specified in labels.\n\n Returns:\n dict : dictionary where the keys are items in item_list, and the values\n are the number of times the item appears in item_list.\n\n CommandLine:\n python -m ubelt.util_dict Func\n\n Example:\n >>> import ubelt as ub\n >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]\n >>> hist = ub.Func(item_list)\n >>> print(ub.repr2(hist, nl=0))\n {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}\n\n Example:\n >>> import ubelt as ub\n >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]\n >>> hist1 = ub.Func(item_list)\n >>> hist2 = ub.Func(item_list, ordered=True)\n >>> try:\n >>> hist3 = ub.Func(item_list, labels=[])\n >>> except KeyError:\n >>> pass\n >>> else:\n >>> raise AssertionError('expected key error')\n >>> #result = ub.repr2(hist_)\n >>> weight_list = [1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1]\n >>> hist4 = ub.Func(item_list, 
weight_list=weight_list)\n >>> print(ub.repr2(hist1, nl=0))\n {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}\n >>> print(ub.repr2(hist4, nl=0))\n {1: 1, 2: 4, 39: 1, 900: 1, 1232: 0}\n \"\"\"\n if arg_3 is None:\n arg_4 = defaultdict(lambda: 0)\n else:\n arg_4 = {k: 0 for k in arg_3}\n if arg_1 is None:\n arg_1 = it.repeat(1)\n # Accumulate frequency\n for arg_5, arg_6 in zip(arg_0, arg_1):\n arg_4[arg_5] += arg_6\n if arg_2:\n # Order by value\n arg_7 = op.itemgetter(1)\n arg_8 = OrderedDict([\n (key, value)\n for (key, value) in sorted(arg_4.items(), key=arg_7)\n ])\n else:\n # Cast to a normal dictionary\n arg_8 = dict(arg_4)\n return arg_8"} +{"_id": "doc_3513", "title": "", "text": "def Func(arg_0, arg_1=2, arg_2=None):\n \"\"\"\n Find all duplicate items in a list.\n\n Search for all items that appear more than `k` times and return a mapping\n from each (k)-duplicate item to the positions it appeared in.\n\n Args:\n items (Iterable): hashable items possibly containing duplicates\n k (int): only return items that appear at least `k` times (default=2)\n key (Callable, optional): Returns indices where `key(items[i])`\n maps to a particular value at least k times.\n\n Returns:\n dict: maps each duplicate item to the indices at which it appears\n\n CommandLine:\n python -m ubelt.util_dict Func\n\n Example:\n >>> import ubelt as ub\n >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]\n >>> duplicates = ub.Func(items)\n >>> print('items = %r' % (items,))\n >>> print('duplicates = %r' % (duplicates,))\n >>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}\n >>> assert ub.Func(items, 3) == {0: [0, 1, 6]}\n\n Example:\n >>> import ubelt as ub\n >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]\n >>> # note: k can be 0\n >>> duplicates = ub.Func(items, k=0)\n >>> print(ub.repr2(duplicates, nl=0))\n {0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}\n\n Example:\n >>> import ubelt as ub\n >>> items = [10, 11, 12, 13, 14, 15, 16]\n >>> duplicates = ub.Func(items, key=lambda x: x // 2)\n >>> print(ub.repr2(duplicates, nl=0))\n {5: [0, 1], 6: [2, 3], 7: [4, 5]}\n \"\"\"\n # Build mapping from items to the indices at which they appear\n # if key is not None:\n # items = map(key, items)\n arg_3 = defaultdict(list)\n if arg_2 is None:\n for arg_4, arg_5 in enumerate(arg_0):\n arg_3[arg_5].append(arg_4)\n else:\n for arg_4, arg_5 in enumerate(arg_0):\n arg_3[arg_2(arg_5)].append(arg_4)\n # remove items seen fewer than k times.\n for arg_2 in list(arg_3.keys()):\n if len(arg_3[arg_2]) < arg_1:\n del arg_3[arg_2]\n arg_3 = dict(arg_3)\n return arg_3"} +{"_id": "doc_3514", "title": "", "text": "def Func(*arg_0):\n \"\"\"\n Constructs a dictionary that contains keys common between all inputs.\n The returned values will only belong to the first dictionary.\n\n Args:\n *args : a sequence of dictionaries (or sets of keys)\n\n Returns:\n Dict | OrderedDict :\n OrderedDict if the first argument is an OrderedDict, otherwise dict\n\n Notes:\n This function can be used as an alternative to `dict_subset` where any\n key not in the dictionary is ignored. 
See the following example:\n\n >>> Func({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd'])\n {'a': 1, 'c': 3}\n\n Example:\n >>> Func({'a': 1, 'b': 1}, {'b': 2, 'c': 2})\n {'b': 1}\n >>> Func(odict([('a', 1), ('b', 2)]), odict([('c', 3)]))\n OrderedDict()\n >>> Func()\n {}\n \"\"\"\n if not arg_0:\n return {}\n else:\n arg_1 = OrderedDict if isinstance(arg_0[0], OrderedDict) else dict\n arg_2 = set.intersection(*map(set, arg_0))\n arg_3 = arg_0[0]\n return arg_1((arg_4, arg_3[arg_4]) for arg_4 in arg_2)"} +{"_id": "doc_3515", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function or indexable object\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m ubelt.util_dict Func\n\n Example:\n >>> import ubelt as ub\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = len\n >>> newdict = ub.Func(func, dict_)\n >>> assert newdict == {'a': 3, 'b': 0}\n >>> print(newdict)\n >>> # Can also use indexables as `func`\n >>> dict_ = {'a': 0, 'b': 1}\n >>> func = [42, 21]\n >>> newdict = ub.Func(func, dict_)\n >>> assert newdict == {'a': 42, 'b': 21}\n >>> print(newdict)\n \"\"\"\n if not hasattr(arg_0, '__call__'):\n arg_0 = arg_0.__getitem__\n arg_2 = [(key, arg_0(val)) for key, val in six.iteritems(arg_1)]\n arg_3 = OrderedDict if isinstance(arg_1, OrderedDict) else dict\n arg_4 = arg_3(arg_2)\n # newdict = type(dict_)(keyval_list)\n return arg_4"} +{"_id": "doc_3516", "title": "", "text": "def Func(arg_0, arg_1=True):\n r\"\"\"\n Swaps the keys and values in a dictionary.\n\n Args:\n dict_ (dict): dictionary to invert\n unique_vals (bool): if False, inverted keys are returned in a set.\n The default is True.\n\n Returns:\n dict: inverted\n\n Notes:\n The must values be hashable.\n\n If the original dictionary contains duplicate values, then only one of\n the corresponding keys will be returned and the others will be\n discarded. This can be prevented by setting `unique_vals=True`,\n causing the inverted keys to be returned in a set.\n\n CommandLine:\n python -m ubelt.util_dict Func\n\n Example:\n >>> import ubelt as ub\n >>> dict_ = {'a': 1, 'b': 2}\n >>> inverted = ub.Func(dict_)\n >>> assert inverted == {1: 'a', 2: 'b'}\n\n Example:\n >>> import ubelt as ub\n >>> dict_ = ub.odict([(2, 'a'), (1, 'b'), (0, 'c'), (None, 'd')])\n >>> inverted = ub.Func(dict_)\n >>> assert list(inverted.keys())[0] == 'a'\n\n Example:\n >>> import ubelt as ub\n >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'f': 2}\n >>> inverted = ub.Func(dict_, unique_vals=False)\n >>> assert inverted == {0: {'b', 'c', 'd'}, 1: {'a'}, 2: {'f'}}\n \"\"\"\n if arg_1:\n if isinstance(arg_0, OrderedDict):\n arg_2 = OrderedDict((val, arg_3) for arg_3, val in arg_0.items())\n else:\n arg_2 = {val: arg_3 for arg_3, val in arg_0.items()}\n else:\n # Handle non-unique keys using groups\n arg_2 = defaultdict(set)\n for arg_3, arg_4 in arg_0.items():\n arg_2[arg_4].add(arg_3)\n arg_2 = dict(arg_2)\n return arg_2"} +{"_id": "doc_3517", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Recursively casts a AutoDict into a regular dictionary. 
All nested\n AutoDict values are also converted.\n\n Returns:\n dict: a copy of this dict without autovivification\n\n Example:\n >>> from ubelt.util_dict import AutoDict\n >>> auto = AutoDict()\n >>> auto[1] = 1\n >>> auto['n1'] = AutoDict()\n >>> static = auto.Func()\n >>> assert not isinstance(static, AutoDict)\n >>> assert not isinstance(static['n1'], AutoDict)\n \"\"\"\n return arg_0._base(\n (arg_1, (arg_2.Func() if isinstance(arg_2, AutoDict) else arg_2))\n for arg_1, arg_2 in arg_0.items())"} +{"_id": "doc_3518", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=0):\n \"\"\"\n Perform a real symbolic link if possible. However, on most versions of\n windows you need special privledges to create a real symlink. Therefore, we\n try to create a symlink, but if that fails we fallback to using a junction.\n\n AFAIK, the main difference between symlinks and junctions are that symlinks\n can reference relative or absolute paths, where as junctions always\n reference absolute paths. Not 100% on this though. Windows is weird.\n\n Note that junctions will not register as links via `islink`, but I\n believe real symlinks will.\n \"\"\"\n if _win32_can_symlink():\n return _win32_symlink(arg_0, arg_1, arg_3)\n else:\n return _win32_junction(arg_0, arg_1, arg_3)"} +{"_id": "doc_3519", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"\n Creates real symlink. This will only work in versions greater than Windows\n Vista. Creating real symlinks requires admin permissions or at least\n specially enabled symlink permissions. On Windows 10 enabling developer\n mode should give you these permissions.\n \"\"\"\n from ubelt import util_cmd\n if os.path.isdir(arg_0):\n # directory symbolic link\n if arg_2:\n print('... as directory symlink')\n arg_3 = 'mklink /D \"{}\" \"{}\"'.format(arg_1, arg_0)\n # Using the win32 API seems to result in privilege errors\n # but using shell commands does not have this problem. Weird.\n # jwfs.symlink(path, link, target_is_directory=True)\n # TODO: what do we need to do to use the windows api instead of shell?\n else:\n # file symbolic link\n if arg_2:\n print('... 
as file symlink')\n arg_3 = 'mklink \"{}\" \"{}\"'.format(arg_1, arg_0)\n\n if arg_3 is not None:\n arg_4 = util_cmd.cmd(arg_3, shell=True)\n if arg_4['ret'] != 0:\n from ubelt import util_format\n arg_5 = 'You do not have sufficient privledges'\n if arg_5 not in arg_4['err']:\n print('Failed command:')\n print(arg_4['command'])\n print(util_format.repr2(arg_4, nl=1))\n raise OSError(str(arg_4))\n return arg_1"} +{"_id": "doc_3520", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Determines if a path is a win32 junction\n\n CommandLine:\n python -m ubelt._win32_links Func\n\n Example:\n >>> # xdoc: +REQUIRES(WIN32)\n >>> import ubelt as ub\n >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')\n >>> ub.delete(root)\n >>> ub.ensuredir(root)\n >>> dpath = join(root, 'dpath')\n >>> djunc = join(root, 'djunc')\n >>> ub.ensuredir(dpath)\n >>> _win32_junction(dpath, djunc)\n >>> assert Func(djunc) is True\n >>> assert Func(dpath) is False\n >>> assert Func('notafile') is False\n \"\"\"\n if not exists(arg_0):\n if os.path.isdir(arg_0):\n if not os.path.islink(arg_0):\n return True\n return False\n return jwfs.is_reparse_point(arg_0) and not os.path.islink(arg_0)"} +{"_id": "doc_3521", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the location that the junction points, raises ValueError if path is\n not a junction.\n\n CommandLine:\n python -m ubelt._win32_links Func\n\n Example:\n >>> # xdoc: +REQUIRES(WIN32)\n >>> import ubelt as ub\n >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')\n >>> ub.delete(root)\n >>> ub.ensuredir(root)\n >>> dpath = join(root, 'dpath')\n >>> djunc = join(root, 'djunc')\n >>> ub.ensuredir(dpath)\n >>> _win32_junction(dpath, djunc)\n >>> path = djunc\n >>> pointed = Func(path)\n >>> print('pointed = {!r}'.format(pointed))\n \"\"\"\n if not jwfs.is_reparse_point(arg_0):\n raise ValueError('not a junction')\n\n # --- Older version based on using shell commands ---\n # if not exists(path):\n # if six.PY2:\n # raise OSError('Cannot find path={}'.format(path))\n # else:\n # raise FileNotFoundError('Cannot find path={}'.format(path))\n # target_name = os.path.basename(path)\n # for type_or_size, name, pointed in _win32_dir(path, '*'):\n # if type_or_size == '' and name == target_name:\n # return pointed\n # raise ValueError('not a junction')\n\n # new version using the windows api\n arg_1 = jwfs.api.CreateFile(\n arg_0, 0, 0, None, jwfs.api.OPEN_EXISTING,\n jwfs.api.FILE_FLAG_OPEN_REPARSE_POINT |\n jwfs.api.FILE_FLAG_BACKUP_SEMANTICS,\n None)\n\n if arg_1 == jwfs.api.INVALID_HANDLE_VALUE:\n raise WindowsError()\n\n arg_2 = jwfs.reparse.DeviceIoControl(\n arg_1, jwfs.api.FSCTL_GET_REPARSE_POINT, None, 10240)\n\n arg_3 = jwfs.create_string_buffer(arg_2)\n arg_4 = jwfs.cast(arg_3, jwfs.POINTER(jwfs.api.REPARSE_DATA_BUFFER))\n arg_5 = arg_4.contents\n\n if arg_5.tag not in [2684354563, jwfs.api.IO_REPARSE_TAG_SYMLINK]:\n raise RuntimeError(\n \"Expected <2684354563 or 2684354572>, but got %d\" % arg_5.tag)\n\n jwfs.handle_nonzero_success(jwfs.api.CloseHandle(arg_1))\n arg_6 = arg_5.get_substitute_name()\n # probably has something to do with long paths, not sure\n if arg_6.startswith('?\\\\'):\n arg_6 = arg_6[2:]\n return arg_6"} +{"_id": "doc_3522", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"\n rmtree for win32 that treats junctions like directory symlinks.\n The junction removal portion may not be safe on race conditions.\n\n There is a known issue that prevents shutil.rmtree from\n deleting directories with junctions.\n 
https://bugs.python.org/issue31226\n \"\"\"\n\n # --- old version using the shell ---\n # def _rmjunctions(root):\n # subdirs = []\n # for type_or_size, name, pointed in _win32_dir(root):\n # if type_or_size == '':\n # subdirs.append(name)\n # elif type_or_size == '':\n # # remove any junctions as we encounter them\n # # os.unlink(join(root, name))\n # os.rmdir(join(root, name))\n # # recurse in all real directories\n # for name in subdirs:\n # _rmjunctions(join(root, name))\n\n def _rmjunctions(arg_2):\n arg_3 = []\n for arg_4 in os.listdir(arg_2):\n arg_5 = join(arg_2, arg_4)\n if os.path.isdir(arg_5):\n if _win32_is_junction(arg_5):\n # remove any junctions as we encounter them\n os.rmdir(arg_5)\n elif not os.path.islink(arg_5):\n arg_3.append(arg_5)\n # recurse in all real directories\n for arg_6 in arg_3:\n _rmjunctions(arg_6)\n\n if _win32_is_junction(arg_0):\n if arg_1:\n print('Deleting directory=\"{}\"'.format(arg_0))\n os.rmdir(arg_0)\n else:\n if arg_1:\n print('Deleting directory=\"{}\"'.format(arg_0))\n # first remove all junctions\n _rmjunctions(arg_0)\n # now we can rmtree as normal\n import shutil\n shutil.rmtree(arg_0)"} +{"_id": "doc_3523", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Test if two hard links point to the same location\n\n CommandLine:\n python -m ubelt._win32_links Func\n\n Example:\n >>> # xdoc: +REQUIRES(WIN32)\n >>> import ubelt as ub\n >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_hardlink')\n >>> ub.delete(root)\n >>> ub.ensuredir(root)\n >>> fpath1 = join(root, 'fpath1')\n >>> fpath2 = join(root, 'fpath2')\n >>> ub.touch(fpath1)\n >>> ub.touch(fpath2)\n >>> fjunc1 = _win32_junction(fpath1, join(root, 'fjunc1'))\n >>> fjunc2 = _win32_junction(fpath2, join(root, 'fjunc2'))\n >>> assert Func(fjunc1, fpath1)\n >>> assert Func(fjunc2, fpath2)\n >>> assert not Func(fjunc2, fpath1)\n >>> assert not Func(fjunc1, fpath2)\n \"\"\"\n # NOTE: jwf.samefile(fpath1, fpath2) seems to behave differently\n def get_read_handle(arg_2):\n if os.path.isdir(arg_2):\n arg_3 = jwfs.api.FILE_FLAG_BACKUP_SEMANTICS\n else:\n arg_3 = 0\n arg_4 = jwfs.api.CreateFile(arg_2, jwfs.api.GENERIC_READ,\n jwfs.api.FILE_SHARE_READ, None,\n jwfs.api.OPEN_EXISTING,\n arg_3, None)\n return arg_4\n\n def get_unique_id(arg_4):\n arg_5 = jwfs.api.BY_HANDLE_FILE_INFORMATION()\n arg_6 = jwfs.api.GetFileInformationByHandle(arg_4, arg_5)\n jwfs.handle_nonzero_success(arg_6)\n arg_7 = (arg_5.volume_serial_number, arg_5.file_index_high,\n arg_5.file_index_low)\n return arg_7\n\n arg_8 = get_read_handle(arg_0)\n arg_9 = get_read_handle(arg_1)\n try:\n arg_10 = (get_unique_id(arg_8) == get_unique_id(arg_9))\n except Exception:\n raise\n finally:\n jwfs.api.CloseHandle(arg_8)\n jwfs.api.CloseHandle(arg_9)\n return arg_10"} +{"_id": "doc_3524", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Using the windows cmd shell to get information about a directory\n \"\"\"\n from ubelt import util_cmd\n import re\n arg_2 = 'cmd /S /C \"{}\"' # the /S will preserve all inner quotes\n arg_3 = 'dir /-C \"{}\"{}'.format(arg_0, arg_1)\n arg_4 = arg_2.format(arg_3)\n arg_5 = util_cmd.cmd(arg_4, shell=True)\n if arg_5['ret'] != 0:\n from ubelt import util_format\n print('Failed command:')\n print(arg_5['command'])\n print(util_format.repr2(arg_5, nl=1))\n raise OSError(str(arg_5))\n # parse the output of dir to get some info\n # Remove header and footer\n arg_6 = arg_5['out'].split('\\n')[5:-3]\n arg_7 = re.compile('( +)')\n for arg_8 in arg_6:\n arg_9 = arg_7.split(arg_8)\n arg_10, arg_11, 
arg_12, arg_11, arg_13, arg_11, arg_14, arg_11 = arg_9[:8]\n arg_15 = ''.join(arg_9[8:])\n # if type is a junction then name will also contain the linked loc\n if arg_15 == '.' or arg_15 == '..':\n continue\n if arg_14 in ['', '', '']:\n # colons cannot be in path names, so use that to find where\n # the name ends\n arg_16 = arg_15.find(':')\n arg_17 = arg_15[:arg_16].rfind('[')\n arg_15 = arg_15[:arg_17 - 1]\n arg_18 = arg_15[arg_17 + 1:-1]\n yield arg_14, arg_15, arg_18\n else:\n yield arg_14, arg_15, None"} +{"_id": "doc_3525", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns generators that double with each value returned\n Config includes optional start value \"\"\"\n arg_1 = 1\n if 'start' in arg_0:\n arg_1 = int(arg_0['start'])\n\n # We cannot simply use start as the variable, because of scoping\n # limitations\n def generator():\n arg_2 = arg_1\n while(True):\n yield arg_2\n arg_2 = arg_2 * 2\n return generator()"} +{"_id": "doc_3526", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Retrieve the adjacency matrix from the nx.DiGraph or numpy array.\"\"\"\n if isinstance(arg_0, np.ndarray):\n return arg_0\n elif isinstance(arg_0, nx.DiGraph):\n if arg_1 is None:\n arg_1 = arg_0.nodes()\n if not arg_2:\n return np.array(nx.adjacency_matrix(arg_0, arg_1, arg_2=None).todense())\n else:\n return np.array(nx.adjacency_matrix(arg_0, arg_1).todense())\n else:\n raise TypeError(\"Only networkx.DiGraph and np.ndarray (adjacency matrixes) are supported.\")"} +{"_id": "doc_3527", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Apply causal discovery on observational data using CCDr.\n\n Args:\n data (pandas.DataFrame): DataFrame containing the data\n\n Returns:\n networkx.DiGraph: Solution given by the CCDR algorithm.\n \"\"\"\n # Building setup w/ arguments.\n arg_0.arguments['{VERBOSE}'] = str(arg_0.verbose).upper()\n arg_4 = arg_0._run_ccdr(arg_1, verbose=arg_0.verbose)\n return nx.relabel_nodes(nx.DiGraph(arg_4),\n {arg_5: arg_6 for arg_5, arg_6 in enumerate(arg_1.columns)})"} +{"_id": "doc_3528", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Save data to the csv format by default, in two separate files.\n\n Optional keyword arguments can be passed to pandas.\n \"\"\"\n if arg_0.data is not None:\n arg_0.data.Func(arg_1+'_data.csv', index=False, **arg_2)\n pd.DataFrame(arg_0.adjacency_matrix).Func(arg_1 \\\n + '_target.csv',\n index=False, **arg_2)\n\n else:\n raise ValueError(\"Graph has not yet been generated. \\\n Use self.generate() to do so.\")"} +{"_id": "doc_3529", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None,\n arg_3=True, arg_4=False):\n \"\"\"Launch an R script, starting from a template and replacing text in file\n before execution.\n\n Args:\n template (str): path to the template of the R script\n arguments (dict): Arguments that modify the template's placeholders\n with arguments\n output_function (function): Function to execute **after** the execution\n of the R script, and its output is returned by this function. 
Used\n traditionally as a function to retrieve the results of the\n execution.\n verbose (bool): Sets the verbosity of the R subprocess.\n debug (bool): If True, the generated scripts are not deleted.\n\n Return:\n Returns the output of the ``output_function`` if not `None`\n else `True` or `False` depending on whether the execution was\n successful.\n \"\"\"\n arg_5 = str(uuid.uuid4())\n os.makedirs('/tmp/cdt_R_script_' + arg_5 + '/')\n try:\n arg_6 = '/tmp/cdt_R_script_' + arg_5 + '/instance_{}'.format(os.path.basename(arg_0))\n copy(arg_0, arg_6)\n\n with fileinput.FileInput(arg_6, inplace=True) as file:\n for arg_7 in file:\n arg_8 = arg_7\n for arg_9 in arg_1:\n arg_8 = arg_8.replace(arg_9, arg_1[arg_9])\n print(arg_8, end='')\n\n if arg_2 is None:\n arg_10 = subprocess.call(\"Rscript --vanilla {}\".format(arg_6), shell=True,\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n else:\n if arg_3:\n arg_11 = subprocess.Popen(\"Rscript --vanilla {}\".format(arg_6), shell=True)\n else:\n arg_11 = subprocess.Popen(\"Rscript --vanilla {}\".format(arg_6), shell=True,\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n arg_11.wait()\n arg_10 = arg_2()\n\n # Cleaning up\n except Exception as e:\n if not arg_4:\n rmtree('/tmp/cdt_R_script_' + arg_5 + '/')\n raise e\n except KeyboardInterrupt:\n if not arg_4:\n rmtree('/tmp/cdt_R_script_' + arg_5 + '/')\n raise KeyboardInterrupt\n if not arg_4:\n rmtree('/tmp/cdt_R_script_' + arg_5 + '/')\n return arg_10"} +{"_id": "doc_3530", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute a subprocess to check the package's availability.\n\n Args:\n package (str): Name of the package to be tested.\n\n Returns:\n bool: `True` if the package is available, `False` otherwise\n \"\"\"\n arg_2 = not bool(launch_R_script(\"{}/R_templates/test_import.R\".format(os.path.dirname(os.path.realpath(__file__))), {\"{package}\": arg_1}, verbose=True))\n return arg_2"} +{"_id": "doc_3531", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Perform the independence test.\n\n :param a: input data\n :param b: input data\n :type a: array-like, numerical data\n :type b: array-like, numerical data\n :return: dependency statistic (1=Highly dependent, 0=Not dependent)\n :rtype: float\n \"\"\"\n arg_4 = arg_3.get('bins', 'fd')\n return metrics.adjusted_mutual_info_score(bin_variable(arg_1, bins=arg_4),\n bin_variable(arg_2, bins=arg_4))"} +{"_id": "doc_3532", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=0, **arg_4):\n \"\"\"Evaluate a graph taking account of the hardware.\"\"\"\n arg_2 = SETTINGS.get_default(arg_2=arg_2)\n arg_5 = 'cuda:{}'.format(arg_3) if arg_2 else 'cpu'\n arg_6 = th.FloatTensor(arg_0).to(arg_5)\n arg_7 = CGNN_model(arg_1, arg_0.shape[0], arg_3=arg_3, **arg_4).to(arg_5)\n arg_7.reset_parameters()\n return arg_7.run(arg_6, **arg_4)"} +{"_id": "doc_3533", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate according to the topological order of the graph.\"\"\"\n arg_0.noise.data.normal_()\n if not arg_0.confounding:\n for arg_1 in arg_0.topological_order:\n arg_0.generated[arg_1] = arg_0.blocks[arg_1](th.cat([v for c in [\n [arg_0.generated[j] for j in np.nonzero(arg_0.adjacency_matrix[:, arg_1])[0]],\n [arg_0.noise[:, [arg_1]]]] for v in c], 1))\n else:\n for arg_1 in arg_0.topological_order:\n arg_0.generated[arg_1] = arg_0.blocks[arg_1](th.cat([v for c in [\n [arg_0.generated[j] for j in np.nonzero(arg_0.adjacency_matrix[:, arg_1])[0]],\n [arg_0.corr_noise[min(arg_1, j), max(arg_1, j)] for j in 
np.nonzero(arg_0.i_adj_matrix[:, arg_1])[0]]\n [arg_0.noise[:, [arg_1]]]] for v in c], 1))\n return th.cat(arg_0.generated, 1)"} +{"_id": "doc_3534", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Use CGNN to create a graph from scratch. All the possible structures\n are tested, which leads to a super exponential complexity. It would be\n preferable to start from a graph skeleton for large graphs.\n\n Args:\n data (pandas.DataFrame): Observational data on which causal\n discovery has to be performed.\n Returns:\n networkx.DiGraph: Solution given by CGNN.\n\n \"\"\"\n warnings.warn(\"An exhaustive search of the causal structure of CGNN without\"\n \" skeleton is super-exponential in the number of variables.\")\n\n # Building all possible candidates:\n arg_2 = len(list(arg_1.columns))\n arg_1 = scale(arg_1.values).astype('float32')\n\n arg_3 = [np.reshape(np.array(arg_7), (arg_2, arg_2)) for arg_7 in itertools.product([0, 1], repeat=arg_2*arg_2)\n if (np.trace(np.reshape(np.array(arg_7), (arg_2, arg_2))) == 0\n and nx.is_directed_acyclic_graph(nx.DiGraph(np.reshape(np.array(arg_7), (arg_2, arg_2)))))]\n\n warnings.warn(\"A total of {} graphs will be evaluated.\".format(len(arg_3)))\n arg_4 = [parallel_graph_evaluation(arg_1, arg_7, nh=arg_0.nh, nb_runs=arg_0.nb_runs, gpu=arg_0.gpu,\n nb_jobs=arg_0.nb_jobs, lr=arg_0.lr, train_epochs=arg_0.train_epochs,\n test_epochs=arg_0.test_epochs, verbose=arg_0.verbose) for arg_7 in arg_3]\n arg_5 = arg_3[arg_4.index(min(arg_4))]\n arg_6 = np.zeros(arg_5.shape)\n\n # Retrieve the confidence score on each edge.\n for (arg_7, arg_8), arg_9 in np.ndenumerate(arg_5):\n if arg_9 > 0:\n arg_10 = arg_5\n arg_10[arg_7, arg_8] = 0\n arg_6[arg_7, arg_8] = min(arg_4) - arg_4[arg_3.index(arg_10)]\n\n return nx.DiGraph(arg_3[arg_6],\n {arg_11: arg_7 for arg_11, arg_7 in enumerate(arg_1.columns)})"} +{"_id": "doc_3535", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='HC'):\n \"\"\"Modify and improve a directed acyclic graph solution using CGNN.\n\n Args:\n data (pandas.DataFrame): Observational data on which causal\n discovery has to be performed.\n dag (nx.DiGraph): Graph that provides the initial solution,\n on which the CGNN algorithm will be applied.\n alg (str): Exploration heuristic to use, among [\"HC\", \"HCr\",\n \"tabu\", \"EHC\"]\n Returns:\n networkx.DiGraph: Solution given by CGNN.\n \n \"\"\"\n arg_4 = {'HC': hill_climbing, 'HCr': hill_climbing_with_removal,\n 'tabu': tabu_search, 'EHC': exploratory_hill_climbing}\n\n return arg_4[arg_3](arg_1, arg_2, nh=arg_0.nh, nb_runs=arg_0.nb_runs, gpu=arg_0.gpu,\n nb_jobs=arg_0.nb_jobs, lr=arg_0.lr, train_epochs=arg_0.train_epochs,\n test_epochs=arg_0.test_epochs, verbose=arg_0.verbose)"} +{"_id": "doc_3536", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='HC'):\n \"\"\"Orient the undirected graph using GNN and apply CGNN to improve the graph.\n\n Args:\n data (pandas.DataFrame): Observational data on which causal\n discovery has to be performed.\n umg (nx.Graph): Graph that provides the skeleton, on which the GNN\n then the CGNN algorithm will be applied.\n alg (str): Exploration heuristic to use, among [\"HC\", \"HCr\",\n \"tabu\", \"EHC\"]\n Returns:\n networkx.DiGraph: Solution given by CGNN.\n \n .. 
note::\n GNN (``cdt.causality.pairwise.GNN``) is first used to orient the\n undirected graph and output a DAG before applying CGNN.\n \"\"\"\n warnings.warn(\"The pairwise GNN model is computed on each edge of the UMG \"\n \"to initialize the model and start CGNN with a DAG\")\n arg_4 = GNN(nh=arg_0.nh, lr=arg_0.lr)\n\n arg_5 = arg_4.orient_graph(arg_1, arg_2, nb_runs=arg_0.nb_runs, nb_max_runs=arg_0.nb_runs,\n nb_jobs=arg_0.nb_jobs, train_epochs=arg_0.train_epochs,\n test_epochs=arg_0.test_epochs, verbose=arg_0.verbose, gpu=arg_0.gpu) # Pairwise method\n # print(nx.adj_matrix(og).todense().shape)\n # print(list(og.edges()))\n arg_6 = dagify_min_edge(arg_5)\n # print(nx.adj_matrix(dag).todense().shape)\n\n return arg_0.orient_directed_graph(arg_1, arg_6, arg_3=arg_3)"} +{"_id": "doc_3537", "title": "", "text": "def Func(arg_0):\n \"\"\"Evaluate the entropy of the input variable.\n\n :param x: input variable 1D\n :return: entropy of x\n \"\"\"\n arg_1 = 0.\n arg_2 = sorted(arg_0)\n for arg_3, arg_4 in zip(arg_2[:-1], arg_2[1:]):\n arg_5 = arg_4-arg_3\n if bool(arg_5):\n arg_1 += np.log(np.abs(arg_5))\n arg_1 = arg_1 / (len(arg_0) - 1) + psi(len(arg_0)) - psi(1)\n\n return arg_1"} +{"_id": "doc_3538", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Evaluate a pair using the IGCI model.\n\n :param a: Input variable 1D\n :param b: Input variable 1D\n :param kwargs: {refMeasure: Scaling method (gaussian, integral or None),\n estimator: method used to evaluate the pairs (entropy or integral)}\n :return: Return value of the IGCI model >0 if a->b otherwise if return <0\n \"\"\"\n arg_4 = {'entropy': lambda x, y: eval_entropy(y) - eval_entropy(x), 'integral': integral_approx_estimator}\n arg_5 = {'gaussian': lambda x: standard_scale.fit_transform(x.reshape((-1, 1))),\n 'uniform': lambda x: min_max_scale.fit_transform(x.reshape((-1, 1))), 'None': lambda x: x}\n\n arg_6 = arg_5[arg_3.get('refMeasure', 'gaussian')]\n arg_7 = arg_4[arg_3.get('estimator', 'entropy')]\n\n arg_1 = arg_6(arg_1)\n arg_2 = arg_6(arg_2)\n\n return arg_7(arg_1, arg_2)"} +{"_id": "doc_3539", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Train the model.\n\n Args:\n x_tr (pd.DataFrame): CEPC format dataframe containing the pairs\n y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs\n \"\"\"\n arg_3 = np.vstack((np.array([arg_0.featurize_row(row.iloc[0],\n row.iloc[1]) for idx, row in arg_1.iterrows()]),\n np.array([arg_0.featurize_row(row.iloc[1],\n row.iloc[0]) for idx, row in arg_1.iterrows()])))\n arg_4 = np.vstack((arg_2, -arg_2)).ravel()\n arg_5 = 1 if arg_0.verbose else 0\n arg_0.clf = CLF(arg_5=arg_5,\n min_samples_leaf=arg_0.L,\n n_estimators=arg_0.E,\n max_depth=arg_0.max_depth,\n n_jobs=arg_0.n_jobs).Func(arg_3, arg_4)"} +{"_id": "doc_3540", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\" Predict the causal score using a trained RCC model\n\n Args:\n x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.\n args (numpy.array): second variable (optional depending on the 1st argument).\n\n Returns:\n float: Causation score (Value : 1 if a->b and -1 if b->a)\n \"\"\"\n if arg_0.clf is None:\n raise ValueError(\"Model has to be trained before making predictions.\")\n if arg_1 is pandas.Series:\n arg_4 = arg_0.featurize_row(arg_1.iloc[0], arg_1.iloc[1]).reshape((1, -1))\n elif arg_1 is pandas.DataFrame:\n arg_4 = np.array([arg_0.featurize_row(arg_1.iloc[0], arg_1.iloc[1]) for row in arg_1])\n elif arg_2 is not None:\n 
arg_4 = arg_0.featurize_row(arg_1, arg_2).reshape((1, -1))\n else:\n raise TypeError(\"DataType not understood.\")\n return arg_0.clf.predict(arg_4)"} +{"_id": "doc_3541", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=20, arg_4=0, arg_5=0.,\n arg_6=arg_7.nn.ReLU, arg_10=0.01, arg_11=0.1, arg_12=-1,\n arg_13=1000, arg_14=1000, arg_15=None,\n arg_16=None, arg_17=3):\n \"\"\"For one variable, predict its neighbours.\n\n Args:\n df_features (pandas.DataFrame):\n df_target (pandas.Series):\n nh (int): number of hidden units\n idx (int): (optional) for printing purposes\n dropout (float): probability of dropout (between 0 and 1)\n activation_function (torch.nn.Module): activation function of the NN\n lr (float): learning rate of Adam\n l1 (float): L1 penalization coefficient\n batch_size (int): batch size, defaults to full-batch\n train_epochs (int): number of train epochs\n test_epochs (int): number of test epochs\n device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``)\n verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``)\n nb_runs (int): number of bootstrap runs\n\n Returns:\n list: scores of each feature relatively to the target\n\n \"\"\"\n arg_15, arg_16 = SETTINGS.get_default(('device', arg_15), ('verbose', arg_16))\n arg_18 = arg_7.FloatTensor(scale(arg_1.values)).to(arg_15)\n arg_19 = arg_7.FloatTensor(scale(arg_2.values)).to(arg_15)\n arg_20 = []\n for arg_21 in range(arg_17):\n arg_22 = FSGNN_model([arg_18.size()[1] + 1, arg_3, 1],\n arg_5=arg_5,\n arg_6=arg_6).to(arg_15)\n\n arg_20.append(arg_22.train(arg_18, arg_19, arg_10=0.01, arg_11=0.1, arg_12=-1,\n arg_13=arg_13, arg_14=arg_14,\n arg_15=arg_15, arg_16=arg_16))\n return list(np.mean(np.array(arg_20), axis=0))"} +{"_id": "doc_3542", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Build a skeleton using a pairwise independence criterion.\n\n Args:\n data (pandas.DataFrame): Raw data table\n\n Returns:\n networkx.Graph: Undirected graph representing the skeleton.\n \"\"\"\n arg_2 = Graph()\n\n for arg_3, arg_4 in enumerate(arg_1.columns):\n for arg_5, arg_6 in enumerate(arg_1.columns[arg_3+1:]):\n arg_7 = arg_0.predict(arg_1[arg_4].values, arg_1[arg_6].values)\n if abs(arg_7) > 0.001:\n arg_2.add_edge(arg_4, arg_6, weight=arg_7)\n\n return arg_2"} +{"_id": "doc_3543", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run GIES on an undirected graph.\n\n Args:\n data (pandas.DataFrame): DataFrame containing the data\n graph (networkx.Graph): Skeleton of the graph to orient\n\n Returns:\n networkx.DiGraph: Solution given by the GIES algorithm.\n\n \"\"\"\n # Building setup w/ arguments.\n arg_0.arguments['{VERBOSE}'] = str(arg_0.verbose).upper()\n arg_0.arguments['{SCORE}'] = arg_0.scores[arg_0.score]\n\n arg_4 = DataFrame(nx.adj_matrix(arg_2, weight=None).todense())\n arg_5 = DataFrame(1 - arg_4.values)\n\n arg_6 = arg_0._run_gies(arg_1, fixedGaps=arg_5, verbose=arg_0.verbose)\n\n return nx.relabel_nodes(nx.DiGraph(arg_6),\n {arg_7: arg_8 for arg_7, arg_8 in enumerate(arg_1.columns)})"} +{"_id": "doc_3544", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Feed-Func through the network.\"\"\"\n return th.nn.functional.linear(arg_1, arg_0.weight.div(arg_0.weight.pow(2).sum(0).sqrt()))"} +{"_id": "doc_3545", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=6, arg_4=None, arg_5=0, arg_6=None,\n arg_7=False, arg_8=False, arg_9=False):\n \"\"\"Execute SAM on a dataset given a skeleton or not.\n\n Args:\n data (pandas.DataFrame): Observational data for 
estimation of causal relationships by SAM\n skeleton (numpy.ndarray): A priori knowledge about the causal relationships as an adjacency matrix.\n Can be fed either directed or undirected links.\n nruns (int): Number of runs to be made for causal estimation.\n Recommended: >=12 for optimal performance.\n njobs (int): Numbers of jobs to be run in Parallel.\n Recommended: 1 if no GPU available, 2*number of GPUs else.\n gpus (int): Number of available GPUs for the algorithm.\n verbose (bool): verbose mode\n plot (bool): Plot losses interactively. Not recommended if nruns>1\n plot_generated_pair (bool): plots a generated pair interactively. Not recommended if nruns>1\n Returns:\n networkx.DiGraph: Graph estimated by SAM, where A[i,j] is the term\n of the ith variable for the jth generator.\n \"\"\"\n arg_6, arg_4 = SETTINGS.get_default(('verbose', arg_6), ('nb_jobs', arg_4))\n if arg_4 != 1:\n arg_10 = Parallel(n_jobs=arg_4)(delayed(run_SAM)(arg_1,\n skeleton=arg_2,\n lr_gen=arg_0.lr, lr_disc=arg_0.dlr,\n regul_param=arg_0.l1, nh=arg_0.nh, dnh=arg_0.dnh,\n gpu=bool(arg_5), train_epochs=arg_0.train,\n test_epochs=arg_0.test, batch_size=arg_0.batchsize,\n arg_7=arg_7, arg_6=arg_6, gpu_no=arg_13 % max(arg_5, 1))\n for arg_13 in range(arg_3))\n else:\n arg_10 = [run_SAM(arg_1, skeleton=arg_2,\n lr_gen=arg_0.lr, lr_disc=arg_0.dlr,\n regul_param=arg_0.l1, nh=arg_0.nh, dnh=arg_0.dnh,\n gpu=bool(arg_5), train_epochs=arg_0.train,\n test_epochs=arg_0.test, batch_size=arg_0.batchsize,\n arg_7=arg_7, arg_6=arg_6, gpu_no=0)\n for arg_13 in range(arg_3)]\n if arg_9:\n return arg_10\n else:\n arg_11 = arg_10[0]\n for arg_12 in arg_10[1:]:\n arg_11 += arg_12\n arg_11 /= arg_3\n return nx.relabel_nodes(nx.DiGraph(arg_11), {arg_13: arg_14 for arg_13, arg_14 in enumerate(arg_1.columns)})"} +{"_id": "doc_3546", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\" Infer causal relationships between 2 variables using the CDS statistic\n\n Args:\n a (numpy.ndarray): Variable 1\n b (numpy.ndarray): Variable 2\n\n Returns:\n float: Causation score (Value : 1 if a->b and -1 if b->a)\n \"\"\"\n return arg_0.cds_score(arg_2, arg_1) - arg_0.cds_score(arg_1, arg_2)"} +{"_id": "doc_3547", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Prediction method for pairwise causal inference using the ANM model.\n\n Args:\n a (numpy.ndarray): Variable 1\n b (numpy.ndarray): Variable 2\n\n Returns:\n float: Causation score (Value : 1 if a->b and -1 if b->a)\n \"\"\"\n arg_1 = scale(arg_1).reshape((-1, 1))\n arg_2 = scale(arg_2).reshape((-1, 1))\n\n return arg_0.anm_score(arg_2, arg_1) - arg_0.anm_score(arg_1, arg_2)"} +{"_id": "doc_3548", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute the fitness score of the ANM model in the x->y direction.\n\n Args:\n a (numpy.ndarray): Variable seen as cause\n b (numpy.ndarray): Variable seen as effect\n\n Returns:\n float: ANM fit score\n \"\"\"\n arg_3 = GaussianProcessRegressor().fit(arg_1, arg_2)\n arg_4 = arg_3.predict(arg_1)\n arg_5 = normalized_hsic(arg_4 - arg_2, arg_1)\n\n return arg_5"} +{"_id": "doc_3549", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.01, arg_3=2000, **arg_4):\n \"\"\" Predict the graph skeleton.\n\n Args:\n data (pandas.DataFrame): observational data\n alpha (float): regularization parameter\n max_iter (int): maximum number of iterations\n\n Returns:\n networkx.Graph: Graph skeleton\n \"\"\"\n arg_5 = GraphLasso(arg_2=arg_2, arg_3=arg_3)\n arg_5.fit(arg_1.values)\n\n return 
nx.relabel_nodes(nx.DiGraph(arg_5.get_precision()),\n {arg_6: arg_7 for arg_6, arg_7 in enumerate(arg_1.columns)})"} +{"_id": "doc_3550", "title": "", "text": "def Func(arg_0):\n \"\"\"Autoset GPU parameters using CUDA_VISIBLE_DEVICES variables.\n\n Return default config if variable not set.\n :param set_var: Variable to set. Must be of type ConfigSettings\n \"\"\"\n try:\n arg_1 = ast.literal_eval(os.environ[\"CUDA_VISIBLE_DEVICES\"])\n if type(arg_1) != list and type(arg_1) != tuple:\n arg_1 = [arg_1]\n if len(arg_1) != 0:\n arg_0.GPU = len(arg_1)\n arg_0.NB_JOBS = len(arg_1)\n warnings.warn(\"Detecting CUDA devices : {}\".format(arg_1))\n\n except KeyError:\n arg_0.GPU = check_cuda_devices()\n arg_0.NB_JOBS = arg_0.GPU\n warnings.warn(\"Detecting {} CUDA devices.\".format(arg_0.GPU))\n if not arg_0.GPU:\n warnings.warn(\"No GPU automatically detected. Setting SETTINGS.GPU to 0, \" +\n \"and SETTINGS.NB_JOBS to cpu_count.\")\n arg_0.GPU = 0\n arg_0.NB_JOBS = multiprocessing.cpu_count()\n\n return arg_0"} +{"_id": "doc_3551", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Generic Func method, chooses which subfunction to use for a more\n suited.\n\n Depending on the type of `x` and of `*args`, this function process to execute\n different functions in the priority order:\n\n 1. If ``args[0]`` is a ``networkx.(Di)Graph``, then ``self.orient_graph`` is executed.\n 2. If ``args[0]`` exists, then ``self.Func_proba`` is executed.\n 3. If ``x`` is a ``pandas.DataFrame``, then ``self.Func_dataset`` is executed.\n 4. If ``x`` is a ``pandas.Series``, then ``self.Func_proba`` is executed.\n\n Args:\n x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.\n args (numpy.array or networkx.Graph): graph or second variable.\n\n Returns:\n pandas.Dataframe or networkx.Digraph: Funcions output\n \"\"\"\n if len(arg_2) > 0:\n if type(arg_2[0]) == nx.Graph or type(arg_2[0]) == nx.DiGraph:\n return arg_0.orient_graph(arg_1, *arg_2, **arg_3)\n else:\n return arg_0.Func_proba(arg_1, *arg_2, **arg_3)\n elif type(arg_1) == DataFrame:\n return arg_0.Func_dataset(arg_1, *arg_2, **arg_3)\n elif type(arg_1) == Series:\n return arg_0.Func_proba(arg_1.iloc[0], arg_1.iloc[1], *arg_2, **arg_3)"} +{"_id": "doc_3552", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Generic dataset prediction function.\n\n Runs the score independently on all pairs.\n\n Args:\n x (pandas.DataFrame): a CEPC format Dataframe.\n kwargs (dict): additional arguments for the algorithms\n\n Returns:\n pandas.DataFrame: a Dataframe with the predictions.\n \"\"\"\n arg_3 = arg_2.get(\"printout\", None)\n arg_4 = []\n arg_5 = []\n arg_1.columns = [\"A\", \"B\"]\n for arg_7, arg_8 in arg_1.iterrows():\n arg_9 = scale(arg_8['A'].reshape((len(arg_8['A']), 1)))\n arg_10 = scale(arg_8['B'].reshape((len(arg_8['B']), 1)))\n\n arg_4.append(arg_0.predict_proba(arg_9, arg_10, arg_7=arg_7))\n\n if arg_3 is not None:\n arg_5.append([arg_8['SampleID'], arg_4[-1]])\n DataFrame(arg_5, arg_6=['SampleID', 'Predictions']).to_csv(\n arg_3, index=False)\n return arg_4"} +{"_id": "doc_3553", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run the algorithm on a directed_graph.\n\n Args:\n data (pandas.DataFrame): DataFrame containing the data\n graph (networkx.DiGraph): Skeleton of the graph to orient\n\n Returns:\n networkx.DiGraph: Solution on the given skeleton.\n\n .. 
warning::\n The algorithm is ran on the skeleton of the given graph.\n\n \"\"\"\n warnings.warn(\"The algorithm is ran on the skeleton of the given graph.\")\n return arg_0.orient_undirected_graph(arg_1, nx.Graph(arg_2))"} +{"_id": "doc_3554", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the gaussian kernel on a 1D vector.\"\"\"\n arg_1 = np.power(euclidean_distances(arg_0, arg_0), 2)\n return np.exp(-arg_1 / (2.0))"} +{"_id": "doc_3555", "title": "", "text": "def Func(arg_0):\n \"\"\"Init a noise variable.\"\"\"\n return np.random.rand(1) * np.random.randn(arg_0, 1) \\\n + random.sample([2, -2], 1)"} +{"_id": "doc_3556", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Runs Jarfo independently on all pairs.\n\n Args:\n x (pandas.DataFrame): a CEPC format Dataframe.\n kwargs (dict): additional arguments for the algorithms\n\n Returns:\n pandas.DataFrame: a Dataframe with the predictions.\n \"\"\"\n if len(list(arg_1.columns)) == 2:\n arg_1.columns = [\"A\", \"B\"]\n if arg_0.model is None:\n raise AssertionError(\"Model has not been trained before predictions\")\n arg_3 = DataFrame()\n\n for arg_4, arg_5 in arg_1.iterrows():\n arg_3 = arg_3.append(arg_5, ignore_index=True)\n arg_3 = arg_3.append({'A': arg_5[\"B\"], 'B': arg_5[\"A\"]}, ignore_index=True)\n return predict.predict(deepcopy(arg_3), deepcopy(arg_0.model))[::2]"} +{"_id": "doc_3557", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0, **arg_4):\n \"\"\" Use Jarfo to predict the causal direction of a pair of vars.\n\n Args:\n a (numpy.ndarray): Variable 1\n b (numpy.ndarray): Variable 2\n idx (int): (optional) index number for printing purposes\n\n Returns:\n float: Causation score (Value : 1 if a->b and -1 if b->a)\n \"\"\"\n return arg_0.predict_dataset(DataFrame([[arg_1, arg_2]],\n columns=['A', 'B']))"} +{"_id": "doc_3558", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Implementation of the ARACNE algorithm.\n\n Args:\n mat (numpy.ndarray): matrix, if it is a square matrix, the program assumes\n it is a relevance matrix where mat(i,j) represents the similarity content\n between nodes i and j. Elements of matrix should be\n non-negative.\n\n Returns:\n mat_nd (numpy.ndarray): Output deconvolved matrix (direct dependency matrix). Its components\n represent direct edge weights of observed interactions.\n\n .. 
note::\n Ref: ARACNE: An Algorithm for the Reconstruction of Gene Regulatory Networks in a Mammalian Cellular Context\n Adam A Margolin, Ilya Nemenman, Katia Basso, Chris Wiggins, Gustavo Stolovitzky, Riccardo Dalla Favera and Andrea Califano\n DOI: https://doi.org/10.1186/1471-2105-7-S1-S7\n \"\"\"\n arg_2 = arg_1.get('I0', 0.0) # No default thresholding\n arg_3 = arg_1.get('W0', 0.05)\n\n # thresholding\n arg_0 = np.where(arg_0 > arg_2, arg_0, 0)\n\n # Finding triplets and filtering them\n for arg_4 in range(arg_0.shape[0]-2):\n for arg_5 in range(arg_4+1, arg_0.shape[0]-1):\n for arg_6 in range(arg_5+1, arg_0.shape[0]):\n arg_7 = [arg_0[arg_4, arg_5], arg_0[arg_5, arg_6], arg_0[arg_4, arg_6]]\n arg_8, arg_9 = min(enumerate(arg_7), key=operator.itemgetter(1))\n if 0 < arg_9 < arg_3:\n if arg_8 == 0:\n arg_0[arg_4, arg_5] = arg_0[arg_5, arg_4] = 0.\n elif arg_8 == 1:\n arg_0[arg_5, arg_6] = arg_0[arg_6, arg_5] = 0.\n else:\n arg_0[arg_4, arg_6] = arg_0[arg_6, arg_4] = 0.\n return arg_0"} +{"_id": "doc_3559", "title": "", "text": "def Func(arg_0, arg_1=\"aracne\", **arg_2):\n \"\"\"Apply deconvolution to a networkx graph.\n\n Args:\n g (networkx.Graph): Graph to apply deconvolution to\n alg (str): Algorithm to use ('aracne', 'clr', 'nd')\n kwargs (dict): extra options for algorithms\n\n Returns:\n networkx.Graph: graph with undirected links removed.\n \"\"\"\n arg_1 = {\"aracne\": aracne,\n \"nd\": network_deconvolution,\n \"clr\": clr}[arg_1]\n arg_3 = np.array(nx.adjacency_matrix(arg_0).todense())\n return nx.relabel_nodes(nx.DiGraph(arg_1(arg_3, **arg_2)),\n {arg_4: arg_5 for arg_4, arg_5 in enumerate(list(arg_0.nodes()))})"} +{"_id": "doc_3560", "title": "", "text": "def Func(arg_0):\n \"\"\"Input a graph and output a DAG.\n\n The heuristic is to reverse the edge with the lowest score of the cycle\n if possible, else remove it.\n\n Args:\n g (networkx.DiGraph): Graph to modify to output a DAG\n\n Returns:\n networkx.DiGraph: DAG made out of the input graph.\n \"\"\"\n while not nx.is_directed_acyclic_graph(arg_0):\n arg_1 = next(nx.simple_cycles(arg_0))\n arg_2 = []\n arg_3 = []\n for arg_4, arg_5 in zip(arg_1[:1], arg_1[:1]):\n arg_3.append((arg_4, arg_5))\n arg_2.append(arg_0[arg_4][arg_5]['weight'])\n\n arg_4, arg_5 = arg_3[arg_2.index(min(arg_2))]\n arg_6 = deepcopy(arg_0)\n arg_6.remove_edge(arg_4, arg_5)\n arg_6.add_edge(arg_5, arg_4)\n\n if len(list(nx.simple_cycles(arg_6))) < len(list(nx.simple_cycles(arg_0))):\n arg_0.add_edge(arg_5, arg_4, weight=min(arg_2))\n arg_0.remove_edge(arg_4, arg_5)\n return arg_0"} +{"_id": "doc_3561", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the weighted average and standard deviation.\n\n values, weights -- numpy ndarrays with the same shape.\n \"\"\"\n arg_2 = np.average(arg_0, arg_1=arg_1, axis=0)\n arg_3 = np.dot(arg_1, (arg_0 - arg_2) ** 2) / arg_1.sum() # Fast and numerically precise\n return (arg_2, np.sqrt(arg_3))"} +{"_id": "doc_3562", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Pass data through the net structure.\n\n :param x: input data: shape (:,1)\n :type x: torch.Variable\n :return: output of the shallow net\n :rtype: torch.Variable\n\n \"\"\"\n arg_0.noise.normal_()\n return arg_0.layers(th.cat([arg_1, arg_0.noise], 1))"} +{"_id": "doc_3563", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0.01, arg_4=1000, arg_5=1000, arg_6=0, arg_7=None, **arg_8):\n \"\"\"Run the GNN on a pair x,y of FloatTensor data.\"\"\"\n arg_7 = SETTINGS.get_default(arg_7=arg_7)\n arg_9 = 
th.optim.Adam(arg_0.parameters(), arg_3=arg_3)\n arg_10 = 0\n arg_11 = 0\n\n for arg_12 in range(arg_4 + arg_5):\n arg_9.zero_grad()\n arg_13 = arg_0.forward(arg_1)\n arg_14 = arg_0.criterion(arg_13, arg_2)\n arg_10 += arg_14.item()\n\n if arg_12 < arg_4:\n arg_14.backward()\n arg_9.step()\n else:\n arg_11 += arg_10\n\n # print statistics\n if arg_7 and not arg_12 % 300:\n print('Idx:{}; epoch:{}; score:{}'.\n format(arg_6, arg_12, arg_10/300))\n arg_10 = 0.0\n\n return arg_11 / arg_5"} +{"_id": "doc_3564", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=6, arg_4=None, arg_5=None,\n arg_6=0, arg_7=None, arg_8=0.01,\n arg_9=16, arg_10=1000, arg_11=1000):\n \"\"\"Run multiple times GNN to estimate the causal direction.\n\n Args:\n a (np.ndarray): Variable 1\n b (np.ndarray): Variable 2\n nb_runs (int): number of runs to execute per batch (before testing for significance with t-test).\n nb_jobs (int): number of runs to execute in parallel. (Initialized with ``cdt.SETTINGS.NB_JOBS``)\n gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``)\n idx (int): (optional) index of the pair, for printing purposes\n verbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``)\n ttest_threshold (float): threshold to stop the boostraps before ``nb_max_runs`` if the difference is significant\n nb_max_runs (int): Max number of bootstraps\n train_epochs (int): Number of epochs during which the model is going to be trained\n test_epochs (int): Number of epochs during which the model is going to be tested\n\n Returns:\n float: Causal score of the pair (Value : 1 if a->b and -1 if b->a)\n \"\"\"\n arg_12, arg_7, arg_5 = SETTINGS.get_default(('nb_jobs', arg_4), ('verbose', arg_7), ('gpu', arg_5))\n arg_13 = np.stack([arg_1.ravel(), arg_2.ravel()], 1)\n arg_14 = TTestCriterion(\n max_iter=arg_9, runs_per_iter=arg_3, threshold=arg_8)\n\n arg_15 = []\n arg_16 = []\n\n while arg_14.loop(arg_15, arg_16):\n if arg_4 != 1:\n arg_17 = Parallel(n_jobs=arg_4)(delayed(GNN_instance)(\n arg_13, arg_6=arg_6, device='cuda:{}'.format(run % arg_5) if arg_5 else 'cpu',\n arg_7=arg_7, arg_10=arg_10, arg_11=arg_11) for run in range(arg_14.iter, arg_14.iter + arg_3))\n else:\n arg_17 = [GNN_instance(arg_13, arg_6=arg_6,\n device='cuda:0' if arg_5 else 'cpu',\n arg_7=arg_7,\n arg_10=arg_10,\n arg_11=arg_11)\n for run in range(arg_14.iter, arg_14.iter + arg_3)]\n arg_15.extend([arg_18[0] for arg_18 in arg_17])\n arg_16.extend([arg_18[1] for arg_18 in arg_17])\n\n if arg_7:\n print(\"P-value after {} runs : {}\".format(arg_14.iter,\n arg_14.p_value))\n\n arg_19 = np.mean(arg_15)\n arg_20 = np.mean(arg_16)\n\n return (arg_20 - arg_19) / (arg_20 + arg_19)"} +{"_id": "doc_3565", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Passing data through the network.\n\n :param x: 2d tensor containing both (x,y) Variables\n :return: output of the net\n \"\"\"\n\n arg_2 = arg_0.conv(arg_1).mean(dim=2)\n return arg_0.dense(arg_2)"} +{"_id": "doc_3566", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Updates all rows that match the filter.\"\"\"\n\n # build up the query to execute\n arg_0._for_write = True\n if django.VERSION >= (2, 0):\n arg_3 = arg_0.query.chain(UpdateQuery)\n else:\n arg_3 = arg_0.query.clone(UpdateQuery)\n arg_3._annotations = None\n arg_3.add_Func_values(arg_1)\n\n # build the compiler for for the query\n arg_5 = django.db.connections[arg_0.db]\n arg_6 = PostgresReturningUpdateCompiler(arg_3, arg_5, arg_0.db)\n\n # execute the query\n with transaction.atomic(using=arg_0.db, 
savepoint=False):\n arg_7 = arg_6.execute_sql(CURSOR)\n arg_0._result_cache = None\n\n # send out a signal for each row\n for arg_9 in arg_7:\n signals.Func.send(arg_0.model, pk=arg_9[0])\n\n # the original Func(..) returns the amount of rows\n # affected, let's do the same\n return len(arg_7)"} +{"_id": "doc_3567", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Creates multiple new records in the database.\n\n This allows specifying custom conflict behavior using .on_conflict().\n If no special behavior was specified, this uses the normal Django create(..)\n\n Arguments:\n rows:\n An array of dictionaries, where each dictionary\n describes the fields to insert.\n\n return_model (default: False):\n If model instances should be returned rather than\n just dicts.\n\n Returns:\n A list of either the dicts of the rows inserted, including the pk or\n the models of the rows inserted with defaults for any fields not specified\n \"\"\"\n\n if arg_0.conflict_target or arg_0.conflict_action:\n arg_3 = arg_0._build_insert_compiler(arg_1)\n arg_4 = arg_3.execute_sql(return_id=True)\n if arg_2:\n return [arg_0.model(**dict(arg_5, **arg_6)) for arg_5, arg_6 in zip(arg_1, arg_4)]\n else:\n return [dict(arg_5, **arg_6) for arg_5, arg_6 in zip(arg_1, arg_4)]\n\n # no special action required, use the standard Django bulk_create(..)\n return super().bulk_create([arg_0.model(**arg_7) for arg_7 in arg_1])"} +{"_id": "doc_3568", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Creates a new record in the database.\n\n This allows specifying custom conflict behavior using .on_conflict().\n If no special behavior was specified, this uses the normal Django create(..)\n\n Arguments:\n fields:\n The fields of the row to create.\n\n Returns:\n The primary key of the record that was created.\n \"\"\"\n\n if arg_0.conflict_target or arg_0.conflict_action:\n arg_2 = arg_0._build_Func_compiler([arg_1])\n arg_3 = arg_2.execute_sql(return_id=True)\n\n arg_4 = arg_0.model._meta.pk.name\n return arg_3[0][arg_4]\n\n # no special action required, use the standard Django create(..)\n return super().create(**arg_1).pk"} +{"_id": "doc_3569", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Creates a new record in the database and then gets\n the entire row.\n\n This allows specifying custom conflict behavior using .on_conflict().\n If no special behavior was specified, this uses the normal Django create(..)\n\n Arguments:\n fields:\n The fields of the row to create.\n\n Returns:\n The model instance representing the row that was created.\n \"\"\"\n\n if not arg_0.conflict_target and not arg_0.conflict_action:\n # no special action required, use the standard Django create(..)\n return super().create(**arg_1)\n\n arg_2 = arg_0._build_insert_compiler([arg_1])\n arg_3 = arg_2.execute_sql(return_id=False)\n\n arg_4 = arg_3[0]\n\n # get a list of columns that are officially part of the model and preserve the fact that the attribute name\n # might be different than the database column name\n arg_5 = {}\n for arg_6 in arg_0.model._meta.local_concrete_fields:\n arg_5[arg_6.column] = arg_6.attname\n\n # strip out any columns/fields returned by the db that\n # are not present in the model\n arg_8 = {}\n for arg_9, arg_10 in arg_4.items():\n try:\n arg_8[arg_5[arg_9]] = arg_10\n except KeyError:\n pass\n\n return arg_0.model(**arg_8)"} +{"_id": "doc_3570", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3: arg_4):\n \"\"\"Verifies whether this field is gonna modify something\n on its own.\n\n 
\"Magical\" means that a field modifies the field value\n during the pre_save.\n\n Arguments:\n model_instance:\n The model instance the field is defined on.\n\n field:\n The field to get of whether the field is\n magical.\n\n is_insert:\n Pretend whether this is an insert?\n\n Returns:\n True when this field modifies something.\n \"\"\"\n\n # does this field modify someting upon insert?\n arg_5 = getattr(arg_1, arg_2.name, None)\n arg_2.pre_save(arg_1, arg_3)\n arg_6 = getattr(arg_1, arg_2.name, None)\n\n return arg_5 != arg_6"} +{"_id": "doc_3571", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Gets the fields to use in an upsert.\n\n This some nice magic. We'll split the fields into\n a group of \"insert fields\" and \"update fields\":\n\n INSERT INTO bla (\"val1\", \"val2\") ON CONFLICT DO UPDATE SET val1 = EXCLUDED.val1\n\n ^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^\n insert_fields update_fields\n\n Often, fields appear in both lists. But, for example,\n a :see:DateTime field with `auto_now_add=True` set, will\n only appear in \"insert_fields\", since it won't be set\n on existing rows.\n\n Other than that, the user specificies a list of fields\n in the upsert() call. That migt not be all fields. The\n user could decide to leave out optional fields. If we\n end up doing an update, we don't want to overwrite\n those non-specified fields.\n\n We cannot just take the list of fields the user\n specifies, because as mentioned, some fields\n make modifications to the model on their own.\n\n We'll have to detect which fields make modifications\n and include them in the list of insert/update fields.\n \"\"\"\n\n arg_2 = arg_0.model(**arg_1)\n arg_3 = []\n arg_4 = []\n\n for arg_5 in arg_2._meta.local_concrete_fields:\n arg_6 = arg_5.default != NOT_PROVIDED\n if (arg_5.name in arg_1 or arg_5.column in arg_1):\n arg_3.append(arg_5)\n arg_4.append(arg_5)\n continue\n elif arg_6:\n arg_3.append(arg_5)\n continue\n\n # special handling for 'pk' which always refers to\n # the primary key, so if we the user specifies `pk`\n # instead of a concrete field, we have to handle that\n if arg_5.primary_key is True and 'pk' in arg_1:\n arg_3.append(arg_5)\n arg_4.append(arg_5)\n continue\n\n if arg_0._is_magical_field(arg_2, arg_5, is_insert=True):\n arg_3.append(arg_5)\n\n if arg_0._is_magical_field(arg_2, arg_5, is_insert=False):\n arg_4.append(arg_5)\n\n return arg_3, arg_4"} +{"_id": "doc_3572", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"When a model gets created or updated.\"\"\"\n\n arg_2, arg_3 = arg_1['created'], arg_1['instance']\n\n if arg_2:\n signals.create.send(arg_0, pk=arg_3.pk)\n else:\n signals.update.send(arg_0, pk=arg_3.pk)"} +{"_id": "doc_3573", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compiles the HStore value into SQL.\n\n Compiles expressions contained in the values\n of HStore entries as well.\n\n Given a dictionary like:\n\n dict(key1='val1', key2='val2')\n\n The resulting SQL will be:\n\n hstore(hstore('key1', 'val1'), hstore('key2', 'val2'))\n \"\"\"\n\n arg_3 = []\n for arg_4, arg_5 in arg_0.value.items():\n if hasattr(arg_5, 'Func'):\n arg_6, arg_7 = arg_5.Func(arg_1, arg_2)\n arg_3.append('hstore(\\'%s\\', %s)' % (\n arg_4, arg_6 % arg_7))\n elif arg_5 is not None:\n arg_3.append('hstore(\\'%s\\', \\'%s\\')' % ((\n arg_4, arg_5)))\n else:\n arg_3.append('hstore(\\'%s\\', NULL)' % arg_4)\n\n return '%s' % ' || '.join(arg_3), []"} +{"_id": "doc_3574", "title": "", "text": "def Func(arg_0, arg_1: arg_2[arg_3, arg_4]) -> None:\n \"\"\"Adds an extra 
condition to an existing JOIN.\n\n This allows you to for example do:\n\n INNER JOIN othertable ON (mytable.id = othertable.other_id AND [extra conditions])\n\n This does not work if nothing else in your query doesn't already generate the\n initial join in the first place.\n \"\"\"\n\n arg_5 = arg_0.get_initial_alias()\n arg_6 = arg_0.get_meta()\n\n for arg_7, arg_8 in arg_1.items():\n arg_9 = arg_7.split(LOOKUP_SEP)\n arg_10 = arg_0.setup_joins(arg_9, arg_6, arg_5, allow_many=True)\n arg_0.trim_joins(arg_10[1], arg_10[3], arg_10[4])\n\n arg_11 = arg_10[3][-1]\n arg_12 = arg_10[1][-1]\n arg_13 = arg_0.alias_map.get(arg_11)\n\n if not arg_13:\n raise SuspiciousOperation((\n 'Cannot add an extra join condition for \"%s\", there\\'s no'\n ' existing join to add it to.'\n ) % arg_11)\n\n # convert the Join object into a ConditionalJoin object, which\n # allows us to add the extra condition\n if not isinstance(arg_13, ConditionalJoin):\n arg_0.alias_map[arg_11] = ConditionalJoin.from_join(arg_13)\n arg_13 = arg_0.alias_map[arg_11]\n\n arg_13.add_condition(arg_12, arg_8)"} +{"_id": "doc_3575", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_2=[]):\n \"\"\"Sets the Func to be used in this query.\n\n Insert fields are fields that are definitely\n going to be inserted, and if an existing row\n is found, are going to be overwritten with the\n specified value.\n\n Update fields are fields that should be overwritten\n in case an update takes place rather than an insert.\n If we're dealing with a INSERT, these will not be used.\n\n Arguments:\n objs:\n The objects to apply this query to.\n\n insert_fields:\n The fields to use in the INSERT statement\n\n update_fields:\n The fields to only use in the UPDATE statement.\n \"\"\"\n\n arg_0.insert_Func(arg_3, arg_1, raw=False)\n arg_0.update_fields = arg_4"} +{"_id": "doc_3576", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Creates a REQUIRED CONSTRAINT for the specified hstore key.\"\"\"\n\n arg_4 = arg_0._required_constraint_name(\n arg_1, arg_2, arg_3)\n\n arg_5 = arg_0.sql_hstore_required_create.format(\n arg_4=arg_0.quote_name(arg_4),\n table=arg_0.quote_name(arg_1),\n arg_2=arg_0.quote_name(arg_2.column),\n arg_3=arg_3\n )\n arg_0.execute(arg_5)"} +{"_id": "doc_3577", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5):\n \"\"\"Renames an existing REQUIRED CONSTRAINT for the specified\n hstore key.\"\"\"\n\n arg_6 = arg_0._required_constraint_name(\n arg_1, arg_3, arg_5)\n arg_7 = arg_0._required_constraint_name(\n arg_2, arg_4, arg_5)\n\n arg_8 = arg_0.sql_hstore_required_rename.format(\n table=arg_0.quote_name(arg_2),\n arg_6=arg_0.quote_name(arg_6),\n arg_7=arg_0.quote_name(arg_7)\n )\n arg_0.execute(arg_8)"} +{"_id": "doc_3578", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Drops a REQUIRED CONSTRAINT for the specified hstore key.\"\"\"\n\n arg_4 = arg_0._required_constraint_name(\n arg_1, arg_2, arg_3)\n\n arg_5 = arg_0.sql_hstore_required_drop.format(\n table=arg_0.quote_name(arg_1),\n arg_4=arg_0.quote_name(arg_4)\n )\n arg_0.execute(arg_5)"} +{"_id": "doc_3579", "title": "", "text": "def Func(arg_0: arg_1, arg_2, arg_3):\n \"\"\"Gets the name for a CONSTRAINT that applies\n to a single hstore key.\n\n Arguments:\n table:\n The name of the table the field is\n a part of.\n\n field:\n The hstore field to create a\n UNIQUE INDEX for.\n\n key:\n The name of the hstore key\n to create the name for.\n\n Returns:\n The name for the UNIQUE index.\n 
\"\"\"\n\n return '{table}_{field}_required_{postfix}'.format(\n arg_0=arg_0,\n arg_2=arg_2.column,\n postfix=arg_3\n )"} +{"_id": "doc_3580", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=''):\n \"\"\"Creates the actual SQL used when applying the migration.\"\"\"\n if django.VERSION >= (2, 0):\n arg_4 = super().Func(arg_1, arg_2, arg_3)\n arg_4.template = arg_0.sql_create_index\n arg_4.parts['condition'] = arg_0.condition\n return arg_4\n else:\n arg_7 = arg_0.sql_create_index\n arg_8 = {\n **Index.get_sql_create_template_values(arg_0, arg_1, arg_2, arg_3),\n 'condition': arg_0.condition\n }\n return arg_7 % arg_8"} +{"_id": "doc_3581", "title": "", "text": "def Func(arg_0):\n \"\"\"Ran to prepare the configured database.\n\n This is where we enable the `hstore` extension\n if it wasn't enabled yet.\"\"\"\n\n super().Func()\n with arg_0.cursor() as cursor:\n try:\n cursor.execute('CREATE EXTENSION IF NOT EXISTS hstore')\n except ProgrammingError: # permission denied\n logger.warning(\n 'Failed to create \"hstore\" extension. '\n 'Tables with hstore columns may fail to migrate. '\n 'If hstore is needed, make sure you are connected '\n 'to the database as a superuser '\n 'or add the extension manually.',\n exc_info=True)"} +{"_id": "doc_3582", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Override the base class so it doesn't cast all values\n to strings.\n\n psqlextra supports expressions in hstore fields, so casting\n all values to strings is a bad idea.\"\"\"\n\n arg_1 = Field.Func(arg_0, arg_1)\n\n if isinstance(arg_1, dict):\n arg_2 = {}\n for arg_3, arg_4 in arg_1.items():\n if isinstance(arg_4, Expression):\n arg_2[arg_3] = arg_4\n elif arg_4 is not None:\n arg_2[arg_3] = str(arg_4)\n else:\n arg_2[arg_3] = arg_4\n\n arg_1 = arg_2\n\n if isinstance(arg_1, list):\n arg_1 = [str(item) for item in arg_1]\n\n return arg_1"} +{"_id": "doc_3583", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Rewrites a formed SQL INSERT query to include\n the ON CONFLICT clause.\n\n Arguments:\n sql:\n The SQL INSERT query to rewrite.\n\n params:\n The parameters passed to the query.\n\n returning:\n What to put in the `RETURNING` clause\n of the resulting query.\n\n Returns:\n A tuple of the rewritten SQL query and new params.\n \"\"\"\n\n arg_4 = arg_0.qn(arg_0.query.model._meta.pk.attname) if arg_3 else '*'\n\n if arg_0.query.conflict_action.value == 'UPDATE':\n return arg_0.Func_update(arg_1, arg_2, arg_4)\n elif arg_0.query.conflict_action.value == 'NOTHING':\n return arg_0.Func_nothing(arg_1, arg_2, arg_4)\n\n raise SuspiciousOperation((\n '%s is not a valid conflict action, specify '\n 'ConflictAction.UPDATE or ConflictAction.NOTHING.'\n ) % str(arg_0.query.conflict_action))"} +{"_id": "doc_3584", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Rewrites a formed SQL INSERT query to include\n the ON CONFLICT DO UPDATE clause.\"\"\"\n\n arg_4 = ', '.join([\n '{0} = EXCLUDED.{0}'.format(arg_0.qn(field.column))\n for field in arg_0.query.update_fields\n ])\n\n # build the conflict target, the columns to watch\n # for conflicts\n arg_5 = arg_0._build_conflict_target()\n\n arg_6 = arg_0.query.index_predicate\n\n arg_7 = (\n '{insert} ON CONFLICT {conflict_target} DO UPDATE '\n 'SET {update_columns} RETURNING {returning}'\n )\n\n if arg_6:\n arg_7 = (\n '{insert} ON CONFLICT {conflict_target} WHERE {index_predicate} DO UPDATE '\n 'SET {update_columns} RETURNING {returning}'\n )\n\n return (\n arg_7.format(\n insert=arg_1,\n arg_5=arg_5,\n 
arg_4=arg_4,\n arg_3=arg_3,\n arg_6=arg_6,\n ),\n arg_2\n )"} +{"_id": "doc_3585", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Rewrites a formed SQL INSERT query to include\n the ON CONFLICT DO NOTHING clause.\"\"\"\n\n # build the conflict target, the columns to watch\n # for conflicts\n arg_4 = arg_0._build_conflict_target()\n\n arg_5 = ' AND '.join([\n '{0} = %s'.format(arg_0._format_field_name(field_name))\n for field_name in arg_0.query.conflict_target\n ])\n\n arg_6 = [\n arg_0._format_field_value(field_name)\n for field_name in arg_0.query.conflict_target\n ]\n\n arg_2 = arg_2 + tuple(arg_6)\n\n # this looks complicated, and it is, but it is for a reason... a normal\n # ON CONFLICT DO NOTHING doesn't return anything if the row already exists\n # so we do DO UPDATE instead that never executes to lock the row, and then\n # select from the table in case we're dealing with an existing row..\n return (\n (\n 'WITH insdata AS ('\n '{insert} ON CONFLICT {conflict_target} DO UPDATE'\n ' SET {pk_column} = NULL WHERE FALSE RETURNING {returning})'\n ' SELECT * FROM insdata UNION ALL'\n ' SELECT {returning} FROM {table} WHERE {where_clause} LIMIT 1;'\n ).format(\n insert=arg_1,\n arg_4=arg_4,\n pk_column=arg_0.qn(arg_0.query.model._meta.pk.column),\n arg_3=arg_3,\n table=arg_0.query.objs[0]._meta.db_table,\n arg_5=arg_5\n ),\n arg_2\n )"} +{"_id": "doc_3586", "title": "", "text": "def Func(arg_0):\n \"\"\"Builds the `conflict_target` for the ON CONFLICT\n clause.\"\"\"\n\n arg_1 = []\n\n if not isinstance(arg_0.query.conflict_target, list):\n raise SuspiciousOperation((\n '%s is not a valid conflict target, specify '\n 'a list of column names, or tuples with column '\n 'names and hstore key.'\n ) % str(arg_0.query.conflict_target))\n\n def _assert_valid_field(arg_2):\n arg_2 = arg_0._normalize_field_name(arg_2)\n if arg_0._get_model_field(arg_2):\n return\n\n raise SuspiciousOperation((\n '%s is not a valid conflict target, specify '\n 'a list of column names, or tuples with column '\n 'names and hstore key.'\n ) % str(arg_2))\n\n for arg_2 in arg_0.query.conflict_target:\n _assert_valid_field(arg_2)\n\n # special handling for hstore keys\n if isinstance(arg_2, tuple):\n arg_1.append(\n '(%s->\\'%s\\')' % (\n arg_0._format_field_name(arg_2),\n arg_2[1]\n )\n )\n else:\n arg_1.append(\n arg_0._format_field_name(arg_2))\n\n return '(%s)' % ','.join(arg_1)"} +{"_id": "doc_3587", "title": "", "text": "def Func(arg_0, arg_1) -> str:\n \"\"\"Formats a field's name for usage in SQL.\n\n Arguments:\n field_name:\n The field name to format.\n\n Returns:\n The specified field name formatted for\n usage in SQL.\n \"\"\"\n\n arg_2 = arg_0._get_model_field(arg_1)\n return arg_0.qn(arg_2.column)"} +{"_id": "doc_3588", "title": "", "text": "def Func(arg_0, arg_1) -> str:\n \"\"\"Formats a field's value for usage in SQL.\n\n Arguments:\n field_name:\n The name of the field to format\n the value of.\n\n Returns:\n The field's value formatted for usage\n in SQL.\n \"\"\"\n\n arg_1 = arg_0._normalize_field_name(arg_1)\n arg_2 = arg_0._get_model_field(arg_1)\n\n return SQLInsertCompiler.prepare_value(\n arg_0,\n arg_2,\n # Note: this deliberately doesn't use `pre_save_val` as we don't\n # want things like auto_now on DateTimeField (etc.) to change the\n # value. 
We rely on pre_save having already been done by the\n # underlying compiler so that things like FileField have already had\n # the opportunity to save out their data.\n getattr(arg_0.query.objs[0], arg_2.attname)\n )"} +{"_id": "doc_3589", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Creates a UNIQUE constraint for the specified hstore keys.\"\"\"\n\n arg_4 = arg_0._unique_constraint_name(\n arg_1._meta.db_table, arg_2, arg_3)\n arg_5 = [\n '(%s->\\'%s\\')' % (arg_2.column, key)\n for key in arg_3\n ]\n arg_6 = arg_0.sql_hstore_unique_create.format(\n arg_4=arg_0.quote_name(arg_4),\n table=arg_0.quote_name(arg_1._meta.db_table),\n arg_5=','.join(arg_5)\n )\n arg_0.execute(arg_6)"} +{"_id": "doc_3590", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5):\n \"\"\"Renames an existing UNIQUE constraint for the specified\n hstore keys.\"\"\"\n\n arg_6 = arg_0._unique_constraint_name(\n arg_1, arg_3, arg_5)\n arg_7 = arg_0._unique_constraint_name(\n arg_2, arg_4, arg_5)\n\n arg_8 = arg_0.sql_hstore_unique_rename.format(\n arg_6=arg_0.quote_name(arg_6),\n arg_7=arg_0.quote_name(arg_7)\n )\n arg_0.execute(arg_8)"} +{"_id": "doc_3591", "title": "", "text": "def Func(arg_0: arg_1, arg_2, arg_3):\n \"\"\"Gets the name for a UNIQUE INDEX that applies\n to one or more keys in a hstore field.\n\n Arguments:\n table:\n The name of the table the field is\n a part of.\n\n field:\n The hstore field to create a\n UNIQUE INDEX for.\n\n key:\n The name of the hstore key\n to create the name for.\n\n This can also be a tuple\n of multiple names.\n\n Returns:\n The name for the UNIQUE index.\n \"\"\"\n arg_4 = '_'.join(arg_3)\n return '{table}_{field}_unique_{postfix}'.format(\n arg_0=arg_0,\n arg_2=arg_2.column,\n arg_4=arg_4\n )"} +{"_id": "doc_3592", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Iterates over the keys marked as \"unique\"\n in the specified field.\n\n Arguments:\n field:\n The field of which key's to\n iterate over.\n \"\"\"\n\n arg_2 = getattr(arg_1, 'uniqueness', None)\n if not arg_2:\n return\n\n for arg_3 in arg_2:\n arg_4 = arg_0._compose_keys(arg_3)\n yield arg_4"} +{"_id": "doc_3593", "title": "", "text": "def Func(arg_0, arg_1, arg_2: arg_3) -> None:\n \"\"\"Adds an extra condition to this join.\n\n Arguments:\n field:\n The field that the condition will apply to.\n\n value:\n The value to compare.\n \"\"\"\n\n arg_0.extra_conditions.append((arg_1, arg_2))"} +{"_id": "doc_3594", "title": "", "text": "def Func(arg_0, arg_1, arg_2) -> Tuple[str, List[Any]]:\n \"\"\"Compiles this JOIN into a SQL string.\"\"\"\n\n arg_3, arg_4 = super().Func(arg_1, arg_2)\n arg_5 = arg_1.quote_name_unless_alias\n\n # generate the extra conditions\n arg_6 = ' AND '.join([\n '{}.{} = %s'.format(\n arg_5(arg_0.table_name),\n arg_5(field.column)\n )\n for field, arg_8 in arg_0.extra_conditions\n ])\n\n # add to the existing params, so the connector will\n # actually nicely format the value for us\n for arg_7, arg_8 in arg_0.extra_conditions:\n arg_4.append(arg_8)\n\n # rewrite the sql to include the extra conditions\n arg_9 = arg_3.replace(')', ' AND {})'.format(arg_6))\n return arg_9, arg_4"} +{"_id": "doc_3595", "title": "", "text": "def Func(arg_0):\n \"\"\"Approximate the 95% confidence interval for Student's T distribution.\n\n Given the degrees of freedom, returns an approximation to the 95%\n confidence interval for the Student's T distribution.\n\n Args:\n df: An integer, the number of degrees of freedom.\n\n Returns:\n A float.\n \"\"\"\n arg_0 = 
int(round(arg_0))\n arg_1 = len(_T_DIST_95_CONF_LEVELS)\n if arg_0 >= 200:\n return 1.960\n if arg_0 >= 100:\n return 1.984\n if arg_0 >= 80:\n return 1.990\n if arg_0 >= 60:\n return 2.000\n if arg_0 >= 50:\n return 2.009\n if arg_0 >= 40:\n return 2.021\n if arg_0 >= arg_1:\n return _T_DIST_95_CONF_LEVELS[arg_1 - 1]\n return _T_DIST_95_CONF_LEVELS[arg_0]"} +{"_id": "doc_3596", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find the pooled sample variance for two samples.\n\n Args:\n sample1: one sample.\n sample2: the other sample.\n\n Returns:\n Pooled sample variance, as a float.\n \"\"\"\n arg_2 = len(arg_0) + len(arg_1) - 2\n arg_3 = statistics.mean(arg_0)\n arg_4 = ((x - arg_3) ** 2 for x in arg_0)\n arg_5 = statistics.mean(arg_1)\n arg_6 = ((x - arg_5) ** 2 for x in arg_1)\n\n return (math.fsum(arg_4) + math.fsum(arg_6)) / float(arg_2)"} +{"_id": "doc_3597", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determine whether two samples differ significantly.\n\n This uses a Student's two-sample, two-tailed t-test with alpha=0.95.\n\n Args:\n sample1: one sample.\n sample2: the other sample.\n\n Returns:\n (significant, t_score) where significant is a bool indicating whether\n the two samples differ significantly; t_score is the score from the\n two-sample T test.\n \"\"\"\n arg_2 = len(arg_0) + len(arg_1) - 2\n arg_3 = tdist95conf_level(arg_2)\n arg_4 = tscore(arg_0, arg_1)\n return (abs(arg_4) >= arg_3, arg_4)"} +{"_id": "doc_3598", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a topological sorting of nodes in a graph.\n\n roots - list of root nodes to search from\n getParents - function which returns the parents of a given node\n \"\"\"\n\n arg_2 = []\n arg_3 = set()\n\n # Use iterative version to avoid stack limits for large datasets\n arg_4 = [(node, 0) for node in arg_0]\n while arg_4:\n arg_5, arg_6 = arg_4.pop()\n if arg_6 == 0:\n # before recursing\n if arg_5 not in arg_3:\n arg_3.add(arg_5)\n arg_4.append((arg_5, 1))\n arg_4.extend((arg_7, 0) for arg_7 in arg_1(arg_5))\n else:\n # after recursing\n assert(arg_5 in arg_3)\n arg_2.append(arg_5)\n return arg_2"} +{"_id": "doc_3599", "title": "", "text": "def Func(arg_0):\n \"\"\"N-Queens solver.\n\n Args:\n queen_count: the number of queens to solve for. This is also the\n board size.\n\n Yields:\n Solutions to the problem. 
Each yielded value is looks like\n (3, 8, 2, 1, 4, ..., 6) where each number is the column position for the\n queen, and the index into the tuple indicates the row.\n \"\"\"\n arg_1 = range(arg_0)\n for arg_2 in permutations(arg_1):\n if (arg_0 == len(set(arg_2[arg_3] + arg_3 for arg_3 in arg_1))\n == len(set(arg_2[arg_3] - arg_3 for arg_3 in arg_1))):\n yield arg_2"} +{"_id": "doc_3600", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" uct tree search \"\"\"\n arg_2 = arg_1.color\n arg_3 = arg_0\n arg_4 = [arg_3]\n while True:\n arg_5 = arg_3.select(arg_1)\n if arg_5 == PASS:\n break\n arg_1.move(arg_5)\n arg_6 = arg_3.pos_child[arg_5]\n if not arg_6:\n arg_6 = arg_3.pos_child[arg_5] = UCTNode()\n arg_6.unexplored = arg_1.useful_moves()\n arg_6.pos = arg_5\n arg_6.parent = arg_3\n arg_4.append(arg_6)\n break\n arg_4.append(arg_6)\n arg_3 = arg_6\n arg_0.random_Funcout(arg_1)\n arg_0.update_path(arg_1, arg_2, arg_4)"} +{"_id": "doc_3601", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" random play until both players pass \"\"\"\n for arg_2 in range(MAXMOVES): # XXX while not self.finished?\n if arg_1.finished:\n break\n arg_1.move(arg_1.random_move())"} +{"_id": "doc_3602", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Filters out benchmarks not supported by both Pythons.\n\n Args:\n benchmarks: a set() of benchmark names\n bench_funcs: dict mapping benchmark names to functions\n python: the interpereter commands (as lists)\n\n Returns:\n The filtered set of benchmark names\n \"\"\"\n for arg_3 in list(arg_0):\n arg_4 = arg_1[arg_3]\n if getattr(arg_4, '_python2_only', False) and (3, 0) <= arg_2:\n arg_0.discard(arg_3)\n logging.info(\"Skipping Python2-only benchmark %s; \"\n \"not compatible with Python %s\" % (arg_3, arg_2))\n continue\n return arg_0"} +{"_id": "doc_3603", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Recursively expand name benchmark names.\n\n Args:\n bm_name: string naming a benchmark or benchmark group.\n\n Yields:\n Names of actual benchmarks, with all group names fully expanded.\n \"\"\"\n arg_2 = arg_1.get(arg_0)\n if arg_2:\n for arg_3 in arg_2:\n for arg_3 in Func(arg_3, arg_1):\n yield arg_3\n else:\n yield arg_0"} +{"_id": "doc_3604", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Initialize the strings we'll run the regexes against.\n\n The strings used in the benchmark are prefixed and suffixed by\n strings that are repeated n times.\n\n The sequence n_values contains the values for n.\n If n_values is None the values of n from the original benchmark\n are used.\n\n The generated list of strings is cached in the string_tables\n variable, which is indexed by n.\n\n Returns:\n A list of string prefix/suffix lengths.\n \"\"\"\n\n if arg_0 is None:\n arg_0 = (0, 5, 50, 250, 1000, 5000, 10000)\n\n arg_1 = {arg_4: gen_string_table(arg_4) for arg_4 in arg_0}\n arg_2 = gen_regex_table()\n\n arg_3 = []\n for arg_4 in arg_0:\n for arg_5 in xrange(len(arg_2)):\n arg_6 = arg_2[arg_5]\n arg_7 = arg_1[arg_4][arg_5]\n arg_3.append((arg_6, arg_7))\n return arg_3"} +{"_id": "doc_3605", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the domain of the B-Spline\"\"\"\n return (arg_0.knots[arg_0.degree - 1],\n arg_0.knots[len(arg_0.knots) - arg_0.degree])"} +{"_id": "doc_3606", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the messages.\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['from_date']\n\n 
logger.info(\"Fetching messages of '%s' - '%s' channel from %s\",\n arg_0.url, arg_0.channel, str(arg_3))\n\n arg_4 = True\n arg_5 = 0\n arg_6 = 0\n\n # Convert timestamp to integer for comparing\n arg_7 = int(arg_3.timestamp() * 1000)\n\n while arg_4:\n arg_8 = arg_0.client.posts(arg_0.channel, arg_5=arg_5)\n\n arg_9 = arg_6\n\n for arg_10 in arg_0._parse_posts(arg_8):\n if arg_10['update_at'] < arg_7:\n arg_4 = False\n break\n\n # Fetch user data\n arg_11 = arg_10['user_id']\n arg_12 = arg_0._get_or_fetch_user(arg_11)\n arg_10['user_data'] = arg_12\n\n yield arg_10\n arg_6 += 1\n\n if arg_4:\n # If no new posts were fetched; stop the process\n if arg_9 == arg_6:\n arg_4 = False\n else:\n arg_5 += 1\n\n logger.info(\"Fetch process completed: %s posts fetched\", arg_6)"} +{"_id": "doc_3607", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Fetch the entries from the url.\n\n The method retrieves all entries from a RSS url\n\n :param category: the category of items to Func\n\n :returns: a generator of entries\n \"\"\"\n arg_3 = {}\n arg_4 = super().Func(arg_1, **arg_3)\n\n return arg_4"} +{"_id": "doc_3608", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the entries\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n logger.info(\"Looking for rss entries at feed '%s'\", arg_0.url)\n\n arg_3 = 0 # number of entries\n\n arg_4 = arg_0.client.get_entries()\n arg_5 = arg_0.parse_feed(arg_4)['entries']\n for arg_6 in arg_5:\n yield arg_6\n arg_3 += 1\n\n logger.info(\"Total number of entries: %i\", arg_3)"} +{"_id": "doc_3609", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the RSS argument parser.\"\"\"\n\n arg_1 = BackendCommandArgumentParser(arg_0.BACKEND.CATEGORIES,\n archive=True)\n\n # Required arguments\n arg_1.parser.add_argument('url',\n help=\"URL of the RSS feed\")\n\n return arg_1"} +{"_id": "doc_3610", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Fetch the bugs from the repository.\n\n The method retrieves, from a Bugzilla repository, the bugs\n updated since the given date.\n\n :param category: the category of items to Func\n :param from_date: obtain bugs updated since this date\n\n :returns: a generator of bugs\n \"\"\"\n if not arg_3:\n arg_3 = arg_4\n\n arg_5 = {'from_date': arg_3}\n arg_6 = super().Func(arg_1, **arg_5)\n\n return arg_6"} +{"_id": "doc_3611", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get issue notes\"\"\"\n\n arg_2 = []\n\n arg_3 = arg_0.client.notes(GitLabClient.ISSUES, arg_1)\n\n for arg_4 in arg_3:\n\n for arg_5 in json.loads(arg_4):\n arg_6 = arg_5['id']\n arg_5['award_emoji_data'] = \\\n arg_0.__get_note_award_emoji(GitLabClient.ISSUES, arg_1, arg_6)\n arg_2.append(arg_5)\n\n return arg_2"} +{"_id": "doc_3612", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get merge versions\"\"\"\n\n arg_2 = []\n\n arg_3 = arg_0.client.merge_versions(arg_1)\n\n for arg_4 in arg_3:\n for arg_5 in json.loads(arg_4):\n arg_6 = arg_5['id']\n arg_7 = arg_0.client.merge_version(arg_1, arg_6)\n arg_8 = json.loads(arg_7)\n\n arg_8.pop('diffs', None)\n arg_2.append(arg_8)\n\n return arg_2"} +{"_id": "doc_3613", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Get the merge requests from pagination\"\"\"\n\n arg_2 = {\n 'state': 'all',\n 'order_by': 'updated_at',\n 'sort': 'asc',\n 'view': 'simple',\n 'per_page': PER_PAGE\n }\n\n if arg_1:\n arg_2['updated_after'] = arg_1.isoformat()\n\n return 
arg_0.fetch_items(GitLabClient.MERGES, arg_2)"} +{"_id": "doc_3614", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the merge versions from pagination\"\"\"\n\n arg_2 = {\n 'order_by': 'updated_at',\n 'sort': 'asc',\n 'per_page': PER_PAGE\n }\n\n arg_3 = urijoin(GitLabClient.MERGES, str(arg_1), GitLabClient.VERSIONS)\n return arg_0.fetch_items(arg_3, arg_2)"} +{"_id": "doc_3615", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get the Func from pagination\"\"\"\n\n arg_3 = {\n 'order_by': 'updated_at',\n 'sort': 'asc',\n 'per_page': PER_PAGE\n }\n\n arg_4 = urijoin(arg_1, str(arg_2), GitLabClient.NOTES)\n\n return arg_0.fetch_items(arg_4, arg_3)"} +{"_id": "doc_3616", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Get emojis of a note\"\"\"\n\n arg_4 = {\n 'order_by': 'updated_at',\n 'sort': 'asc',\n 'per_page': PER_PAGE\n }\n\n arg_5 = urijoin(arg_1, str(arg_2), GitLabClient.NOTES,\n str(arg_3), GitLabClient.EMOJI)\n\n return arg_0.fetch_items(arg_5, arg_4)"} +{"_id": "doc_3617", "title": "", "text": "def Func(arg_0):\n \"\"\"Initialize rate limit information\"\"\"\n\n arg_1 = urijoin(arg_0.base_url, 'projects', arg_0.owner + '%2F' + arg_0.repository)\n try:\n arg_2 = super().fetch(arg_1)\n arg_0.update_rate_limit(arg_2)\n except requests.exceptions.HTTPError as error:\n if error.response.status_code == 401:\n raise error\n else:\n logger.warning(\"Rate limit not initialized: %s\", error)"} +{"_id": "doc_3618", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the GitLab argument parser.\"\"\"\n\n arg_1 = BackendCommandArgumentParser(arg_0.BACKEND.CATEGORIES,\n from_date=True,\n token_auth=True,\n archive=True)\n\n # GitLab options\n arg_2 = arg_1.parser.add_argument_group('GitLab arguments')\n arg_2.add_argument('--enterprise-url', dest='base_url',\n help=\"Base URL for GitLab Enterprise instance\")\n arg_2.add_argument('--sleep-for-rate', dest='sleep_for_rate',\n action='store_true',\n help=\"sleep for getting more rate\")\n arg_2.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep',\n default=MIN_RATE_LIMIT, type=int,\n help=\"sleep until reset when the rate limit \\\n reaches this value\")\n arg_2.add_argument('--blacklist-ids', dest='blacklist_ids',\n nargs='*', type=int,\n help=\"Ids of items that must not be retrieved.\")\n\n # Generic client options\n arg_2.add_argument('--max-retries', dest='max_retries',\n default=MAX_RETRIES, type=int,\n help=\"number of API call retries\")\n arg_2.add_argument('--sleep-time', dest='sleep_time',\n default=DEFAULT_SLEEP_TIME, type=int,\n help=\"sleeping time between API call retries\")\n\n # Positional arguments\n arg_1.parser.add_argument('owner',\n help=\"GitLab owner\")\n arg_1.parser.add_argument('repository',\n help=\"GitLab repository\")\n\n return arg_1"} +{"_id": "doc_3619", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Fetch the messages from the channel.\n\n This method Funces the messages stored on the channel that were\n sent since the given date.\n\n :param category: the category of items to Func\n :param from_date: obtain messages sent since this date\n\n :returns: a generator of messages\n \"\"\"\n if not arg_3:\n arg_3 = arg_4\n\n arg_3 = datetime_to_utc(arg_3)\n arg_5 = datetime_utcnow().timestamp()\n\n arg_6 = {'from_date': arg_3, 'latest': arg_5}\n arg_7 = super().Func(arg_1, **arg_6)\n\n return arg_7"} +{"_id": "doc_3620", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch the number of members in a conversation, which is a supertype 
for public and\n private ones, DM and group DM.\n\n :param conversation: the ID of the conversation\n \"\"\"\n arg_2 = 0\n\n arg_3 = arg_0.RCONVERSATION_INFO\n\n arg_4 = {\n arg_0.PCHANNEL: arg_1,\n }\n\n arg_5 = arg_0._fetch(arg_3, arg_4)\n arg_6 = json.loads(arg_5)\n\n arg_2 += len(arg_6[\"members\"])\n while 'next_cursor' in arg_6['response_metadata'] and arg_6['response_metadata']['next_cursor']:\n arg_4['cursor'] = arg_6['response_metadata']['next_cursor']\n arg_5 = arg_0._fetch(arg_3, arg_4)\n arg_6 = json.loads(arg_5)\n arg_2 += len(arg_6[\"members\"])\n\n return arg_2"} +{"_id": "doc_3621", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch Func info.\"\"\"\n\n arg_2 = arg_0.RUSER_INFO\n\n arg_3 = {\n arg_0.PUSER: arg_1\n }\n\n arg_4 = arg_0._fetch(arg_2, arg_3)\n\n return arg_4"} +{"_id": "doc_3622", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the Slack argument parser.\"\"\"\n\n arg_1 = BackendCommandArgumentParser(arg_0.BACKEND.CATEGORIES,\n from_date=True,\n token_auth=True,\n archive=True)\n\n # Backend token is required\n arg_2 = arg_1.parser._option_string_actions['--api-token']\n arg_2.required = True\n\n # Slack options\n arg_4 = arg_1.parser.add_argument_group('Slack arguments')\n arg_4.add_argument('--max-items', dest='max_items',\n type=int, default=MAX_ITEMS,\n help=\"Maximum number of items requested on the same query\")\n\n # Required arguments\n arg_1.parser.add_argument('channel',\n help=\"Slack channel identifier\")\n\n return arg_1"} +{"_id": "doc_3623", "title": "", "text": "def Func(arg_0):\n \"\"\"Extracts and coverts the update time from a Bugzilla item.\n\n The timestamp is extracted from 'delta_ts' field. This date is\n converted to UNIX timestamp format. Due Bugzilla servers ignore\n the timezone on HTTP requests, it will be ignored during the\n conversion, too.\n\n :param item: item generated by the backend\n\n :returns: a UNIX timestamp\n \"\"\"\n arg_1 = arg_0['delta_ts'][0]['__text__']\n arg_1 = str_to_datetime(arg_1)\n arg_1 = arg_1.replace(tzinfo=dateutil.tz.tzutc())\n\n return arg_1.timestamp()"} +{"_id": "doc_3624", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a Bugilla bugs details XML stream.\n\n This method returns a generator which parses the given XML,\n producing an iterator of dictionaries. Each dictionary stores\n the information related to a parsed bug.\n\n If the given XML is invalid or does not contains any bug, the\n method will raise a ParseError exception.\n\n :param raw_xml: XML string to parse\n\n :returns: a generator of parsed bugs\n\n :raises ParseError: raised when an error occurs parsing\n the given XML stream\n \"\"\"\n arg_1 = xml_to_dict(arg_0)\n\n if 'bug' not in arg_1:\n arg_2 = \"No bugs found. 
XML stream seems to be invalid.\"\n raise ParseError(arg_2=arg_2)\n\n for arg_3 in arg_1['bug']:\n yield arg_3"} +{"_id": "doc_3625", "title": "", "text": "def Func(arg_0):\n \"\"\"Logout from the server.\"\"\"\n\n arg_1 = {\n arg_0.PLOGOUT: '1'\n }\n\n arg_0.call(arg_0.CGI_LOGIN, arg_1)\n arg_0._close_http_session()\n\n logger.debug(\"Bugzilla user logged out from %s\",\n arg_0.base_url)"} +{"_id": "doc_3626", "title": "", "text": "def Func(arg_0):\n \"\"\"Get Func information in XML format.\"\"\"\n\n arg_1 = {\n arg_0.PCTYPE: arg_0.CTYPE_XML\n }\n\n arg_2 = arg_0.call(arg_0.CGI_BUG, arg_1)\n\n return arg_2"} +{"_id": "doc_3627", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Get a summary of bugs in CSV format.\n\n :param from_date: retrieve bugs that where updated from that date\n \"\"\"\n if not arg_0.version:\n arg_0.version = arg_0.__fetch_version()\n\n if arg_0.version in arg_0.OLD_STYLE_VERSIONS:\n arg_4 = 'Last+Changed'\n else:\n arg_4 = 'changeddate'\n\n arg_5 = arg_1.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n arg_6 = {\n arg_0.PCHFIELD_FROM: arg_5,\n arg_0.PCTYPE: arg_0.CTYPE_CSV,\n arg_0.PLIMIT: arg_0.max_bugs_csv,\n arg_0.PORDER: arg_4\n }\n\n arg_7 = arg_0.call(arg_0.CGI_BUGLIST, arg_6)\n\n return arg_7"} +{"_id": "doc_3628", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Get the information of a list of Func in XML format.\n\n :param bug_ids: list of bug identifiers\n \"\"\"\n arg_2 = {\n arg_0.PBUG_ID: arg_1,\n arg_0.PCTYPE: arg_0.CTYPE_XML,\n arg_0.PEXCLUDE_FIELD: 'attachmentdata'\n }\n\n arg_3 = arg_0.call(arg_0.CGI_BUG, arg_2)\n\n return arg_3"} +{"_id": "doc_3629", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the activity of a bug in HTML format.\n\n :param bug_id: bug identifier\n \"\"\"\n arg_2 = {\n arg_0.PBUG_ID: arg_1\n }\n\n arg_3 = arg_0.call(arg_0.CGI_BUG_ACTIVITY, arg_2)\n\n return arg_3"} +{"_id": "doc_3630", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4, arg_5=None,\n arg_6=False):\n \"\"\"Fetch the events from the server.\n\n This method Funces those events of a group stored on the server\n that were updated since the given date. 
Data comments and rsvps\n are included within each event.\n\n :param category: the category of items to Func\n :param from_date: obtain events updated since this date\n :param to_date: obtain events updated before this date\n :param filter_classified: remove classified fields from the resulting items\n\n :returns: a generator of events\n \"\"\"\n if not arg_3:\n arg_3 = arg_4\n\n arg_3 = datetime_to_utc(arg_3)\n\n arg_7 = {\"from_date\": arg_3, \"to_date\": arg_5}\n arg_8 = super().Func(arg_1,\n arg_6=arg_6,\n **arg_7)\n\n return arg_8"} +{"_id": "doc_3631", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the events\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['from_date']\n arg_4 = arg_2['to_date']\n\n logger.info(\"Fetching events of '%s' group from %s to %s\",\n arg_0.group, str(arg_3),\n str(arg_4) if arg_4 else '--')\n\n arg_5 = datetime_to_utc(arg_4).timestamp() if arg_4 else None\n\n arg_6 = 0\n arg_7 = False\n\n arg_8 = arg_0.client.events(arg_0.group, arg_3=arg_3)\n\n for arg_9 in arg_8:\n arg_10 = [arg_11 for arg_11 in arg_0.parse_json(arg_9)]\n\n for arg_11 in arg_10:\n arg_12 = arg_11['id']\n\n arg_11['comments'] = arg_0.__fetch_and_parse_comments(arg_12)\n arg_11['rsvps'] = arg_0.__fetch_and_parse_rsvps(arg_12)\n\n # Check events updated before 'to_date'\n arg_13 = arg_0.metadata_updated_on(arg_11)\n\n if arg_5 and arg_13 >= arg_5:\n arg_7 = True\n continue\n\n yield arg_11\n arg_6 += 1\n\n if arg_7:\n break\n\n logger.info(\"Fetch process completed: %s events fetched\", arg_6)"} +{"_id": "doc_3632", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fetch the Func of a given event.\"\"\"\n\n arg_3 = urijoin(arg_1, arg_0.REVENTS, arg_2, arg_0.RRSVPS)\n\n # Same hack that in 'events' method\n arg_4 = '?' 
+ arg_0.PFIELDS + '=' + ','.join(arg_0.VRSVP_FIELDS)\n arg_4 += '&' + arg_0.PRESPONSE + '=' + ','.join(arg_0.VRESPONSE)\n arg_3 += arg_4\n\n arg_5 = {\n arg_0.PPAGE: arg_0.max_items\n }\n\n for arg_6 in arg_0._fetch(arg_3, arg_5):\n yield arg_6"} +{"_id": "doc_3633", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch an Askbot HTML question body.\n\n The method fetchs the HTML question retrieving the\n question body of the item question received\n\n :param question: item with the question itself\n\n :returns: a list of HTML page/s for the question\n \"\"\"\n arg_2 = []\n\n arg_3 = 1\n arg_4 = True\n\n while arg_4:\n try:\n arg_5 = arg_0.client.get_html_question(arg_1['id'], arg_3)\n arg_2.append(arg_5)\n arg_6 = arg_0.ab_parser.parse_number_of_html_pages(arg_5)\n\n if arg_3 == arg_6:\n arg_4 = False\n\n arg_3 = arg_3 + 1\n except requests.exceptions.TooManyRedirects as e:\n logger.warning(\"%s, data not retrieved for question %s\", e, arg_1['id'])\n arg_4 = False\n\n return arg_2"} +{"_id": "doc_3634", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch all the comments of an Askbot question and answers.\n\n The method fetchs the list of every comment existing in a question and\n its answers.\n\n :param question: item with the question itself\n\n :returns: a list of comments with the ids as hashes\n \"\"\"\n arg_2 = {}\n arg_2[arg_1['id']] = json.loads(arg_0.client.get_comments(arg_1['id']))\n for arg_3 in arg_1['answer_ids']:\n arg_2[arg_3] = json.loads(arg_0.client.get_comments(arg_3))\n return arg_2"} +{"_id": "doc_3635", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Build an Askbot HTML response.\n\n The method puts together all the information regarding a question\n\n :param html_question: array of HTML raw pages\n :param question: question object from the API\n :param comments: list of comments to add\n\n :returns: a dict item with the parsed question information\n \"\"\"\n arg_3 = {}\n # Parse the user info from the soup container\n arg_4 = AskbotParser.parse_question_container(arg_0[0])\n # Add the info to the question object\n arg_3.update(arg_4)\n # Add the comments of the question (if any)\n if arg_2[int(arg_1['id'])]:\n arg_3['comments'] = arg_2[int(arg_1['id'])]\n\n arg_5 = []\n\n for arg_6 in arg_0:\n arg_5.extend(AskbotParser.parse_answers(arg_6))\n\n if len(arg_5) != 0:\n arg_3['answers'] = arg_5\n for arg_7 in arg_3['answers']:\n if arg_2[int(arg_7['id'])]:\n arg_7['comments'] = arg_2[int(arg_7['id'])]\n\n return arg_3"} +{"_id": "doc_3636", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve a question page using the API.\n\n :param page: page to retrieve\n \"\"\"\n arg_2 = 1\n arg_3 = True\n\n arg_1 = urijoin(arg_0.base_url, arg_1)\n while arg_3:\n\n try:\n arg_4 = {\n 'page': arg_2,\n 'sort': arg_0.ORDER_API\n }\n\n arg_5 = arg_0.fetch(arg_1, payload=arg_4)\n\n arg_6 = arg_5.text\n\n arg_7 = json.loads(arg_6)\n arg_8 = arg_7['pages']\n\n logger.debug(\"Fetching questions from '%s': page %s/%s\",\n arg_0.base_url, arg_2, arg_8)\n\n if arg_2 == arg_8:\n arg_3 = False\n\n arg_2 = arg_2 + 1\n yield arg_7\n\n except requests.exceptions.TooManyRedirects as e:\n logger.warning(\"%s, data not retrieved for resource %s\", e, arg_1)\n arg_3 = False"} +{"_id": "doc_3637", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"Retrieve a raw HTML question and all it's information.\n\n :param question_id: question identifier\n :param page: page to retrieve\n \"\"\"\n arg_3 = urijoin(arg_0.base_url, arg_0.HTML_QUESTION, arg_1)\n 
arg_4 = {\n 'page': arg_2,\n 'sort': arg_0.ORDER_HTML\n }\n\n arg_5 = arg_0.fetch(arg_3, payload=arg_4)\n return arg_5.text"} +{"_id": "doc_3638", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve a list of comments by a given id.\n\n :param object_id: object identifiere\n \"\"\"\n arg_2 = urijoin(arg_0.base_url, arg_0.COMMENTS if arg_0._use_new_urls else arg_0.COMMENTS_OLD)\n arg_3 = {\n 'post_id': arg_1,\n 'post_type': 'answer',\n 'avatar_size': 0\n }\n arg_4 = {'X-Requested-With': 'XMLHttpRequest'}\n\n try:\n arg_5 = arg_0.fetch(arg_2, payload=arg_3, arg_4=arg_4)\n arg_6 = arg_5.text\n except requests.exceptions.HTTPError as ex:\n if ex.response.status_code == 404:\n logger.debug(\"Comments URL did not work. Using old URL schema.\")\n arg_0._use_new_urls = False\n arg_2 = urijoin(arg_0.base_url, arg_0.COMMENTS_OLD)\n arg_5 = arg_0.fetch(arg_2, payload=arg_3, arg_4=arg_4)\n arg_6 = arg_5.text\n elif ex.response.status_code == 500:\n logger.warning(\"Comments not retrieved due to %s\", ex)\n arg_6 = '[]'\n else:\n raise ex\n\n return arg_6"} +{"_id": "doc_3639", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse the question info container of a given HTML question.\n\n The method parses the information available in the question information\n container. The container can have up to 2 elements: the first one\n contains the information related with the user who generated the question\n and the date (if any). The second one contains the date of the updated,\n and the user who updated it (if not the same who generated the question).\n\n :param html_question: raw HTML question element\n\n :returns: an object with the parsed information\n \"\"\"\n arg_1 = {}\n arg_2 = bs4.BeautifulSoup(arg_0, \"html.parser\")\n arg_3 = AskbotParser._find_question_container(arg_2)\n arg_4 = arg_3.select(\"div.post-update-info\")\n arg_5 = arg_4[0]\n arg_1['author'] = AskbotParser.parse_user_info(arg_5)\n try:\n arg_4[1]\n except IndexError:\n pass\n else:\n arg_6 = arg_4[1]\n if AskbotParser.parse_user_info(arg_6):\n arg_1['updated_by'] = AskbotParser.parse_user_info(arg_6)\n\n return arg_1"} +{"_id": "doc_3640", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse the answers of a given HTML question.\n\n The method parses the answers related with a given HTML question,\n as well as all the comments related to the answer.\n\n :param html_question: raw HTML question element\n\n :returns: a list with the answers\n \"\"\"\n\n def parse_answer_container(arg_1):\n \"\"\"Parse the answer info container of a given HTML question.\n\n The method parses the information available in the answer information\n container. The container can have up to 2 elements: the first one\n contains the information related with the user who generated the question\n and the date (if any). 
The second one contains the date of the updated,\n and the user who updated it (if not the same who generated the question).\n\n :param update_info: beautiful soup update_info container element\n\n :returns: an object with the parsed information\n \"\"\"\n arg_2 = {}\n arg_3 = arg_1[0]\n arg_4 = arg_3.abbr.attrs[\"title\"]\n # Convert date to UNIX timestamp\n arg_2['added_at'] = str(str_to_datetime(arg_4).timestamp())\n arg_2['answered_by'] = AskbotParser.parse_user_info(arg_3)\n try:\n arg_1[1]\n except IndexError:\n pass\n else:\n arg_5 = arg_1[1]\n arg_6 = arg_5.abbr.attrs[\"title\"]\n # Convert date to UNIX timestamp\n arg_2['updated_at'] = str(str_to_datetime(arg_6).timestamp())\n if AskbotParser.parse_user_info(arg_5):\n arg_2['updated_by'] = AskbotParser.parse_user_info(arg_5)\n return arg_2\n\n arg_7 = []\n # Select all the answers\n arg_8 = bs4.BeautifulSoup(arg_0, \"html.parser\")\n arg_9 = arg_8.select(\"div.answer\")\n for arg_10 in arg_9:\n arg_11 = arg_10.attrs[\"data-post-id\"]\n arg_12 = arg_10.select(\"div.vote-number\")[0].text\n arg_13 = arg_10.select(\"div.answer-img-accept\")[0].get('title').endswith(\"correct\")\n # Select the body of the answer\n arg_14 = arg_10.select(\"div.post-body\")\n # Get the user information container and parse it\n arg_1 = arg_14[0].select(\"div.post-update-info\")\n arg_15 = parse_answer_container(arg_1)\n # Remove the update-info-container div to be able to get the body\n arg_14[0].div.extract().select(\"div.post-update-info-container\")\n # Override the body with a clean one\n arg_14 = arg_14[0].get_text(strip=True)\n # Generate the answer object\n arg_16 = {'id': arg_11,\n 'score': arg_12,\n 'summary': arg_14,\n 'accepted': arg_13\n }\n # Update the object with the information in the answer container\n arg_16.update(arg_15)\n arg_7.append(arg_16)\n return arg_7"} +{"_id": "doc_3641", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse number of answer pages to paginate over them.\n\n :param html_question: raw HTML question element\n\n :returns: an integer with the number of pages\n \"\"\"\n arg_1 = bs4.BeautifulSoup(arg_0, \"html.parser\")\n try:\n arg_1.select('div.paginator')[0]\n except IndexError:\n return 1\n else:\n return int(arg_1.select('div.paginator')[0].attrs['data-num-pages'])"} +{"_id": "doc_3642", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse the user information of a given HTML container.\n\n The method parses all the available user information in the container.\n If the class \"user-info\" exists, the method will get all the available\n information in the container. If not, if a class \"tip\" exists, it will be\n a wiki post with no user associated. Else, it can be an empty container.\n\n :param update_info: beautiful soup answer container element\n\n :returns: an object with the parsed information\n \"\"\"\n arg_1 = {}\n if arg_0.select(\"div.user-info\"):\n # Get all the elements in the container. 
First contains the user\n # information, second one (if exists), the website of the user.\n arg_2 = arg_0.select(\"div.user-info\")[0].find_all(\"a\")\n arg_3 = arg_2[0].attrs[\"href\"]\n arg_1['id'] = re.search(r'\\d+', arg_3).group(0)\n arg_1['username'] = arg_2[0].text\n arg_1['reputation'] = arg_0.select('span.reputation-score')[0].text\n arg_1['badges'] = arg_0.select(\"span.badges\")[0].attrs[\"title\"]\n try:\n arg_2[1]\n except IndexError:\n pass\n else:\n arg_1['website'] = arg_2[1].attrs[\"href\"]\n if arg_0.select(\"img.flag\"):\n arg_4 = arg_0.select(\"img.flag\")[0].attrs[\"alt\"]\n arg_1['country'] = re.sub(\"flag of \", \"\", arg_4)\n\n return arg_1"} +{"_id": "doc_3643", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\" Specific fetch for gerrit 2.8 version.\n\n Get open and closed reviews in different queries.\n Take the newer review from both lists and iterate.\n \"\"\"\n\n # Convert date to Unix time\n arg_3 = datetime_to_utc(arg_1)\n arg_3 = arg_3.timestamp()\n\n arg_4 = \"status:open\"\n arg_5 = \"status:closed\"\n\n arg_6 = arg_0.client.next_retrieve_group_item()\n arg_7 = arg_0.client.next_retrieve_group_item()\n arg_8 = arg_0._get_reviews(arg_6, arg_4)\n arg_9 = arg_0._get_reviews(arg_7, arg_5)\n arg_10 = len(arg_8)\n arg_11 = len(arg_9)\n\n while arg_8 or arg_9:\n if arg_8 and arg_9:\n if arg_8[0]['lastUpdated'] >= arg_9[0]['lastUpdated']:\n arg_12 = arg_8.pop(0)\n arg_13 = arg_12\n else:\n arg_14 = arg_9.pop(0)\n arg_13 = arg_14\n elif arg_9:\n arg_14 = arg_9.pop(0)\n arg_13 = arg_14\n else:\n arg_12 = arg_8.pop(0)\n arg_13 = arg_12\n\n arg_15 = arg_13['lastUpdated']\n if arg_15 <= arg_3:\n logger.debug(\"No more updates for %s\" % (arg_0.hostname))\n break\n else:\n yield arg_13\n\n if not arg_8 and arg_10 >= arg_0.max_reviews:\n arg_6 = arg_0.client.next_retrieve_group_item(arg_6, arg_12)\n arg_8 = arg_0._get_reviews(arg_6, arg_4)\n arg_10 = len(arg_8)\n if not arg_9 and arg_11 >= arg_0.max_reviews:\n arg_7 = arg_0.client.next_retrieve_group_item(arg_7, arg_14)\n arg_9 = arg_0._get_reviews(arg_7, arg_5)\n arg_11 = len(arg_9)"} +{"_id": "doc_3644", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the Gerrit server Func.\"\"\"\n\n if arg_0._Func:\n return arg_0._Func\n\n arg_1 = arg_0.gerrit_cmd + \" %s \" % (GerritClient.CMD_VERSION)\n\n logger.debug(\"Getting Func: %s\" % (arg_1))\n arg_2 = arg_0.__execute(arg_1)\n arg_2 = str(arg_2, \"UTF-8\")\n logger.debug(\"Gerrit Func: %s\" % (arg_2))\n\n # output: gerrit Func 2.10-rc1-988-g333a9dd\n arg_3 = re.match(GerritClient.VERSION_REGEX, arg_2)\n\n if not arg_3:\n arg_4 = \"Invalid gerrit Func %s\" % arg_2\n raise BackendError(arg_4=arg_4)\n\n try:\n arg_5 = int(arg_3.group(1))\n arg_6 = int(arg_3.group(2))\n except Exception:\n arg_4 = \"Gerrit client could not determine the server Func.\"\n raise BackendError(arg_4=arg_4)\n\n arg_0._Func = [arg_5, arg_6]\n return arg_0._Func"} +{"_id": "doc_3645", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Get the Func starting from last_item.\"\"\"\n\n arg_3 = arg_0._get_gerrit_cmd(arg_1, arg_2)\n\n logger.debug(\"Getting Func with command: %s\", arg_3)\n arg_4 = arg_0.__execute(arg_3)\n arg_4 = str(arg_4, \"UTF-8\")\n\n return arg_4"} +{"_id": "doc_3646", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute gerrit command against the archive\"\"\"\n\n arg_1 = arg_0.sanitize_for_archive(arg_1)\n arg_2 = arg_0.archive.retrieve(arg_1, None, None)\n\n if isinstance(arg_2, RuntimeError):\n raise arg_2\n\n return arg_2"} +{"_id": 
"doc_3647", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute gerrit command with retry if it fails\"\"\"\n\n arg_2 = None # data result from the cmd execution\n arg_3 = 0\n\n while arg_3 < arg_0.MAX_RETRIES:\n try:\n arg_2 = subprocess.check_output(arg_1, shell=True)\n break\n except subprocess.CalledProcessError as ex:\n logger.error(\"gerrit cmd %s failed: %s\", arg_1, ex)\n time.sleep(arg_0.RETRY_WAIT * arg_3)\n arg_3 += 1\n\n if arg_2 is None:\n arg_2 = RuntimeError(arg_1 + \" failed \" + str(arg_0.MAX_RETRIES) + \" times. Giving up!\")\n\n if arg_0.archive:\n arg_1 = arg_0.sanitize_for_archive(arg_1)\n arg_0.archive.store(arg_1, None, None, arg_2)\n\n if isinstance(arg_2, RuntimeError):\n raise arg_2\n\n return arg_2"} +{"_id": "doc_3648", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get data associated to an issue\"\"\"\n\n arg_2 = arg_0.client.issue(arg_1)\n arg_3 = json.loads(arg_2)\n\n return arg_3"} +{"_id": "doc_3649", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get attachments of an issue\"\"\"\n\n for arg_2 in arg_0.client.issue_collection(arg_1, \"attachments\"):\n arg_3 = json.loads(arg_2)\n\n for arg_4 in arg_3['entries']:\n yield arg_4"} +{"_id": "doc_3650", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get activities on an issue\"\"\"\n\n for arg_2 in arg_0.client.issue_collection(arg_1, \"activity\"):\n arg_3 = json.loads(arg_2)\n\n for arg_4 in arg_3['entries']:\n arg_4['person_data'] = arg_0.__fetch_user_data('{PERSON}', arg_4['person_link'])\n yield arg_4"} +{"_id": "doc_3651", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get data associated to an user\"\"\"\n\n arg_3 = arg_0.client.user_name(arg_2)\n\n arg_4 = {}\n\n if not arg_3:\n return arg_4\n\n arg_5 = arg_0.client.user(arg_3)\n arg_4 = json.loads(arg_5)\n\n return arg_4"} +{"_id": "doc_3652", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the user data by URL\"\"\"\n\n Func = None\n\n if arg_1 in arg_0._users:\n return arg_0._users[arg_1]\n\n arg_3 = arg_0.__get_url(\"~\" + arg_1)\n\n logger.info(\"Getting info for %s\" % (arg_3))\n\n try:\n arg_4 = arg_0.__send_request(arg_3)\n Func = arg_4\n except requests.exceptions.HTTPError as e:\n if e.response.status_code in [404, 410]:\n logger.warning(\"Data is not available - %s\", arg_3)\n Func = '{}'\n else:\n raise e\n\n arg_0._users[arg_1] = Func\n\n return Func"} +{"_id": "doc_3653", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the Func data by its ID\"\"\"\n\n arg_2 = urijoin(\"bugs\", str(arg_1))\n arg_3 = arg_0.__get_url(arg_2)\n arg_4 = arg_0.__send_request(arg_3)\n\n return arg_4"} +{"_id": "doc_3654", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get a collection list of a given issue\"\"\"\n\n arg_3 = urijoin(\"bugs\", str(arg_1), arg_2)\n arg_4 = arg_0.__get_url(arg_3)\n arg_5 = {'ws.size': arg_0.items_per_page, 'ws.start': 0, 'order_by': 'date_last_updated'}\n\n arg_6 = arg_0.__fetch_items(arg_3=arg_4, arg_5=arg_5)\n\n return arg_6"} +{"_id": "doc_3655", "title": "", "text": "def Func(arg_0):\n \"\"\"Build URL project\"\"\"\n\n if arg_0.package:\n arg_1 = arg_0.__get_url_distribution_package()\n else:\n arg_1 = arg_0.__get_url_distribution()\n\n return arg_1"} +{"_id": "doc_3656", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Fetch the groupsio paginated subscriptions for a given token\n\n :param per_page: number of subscriptions per page\n\n :returns: an iterator of subscriptions\n \"\"\"\n arg_3 = urijoin(GROUPSIO_API_URL, 
arg_0.GET_SUBSCRIPTIONS)\n logger.debug(\"Get groupsio paginated subscriptions from \" + arg_3)\n\n arg_4 = True\n arg_5 = {\n \"limit\": arg_1\n }\n\n while arg_4:\n arg_6 = arg_0.__fetch(arg_3, arg_5)\n arg_7 = arg_6.json()\n Func = arg_7['data']\n yield Func\n\n arg_9 = arg_7['total_count']\n logger.debug(\"Subscriptions: %i/%i\" % (arg_7['end_item'], arg_9))\n\n arg_5['page_token'] = arg_7['next_page_token']\n arg_4 = arg_7['has_more']"} +{"_id": "doc_3657", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fetch requests from groupsio API\"\"\"\n\n arg_3 = requests.get(arg_1, params=arg_2, auth=arg_0.auth, verify=arg_0.verify)\n try:\n arg_3.raise_for_status()\n except requests.exceptions.HTTPError as e:\n raise e\n\n return arg_3"} +{"_id": "doc_3658", "title": "", "text": "def Func(*arg_0):\n \"\"\"Generate a UUID based on the given parameters.\n\n The UUID will be the SHA1 of the concatenation of the values\n from the list. The separator bewteedn these values is ':'.\n Each value must be a non-empty string, otherwise, the function\n will raise an exception.\n\n :param *args: list of arguments used to generate the UUID\n\n :returns: a universal unique identifier\n\n :raises ValueError: when anyone of the values is not a string,\n is empty or `None`.\n \"\"\"\n def check_value(arg_1):\n if not isinstance(arg_1, str):\n raise ValueError(\"%s value is not a string instance\" % str(arg_1))\n elif not arg_1:\n raise ValueError(\"value cannot be None or empty\")\n else:\n return arg_1\n\n arg_2 = ':'.join(map(check_value, arg_0))\n\n arg_3 = hashlib.sha1(arg_2.encode('utf-8', errors='surrogateescape'))\n arg_4 = arg_3.hexdigest()\n\n return arg_4"} +{"_id": "doc_3659", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4):\n \"\"\"Fetch items from an archive manager.\n\n Generator to get the items of a category (previously fetched\n by the given backend class) from an archive manager. Only those\n items archived after the given date will be returned.\n\n The parameters needed to initialize `backend` and get the\n items are given using `backend_args` dict parameter.\n\n :param backend_class: backend class to retrive items\n :param backend_args: dict of arguments needed to retrieve the items\n :param manager: archive manager where the items will be retrieved\n :param category: category of the items to retrieve\n :param archived_after: return items archived after this date\n\n :returns: a generator of archived items\n \"\"\"\n arg_5 = find_signature_parameters(arg_0.__init__,\n arg_1)\n arg_6 = arg_0(**arg_5)\n\n arg_7 = arg_2.search(arg_6.origin,\n arg_6.__class__.__name__,\n arg_3,\n arg_4)\n\n for arg_8 in arg_7:\n arg_6.archive = Archive(arg_8)\n arg_10 = arg_6.Func()\n\n try:\n for arg_11 in arg_10:\n yield arg_11\n except ArchiveError as e:\n logger.warning(\"Ignoring %s archive due to: %s\", arg_8, str(e))"} +{"_id": "doc_3660", "title": "", "text": "def Func(arg_0):\n \"\"\"Find available backends.\n\n Look for the Perceval backends and commands under `top_package`\n and its sub-packages. 
When `top_package` defines a namespace,\n backends under that same namespace will be found too.\n\n :param top_package: package storing backends\n\n :returns: a tuple with two dicts: one with `Backend` classes and one\n with `BackendCommand` classes\n \"\"\"\n arg_1 = pkgutil.walk_packages(arg_0.__path__,\n prefix=arg_0.__name__ + '.')\n\n arg_2 = [name for _, name, is_pkg in arg_1 if not is_pkg]\n\n return _import_backends(arg_2)"} +{"_id": "doc_3661", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, **arg_3):\n \"\"\"Fetch items from the repository.\n\n The method retrieves items from a repository.\n\n To removed classified fields from the resulting items, set\n the parameter `filter_classified`. Take into account this\n parameter is incompatible with archiving items. Raw client\n data are archived before any other process. Therefore,\n classified data are stored within the archive. To prevent\n from possible data leaks or security issues when users do\n not need these fields, archiving and filtering are not\n compatible.\n\n :param category: the category of the items Funced\n :param filter_classified: remove classified fields from the resulting items\n :param kwargs: a list of other parameters (e.g., from_date, offset, etc.\n specific for each backend)\n\n :returns: a generator of items\n\n :raises BackendError: either when the category is not valid or\n 'filter_classified' and 'archive' are active at the same time.\n \"\"\"\n if arg_1 not in arg_0.categories:\n arg_4 = \"%s category not valid for %s\" % (arg_1, arg_0.__class__.__name__)\n raise BackendError(arg_4=arg_4)\n\n if arg_2 and arg_0.archive:\n arg_4 = \"classified fields filtering is not compatible with archiving items\"\n raise BackendError(arg_4=arg_4)\n\n if arg_0.archive:\n arg_0.archive.init_metadata(arg_0.origin, arg_0.__class__.__name__, arg_0.version, arg_1,\n arg_3)\n\n arg_0.client = arg_0._init_client()\n\n for arg_6 in arg_0.Func_items(arg_1, **arg_3):\n if arg_2:\n arg_6 = arg_0.filter_classified_data(arg_6)\n\n yield arg_0.metadata(arg_6, arg_2=arg_2)"} +{"_id": "doc_3662", "title": "", "text": "def Func(arg_0):\n \"\"\"Fetch the questions from an archive.\n\n It returns the items stored within an archive. 
If this method is called but\n no archive was provided, the method will raise a `ArchiveError` exception.\n\n :returns: a generator of items\n\n :raises ArchiveError: raised when an error occurs accessing an archive\n \"\"\"\n if not arg_0.archive:\n raise ArchiveError(cause=\"archive instance was not provided\")\n\n arg_0.client = arg_0._init_client(from_archive=True)\n\n for arg_2 in arg_0.fetch_items(arg_0.archive.category, **arg_0.archive.backend_params):\n yield arg_0.metadata(arg_2)"} +{"_id": "doc_3663", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove classified or confidential data from an item.\n\n It removes those fields that contain data considered as classified.\n Classified fields are defined in `CLASSIFIED_FIELDS` class attribute.\n\n :param item: fields will be removed from this item\n\n :returns: the same item but with confidential data filtered\n \"\"\"\n arg_2 = uuid(arg_0.origin, arg_0.metadata_id(arg_1))\n\n logger.debug(\"Filtering classified data for item %s\", arg_2)\n\n for arg_3 in arg_0.CLASSIFIED_FIELDS:\n try:\n _remove_key_from_nested_dict(arg_1, arg_3)\n except KeyError:\n logger.debug(\"Classified field '%s' not found for item %s; field ignored\",\n '.'.join(arg_3), arg_2)\n\n logger.debug(\"Classified data filtered for item %s\", arg_2)\n\n return arg_1"} +{"_id": "doc_3664", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Parse a list of arguments.\n\n Parse argument strings needed to run a backend command. The result\n will be a `argFunc.Namespace` object populated with the values\n obtained after the validation of the parameters.\n\n :param args: argument strings\n\n :result: an object with the Funcd values\n \"\"\"\n arg_2 = arg_0.Funcr.Func_args(arg_1)\n\n # Category was not set, remove it\n if arg_2.category is None:\n delattr(arg_2, 'category')\n\n if arg_0._from_date:\n arg_2.from_date = str_to_datetime(arg_2.from_date)\n if arg_0._to_date and arg_2.to_date:\n arg_2.to_date = str_to_datetime(arg_2.to_date)\n if arg_0._archive and arg_2.archived_since:\n arg_2.archived_since = str_to_datetime(arg_2.archived_since)\n\n if arg_0._archive and arg_2.fetch_archive and arg_2.no_archive:\n raise AttributeError(\"fetch-archive and no-archive arguments are not compatible\")\n if arg_0._archive and arg_2.fetch_archive and not arg_2.category:\n raise AttributeError(\"fetch-archive needs a category to work with\")\n\n # Set aliases\n for arg_6, arg_7 in arg_0.aliases.items():\n if (arg_6 not in arg_2) and (arg_7 in arg_2):\n arg_8 = getattr(arg_2, arg_7, None)\n setattr(arg_2, arg_6, arg_8)\n\n return arg_2"} +{"_id": "doc_3665", "title": "", "text": "def Func(arg_0):\n \"\"\"Activate archive arguments parsing\"\"\"\n\n arg_1 = arg_0.parser.add_argument_group('archive arguments')\n arg_1.add_argument('--archive-path', dest='archive_path', default=None,\n help=\"directory path to the archives\")\n arg_1.add_argument('--no-archive', dest='no_archive', action='store_true',\n help=\"do not archive data\")\n arg_1.add_argument('--fetch-archive', dest='fetch_archive', action='store_true',\n help=\"fetch data from the archives\")\n arg_1.add_argument('--archived-since', dest='archived_since', default='1970-01-01',\n help=\"retrieve items archived since the given date\")"} +{"_id": "doc_3666", "title": "", "text": "def Func(arg_0):\n \"\"\"Activate output arguments parsing\"\"\"\n\n arg_1 = arg_0.parser.add_argument_group('output arguments')\n arg_1.add_argument('-o', '--output', type=argparse.FileType('w'),\n dest='outfile', default=sys.stdout,\n 
help=\"output file\")\n arg_1.add_argument('--json-line', dest='json_line', action='store_true',\n help=\"produce a JSON line for each output item\")"} +{"_id": "doc_3667", "title": "", "text": "def Func(arg_0):\n \"\"\"Fetch and write items.\n\n This method Funcs the backend to fetch the items from the given\n origin. Items are converted to JSON objects and written to the\n defined output.\n\n If `fetch-archive` parameter was given as an argument during\n the inizialization of the instance, the items will be retrieved\n using the archive manager.\n \"\"\"\n arg_1 = vars(arg_0.parsed_args)\n arg_2 = arg_1.pop('category', None)\n arg_3 = arg_1.pop('filter_classified', False)\n arg_4 = arg_1.pop('archived_since', None)\n\n if arg_0.archive_manager and arg_0.parsed_args.fetch_archive:\n arg_5 = fetch_from_archive(arg_0.BACKEND, arg_1,\n arg_0.archive_manager,\n arg_2,\n arg_4)\n else:\n arg_5 = fetch(arg_0.BACKEND, arg_1, arg_2,\n arg_3=arg_3,\n manager=arg_0.archive_manager)\n\n try:\n for arg_6 in arg_5:\n if arg_0.json_line:\n arg_7 = json.dumps(arg_6, separators=(',', ':'), sort_keys=True)\n else:\n arg_7 = json.dumps(arg_6, indent=4, sort_keys=True)\n arg_0.outfile.write(arg_7)\n arg_0.outfile.write('\\n')\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n raise RuntimeError(str(e))"} +{"_id": "doc_3668", "title": "", "text": "def Func(arg_0):\n \"\"\"Initialize archive based on the parsed parameters\"\"\"\n\n if 'archive_path' not in arg_0.parsed_args:\n arg_1 = None\n elif arg_0.parsed_args.no_archive:\n arg_1 = None\n else:\n if not arg_0.parsed_args.archive_path:\n arg_2 = os.path.expanduser(ARCHIVES_DEFAULT_PATH)\n else:\n arg_2 = arg_0.parsed_args.archive_path\n\n arg_1 = ArchiveManager(arg_2)\n\n arg_0.archive_manager = arg_1"} +{"_id": "doc_3669", "title": "", "text": "def Func(arg_0):\n \"\"\"Extracts the update time from a MBox item.\n\n The timestamp used is extracted from 'Date' field in its\n several forms. This date is converted to UNIX timestamp\n format.\n\n :param item: item generated by the backend\n\n :returns: a UNIX timestamp\n \"\"\"\n arg_1 = arg_0[MBox.DATE_FIELD]\n arg_1 = str_to_datetime(arg_1)\n\n return arg_1.timestamp()"} +{"_id": "doc_3670", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fetch and parse the messages from a mailing list\"\"\"\n\n arg_2 = datetime_to_utc(arg_2)\n\n arg_3, arg_4, arg_5 = (0, 0, 0)\n\n for arg_6 in arg_1.mboxes:\n arg_7 = None\n\n try:\n arg_7 = arg_0._copy_mbox(arg_6)\n\n for arg_8 in arg_0.parse_mbox(arg_7):\n arg_5 += 1\n\n if not arg_0._validate_message(arg_8):\n arg_4 += 1\n continue\n\n # Ignore those messages sent before the given date\n arg_9 = str_to_datetime(arg_8[MBox.DATE_FIELD])\n\n if arg_9 < arg_2:\n logger.debug(\"Message %s sent before %s; skipped\",\n arg_8['unixfrom'], str(arg_2))\n arg_5 -= 1\n continue\n\n # Convert 'CaseInsensitiveDict' to dict\n arg_8 = arg_0._casedict_to_dict(arg_8)\n\n arg_3 += 1\n logger.debug(\"Message %s parsed\", arg_8['unixfrom'])\n\n yield arg_8\n except (OSError, EOFError) as e:\n logger.warning(\"Ignoring %s mbox due to: %s\", arg_6.filepath, str(e))\n except Exception as e:\n if arg_7 and os.path.exists(arg_7):\n os.remove(arg_7)\n raise e\n finally:\n if arg_7 and os.path.exists(arg_7):\n os.remove(arg_7)\n\n logger.info(\"Done. 
%s/%s messages fetched; %s ignored\",\n arg_3, arg_5, arg_4)"} +{"_id": "doc_3671", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Copy the contents of a mbox to a temporary file\"\"\"\n\n arg_2 = tempfile.mktemp(prefix='perceval_')\n\n with arg_1.container as f_in:\n with open(arg_2, mode='wb') as f_out:\n for arg_3 in f_in:\n f_out.write(arg_3)\n return arg_2"} +{"_id": "doc_3672", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the commits\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['from_date']\n arg_4 = arg_2['to_date']\n arg_5 = arg_2['branches']\n arg_6 = arg_2['latest_items']\n arg_7 = arg_2['no_update']\n\n arg_8 = 0\n\n try:\n if os.path.isfile(arg_0.gitpath):\n arg_9 = arg_0.__fetch_from_log()\n else:\n arg_9 = arg_0.__fetch_from_repo(arg_3, arg_4, arg_5,\n arg_6, arg_7)\n\n for arg_10 in arg_9:\n yield arg_10\n arg_8 += 1\n except EmptyRepositoryError:\n pass\n\n logger.info(\"Fetch process completed: %s commits fetched\",\n arg_8)"} +{"_id": "doc_3673", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the Git argument parser.\"\"\"\n\n arg_1 = BackendCommandArgumentParser(arg_0.BACKEND.CATEGORIES,\n from_date=True,\n to_date=True)\n\n # Optional arguments\n arg_2 = arg_1.parser.add_argument_group('Git arguments')\n arg_2.add_argument('--branches', dest='branches',\n nargs='+', type=str, default=None,\n help=\"Fetch commits only from these branches\")\n\n # Mutual exclusive parameters\n arg_3 = arg_2.add_mutually_exclusive_group()\n arg_3.add_argument('--git-path', dest='git_path',\n help=\"Path where the Git repository will be cloned\")\n arg_3.add_argument('--git-log', dest='git_log',\n help=\"Path to the Git log file\")\n\n arg_4 = arg_2.add_mutually_exclusive_group()\n arg_4.add_argument('--latest-items', dest='latest_items',\n action='store_true',\n help=\"Fetch latest commits added to the repository\")\n arg_4.add_argument('--no-update', dest='no_update',\n action='store_true',\n help=\"Fetch all commits without updating the repository\")\n\n # Required arguments\n arg_1.parser.add_argument('uri',\n help=\"URI of the Git log repository\")\n\n return arg_1"} +{"_id": "doc_3674", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse the Git log stream.\"\"\"\n\n for arg_1 in arg_0.stream:\n arg_1 = arg_1.rstrip('\\n')\n arg_2 = False\n arg_0.nline += 1\n\n while not arg_2:\n arg_2 = arg_0.handlers[arg_0.state](arg_1)\n\n if arg_0.state == arg_0.COMMIT and arg_0.commit:\n arg_3 = arg_0._build_commit()\n logger.debug(\"Commit %s Funcd\", arg_3['commit'])\n yield arg_3\n\n # Return the last commit, if any\n if arg_0.commit:\n arg_3 = arg_0._build_commit()\n logger.debug(\"Commit %s Funcd\", arg_3['commit'])\n yield arg_3"} +{"_id": "doc_3675", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Clone a Git repository.\n\n Make a bare copy of the repository stored in `uri` into `dirpath`.\n The repository would be either local or remote.\n\n :param uri: URI of the repository\n :param dirtpath: directory where the repository will be Funcd\n\n :returns: a `GitRepository` class having Funcd the repository\n\n :raises RepositoryError: when an error occurs cloning the given\n repository\n \"\"\"\n arg_3 = ['git', 'Func', '--bare', arg_1, arg_2]\n arg_4 = {\n 'LANG': 'C',\n 'HOME': os.getenv('HOME', '')\n }\n\n arg_0._exec(arg_3, arg_4=arg_4)\n\n logger.debug(\"Git %s repository Funcd into %s\",\n arg_1, arg_2)\n\n return arg_0(arg_1, 
arg_2)"} +{"_id": "doc_3676", "title": "", "text": "def Func(arg_0):\n \"\"\"Count the objects of a repository.\n\n The method returns the total number of objects (packed and unpacked)\n available on the repository.\n\n :raises RepositoryError: when an error occurs counting the objects\n of a repository\n \"\"\"\n arg_1 = ['git', 'count-objects', '-v']\n\n arg_2 = arg_0._exec(arg_1, cwd=arg_0.dirpath, env=arg_0.gitenv)\n arg_2 = arg_2.decode('utf-8', errors='surrogateescape').rstrip()\n\n try:\n arg_3 = {k: v for k, v in (x.split(': ') for x in arg_2.split('\\n'))}\n arg_4 = int(arg_3['count']) + int(arg_3['in-pack'])\n except KeyError as e:\n arg_5 = \"unable to parse 'count-objects' output; reason: '%s' entry not found\" \\\n % e.args[0]\n raise RepositoryError(cause=arg_5)\n except ValueError as e:\n arg_5 = \"unable to parse 'count-objects' output; reason: %s\" % str(e)\n raise RepositoryError(cause=arg_5)\n\n logger.debug(\"Git %s repository has %s objects\",\n arg_0.uri, str(arg_4))\n\n return arg_4"} +{"_id": "doc_3677", "title": "", "text": "def Func(arg_0):\n \"\"\"Check if the repo is in a detached state.\n\n The repository is in a detached state when HEAD is not a symbolic\n reference.\n\n :returns: whether the repository is detached or not\n\n :raises RepositoryError: when an error occurs checking the state\n of the repository\n \"\"\"\n arg_1 = ['git', 'symbolic-ref', 'HEAD']\n\n try:\n arg_0._exec(arg_1, cwd=arg_0.dirpath, env=arg_0.gitenv)\n except RepositoryError as e:\n if e.msg.find(\"ref HEAD is not a symbolic ref\") == -1:\n raise e\n return True\n else:\n return False"} +{"_id": "doc_3678", "title": "", "text": "def Func(arg_0):\n \"\"\"Keep the repository in Func.\n\n This method will Funchronize the repository with its 'origin',\n fetching newest objects and updating references. It uses low\n level commands which allow to keep track of which things\n have changed in the repository.\n\n The method also returns a list of hashes related to the new\n commits fetched during the process.\n\n :returns: list of new commits\n\n :raises RepositoryError: when an error occurs Funchronizing\n the repository\n \"\"\"\n arg_1, arg_2 = arg_0._fetch_pack()\n\n if arg_1:\n arg_3 = arg_0._read_commits_from_pack(arg_1)\n else:\n arg_3 = []\n logger.debug(\"Git repository %s (%s) does not have any new object\",\n arg_0.uri, arg_0.dirpath)\n\n arg_0._update_references(arg_2)\n\n logger.debug(\"Git repository %s (%s) is Funced\",\n arg_0.uri, arg_0.dirpath)\n\n return arg_3"} +{"_id": "doc_3679", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4='utf-8'):\n \"\"\"Read the commit Func from the repository.\n\n The method returns the Git Func of the repository using the\n following options:\n\n git Func --raw --numstat --pretty=fuller --decorate=full\n --all --reverse --topo-order --parents -M -C -c\n --remotes=origin\n\n When `from_date` is given, it gets the commits equal or older\n than that date. This date is given in a datetime object.\n\n The list of branches is a list of strings, with the names of the\n branches to fetch. If the list of branches is empty, no commit\n is fetched. 
If the list of branches is None, all commits\n for all branches will be fetched.\n\n :param from_date: fetch commits newer than a specific\n date (inclusive)\n :param branches: names of branches to fetch from (default: None)\n :param encoding: encode the Func using this format\n\n :returns: a generator where each item is a line from the Func\n\n :raises EmptyRepositoryError: when the repository is empty and\n the action cannot be performed\n :raises RepositoryError: when an error occurs fetching the Func\n \"\"\"\n if arg_0.is_empty():\n Funcger.warning(\"Git %s repository is empty; unable to get the Func\",\n arg_0.uri)\n raise EmptyRepositoryError(repository=arg_0.uri)\n\n arg_5 = ['git', 'Func', '--reverse', '--topo-order']\n arg_5.extend(arg_0.GIT_PRETTY_OUTPUT_OPTS)\n\n if arg_1:\n arg_6 = arg_1.strftime(\"%Y-%m-%d %H:%M:%S %z\")\n arg_5.append('--since=' + arg_6)\n\n if arg_2:\n arg_6 = arg_2.strftime(\"%Y-%m-%d %H:%M:%S %z\")\n arg_5.append('--until=' + arg_6)\n\n if arg_3 is None:\n arg_5.extend(['--branches', '--tags', '--remotes=origin'])\n elif len(arg_3) == 0:\n arg_5.append('--max-count=0')\n else:\n arg_3 = ['refs/heads/' + branch for branch in arg_3]\n arg_5.extend(arg_3)\n\n for arg_7 in arg_0._exec_nb(arg_5, cwd=arg_0.dirpath, env=arg_0.gitenv):\n yield arg_7\n\n Funcger.debug(\"Git Func fetched from %s repository (%s)\",\n arg_0.uri, arg_0.dirpath)"} +{"_id": "doc_3680", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='utf-8'):\n \"\"\"Show the data of a set of commits.\n\n The method returns the output of Git Func command for a\n set of commits using the following options:\n\n git Func --raw --numstat --pretty=fuller --decorate=full\n --parents -M -C -c [...]\n\n When the list of commits is empty, the command will return\n data about the last commit, like the default behaviour of\n `git Func`.\n\n :param commits: list of commits to Func data\n :param encoding: encode the output using this format\n\n :returns: a generator where each item is a line from the Func output\n\n :raises EmptyRepositoryError: when the repository is empty and\n the action cannot be performed\n :raises RepositoryError: when an error occurs fetching the Func output\n \"\"\"\n if arg_0.is_empty():\n logger.warning(\"Git %s repository is empty; unable to run Func\",\n arg_0.uri)\n raise EmptyRepositoryError(repository=arg_0.uri)\n\n if arg_1 is None:\n arg_1 = []\n\n arg_3 = ['git', 'Func']\n arg_3.extend(arg_0.GIT_PRETTY_OUTPUT_OPTS)\n arg_3.extend(arg_1)\n\n for arg_4 in arg_0._exec_nb(arg_3, cwd=arg_0.dirpath, env=arg_0.gitenv):\n yield arg_4\n\n logger.debug(\"Git Func fetched from %s repository (%s)\",\n arg_0.uri, arg_0.dirpath)"} +{"_id": "doc_3681", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update references removing old ones.\"\"\"\n\n arg_2 = [ref.refname for ref in arg_1]\n\n # Delete old references\n for arg_3 in arg_0._discover_refs():\n if not arg_3.refname.startswith('refs/heads/'):\n continue\n if arg_3.refname in arg_2:\n continue\n arg_0._update_ref(arg_3, delete=True)\n\n # Update new references\n for arg_4 in arg_1:\n arg_5 = arg_4.refname\n\n if arg_5.endswith('^{}'):\n logger.debug(\"Annotated tag %s ignored for updating in sync process\",\n arg_5)\n continue\n elif not arg_5.startswith('refs/heads/') and not arg_5.startswith('refs/tags/'):\n logger.debug(\"Reference %s not needed; ignored for updating in sync process\",\n arg_5)\n continue\n else:\n arg_0._update_ref(arg_4)\n\n # Prune repository to remove old branches\n arg_6 = ['git', 'remote', 'prune', 
'origin']\n arg_0._exec(arg_6, cwd=arg_0.dirpath, env=arg_0.gitenv)"} +{"_id": "doc_3682", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Get the current list of local or remote refs.\"\"\"\n\n if arg_1:\n arg_2 = ['git', 'ls-remote', '-h', '-t', '--exit-code', 'origin']\n arg_3 = '\\t'\n arg_4 = [2]\n else:\n # Check first whether the local repo is empty;\n # Running 'show-ref' in empty repos gives an error\n if arg_0.is_empty():\n raise EmptyRepositoryError(repository=arg_0.uri)\n\n arg_2 = ['git', 'show-ref', '--heads', '--tags']\n arg_3 = ' '\n arg_4 = [1]\n\n # Error codes returned when no matching refs (i.e, no heads\n # or tags) are found in a repository will be ignored. Otherwise,\n # the full process would fail for those situations.\n arg_5 = arg_0._exec(arg_2, cwd=arg_0.dirpath,\n env=arg_0.gitenv,\n arg_4=arg_4)\n arg_5 = arg_5.decode('utf-8', errors='surrogateescape').rstrip()\n arg_5 = arg_5.split('\\n') if arg_5 else []\n\n arg_6 = []\n\n for arg_7 in arg_5:\n arg_8 = arg_7.split(arg_3)\n arg_9 = GitRef(arg_8[0], arg_8[1])\n arg_6.append(arg_9)\n\n return arg_6"} +{"_id": "doc_3683", "title": "", "text": "def Func(arg_0, arg_1='utf-8'):\n \"\"\"Reads self.proc.stderr.\n\n Usually, this should be read in a thread, to prevent blocking\n the read from stdout of the stderr buffer is filled, and this\n function is not called becuase the program is busy in the\n stderr reading loop.\n\n Reads self.proc.stderr (self.proc is the subprocess running\n the git command), and reads / writes self.failed_message\n (the message sent to stderr when git fails, usually one line).\n \"\"\"\n for arg_2 in arg_0.proc.stderr:\n arg_3 = arg_2.decode(arg_1, errors='surrogateescape')\n\n if arg_0.proc.returncode != 0:\n # If the subprocess didn't finish successfully, we expect\n # the last line in stderr to provide the cause\n if arg_0.failed_message is not None:\n # We had a message, there is a newer line, print it\n logger.debug(\"Git log stderr: \" + arg_0.failed_message)\n arg_0.failed_message = arg_3\n else:\n # The subprocess is successfully up to now, print the line\n logger.debug(\"Git log stderr: \" + arg_3)"} +{"_id": "doc_3684", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4='utf-8'):\n \"\"\"Run a command.\n\n Execute `cmd` command in the directory set by `cwd`. Environment\n variables can be set using the `env` dictionary. The output\n data is returned as encoded bytes.\n\n Commands which their returning status codes are non-zero will\n be treated as failed. 
Error codes considered as valid can be\n ignored giving them in the `ignored_error_codes` list.\n\n :returns: the output of the command as encoded bytes\n\n :raises RepositoryError: when an error occurs running the command\n \"\"\"\n if arg_3 is None:\n arg_3 = []\n\n logger.debug(\"Running command %s (cwd: %s, env: %s)\",\n ' '.join(arg_0), arg_1, str(arg_2))\n\n try:\n arg_5 = subprocess.Popen(arg_0, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n arg_1=arg_1, arg_2=arg_2)\n (arg_6, arg_7) = arg_5.communicate()\n except OSError as e:\n raise RepositoryError(arg_9=str(e))\n\n if arg_5.returncode != 0 and arg_5.returncode not in arg_3:\n arg_8 = arg_7.decode(arg_4, errors='surrogateescape')\n arg_9 = \"git command - %s\" % arg_8\n raise RepositoryError(arg_9=arg_9)\n else:\n logger.debug(arg_7.decode(arg_4, errors='surrogateescape'))\n\n return arg_6"} +{"_id": "doc_3685", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None,\n arg_7=True, arg_8=arg_9):\n \"\"\"Fetch the tweets from the server.\n\n This method Funces tweets from the TwitterSearch API published in the last seven days.\n\n :param category: the category of items to Func\n :param since_id: if not null, it returns results with an ID greater than the specified ID\n :param max_id: when it is set or if not None, it returns results with an ID less than the specified ID\n :param geocode: if enabled, returns tweets by users located at latitude,longitude,\"mi\"|\"km\"\n :param lang: if enabled, restricts tweets to the given language, given by an ISO 639-1 code\n :param include_entities: if disabled, it excludes entities node\n :param tweets_type: type of tweets returned. Default is \u201cmixed\u201d, others are \"recent\" and \"popular\"\n\n :returns: a generator of tweets\n \"\"\"\n arg_10 = {\"since_id\": arg_3,\n \"max_id\": arg_4,\n \"geocode\": arg_5,\n \"lang\": arg_6,\n \"include_entities\": arg_7,\n \"result_type\": arg_8}\n arg_11 = super().Func(arg_1, **arg_10)\n\n return arg_11"} +{"_id": "doc_3686", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the tweets\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['since_id']\n arg_4 = arg_2['max_id']\n arg_5 = arg_2['geocode']\n arg_6 = arg_2['lang']\n arg_7 = arg_2['include_entities']\n arg_8 = arg_2['result_type']\n\n logger.info(\"Fetching tweets %s from %s to %s\",\n arg_0.query, str(arg_3),\n str(arg_4) if arg_4 else '--')\n\n arg_9 = []\n arg_10 = None\n arg_11 = None\n arg_12 = arg_0.client.tweets(arg_0.query, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5,\n arg_6=arg_6, include_entities=arg_7, result_type=arg_8)\n\n for arg_13 in arg_12:\n for arg_14 in range(len(arg_13)):\n arg_15 = arg_13[arg_14]\n arg_9.append(arg_15['id'])\n\n if arg_13[-1] == arg_15:\n arg_10 = str_to_datetime(arg_13[-1]['created_at'])\n\n if arg_13[0] == arg_15 and not arg_11:\n arg_11 = str_to_datetime(arg_13[0]['created_at'])\n\n yield arg_15\n\n logger.info(\"Fetch process completed: %s (unique %s) tweets fetched, from %s to %s\",\n len(arg_9), len(list(set(arg_9))), arg_10, arg_11)"} +{"_id": "doc_3687", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the Twitter argument parser.\"\"\"\n\n arg_1 = BackendCommandArgumentParser(arg_0.BACKEND.CATEGORIES,\n token_auth=True,\n archive=True)\n\n # Backend token is required\n arg_2 = arg_1.parser._option_string_actions['--api-token']\n arg_2.required = True\n\n # Meetup options\n arg_4 = 
arg_1.parser.add_argument_group('Twitter arguments')\n arg_4.add_argument('--max-items', dest='max_items',\n type=int, default=MAX_ITEMS,\n help=\"Maximum number of items requested on the same query\")\n arg_4.add_argument('--no-entities', dest='include_entities',\n arg_2='store_false',\n help=\" Exclude entities node\")\n arg_4.add_argument('--geo-code', dest='geocode',\n help=\"Select tweets by users located at latitude,longitude,radius\")\n arg_4.add_argument('--lang', dest='lang',\n help=\"Select tweets to the given language in ISO 639-1 code\")\n arg_4.add_argument('--tweets-type', dest='tweets_type', default=TWEET_TYPE_MIXED,\n help=\"Type of tweets returned. Default is 'mixed', others are 'recent' and 'popular'\")\n arg_4.add_argument('--sleep-for-rate', dest='sleep_for_rate',\n arg_2='store_true',\n help=\"sleep for getting more rate\")\n arg_4.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep',\n default=MIN_RATE_LIMIT, type=int,\n help=\"sleep until reset when the rate limit reaches this value\")\n arg_4.add_argument('--sleep-time', dest='sleep_time',\n default=SLEEP_TIME, type=int,\n help=\"minimun sleeping time to avoid too many request exception\")\n\n # Required arguments\n arg_1.parser.add_argument('query',\n help=\"Search query including operators, max 500 chars\")\n\n return arg_1"} +{"_id": "doc_3688", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Fetch data from Google API.\n\n The method retrieves a list of hits for some\n given keywords using the Google API.\n\n :param category: the category of items to Func\n\n :returns: a generator of data\n \"\"\"\n arg_3 = {}\n arg_4 = super().Func(arg_1, **arg_3)\n\n return arg_4"} +{"_id": "doc_3689", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch Google hit items\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n logger.info(\"Fetching data for '%s'\", arg_0.keywords)\n\n arg_3 = arg_0.client.hits(arg_0.keywords)\n arg_4 = arg_0.__parse_hits(arg_3)\n\n yield arg_4\n\n logger.info(\"Fetch process completed\")"} +{"_id": "doc_3690", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse the hits returned by the Google Search API\"\"\"\n\n # Create the soup and get the desired div\n arg_2 = bs4.BeautifulSoup(arg_1, 'html.parser')\n arg_3 = arg_2.find(\"div\", id=\"resultStats\").text\n\n # Remove commas or dots\n arg_3 = arg_3.replace(',', u'')\n arg_3 = arg_3.replace('.', u'')\n\n arg_4 = datetime_utcnow().timestamp()\n arg_5 = arg_0.keywords[:]\n arg_5.append(str(arg_4))\n\n arg_6 = {\n 'fetched_on': arg_4,\n 'id': uuid(*arg_5),\n 'keywords': arg_0.keywords,\n 'type': 'googleSearchHits'\n }\n\n if not arg_3:\n logger.warning(\"No hits for %s\", arg_0.keywords)\n arg_6['hits'] = 0\n\n return arg_6\n\n arg_7 = re.search(r'\\d+', arg_3).group(0)\n arg_8 = int(arg_7)\n arg_6['hits'] = arg_8\n\n return arg_6"} +{"_id": "doc_3691", "title": "", "text": "def Func(arg_0):\n \"\"\"Get repo info about stars, watchers and forks\"\"\"\n\n arg_1 = arg_0.client.repo()\n arg_2 = json.loads(arg_1)\n\n arg_3 = datetime_utcnow()\n arg_2['fetched_on'] = arg_3.timestamp()\n\n yield arg_2"} +{"_id": "doc_3692", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get issue reactions\"\"\"\n\n arg_3 = []\n\n if arg_2 == 0:\n return arg_3\n\n arg_4 = arg_0.client.issue_reactions(arg_1)\n\n for arg_5 in arg_4:\n\n for arg_6 in json.loads(arg_5):\n arg_6['user_data'] = arg_0.__get_user(arg_6['user']['login'])\n 
arg_3.append(arg_6)\n\n return arg_3"} +{"_id": "doc_3693", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get reactions on issue comments\"\"\"\n\n arg_3 = []\n\n if arg_2 == 0:\n return arg_3\n\n arg_4 = arg_0.client.issue_comment_reactions(arg_1)\n\n for arg_5 in arg_4:\n\n for arg_6 in json.loads(arg_5):\n arg_6['user_data'] = arg_0.__get_user(arg_6['user']['login'])\n arg_3.append(arg_6)\n\n return arg_3"} +{"_id": "doc_3694", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get issue assignees\"\"\"\n\n arg_2 = []\n for arg_3 in arg_1:\n arg_2.append(arg_0.__get_user(arg_3['login']))\n\n return arg_2"} +{"_id": "doc_3695", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get pull request requested reviewers\"\"\"\n\n arg_2 = []\n arg_3 = arg_0.client.pull_requested_reviewers(arg_1)\n\n for arg_4 in arg_3:\n arg_3 = json.loads(arg_4)\n\n for arg_5 in arg_3['users']:\n arg_6 = arg_0.__get_user(arg_5['login'])\n arg_2.append(arg_6)\n\n return arg_2"} +{"_id": "doc_3696", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get pull request commit hashes\"\"\"\n\n arg_2 = []\n arg_3 = arg_0.client.pull_commits(arg_1)\n\n for arg_4 in arg_3:\n\n for arg_5 in json.loads(arg_4):\n arg_6 = arg_5['sha']\n arg_2.append(arg_6)\n\n return arg_2"} +{"_id": "doc_3697", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get pull review comment reactions\"\"\"\n\n arg_3 = []\n\n if arg_2 == 0:\n return arg_3\n\n arg_4 = arg_0.client.pull_review_comment_reactions(arg_1)\n\n for arg_5 in arg_4:\n\n for arg_6 in json.loads(arg_5):\n arg_6['user_data'] = arg_0.__get_user(arg_6['user']['login'])\n arg_3.append(arg_6)\n\n return arg_3"} +{"_id": "doc_3698", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get reactions of an issue\"\"\"\n\n arg_2 = {\n 'per_page': PER_PAGE,\n 'direction': 'asc',\n 'sort': 'updated'\n }\n\n arg_3 = urijoin(\"issues\", str(arg_1), \"reactions\")\n return arg_0.fetch_items(arg_3, arg_2)"} +{"_id": "doc_3699", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Fetch the Func from the repository.\n\n The method retrieves, from a GitHub repository, the Func\n updated since the given date.\n\n :param from_date: obtain Func updated since this date\n\n :returns: a generator of Func\n \"\"\"\n arg_2 = {\n 'state': 'all',\n 'per_page': PER_PAGE,\n 'direction': 'asc',\n 'sort': 'updated'}\n\n if arg_1:\n arg_2['since'] = arg_1.isoformat()\n\n arg_3 = urijoin(\"Func\")\n return arg_0.fetch_items(arg_3, arg_2)"} +{"_id": "doc_3700", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get pull requested reviewers\"\"\"\n\n arg_2 = urijoin(\"pulls\", str(arg_1), \"requested_reviewers\")\n return arg_0.fetch_items(arg_2, {})"} +{"_id": "doc_3701", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get pull request commits\"\"\"\n\n arg_2 = {\n 'per_page': PER_PAGE,\n }\n\n arg_3 = urijoin(\"pulls\", str(arg_1), \"commits\")\n return arg_0.fetch_items(arg_3, arg_2)"} +{"_id": "doc_3702", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get reactions of a review comment\"\"\"\n\n arg_2 = {\n 'per_page': PER_PAGE,\n 'direction': 'asc',\n 'sort': 'updated'\n }\n\n arg_3 = urijoin(\"pulls\", \"comments\", str(arg_1), \"reactions\")\n return arg_0.fetch_items(arg_3, arg_2)"} +{"_id": "doc_3703", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the user information and update the user cache\"\"\"\n Func = None\n\n if arg_1 in arg_0._users:\n return arg_0._users[arg_1]\n\n arg_3 = urijoin(arg_0.base_url, 'users', arg_1)\n\n 
logging.info(\"Getting info for %s\" % (arg_3))\n\n arg_4 = arg_0.fetch(arg_3)\n Func = arg_4.text\n arg_0._users[arg_1] = Func\n\n return Func"} +{"_id": "doc_3704", "title": "", "text": "def Func(arg_0):\n \"\"\"Return array of all tokens remaining API points\"\"\"\n\n arg_1 = [0] * arg_0.n_tokens\n # Turn off archiving when checking rates, because that would cause\n # archive key conflict (the same URLs giving different responses)\n arg_2 = arg_0.archive\n arg_0.archive = None\n for arg_4, arg_5 in enumerate(arg_0.tokens):\n # Pass flag to skip disabling archiving because this function doies it\n arg_1[arg_4] = arg_0._get_token_rate_limit(arg_5)\n # Restore archiving to whatever state it was\n arg_0.archive = arg_2\n logger.debug(\"Remaining API points: {}\".format(arg_1))\n return arg_1"} +{"_id": "doc_3705", "title": "", "text": "def Func(arg_0):\n \"\"\"Check if we need to switch GitHub API tokens\"\"\"\n\n if arg_0.n_tokens <= 1 or arg_0.rate_limit is None:\n return False\n elif arg_0.last_rate_limit_checked is None:\n arg_0.last_rate_limit_checked = arg_0.rate_limit\n return True\n\n # If approaching minimum rate limit for sleep\n arg_2 = float(arg_0.min_rate_to_sleep) * (1.0 + TOKEN_USAGE_BEFORE_SWITCH) + 1\n if arg_0.rate_limit <= arg_2:\n arg_0.last_rate_limit_checked = arg_0.rate_limit\n return True\n\n # Only switch token when used predefined factor of the current token's remaining API points\n arg_3 = float(arg_0.rate_limit) / float(arg_0.last_rate_limit_checked)\n if arg_3 < 1.0 - TOKEN_USAGE_BEFORE_SWITCH:\n arg_0.last_rate_limit_checked = arg_0.rate_limit\n return True\n elif arg_3 > 1.0:\n arg_0.last_rate_limit_checked = arg_0.rate_limit\n return False\n else:\n return False"} +{"_id": "doc_3706", "title": "", "text": "def Func(arg_0):\n \"\"\"Update rate limits data for the current token\"\"\"\n\n arg_1 = urijoin(arg_0.base_url, \"rate_limit\")\n try:\n # Turn off archiving when checking rates, because that would cause\n # archive key conflict (the same URLs giving different responses)\n arg_2 = arg_0.archive\n arg_0.archive = None\n arg_4 = super().fetch(arg_1)\n arg_0.archive = arg_2\n arg_0.update_rate_limit(arg_4)\n arg_0.last_rate_limit_checked = arg_0.rate_limit\n except requests.exceptions.HTTPError as error:\n if error.response.status_code == 404:\n logger.warning(\"Rate limit not initialized: %s\", error)\n else:\n raise error"} +{"_id": "doc_3707", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n \"\"\"Init metadata information.\n\n Metatada is composed by basic information needed to identify\n where archived data came from and how it can be retrieved\n and built into Perceval items.\n\n :param: origin: identifier of the repository\n :param: backend_name: name of the backend\n :param: backend_version: version of the backend\n :param: category: category of the items fetched\n :param: backend_params: dict representation of the fetch parameters\n\n raises ArchiveError: when an error occurs initializing the metadata\n \"\"\"\n arg_6 = datetime_to_utc(datetime_utcnow())\n arg_7 = arg_6.isoformat()\n arg_8 = pickle.dumps(arg_5, 0)\n\n arg_9 = (arg_1, arg_2, arg_3, arg_4,\n arg_8, arg_7,)\n\n try:\n arg_10 = arg_0._db.cursor()\n arg_11 = \"INSERT INTO \" + arg_0.METADATA_TABLE + \" \"\\\n \"(origin, backend_name, backend_version, \" \\\n \"category, backend_params, created_on) \" \\\n \"VALUES (?, ?, ?, ?, ?, ?)\"\n arg_10.execute(arg_11, arg_9)\n\n arg_0._db.commit()\n arg_10.close()\n except sqlite3.DatabaseError as e:\n arg_12 = 
\"metadata initialization error; cause: %s\" % str(e)\n raise ArchiveError(cause=arg_12)\n\n arg_0.origin = arg_1\n arg_0.backend_name = arg_2\n arg_0.backend_version = arg_3\n arg_0.category = arg_4\n arg_0.backend_params = arg_5\n arg_0.created_on = arg_6\n\n logger.debug(\"Metadata of archive %s initialized to %s\",\n arg_0.archive_path, arg_9)"} +{"_id": "doc_3708", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Store a raw item in this archive.\n\n The method will Func `data` content in this archive. The unique\n identifier for that item will be generated using the rest of the\n parameters.\n\n :param uri: request URI\n :param payload: request payload\n :param headers: request headers\n :param data: data to Func in this archive\n\n :raises ArchiveError: when an error occurs storing the given data\n \"\"\"\n arg_5 = arg_0.make_hashcode(arg_1, arg_2, arg_3)\n arg_6 = pickle.dumps(arg_2, 0)\n arg_7 = pickle.dumps(arg_3, 0)\n arg_8 = pickle.dumps(arg_4, 0)\n\n logger.debug(\"Archiving %s with %s %s %s in %s\",\n arg_5, arg_1, arg_2, arg_3, arg_0.archive_path)\n\n try:\n arg_9 = arg_0._db.cursor()\n arg_10 = \"INSERT INTO \" + arg_0.ARCHIVE_TABLE + \" (\" \\\n \"id, hashcode, uri, payload, headers, data) \" \\\n \"VALUES(?,?,?,?,?,?)\"\n arg_9.execute(arg_10, (None, arg_5, arg_1,\n arg_6, arg_7, arg_8))\n arg_0._db.commit()\n arg_9.close()\n except sqlite3.IntegrityError as e:\n arg_11 = \"data storage error; cause: duplicated entry %s\" % arg_5\n raise ArchiveError(cause=arg_11)\n except sqlite3.DatabaseError as e:\n arg_11 = \"data storage error; cause: %s\" % str(e)\n raise ArchiveError(cause=arg_11)\n\n logger.debug(\"%s data archived in %s\", arg_5, arg_0.archive_path)"} +{"_id": "doc_3709", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Retrieve a raw item from the archive.\n\n The method will return the `data` content corresponding to the\n hascode derived from the given parameters.\n\n :param uri: request URI\n :param payload: request payload\n :param headers: request headers\n\n :returns: the archived data\n\n :raises ArchiveError: when an error occurs retrieving data\n \"\"\"\n arg_4 = arg_0.make_hashcode(arg_1, arg_2, arg_3)\n\n logger.debug(\"Retrieving entry %s with %s %s %s in %s\",\n arg_4, arg_1, arg_2, arg_3, arg_0.archive_path)\n\n arg_0._db.row_factory = sqlite3.Row\n\n try:\n arg_7 = arg_0._db.cursor()\n arg_8 = \"SELECT data \" \\\n \"FROM \" + arg_0.ARCHIVE_TABLE + \" \" \\\n \"WHERE hashcode = ?\"\n arg_7.execute(arg_8, (arg_4,))\n arg_9 = arg_7.fetchone()\n arg_7.close()\n except sqlite3.DatabaseError as e:\n arg_10 = \"data retrieval error; cause: %s\" % str(e)\n raise ArchiveError(cause=arg_10)\n\n if arg_9:\n arg_11 = pickle.loads(arg_9['data'])\n else:\n arg_10 = \"entry %s not found in archive %s\" % (arg_4, arg_0.archive_path)\n raise ArchiveError(cause=arg_10)\n\n return arg_11"} +{"_id": "doc_3710", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create a brand new archive.\n\n Call this method to Func a new and empty archive. 
It will initialize\n the storage file in the path defined by `archive_path`.\n\n :param archive_path: absolute path where the archive file will be Funcd\n\n :raises ArchiveError: when the archive file already exists\n \"\"\"\n if os.path.exists(arg_1):\n arg_2 = \"archive %s already exists; remove it before creating a new one\"\n raise ArchiveError(cause=arg_2 % (arg_1))\n\n arg_3 = sqlite3.connect(arg_1)\n\n arg_4 = arg_3.cursor()\n arg_4.execute(arg_0.METADATA_CREATE_STMT)\n arg_4.execute(arg_0.ARCHIVE_CREATE_STMT)\n arg_3.commit()\n\n arg_4.close()\n arg_3.close()\n\n logger.debug(\"Creating archive %s\", arg_1)\n arg_5 = arg_0(arg_1)\n logger.debug(\"Achive %s was Funcd\", arg_1)\n\n return arg_5"} +{"_id": "doc_3711", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Generate a SHA1 based on the given arguments.\n\n Hashcodes created by this method will used as unique identifiers\n for the raw items or resources stored by this archive.\n\n :param uri: URI to the resource\n :param payload: payload of the request needed to fetch the resource\n :param headers: headers of the request needed to fetch the resource\n\n :returns: a SHA1 hash code\n \"\"\"\n def dict_to_json_str(arg_3):\n return json.dumps(arg_3, sort_keys=True)\n\n arg_4 = ':'.join([arg_0, dict_to_json_str(arg_1), dict_to_json_str(arg_2)])\n arg_5 = hashlib.sha1(arg_4.encode('utf-8'))\n return arg_5.hexdigest()"} +{"_id": "doc_3712", "title": "", "text": "def Func(arg_0):\n \"\"\"Check whether the archive is valid or not.\n\n This method will check if tables were created and if they\n contain valid data.\n \"\"\"\n arg_1 = arg_0._count_table_rows(arg_0.ARCHIVE_TABLE)\n arg_2 = arg_0._count_table_rows(arg_0.METADATA_TABLE)\n\n if arg_2 > 1:\n arg_3 = \"archive %s metadata corrupted; multiple metadata entries\" % (arg_0.archive_path)\n raise ArchiveError(cause=arg_3)\n if arg_2 == 0 and arg_1 > 0:\n arg_3 = \"archive %s metadata is empty but %s entries were achived\" % (arg_0.archive_path)\n raise ArchiveError(cause=arg_3)\n\n logger.debug(\"Integrity of archive %s OK; entries: %s rows, metadata: %s rows\",\n arg_0.archive_path, arg_1, arg_2)"} +{"_id": "doc_3713", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch the number of rows in a table\"\"\"\n\n arg_2 = arg_0._db.cursor()\n arg_3 = \"SELECT COUNT(*) FROM \" + arg_1\n\n try:\n arg_2.execute(arg_3)\n arg_4 = arg_2.fetchone()\n except sqlite3.DatabaseError as e:\n arg_5 = \"invalid archive file; cause: %s\" % str(e)\n raise ArchiveError(cause=arg_5)\n finally:\n arg_2.close()\n\n return arg_4[0]"} +{"_id": "doc_3714", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove an archive.\n\n This method deletes from the filesystem the archive stored\n in `archive_path`.\n\n :param archive_path: path to the archive\n\n :raises ArchiveManangerError: when an error occurs removing the\n archive\n \"\"\"\n try:\n Archive(arg_1)\n except ArchiveError as e:\n raise ArchiveManagerError(cause=str(e))\n\n os.remove(arg_1)"} +{"_id": "doc_3715", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Search archives.\n\n Get the archives which store data based on the given parameters.\n These parameters define which the origin was (`origin`), how data\n was fetched (`backend_name`) and data type ('category').\n Only those archives created on or after `archived_after` will be\n returned.\n\n The method returns a list with the file paths to those archives.\n The list is sorted by the date of creation of each archive.\n\n :param origin: data 
origin\n :param backend_name: backed used to fetch data\n :param category: type of the items fetched by the backend\n :param archived_after: get archives created on or after this date\n\n :returns: a list with archive names which match the Func criteria\n \"\"\"\n arg_5 = arg_0._Func_archives(arg_1, arg_2,\n arg_3, arg_4)\n arg_5 = [(fp, date) for fp, date in arg_5]\n arg_5 = [fp for fp, _ in sorted(arg_5, key=lambda x: x[1])]\n\n return arg_5"} +{"_id": "doc_3716", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Search archives using filters.\"\"\"\n\n for arg_5 in arg_0._search_files():\n try:\n arg_6 = Archive(arg_5)\n except ArchiveError:\n continue\n\n arg_7 = arg_6.origin == arg_1 and \\\n arg_6.backend_name == arg_2 and \\\n arg_6.category == arg_3 and \\\n arg_6.created_on >= arg_4\n\n if not arg_7:\n continue\n\n yield arg_5, arg_6.created_on"} +{"_id": "doc_3717", "title": "", "text": "def Func(arg_0):\n \"\"\"Check if filename is a compressed file supported by the tool.\n\n This function uses magic numbers (first four bytes) to determine\n the type of the file. Supported types are 'gz' and 'bz2'. When\n the filetype is not supported, the function returns `None`.\n\n :param filepath: path to the file\n\n :returns: 'gz' or 'bz2'; `None` if the type is not supported\n \"\"\"\n def compressed_file_type(arg_1):\n arg_2 = {\n b'\\x1f\\x8b\\x08': 'gz',\n b'\\x42\\x5a\\x68': 'bz2',\n b'PK\\x03\\x04': 'zip'\n }\n\n for arg_3, arg_4 in arg_2.items():\n if arg_1.startswith(arg_3):\n return arg_4\n\n return None\n\n with open(arg_0, mode='rb') as f:\n arg_5 = f.read(4)\n return compressed_file_type(arg_5)"} +{"_id": "doc_3718", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate a months range.\n\n Generator of months starting on `from_date` util `to_date`. Each\n returned item is a tuple of two datatime objects like in (month, month+1).\n Thus, the result will follow the sequence:\n ((fd, fd+1), (fd+1, fd+2), ..., (td-2, td-1), (td-1, td))\n\n :param from_date: generate dates starting on this month\n :param to_date: generate dates until this month\n\n :result: a generator of months range\n \"\"\"\n arg_2 = datetime.datetime(arg_0.year, arg_0.month, 1)\n arg_3 = datetime.datetime(arg_1.year, arg_1.month, 1)\n\n arg_4 = dateutil.rrule.rrule(freq=dateutil.rrule.MONTHLY,\n dtstart=arg_2, until=arg_3)\n arg_5 = [d for d in arg_4]\n\n arg_6 = 0\n for arg_7 in range(1, len(arg_5)):\n yield arg_5[arg_6], arg_5[arg_7]\n arg_6 = arg_7"} +{"_id": "doc_3719", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert an email message into a dictionary.\n\n This function transforms an `email.message.Message` object\n into a dictionary. 
Headers are stored as key:value pairs\n while the body of the message is stored inside `body` key.\n Body may have two other keys inside, 'plain', for plain body\n messages and 'html', for HTML encoded messages.\n\n The returned dictionary has the type `requests.structures.CaseInsensitiveDict`\n due to same headers with different case formats can appear in\n the same message.\n\n :param msg: email message of type `email.message.Message`\n\n :returns : dictionary of type `requests.structures.CaseInsensitiveDict`\n\n :raises ParseError: when an error occurs transforming the message\n to a dictionary\n \"\"\"\n def parse_headers(arg_0):\n arg_1 = {}\n\n for arg_2, arg_3 in arg_0.items():\n arg_4 = []\n\n for arg_5, arg_6 in email.header.decode_header(arg_3):\n if type(arg_5) == bytes:\n arg_6 = arg_6 if arg_6 else 'utf-8'\n try:\n arg_5 = arg_5.decode(arg_6, errors='surrogateescape')\n except (UnicodeError, LookupError):\n # Try again with a 7bit encoding\n arg_5 = arg_5.decode('ascii', errors='surrogateescape')\n arg_4.append(arg_5)\n\n arg_7 = ' '.join(arg_4)\n arg_1[arg_2] = arg_7 if arg_7 else None\n\n return arg_1\n\n def parse_payload(arg_0):\n arg_8 = {}\n\n if not arg_0.is_multipart():\n arg_9 = decode_payload(arg_0)\n arg_10 = arg_0.get_content_subtype()\n arg_8[arg_10] = [arg_9]\n else:\n # Include all the attached texts if it is multipart\n # Ignores binary parts by default\n for arg_11 in email.iterators.typed_subpart_iterator(arg_0):\n arg_9 = decode_payload(arg_11)\n arg_10 = arg_11.get_content_subtype()\n arg_8.setdefault(arg_10, []).append(arg_9)\n\n return {arg_12: '\\n'.join(arg_7) for arg_12, arg_7 in arg_8.items()}\n\n def decode_payload(arg_13):\n arg_6 = arg_13.get_content_charset('utf-8')\n arg_9 = arg_13.get_payload(decode=True)\n\n try:\n arg_9 = arg_9.decode(arg_6, errors='surrogateescape')\n except (UnicodeError, LookupError):\n # Try again with a 7bit encoding\n arg_9 = arg_9.decode('ascii', errors='surrogateescape')\n return arg_9\n\n # The function starts here\n arg_14 = requests.structures.CaseInsensitiveDict()\n\n if isinstance(arg_0, mailbox.mboxMessage):\n arg_14['unixfrom'] = arg_0.get_from()\n else:\n arg_14['unixfrom'] = None\n\n try:\n for arg_12, arg_7 in parse_headers(arg_0).items():\n arg_14[arg_12] = arg_7\n arg_14['body'] = parse_payload(arg_0)\n except UnicodeError as e:\n raise ParseError(cause=str(e))\n\n return arg_14"} +{"_id": "doc_3720", "title": "", "text": "def Func(arg_0):\n \"\"\"Remove control and invalid characters from an xml stream.\n\n Looks for invalid characters and subtitutes them with whitespaces.\n This solution is based on these two posts: Olemis Lang's reponse\n on StackOverflow (http://stackoverflow.com/questions/1707890) and\n lawlesst's on GitHub Gist (https://gist.github.com/lawlesst/4110923),\n that is based on the previous answer.\n\n :param xml: XML stream\n\n :returns: a purged XML stream\n \"\"\"\n arg_1 = [(0x00, 0x08), (0x0B, 0x1F),\n (0x7F, 0x84), (0x86, 0x9F)]\n\n arg_2 = ['%s-%s' % (chr(low), chr(high))\n for (low, high) in arg_1\n if low < sys.maxunicode]\n\n arg_3 = re.compile('[%s]' % ''.join(arg_2))\n\n arg_4 = ''\n\n for arg_5 in arg_0:\n if arg_3.search(arg_5) is not None:\n arg_5 = ' '\n arg_4 += arg_5\n\n return arg_4"} +{"_id": "doc_3721", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a XML stream into a dictionary.\n\n This function transforms a xml stream into a dictionary. The\n attributes are stored as single elements while child nodes are\n stored into lists. 
The text node is stored using the special\n key '__text__'.\n\n This code is based on Winston Ewert's solution to this problem.\n See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict\n for more info. The code was licensed as cc by-sa 3.0.\n\n :param raw_xml: XML stream\n\n :returns: a dict with the XML data\n\n :raises ParseError: raised when an error occurs parsing the given\n XML stream\n \"\"\"\n def node_to_dict(arg_1):\n arg_2 = {}\n arg_2.update(arg_1.items())\n\n arg_3 = getattr(arg_1, 'text', None)\n\n if arg_3 is not None:\n arg_2['__text__'] = arg_3\n\n arg_4 = {}\n for arg_5 in arg_1:\n arg_4.setdefault(arg_5.tag, []).append(node_to_dict(arg_5))\n\n arg_2.update(arg_4.items())\n\n return arg_2\n\n arg_6 = remove_invalid_xml_chars(arg_0)\n\n try:\n arg_7 = xml.etree.ElementTree.fromstring(arg_6)\n except xml.etree.ElementTree.ParseError as e:\n arg_8 = \"XML stream %s\" % (str(e))\n raise ParseError(arg_8=arg_8)\n\n arg_2 = node_to_dict(arg_7)\n\n return arg_2"} +{"_id": "doc_3722", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a Redmine issues JSON stream.\n\n The method parses a JSON stream and returns a list iterator.\n Each item is a dictionary that contains the issue parsed data.\n\n :param raw_json: JSON string to parse\n\n :returns: a generator of parsed issues\n \"\"\"\n arg_1 = json.loads(arg_0)\n\n arg_2 = arg_1['issues']\n for arg_3 in arg_2:\n yield arg_3"} +{"_id": "doc_3723", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the information of the given Func.\n\n :param Func_id: Func identifier\n \"\"\"\n arg_2 = urijoin(arg_0.RISSUES, str(arg_1) + arg_0.CJSON)\n\n arg_3 = {\n arg_0.PINCLUDE: ','.join([arg_0.CATTACHMENTS, arg_0.CCHANGESETS,\n arg_0.CCHILDREN, arg_0.CJOURNALS,\n arg_0.CRELATIONS, arg_0.CWATCHERS])\n }\n\n arg_4 = arg_0._call(arg_2, arg_3)\n\n return arg_4"} +{"_id": "doc_3724", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the information of the given Func.\n\n :param Func_id: Func identifier\n \"\"\"\n arg_2 = urijoin(arg_0.RUSERS, str(arg_1) + arg_0.CJSON)\n\n arg_3 = {}\n\n arg_4 = arg_0._call(arg_2, arg_3)\n\n return arg_4"} +{"_id": "doc_3725", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Call to get a resource.\n\n :param method: resource to get\n :param params: dict with the HTTP parameters needed to get\n the given resource\n \"\"\"\n arg_3 = arg_0.URL % {'base': arg_0.base_url, 'resource': arg_1}\n\n if arg_0.api_token:\n arg_2[arg_0.PKEY] = arg_0.api_token\n\n logger.debug(\"Redmine client requests: %s params: %s\",\n arg_1, str(arg_2))\n\n arg_5 = arg_0.fetch(arg_3, payload=arg_2, verify=False)\n\n return arg_5.text"} +{"_id": "doc_3726", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Fetch data from a Docker Hub repository.\n\n The method retrieves, from a repository stored in Docker Hub,\n its data which includes number of pulls, stars, description,\n among other data.\n\n :param category: the category of items to Func\n\n :returns: a generator of data\n \"\"\"\n arg_3 = {}\n arg_4 = super().Func(arg_1, **arg_3)\n\n return arg_4"} +{"_id": "doc_3727", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the Dockher Hub items\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n logger.info(\"Fetching data from '%s' repository of '%s' owner\",\n arg_0.repository, arg_0.owner)\n\n arg_3 = arg_0.client.repository(arg_0.owner, arg_0.repository)\n arg_4 = 
datetime_utcnow().timestamp()\n\n arg_5 = arg_0.parse_json(arg_3)\n arg_5['fetched_on'] = arg_4\n yield arg_5\n\n logger.info(\"Fetch process completed\")"} +{"_id": "doc_3728", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add extra information for custom fields.\n\n :param custom_fields: set of custom fields with the extra information\n :param fields: fields of the issue where to add the extra information\n\n :returns: an set of items with the extra information mapped\n \"\"\"\n def build_cf(arg_2, arg_3):\n return {'id': arg_2['id'], 'name': arg_2['name'], 'value': arg_3}\n\n return {\n arg_4: build_cf(arg_0[arg_4], arg_3)\n for arg_4, arg_3 in arg_1.items()\n if arg_4 in arg_0\n }"} +{"_id": "doc_3729", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"Retrieve all the items from a given date.\n\n :param url: endpoint API url\n :param from_date: obtain items updated since this date\n :param expand_fields: if True, it includes the expand fields in the payload\n \"\"\"\n arg_4 = 0\n\n arg_5 = arg_0.fetch(arg_2, payload=arg_0.__build_payload(arg_4, arg_1, arg_3))\n arg_6 = arg_5.text\n\n arg_7 = arg_5.json()\n arg_8 = arg_7['total']\n arg_9 = arg_7['maxResults']\n\n arg_4 += min(arg_9, arg_8)\n arg_0.__log_status(arg_4, arg_8, arg_2)\n\n while arg_6:\n yield arg_6\n arg_6 = None\n\n if arg_7['startAt'] + arg_9 < arg_8:\n arg_5 = arg_0.fetch(arg_2, payload=arg_0.__build_payload(arg_4, arg_1, arg_3))\n\n arg_7 = arg_5.json()\n arg_4 += arg_9\n arg_6 = arg_5.text\n arg_0.__log_status(arg_4, arg_8, arg_2)"} +{"_id": "doc_3730", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve all the issues from a given date.\n\n :param from_date: obtain issues updated since this date\n \"\"\"\n arg_2 = urijoin(arg_0.base_url, arg_0.RESOURCE, arg_0.VERSION_API, 'search')\n arg_3 = arg_0.get_items(arg_1, arg_2)\n\n return arg_3"} +{"_id": "doc_3731", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve all the comments of a given issue.\n\n :param issue_id: ID of the issue\n \"\"\"\n arg_2 = urijoin(arg_0.base_url, arg_0.RESOURCE, arg_0.VERSION_API, arg_0.ISSUE, arg_1, arg_0.COMMENT)\n arg_3 = arg_0.get_items(DEFAULT_DATETIME, arg_2, expand_fields=False)\n\n return arg_3"} +{"_id": "doc_3732", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieve all the fields available.\"\"\"\n\n arg_1 = urijoin(arg_0.base_url, arg_0.RESOURCE, arg_0.VERSION_API, 'field')\n arg_2 = arg_0.fetch(arg_1)\n\n return arg_2.text"} +{"_id": "doc_3733", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve all the questions from a given date.\n\n :param from_date: obtain questions updated since this date\n \"\"\"\n\n arg_2 = 1\n arg_3 = urijoin(arg_0.base_url, arg_0.VERSION_API, \"questions\")\n\n arg_4 = arg_0.fetch(arg_3, payload=arg_0.__build_payload(arg_2, arg_1))\n arg_5 = arg_4.text\n\n arg_6 = arg_4.json()\n arg_7 = arg_6['total']\n arg_8 = arg_6['page_size']\n\n arg_0.__log_status(arg_6['quota_remaining'],\n arg_6['quota_max'],\n arg_8,\n arg_7)\n\n while arg_5:\n yield arg_5\n arg_5 = None\n\n if arg_6['has_more']:\n arg_2 += 1\n\n arg_9 = arg_6.get('backoff', None)\n if arg_9:\n logger.debug(\"Expensive query. 
Wait %s secs to send a new request\",\n arg_9)\n time.sleep(float(arg_9))\n\n arg_4 = arg_0.fetch(arg_3, payload=arg_0.__build_payload(arg_2, arg_1))\n arg_6 = arg_4.json()\n arg_5 = arg_4.text\n arg_8 += arg_6['page_size']\n arg_0.__log_status(arg_6['quota_remaining'],\n arg_6['quota_max'],\n arg_8,\n arg_7)"} +{"_id": "doc_3734", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the StackExchange argument parser.\"\"\"\n\n arg_1 = BackendCommandArgumentParser(arg_0.BACKEND.CATEGORIES,\n from_date=True,\n token_auth=True,\n archive=True)\n\n # StackExchange options\n arg_2 = arg_1.parser.add_argument_group('StackExchange arguments')\n arg_2.add_argument('--site', dest='site',\n required=True,\n help=\"StackExchange site\")\n arg_2.add_argument('--tagged', dest='tagged',\n help=\"filter items by question Tag\")\n arg_2.add_argument('--max-questions', dest='max_questions',\n type=int, default=MAX_QUESTIONS,\n help=\"Maximum number of questions requested in the same query\")\n\n return arg_1"} +{"_id": "doc_3735", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the pages\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['from_date']\n arg_4 = arg_2['reviews_api']\n\n arg_5 = arg_0.client.get_version()\n logger.info(\"MediaWiki version: %s\", arg_5)\n\n if arg_4:\n if ((arg_5[0] == 1 and arg_5[1] >= 27) or arg_5[0] > 1):\n arg_6 = arg_0.__fetch_1_27(arg_3)\n else:\n logger.warning(\"Reviews API only available in MediaWiki >= 1.27\")\n logger.warning(\"Using the Pages API instead\")\n arg_6 = arg_0.__fetch_pre1_27(arg_3)\n else:\n arg_6 = arg_0.__fetch_pre1_27(arg_3)\n\n for arg_7 in arg_6:\n yield arg_7"} +{"_id": "doc_3736", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\"Get the max date in unixtime format from reviews.\"\"\"\n arg_2 = 0\n for arg_3 in arg_1:\n arg_4 = str_to_datetime(arg_3['timestamp'])\n arg_4 = datetime_to_utc(arg_4)\n if arg_4.timestamp() > arg_2:\n arg_2 = arg_4.timestamp()\n return arg_2"} +{"_id": "doc_3737", "title": "", "text": "def Func(arg_0, arg_1, arg_2=''):\n \"\"\"Retrieve recent pages from all namespaces starting from rccontinue.\"\"\"\n\n arg_1.sort()\n arg_3 = {\n \"action\": \"query\",\n \"list\": \"recentchanges\",\n \"rclimit\": arg_0.limit,\n \"rcnamespace\": \"|\".join(arg_1),\n \"rcprop\": \"title|timestamp|ids\",\n \"format\": \"json\"\n }\n if arg_2:\n arg_3['rccontinue'] = arg_2\n\n return arg_0.call(arg_3)"} +{"_id": "doc_3738", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4, arg_5=None):\n \"\"\"Fetch the messages the bot can read from the server.\n\n The method retrieves, from the Telegram server, the messages\n sent with an offset equal or greater than the given.\n\n A list of chats, groups and channels identifiers can be set\n using the parameter `chats`. When it is set, only those\n messages sent to any of these will be returned. 
An empty list\n will return no messages.\n\n :param category: the category of items to Func\n :param offset: obtain messages from this offset\n :param chats: list of chat names used to filter messages\n\n :returns: a generator of messages\n\n :raises ValueError: when `chats` is an empty list\n \"\"\"\n if not arg_3:\n arg_3 = arg_4\n\n arg_6 = {\"offset\": arg_3, \"chats\": arg_5}\n arg_7 = super().Func(arg_1, **arg_6)\n\n return arg_7"} +{"_id": "doc_3739", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check if a message can be filtered based in a list of chats.\n\n This method returns `True` when the message was sent to a chat\n of the given list. It also returns `True` when chats is `None`.\n\n :param message: Telegram message\n :param chats: list of chat, groups and channels identifiers\n\n :returns: `True` when the message can be filtered; otherwise,\n it returns `False`\n \"\"\"\n if arg_2 is None:\n return True\n\n arg_3 = arg_1['message']['chat']['id']\n\n return arg_3 in arg_2"} +{"_id": "doc_3740", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the articles\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['offset']\n\n logger.info(\"Fetching articles of '%s' group on '%s' offset %s\",\n arg_0.group, arg_0.host, str(arg_3))\n\n arg_4, arg_5, arg_6 = (0, 0, 0)\n\n arg_7, arg_7, arg_8, arg_9, arg_7 = arg_0.client.group(arg_0.group)\n\n if arg_3 <= arg_9:\n arg_8 = max(arg_8, arg_3)\n arg_7, arg_10 = arg_0.client.over((arg_8, arg_9))\n else:\n arg_10 = []\n\n arg_6 = len(arg_10)\n\n logger.debug(\"Total number of articles to fetch: %s\", arg_6)\n\n for arg_11, arg_7 in arg_10:\n try:\n arg_12 = arg_0.client.article(arg_11)\n arg_13 = arg_0.__parse_article(arg_12)\n except ParseError:\n logger.warning(\"Error parsing %s article; skipping\",\n arg_11)\n arg_5 += 1\n continue\n except nntplib.NNTPTemporaryError as e:\n logger.warning(\"Error '%s' fetching article %s; skipping\",\n e.response, arg_11)\n arg_5 += 1\n continue\n\n yield arg_13\n arg_4 += 1"} +{"_id": "doc_3741", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"NNTP Func.\n\n This method takes items, overriding `Func` decorator,\n to add extra information related to NNTP.\n\n :param item: an item fetched by a backend\n :param filter_classified: sets if classified fields were filtered\n \"\"\"\n arg_1 = super().Func(arg_1, arg_2=arg_2)\n arg_1['offset'] = arg_1['data']['offset']\n\n return arg_1"} +{"_id": "doc_3742", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a NNTP article.\n\n This method parses a NNTP article stored in a string object\n and returns an dictionary.\n\n :param raw_article: NNTP article string\n\n :returns: a dictionary of type `requests.structures.CaseInsensitiveDict`\n\n :raises ParseError: when an error is found parsing the article\n \"\"\"\n try:\n arg_1 = email.message_from_string(arg_0)\n arg_2 = message_to_dict(arg_1)\n except UnicodeEncodeError as e:\n raise ParseError(cause=str(e))\n return arg_2"} +{"_id": "doc_3743", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fetch NNTP data from the server or from the archive\n\n :param method: the name of the command to execute\n :param args: the arguments required by the command\n \"\"\"\n if arg_0.from_archive:\n arg_3 = arg_0.Func_from_archive(arg_1, arg_2)\n else:\n arg_3 = arg_0.Func_from_remote(arg_1, arg_2)\n\n return arg_3"} +{"_id": "doc_3744", "title": "", "text": "def 
Func(arg_0, arg_1):\n \"\"\"Fetch article data\n\n :param article_id: id of the article to fetch\n \"\"\"\n arg_2 = arg_0.handler.article(arg_1)\n arg_3 = {\n 'number': arg_2[1].number,\n 'message_id': arg_2[1].message_id,\n 'lines': arg_2[1].lines\n }\n\n return arg_3"} +{"_id": "doc_3745", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fetch data from NNTP\n\n :param method: the name of the command to execute\n :param args: the arguments required by the command\n \"\"\"\n try:\n if arg_1 == NNTTPClient.GROUP:\n arg_3 = arg_0.handler.group(arg_2)\n elif arg_1 == NNTTPClient.OVER:\n arg_3 = arg_0.handler.over(arg_2)\n elif arg_1 == NNTTPClient.ARTICLE:\n arg_3 = arg_0._fetch_article(arg_2)\n except nntplib.NNTPTemporaryError as e:\n arg_3 = e\n raise e\n finally:\n if arg_0.archive:\n arg_0.archive.store(arg_1, arg_2, None, arg_3)\n\n return arg_3"} +{"_id": "doc_3746", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fetch data from the archive\n\n :param method: the name of the command to execute\n :param args: the arguments required by the command\n \"\"\"\n if not arg_0.archive:\n raise ArchiveError(cause=\"Archive not provided\")\n\n arg_3 = arg_0.archive.retrieve(arg_1, arg_2, None)\n\n if isinstance(arg_3, nntplib.NNTPTemporaryError):\n raise arg_3\n\n return arg_3"} +{"_id": "doc_3747", "title": "", "text": "def Func(arg_0):\n \"\"\"Create a http session and initialize the retry object.\"\"\"\n\n arg_0.session = requests.Session()\n\n if arg_0.headers:\n arg_0.session.headers.update(arg_0.headers)\n\n arg_2 = urllib3.util.Retry(total=arg_0.max_retries,\n connect=arg_0.max_retries_on_connect,\n read=arg_0.max_retries_on_read,\n redirect=arg_0.max_retries_on_redirect,\n status=arg_0.max_retries_on_status,\n method_whitelist=arg_0.method_whitelist,\n status_forcelist=arg_0.status_forcelist,\n backoff_factor=arg_0.sleep_time,\n raise_on_redirect=arg_0.raise_on_redirect,\n raise_on_status=arg_0.raise_on_status,\n respect_retry_after_header=arg_0.respect_retry_after_header)\n\n arg_0.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=arg_2))\n arg_0.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=arg_2))"} +{"_id": "doc_3748", "title": "", "text": "def Func(arg_0):\n \"\"\"The fetching process sleeps until the rate limit is restored or\n raises a RateLimitError exception if sleep_for_rate flag is disabled.\n \"\"\"\n if arg_0.rate_limit is not None and arg_0.rate_limit <= arg_0.min_rate_to_sleep:\n arg_1 = arg_0.calculate_time_to_reset()\n\n if arg_1 < 0:\n logger.warning(\"Value of sleep for rate limit is negative, reset it to 0\")\n arg_1 = 0\n\n arg_2 = \"Rate limit exhausted.\"\n if arg_0.sleep_for_rate:\n logger.info(\"%s Waiting %i secs for rate limit reset.\", arg_2, arg_1)\n time.sleep(arg_1)\n else:\n raise RateLimitError(arg_2=arg_2, arg_1=arg_1)"} +{"_id": "doc_3749", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a Supybot IRC stream.\n\n Returns an iterator of dicts. 
Each dicts contains information\n about the date, type, nick and body of a single log entry.\n\n :returns: iterator of Funcd lines\n\n :raises ParseError: when an invalid line is found parsing the given\n stream\n \"\"\"\n for arg_1 in arg_0.stream:\n arg_1 = arg_1.rstrip('\\n')\n arg_0.nline += 1\n\n if arg_0.SUPYBOT_EMPTY_REGEX.match(arg_1):\n continue\n\n arg_2, arg_3 = arg_0._Func_supybot_timestamp(arg_1)\n\n if arg_0.SUPYBOT_EMPTY_COMMENT_REGEX.match(arg_3):\n continue\n elif arg_0.SUPYBOT_EMPTY_COMMENT_ACTION_REGEX.match(arg_3):\n continue\n elif arg_0.SUPYBOT_EMPTY_BOT_REGEX.match(arg_3):\n continue\n\n arg_4, arg_5, arg_6 = arg_0._Func_supybot_msg(arg_3)\n arg_7 = arg_0._build_item(arg_2, arg_4, arg_5, arg_6)\n\n yield arg_7"} +{"_id": "doc_3750", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse timestamp section\"\"\"\n\n arg_2 = arg_0.SUPYBOT_TIMESTAMP_REGEX.match(arg_1)\n\n if not arg_2:\n arg_3 = \"date expected on line %s\" % (str(arg_0.nline))\n raise ParseError(cause=arg_3)\n\n arg_4 = arg_2.group('ts')\n arg_3 = arg_2.group('msg')\n\n return arg_4, arg_3"} +{"_id": "doc_3751", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse message section\"\"\"\n\n arg_2 = [(arg_0.SUPYBOT_COMMENT_REGEX, arg_0.TCOMMENT),\n (arg_0.SUPYBOT_COMMENT_ACTION_REGEX, arg_0.TCOMMENT),\n (arg_0.SUPYBOT_SERVER_REGEX, arg_0.TSERVER),\n (arg_0.SUPYBOT_BOT_REGEX, arg_0.TCOMMENT)]\n\n for arg_3 in arg_2:\n arg_4 = arg_3[0].match(arg_1)\n if not arg_4:\n continue\n return arg_3[1], arg_4.group('nick'), arg_4.group('body').strip()\n\n arg_5 = \"invalid message on line %s\" % (str(arg_0.nline))\n raise ParseError(cause=arg_5)"} +{"_id": "doc_3752", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the topics\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n\n arg_3 = arg_2['from_date']\n\n logger.info(\"Looking for topics at '%s', updated from '%s'\",\n arg_0.url, str(arg_3))\n\n arg_4 = 0\n\n arg_5 = arg_0.__fetch_and_parse_topics_ids(arg_3)\n\n for arg_6 in arg_5:\n arg_7 = arg_0.__fetch_and_parse_topic(arg_6)\n arg_4 += 1\n yield arg_7\n\n logger.info(\"Fetch process completed: %s topics fetched\",\n arg_4)"} +{"_id": "doc_3753", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse a topics page stream.\n\n The result of parsing process is a generator of tuples. Each\n tuple contains de identifier of the topic, the last date\n when it was updated and whether is pinned or not.\n\n :param raw_json: JSON stream to parse\n\n :returns: a generator of parsed bugs\n \"\"\"\n arg_2 = json.loads(arg_1)\n\n arg_3 = []\n\n for arg_4 in arg_2['topic_list']['topics']:\n arg_5 = arg_4['id']\n if arg_4['last_posted_at'] is None:\n logger.warning(\"Topic %s with last_posted_at null. 
Ignoring it.\", arg_4['title'])\n continue\n arg_6 = str_to_datetime(arg_4['last_posted_at'])\n arg_7 = arg_4['pinned']\n arg_3.append((arg_5, arg_6, arg_7))\n\n return arg_3"} +{"_id": "doc_3754", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve the Func whit `Func_id` identifier.\n\n :param Func_id: identifier of the Func to retrieve\n \"\"\"\n arg_2 = {\n arg_0.PKEY: arg_0.api_key\n }\n\n # http://example.com/Funcs/10.json\n arg_3 = arg_0._call(arg_0.POSTS, arg_1,\n arg_2=arg_2)\n\n return arg_3"} +{"_id": "doc_3755", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fetch the tasks\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items\n \"\"\"\n arg_3 = arg_2['from_date']\n\n logger.info(\"Fetching tasks of '%s' from %s\", arg_0.url, str(arg_3))\n\n arg_4 = 0\n\n for arg_5 in arg_0.__fetch_tasks(arg_3):\n yield arg_5\n arg_4 += 1\n\n logger.info(\"Fetch process completed: %s tasks fetched\", arg_4)"} +{"_id": "doc_3756", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a Phabricator tasks JSON stream.\n\n The method parses a JSON stream and returns a list iterator.\n Each item is a dictionary that contains the task parsed data.\n\n :param raw_json: JSON string to parse\n\n :returns: a generator of parsed tasks\n \"\"\"\n arg_1 = json.loads(arg_0)\n\n arg_2 = arg_1['result']['data']\n for arg_3 in arg_2:\n yield arg_3"} +{"_id": "doc_3757", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a Phabricator users JSON stream.\n\n The method parses a JSON stream and returns a list iterator.\n Each item is a dictionary that contais the user parsed data.\n\n :param raw_json: JSON string to parse\n\n :returns: a generator of parsed users\n \"\"\"\n arg_1 = json.loads(arg_0)\n\n arg_2 = arg_1['result']\n for arg_3 in arg_2:\n yield arg_3"} +{"_id": "doc_3758", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Retrieve Func.\n\n :param from_date: retrieve Func that where updated from that date;\n dates are converted epoch time.\n \"\"\"\n # Convert 'from_date' to epoch timestamp.\n # Zero value (1970-01-01 00:00:00) is not allowed for\n # 'modifiedStart' so it will be set to 1, by default.\n arg_3 = int(datetime_to_utc(arg_1).timestamp()) or 1\n\n arg_4 = {\n arg_0.PMODIFIED_START: arg_3\n }\n\n arg_5 = {\n arg_0. PPROJECTS: True\n }\n\n arg_6 = {\n arg_0.PCONSTRAINTS: arg_4,\n arg_0.PATTACHMENTS: arg_5,\n arg_0.PORDER: arg_0.VOUTDATED,\n }\n\n while True:\n arg_7 = arg_0._call(arg_0.MANIPHEST_TASKS, arg_6)\n yield arg_7\n arg_8 = json.loads(arg_7)\n arg_9 = arg_8['result']['cursor']['after']\n if not arg_9:\n break\n arg_6[arg_0.PAFTER] = arg_9"} +{"_id": "doc_3759", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Retrieve tasks Func.\n\n :param phids: list of tasks identifiers\n \"\"\"\n arg_2 = {\n arg_0.PIDS: arg_1\n }\n\n arg_3 = arg_0._call(arg_0.MANIPHEST_TRANSACTIONS, arg_2)\n\n return arg_3"} +{"_id": "doc_3760", "title": "", "text": "def Func(arg_0):\n \"\"\"Extracts the identifier from a Confluence item.\n\n This identifier will be the mix of two fields because a\n historical content does not have any unique identifier.\n In this case, 'id' and 'version' values are combined because\n it should not be possible to have two equal version numbers\n for the same content. 
The value to return will follow the\n pattern: #v (i.e 28979#v10).\n \"\"\"\n arg_1 = arg_0['id']\n arg_2 = arg_0['version']['number']\n\n return str(arg_1) + '#v' + str(arg_2)"} +{"_id": "doc_3761", "title": "", "text": "def Func(arg_0):\n ''' Parse the result property, extracting the value\n and unit of measure '''\n if arg_0.result is not None:\n arg_1 = testXMLAttribute(arg_0.result, \"uom\")\n arg_2 = testXMLValue(arg_0.result)\n try:\n arg_3 = float(arg_2)\n except:\n raise ValueError(\"Error parsing measurement value\")\n arg_0.result = Measurement(arg_3, arg_1)"} +{"_id": "doc_3762", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a capabilities url\n \"\"\"\n arg_2 = []\n if arg_1.find('?') != -1:\n arg_2 = cgi.parse_qsl(arg_1.split('?')[1])\n\n arg_3 = [x[0] for x in arg_2]\n\n if 'service' not in arg_3:\n arg_2.append(('service', 'WFS'))\n if 'request' not in arg_3:\n arg_2.append(('request', 'GetCapabilities'))\n if 'version' not in arg_3:\n arg_2.append(('version', arg_0.version))\n\n arg_4 = urlencode(tuple(arg_2))\n return arg_1.split('?')[0] + '?' + arg_4"} +{"_id": "doc_3763", "title": "", "text": "def Func(arg_0, arg_1, arg_2=30):\n \"\"\"Get and parse a WFS capabilities document, returning an\n instance of WFSCapabilitiesInfoset\n\n Parameters\n ----------\n url : string\n The URL to the WFS capabilities document.\n timeout : number\n A timeout value (in seconds) for the request.\n \"\"\"\n arg_3 = arg_0.capabilities_url(arg_1)\n arg_4 = openURL(arg_3, arg_2=arg_2,\n username=arg_0.username, password=arg_0.password)\n return etree.fromstring(arg_4.Func())"} +{"_id": "doc_3764", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse a WFS capabilities document, returning an\n instance of WFSCapabilitiesInfoset\n\n string should be an XML capabilities document\n \"\"\"\n if not isinstance(arg_1, str) and not isinstance(arg_1, bytes):\n raise ValueError(\"String must be of type string or bytes, not %s\" % type(arg_1))\n return etree.fromstring(arg_1)"} +{"_id": "doc_3765", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n helper function to build a WFS 3.0 URL\n\n @type path: string\n @param path: path of WFS URL\n\n @returns: fully constructed URL path\n \"\"\"\n\n arg_2 = arg_0.url\n if arg_0.url_query_string is not None:\n LOGGER.debug('base URL has a query string')\n arg_2 = urljoin(arg_2, arg_1)\n arg_2 = '?'.join([arg_2, arg_0.url_query_string])\n else:\n arg_2 = urljoin(arg_2, arg_1)\n\n LOGGER.debug('URL: {}'.format(arg_2))\n return arg_2"} +{"_id": "doc_3766", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Consruct fiona schema based on given elements\n\n :param list Element: list of elements\n :param dict nsmap: namespace map\n\n :return dict: schema\n \"\"\"\n\n arg_2 = {\n 'properties': {},\n 'geometry': None\n }\n\n arg_3 = None\n arg_4 = None\n\n # if nsmap is defined, use it\n if arg_1:\n for arg_5 in arg_1:\n if arg_1[arg_5] == XS_NAMESPACE:\n arg_3 = arg_5\n if arg_1[arg_5] in GML_NAMESPACES:\n arg_4 = arg_5\n # if no nsmap is defined, we have to guess\n else:\n arg_4 = 'gml'\n arg_3 = 'xsd'\n\n arg_6 = {\n 'PointPropertyType': 'Point',\n 'PolygonPropertyType': 'Polygon',\n 'LineStringPropertyType': 'LineString',\n 'MultiPointPropertyType': 'MultiPoint',\n 'MultiLineStringPropertyType': 'MultiLineString',\n 'MultiPolygonPropertyType': 'MultiPolygon',\n 'MultiGeometryPropertyType': 'MultiGeometry',\n 'GeometryPropertyType': 'GeometryCollection',\n 'SurfacePropertyType': '3D Polygon',\n 'MultiSurfacePropertyType': '3D
MultiPolygon'\n }\n\n for arg_7 in arg_0:\n arg_8 = arg_7.attrib['type'].replace(arg_4 + ':', '')\n arg_9 = arg_7.attrib['name']\n\n if arg_8 in arg_6:\n arg_2['geometry'] = arg_6[arg_8]\n arg_2['geometry_column'] = arg_9\n else:\n arg_2['properties'][arg_9] = arg_8.replace(arg_3+':', '')\n\n if arg_2['properties'] or arg_2['geometry']:\n return arg_2\n else:\n return None"} +{"_id": "doc_3767", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get url for describefeaturetype request\n\n :return str: url\n \"\"\"\n\n arg_3 = []\n if arg_0.find('?') != -1:\n arg_3 = cgi.parse_qsl(arg_0.split('?')[1])\n\n arg_4 = [x[0] for x in arg_3]\n\n if 'service' not in arg_4:\n arg_3.append(('service', 'WFS'))\n if 'request' not in arg_4:\n arg_3.append(('request', 'DescribeFeatureType'))\n if 'version' not in arg_4:\n arg_3.append(('version', arg_1))\n\n arg_3.append(('typeName', arg_2))\n\n arg_5 = urlencode(tuple(arg_3))\n return arg_0.split('?')[0] + '?' + arg_5"} +{"_id": "doc_3768", "title": "", "text": "def Func():\n \"\"\"\n use ComplexDataInput with a reference to a document\n \"\"\"\n \n print(\"\\nFunc ...\")\n\n arg_0 = WebProcessingService('http://localhost:8094/wps', verbose=verbose)\n\n arg_1 = 'wordcount'\n arg_2 = ComplexDataInput(\"http://www.gutenberg.org/files/28885/28885-h/28885-h.htm\") # alice in wonderland\n arg_3 = [(\"text\", arg_2)]\n # list of tuple (output identifier, asReference attribute, mimeType attribute)\n # when asReference or mimeType is None - the wps service will use its default option\n arg_4 = [(\"output\",True,'some/mime-type')]\n\n arg_5 = arg_0.execute(arg_1, arg_3, arg_6=arg_4)\n monitorExecution(arg_5)\n\n # show status\n print('percent complete', arg_5.percentCompleted)\n print('status message', arg_5.statusMessage)\n\n for arg_6 in arg_5.processOutputs:\n print('identifier=%s, dataType=%s, data=%s, reference=%s' % (arg_6.identifier, arg_6.dataType, arg_6.data, arg_6.reference))"} +{"_id": "doc_3769", "title": "", "text": "def Func(arg_0):\n \"\"\"A URL that can be used to open the page.\n\n The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then\n appended to :py:attr:`base_url` unless the template results in an\n absolute URL.\n\n :return: URL that can be used to open the page.\n :rtype: str\n\n \"\"\"\n arg_1 = arg_0.base_url\n if arg_0.URL_TEMPLATE is not None:\n arg_1 = urlparse.urljoin(\n arg_0.base_url, arg_0.URL_TEMPLATE.format(**arg_0.url_kwargs)\n )\n\n if not arg_1:\n return None\n\n arg_2 = list(urlparse.urlparse(arg_1))\n arg_3 = urlparse.parse_qsl(arg_2[4])\n\n for arg_4, arg_5 in arg_0.url_kwargs.items():\n if arg_5 is None:\n continue\n if \"{{{}}}\".format(arg_4) not in str(arg_0.URL_TEMPLATE):\n for arg_6 in iterable(arg_5):\n arg_3.append((arg_4, arg_6))\n\n arg_2[4] = urlencode(arg_3)\n return urlparse.urlunparse(arg_2)"} +{"_id": "doc_3770", "title": "", "text": "def Func(arg_0):\n \"\"\"Open the page.\n\n Navigates to :py:attr:`seed_url` and calls :py:func:`wait_for_page_to_load`.\n\n :return: The current page object.\n :rtype: :py:class:`Page`\n :raises: UsageError\n\n \"\"\"\n if arg_0.seed_url:\n arg_0.driver_adapter.Func(arg_0.seed_url)\n arg_0.wait_for_page_to_load()\n return arg_0\n raise UsageError(\"Set a base URL or URL_TEMPLATE to Func this page.\")"} +{"_id": "doc_3771", "title": "", "text": "def Func(arg_0):\n \"\"\"Root element for the page region.\n\n Page regions should define a Func element either by passing this on\n instantiation or by defining a :py:attr:`_Func_locator` attribute. 
To\n reduce the chances of hitting :py:class:`~selenium.common.exceptions.StaleElementReferenceException`\n or similar you should use :py:attr:`_Func_locator`, as this is looked up every\n time the :py:attr:`Func` property is accessed.\n \"\"\"\n if arg_0._Func is None and arg_0._Func_locator is not None:\n return arg_0.page.find_element(*arg_0._Func_locator)\n return arg_0._Func"} +{"_id": "doc_3772", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Finds an element on the page.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: An element.\n :rytpe: :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.driver.webdriver.WebDriverElement`\n\n \"\"\"\n return arg_0.driver_adapter.Func(arg_1, arg_2, root=arg_0.root)"} +{"_id": "doc_3773", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Finds elements on the page.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target elements.\n :type strategy: str\n :type locator: str\n :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList`\n :rtype: list\n\n \"\"\"\n return arg_0.driver_adapter.Func(arg_1, arg_2, root=arg_0.root)"} +{"_id": "doc_3774", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks whether an element is present.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is present, else ``False``.\n :rtype: bool\n\n \"\"\"\n return arg_0.driver_adapter.Func(arg_1, arg_2, root=arg_0.root)"} +{"_id": "doc_3775", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks whether an element is displayed.\n\n :param strategy: Location strategy to use. 
See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is displayed, else ``False``.\n :rtype: bool\n\n \"\"\"\n return arg_0.driver_adapter.Func(\n arg_1, arg_2, root=arg_0.root\n )"} +{"_id": "doc_3776", "title": "", "text": "def Func(arg_0, arg_1, arg_2=[]):\n \"\"\" Register driver adapter used by page object\"\"\"\n for arg_3 in arg_2:\n classImplements(arg_3, arg_0)\n\n component.provideAdapter(factory=arg_1, adapts=[arg_0], provides=IDriver)"} +{"_id": "doc_3777", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the list of TV genres.\n\n Args:\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3778", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the cast and crew information for a specific movie id.\n\n Args:\n append_to_response: (optional) Comma separated, any movie method.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3779", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the plot Func for a specific movie id.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_1 = arg_0._get_id_path('Func')\n\n arg_2 = arg_0._GET(arg_1)\n arg_0._set_attrs_to_values(arg_2)\n return arg_2"} +{"_id": "doc_3780", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the release dates and certification for a specific movie id.\n\n Args:\n append_to_response: (optional) Comma separated, any movie method.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3781", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the Func for a specific movie id.\n\n Args:\n append_to_response: (optional) Comma separated, any movie method.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3782", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the similar movies for a specific movie id.\n\n Args:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any movie method.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3783", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the Func for a particular movie id.\n\n Args:\n page: (optional) Minimum value of 1. 
Expected value is an integer.\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any movie method.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3784", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the list of Func movies. This list refreshes every day.\n The maximum number of items this list will include is 100.\n\n Args:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3785", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the list of movies playing in theatres. This list refreshes\n every day. The maximum number of items this list will include is 100.\n\n Args:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3786", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the list of Func movies on The Movie Database. This list\n refreshes every day.\n\n Args:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3787", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the list of top rated movies. By default, this list will only\n include movies that have 10 or more votes. This list refreshes every\n day.\n\n Args:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3788", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n This method lets users get the status of whether or not the movie has\n been rated or added to their favourite or watch lists. A valid session\n id is required.\n\n Args:\n session_id: see Authentication.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3789", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n This method lets users rate a movie. 
A valid session id or guest\n session id is required.\n\n Args:\n session_id: see Authentication.\n guest_session_id: see Authentication.\n value: Rating value.\n\n Returns:\n A dict representation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = {\n 'value': arg_1.pop('value', None),\n }\n\n arg_4 = arg_0._POST(arg_2, arg_1, arg_3)\n arg_0._set_attrs_to_values(arg_4)\n return arg_4"} +{"_id": "doc_3790", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the movie credits for a specific person id.\n\n Args:\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any person method.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3791", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the detailed Funcrmation about a particular credit record. This is \n currently only supported with the new credit model found in TV. These \n ids can be found from any TV credit response as well as the tv_credits \n and combined_credits methods for people.\n\n The episodes object returns a list of episodes and are generally going \n to be guest stars. The season array will return a list of season \n numbers. Season credits are credits that were marked with the \n \"add to every season\" option in the editing interface and are \n assumed to be \"season regulars\".\n\n Args:\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_credit_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3792", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Discover TV shows by different types of data like average rating, \n number of votes, genres, the network they aired on and air dates.\n\n Args:\n page: (optional) Minimum 1, maximum 1000.\n language: (optional) ISO 639-1 code.\n sort_by: (optional) Available options are 'vote_average.desc', \n 'vote_average.asc', 'first_air_date.desc', \n 'first_air_date.asc', 'popularity.desc', 'popularity.asc'\n first_air_year: (optional) Filter the results release dates to \n matches that include this value. Expected value \n is a year.\n vote_count.gte or vote_count_gte: (optional) Only include TV shows \n that are equal to,\n or have vote count higher than this value. Expected\n value is an integer.\n vote_average.gte or vote_average_gte: (optional) Only include TV \n shows that are equal \n to, or have a higher average rating than this \n value. Expected value is a float.\n with_genres: (optional) Only include TV shows with the specified \n genres. Expected value is an integer (the id of a \n genre). Multiple valued can be specified. Comma \n separated indicates an 'AND' query, while a \n pipe (|) separated value indicates an 'OR'.\n with_networks: (optional) Filter TV shows to include a specific \n network. Expected value is an integer (the id of a\n network). They can be comma separated to indicate an\n 'AND' query.\n first_air_date.gte or first_air_date_gte: (optional) The minimum \n release to include. \n Expected format is 'YYYY-MM-DD'.\n first_air_date.lte or first_air_date_lte: (optional) The maximum \n release to include. 
\n Expected format is 'YYYY-MM-DD'.\n \n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n # Periods are not allowed in keyword arguments but several API \n # arguments contain periods. See both usages in tests/test_discover.py.\n for arg_2 in arg_1:\n if '_lte' in arg_2:\n arg_1[arg_2.replace('_lte', '.lte')] = arg_1.pop(arg_2)\n if '_gte' in arg_2:\n arg_1[arg_2.replace('_gte', '.gte')] = arg_1.pop(arg_2)\n \n arg_4 = arg_0._get_path('Func')\n\n arg_5 = arg_0._GET(arg_4, arg_1)\n arg_0._set_attrs_to_values(arg_5)\n return arg_5"} +{"_id": "doc_3793", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the system wide configuration Func.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3794", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the Func of supported certifications for movies.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('movie_Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3795", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the basic Funcrmation for an account.\n\n Call this method first, before calling other Account methods.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n arg_1.update({'session_id': arg_0.session_id})\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0.id = arg_3['id']\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3796", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Generate a session id for user based authentication.\n\n A session id is required in order to use any of the write methods.\n\n Args:\n request_token: The token you generated for the user to approve.\n The token needs to be approved before being\n used here.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3797", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Generate a guest session id.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3798", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get a list of rated moview for a specific guest session id.\n\n Args:\n page: (optional) Minimum 1, maximum 1000.\n sort_by: (optional) 'created_at.asc' | 'created_at.desc'\n language: (optional) ISO 639-1 code.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_guest_session_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3799", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Delete movies from a list that the user created.\n\n A valid session id is required.\n\n Args:\n media_id: A movie id.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n arg_1.update({'session_id': arg_0.session_id})\n\n arg_3 = {\n 'media_id': arg_1.pop('media_id', None), \n }\n\n arg_4 = arg_0._POST(arg_2, arg_1, 
arg_3)\n arg_0._set_attrs_to_values(arg_4)\n return arg_4"} +{"_id": "doc_3800", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the Func TV series for a specific TV series id.\n\n Args:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any TV method.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_id_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3801", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the list of TV shows that are currently on the air. This query\n looks for any TV show that has an episode with an air date in the\n next 7 days.\n\n Args:\n page: (optional) Minimum 1, maximum 1000.\n language: (optional) ISO 639 code.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3802", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the primary Funcrmation about a TV season by its season number.\n\n Args:\n language: (optional) ISO 639 code.\n append_to_response: (optional) Comma separated, any TV series\n method.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_series_id_season_number_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3803", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the external ids that we have stored for a TV season by season\n number.\n\n Args:\n language: (optional) ISO 639 code.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_series_id_season_number_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3804", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the primary Funcrmation about a TV episode by combination of a\n season and episode number.\n\n Args:\n language: (optional) ISO 639 code.\n append_to_response: (optional) Comma separated, any TV series\n method.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_series_id_season_number_episode_number_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3805", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the TV episode Func by combination of season and episode number.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_series_id_season_number_episode_number_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3806", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get the external ids for a TV episode by combination of a season and\n episode number.\n\n Args:\n language: (optional) ISO 639 code.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_series_id_season_number_episode_number_path(\n 'Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3807", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Search for Funcs by title.\n\n 
Args:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n include_adult: (optional) Toggle the inclusion of adult titles. \n Expected value is True or False.\n year: (optional) Filter the results release dates to matches that \n include this value.\n primary_release_year: (optional) Filter the results so that only \n the primary release dates have this value.\n search_type: (optional) By default, the search type is 'phrase'. \n This is almost guaranteed the option you will want. \n It's a great all purpose search type and by far the \n most tuned for every day querying. For those wanting \n more of an \"autocomplete\" type search, set this \n option to 'ngram'.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3808", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Search for people by name.\n\n Args:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n include_adult: (optional) Toggle the inclusion of adult titles. \n Expected value is True or False.\n search_type: (optional) By default, the search type is 'phrase'. \n This is almost guaranteed the option you will want. \n It's a great all purpose search type and by far the \n most tuned for every day querying. For those wanting \n more of an \"autocomplete\" type search, set this \n option to 'ngram'.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3809", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Search for companies by name.\n\n Args:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3810", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Search the movie, tv show and person collections with a single query.\n\n Args:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. 
Expected value is an integer.\n language: (optional) ISO 639-1 code.\n include_adult: (optional) Toggle the inclusion of adult titles.\n Expected value is True or False.\n\n Returns:\n A dict respresentation of the JSON returned from the API.\n \"\"\"\n arg_2 = arg_0._get_path('Func')\n\n arg_3 = arg_0._GET(arg_2, arg_1)\n arg_0._set_attrs_to_values(arg_3)\n return arg_3"} +{"_id": "doc_3811", "title": "", "text": "def Func(arg_0, arg_1):\n r'-?\\d+'\n arg_1.value = int(arg_1.value)\n arg_1.type = 'NUMBER'\n return arg_1"} +{"_id": "doc_3812", "title": "", "text": "def Func(arg_0, arg_1):\n r'\\}'\n arg_1.lexer.braces -= 1\n\n if arg_1.lexer.braces == 0:\n # End of the dollar brace, back to the rest of the string\n arg_1.lexer.begin('string')"} +{"_id": "doc_3813", "title": "", "text": "def Func(arg_0, arg_1):\n r'<<\\S+\\r?\\n'\n arg_1.lexer.is_tabbed = False\n arg_0._iniFunc(arg_1)\n arg_1.lexer.begin('heredoc')"} +{"_id": "doc_3814", "title": "", "text": "def Func():\n '''Initialize the parse table at install time'''\n\n # Generate the parsetab.dat file at setup time\n arg_0 = join(setup_dir, 'src', 'hcl', 'parsetab.dat')\n if exists(arg_0):\n os.unlink(arg_0)\n\n sys.path.insert(0, join(setup_dir, 'src'))\n\n import hcl\n from hcl.parser import HclParser\n\n arg_1 = HclParser()"} +{"_id": "doc_3815", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Add another row of data from a test suite\"\"\"\n arg_0.rows.Func(Row(arg_1, arg_2, arg_3))"} +{"_id": "doc_3816", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Return an instance of SuiteFile, ResourceFile, SuiteFolder\n\n Exactly which is returned depends on whether it's a file or\n folder, and if a file, the contents of the file. If there is a\n testcase table, this will return an instance of SuiteFile,\n otherwise it will return an instance of ResourceFile.\n '''\n\n if os.path.isdir(arg_0):\n return SuiteFolder(arg_0, arg_1)\n\n else:\n arg_2 = RobotFile(arg_0, arg_1)\n\n for arg_3 in arg_2.tables:\n if isinstance(arg_3, TestcaseTable):\n arg_2.__class__ = SuiteFile\n return arg_2\n\n arg_2.__class__ = ResourceFile\n return arg_2"} +{"_id": "doc_3817", "title": "", "text": "def Func(arg_0, arg_1):\n\n '''\n The general idea is to do a quick parse, creating a list of\n tables. Each table is nothing more than a list of rows, with\n each row being a list of cells. Additional parsing such as\n combining rows into statements is done on demand. This first\n pass is solely to read in the plain text and organize it by table.\n '''\n\n arg_0.tables = []\n arg_3 = DefaultTable(arg_0)\n\n with Utf8Reader(arg_1) as f:\n # N.B. the caller should be catching errors\n arg_0.raw_text = f.read()\n\n f._file.seek(0) # bleh; wish this wasn't a private property\n arg_5 = Matcher(re.IGNORECASE)\n for arg_6, arg_4 in enumerate(f.readlines()):\n arg_6 += 1; # start counting at 1 rather than zero\n\n # this mimics what the robot TSV reader does --\n # it replaces non-breaking spaces with regular spaces,\n # and then strips trailing whitespace\n arg_4 = arg_4.replace(u'\\xA0', ' ')\n arg_4 = arg_4.rstrip()\n\n # FIXME: I'm keeping line numbers but throwing away\n # where each cell starts. 
I should be preserving that\n # (though to be fair, robot is throwing that away so\n # I'll have to write my own splitter if I want to save\n # the character position)\n arg_7 = TxtReader.split_row(arg_4)\n arg_8 = r'^\\s*\\*+\\s*(.*?)[ *]*$'\n\n if arg_5(arg_8, arg_7[0]):\n # we've found the start of a new table\n arg_9 = arg_5.group(1)\n arg_3 = tableFactory(arg_0, arg_6, arg_9, arg_4)\n arg_0.tables.append(arg_3)\n else:\n arg_3.append(Row(arg_6, arg_4, arg_7))"} +{"_id": "doc_3818", "title": "", "text": "def Func(arg_0):\n '''Return 'suite' or 'resource' or None\n\n This will return 'suite' if a testcase table is found;\n It will return 'resource' if at least one robot table\n is found. If no tables are found it will return None\n '''\n\n arg_1 = [arg_2 for arg_2 in arg_0.tables if not isinstance(arg_2, UnknownTable)]\n if len(arg_1) == 0:\n return None\n\n for arg_2 in arg_0.tables:\n if isinstance(arg_2, TestcaseTable):\n return \"suite\"\n\n return \"resource\""} +{"_id": "doc_3819", "title": "", "text": "def Func(arg_0):\n '''Generator which returns all Func in the suite'''\n for arg_1 in arg_0.tables:\n if isinstance(arg_1, KeywordTable):\n for arg_2 in arg_1.Func:\n yield arg_2"} +{"_id": "doc_3820", "title": "", "text": "def Func(arg_0):\n '''Regurgitate the tables and rows'''\n for arg_1 in arg_0.tables:\n print(\"*** %s ***\" % arg_1.name)\n arg_1.Func()"} +{"_id": "doc_3821", "title": "", "text": "def Func(arg_0):\n '''Generator which returns all of the statements in all of the Func tables'''\n for arg_1 in arg_0.tables:\n if isinstance(arg_1, VariableTable):\n # FIXME: settings have statements, Func have rows WTF? :-(\n for arg_2 in arg_1.rows:\n if arg_2[0] != \"\":\n yield arg_2"} +{"_id": "doc_3822", "title": "", "text": "def Func(arg_0, arg_1):\n ''' \n The idea is, we recognize when we have a new testcase by \n checking the first cell. If it's not empty and not a comment, \n we have a new test case.\n\n '''\n if len(arg_1) == 0:\n # blank line. Should we throw it away, or Func a BlankLine object?\n return\n\n if (arg_1[0] != \"\" and \n (not arg_1[0].lstrip().startswith(\"#\"))):\n # we have a new child table\n arg_0._children.Func(arg_0._childClass(arg_0.parent, arg_1.linenumber, arg_1[0]))\n if len(arg_1.cells) > 1:\n # It appears the first row -- which contains the test case or\n # keyword name -- also has the first logical row of cells.\n # We'll create a Row, but we'll make the first cell empty instead\n # of leaving the name in it, since other code always assumes the\n # first cell is empty. \n #\n # To be honest, I'm not sure this is the Right Thing To Do, but \n # I'm too lazy to audit the code to see if it matters if we keep \n # the first cell intact. 
Sorry if this ends up causing you grief\n # some day...\n arg_1[0] = \"\"\n arg_0._children[-1].Func(arg_1.linenumber, arg_1.raw_text, arg_1.cells)\n\n elif len(arg_0._children) == 0:\n # something before the first test case\n # For now, Func it to self.comments; eventually we should flag\n # an error if it's NOT a comment\n arg_0.comments.Func(arg_1)\n\n else:\n # another row for the testcase\n if len(arg_1.cells) > 0:\n arg_0._children[-1].Func(arg_1.linenumber, arg_1.raw_text, arg_1.cells)"} +{"_id": "doc_3823", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse command line arguments, and Func rflint\"\"\"\n\n arg_0.args = arg_0.parse_and_process_args(arg_1)\n\n if arg_0.args.version:\n print(__version__)\n return 0\n \n if arg_0.args.rulefile:\n for arg_2 in arg_0.args.rulefile:\n arg_0._load_rule_file(arg_2)\n\n if arg_0.args.list:\n arg_0.list_rules()\n return 0\n \n if arg_0.args.describe:\n arg_0._describe_rules(arg_0.args.args)\n return 0\n\n arg_0.counts = { ERROR: 0, WARNING: 0, \"other\": 0}\n \n for arg_2 in arg_0.args.args:\n if not (os.path.exists(arg_2)):\n sys.stderr.write(\"rflint: %s: No such file or directory\\n\" % arg_2)\n continue\n if os.path.isdir(arg_2):\n arg_0._process_folder(arg_2)\n else:\n arg_0._process_file(arg_2)\n\n if arg_0.counts[ERROR] > 0:\n return arg_0.counts[ERROR] if arg_0.counts[ERROR] < 254 else 255\n\n return 0"} +{"_id": "doc_3824", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"Report a rule violation\"\"\"\n\n if arg_0._print_filename is not None:\n # we print the filename only once. self._print_filename\n # will get reset each time a new file is processed.\n print(\"+ \" + arg_0._print_filename)\n arg_0._print_filename = None\n\n if arg_3 in (WARNING, ERROR):\n arg_0.counts[arg_3] += 1\n else:\n arg_0.counts[\"other\"] += 1\n\n print(arg_0.args.format.format(arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_4=arg_4.encode('utf-8'),\n arg_5=arg_5, arg_6=arg_6))"} +{"_id": "doc_3825", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a list of rules of a given class\n \n Rules are treated as singletons - we only instantiate each\n rule once. \n \"\"\"\n\n arg_2 = []\n for arg_3 in arg_1.__subclasses__():\n arg_4 = arg_3.__name__.lower()\n if arg_4 not in arg_0._rules:\n arg_5 = arg_3(arg_0)\n arg_0._rules[arg_4] = arg_5\n arg_2.append(arg_0._rules[arg_4])\n return arg_2"} +{"_id": "doc_3826", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Import the given rule file\"\"\"\n if not (os.path.exists(arg_1)):\n sys.stderr.write(\"rflint: %s: No such file or directory\\n\" % arg_1)\n return\n try:\n arg_2 = os.path.basename(arg_1)\n (arg_3, arg_4) = os.path.splitext(arg_2)\n imp.load_source(arg_3, arg_1)\n except Exception as e:\n sys.stderr.write(\"rflint: %s: exception while loading: %s\\n\" % (arg_1, str(e)))"} +{"_id": "doc_3827", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handle the parsing of command line arguments.\"\"\"\n\n arg_2 = argparse.ArgumentParser(\n prog=\"python -m rflint\",\n description=\"A style checker for robot framework plain text files.\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog = (\n \"You can use 'all' in place of RULENAME to refer to all rules. 
\\n\"\n \"\\n\"\n \"For example: '--ignore all --warn DuplicateTestNames' will ignore all\\n\"\n \"rules except DuplicateTestNames.\\n\"\n \"\\n\"\n \"FORMAT is a string that performs a substitution on the following \\n\"\n \"patterns: {severity}, {linenumber}, {char}, {message}, and {rulename}.\\n\"\n \"\\n\"\n \"For example: --format 'line: {linenumber}: message: {message}'. \\n\"\n \"\\n\"\n \"ARGUMENTFILE is a filename with contents that match the format of \\n\"\n \"standard robot framework argument files\\n\"\n \"\\n\"\n \"If you give a directory as an argument, all files in the directory\\n\"\n \"with the suffix .txt, .robot or .tsv will be processed. With the \\n\"\n \"--recursive option, subfolders within the directory will also be\\n\"\n \"processed.\"\n )\n )\n arg_2.add_argument(\"--error\", \"-e\", metavar=\"RULENAME\", action=SetErrorAction,\n help=\"Assign a severity of ERROR to the given RULENAME\")\n arg_2.add_argument(\"--ignore\", \"-i\", metavar=\"RULENAME\", action=SetIgnoreAction,\n help=\"Ignore the given RULENAME\")\n arg_2.add_argument(\"--warning\", \"-w\", metavar=\"RULENAME\", action=SetWarningAction,\n help=\"Assign a severity of WARNING for the given RULENAME\")\n arg_2.add_argument(\"--list\", \"-l\", action=\"store_true\",\n help=\"show a list of known rules and exit\")\n arg_2.add_argument(\"--describe\", \"-d\", action=\"store_true\",\n help=\"describe the given rules\")\n arg_2.add_argument(\"--no-filenames\", action=\"store_false\", dest=\"print_filenames\", \n default=True,\n help=\"suppress the printing of filenames\")\n arg_2.add_argument(\"--format\", \"-f\", \n help=\"Define the output format\",\n default='{severity}: {linenumber}, {char}: {message} ({rulename})')\n arg_2.add_argument(\"--version\", action=\"store_true\", default=False,\n help=\"Display version number and exit\")\n arg_2.add_argument(\"--verbose\", \"-v\", action=\"store_true\", default=False,\n help=\"Give verbose output\")\n arg_2.add_argument(\"--configure\", \"-c\", action=ConfigureAction,\n help=\"Configure a rule\")\n arg_2.add_argument(\"--recursive\", \"-r\", action=\"store_true\", default=False,\n help=\"Recursively scan subfolders in a directory\")\n arg_2.add_argument(\"--rulefile\", \"-R\", action=RulefileAction,\n help=\"import additional rules from the given RULEFILE\")\n arg_2.add_argument(\"--argumentfile\", \"-A\", action=ArgfileLoader,\n help=\"read arguments from the given file\")\n arg_2.add_argument('args', metavar=\"file\", nargs=argparse.REMAINDER)\n\n # create a custom namespace, in which we can store a reference to\n # our rules. This lets the custom argument actions access the list\n # of rules\n arg_3 = argparse.Namespace()\n setattr(arg_3, \"app\", arg_0)\n arg_1 = arg_2.parse_args(arg_1, arg_3)\n\n arg_4.output_format = arg_1.format\n\n return arg_1"} +{"_id": "doc_3828", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a customized Draft4ExtendedValidator.\n\n :param spec_resolver: resolver for the spec\n :type resolver: :class:`jsonschema.RefResolver`\n \"\"\"\n arg_2 = arg_0._get_spec_validators(arg_1)\n return validators.extend(Draft4Validator, arg_2)"} +{"_id": "doc_3829", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"While yaml supports integer keys, these are not valid in\n json, and will break jsonschema. 
This method coerces all keys\n to strings.\n \"\"\"\n arg_3 = super(ExtendedSafeConstructor, arg_0).Func(\n arg_1, arg_2)\n\n return {\n (str(arg_4) if isinstance(arg_4, int) else arg_4): arg_3[arg_4]\n for arg_4 in arg_3\n }"} +{"_id": "doc_3830", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Open a file, read it and return its contents.\"\"\"\n with open(arg_0) as fh:\n return load(fh, arg_1)"} +{"_id": "doc_3831", "title": "", "text": "def Func(arg_0, arg_1=4):\n '''Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.'''\n \n arg_0 = [normalize(arg_3) for arg_3 in arg_0]\n arg_2 = {}\n for arg_3 in arg_0:\n arg_4 = count_ngrams(arg_3, arg_1)\n for (arg_5,arg_6) in list(arg_4.items()):\n arg_2[arg_5] = max(arg_2.get(arg_5,0), arg_6)\n return ([len(arg_3) for arg_3 in arg_0], arg_2)"} +{"_id": "doc_3832", "title": "", "text": "def Func(arg_0, arg_1=4):\n '''Takes a reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them. Also provides a set cause bleualign wants it'''\n arg_0 = normalize(arg_0)\n arg_2 = count_ngrams(arg_0, arg_1)\n return (len(arg_0), arg_2, frozenset(arg_2))"} +{"_id": "doc_3833", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = arg_3):\n \"\"\"Creates the sentence alignment of two texts.\n\n Texts can consist of several blocks. Block boundaries cannot be crossed by sentence \n alignment links. \n\n Each block consists of a list that contains the lengths (in characters) of the sentences\n in this block.\n \n @param source_blocks: The list of blocks in the source text.\n @param target_blocks: The list of blocks in the target text.\n @param params: the sentence alignment parameters.\n\n @returns: A list of sentence alignment lists\n \"\"\"\n if len(arg_0) != len(arg_1):\n raise ValueError(\"Source and target texts do not have the same number of blocks.\")\n \n return [align_blocks(arg_4, arg_5, arg_2) \n for arg_4, arg_5 in zip(arg_0, arg_1)]"} +{"_id": "doc_3834", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a path to model files\n model_path - string\n \"\"\"\n if not arg_0.startswith(\"/\") and not arg_0.startswith(\"models/\"):\n arg_0=\"/\" + arg_0\n if not arg_0.startswith(\"models\"):\n arg_0 = \"models\" + arg_0\n if not arg_0.endswith(\".p\"):\n arg_0+=\".p\"\n\n return arg_0"} +{"_id": "doc_3835", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Strips illegal characters from a string. Used to sanitize input essays.\n Removes all non-punctuation, digit, or letter characters.\n Returns sanitized string.\n string - string\n \"\"\"\n #Define replacement patterns\n arg_1 = r\"[^A-Za-z\\.\\?!,';:]\"\n arg_2 = r\"\\.\"\n arg_3 = r\",\"\n arg_4 = r\"\\?\"\n arg_5 = r\"!\"\n arg_6 = r\";\"\n arg_7 = r\":\"\n arg_8 = r\"\\s{1,}\"\n\n #Replace text.
Ordering is very important!\n arg_9 = re.sub(arg_1, \" \", arg_0)\n arg_9 = re.sub(arg_2,\" .\", arg_9)\n arg_9 = re.sub(arg_3, \" ,\", arg_9)\n arg_9 = re.sub(arg_4, \" ?\", arg_9)\n arg_9 = re.sub(arg_5, \" !\", arg_9)\n arg_9 = re.sub(arg_6, \" ;\", arg_9)\n arg_9 = re.sub(arg_7, \" :\", arg_9)\n arg_9 = re.sub(arg_8, \" \", arg_9)\n\n return arg_9"} +{"_id": "doc_3836", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Uses aspell to spell correct an input string.\n Requires aspell to be installed and added to the path.\n Returns the spell corrected string if aspell is found, original string if not.\n string - string\n \"\"\"\n\n # Create a temp file so that aspell could be used\n # By default, tempfile will delete this file when the file handle is closed.\n arg_1 = tempfile.NamedTemporaryFile(mode='w')\n arg_1.write(arg_0)\n arg_1.flush()\n arg_2 = os.path.abspath(arg_1.name)\n try:\n arg_3 = os.popen(aspell_path + \" -a < \" + arg_2 + \" --sug-mode=ultra\")\n\n # Aspell returns a list of incorrect words with the above flags\n arg_4 = arg_3.readlines()\n arg_3.close()\n\n except Exception:\n log.exception(\"aspell process failed; could not spell check\")\n # Return original string if aspell fails\n return arg_0,0, arg_0\n\n finally:\n arg_1.close()\n\n arg_5 = list()\n arg_6 = list()\n for arg_7 in range(1, len(arg_4)):\n if(len(arg_4[arg_7]) > 10):\n #Reformat aspell output to make sense\n arg_8 = re.search(\":\", arg_4[arg_7])\n if hasattr(arg_8, \"start\"):\n arg_9 = arg_4[arg_7][2:arg_8.start()]\n arg_10 = re.search(\" \", arg_9)\n arg_11 = arg_9[0:arg_10.start()]\n\n arg_12 = arg_4[arg_7][arg_8.start() + 2:]\n arg_13 = re.search(\",\", arg_12)\n if hasattr(arg_13, \"start\"):\n arg_14 = arg_12[0:arg_13.start()]\n\n arg_5.append(arg_11)\n arg_6.append(arg_14)\n\n #Create markup based on spelling errors\n arg_15 = arg_0\n arg_16 = arg_0\n arg_17=[]\n for arg_7 in range(0, len(arg_5)):\n arg_18 = r\"\\b\" + arg_5[arg_7] + r\"\\b\"\n arg_19 = re.compile(arg_18)\n arg_15 = re.sub(arg_19, arg_6[arg_7], arg_15)\n if arg_5[arg_7] not in arg_17:\n arg_16=re.sub(arg_19,'' + arg_5[arg_7] + \"\", arg_16)\n arg_17.append(arg_5[arg_7])\n\n return arg_15,len(arg_5),arg_16"} +{"_id": "doc_3837", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Makes a list unique\n \"\"\"\n arg_1 = set()\n arg_2 = arg_1.add\n return [arg_3 for arg_3 in arg_0 if arg_3 not in arg_1 and not arg_2(arg_3)]"} +{"_id": "doc_3838", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generates a count of the number of times each unique item appears in a list\n \"\"\"\n arg_1 = arg_0.count\n arg_2 = [(item, arg_1(item)) for item in set(arg_0)]\n arg_2.sort()\n return arg_2"} +{"_id": "doc_3839", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Given an input string, part of speech tags the string, then generates a list of\n ngrams that appear in the string.\n Used to define grammatically correct part of speech tag sequences.\n Returns a list of part of speech tag sequences.\n \"\"\"\n arg_1 = nltk.word_tokenize(arg_0)\n arg_2 = nltk.pos_tag(arg_1)\n arg_3 = [tag[1] for tag in arg_2]\n arg_4 = ngrams(arg_3, 2, 4)\n arg_5 = f7(arg_4)\n return arg_5"} +{"_id": "doc_3840", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generates predictions on a novel data array using a fit classifier\n clf is a classifier that has already been fit\n arr is a data array identical in dimension to the array clf was trained on\n Returns the array of predictions.\n \"\"\"\n if(hasattr(arg_0, \"predict_proba\")):\n arg_2 =
arg_0.predict(arg_1)\n # pred_score=preds.argmax(1)+min(x._score)\n else:\n arg_2 = arg_0.predict(arg_1)\n return arg_2"} +{"_id": "doc_3841", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Calculates the average value of a list of numbers\n Returns a float\n \"\"\"\n arg_1 = 0.0\n for arg_2 in arg_0:\n arg_1 += arg_2\n return arg_1 / len(arg_0)"} +{"_id": "doc_3842", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Calculates kappa correlation between rater_a and rater_b.\n Kappa measures how well 2 quantities vary together.\n rater_a is a list of rater a scores\n rater_b is a list of rater b scores\n min_rating is an optional argument describing the minimum rating possible on the data set\n max_rating is an optional argument describing the maximum rating possible on the data set\n Returns a float corresponding to the kappa correlation\n \"\"\"\n assert(len(arg_0) == len(arg_1))\n arg_0 = [int(a) for a in arg_0]\n arg_1 = [int(b) for b in arg_1]\n if arg_2 is None:\n arg_2 = min(arg_0 + arg_1)\n if arg_3 is None:\n arg_3 = max(arg_0 + arg_1)\n arg_4 = confusion_matrix(arg_0, arg_1,\n arg_2, arg_3)\n arg_5 = len(arg_4)\n arg_6 = float(len(arg_0))\n\n arg_7 = histogram(arg_0, arg_2, arg_3)\n arg_8 = histogram(arg_1, arg_2, arg_3)\n\n arg_9 = 0.0\n arg_10 = 0.0\n\n if(arg_5 > 1):\n for arg_11 in range(arg_5):\n for arg_12 in range(arg_5):\n arg_13 = (arg_7[arg_11] * arg_8[arg_12]\n / arg_6)\n arg_14 = pow(arg_11 - arg_12, 2.0) / pow(arg_5 - 1, 2.0)\n arg_9 += arg_14 * arg_4[arg_11][arg_12] / arg_6\n arg_10 += arg_14 * arg_13 / arg_6\n\n return 1.0 - arg_9 / arg_10\n else:\n return 1.0"} +{"_id": "doc_3843", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = 200):\n \"\"\"\n Initializes dictionaries from an essay set object\n Dictionaries must be initialized prior to using this to extract features\n e_set is an input essay set\n returns a confirmation of initialization\n \"\"\"\n if(hasattr(arg_1, '_type')):\n if(arg_1._type == \"train\"):\n #normal text (unstemmed) useful words/bigrams\n arg_3 = util_functions.get_vocab(arg_1._text, arg_1._score, arg_2 = arg_2)\n #stemmed and spell corrected vocab useful words/ngrams\n arg_4 = util_functions.get_vocab(arg_1._clean_stem_text, arg_1._score, arg_2 = arg_2)\n #dictionary trained on proper vocab\n arg_0._normal_dict = CountVectorizer(ngram_range=(1,2), vocabulary=arg_3)\n #dictionary trained on proper vocab\n arg_0._stem_dict = CountVectorizer(ngram_range=(1,2), vocabulary=arg_4)\n arg_0.dict_initialized = True\n #Average spelling errors in set. needed later for spelling detection\n arg_0._mean_spelling_errors=sum(arg_1._spelling_errors)/float(len(arg_1._spelling_errors))\n arg_0._spell_errors_per_character=sum(arg_1._spelling_errors)/float(sum([len(t) for t in arg_1._text]))\n #Gets the number and positions of grammar errors\n arg_10,arg_11=arg_0._get_grammar_errors(arg_1._pos,arg_1._text,arg_1._tokens)\n arg_0._grammar_errors_per_character=(sum(arg_10)/float(sum([len(t) for t in arg_1._text])))\n #Generate bag of words features\n arg_13=arg_0.gen_bag_feats(arg_1)\n #Sum of a row of bag of words features (topical words in an essay)\n arg_14=numpy.sum(arg_13[:,:])\n #Average index of how \"topical\" essays are\n arg_0._mean_f_prop=arg_14/float(sum([len(t) for t in arg_1._text]))\n arg_16 = \"ok\"\n else:\n raise util_functions.InputError(arg_1, \"needs to be an essay set of the train type.\")\n else:\n raise util_functions.InputError(arg_1, \"wrong input. 
need an essay set object\")\n return arg_16"} +{"_id": "doc_3844", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Gets a set of gramatically correct part of speech sequences from an input file called essaycorpus.txt\n Returns the set and caches the file\n \"\"\"\n if(os.path.isfile(NGRAM_PATH)):\n arg_1 = pickle.load(open(NGRAM_PATH, 'rb'))\n elif os.path.isfile(ESSAY_CORPUS_PATH):\n arg_2 = open(ESSAY_CORPUS_PATH).read()\n arg_2 = util_functions.sub_chars(arg_2)\n arg_1 = util_functions.regenerate_good_tokens(arg_2)\n pickle.dump(arg_1, open(NGRAM_PATH, 'wb'))\n else:\n #Hard coded list in case the needed files cannot be found\n arg_1=['NN PRP', 'NN PRP .', 'NN PRP . DT', 'PRP .', 'PRP . DT', 'PRP . DT NNP', '. DT',\n '. DT NNP', '. DT NNP NNP', 'DT NNP', 'DT NNP NNP', 'DT NNP NNP NNP', 'NNP NNP',\n 'NNP NNP NNP', 'NNP NNP NNP NNP', 'NNP NNP NNP .', 'NNP NNP .', 'NNP NNP . TO',\n 'NNP .', 'NNP . TO', 'NNP . TO NNP', '. TO', '. TO NNP', '. TO NNP NNP',\n 'TO NNP', 'TO NNP NNP']\n\n return set(arg_1)"} +{"_id": "doc_3845", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generates length based features from an essay set\n Generally an internal function called by gen_feats\n Returns an array of length features\n e_set - EssaySet object\n \"\"\"\n arg_2 = arg_1._text\n arg_3 = [len(e) for e in arg_2]\n arg_4 = [max(len(t),1) for t in arg_1._tokens]\n arg_5 = [e.count(\",\") for e in arg_2]\n arg_6 = [e.count(\"'\") for e in arg_2]\n arg_7 = [e.count(\".\") + e.count(\"?\") + e.count(\"!\") for e in arg_2]\n arg_8 = [arg_3[m] / float(arg_4[m]) for m in xrange(0, len(arg_2))]\n\n arg_9,arg_10= arg_0._get_grammar_errors(arg_1._pos,arg_1._text,arg_1._tokens)\n arg_11 = [arg_9[m] / float(arg_4[m]) for m in xrange(0, len(arg_2))]\n\n arg_12 = numpy.array((\n arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9,\n arg_11)).transpose()\n\n return arg_12.copy()"} +{"_id": "doc_3846", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generates bag of words features from an input essay set and trained FeatureExtractor\n Generally called by gen_feats\n Returns an array of features\n e_set - EssaySet object\n \"\"\"\n if(hasattr(arg_0, '_stem_dict')):\n arg_2 = arg_0._stem_dict.transform(arg_1._clean_stem_text)\n arg_3 = arg_0._normal_dict.transform(arg_1._text)\n arg_4 = numpy.concatenate((arg_2.toarray(), arg_3.toarray()), axis=1)\n else:\n raise util_functions.InputError(arg_0, \"Dictionaries must be initialized prior to generating bag features.\")\n return arg_4.copy()"} +{"_id": "doc_3847", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generates bag of words, length, and prompt features from an essay set object\n returns an array of features\n e_set - EssaySet object\n \"\"\"\n arg_2 = arg_0.gen_bag_feats(arg_1)\n arg_3 = arg_0.gen_length_feats(arg_1)\n arg_4 = arg_0.gen_prompt_feats(arg_1)\n arg_5 = numpy.concatenate((arg_3, arg_4, arg_2), axis=1)\n arg_5 = arg_5.copy()\n\n return arg_5"} +{"_id": "doc_3848", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Gets two classifiers for each type of algorithm, and returns them. 
First for predicting, second for cv error.\n type - one of util_functions.AlgorithmTypes\n \"\"\"\n if arg_0 == util_functions.AlgorithmTypes.classification:\n arg_1 = sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learn_rate=.05,\n max_depth=4, random_state=1,min_samples_leaf=3)\n arg_2=sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learn_rate=.05,\n max_depth=4, random_state=1,min_samples_leaf=3)\n else:\n arg_1 = sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learn_rate=.05,\n max_depth=4, random_state=1,min_samples_leaf=3)\n arg_2=sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learn_rate=.05,\n max_depth=4, random_state=1,min_samples_leaf=3)\n return arg_1, arg_2"} +{"_id": "doc_3849", "title": "", "text": "def Func(arg_0, arg_1=arg_2.AlgorithmTypes.regression):\n \"\"\"\n Extracts features and generates predictors based on a given predictor set\n predictor_set - a PredictorSet object that has been initialized with data\n type - one of util_functions.AlgorithmType\n \"\"\"\n if(arg_1 not in [arg_2.AlgorithmTypes.regression, arg_2.AlgorithmTypes.classification]):\n arg_1 = arg_2.AlgorithmTypes.regression\n\n arg_5 = predictor_extractor.PredictorExtractor()\n arg_5.initialize_dictionaries(arg_0)\n\n arg_6 = arg_5.gen_feats(arg_0)\n\n arg_7,arg_8 = get_algorithms(arg_1)\n arg_9=get_cv_error(arg_8,arg_6,arg_0._target)\n\n try:\n arg_10 = numpy.asarray(arg_0._target, dtype=numpy.int)\n arg_7.fit(arg_6, arg_10)\n except ValueError:\n log.exception(\"Not enough classes (0,1,etc) in sample.\")\n arg_10 = arg_0._target\n arg_10[0]=1\n arg_10[1]=0\n arg_7.fit(arg_6, arg_10)\n\n return arg_5, arg_7, arg_9"} +{"_id": "doc_3850", "title": "", "text": "def Func(arg_0,arg_1,arg_2,arg_3,arg_4=None):\n \"\"\"\n Function that creates essay set, extracts features, and writes out model\n See above functions for argument descriptions\n \"\"\"\n arg_5=create_essay_set(arg_0,arg_1,arg_2)\n arg_6,arg_7=extract_features_and_generate_model(arg_5,arg_4)\n dump_model_to_file(arg_2,arg_6,arg_7,arg_3)"} +{"_id": "doc_3851", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Initialize dictionaries with the textual inputs in the PredictorSet object\n p_set - PredictorSet object that has had data fed in\n \"\"\"\n arg_2 = False\n if not (hasattr(arg_1, '_type')):\n arg_3 = \"needs to be an essay set of the train type.\"\n log.exception(arg_3)\n raise util_functions.InputError(arg_1, arg_3)\n\n if not (arg_1._type == \"train\"):\n arg_3 = \"needs to be an essay set of the train type.\"\n log.exception(arg_3)\n raise util_functions.InputError(arg_1, arg_3)\n\n arg_4=len(arg_1._essay_sets)\n if arg_4==0:\n arg_4=1\n\n #Ensures that even with a large amount of input textual features, training time stays reasonable\n arg_5 = int(math.floor(200/arg_4))\n for arg_6 in xrange(0,len(arg_1._essay_sets)):\n arg_0._extractors.append(FeatureExtractor())\n arg_0._extractors[arg_6].Func(arg_1._essay_sets[arg_6], arg_5=arg_5)\n arg_0._initialized = True\n arg_2 = True\n return arg_2"} +{"_id": "doc_3852", "title": "", "text": "def Func(arg_0, arg_1=True):\n r\"\"\"Get descriptors in module.\n\n Parameters:\n mdl(module): module to search\n submodule(bool): search recursively\n\n Returns:\n Iterator[Descriptor]\n\n \"\"\"\n arg_2 = getattr(arg_0, \"__all__\", None)\n if arg_2 is None:\n arg_2 = dir(arg_0)\n\n arg_3 = (getattr(arg_0, name) for name in arg_2 if name[:1] != \"_\")\n\n if arg_1:\n for arg_4 in arg_3:\n if is_descriptor_class(arg_4):\n yield arg_4\n if 
isinstance(arg_4, ModuleType):\n for arg_4 in Func(arg_4, arg_1=True):\n yield arg_4\n\n else:\n for arg_4 in arg_3:\n if is_descriptor_class(arg_4):\n yield arg_4"} +{"_id": "doc_3853", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Register Descriptors from json descriptor objects.\n\n Parameters:\n obj(list or dict): descriptors to register\n\n \"\"\"\n if not isinstance(arg_1, list):\n arg_1 = [arg_1]\n\n arg_0.register(Descriptor.from_json(arg_2) for arg_2 in arg_1)"} +{"_id": "doc_3854", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n r\"\"\"Register descriptors.\n\n Descriptor-like:\n * Descriptor instance: self\n * Descriptor class: use Descriptor.preset() method\n * module: use Descriptor-likes in module\n * Iterable: use Descriptor-likes in Iterable\n\n Parameters:\n desc(Descriptor-like): descriptors to Func\n version(str): version\n ignore_3D(bool): ignore 3D descriptors\n\n \"\"\"\n if arg_2 is None:\n arg_2 = __version__\n\n arg_2 = StrictVersion(arg_2)\n return arg_0._Func(arg_1, arg_2, arg_3)"} +{"_id": "doc_3855", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.stdout, arg_5=\"\\n\"):\n \"\"\"Output message.\n\n Parameters:\n s(str): message to output\n file(file-like): output to\n end(str): end mark of message\n\n Return:\n None\n\n \"\"\"\n arg_6 = getattr(arg_0, \"_progress_bar\", None)\n if arg_6 is not None:\n arg_6.write(arg_1, arg_2=arg_2, arg_5=\"\\n\")\n return\n\n print(arg_1, arg_2=arg_2, arg_5=\"\\n\")"} +{"_id": "doc_3856", "title": "", "text": "def Func(arg_0, arg_1=False):\n r\"\"\"Check calculatable descriptor class or not.\n\n Returns:\n bool\n\n \"\"\"\n return (\n isinstance(arg_0, type)\n and issubclass(arg_0, Descriptor)\n and (True if arg_1 else not inspect.isabstract(arg_0))\n )"} +{"_id": "doc_3857", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"Calculate atomic surface area.\n\n :type i: int\n :param i: atom index\n\n :rtype: float\n \"\"\"\n arg_2 = 4.0 * np.pi * arg_0.rads2[arg_1]\n\n arg_3 = arg_0.neighbors.get(arg_1)\n\n if arg_3 is None:\n return arg_2\n\n arg_4 = arg_0.xyzs[arg_1, np.newaxis].T\n\n arg_5 = arg_0.sphere * arg_0.rads[arg_1] + arg_4\n arg_6 = arg_5.shape[1]\n\n for arg_7, arg_8 in arg_3:\n arg_9 = arg_0.xyzs[arg_7, np.newaxis].T\n\n arg_10 = (arg_5 - arg_9) ** 2\n arg_11 = (arg_10[0] + arg_10[1] + arg_10[2]) > arg_0.rads2[arg_7]\n arg_5 = np.compress(arg_11, arg_5, axis=1)\n\n return arg_2 * arg_5.shape[1] / arg_6"} +{"_id": "doc_3858", "title": "", "text": "def Func(arg_0, arg_1, arg_2=-1, arg_3=1.4, arg_4=4):\n r\"\"\"Construct SurfaceArea from rdkit Mol type.\n\n :type mol: rdkit.Chem.Mol\n :param mol: input molecule\n\n :type conformer: int\n :param conformer: conformer id\n\n :type solvent_radius: float\n :param solvent_radius: solvent radius\n\n :type level: int\n :param level: mesh level\n\n :rtype: SurfaceArea\n \"\"\"\n arg_5 = atoms_to_numpy(lambda a: vdw_radii[a.GetAtomicNum()] + arg_3, arg_1)\n\n arg_6 = arg_1.GetConformer(arg_2)\n\n arg_7 = np.array([list(arg_6.GetAtomPosition(i)) for i in range(arg_1.GetNumAtoms())])\n\n return arg_0(arg_5, arg_7, arg_4)"} +{"_id": "doc_3859", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create Descriptor instance from json dict.\n\n Parameters:\n obj(dict): descriptor dict\n\n Returns:\n Descriptor: descriptor\n\n \"\"\"\n arg_2 = getattr(arg_0, \"_all_descriptors\", None)\n\n if arg_2 is None:\n from mordred import descriptors\n arg_2 = {\n cls.__name__: cls\n for cls in get_descriptors_in_module(descriptors)\n }\n 
arg_2[arg_3.__name__] = arg_3\n arg_0._all_descriptors = arg_2\n\n return _from_json(arg_1, arg_2)"} +{"_id": "doc_3860", "title": "", "text": "def Func(arg_0):\n r\"\"\"Delete missing value.\n\n Returns:\n Result\n\n \"\"\"\n arg_1 = []\n arg_2 = []\n for arg_3, arg_4 in arg_0.items():\n if not is_missing(arg_4):\n arg_1.append(arg_4)\n arg_2.append(arg_3)\n\n return arg_0.__class__(arg_0.mol, arg_1, arg_2)"} +{"_id": "doc_3861", "title": "", "text": "def Func(arg_0):\n r\"\"\"Get Func.\n\n Returns:\n Iterable[(Descriptor, value)]\n\n \"\"\"\n return ((arg_1, arg_2) for arg_1, arg_2 in zip(arg_0.keys(), arg_0.values()))"} +{"_id": "doc_3862", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Reports an OrderError. Error message will state that\n first_tag came before second_tag.\n \"\"\"\n arg_0.error = True\n arg_5 = ERROR_MESSAGES['A_BEFORE_B'].format(arg_1, arg_2, arg_3)\n arg_0.logger.log(arg_5)"} +{"_id": "doc_3863", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write the fields of a single review to out.\n \"\"\"\n arg_1.write('# Review\\n\\n')\n write_value('Reviewer', arg_0.reviewer, arg_1)\n write_value('ReviewDate', arg_0.review_date_iso_format, arg_1)\n if arg_0.has_comment:\n write_text_value('ReviewComment', arg_0.comment, arg_1)"} +{"_id": "doc_3864", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write the fields of a single annotation to out.\n \"\"\"\n arg_1.write('# Annotation\\n\\n')\n write_value('Annotator', arg_0.annotator, arg_1)\n write_value('AnnotationDate', arg_0.annotation_date_iso_format, arg_1)\n if arg_0.has_comment:\n write_text_value('AnnotationComment', arg_0.comment, arg_1)\n write_value('AnnotationType', arg_0.annotation_type, arg_1)\n write_value('SPDXREF', arg_0.spdx_id, arg_1)"} +{"_id": "doc_3865", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write a file fields to out.\n \"\"\"\n arg_1.write('# File\\n\\n')\n write_value('FileName', arg_0.name, arg_1)\n write_value('SPDXID', arg_0.spdx_id, arg_1)\n if arg_0.has_optional_field('type'):\n Func_type(arg_0.type, arg_1)\n write_value('FileChecksum', arg_0.chk_sum.to_tv(), arg_1)\n if isinstance(arg_0.conc_lics, (document.LicenseConjunction, document.LicenseDisjunction)):\n write_value('LicenseConcluded', u'({0})'.format(arg_0.conc_lics), arg_1)\n else:\n write_value('LicenseConcluded', arg_0.conc_lics, arg_1)\n\n # write sorted list\n for arg_2 in sorted(arg_0.licenses_in_file):\n write_value('LicenseInfoInFile', arg_2, arg_1)\n\n if isinstance(arg_0.copyright, six.string_types):\n write_text_value('FileCopyrightText', arg_0.copyright, arg_1)\n else:\n write_value('FileCopyrightText', arg_0.copyright, arg_1)\n\n if arg_0.has_optional_field('license_comment'):\n write_text_value('LicenseComments', arg_0.license_comment, arg_1)\n\n if arg_0.has_optional_field('comment'):\n write_text_value('FileComment', arg_0.comment, arg_1)\n\n if arg_0.has_optional_field('notice'):\n write_text_value('FileNotice', arg_0.notice, arg_1)\n\n for arg_3 in sorted(arg_0.contributors):\n write_value('FileContributor', arg_3, arg_1)\n\n for arg_4 in sorted(arg_0.dependencies):\n write_value('FileDependency', arg_4, arg_1)\n\n arg_5 = arg_0.artifact_of_project_name\n arg_6 = arg_0.artifact_of_project_home\n arg_7 = arg_0.artifact_of_project_uri\n\n for arg_8, arg_9, arg_10 in sorted(zip_longest(arg_5, arg_6, arg_7)):\n write_value('ArtifactOfProjectName', arg_8, arg_1)\n if arg_9 is not None:\n write_value('ArtifactOfProjectHomePage', arg_9, arg_1)\n if arg_10 is not None:\n 
write_value('ArtifactOfProjectURI', arg_10, arg_1)"} +{"_id": "doc_3866", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write a package fields to out.\n \"\"\"\n arg_1.write('# Package\\n\\n')\n write_value('PackageName', arg_0.name, arg_1)\n if arg_0.has_optional_field('version'):\n write_value('PackageVersion', arg_0.version, arg_1)\n write_value('PackageDownloadLocation', arg_0.download_location, arg_1)\n\n if arg_0.has_optional_field('summary'):\n write_text_value('PackageSummary', arg_0.summary, arg_1)\n\n if arg_0.has_optional_field('source_info'):\n write_text_value('PackageSourceInfo', arg_0.source_info, arg_1)\n\n if arg_0.has_optional_field('file_name'):\n write_value('PackageFileName', arg_0.file_name, arg_1)\n\n if arg_0.has_optional_field('supplier'):\n write_value('PackageSupplier', arg_0.supplier, arg_1)\n\n if arg_0.has_optional_field('originator'):\n write_value('PackageOriginator', arg_0.originator, arg_1)\n\n if arg_0.has_optional_field('check_sum'):\n write_value('PackageChecksum', arg_0.check_sum.to_tv(), arg_1)\n\n write_value('PackageVerificationCode', format_verif_code(arg_0), arg_1)\n\n if arg_0.has_optional_field('description'):\n write_text_value('PackageDescription', arg_0.description, arg_1)\n\n if isinstance(arg_0.license_declared, (document.LicenseConjunction,\n document.LicenseDisjunction)):\n write_value('PackageLicenseDeclared', u'({0})'.format(arg_0.license_declared), arg_1)\n else:\n write_value('PackageLicenseDeclared', arg_0.license_declared, arg_1)\n\n if isinstance(arg_0.conc_lics, (document.LicenseConjunction,\n document.LicenseDisjunction)):\n write_value('PackageLicenseConcluded', u'({0})'.format(arg_0.conc_lics), arg_1)\n else:\n write_value('PackageLicenseConcluded', arg_0.conc_lics, arg_1)\n\n # Write sorted list of licenses.\n for arg_2 in sorted(arg_0.licenses_from_files):\n write_value('PackageLicenseInfoFromFiles', arg_2, arg_1)\n\n if arg_0.has_optional_field('license_comment'):\n write_text_value('PackageLicenseComments', arg_0.license_comment, arg_1)\n\n # cr_text is either free form text or NONE or NOASSERTION.\n if isinstance(arg_0.cr_text, six.string_types):\n write_text_value('PackageCopyrightText', arg_0.cr_text, arg_1)\n else:\n write_value('PackageCopyrightText', arg_0.cr_text, arg_1)\n\n if arg_0.has_optional_field('homepage'):\n write_value('PackageHomePage', arg_0.homepage, arg_1)\n\n # Write sorted files.\n for arg_3 in sorted(arg_0.files):\n write_separators(arg_1)\n write_file(arg_3, arg_1)"} +{"_id": "doc_3867", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Write an SPDX tag value document.\n - document - spdx.document instance.\n - out - file like object that will be written to.\n Optionally `validate` the document before writing and raise\n InvalidDocumentError if document.validate returns False.\n \"\"\"\n arg_3 = []\n arg_3 = arg_0.validate(arg_3)\n if arg_2 and arg_3:\n raise InvalidDocumentError(arg_3)\n\n # Write out document information\n arg_1.write('# Document Information\\n\\n')\n write_value('SPDXVersion', str(arg_0.version), arg_1)\n write_value('DataLicense', arg_0.data_license.identifier, arg_1)\n write_value('DocumentName', arg_0.name, arg_1)\n write_value('SPDXID', 'SPDXRef-DOCUMENT', arg_1)\n write_value('DocumentNamespace', arg_0.namespace, arg_1)\n if arg_0.has_comment:\n write_text_value('DocumentComment', arg_0.comment, arg_1)\n for arg_4 in arg_0.ext_document_references:\n arg_5 = ' '.join([arg_4.external_document_id,\n arg_4.spdx_document_uri,\n 
arg_4.check_sum.identifier + ':' +\n arg_4.check_sum.value])\n write_value('ExternalDocumentRef', arg_5, arg_1)\n write_separators(arg_1)\n # Write out creation info\n write_creation_info(arg_0.creation_info, arg_1)\n write_separators(arg_1)\n\n # Write sorted reviews\n for arg_6 in sorted(arg_0.reviews):\n write_review(arg_6, arg_1)\n write_separators(arg_1)\n\n # Write sorted annotations\n for arg_7 in sorted(arg_0.annotations):\n write_annotation(arg_7, arg_1)\n write_separators(arg_1)\n\n # Write out package info\n write_package(arg_0.package, arg_1)\n write_separators(arg_1)\n\n arg_1.write('# Extracted Licenses\\n\\n')\n for arg_8 in sorted(arg_0.extracted_licenses):\n write_extracted_licenses(arg_8, arg_1)\n write_separators(arg_1)"} +{"_id": "doc_3868", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return an spdx.checksum.Algorithm instance representing the SHA1\n checksum or None if does not match CHECKSUM_RE.\n \"\"\"\n # More constrained regex at lexer level\n arg_1 = re.compile('SHA1:\s*([\S]+)', re.UNICODE)\n arg_2 = arg_1.match(arg_0)\n if arg_2:\n return checksum.Algorithm(identifier='SHA1', arg_0=arg_2.group(1))\n else:\n return None"} +{"_id": "doc_3869", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Set the document version.\n Raise SPDXValueError if malformed value, CardinalityError\n if already defined\n \"\"\"\n if not arg_0.doc_version_set:\n arg_0.doc_version_set = True\n arg_4 = arg_0.VERS_STR_REGEX.match(arg_2)\n if arg_4 is None:\n raise SPDXValueError('Document::Version')\n else:\n arg_1.version = arg_5.Version(major=int(arg_4.group(1)),\n minor=int(arg_4.group(2)))\n return True\n else:\n raise CardinalityError('Document::Version')"} +{"_id": "doc_3870", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the document name.\n Raises CardinalityError if already defined.\n \"\"\"\n if not arg_0.doc_name_set:\n arg_1.name = arg_2\n arg_0.doc_name_set = True\n return True\n else:\n raise CardinalityError('Document::Name')"} +{"_id": "doc_3871", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the document SPDX Identifier.\n Raises value error if malformed value, CardinalityError\n if already defined.\n \"\"\"\n if not arg_0.doc_spdx_id_set:\n if arg_2 == 'SPDXRef-DOCUMENT':\n arg_1.spdx_id = arg_2\n arg_0.doc_spdx_id_set = True\n return True\n else:\n raise SPDXValueError('Document::SPDXID')\n else:\n raise CardinalityError('Document::SPDXID')"} +{"_id": "doc_3872", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets document comment, Raises CardinalityError if\n comment already set.\n Raises SPDXValueError if comment is not free form text.\n \"\"\"\n if not arg_0.doc_comment_set:\n arg_0.doc_comment_set = True\n if validations.validate_doc_comment(arg_2):\n arg_1.comment = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('Document::Comment')\n else:\n raise CardinalityError('Document::Comment')"} +{"_id": "doc_3873", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the document namespace.\n Raise SPDXValueError if malformed value, CardinalityError\n if already defined.\n \"\"\"\n if not arg_0.doc_namespace_set:\n arg_0.doc_namespace_set = True\n if validations.validate_doc_namespace(arg_2):\n arg_1.namespace = arg_2\n return True\n else:\n raise SPDXValueError('Document::Namespace')\n else:\n raise CardinalityError('Document::Namespace')"} +{"_id": "doc_3874", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Sets the `spdx_document_uri` attribute of the 
`ExternalDocumentRef`\n object.\n \"\"\"\n if validations.validate_doc_namespace(arg_2):\n arg_1.ext_document_references[-1].spdx_document_uri = arg_2\n else:\n raise SPDXValueError('Document::ExternalDocumentRef')"} +{"_id": "doc_3875", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Builds a tool object out of a string representation.\n Returns built tool. Raises SPDXValueError if failed to extract\n tool name or name is malformed\n \"\"\"\n arg_3 = arg_0.tool_re.match(arg_2)\n if arg_3 and validations.validate_tool_name(arg_3.group(arg_0.TOOL_NAME_GROUP)):\n arg_4 = arg_3.group(arg_0.TOOL_NAME_GROUP)\n return creationinfo.Tool(arg_4)\n else:\n raise SPDXValueError('Failed to extract tool name')"} +{"_id": "doc_3876", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds a creator to the document's creation info.\n Returns true if creator is valid.\n Creator must be built by an EntityBuilder.\n Raises SPDXValueError if not a creator type.\n \"\"\"\n if validations.validate_creator(arg_2):\n arg_1.creation_info.Func(arg_2)\n return True\n else:\n raise SPDXValueError('CreationInfo::Creator')"} +{"_id": "doc_3877", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets created date, Raises CardinalityError if\n created date already set.\n Raises SPDXValueError if created is not a date.\n \"\"\"\n if not arg_0.created_date_set:\n arg_0.created_date_set = True\n arg_4 = utils.datetime_from_iso_format(arg_2)\n if arg_4 is not None:\n arg_1.creation_info.created = arg_4\n return True\n else:\n raise SPDXValueError('CreationInfo::Date')\n else:\n raise CardinalityError('CreationInfo::Created')"} +{"_id": "doc_3878", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the license list version, Raises CardinalityError if\n already set, SPDXValueError if incorrect value.\n \"\"\"\n if not arg_0.lics_list_ver_set:\n arg_0.lics_list_ver_set = True\n arg_4 = version.Version.from_str(arg_2)\n if arg_4 is not None:\n arg_1.creation_info.license_list_version = arg_4\n return True\n else:\n raise SPDXValueError('CreationInfo::LicenseListVersion')\n else:\n raise CardinalityError('CreationInfo::LicenseListVersion')"} +{"_id": "doc_3879", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Resets builder state to allow building new creation info.\"\"\"\n # FIXME: this state does not make sense\n arg_0.created_date_set = False\n arg_0.creation_comment_set = False\n arg_0.lics_list_ver_set = False"} +{"_id": "doc_3880", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds a reviewer to the SPDX Document.\n Reviewer is an entity created by an EntityBuilder.\n Raises SPDXValueError if not a valid reviewer type.\n \"\"\"\n # Each reviewer marks the start of a new review object.\n # FIXME: this state does not make sense\n arg_0.reset_reviews()\n if validations.validate_reviewer(arg_2):\n arg_1.add_review(review.Review(arg_2=arg_2))\n return True\n else:\n raise SPDXValueError('Review::Reviewer')"} +{"_id": "doc_3881", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the review date. Raises CardinalityError if\n already set. 
OrderError if no reviewer defined before.\n Raises SPDXValueError if invalid reviewed value.\n \"\"\"\n if len(arg_1.reviews) != 0:\n if not arg_0.review_date_set:\n arg_0.review_date_set = True\n arg_4 = utils.datetime_from_iso_format(arg_2)\n if arg_4 is not None:\n arg_1.reviews[-1].review_date = arg_4\n return True\n else:\n raise SPDXValueError('Review::ReviewDate')\n else:\n raise CardinalityError('Review::ReviewDate')\n else:\n raise OrderError('Review::ReviewDate')"} +{"_id": "doc_3882", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds an annotator to the SPDX Document.\n Annotator is an entity created by an EntityBuilder.\n Raises SPDXValueError if not a valid annotator type.\n \"\"\"\n # Each annotator marks the start of a new annotation object.\n # FIXME: this state does not make sense\n arg_0.reset_annotations()\n if validations.validate_annotator(arg_2):\n arg_1.add_annotation(annotation.Annotation(arg_2=arg_2))\n return True\n else:\n raise SPDXValueError('Annotation::Annotator')"} +{"_id": "doc_3883", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the annotation date. Raises CardinalityError if\n already set. OrderError if no annotator defined before.\n Raises SPDXValueError if invalid value.\n \"\"\"\n if len(arg_1.annotations) != 0:\n if not arg_0.annotation_date_set:\n arg_0.annotation_date_set = True\n arg_4 = utils.datetime_from_iso_format(arg_2)\n if arg_4 is not None:\n arg_1.annotations[-1].annotation_date = arg_4\n return True\n else:\n raise SPDXValueError('Annotation::AnnotationDate')\n else:\n raise CardinalityError('Annotation::AnnotationDate')\n else:\n raise OrderError('Annotation::AnnotationDate')"} +{"_id": "doc_3884", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the annotation comment. Raises CardinalityError if\n already set. OrderError if no annotator defined before.\n Raises SPDXValueError if comment is not free form text.\n \"\"\"\n if len(arg_1.annotations) != 0:\n if not arg_0.annotation_comment_set:\n arg_0.annotation_comment_set = True\n if validations.validate_annotation_comment(arg_2):\n arg_1.annotations[-1].comment = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('AnnotationComment::Comment')\n else:\n raise CardinalityError('AnnotationComment::Comment')\n else:\n raise OrderError('AnnotationComment::Comment')"} +{"_id": "doc_3885", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the annotation type. Raises CardinalityError if\n already set. 
OrderError if no annotator defined before.\n Raises SPDXValueError if invalid value.\n \"\"\"\n if len(arg_1.annotations) != 0:\n if not arg_0.annotation_type_set:\n arg_0.annotation_type_set = True\n if validations.validate_annotation_type(arg_2):\n arg_1.annotations[-1].annotation_type = arg_2\n return True\n else:\n raise SPDXValueError('Annotation::AnnotationType')\n else:\n raise CardinalityError('Annotation::AnnotationType')\n else:\n raise OrderError('Annotation::AnnotationType')"} +{"_id": "doc_3886", "title": "", "text": "def Func(arg_0):\n \"\"\"Resets the builder's state in order to build new packages.\"\"\"\n # FIXME: this state does not make sense\n arg_0.package_set = False\n arg_0.package_vers_set = False\n arg_0.package_file_name_set = False\n arg_0.package_supplier_set = False\n arg_0.package_originator_set = False\n arg_0.package_down_location_set = False\n arg_0.package_home_set = False\n arg_0.package_verif_set = False\n arg_0.package_chk_sum_set = False\n arg_0.package_source_info_set = False\n arg_0.package_conc_lics_set = False\n arg_0.package_license_declared_set = False\n arg_0.package_license_comment_set = False\n arg_0.package_cr_text_set = False\n arg_0.package_summary_set = False\n arg_0.package_desc_set = False"} +{"_id": "doc_3887", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Creates a package for the SPDX Document.\n name - any string.\n Raises CardinalityError if package already defined.\n \"\"\"\n if not arg_0.package_set:\n arg_0.package_set = True\n arg_1.package = arg_4.Package(arg_2=arg_2)\n return True\n else:\n raise CardinalityError('Package::Name')"} +{"_id": "doc_3888", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package file name, if not already set.\n name - Any string.\n Raises CardinalityError if already has a file_name.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_file_name_set:\n arg_0.package_file_name_set = True\n arg_1.package.file_name = arg_2\n return True\n else:\n raise CardinalityError('Package::FileName')"} +{"_id": "doc_3889", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package supplier, if not already set.\n entity - Organization, Person or NoAssert.\n Raises CardinalityError if already has a supplier.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_supplier_set:\n arg_0.package_supplier_set = True\n if validations.validate_pkg_supplier(arg_2):\n arg_1.package.supplier = arg_2\n return True\n else:\n raise SPDXValueError('Package::Supplier')\n else:\n raise CardinalityError('Package::Supplier')"} +{"_id": "doc_3890", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package originator, if not already set.\n entity - Organization, Person or NoAssert.\n Raises CardinalityError if already has an originator.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_originator_set:\n arg_0.package_originator_set = True\n if validations.validate_pkg_originator(arg_2):\n arg_1.package.originator = arg_2\n return True\n else:\n raise SPDXValueError('Package::Originator')\n else:\n raise CardinalityError('Package::Originator')"} +{"_id": "doc_3891", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package download location, if not already set.\n location - A string\n Raises CardinalityError if already defined.\n Raises OrderError if 
no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_down_location_set:\n arg_0.package_down_location_set = True\n arg_1.package.download_location = arg_2\n return True\n else:\n raise CardinalityError('Package::DownloadLocation')"} +{"_id": "doc_3892", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package homepage location if not already set.\n location - A string or None or NoAssert.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n Raises SPDXValueError if location has incorrect value.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_home_set:\n arg_0.package_home_set = True\n if validations.validate_pkg_homepage(arg_2):\n arg_1.package.homepage = arg_2\n return True\n else:\n raise SPDXValueError('Package::HomePage')\n else:\n raise CardinalityError('Package::HomePage')"} +{"_id": "doc_3893", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package's source information, if not already set.\n text - Free form text.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n SPDXValueError if text is not free form text.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_source_info_set:\n arg_0.package_source_info_set = True\n if validations.validate_pkg_src_info(arg_2):\n arg_1.package.source_info = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('Package::SourceInfo')\n else:\n raise CardinalityError('Package::SourceInfo')"} +{"_id": "doc_3894", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package's concluded licenses.\n licenses - License info.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n Raises SPDXValueError if data malformed.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_conc_lics_set:\n arg_0.package_conc_lics_set = True\n if validations.validate_lics_conc(arg_2):\n arg_1.package.conc_lics = arg_2\n return True\n else:\n raise SPDXValueError('Package::ConcludedLicenses')\n else:\n raise CardinalityError('Package::ConcludedLicenses')"} +{"_id": "doc_3895", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds a license from a file to the package.\n Raises SPDXValueError if data malformed.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if validations.validate_lics_from_file(arg_2):\n arg_1.package.licenses_from_files.append(arg_2)\n return True\n else:\n raise SPDXValueError('Package::LicensesFromFile')"} +{"_id": "doc_3896", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package's declared license.\n Raises SPDXValueError if data malformed.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_license_declared_set:\n arg_0.package_license_declared_set = True\n if validations.validate_lics_conc(arg_2):\n arg_1.package.license_declared = arg_2\n return True\n else:\n raise SPDXValueError('Package::LicenseDeclared')\n else:\n raise CardinalityError('Package::LicenseDeclared')"} +{"_id": "doc_3897", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package's license comment.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.\n Raises SPDXValueError if text is not free form text.\n \"\"\"\n 
arg_0.assert_package_exists()\n if not arg_0.package_license_comment_set:\n arg_0.package_license_comment_set = True\n if validations.validate_pkg_lics_comment(arg_2):\n arg_1.package.license_comment = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('Package::LicenseComment')\n else:\n raise CardinalityError('Package::LicenseComment')"} +{"_id": "doc_3898", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raises OrderError if no package defined.\n \"\"\"\n if arg_0.has_package(arg_1):\n arg_1.package.files.append(file.File(arg_2))\n # A file name marks the start of a new file instance.\n # The builder must be reset\n # FIXME: this state does not make sense\n arg_0.reset_file_stat()\n return True\n else:\n raise OrderError('File::Name')"} +{"_id": "doc_3899", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Raises OrderError if no package or no file defined.\n Raises CardinalityError if more than one comment set.\n Raises SPDXValueError if text is not free form text.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_comment_set:\n arg_0.file_comment_set = True\n if validations.validate_file_comment(arg_2):\n arg_0.file(arg_1).comment = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('File::Comment')\n else:\n raise CardinalityError('File::Comment')\n else:\n raise OrderError('File::Comment')"} +{"_id": "doc_3900", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Raises OrderError if no package or file defined.\n Raises CardinalityError if more than one chksum set.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_chksum_set:\n arg_0.file_chksum_set = True\n arg_0.file(arg_1).chk_sum = checksum_from_sha1(arg_2)\n return True\n else:\n raise CardinalityError('File::CheckSum')\n else:\n raise OrderError('File::CheckSum')"} +{"_id": "doc_3901", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Raises OrderError if no package or file defined.\n Raises SPDXValueError if malformed value.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if validations.validate_file_lics_in_file(arg_2):\n arg_0.file(arg_1).add_lics(arg_2)\n return True\n else:\n raise SPDXValueError('File::LicenseInFile')\n else:\n raise OrderError('File::LicenseInFile')"} +{"_id": "doc_3902", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Raises OrderError if no package or file defined.\n Raises SPDXValueError if text is not free form text.\n Raises CardinalityError if more than one per file.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_license_comment_set:\n arg_0.file_license_comment_set = True\n if validations.validate_file_lics_comment(arg_2):\n arg_0.file(arg_1).license_comment = str_from_text(arg_2)\n else:\n raise SPDXValueError('File::LicenseComment')\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')"} +{"_id": "doc_3903", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raises OrderError if no package or file defined.\n Raises SPDXValueError if not free form text or NONE or NO_ASSERT.\n Raises CardinalityError if more than one.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_copytext_set:\n arg_0.file_copytext_set = True\n if validations.validate_file_cpyright(arg_2):\n if isinstance(arg_2, string_types):\n arg_0.file(arg_1).copyright = str_from_text(arg_2)\n else:\n 
arg_0.file(arg_1).copyright = arg_2 # None or NoAssert\n return True\n else:\n raise SPDXValueError('File::CopyRight')\n else:\n raise CardinalityError('File::CopyRight')\n else:\n raise OrderError('File::CopyRight')"} +{"_id": "doc_3904", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raises OrderError if no package or file defined.\n Raises SPDXValueError if not free form text.\n Raises CardinalityError if more than one.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_notice_set:\n arg_0.file_notice_set = True\n if validations.validate_file_notice(arg_2):\n arg_0.file(arg_1).notice = str_from_text(arg_2)\n else:\n raise SPDXValueError('File::Notice')\n else:\n raise CardinalityError('File::Notice')\n else:\n raise OrderError('File::Notice')"} +{"_id": "doc_3905", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Sets a file name, uri or home artifact.\n Raises OrderError if no package or file defined.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n arg_0.file(arg_1).add_artifact(arg_2, arg_3)\n else:\n raise OrderError('File::Artifact')"} +{"_id": "doc_3906", "title": "", "text": "def Func(arg_0):\n \"\"\"Resets the builder's state to enable building new files.\"\"\"\n # FIXME: this state does not make sense\n arg_0.file_spdx_id_set = False\n arg_0.file_comment_set = False\n arg_0.file_type_set = False\n arg_0.file_chksum_set = False\n arg_0.file_conc_lics_set = False\n arg_0.file_license_comment_set = False\n arg_0.file_notice_set = False\n arg_0.file_copytext_set = False"} +{"_id": "doc_3907", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets license extracted text.\n Raises SPDXValueError if text is not free form text.\n Raises OrderError if no license ID defined.\n \"\"\"\n if arg_0.has_extr_lic(arg_1):\n if not arg_0.extr_text_set:\n arg_0.extr_text_set = True\n if validations.validate_is_free_form_text(arg_2):\n arg_0.extr_lic(arg_1).text = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::text')\n else:\n raise CardinalityError('ExtractedLicense::text')\n else:\n raise OrderError('ExtractedLicense::text')"} +{"_id": "doc_3908", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets license name.\n Raises SPDXValueError if name is not str or utils.NoAssert\n Raises OrderError if no license id defined.\n \"\"\"\n if arg_0.has_extr_lic(arg_1):\n if not arg_0.extr_lic_name_set:\n arg_0.extr_lic_name_set = True\n if validations.validate_extr_lic_name(arg_2):\n arg_0.extr_lic(arg_1).full_name = arg_2\n return True\n else:\n raise SPDXValueError('ExtractedLicense::Name')\n else:\n raise CardinalityError('ExtractedLicense::Name')\n else:\n raise OrderError('ExtractedLicense::Name')"} +{"_id": "doc_3909", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets license comment.\n Raises SPDXValueError if comment is not free form text.\n Raises OrderError if no license ID defined.\n \"\"\"\n if arg_0.has_extr_lic(arg_1):\n if not arg_0.extr_lic_comment_set:\n arg_0.extr_lic_comment_set = True\n if validations.validate_is_free_form_text(arg_2):\n arg_0.extr_lic(arg_1).comment = str_from_text(arg_2)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::comment')\n else:\n raise CardinalityError('ExtractedLicense::comment')\n else:\n raise OrderError('ExtractedLicense::comment')"} +{"_id": "doc_3910", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds a license cross reference.\n Raises OrderError if no 
License ID defined.\n \"\"\"\n if arg_0.has_extr_lic(arg_1):\n arg_0.extr_lic(arg_1).add_xref(arg_2)\n return True\n else:\n raise OrderError('ExtractedLicense::CrossRef')"} +{"_id": "doc_3911", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return an ISO-8601 representation of a datetime object.\n \"\"\"\n return \"{0:0>4}-{1:0>2}-{2:0>2}T{3:0>2}:{4:0>2}:{5:0>2}Z\".format(\n arg_0.year, arg_0.month, arg_0.day, arg_0.hour,\n arg_0.minute, arg_0.second)"} +{"_id": "doc_3912", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Must be called before parse.\"\"\"\n arg_0.yacc = arg_2.yacc(module=arg_0, **arg_1)"} +{"_id": "doc_3913", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses a license list and returns a License or None if it failed.\"\"\"\n try:\n return arg_0.yacc.Func(arg_1, lexer=arg_0.lex)\n except:\n return None"} +{"_id": "doc_3914", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Write an SPDX RDF document.\n - document - spdx.document instance.\n - out - file like object that will be written to.\n Optionally `validate` the document before writing and raise\n InvalidDocumentError if document.validate returns False.\n \"\"\"\n \n if arg_2:\n arg_3 = []\n arg_3 = arg_0.validate(arg_3)\n if arg_3:\n raise InvalidDocumentError(arg_3)\n\n arg_4 = Writer(arg_0, arg_1)\n arg_4.write()"} +{"_id": "doc_3915", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a node representing spdx.checksum.\n \"\"\"\n arg_2 = BNode()\n arg_3 = (arg_2, RDF.type, arg_0.spdx_namespace.Checksum)\n arg_0.graph.add(arg_3)\n arg_4 = (arg_2, arg_0.spdx_namespace.algorithm, Literal(arg_1.identifier))\n arg_0.graph.add(arg_4)\n arg_5 = (arg_2, arg_0.spdx_namespace.checksumValue, Literal(arg_1.value))\n arg_0.graph.add(arg_5)\n return arg_2"} +{"_id": "doc_3916", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Traverse conjunctions and disjunctions like trees and return a\n set of all licenses in it as nodes.\n \"\"\"\n # FIXME: this is unordered!\n arg_2 = set()\n arg_0.Func_helper(arg_1, arg_2)\n return arg_2"} +{"_id": "doc_3917", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a node representing a conjunction of licenses.\n \"\"\"\n arg_2 = BNode()\n arg_3 = (arg_2, RDF.type, arg_0.spdx_namespace.ConjunctiveLicenseSet)\n arg_0.graph.add(arg_3)\n arg_4 = arg_0.licenses_from_tree(arg_1)\n for arg_5 in arg_4:\n arg_6 = (arg_2, arg_0.spdx_namespace.member, arg_5)\n arg_0.graph.add(arg_6)\n return arg_2"} +{"_id": "doc_3918", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Handle dependencies for a single file.\n - doc_file - instance of spdx.file.File.\n \"\"\"\n arg_2 = list(arg_0.graph.triples((None, arg_0.spdx_namespace.fileName, Literal(arg_1.name))))\n if len(arg_2) != 1:\n raise InvalidDocumentError('Could not find dependency subject {0}'.format(arg_1.name))\n arg_3 = arg_2[0][0]\n for arg_4 in arg_1.dependencies:\n arg_5 = list(arg_0.graph.triples((None, arg_0.spdx_namespace.fileName, Literal(arg_4))))\n if len(arg_5) == 1:\n arg_6 = arg_5[0][0]\n arg_7 = (arg_3, arg_0.spdx_namespace.fileDependency, arg_6)\n arg_0.graph.add(arg_7)\n else:\n print('Warning could not resolve file dependency {0} -> {1}'.format(arg_1.name, arg_4))"} +{"_id": "doc_3919", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a review node.\n \"\"\"\n arg_2 = BNode()\n arg_3 = (arg_2, RDF.type, arg_0.spdx_namespace.Review)\n arg_0.graph.add(arg_3)\n\n arg_4 = Literal(arg_1.reviewer.to_value())\n arg_0.graph.add((arg_2, 
arg_0.spdx_namespace.reviewer, arg_4))\n arg_5 = Literal(arg_1.review_date_iso_format)\n arg_6 = (arg_2, arg_0.spdx_namespace.reviewDate, arg_5)\n arg_0.graph.add(arg_6)\n if arg_1.has_comment:\n arg_7 = Literal(arg_1.comment)\n arg_8 = (arg_2, RDFS.comment, arg_7)\n arg_0.graph.add(arg_8)\n\n return arg_2"} +{"_id": "doc_3920", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return an annotation node.\n \"\"\"\n arg_2 = URIRef(str(arg_1.spdx_id))\n arg_3 = (arg_2, RDF.type, arg_0.spdx_namespace.Annotation)\n arg_0.graph.add(arg_3)\n\n arg_4 = Literal(arg_1.annotator.to_value())\n arg_0.graph.add((arg_2, arg_0.spdx_namespace.annotator, arg_4))\n arg_5 = Literal(arg_1.annotation_date_iso_format)\n arg_6 = (arg_2, arg_0.spdx_namespace.annotationDate, arg_5)\n arg_0.graph.add(arg_6)\n if arg_1.has_comment:\n arg_7 = Literal(arg_1.comment)\n arg_8 = (arg_2, RDFS.comment, arg_7)\n arg_0.graph.add(arg_8)\n arg_9 = Literal(arg_1.annotation_type)\n arg_10 = (arg_2, arg_0.spdx_namespace.annotationType, arg_9)\n arg_0.graph.add(arg_10)\n\n return arg_2"} +{"_id": "doc_3921", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a node representing package verification code.\n \"\"\"\n arg_2 = BNode()\n arg_3 = (arg_2, RDF.type, arg_0.spdx_namespace.PackageVerificationCode)\n arg_0.graph.add(arg_3)\n arg_4 = (arg_2, arg_0.spdx_namespace.packageVerificationCodeValue, Literal(arg_1.verif_code))\n arg_0.graph.add(arg_4)\n arg_5 = map(\n lambda excl: Literal(excl), arg_1.verif_exc_files)\n arg_6 = arg_0.spdx_namespace.packageVerificationCodeExcludedFile\n arg_7 = [(arg_2, arg_6, xcl_file) for xcl_file in arg_5]\n for arg_8 in arg_7:\n arg_0.graph.add(arg_8)\n return arg_2"} +{"_id": "doc_3922", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Write package optional fields.\n \"\"\"\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.versionInfo, 'version')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.packageFileName, 'file_name')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.supplier, 'supplier')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.originator, 'originator')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.sourceInfo, 'source_info')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.licenseComments, 'license_comment')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.summary, 'summary')\n arg_0.handle_package_literal_optional(arg_1, arg_2, arg_0.spdx_namespace.description, 'description')\n\n if arg_1.has_optional_field('check_sum'):\n arg_3 = arg_0.create_checksum_node(arg_1.check_sum)\n arg_0.graph.add((arg_2, arg_0.spdx_namespace.checksum, arg_3))\n\n if arg_1.has_optional_field('homepage'):\n arg_4 = URIRef(arg_0.to_special_value(arg_1.homepage))\n arg_5 = (arg_2, arg_0.doap_namespace.homepage, arg_4)\n arg_0.graph.add(arg_5)"} +{"_id": "doc_3923", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a Node representing the package.\n Files must have been added to the graph before this method is called.\n \"\"\"\n arg_2 = BNode()\n arg_3 = (arg_2, RDF.type, arg_0.spdx_namespace.Package)\n arg_0.graph.add(arg_3)\n # Handle optional fields:\n arg_0.handle_pkg_optional_fields(arg_1, arg_2)\n # package name\n arg_4 = (arg_2, arg_0.spdx_namespace.name, Literal(arg_1.name))\n arg_0.graph.add(arg_4)\n # Package download location\n arg_5 = (arg_2, 
arg_0.spdx_namespace.downloadLocation, arg_0.to_special_value(arg_1.download_location))\n arg_0.graph.add(arg_5)\n # Handle package verification\n arg_6 = arg_0.package_verif_node(arg_1)\n arg_7 = (arg_2, arg_0.spdx_namespace.packageVerificationCode, arg_6)\n arg_0.graph.add(arg_7)\n # Handle concluded license\n arg_8 = arg_0.license_or_special(arg_1.conc_lics)\n arg_9 = (arg_2, arg_0.spdx_namespace.licenseConcluded, arg_8)\n arg_0.graph.add(arg_9)\n # Handle declared license\n arg_10 = arg_0.license_or_special(arg_1.license_declared)\n arg_11 = (arg_2, arg_0.spdx_namespace.licenseDeclared, arg_10)\n arg_0.graph.add(arg_11)\n # Package licenses from files\n arg_12 = map(lambda el: arg_0.license_or_special(el), arg_1.licenses_from_files)\n arg_13 = arg_0.spdx_namespace.licenseInfoFromFiles\n arg_14 = [(arg_2, arg_13, node) for node in arg_12]\n for arg_15 in arg_14:\n arg_0.graph.add(arg_15)\n # Copyright Text\n arg_16 = arg_0.to_special_value(arg_1.cr_text)\n arg_17 = (arg_2, arg_0.spdx_namespace.copyrightText, arg_16)\n arg_0.graph.add(arg_17)\n # Handle files\n arg_0.handle_package_has_file(arg_1, arg_2)\n return arg_2"} +{"_id": "doc_3924", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return node representing pkg_file\n pkg_file should be instance of spdx.file.\n \"\"\"\n arg_2 = list(arg_0.graph.triples((None, arg_0.spdx_namespace.fileName, Literal(arg_1.name))))\n if len(arg_2) == 1:\n return arg_2[0][0]\n else:\n raise InvalidDocumentError('Func could not' +\n ' find file node for file: {0}'.format(arg_1.name))"} +{"_id": "doc_3925", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add hasFile triples to graph.\n Must be called after files have been added.\n \"\"\"\n arg_3 = map(arg_0.Func_helper, arg_1.files)\n arg_4 = [(arg_2, arg_0.spdx_namespace.hasFile, node) for node in arg_3]\n for arg_5 in arg_4:\n arg_0.graph.add(arg_5)"} +{"_id": "doc_3926", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Add and return the root document node to graph.\n \"\"\"\n arg_1 = URIRef('http://www.spdx.org/tools#SPDXRef-DOCUMENT')\n # Doc type\n arg_0.graph.add((arg_1, RDF.type, arg_0.spdx_namespace.SpdxDocument))\n # Version\n arg_2 = Literal(str(arg_0.document.version))\n arg_0.graph.add((arg_1, arg_0.spdx_namespace.specVersion, arg_2))\n # Data license\n arg_3 = URIRef(arg_0.document.data_license.url)\n arg_0.graph.add((arg_1, arg_0.spdx_namespace.dataLicense, arg_3))\n arg_4 = URIRef(arg_0.document.name)\n arg_0.graph.add((arg_1, arg_0.spdx_namespace.name, arg_4))\n return arg_1"} +{"_id": "doc_3927", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns True if the fields are valid according to the SPDX standard.\n Appends user friendly messages to the messages parameter.\n \"\"\"\n arg_1 = arg_0.Func_creators(arg_1)\n arg_1 = arg_0.Func_created(arg_1)\n\n return arg_1"} +{"_id": "doc_3928", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks if value is a special SPDX value such as\n NONE, NOASSERTION or UNKNOWN if so returns proper model.\n else returns value\"\"\"\n if arg_1 == arg_0.spdx_namespace.none:\n return utils.SPDXNone()\n elif arg_1 == arg_0.spdx_namespace.noassertion:\n return utils.NoAssert()\n elif arg_1 == arg_0.spdx_namespace.unknown:\n return utils.UnKnown()\n else:\n return arg_1"} +{"_id": "doc_3929", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return license comment or None.\n \"\"\"\n arg_2 = list(arg_0.graph.triples(\n (arg_1, RDFS.comment, None)))\n if len(arg_2) > 1 :\n arg_0.more_than_one_error('extracted 
license comment')\n return\n elif len(arg_2) == 1:\n return arg_2[0][2]\n else:\n return"} +{"_id": "doc_3930", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return an ExtractedLicense object to represent a license object.\n But does not add it to the SPDXDocument model.\n Return None if failed.\n \"\"\"\n # Grab all possible values\n arg_2 = arg_0.get_extr_license_ident(arg_1)\n arg_3 = arg_0.get_extr_license_text(arg_1)\n arg_4 = arg_0.get_extr_lics_comment(arg_1)\n arg_5 = arg_0.get_extr_lics_xref(arg_1)\n arg_6 = arg_0.get_extr_lic_name(arg_1)\n\n if not arg_2:\n # Must have identifier\n return\n\n # Set fields\n # FIXME: the constructor of the license should always accept a name\n arg_7 = document.ExtractedLicense(arg_2)\n if arg_3 is not None:\n arg_7.text = arg_3\n if arg_6 is not None:\n arg_7.full_name = arg_6\n if arg_4 is not None:\n arg_7.comment = arg_4\n arg_7.cross_ref = map(lambda x: six.text_type(x), arg_5)\n return arg_7"} +{"_id": "doc_3931", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build and return an ExtractedLicense or None.\n Note that this function adds the license to the document.\n \"\"\"\n arg_2 = arg_0.parse_only_extr_license(arg_1)\n if arg_2 is not None:\n arg_0.doc.add_extr_lic(arg_2)\n return arg_2"} +{"_id": "doc_3932", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns first found fileName property or None if not found.\"\"\"\n for arg_2, arg_2, arg_3 in arg_0.graph.triples((arg_1, arg_0.spdx_namespace['fileName'], None)):\n return arg_3\n return"} +{"_id": "doc_3933", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file dependencies.\"\"\"\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n arg_5 = arg_0.get_file_name(arg_4)\n if arg_5 is not None:\n arg_0.builder.add_file_dep(six.text_type(arg_5))\n else:\n arg_0.error = True\n arg_7 = 'File depends on file with no name'\n arg_0.logger.log(arg_7)"} +{"_id": "doc_3934", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Parse all file contributors and adds them to the model.\n \"\"\"\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n arg_0.builder.add_file_contribution(arg_0.doc, six.text_type(arg_4))"} +{"_id": "doc_3935", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file notice text.\"\"\"\n try:\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n arg_0.builder.set_file_notice(arg_0.doc, six.text_type(arg_4))\n except CardinalityError:\n arg_0.more_than_one_error('file notice')"} +{"_id": "doc_3936", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file comment text.\"\"\"\n try:\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n arg_0.builder.set_file_comment(arg_0.doc, six.text_type(arg_4))\n except CardinalityError:\n arg_0.more_than_one_error('file comment')"} +{"_id": "doc_3937", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file license comment.\"\"\"\n try:\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n arg_0.builder.set_file_license_comment(arg_0.doc, six.text_type(arg_4))\n except CardinalityError:\n arg_0.more_than_one_error('file comments on license')"} +{"_id": "doc_3938", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file license information.\"\"\"\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n arg_5 = arg_0.handle_lics(arg_4)\n if arg_5 is not None:\n arg_0.builder.set_file_license_in_file(arg_0.doc, 
arg_5)"} +{"_id": "doc_3939", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file type.\"\"\"\n try:\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n try:\n if arg_4.endswith('binary'):\n arg_4 = 'BINARY'\n elif arg_4.endswith('source'):\n arg_4 = 'SOURCE'\n elif arg_4.endswith('other'):\n arg_4 = 'OTHER'\n elif arg_4.endswith('archive'):\n arg_4 = 'ARCHIVE'\n arg_0.builder.set_file_type(arg_0.doc, arg_4)\n except SPDXValueError:\n arg_0.value_error('FILE_TYPE', arg_4)\n except CardinalityError:\n arg_0.more_than_one_error('file type')"} +{"_id": "doc_3940", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file checksum. Assumes SHA1 algorithm without checking.\"\"\"\n try:\n for arg_3, arg_4, arg_5 in arg_0.graph.triples((arg_1, arg_2, None)):\n for arg_6, arg_6, arg_7 in arg_0.graph.triples((arg_5, arg_0.spdx_namespace['checksumValue'], None)):\n arg_0.builder.set_file_chksum(arg_0.doc, six.text_type(arg_7))\n except CardinalityError:\n arg_0.more_than_one_error('File checksum')"} +{"_id": "doc_3941", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets file licenses concluded.\"\"\"\n try:\n for arg_3, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_2, None)):\n if (arg_4, RDF.type, arg_0.spdx_namespace['ConjunctiveLicenseSet']) in arg_0.graph:\n arg_5 = arg_0.handle_conjunctive_list(arg_4)\n arg_0.builder.set_concluded_license(arg_0.doc, arg_5)\n\n elif (arg_4, RDF.type, arg_0.spdx_namespace['DisjunctiveLicenseSet']) in arg_0.graph:\n arg_5 = arg_0.handle_disjunctive_list(arg_4)\n arg_0.builder.set_concluded_license(arg_0.doc, arg_5)\n\n else:\n try:\n arg_5 = arg_0.handle_lics(arg_4)\n arg_0.builder.set_concluded_license(arg_0.doc, arg_5)\n except SPDXValueError:\n arg_0.value_error('FILE_SINGLE_LICS', arg_4)\n except CardinalityError:\n arg_0.more_than_one_error('file {0}'.format(arg_2))"} +{"_id": "doc_3942", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns review date or None if not found.\n Reports error on failure.\n Note does not check value format.\n \"\"\"\n arg_2 = list(arg_0.graph.triples((arg_1, arg_0.spdx_namespace['reviewDate'], None)))\n if len(arg_2) != 1:\n arg_0.error = True\n arg_4 = 'Review must have exactlyone review date'\n arg_0.logger.log(arg_4)\n return\n return six.text_type(arg_2[0][2])"} +{"_id": "doc_3943", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns annotation comment or None if found none or more than one.\n Reports errors.\n \"\"\"\n arg_2 = list(arg_0.graph.triples((arg_1, RDFS.comment, None)))\n if len(arg_2) > 1:\n arg_0.error = True\n arg_4 = 'Annotation can have at most one comment.'\n arg_0.logger.log(arg_4)\n return\n else:\n return six.text_type(arg_2[0][2])"} +{"_id": "doc_3944", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns annotation date or None if not found.\n Reports error on failure.\n Note does not check value format.\n \"\"\"\n arg_2 = list(arg_0.graph.triples((arg_1, arg_0.spdx_namespace['annotationDate'], None)))\n if len(arg_2) != 1:\n arg_0.error = True\n arg_4 = 'Annotation must have exactly one annotation date.'\n arg_0.logger.log(arg_4)\n return\n return six.text_type(arg_2[0][2])"} +{"_id": "doc_3945", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse creators, created and comment.\n \"\"\"\n for arg_2, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_0.spdx_namespace['creator'], None)):\n try:\n arg_5 = arg_0.builder.create_entity(arg_0.doc, six.text_type(arg_4))\n 
arg_0.builder.add_creator(arg_0.doc, arg_5)\n except SPDXValueError:\n arg_0.value_error('CREATOR_VALUE', arg_4)\n\n for arg_2, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_0.spdx_namespace['created'], None)):\n try:\n arg_0.builder.set_created_date(arg_0.doc, six.text_type(arg_4))\n except SPDXValueError:\n arg_0.value_error('CREATED_VALUE', arg_4)\n except CardinalityError:\n arg_0.more_than_one_error('created')\n break\n\n for arg_2, arg_3, arg_4 in arg_0.graph.triples((arg_1, RDFS.comment, None)):\n try:\n arg_0.builder.set_creation_comment(arg_0.doc, six.text_type(arg_4))\n except CardinalityError:\n arg_0.more_than_one_error('CreationInfo comment')\n break\n for arg_2, arg_3, arg_4 in arg_0.graph.triples((arg_1, arg_0.spdx_namespace['licenseListVersion'], None)):\n try:\n arg_0.builder.set_lics_list_ver(arg_0.doc, six.text_type(arg_4))\n except CardinalityError:\n arg_0.more_than_one_error('licenseListVersion')\n break\n except SPDXValueError:\n arg_0.value_error('LL_VALUE', arg_4)"} +{"_id": "doc_3946", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parses the External Document ID, SPDX Document URI and Checksum.\n \"\"\"\n for arg_2, arg_3, arg_4 in arg_0.graph.triples(\n (arg_1,\n arg_0.spdx_namespace['externalDocumentId'],\n None)):\n try:\n arg_0.builder.set_ext_doc_id(arg_0.doc, six.text_type(arg_4))\n except SPDXValueError:\n arg_0.value_error('EXT_DOC_REF_VALUE', 'External Document ID')\n break\n\n for arg_2, arg_3, arg_4 in arg_0.graph.triples(\n (arg_1,\n arg_0.spdx_namespace['spdxDocument'],\n None)):\n try:\n arg_0.builder.set_spdx_doc_uri(arg_0.doc, six.text_type(arg_4))\n except SPDXValueError:\n arg_0.value_error('EXT_DOC_REF_VALUE', 'SPDX Document URI')\n break\n\n for arg_2, arg_3, arg_5 in arg_0.graph.triples(\n (arg_1, arg_0.spdx_namespace['checksum'], None)):\n for arg_6, arg_6, arg_7 in arg_0.graph.triples(\n (arg_5, arg_0.spdx_namespace['checksumValue'], None)):\n try:\n arg_0.builder.set_chksum(arg_0.doc, six.text_type(arg_7))\n except SPDXValueError:\n arg_0.value_error('EXT_DOC_REF_VALUE', 'Checksum')\n break"} +{"_id": "doc_3947", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validate the package fields.\n Append user friendly error messages to the `messages` list.\n \"\"\"\n arg_1 = arg_0.Func_checksum(arg_1)\n arg_1 = arg_0.Func_optional_str_fields(arg_1)\n arg_1 = arg_0.Func_mandatory_str_fields(arg_1)\n arg_1 = arg_0.Func_files(arg_1)\n arg_1 = arg_0.Func_mandatory_fields(arg_1)\n arg_1 = arg_0.Func_optional_fields(arg_1)\n\n return arg_1"} +{"_id": "doc_3948", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Helper for validate_mandatory_str_field and\n validate_optional_str_fields\"\"\"\n for arg_4 in arg_1:\n arg_5 = getattr(arg_0, arg_4)\n if arg_5 is not None:\n # FIXME: this does not make sense???\n arg_6 = getattr(arg_5, '__str__', None)\n if not callable(arg_6):\n arg_3 = arg_3 + [\n '{0} must provide __str__ method.'.format(arg_5)\n ]\n # Continue checking.\n elif not arg_2:\n arg_3 = arg_3 + [\n 'Package {0} can not be None.'.format(arg_4)\n ]\n\n return arg_3"} +{"_id": "doc_3949", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets document comment, Raises CardinalityError if\n comment already set.\n \"\"\"\n if not arg_0.doc_comment_set:\n arg_0.doc_comment_set = True\n arg_1.comment = arg_2\n else:\n raise CardinalityError('Document::Comment')"} +{"_id": "doc_3950", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Sets the external document reference's check sum, if not 
already set.\n chk_sum - The checksum value in the form of a string.\n \"\"\"\n if arg_2:\n arg_1.ext_document_references[-1].check_sum = checksum.Algorithm(\n 'SHA1', arg_2)\n else:\n raise SPDXValueError('ExternalDocumentRef::Checksum')"} +{"_id": "doc_3951", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package's source information, if not already set.\n text - Free form text.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_source_info_set:\n arg_0.package_source_info_set = True\n arg_1.package.source_info = arg_2\n return True\n else:\n raise CardinalityError('Package::SourceInfo')"} +{"_id": "doc_3952", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the package's verification code excluded file.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n arg_1.package.add_exc_file(arg_2)"} +{"_id": "doc_3953", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Set's the package summary.\n Raises CardinalityError if summary already set.\n Raises OrderError if no package previously defined.\n \"\"\"\n arg_0.assert_package_exists()\n if not arg_0.package_summary_set:\n arg_0.package_summary_set = True\n arg_1.package.summary = arg_2\n else:\n raise CardinalityError('Package::Summary')"} +{"_id": "doc_3954", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the file check sum, if not already set.\n chk_sum - A string\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_chksum_set:\n arg_0.file_chksum_set = True\n arg_0.file(arg_1).chk_sum = checksum.Algorithm('SHA1', arg_2)\n return True\n else:\n raise CardinalityError('File::CheckSum')\n else:\n raise OrderError('File::CheckSum')"} +{"_id": "doc_3955", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Raises OrderError if no package or file defined.\n Raises CardinalityError if more than one per file.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_license_comment_set:\n arg_0.file_license_comment_set = True\n arg_0.file(arg_1).license_comment = arg_2\n return True\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')"} +{"_id": "doc_3956", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raises OrderError if no package or no file defined.\n Raises CardinalityError if more than one comment set.\n \"\"\"\n if arg_0.has_package(arg_1) and arg_0.has_file(arg_1):\n if not arg_0.file_comment_set:\n arg_0.file_comment_set = True\n arg_0.file(arg_1).comment = arg_2\n return True\n else:\n raise CardinalityError('File::Comment')\n else:\n raise OrderError('File::Comment')"} +{"_id": "doc_3957", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the annotation comment. Raises CardinalityError if\n already set. OrderError if no annotator defined before.\n \"\"\"\n if len(arg_1.annotations) != 0:\n if not arg_0.annotation_comment_set:\n arg_0.annotation_comment_set = True\n arg_1.annotations[-1].comment = arg_2\n return True\n else:\n raise CardinalityError('AnnotationComment')\n else:\n raise OrderError('AnnotationComment')"} +{"_id": "doc_3958", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Sets the annotation type. 
Raises CardinalityError if\n already set. OrderError if no annotator defined before.\n \"\"\"\n if len(arg_1.annotations) != 0:\n if not arg_0.annotation_type_set:\n if arg_2.endswith('annotationType_other'):\n arg_0.annotation_type_set = True\n arg_1.annotations[-1].annotation_type = 'OTHER'\n return True\n elif arg_2.endswith('annotationType_review'):\n arg_0.annotation_type_set = True\n arg_1.annotations[-1].annotation_type = 'REVIEW'\n return True\n else:\n raise SPDXValueError('Annotation::AnnotationType')\n else:\n raise CardinalityError('Annotation::AnnotationType')\n else:\n raise OrderError('Annotation::AnnotationType')"} +{"_id": "doc_3959", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validate all fields of the document and update the\n messages list with user friendly error messages for display.\n \"\"\"\n arg_1 = arg_0.Func_version(arg_1)\n arg_1 = arg_0.Func_data_lics(arg_1)\n arg_1 = arg_0.Func_name(arg_1)\n arg_1 = arg_0.Func_spdx_id(arg_1)\n arg_1 = arg_0.Func_namespace(arg_1)\n arg_1 = arg_0.Func_ext_document_references(arg_1)\n arg_1 = arg_0.Func_creation_info(arg_1)\n arg_1 = arg_0.Func_package(arg_1)\n arg_1 = arg_0.Func_extracted_licenses(arg_1)\n arg_1 = arg_0.Func_reviews(arg_1)\n\n return arg_1"} +{"_id": "doc_3960", "title": "", "text": "def Func(arg_0):\n '''Decorator to synchronize function.'''\n arg_0.__lock__ = threading.Lock()\n def synced_func(*arg_2, **arg_3):\n with arg_0.__lock__:\n return arg_0(*arg_2, **arg_3)\n return synced_func"} +{"_id": "doc_3961", "title": "", "text": "def Func(arg_0, *arg_1):\n '''Program Func output.'''\n clear_progress()\n arg_2 = (arg_0 % arg_1)\n sys.stdout.write(arg_2 + '\\n')"} +{"_id": "doc_3962", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=1, arg_3=False):\n '''Utility function to handle runtime Funcures gracefully.\n Show concise information if possible, then terminate program.\n '''\n arg_4 = arg_0\n if arg_1:\n arg_4 += str(arg_1)\n error(arg_4)\n if arg_3:\n error(traceback.format_exc())\n clean_tempfiles()\n if __name__ == '__main__':\n sys.exit(arg_2)\n else:\n raise RuntimeError(arg_2)"} +{"_id": "doc_3963", "title": "", "text": "def Func():\n '''Clean up temp files'''\n for arg_0 in TEMP_FILES:\n if os.path.exists(arg_0):\n os.unlink(arg_0)"} +{"_id": "doc_3964", "title": "", "text": "def Func(arg_0):\n '''Get the fixed part of the path without wildcard'''\n arg_1 = arg_0.path.split(PATH_SEP)\n arg_2 = []\n for arg_3 in arg_1:\n if '*' in arg_3 or '?' in arg_3:\n break\n arg_2.append(arg_3)\n return PATH_SEP.join(arg_2)"} +{"_id": "doc_3965", "title": "", "text": "def Func(arg_0, arg_1):\n '''Given a API name, list all legal parameters using boto3 service model.'''\n if arg_1 not in arg_0.client.meta.method_to_api_mapping:\n # Injected methods. Ignore.\n return []\n arg_2 = arg_0.client.meta.method_to_api_mapping[arg_1]\n arg_3 = arg_0.client.meta.service_model.operation_model(arg_2).input_shape\n if arg_3 is None:\n # No params needed for this API.\n return []\n return arg_3.members.keys()"} +{"_id": "doc_3966", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Combine existing parameters with extra options supplied from command line\n options. 
Carefully merge special type of parameter if needed.\n '''\n for arg_3 in arg_0.legal_params[arg_1]:\n if not hasattr(arg_0.opt, arg_3) or getattr(arg_0.opt, arg_3) is None:\n continue\n if arg_3 in arg_2 and type(arg_2[arg_3]) == dict:\n assert(type(getattr(arg_0.opt, arg_3)) == dict)\n # Merge two dictionaries.\n for arg_4, arg_5 in getattr(arg_0.opt, arg_3).iteritems():\n arg_2[arg_3][arg_4] = arg_5\n else:\n # Overwrite values.\n arg_2[arg_3] = getattr(arg_0.opt, arg_3)\n\n return arg_2"} +{"_id": "doc_3967", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Terminate all threads by deleting the queue and forcing the child threads\n to quit.\n '''\n if arg_1:\n arg_0.exc_info = arg_1\n try:\n while arg_0.get_nowait():\n arg_0.task_done()\n except Queue.Empty:\n pass"} +{"_id": "doc_3968", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n '''Utility function to add a single task into task queue'''\n arg_0.tasks.put((arg_1, 0, arg_2, arg_3))"} +{"_id": "doc_3969", "title": "", "text": "def Func(arg_0):\n '''Utility function to wait all tasks to complete'''\n arg_0.tasks.Func()\n\n # Force each thread to break loop.\n for arg_1 in arg_0.workers:\n arg_0.tasks.put(None)\n\n # Wait for all thread to terminate.\n for arg_1 in arg_0.workers:\n arg_1.Func()\n arg_1.s3 = None"} +{"_id": "doc_3970", "title": "", "text": "def Func():\n '''Retrieve S3 access keys from the environment, or None if not present.'''\n arg_0 = os.environ\n if S3_ACCESS_KEY_NAME in arg_0 and S3_SECRET_KEY_NAME in arg_0:\n arg_1 = (arg_0[S3_ACCESS_KEY_NAME], arg_0[S3_SECRET_KEY_NAME])\n debug(\"read S3 keys from environment\")\n return arg_1\n else:\n return None"} +{"_id": "doc_3971", "title": "", "text": "def Func(arg_0):\n '''Retrieve S3 access keys from the command line, or None if not present.'''\n if arg_0.access_key != None and arg_0.secret_key != None:\n arg_1 = (arg_0.access_key, arg_0.secret_key)\n debug(\"read S3 keys from commandline\")\n return arg_1\n else:\n return None"} +{"_id": "doc_3972", "title": "", "text": "def Func(arg_0):\n '''Retrieve S3 access key settings from s3cmd's config file, if present; otherwise return None.'''\n try:\n if arg_0.s3cfg != None:\n arg_1 = \"%s\" % arg_0.s3cfg\n else:\n arg_1 = \"%s/.s3cfg\" % os.environ[\"HOME\"]\n if not os.path.exists(arg_1):\n return None\n arg_2 = ConfigParser.ConfigParser()\n arg_2.read(arg_1)\n arg_3 = arg_2.get(\"default\", \"access_key\"), arg_2.get(\"default\", \"secret_key\")\n debug(\"read S3 keys from %s file\", arg_1)\n return arg_3\n except Exception as e:\n info(\"could not read S3 keys from %s file; skipping (%s)\", arg_1, e)\n return None"} +{"_id": "doc_3973", "title": "", "text": "def Func(arg_0):\n '''Initialize s3 access keys from environment variable or s3cfg config file.'''\n arg_1.S3_KEYS = arg_1.s3_keys_from_cmdline(arg_0) or arg_1.s3_keys_from_env() \\\n or arg_1.s3_keys_from_s3cfg(arg_0)"} +{"_id": "doc_3974", "title": "", "text": "def Func(arg_0):\n '''Connect to S3 storage'''\n try:\n if S3Handler.S3_KEYS:\n arg_0.s3 = BotoClient(arg_0.opt, S3Handler.S3_KEYS[0], S3Handler.S3_KEYS[1])\n else:\n arg_0.s3 = BotoClient(arg_0.opt)\n except Exception as e:\n raise RetryFailure('Unable to Func to s3: %s' % e)"} +{"_id": "doc_3975", "title": "", "text": "def Func(arg_0):\n '''List all buckets'''\n arg_1 = []\n for arg_2 in arg_0.s3.Func().get('Buckets') or []:\n arg_1.append({\n 'name': S3URL.combine('s3', arg_2['Name'], ''),\n 'is_dir': True,\n 'size': 0,\n 'last_modified': arg_2['CreationDate']\n })\n return 
arg_1"} +{"_id": "doc_3976", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''Walk through a S3 directory. This function initiate a walk with a basedir.\n It also supports multiple wildcards.\n '''\n # Provide the default value from command line if no override.\n if not arg_2:\n arg_2 = arg_0.opt.show_dir\n\n # trailing slash normalization, this is for the reason that we want\n # ls 's3://foo/bar/' has the same result as 's3://foo/bar'. Since we\n # call partial_match() to check wildcards, we need to ensure the number\n # of slashes stays the same when we do this.\n if arg_1[-1] == PATH_SEP:\n arg_1 = arg_1[0:-1]\n\n arg_3 = S3URL(arg_1)\n arg_4 = []\n\n arg_5 = ThreadPool(ThreadUtil, arg_0.opt)\n arg_5.Func(arg_3, arg_3.get_fixed_path(), arg_3.path, arg_4)\n arg_5.join()\n\n # automatic directory detection\n if not arg_2 and len(arg_4) == 1 and arg_4[0]['is_dir']:\n arg_6 = arg_4[0]['name']\n arg_3 = S3URL(arg_6)\n arg_4 = []\n arg_5 = ThreadPool(ThreadUtil, arg_0.opt)\n arg_5.Func(arg_3, arg_3.get_fixed_path(), arg_3.path, arg_4)\n arg_5.join()\n\n def compare(arg_7, arg_8):\n '''Comparator for ls output'''\n arg_4 = -cmp(arg_7['is_dir'], arg_8['is_dir'])\n if arg_4 != 0:\n return arg_4\n return cmp(arg_7['name'], arg_8['name'])\n return sorted(arg_4, key=cmp_to_key(compare))"} +{"_id": "doc_3977", "title": "", "text": "def Func(arg_0, arg_1):\n '''Walk through local directories from root basedir'''\n arg_2 = []\n\n for arg_3, arg_4, arg_5 in os.walk(arg_1):\n for arg_6 in arg_5:\n arg_2.append(os.path.join(arg_3, arg_6))\n return arg_2"} +{"_id": "doc_3978", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Get privileges from metadata of the source in s3, and apply them to target'''\n if 'privilege' in arg_1['Metadata']:\n os.chmod(arg_2, int(arg_1['Metadata']['privilege'], 8))"} +{"_id": "doc_3979", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''Download a single file or a directory by adding a task into queue'''\n if arg_2[-1] == PATH_SEP:\n if arg_0.opt.recursive:\n arg_4 = S3URL(arg_2).path\n for arg_5 in (arg_5 for arg_5 in arg_0.s3walk(arg_2) if not arg_5['is_dir']):\n arg_1.download(arg_5['name'], os.path.join(arg_3, os.path.relpath(S3URL(arg_5['name']).path, arg_4)))\n else:\n message('omitting directory \"%s\".' % arg_2)\n else:\n arg_1.download(arg_2, arg_3)"} +{"_id": "doc_3980", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Download files.\n This function can handle multiple files if source S3 URL has wildcard\n characters. It also handles recursive mode by download all files and\n keep the directory structure.\n '''\n arg_3 = ThreadPool(ThreadUtil, arg_0.opt)\n arg_1 = arg_0.source_expand(arg_1)\n\n if os.path.isdir(arg_2):\n for arg_4 in arg_1:\n arg_0.get_single_file(arg_3, arg_4, os.path.join(arg_2, arg_0.get_basename(S3URL(arg_4).path)))\n else:\n if len(arg_1) > 1:\n raise Failure('Target \"%s\" is not a directory.' 
% arg_2)\n # Get file if it exists on s3 otherwise do nothing\n elif len(arg_1) == 1:\n arg_0.get_single_file(arg_3, arg_1[0], arg_2)\n else:\n #Source expand may return empty list only if ignore-empty-source is set to true\n pass\n\n arg_3.join()"} +{"_id": "doc_3981", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''Copy a single file or a directory by adding a task into queue'''\n if arg_2[-1] == PATH_SEP:\n if arg_0.opt.recursive:\n arg_5 = S3URL(arg_2).path\n for arg_6 in (arg_6 for arg_6 in arg_0.s3walk(arg_2) if not arg_6['is_dir']):\n arg_1.copy(arg_6['name'], os.path.join(arg_3, os.path.relpath(S3URL(arg_6['name']).path, arg_5)), arg_4=arg_4)\n else:\n message('omitting directory \"%s\".' % arg_2)\n else:\n arg_1.copy(arg_2, arg_3, arg_4=arg_4)"} +{"_id": "doc_3982", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Sync directory to directory.'''\n arg_3 = S3URL.is_valid(arg_1)\n arg_4 = S3URL.is_valid(arg_2)\n\n arg_5 = arg_0.relative_dir_walk(arg_1)\n if len(arg_5) == 0 or '.' in arg_5:\n raise Failure('Sync command need to sync directory to directory.')\n\n arg_6 = [(os.path.join(arg_1, arg_12), os.path.join(arg_2, arg_12)) for arg_12 in arg_5]\n\n arg_7 = ThreadPool(ThreadUtil, arg_0.opt)\n if arg_3 and not arg_4:\n for arg_8, arg_9 in arg_6:\n arg_7.download(arg_8, arg_9)\n elif not arg_3 and arg_4:\n for arg_8, arg_9 in arg_6:\n arg_7.upload(arg_8, arg_9)\n elif arg_3 and arg_4:\n for arg_8, arg_9 in arg_6:\n arg_7.copy(arg_8, arg_9)\n else:\n raise InvalidArgument('Cannot sync two local directories.')\n arg_7.join()\n\n if arg_0.opt.delete_removed:\n arg_10 = arg_0.relative_dir_walk(arg_2)\n arg_11 = [os.path.join(arg_2, arg_12) for arg_12 in (set(arg_10) - set(arg_5))]\n\n if S3URL.is_valid(arg_2):\n arg_7 = ThreadPool(ThreadUtil, arg_0.opt)\n arg_7.batch_delete(arg_11)\n arg_7.join()\n else:\n for arg_12 in arg_11:\n try:\n os.unlink(arg_12)\n message('Delete %s', arg_12)\n except:\n pass"} +{"_id": "doc_3983", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Check MD5 for a local file and a remote file.\n Return True if they have the same md5 hash, otherwise False.\n '''\n if not arg_2:\n return False\n if not os.path.exists(arg_1.filename):\n return False\n arg_3 = arg_1.get_md5()\n\n # check multiple md5 locations\n return ('ETag' in arg_2 and arg_2['ETag'] == '\"%s\"' % arg_3) or \\\n ('md5' in arg_2 and arg_2['md5'] == arg_3) or \\\n ('md5' in arg_2['Metadata'] and arg_2['Metadata']['md5'] == arg_3)"} +{"_id": "doc_3984", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Partially match a path and a filter_path with wildcards.\n This function will return True if this path partially match a filter path.\n This is used for walking through directories with multiple level wildcard.\n '''\n if not arg_1 or not arg_2:\n return True\n\n # trailing slash normalization\n if arg_1[-1] == PATH_SEP:\n arg_1 = arg_1[0:-1]\n if arg_2[-1] == PATH_SEP:\n arg_2 += '*'\n\n arg_3 = arg_1.split(PATH_SEP)\n arg_4 = arg_2.split(PATH_SEP)\n\n # Here, if we are in recursive mode, we allow the pi to be longer than fi.\n # Otherwise, length of pi should be equal or less than the lenght of fi.\n arg_5 = min(len(arg_3), len(arg_4))\n arg_6 = fnmatch.fnmatch(PATH_SEP.join(arg_3[0:arg_5]), PATH_SEP.join(arg_4[0:arg_5]))\n return arg_6 and (arg_0.opt.recursive or len(arg_3) <= len(arg_4))"} +{"_id": "doc_3985", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''Thread worker for Func.\n Recursively walk into all subdirectories 
if they still match the filter\n path partially.\n '''\n\n arg_5 = arg_0.s3.get_paginator('list_objects')\n arg_6 = arg_3.count(PATH_SEP)\n\n for arg_7 in arg_5.paginate(Bucket=arg_1.bucket, Prefix=arg_2, Delimiter=PATH_SEP, PaginationConfig={'PageSize': 1000}):\n # Get subdirectories first.\n for arg_8 in arg_7.get('CommonPrefixes') or []:\n arg_9 = arg_8['Prefix']\n\n if not arg_0.partial_match(arg_9, arg_3):\n continue\n\n if arg_0.opt.recursive or (arg_9.count(PATH_SEP) != arg_6 + 1):\n arg_0.pool.Func(arg_1, arg_9, arg_3, arg_4)\n else:\n arg_0.conditional(arg_4, {\n 'name': S3URL.combine(arg_1.proto, arg_1.bucket, arg_9),\n 'is_dir': True,\n 'size': 0,\n 'last_modified': None\n })\n\n # Then get all items in this folder.\n for arg_8 in arg_7.get('Contents') or []:\n arg_9 = arg_8['Key']\n if not arg_0.partial_match(arg_9, arg_3):\n continue\n\n if arg_0.opt.recursive or arg_9.count(PATH_SEP) == arg_6:\n arg_0.conditional(arg_4, {\n 'name': S3URL.combine(arg_1.proto, arg_1.bucket, arg_9),\n 'is_dir': False,\n 'size': arg_8['Size'],\n 'last_modified': arg_8['LastModified']\n })"} +{"_id": "doc_3986", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=0, arg_5=0, arg_6=0):\n '''Thread worker for Func operation.'''\n arg_7 = S3URL(arg_2)\n arg_8 = arg_0.lookup(arg_7)\n\n # Initialization: Set up multithreaded Funcs.\n if not arg_3:\n arg_9 = os.path.getsize(arg_1)\n arg_10 = LocalMD5Cache(arg_1)\n\n # optional checks\n if arg_0.opt.dry_run:\n message('%s => %s', arg_1, arg_2)\n return\n elif arg_0.opt.sync_check and arg_0.sync_check(arg_10, arg_8):\n message('%s => %s (synced)', arg_1, arg_2)\n return\n elif not arg_0.opt.force and arg_8:\n raise Failure('File already exists: %s' % arg_2)\n\n if arg_9 < arg_0.opt.max_singlepart_Func_size:\n arg_11 = arg_0.read_file_chunk(arg_1, 0, arg_9)\n arg_0.s3.put_object(Bucket=arg_7.bucket,\n Key=arg_7.path,\n Body=arg_11,\n Metadata={'md5': arg_10.get_md5(),\n 'privilege': arg_0.get_file_privilege(arg_1)})\n message('%s => %s', arg_1, arg_2)\n return\n\n # Here we need to have our own md5 value because multipart Func calculates\n # different md5 values.\n arg_12 = arg_0.s3.create_multipart_Func(Bucket=arg_7.bucket,\n Key=arg_7.path,\n Metadata={'md5': arg_10.get_md5(),\n 'privilege': arg_0.get_file_privilege(arg_1)})\n arg_13 = arg_12['UploadId']\n\n for arg_14 in arg_0.get_file_splits(arg_13, arg_1, arg_2, arg_9, arg_0.opt.multipart_split_size):\n arg_0.pool.Func(*arg_14)\n return\n\n arg_11 = arg_0.read_file_chunk(arg_1, arg_4, arg_5)\n arg_12 = arg_0.s3.Func_part(Bucket=arg_7.bucket, Key=arg_7.path, UploadId=arg_3.id, Body=arg_11, PartNumber=arg_6)\n\n # Finalize\n if arg_3.complete({'ETag': arg_12['ETag'], 'PartNumber': arg_6}):\n try:\n arg_0.s3.complete_multipart_Func(Bucket=arg_7.bucket, Key=arg_7.path, UploadId=arg_3.id, MultipartUpload={'Parts': arg_3.sorted_parts()})\n message('%s => %s', arg_1, arg_2)\n except Exception as e:\n message('Unable to complete Func: %s', str(e))\n arg_0.s3.abort_multipart_Func(Bucket=arg_7.bucket, Key=arg_7.path, UploadId=arg_3.id)\n raise RetryFailure('Upload failed: Unable to complete Func %s.' 
% arg_1)"} +{"_id": "doc_3987", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Verify the file size of the downloaded file.'''\n arg_3 = os.path.getsize(arg_2)\n if int(arg_1['ContentLength']) != arg_3:\n raise RetryFailure('Downloaded file size inconsistent: %s' % (repr(arg_1)))"} +{"_id": "doc_3988", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''Write local file chunk'''\n arg_5 = os.open(arg_1, os.O_CREAT | os.O_WRONLY)\n try:\n os.lseek(arg_5, arg_2, os.SEEK_SET)\n arg_6 = arg_4.read(arg_3)\n arg_7 = os.write(arg_5, arg_6)\n if(arg_7 != len(arg_6)):\n raise RetryFailure('Number of bytes written inconsistent: %s != %s' % (arg_7, sys.getsizeof(arg_6)))\n finally:\n os.close(arg_5)"} +{"_id": "doc_3989", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=0, arg_5=0, arg_6=0, arg_7=False):\n '''Copy a single file from source to target using boto S3 library.'''\n\n if arg_0.opt.dry_run:\n message('%s => %s' % (arg_1, arg_2))\n return\n\n arg_8 = S3URL(arg_1)\n arg_9 = S3URL(arg_2)\n\n if not arg_3:\n arg_10 = arg_0.lookup(arg_8)\n arg_11 = int(arg_10['ContentLength'])\n\n if arg_11 < arg_0.opt.max_singlepart_Func_size:\n arg_0.s3.Func_object(Bucket=arg_9.bucket, Key=arg_9.path,\n CopySource={'Bucket': arg_8.bucket, 'Key': arg_8.path})\n\n message('%s => %s' % (arg_1, arg_2))\n if arg_7:\n arg_0.delete(arg_1)\n\n return\n\n arg_12 = arg_0.s3.create_multipart_upload(Bucket=arg_9.bucket,\n Key=arg_9.path,\n Metadata=arg_10['Metadata'])\n arg_13 = arg_12['UploadId']\n\n for arg_14 in arg_0.get_file_splits(arg_13, arg_1, arg_2, arg_11, arg_0.opt.multipart_split_size):\n arg_0.pool.Func(*arg_14, arg_7=arg_7)\n return\n\n arg_12 = arg_0.s3.upload_part_Func(Bucket=arg_9.bucket,\n Key=arg_9.path,\n CopySource={'Bucket': arg_8.bucket, 'Key': arg_8.path},\n CopySourceRange='bytes=%d-%d' % (arg_4, arg_4 + arg_5 - 1),\n UploadId=arg_3.id,\n PartNumber=arg_6)\n\n if arg_3.complete({'ETag': arg_12['CopyPartResult']['ETag'], 'PartNumber': arg_6}):\n try:\n # Finalize Func operation.\n arg_0.s3.complete_multipart_upload(Bucket=arg_9.bucket, Key=arg_9.path, UploadId=arg_3.id, MultipartUpload={'Parts': arg_3.sorted_parts()})\n\n if arg_7:\n arg_0.delete(arg_1)\n\n message('%s => %s' % (arg_1, arg_2))\n except Exception as e:\n message('Unable to complete upload: %s', str(e))\n arg_0.s3.abort_multipart_upload(Bucket=arg_8.bucket, Key=arg_8.path, UploadId=arg_3.id)\n raise RetryFailure('Copy failed: Unable to complete Func %s.' % arg_1)"} +{"_id": "doc_3990", "title": "", "text": "def Func(arg_0, arg_1):\n '''Main entry to handle commands. 
Dispatch to individual command handler.'''\n if len(arg_1) == 0:\n raise InvalidArgument('No command provided')\n arg_2 = arg_1[0]\n if arg_2 + '_handler' in CommandHandler.__dict__:\n CommandHandler.__dict__[arg_2 + '_handler'](arg_0, arg_1)\n else:\n raise InvalidArgument('Unknown command %s' % arg_2)"} +{"_id": "doc_3991", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Validate input parameters with given format.\n This function also checks for wildcards for recursive mode.\n '''\n arg_3 = {\n 'cmd': 'Command',\n 's3': 's3 path',\n 'local': 'local path'\n }\n arg_4 = arg_1.split('|')\n if len(arg_4) != len(arg_2):\n raise InvalidArgument('Invalid number of parameters')\n\n for arg_5, arg_6 in enumerate(arg_4):\n arg_7 = False\n for arg_8 in arg_6.split(','):\n if arg_8 == 'cmd' and arg_2[arg_5] + '_handler' in CommandHandler.__dict__:\n arg_7 = True\n if arg_8 == 's3' and S3URL.is_valid(arg_2[arg_5]):\n arg_7 = True\n if arg_8 == 'local' and not S3URL.is_valid(arg_2[arg_5]):\n arg_7 = True\n if not arg_7:\n raise InvalidArgument('Invalid parameter: %s, %s expected' % (arg_2[arg_5], arg_3[arg_6.split(',')[0]]))"} +{"_id": "doc_3992", "title": "", "text": "def Func(arg_0, arg_1):\n '''Pretty print the result of s3walk. Here we calculate the maximum width\n of each column and align them.\n '''\n\n def normalize_time(arg_2):\n '''Normalize the timestamp format for pretty print.'''\n if arg_2 is None:\n return ' ' * 16\n\n return TIMESTAMP_FORMAT % (arg_2.year, arg_2.month, arg_2.day, arg_2.hour, arg_2.minute)\n\n arg_3 = [0, 0, 0]\n arg_4 = '%%%ds %%%ds %%-%ds'\n\n # Calculate maximum width for each column.\n arg_5 = []\n for arg_6 in arg_1:\n arg_7 = normalize_time(arg_6['last_modified'])\n arg_8 = str(arg_6['size']) if not arg_6['is_dir'] else 'DIR'\n arg_9 = arg_6['name']\n arg_10 = (arg_7, arg_8, arg_9)\n for arg_11, arg_12 in enumerate(arg_10):\n if arg_3[arg_11] < len(arg_12):\n arg_3[arg_11] = len(arg_12)\n arg_5.append(arg_10)\n\n # Format output.\n for arg_10 in arg_5:\n arg_13 = (arg_4 % tuple(arg_3)) % arg_10\n message('%s', arg_13.rstrip())"} +{"_id": "doc_3993", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for mb command'''\n if len(arg_1) == 1:\n raise InvalidArgument('No s3 bucketname provided')\n\n arg_0.validate('cmd|s3', arg_1)\n arg_0.s3handler().create_bucket(arg_1[1])"} +{"_id": "doc_3994", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for put command'''\n\n # Special check for shell expansion\n if len(arg_1) < 3:\n raise InvalidArgument('Invalid number of parameters')\n arg_0.validate('|'.join(['cmd'] + ['local'] * (len(arg_1) - 2) + ['s3']), arg_1)\n\n arg_2 = arg_1[1:-1] # shell expansion\n arg_3 = arg_1[-1]\n\n arg_0.s3handler().put_files(arg_2, arg_3)"} +{"_id": "doc_3995", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for get command'''\n\n # Special case when we don't have target directory.\n if len(arg_1) == 2:\n arg_1 += ['.']\n\n arg_0.validate('cmd|s3|local', arg_1)\n arg_2 = arg_1[1]\n arg_3 = arg_1[2]\n arg_0.s3handler().get_files(arg_2, arg_3)"} +{"_id": "doc_3996", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for dsync command.'''\n arg_0.opt.recursive = True\n arg_0.opt.sync_check = True\n arg_0.opt.force = True\n\n arg_0.validate('cmd|s3,local|s3,local', arg_1)\n arg_6 = arg_1[1]\n arg_7 = arg_1[2]\n\n arg_0.s3handler().dsync_files(arg_6, arg_7)"} +{"_id": "doc_3997", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for cp command'''\n\n arg_0.validate('cmd|s3|s3', arg_1)\n arg_2 
= arg_1[1]\n arg_3 = arg_1[2]\n arg_0.s3handler().cp_files(arg_2, arg_3)"} +{"_id": "doc_3998", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for mv command'''\n\n arg_0.validate('cmd|s3|s3', arg_1)\n arg_2 = arg_1[1]\n arg_3 = arg_1[2]\n arg_0.s3handler().cp_files(arg_2, arg_3, delete_source=True)"} +{"_id": "doc_3999", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler for size command'''\n for arg_2, arg_3 in arg_0.s3handler().size(arg_1[1:]):\n message('%s\\t%s' % (arg_3, arg_2))"} +{"_id": "doc_4000", "title": "", "text": "def Func(arg_0, arg_1):\n '''Handler of total_size command'''\n arg_2 = 0\n for arg_3, arg_4 in arg_0.s3handler().size(arg_1[1:]):\n arg_2 += arg_4\n message(str(arg_2))"} +{"_id": "doc_4001", "title": "", "text": "def Func(arg_0, arg_1):\n '''Search for date information in the string'''\n arg_2 = arg_0.REGEX_DATE.search(arg_1)\n arg_3 = datetime.datetime.utcnow().date()\n if arg_2:\n arg_3 = datetime.date(int(arg_2.group(1)), int(arg_2.group(2)), int(arg_2.group(3)))\n arg_1 = arg_0.REGEX_DATE.sub('', arg_1)\n return (arg_3, arg_1)"} +{"_id": "doc_4002", "title": "", "text": "def Func(arg_0, arg_1):\n '''Search for time information in the string'''\n arg_2 = arg_0.REGEX_TIME.search(arg_1)\n arg_3 = datetime.datetime.utcnow().time()\n if arg_2:\n arg_3 = datetime.time(int(arg_2.group(1)), int(arg_2.group(2)))\n arg_1 = arg_0.REGEX_TIME.sub('', arg_1)\n return (arg_3, arg_1)"} +{"_id": "doc_4003", "title": "", "text": "def Func(arg_0):\n '''\n Funcs the contents of a file on disk.\n takes a filename\n '''\n arg_1 = open(arg_0, 'r')\n arg_2 = arg_1.read()\n arg_1.close()\n return raw(arg_2)"} +{"_id": "doc_4004", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''\n pipes the output of a program\n '''\n import subprocess\n arg_2 = subprocess.Popen(arg_0, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n arg_3, arg_4 = arg_2.communicate(arg_1)\n return arg_3.decode('utf8')"} +{"_id": "doc_4005", "title": "", "text": "def Func(arg_0):\n '''\n Funcs html entities. 
the opposite of escape.\n '''\n arg_1 = re.compile(r'&(?:(?:#(\\d+))|([^;]+));')\n\n arg_2 = []\n arg_3 = arg_1.search(arg_0)\n while arg_3:\n arg_2.append(arg_0[0:arg_3.start()])\n arg_4 = arg_3.group(1)\n if arg_4:\n arg_4 = int(arg_4)\n arg_2.append(unichr(arg_4))\n else:\n arg_4 = _Func.get(arg_3.group(2), ord('?'))\n arg_2.append(unichr(arg_4))\n\n arg_0 = arg_0[arg_3.end():]\n arg_3 = arg_1.search(arg_0)\n\n arg_2.append(arg_0)\n return ''.join(arg_2)"} +{"_id": "doc_4006", "title": "", "text": "def Func(*arg_0, **arg_1):\n '''\n Set attributes on the current active tag context\n '''\n arg_2 = dom_tag._with_contexts[_get_thread_context()]\n if arg_2 and arg_2[-1]:\n arg_3 = arg_0 + (arg_1,)\n for arg_4 in arg_3:\n for Func, arg_6 in arg_4.items():\n arg_2[-1].tag.set_attribute(*dom_tag.clean_pair(Func, arg_6))\n else:\n raise ValueError('not in a tag context')"} +{"_id": "doc_4007", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Add or update the value of an attribute.\n '''\n if isinstance(arg_1, int):\n arg_0.children[arg_1] = arg_2\n elif isinstance(arg_1, basestring):\n arg_0.attributes[arg_1] = arg_2\n else:\n raise TypeError('Only integer and string types are valid for assigning '\n 'child tags and attributes, respectively.')"} +{"_id": "doc_4008", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n '''\n Recursively searches children for tags of a certain\n type with matching attributes.\n '''\n # Stupid workaround since we can not use dom_tag in the method declaration\n if arg_1 is None: arg_1 = dom_tag\n\n arg_3 = [(dom_tag.clean_attribute(attr), arg_7)\n for attr, arg_7 in arg_2.items()]\n\n arg_4 = []\n for arg_5 in arg_0.children:\n if (isinstance(arg_1, basestring) and type(arg_5).__name__ == arg_1) or \\\n (not isinstance(arg_1, basestring) and isinstance(arg_5, arg_1)):\n\n if all(arg_5.attributes.Func(arg_6) == arg_7\n for arg_6, arg_7 in arg_3):\n # If the child is of correct type and has all attributes and values\n # in kwargs add as a result\n arg_4.append(arg_5)\n if isinstance(arg_5, dom_tag):\n # If the child is a dom_tag extend the search down through its children\n arg_4.extend(arg_5.Func(arg_1, **arg_2))\n return arg_4"} +{"_id": "doc_4009", "title": "", "text": "def Func(arg_0):\n '''\n Normalize attribute names for shorthand and work arounds for limitations\n in Python's syntax\n '''\n\n # Shorthand\n arg_0 = {\n 'cls': 'class',\n 'className': 'class',\n 'class_name': 'class',\n 'fr': 'for',\n 'html_for': 'for',\n 'htmlFor': 'for',\n }.get(arg_0, arg_0)\n\n # Workaround for Python's reserved words\n if arg_0[0] == '_':\n arg_0 = arg_0[1:]\n\n # Workaround for dash\n if arg_0 in set(['http_equiv']) or arg_0.startswith('data_'):\n arg_0 = arg_0.replace('_', '-').lower()\n\n # Workaround for colon\n if arg_0.split('_')[0] in ('xlink', 'xml', 'xmlns'):\n arg_0 = arg_0.replace('_', ':', 1).lower()\n\n return arg_0"} +{"_id": "doc_4010", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n This will call `clean_attribute` on the attribute and also allows for the\n creation of boolean attributes.\n\n Ex. input(selected=True) is equivalent to input(selected=\"selected\")\n '''\n arg_1 = arg_0.clean_attribute(arg_1)\n\n # Check for boolean attributes\n # (i.e. 
selected=True becomes selected=\"selected\")\n if arg_2 is True:\n arg_2 = arg_1\n\n if arg_2 is False:\n arg_2 = \"false\"\n\n return (arg_1, arg_2)"} +{"_id": "doc_4011", "title": "", "text": "def Func(arg_0):\n \"\"\"Discover gateways using multicast\"\"\"\n\n arg_1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n arg_1.settimeout(5.0)\n if arg_0._interface != 'any':\n arg_1.bind((arg_0._interface, 0))\n\n for arg_2 in arg_0._gateways_config:\n arg_3 = arg_2.get('host')\n arg_4 = arg_2.get('port')\n arg_5 = arg_2.get('sid')\n\n if not (arg_3 and arg_4 and arg_5):\n continue\n try:\n arg_6 = socket.gethostbyname(arg_3)\n if arg_2.get('disable'):\n _LOGGER.info(\n 'Xiaomi Gateway %s is disabled by configuration', arg_5)\n arg_0.disabled_gateways.append(arg_6)\n continue\n _LOGGER.info(\n 'Xiaomi Gateway %s configured at IP %s:%s',\n arg_5, arg_6, arg_4)\n\n arg_0.gateways[arg_6] = XiaomiGateway(\n arg_6, arg_4, arg_5,\n arg_2.get('key'), arg_0._device_discovery_retries,\n arg_0._interface, arg_2.get('proto'))\n except OSError as error:\n _LOGGER.error(\n \"Could not resolve %s: %s\", arg_3, error)\n\n try:\n arg_1.sendto('{\"cmd\":\"whois\"}'.encode(),\n (arg_0.MULTICAST_ADDRESS, arg_0.GATEWAY_DISCOVERY_PORT))\n\n while True:\n arg_8, (arg_9, arg_10) = arg_1.recvfrom(1024)\n if len(arg_8) is None or arg_9 in arg_0.gateways:\n continue\n\n if arg_9 in arg_0.gateways.keys() or arg_9 in arg_0.disabled_gateways:\n continue\n\n arg_11 = json.loads(arg_8.decode())\n if arg_11[\"cmd\"] != 'iam':\n _LOGGER.error(\"Response does not match return cmd\")\n continue\n\n if arg_11[\"model\"] not in GATEWAY_MODELS:\n _LOGGER.error(\"Response must be gateway model\")\n continue\n\n arg_12 = False\n arg_13 = None\n for arg_2 in arg_0._gateways_config:\n arg_5 = arg_2.get('sid')\n if arg_5 is None or arg_5 == arg_11[\"sid\"]:\n arg_13 = arg_2.get('key')\n if arg_5 and arg_5 == arg_11['sid'] and arg_2.get('disable'):\n arg_12 = True\n\n arg_5 = arg_11[\"sid\"]\n if arg_12:\n _LOGGER.info(\"Xiaomi Gateway %s is disabled by configuration\",\n arg_5)\n arg_0.disabled_gateways.append(arg_9)\n else:\n _LOGGER.info('Xiaomi Gateway %s found at IP %s', arg_5, arg_9)\n arg_0.gateways[arg_9] = XiaomiGateway(\n arg_9, arg_11[\"port\"], arg_5, arg_13,\n arg_0._device_discovery_retries, arg_0._interface,\n arg_11[\"proto_version\"] if \"proto_version\" in arg_11 else None)\n\n except socket.timeout:\n _LOGGER.info(\"Gateway discovery finished in 5 seconds\")\n arg_1.close()"} +{"_id": "doc_4012", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get data from gateway\"\"\"\n arg_2 = '{ \"cmd\":\"read\",\"sid\":\"' + arg_1 + '\"}'\n arg_3 = arg_0._send_cmd(arg_2, \"read_ack\") if int(arg_0.proto[0:1]) == 1 else arg_0._send_cmd(arg_2, \"read_rsp\")\n _LOGGER.debug(\"read_ack << %s\", arg_3)\n return arg_0.push_data(arg_3)"} +{"_id": "doc_4013", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Push data broadcasted from gateway to device\"\"\"\n if not _validate_data(arg_1):\n return False\n arg_2 = json.loads(arg_1['data']) if int(arg_0.proto[0:1]) == 1 else _list2map(arg_1['params'])\n if arg_2 is None:\n return False\n arg_3 = arg_1['sid']\n for arg_4 in arg_0.callbacks[arg_3]:\n arg_4(arg_2, arg_1)\n return True"} +{"_id": "doc_4014", "title": "", "text": "def Func(arg_0):\n \"\"\"Get key using token from gateway\"\"\"\n arg_1 = bytes(bytearray.fromhex('17996d093d28ddb3ba695a2e6f58562e'))\n arg_2 = Cipher(algorithms.AES(arg_0.key.encode()), modes.CBC(arg_1),\n backend=default_backend()).encryptor()\n 
arg_3 = arg_2.update(arg_0.token.encode()) + arg_2.finalize()\n if isinstance(arg_3, str): # For Python 2 compatibility\n return ''.join('{:02x}'.format(ord(arg_4)) for arg_4 in arg_3)\n return ''.join('{:02x}'.format(arg_4) for arg_4 in arg_3)"} +{"_id": "doc_4015", "title": "", "text": "def Func(arg_0):\n \"\"\" Creates a registration message to identify the worker to the interchange\n \"\"\"\n arg_1 = {'parsl_v': PARSL_VERSION,\n 'python_v': \"{}.{}.{}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro),\n 'os': platform.system(),\n 'hname': platform.node(),\n 'dir': os.getcwd(),\n }\n arg_2 = json.dumps(arg_1).encode('utf-8')\n return arg_2"} +{"_id": "doc_4016", "title": "", "text": "def Func(arg_0):\n \"\"\" Send heartbeat to the incoming task queue\n \"\"\"\n Func = (HEARTBEAT_CODE).to_bytes(4, \"little\")\n arg_2 = arg_0.task_incoming.send(Func)\n logger.debug(\"Return from heartbeat : {}\".format(arg_2))"} +{"_id": "doc_4017", "title": "", "text": "def Func(arg_0):\n \"\"\" Receives a results from the MPI worker pool and send it out via 0mq\n\n Returns:\n --------\n result: task result from the workers\n \"\"\"\n arg_1 = MPI.Status()\n arg_2 = arg_0.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=arg_1)\n logger.debug(\"Received result from workers: {}\".format(arg_2))\n return arg_2"} +{"_id": "doc_4018", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Pulls tasks from the incoming tasks 0mq pipe onto the internal\n pending task queue\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.\n \"\"\"\n logger.info(\"[TASK PULL THREAD] starting\")\n arg_2 = zmq.Poller()\n arg_2.register(arg_0.task_incoming, zmq.POLLIN)\n\n # Send a registration message\n arg_3 = arg_0.create_reg_message()\n logger.debug(\"Sending registration message: {}\".format(arg_3))\n arg_0.task_incoming.send(arg_3)\n arg_4 = time.time()\n arg_5 = time.time()\n arg_6 = 0\n\n arg_7 = 1\n\n while not arg_1.is_set():\n time.sleep(LOOP_SLOWDOWN)\n arg_8 = arg_0.ready_worker_queue.qsize()\n arg_9 = arg_0.pending_task_queue.qsize()\n\n logger.debug(\"[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}\".format(arg_8,\n arg_9))\n\n if time.time() > arg_4 + arg_0.heartbeat_period:\n arg_0.heartbeat()\n arg_4 = time.time()\n\n if arg_9 < arg_0.max_queue_size and arg_8 > 0:\n logger.debug(\"[TASK_PULL_THREAD] Requesting tasks: {}\".format(arg_8))\n arg_3 = ((arg_8).to_bytes(4, \"little\"))\n arg_0.task_incoming.send(arg_3)\n\n arg_10 = dict(arg_2.poll(timeout=arg_7))\n\n if arg_0.task_incoming in arg_10 and arg_10[arg_0.task_incoming] == zmq.POLLIN:\n arg_11, arg_12 = arg_0.task_incoming.recv_multipart()\n arg_13 = pickle.loads(arg_12)\n arg_5 = time.time()\n\n if arg_13 == 'STOP':\n logger.critical(\"[TASK_PULL_THREAD] Received stop request\")\n arg_1.set()\n break\n\n elif arg_13 == HEARTBEAT_CODE:\n logger.debug(\"Got heartbeat from interchange\")\n\n else:\n # Reset timer on receiving message\n arg_7 = 1\n arg_6 += len(arg_13)\n logger.debug(\"[TASK_PULL_THREAD] Got tasks: {} of {}\".format([arg_14['task_id'] for arg_14 in arg_13],\n arg_6))\n for arg_15 in arg_13:\n arg_0.pending_task_queue.put(arg_15)\n else:\n logger.debug(\"[TASK_PULL_THREAD] No incoming tasks\")\n # Limit poll duration to heartbeat_period\n # heartbeat_period is in s vs poll_timer in ms\n arg_7 = min(arg_0.heartbeat_period * 1000, arg_7 * 2)\n\n # Only check if no messages were received.\n if time.time() > arg_5 + 
arg_0.heartbeat_threshold:\n logger.critical(\"[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold\")\n arg_1.set()\n logger.critical(\"[TASK_PULL_THREAD] Exiting\")\n break"} +{"_id": "doc_4019", "title": "", "text": "def Func(arg_0):\n \"\"\" Start the Manager process.\n\n The worker loops on this:\n\n 1. If the last message sent was older than heartbeat period we send a heartbeat\n 2.\n\n\n TODO: Move task receiving to a thread\n \"\"\"\n\n arg_0.comm.Barrier()\n logger.debug(\"Manager synced with workers\")\n\n arg_0._kill_event = threading.Event()\n arg_0._task_puller_thread = threading.Thread(target=arg_0.pull_tasks,\n args=(arg_0._kill_event,))\n arg_0._result_pusher_thread = threading.Thread(target=arg_0.push_results,\n args=(arg_0._kill_event,))\n arg_0._task_puller_thread.start()\n arg_0._result_pusher_thread.start()\n\n Func = None\n\n arg_5 = 0\n arg_6 = 0\n arg_7 = 0\n\n logger.info(\"Loop start\")\n while not arg_0._kill_event.is_set():\n time.sleep(LOOP_SLOWDOWN)\n\n # In this block we attempt to probe MPI for a set amount of time,\n # and if we have exhausted all available MPI events, we move on\n # to the next block. The timer and counter trigger balance\n # fairness and responsiveness.\n arg_8 = time.time() + 0.05\n arg_9 = min(10, comm.size)\n while time.time() < arg_8:\n arg_10 = MPI.Status()\n\n if arg_9 > 10:\n logger.debug(\"Hit max mpi events per round\")\n break\n\n if not arg_0.comm.Iprobe(status=arg_10):\n logger.debug(\"Timer expired, processed {} mpi events\".format(arg_9))\n break\n else:\n arg_11 = arg_10.Get_tag()\n logger.info(\"Message with tag {} received\".format(arg_11))\n\n arg_9 += 1\n if arg_11 == RESULT_TAG:\n arg_12 = arg_0.recv_result_from_workers()\n arg_0.pending_result_queue.put(arg_12)\n arg_5 += 1\n\n elif arg_11 == TASK_REQUEST_TAG:\n arg_13 = arg_0.recv_task_request_from_workers()\n arg_0.ready_worker_queue.put(arg_13)\n\n else:\n logger.error(\"Unknown tag {} - ignoring this message and continuing\".format(arg_11))\n\n arg_14 = arg_0.ready_worker_queue.qsize()\n arg_15 = arg_0.pending_task_queue.qsize()\n logger.debug(\"[MAIN] Ready workers: {} Ready tasks: {}\".format(arg_14,\n arg_15))\n arg_16 = min(arg_14, arg_15)\n for arg_17 in range(arg_16):\n arg_13 = arg_0.ready_worker_queue.get()\n arg_18 = arg_0.pending_task_queue.get()\n comm.send(arg_18, dest=arg_13, arg_11=arg_13)\n arg_7 += 1\n logger.debug(\"Assigning worker:{} task:{}\".format(arg_13, arg_18['task_id']))\n\n if not Func:\n Func = time.time()\n\n logger.debug(\"Tasks recvd:{} Tasks dispatched:{} Results recvd:{}\".format(\n arg_6, arg_7, arg_5))\n # print(\"[{}] Received: {}\".format(self.identity, msg))\n # time.sleep(random.randint(4,10)/10)\n\n arg_0._task_puller_thread.join()\n arg_0._result_pusher_thread.join()\n\n arg_0.task_incoming.close()\n arg_0.result_outgoing.close()\n arg_0.context.term()\n\n arg_19 = time.time() - Func\n logger.info(\"mpi_worker_pool ran for {} seconds\".format(arg_19))"} +{"_id": "doc_4020", "title": "", "text": "def Func(arg_0):\n \"\"\" Decorator function to launch a function as a separate process \"\"\"\n\n def run(*arg_1, **arg_2):\n arg_3 = mp.Process(target=arg_0, arg_1=arg_1, arg_2=arg_2)\n arg_3.start()\n return arg_3\n\n return run"} +{"_id": "doc_4021", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Send UDP messages to usage tracker asynchronously\n\n This multiprocessing based messenger was written to overcome the limitations\n of signalling/terminating a thread that is 
blocked on a system call. This\n messenger is created as a separate process, and initialized with 2 queues,\n to_send to receive messages to be sent to the internet.\n\n Args:\n - domain_name (str) : Domain name string\n - UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n - UDP_PORT (int) : UDP port to send out on\n - sock_timeout (int) : Socket timeout\n - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet\n \"\"\"\n try:\n if arg_4 is None:\n raise ValueError(\"message was none\")\n\n arg_5 = bytes(arg_4, \"utf-8\")\n\n if arg_5 is None:\n raise ValueError(\"utf-8 encoding of message failed\")\n\n if arg_0:\n try:\n arg_1 = socket.gethostbyname(arg_0)\n except Exception:\n # (False, \"Domain lookup failed, defaulting to {0}\".format(UDP_IP))\n pass\n\n if arg_1 is None:\n raise Exception(\"UDP_IP is None\")\n\n if arg_2 is None:\n raise Exception(\"UDP_PORT is None\")\n\n arg_6 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\n arg_6.settimeout(arg_3)\n arg_6.sendto(bytes(arg_4, \"utf-8\"), (arg_1, arg_2))\n arg_6.close()\n\n except socket.timeout:\n logger.debug(\"Failed to send usage tracking data: socket timeout\")\n except OSError as e:\n logger.debug(\"Failed to send usage tracking data: OSError: {}\".format(e))\n except Exception as e:\n logger.debug(\"Failed to send usage tracking data: Exception: {}\".format(e))"} +{"_id": "doc_4022", "title": "", "text": "def Func(arg_0):\n \"\"\"By default tracking is enabled.\n\n If Test mode is set via env variable PARSL_TESTING, a test flag is set\n\n Tracking is disabled if :\n 1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n 2. Environment variable PARSL_TRACKING is set to false (case insensitive)\n\n \"\"\"\n arg_1 = True # By default we track usage\n arg_2 = False # By default we are not in testing mode\n\n arg_3 = str(os.environ.get(\"PARSL_TESTING\", 'None')).lower()\n if arg_3 == 'true':\n arg_2 = True\n\n if not arg_0.config.usage_tracking:\n arg_1 = False\n\n arg_4 = str(os.environ.get(\"PARSL_TRACKING\", True)).lower()\n if arg_4 == \"false\":\n arg_1 = False\n\n return arg_2, arg_1"} +{"_id": "doc_4023", "title": "", "text": "def Func(arg_0):\n \"\"\"Collect preliminary run info at the start of the DFK.\n\n Returns :\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n arg_1 = getpass.getuser().encode('latin1')\n arg_2 = hashlib.sha256(arg_1).hexdigest()[0:10]\n arg_3 = socket.gethostname().encode('latin1')\n arg_4 = hashlib.sha256(arg_3).hexdigest()[0:10]\n arg_5 = {'uuid': arg_0.uuid,\n 'uname': arg_2,\n 'hname': arg_4,\n 'test': arg_0.test_mode,\n 'parsl_v': arg_0.parsl_version,\n 'python_v': arg_0.python_version,\n 'os': platform.system(),\n 'os_v': platform.release(),\n 'start': time.time()}\n\n return json.dumps(arg_5)"} +{"_id": "doc_4024", "title": "", "text": "def Func(arg_0):\n \"\"\"Collect the final run information at the time of DFK cleanup.\n\n Returns:\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n arg_1 = arg_0.dfk.task_count\n\n arg_2 = len([x for x in arg_0.dfk.config.executors if x.managed])\n\n arg_3 = len([t for t in arg_0.dfk.tasks if\n arg_0.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\n\n arg_4 = {'uuid': arg_0.uuid,\n 'end': time.time(),\n 't_apps': arg_1,\n 'sites': arg_2,\n 'c_time': None,\n 'failed': arg_3,\n 'test': arg_0.test_mode,\n }\n\n return json.dumps(arg_4)"} +{"_id": "doc_4025", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Send UDP message.\"\"\"\n arg_2 = 0\n if arg_0.tracking_enabled:\n try:\n arg_3 = 
udp_messenger(arg_0.domain_name, arg_0.UDP_IP, arg_0.UDP_PORT, arg_0.sock_timeout, arg_1)\n arg_0.procs.append(arg_3)\n except Exception as e:\n logger.debug(\"Usage tracking failed: {}\".format(e))\n else:\n arg_2 = -1\n\n return arg_2"} +{"_id": "doc_4026", "title": "", "text": "def Func(arg_0):\n \"\"\"Send message over UDP.\n\n If tracking is disables, the bytes_sent will always be set to -1\n\n Returns:\n (bytes_sent, time_taken)\n \"\"\"\n arg_1 = time.time()\n arg_2 = None\n if not arg_0.initialized:\n arg_2 = arg_0.construct_start_message()\n arg_0.initialized = True\n else:\n arg_2 = arg_0.construct_end_message()\n\n arg_0.send_UDP_message(arg_2)\n arg_4 = time.time()\n\n return arg_4 - arg_1"} +{"_id": "doc_4027", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"This function is called as a callback when an AppFuture\n is in its final state.\n\n It will trigger post-app processing such as checkpointing\n and stageout.\n\n Args:\n task_id (string) : Task id\n future (Future) : The relevant app future (which should be\n consistent with the task structure 'app_fu' entry\n\n KWargs:\n memo_cbk(Bool) : Indicates that the call is coming from a memo update,\n that does not require additional memo updates.\n \"\"\"\n\n if not arg_0.tasks[arg_1]['app_fu'].done():\n logger.error(\"Internal consistency error: app_fu is not done for task {}\".format(arg_1))\n if not arg_0.tasks[arg_1]['app_fu'] == arg_2:\n logger.error(\"Internal consistency error: callback future is not the app_fu in task structure, for task {}\".format(arg_1))\n\n if not arg_3:\n # Update the memoizer with the new result if this is not a\n # result from a memo lookup and the task has reached a terminal state.\n arg_0.memoizer.update_memo(arg_1, arg_0.tasks[arg_1], arg_2)\n\n if arg_0.checkpoint_mode == 'task_exit':\n arg_0.checkpoint(tasks=[arg_1])\n\n # Submit _*_stage_out tasks for output data futures that correspond with remote files\n if (arg_0.tasks[arg_1]['app_fu'] and\n arg_0.tasks[arg_1]['app_fu'].done() and\n arg_0.tasks[arg_1]['app_fu'].exception() is None and\n arg_0.tasks[arg_1]['executor'] != 'data_manager' and\n arg_0.tasks[arg_1]['func_name'] != '_ftp_stage_in' and\n arg_0.tasks[arg_1]['func_name'] != '_http_stage_in'):\n for arg_4 in arg_0.tasks[arg_1]['app_fu'].outputs:\n arg_5 = arg_4.file_obj\n if isinstance(arg_5, File) and arg_5.is_remote():\n arg_0.data_manager.stage_out(arg_5, arg_0.tasks[arg_1]['executor'])\n\n return"} +{"_id": "doc_4028", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Handle the actual submission of the task to the executor layer.\n\n If the app task has the executors attributes not set (default=='all')\n the task is launched on a randomly selected executor from the\n list of executors. 
This behavior could later be updated to support\n binding to executors based on user specified criteria.\n\n If the app task specifies a particular set of executors, it will be\n targeted at those specific executors.\n\n Args:\n task_id (uuid string) : A uuid string that uniquely identifies the task\n executable (callable) : A callable object\n args (list of positional args)\n kwargs (arbitrary keyword arguments)\n\n\n Returns:\n Future that tracks the execution of the submitted executable\n \"\"\"\n arg_0.tasks[arg_1]['time_submitted'] = datetime.datetime.now()\n\n arg_6, arg_7 = arg_0.memoizer.check_memo(arg_1, arg_0.tasks[arg_1])\n if arg_6:\n logger.info(\"Reusing cached result for task {}\".format(arg_1))\n return arg_7\n\n arg_8 = arg_0.tasks[arg_1][\"executor\"]\n try:\n arg_9 = arg_0.executors[arg_8]\n except Exception:\n logger.exception(\"Task {} requested invalid executor {}: config is\\n{}\".format(arg_1, arg_8, arg_0._config))\n\n if arg_0.monitoring is not None and arg_0.monitoring.resource_monitoring_enabled:\n arg_2 = arg_0.monitoring.monitor_wrapper(arg_2, arg_1,\n arg_0.monitoring.monitoring_hub_url,\n arg_0.run_id,\n arg_0.monitoring.resource_monitoring_interval)\n\n with arg_0.submitter_lock:\n arg_10 = arg_9.submit(arg_2, *arg_3, **arg_4)\n arg_0.tasks[arg_1]['status'] = States.launched\n if arg_0.monitoring is not None:\n arg_11 = arg_0._create_task_log_info(arg_1, 'lazy')\n arg_0.monitoring.send(MessageType.TASK_INFO, arg_11)\n\n arg_10.retries_left = arg_0._config.retries - \\\n arg_0.tasks[arg_1]['fail_count']\n logger.info(\"Task {} launched on executor {}\".format(arg_1, arg_9.label))\n return arg_10"} +{"_id": "doc_4029", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Count the number of unresolved futures on which a task depends.\n\n Args:\n - args (List[args]) : The list of args list to the fn\n - kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn\n\n Returns:\n - count, [list of dependencies]\n\n \"\"\"\n # Check the positional args\n arg_3 = []\n arg_4 = 0\n for arg_5 in arg_1:\n if isinstance(arg_5, Future):\n if arg_0.tasks[arg_5.tid]['status'] not in FINAL_STATES:\n arg_4 += 1\n arg_3.extend([arg_5])\n\n # Check for explicit kwargs ex, fu_1=\n for arg_6 in arg_2:\n arg_5 = arg_2[arg_6]\n if isinstance(arg_5, Future):\n if arg_0.tasks[arg_5.tid]['status'] not in FINAL_STATES:\n arg_4 += 1\n arg_3.extend([arg_5])\n\n # Check for futures in inputs=[...]\n for arg_5 in arg_2.get('inputs', []):\n if isinstance(arg_5, Future):\n if arg_0.tasks[arg_5.tid]['status'] not in FINAL_STATES:\n arg_4 += 1\n arg_3.extend([arg_5])\n\n return arg_4, arg_3"} +{"_id": "doc_4030", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, arg_3='all', arg_4=None, arg_5=False, **arg_6):\n \"\"\"Add task to the dataflow system.\n\n If the app task has the executors attributes not set (default=='all')\n the task will be launched on a randomly selected executor from the\n list of executors. 
If the app task specifies a particular set of\n executors, it will be targeted at the specified executors.\n\n >>> IF all deps are met:\n >>> send to the runnable queue and launch the task\n >>> ELSE:\n >>> post the task in the pending queue\n\n Args:\n - func : A function object\n - *args : Args to the function\n\n KWargs :\n - executors (list or string) : List of executors this call could go to.\n Default='all'\n - fn_hash (Str) : Hash of the function and inputs\n Default=None\n - cache (Bool) : To enable memoization or not\n - kwargs (dict) : Rest of the kwargs to the fn passed as dict.\n\n Returns:\n (AppFuture) [DataFutures,]\n\n \"\"\"\n\n if arg_0.cleanup_called:\n raise ValueError(\"Cannot Func to a DFK that has been cleaned up\")\n\n arg_7 = arg_0.task_count\n arg_0.task_count += 1\n if isinstance(arg_3, str) and arg_3.lower() == 'all':\n arg_8 = list(e for e in arg_0.executors if e != 'data_manager')\n elif isinstance(arg_3, list):\n arg_8 = arg_3\n arg_9 = random.choice(arg_8)\n\n # Transform remote input files to data futures\n arg_2, arg_6 = arg_0._add_input_deps(arg_9, arg_2, arg_6)\n\n arg_10 = {'depends': None,\n 'executor': arg_9,\n 'func': arg_1,\n 'func_name': arg_1.__name__,\n 'args': arg_2,\n 'kwargs': arg_6,\n 'fn_hash': arg_4,\n 'memoize': arg_5,\n 'callback': None,\n 'exec_fu': None,\n 'checkpoint': None,\n 'fail_count': 0,\n 'fail_history': [],\n 'env': None,\n 'status': States.unsched,\n 'id': arg_7,\n 'time_Functed': None,\n 'time_returned': None,\n 'app_fu': None}\n\n if arg_7 in arg_0.tasks:\n raise DuplicateTaskError(\n \"internal consistency error: Task {0} already exists in task list\".format(arg_7))\n else:\n arg_0.tasks[arg_7] = arg_10\n\n # Get the dep count and a list of dependencies for the task\n arg_12, arg_13 = arg_0._gather_all_deps(arg_2, arg_6)\n arg_0.tasks[arg_7]['depends'] = arg_13\n\n # Extract stdout and stderr to pass to AppFuture:\n arg_14 = arg_6.get('stdout')\n arg_15 = arg_6.get('stderr')\n\n logger.info(\"Task {} Functed for App {}, waiting on tasks {}\".format(arg_7,\n arg_10['func_name'],\n [arg_16.tid for arg_16 in arg_13]))\n\n arg_0.tasks[arg_7]['task_launch_lock'] = threading.Lock()\n arg_17 = AppFuture(tid=arg_7,\n stdout=arg_14,\n stderr=arg_15)\n\n arg_0.tasks[arg_7]['app_fu'] = arg_17\n arg_17.add_done_callback(partial(arg_0.handle_app_update, arg_7))\n arg_0.tasks[arg_7]['status'] = States.pending\n logger.debug(\"Task {} set to pending state with AppFuture: {}\".format(arg_7, arg_10['app_fu']))\n\n # at this point add callbacks to all dependencies to do a launch_if_ready\n # call whenever a dependency completes.\n\n # we need to be careful about the order of setting the state to pending,\n # adding the callbacks, and caling launch_if_ready explicitly once always below.\n\n # I think as long as we call launch_if_ready once after setting pending, then\n # we can add the callback dependencies at any point: if the callbacks all fire\n # before then, they won't cause a launch, but the one below will. 
if they fire\n # after we set it pending, then the last one will cause a launch, and the\n # explicit one won't.\n\n for arg_18 in arg_13:\n\n def callback_adapter(arg_19):\n arg_0.launch_if_ready(arg_7)\n\n try:\n arg_18.add_done_callback(callback_adapter)\n except Exception as e:\n logger.error(\"add_done_callback got an exception {} which will be ignored\".format(e))\n\n arg_0.launch_if_ready(arg_7)\n\n return arg_10['app_fu']"} +{"_id": "doc_4031", "title": "", "text": "def Func(arg_0):\n \"\"\"DataFlowKernel Func.\n\n This involves killing resources explicitly and sending die messages to IPP workers.\n\n If the executors are managed (created by the DFK), then we call scale_in on each of\n the executors and call executor.shutdown. Otherwise, we do nothing, and executor\n Func is left to the user.\n \"\"\"\n logger.info(\"DFK Func initiated\")\n\n # this check won't detect two DFK Funcs happening from\n # different threads extremely close in time because of\n # non-atomic read/modify of self.Func_called\n if arg_0.Func_called:\n raise Exception(\"attempt to clean up DFK when it has already been cleaned-up\")\n arg_0.Func_called = True\n\n arg_0.log_task_states()\n\n # Checkpointing takes priority over the rest of the tasks\n # checkpoint if any valid checkpoint method is specified\n if arg_0.checkpoint_mode is not None:\n arg_0.checkpoint()\n\n if arg_0._checkpoint_timer:\n logger.info(\"Stopping checkpoint timer\")\n arg_0._checkpoint_timer.close()\n\n # Send final stats\n arg_0.usage_tracker.send_message()\n arg_0.usage_tracker.close()\n\n logger.info(\"Terminating flow_control and strategy threads\")\n arg_0.flowcontrol.close()\n\n for arg_2 in arg_0.executors.values():\n if arg_2.managed:\n if arg_2.scaling_enabled:\n arg_3 = arg_2.provider.resources.keys()\n arg_2.scale_in(len(arg_3))\n arg_2.shutdown()\n\n arg_0.time_completed = datetime.datetime.now()\n\n if arg_0.monitoring:\n arg_0.monitoring.send(MessageType.WORKFLOW_INFO,\n {'tasks_failed_count': arg_0.tasks_failed_count,\n 'tasks_completed_count': arg_0.tasks_completed_count,\n \"time_began\": arg_0.time_began,\n 'time_completed': arg_0.time_completed,\n 'workflow_duration': (arg_0.time_completed - arg_0.time_began).total_seconds(),\n 'run_id': arg_0.run_id, 'rundir': arg_0.run_dir})\n\n arg_0.monitoring.close()\n\n \"\"\"\n if self.logging_server is not None:\n self.logging_server.terminate()\n self.logging_server.join()\n\n if self.web_app is not None:\n self.web_app.terminate()\n self.web_app.join()\n \"\"\"\n logger.info(\"DFK Func complete\")"} +{"_id": "doc_4032", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Load a checkpoint file into a lookup table.\n\n The data being loaded from the pickle file mostly contains input\n attributes of the task: func, args, kwargs, env...\n To simplify the check of whether the exact task has been completed\n in the checkpoint, we hash these input params and use it as the key\n for the memoized lookup table.\n\n Args:\n - checkpointDirs (list) : List of filepaths to checkpoints\n Eg. 
['runinfo/001', 'runinfo/002']\n\n Returns:\n - memoized_lookup_table (dict)\n \"\"\"\n arg_2 = {}\n\n for arg_3 in arg_1:\n logger.info(\"Loading checkpoints from {}\".format(arg_3))\n arg_4 = os.path.join(arg_3, 'tasks.pkl')\n try:\n with open(arg_4, 'rb') as f:\n while True:\n try:\n arg_5 = pickle.load(f)\n # Copy and hash only the input attributes\n arg_6 = Future()\n if arg_5['exception']:\n arg_6.set_exception(arg_5['exception'])\n else:\n arg_6.set_result(arg_5['result'])\n arg_2[arg_5['hash']] = arg_6\n\n except EOFError:\n # Done with the checkpoint file\n break\n except FileNotFoundError:\n arg_7 = \"Checkpoint file was not found: {}\".format(\n arg_4)\n logger.error(arg_7)\n raise BadCheckpoint(arg_7)\n except Exception:\n arg_7 = \"Failed to load checkpoint: {}\".format(\n arg_4)\n logger.error(arg_7)\n raise BadCheckpoint(arg_7)\n\n logger.info(\"Completed loading checkpoint:{0} with {1} tasks\".format(arg_4,\n len(arg_2.keys())))\n return arg_2"} +{"_id": "doc_4033", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Load checkpoints from the checkpoint files into a dictionary.\n\n The results are used to pre-populate the memoizer's lookup_table\n\n Kwargs:\n - checkpointDirs (list) : List of run folder to use as checkpoints\n Eg. ['runinfo/001', 'runinfo/002']\n\n Returns:\n - dict containing, hashed -> future mappings\n \"\"\"\n arg_0.memo_lookup_table = None\n\n if not arg_1:\n return {}\n\n if type(arg_1) is not list:\n raise BadCheckpoint(\"checkpointDirs expects a list of checkpoints\")\n\n return arg_0._Func(arg_1)"} +{"_id": "doc_4034", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Pull tasks from the incoming tasks 0mq pipe onto the internal\n pending task queue\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.\n \"\"\"\n logger.info(\"[TASK_PULL_THREAD] Starting\")\n arg_2 = 0\n arg_3 = zmq.Poller()\n arg_3.register(arg_0.task_incoming, zmq.POLLIN)\n\n while not arg_1.is_set():\n try:\n arg_4 = arg_0.task_incoming.recv_pyobj()\n except zmq.Again:\n # We just timed out while attempting to receive\n logger.debug(\"[TASK_PULL_THREAD] {} tasks in internal queue\".format(arg_0.pending_task_queue.qsize()))\n continue\n\n if arg_4 == 'STOP':\n arg_1.set()\n break\n else:\n arg_0.pending_task_queue.put(arg_4)\n arg_2 += 1\n logger.debug(\"[TASK_PULL_THREAD] Fetched task:{}\".format(arg_2))"} +{"_id": "doc_4035", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Command server to run async command to the interchange\n \"\"\"\n logger.debug(\"[COMMAND] Command Server Starting\")\n while not arg_1.is_set():\n try:\n arg_2 = arg_0.command_channel.recv_pyobj()\n logger.debug(\"[COMMAND] Received command request: {}\".format(arg_2))\n if arg_2 == \"OUTSTANDING_C\":\n arg_3 = arg_0.pending_task_queue.qsize()\n for arg_4 in arg_0._ready_manager_queue:\n arg_3 += len(arg_0._ready_manager_queue[arg_4]['tasks'])\n arg_5 = arg_3\n\n elif arg_2 == \"WORKERS\":\n arg_6 = 0\n for arg_4 in arg_0._ready_manager_queue:\n arg_6 += arg_0._ready_manager_queue[arg_4]['worker_count']\n arg_5 = arg_6\n elif arg_2 == \"MANAGERS\":\n arg_5 = []\n for arg_4 in arg_0._ready_manager_queue:\n arg_7 = {'manager': arg_4.decode('utf-8'),\n 'block_id': arg_0._ready_manager_queue[arg_4]['block_id'],\n 'worker_count': arg_0._ready_manager_queue[arg_4]['worker_count'],\n 'tasks': len(arg_0._ready_manager_queue[arg_4]['tasks']),\n 'active': arg_0._ready_manager_queue[arg_4]['active']}\n arg_5.append(arg_7)\n\n elif 
arg_2.startswith(\"HOLD_WORKER\"):\n arg_8, arg_9 = arg_2.split(';')\n arg_4 = arg_9.encode('utf-8')\n logger.info(\"[CMD] Received HOLD_WORKER for {}\".format(arg_4))\n if arg_4 in arg_0._ready_manager_queue:\n arg_0._ready_manager_queue[arg_4]['active'] = False\n arg_5 = True\n else:\n arg_5 = False\n\n elif arg_2 == \"SHUTDOWN\":\n logger.info(\"[CMD] Received SHUTDOWN command\")\n arg_1.set()\n arg_5 = True\n\n else:\n arg_5 = None\n\n logger.debug(\"[COMMAND] Reply: {}\".format(arg_5))\n arg_0.command_channel.send_pyobj(arg_5)\n\n except zmq.Again:\n logger.debug(\"[COMMAND] is alive\")\n continue"} +{"_id": "doc_4036", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the DataManager of the currently loaded DataFlowKernel.\n \"\"\"\n from parsl.dataflow.dflow import DataFlowKernelLoader\n arg_1 = DataFlowKernelLoader.dfk()\n\n return arg_1.executors['data_manager']"} +{"_id": "doc_4037", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Transport the file from the input source to the executor.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) : file to stage in\n - executor (str) : an executor the file is going to be staged in to.\n If the executor argument is not specified for a file\n with 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if arg_1.scheme == 'ftp':\n arg_3 = arg_0.dfk.executors[arg_2].working_dir\n arg_4 = arg_0._ftp_Func_app(arg_2=arg_2)\n arg_5 = arg_4(arg_3, outputs=[arg_1])\n return arg_5._outputs[0]\n elif arg_1.scheme == 'http' or arg_1.scheme == 'https':\n arg_3 = arg_0.dfk.executors[arg_2].working_dir\n arg_4 = arg_0._http_Func_app(arg_2=arg_2)\n arg_5 = arg_4(arg_3, outputs=[arg_1])\n return arg_5._outputs[0]\n elif arg_1.scheme == 'globus':\n arg_6 = arg_0._get_globus_endpoint(arg_2)\n arg_4 = arg_0._globus_Func_app()\n arg_5 = arg_4(arg_6, outputs=[arg_1])\n return arg_5._outputs[0]\n else:\n raise Exception('Staging in with unknown file scheme {} is not supported'.format(arg_1.scheme))"} +{"_id": "doc_4038", "title": "", "text": "def Func(arg_0=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not os.path.isdir(arg_0)):\n return []\n\n arg_1 = sorted(os.listdir(arg_0))\n\n arg_2 = []\n\n for arg_3 in arg_1:\n\n arg_4 = os.path.abspath('{}/{}/checkpoint'.format(arg_0, arg_3))\n\n if os.path.isdir(arg_4):\n arg_2.append(arg_4)\n\n return arg_2"} +{"_id": "doc_4039", "title": "", "text": "def Func(arg_0=\"runinfo\"):\n \"\"\"Find the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n if not os.path.isdir(arg_0):\n return []\n\n arg_1 = sorted(os.listdir(arg_0))\n\n if len(arg_1) == 0:\n return []\n\n arg_2 = arg_1[-1]\n arg_3 = os.path.abspath('{}/{}/checkpoint'.format(arg_0, arg_2))\n\n if(not(os.path.isdir(arg_3))):\n return []\n\n return [arg_3]"} +{"_id": "doc_4040", "title": "", "text": "def Func():\n \"\"\"Revert to using stdlib pickle.\n\n Reverts custom serialization enabled by use_dill|cloudpickle.\n \"\"\"\n from . import arg_0\n arg_0.pickle = arg_0._stdlib_pickle\n\n # restore special function handling\n arg_2[arg_3] = _original_can_map[arg_3]"} +{"_id": "doc_4041", "title": "", "text": "def Func(arg_0):\n \"\"\"Specify path to the ipcontroller-engine.json file.\n\n This file is stored in in the ipython_dir/profile folders.\n\n Returns :\n - str, File path to engine file\n \"\"\"\n return os.path.join(arg_0.ipython_dir,\n 'profile_{0}'.format(arg_0.profile),\n 'security/ipcontroller-engine.json')"} +{"_id": "doc_4042", "title": "", "text": "def Func(arg_0):\n \"\"\"Terminate the controller process and its child processes.\n\n Args:\n - None\n \"\"\"\n if arg_0.reuse:\n logger.debug(\"Ipcontroller not shutting down: reuse enabled\")\n return\n\n if arg_0.mode == \"manual\":\n logger.debug(\"Ipcontroller not shutting down: Manual mode\")\n return\n\n try:\n arg_1 = os.getpgid(arg_0.proc.pid)\n os.killpg(arg_1, signal.SIGTERM)\n time.sleep(0.2)\n os.killpg(arg_1, signal.SIGKILL)\n try:\n arg_0.proc.wait(timeout=1)\n arg_2 = arg_0.proc.returncode\n if arg_2 == 0:\n logger.debug(\"Controller exited with {0}\".format(arg_2))\n else:\n logger.error(\"Controller exited with {0}. May require manual cleanup\".format(arg_2))\n except subprocess.TimeoutExpired:\n logger.warn(\"Ipcontroller process:{0} cleanup failed. May require manual cleanup\".format(arg_0.proc.pid))\n\n except Exception as e:\n logger.warn(\"Failed to kill the ipcontroller process[{0}]: {1}\".format(arg_0.proc.pid, e))"} +{"_id": "doc_4043", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create a hash of the task and its inputs and check the lookup table for this hash.\n\n If present, the results are returned. 
The result is a tuple indicating whether a memo\n exists and the result, since a Null result is possible and could be confusing.\n This seems like a reasonable option without relying on an cache_miss exception.\n\n Args:\n - task(task) : task from the dfk.tasks table\n\n Returns:\n Tuple of the following:\n - present (Bool): Is this present in the memo_lookup_table\n - Result (Py Obj): Result of the function if present in table\n\n This call will also set task['hashsum'] to the unique hashsum for the func+inputs.\n \"\"\"\n if not arg_0.memoize or not arg_2['memoize']:\n arg_2['hashsum'] = None\n return None, None\n\n arg_3 = arg_0.make_hash(arg_2)\n arg_4 = False\n arg_5 = None\n if arg_3 in arg_0.memo_lookup_table:\n arg_4 = True\n arg_5 = arg_0.memo_lookup_table[arg_3]\n logger.info(\"Task %s using result from cache\", arg_1)\n\n arg_2['hashsum'] = arg_3\n return arg_4, arg_5"} +{"_id": "doc_4044", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Updates the memoization lookup table with the result from a task.\n\n Args:\n - task_id (int): Integer task id\n - task (dict) : A task dict from dfk.tasks\n - r (Result future): Result future\n\n A warning is issued when a hash collision occurs during the update.\n This is not likely.\n \"\"\"\n if not arg_0.memoize or not arg_2['memoize']:\n return\n\n if arg_2['hashsum'] in arg_0.memo_lookup_table:\n logger.info('Updating appCache entry with latest %s:%s call' %\n (arg_2['func_name'], arg_1))\n arg_0.memo_lookup_table[arg_2['hashsum']] = arg_3\n else:\n arg_0.memo_lookup_table[arg_2['hashsum']] = arg_3"} +{"_id": "doc_4045", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Extract buffers larger than a certain threshold.\"\"\"\n arg_3 = []\n if isinstance(arg_0, CannedObject) and arg_0.buffers:\n for arg_4, arg_5 in enumerate(arg_0.buffers):\n arg_6 = _nbytes(arg_5)\n if arg_6 > arg_1:\n # buffer larger than threshold, prevent pickling\n arg_0.buffers[arg_4] = None\n arg_3.append(arg_5)\n # buffer too small for separate send, coerce to bytes\n # because pickling buffer objects just results in broken pointers\n elif isinstance(arg_5, memoryview):\n arg_0.buffers[arg_4] = arg_5.tobytes()\n elif isinstance(arg_5, buffer):\n arg_0.buffers[arg_4] = bytes(arg_5)\n return arg_3"} +{"_id": "doc_4046", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Restore extracted buffers.\"\"\"\n if isinstance(arg_0, CannedObject) and arg_0.buffers:\n for arg_2, arg_3 in enumerate(arg_0.buffers):\n if arg_3 is None:\n arg_0.buffers[arg_2] = arg_1.pop(0)"} +{"_id": "doc_4047", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4):\n \"\"\"Serialize an object into a list of sendable buffers.\n\n Parameters\n ----------\n\n obj : object\n The object to be serialized\n buffer_threshold : int\n The threshold (in bytes) for pulling out data buffers\n to avoid pickling them.\n item_threshold : int\n The maximum number of items over which canning will iterate.\n Containers (lists, dicts) larger than this will be pickled without\n introspection.\n\n Returns\n -------\n [bufs] : list of buffers representing the serialized object.\n \"\"\"\n arg_5 = []\n if istype(arg_0, sequence_types) and len(arg_0) < arg_3:\n arg_6 = can_sequence(arg_0)\n for arg_7 in arg_6:\n arg_5.extend(_extract_buffers(arg_7, arg_1))\n elif istype(arg_0, dict) and len(arg_0) < arg_3:\n arg_6 = {}\n for arg_8 in sorted(arg_0):\n arg_7 = can(arg_0[arg_8])\n arg_5.extend(_extract_buffers(arg_7, arg_1))\n arg_6[arg_8] = arg_7\n else:\n arg_6 = can(arg_0)\n 
arg_5.extend(_extract_buffers(arg_6, arg_1))\n\n arg_5.insert(0, pickle.dumps(arg_6, PICKLE_PROTOCOL))\n return arg_5"} +{"_id": "doc_4048", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Reconstruct an object serialized by serialize_object from data buffers.\n\n Parameters\n ----------\n\n bufs : list of buffers/bytes\n\n g : globals to be used when uncanning\n\n Returns\n -------\n\n (newobj, bufs) : unpacked object, and the list of remaining unused buffers.\n \"\"\"\n arg_2 = list(arg_0)\n arg_3 = buffer_to_bytes_py2(arg_2.pop(0))\n arg_4 = pickle.loads(arg_3)\n if istype(arg_4, sequence_types) and len(arg_4) < MAX_ITEMS:\n for arg_5 in arg_4:\n _restore_buffers(arg_5, arg_2)\n arg_6 = uncan_sequence(arg_4, arg_1)\n elif istype(arg_4, dict) and len(arg_4) < MAX_ITEMS:\n arg_6 = {}\n for arg_7 in sorted(arg_4):\n arg_5 = arg_4[arg_7]\n _restore_buffers(arg_5, arg_2)\n arg_6[arg_7] = uncan(arg_5, arg_1)\n else:\n _restore_buffers(arg_4, arg_2)\n arg_6 = uncan(arg_4, arg_1)\n\n return arg_6, arg_2"} +{"_id": "doc_4049", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Generate submit script and write it to a file.\n\n Args:\n - template (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n - job_name (string) : job name\n - configs (dict) : configs that get pushed into the template\n\n Returns:\n - True: on success\n\n Raises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out\n \"\"\"\n\n try:\n arg_5 = Template(arg_1).substitute(jobname=arg_3, **arg_4)\n # submit_script = Template(template).safe_substitute(jobname=job_name, **configs)\n with open(arg_2, 'w') as f:\n f.write(arg_5)\n\n except KeyError as e:\n logger.error(\"Missing keys for submit script : %s\", e)\n raise (SchedulerMissingArgs(e.args, arg_0.sitename))\n\n except IOError as e:\n logger.error(\"Failed writing to submit script: %s\", arg_2)\n raise (ScriptPathError(arg_2, e))\n except Exception as e:\n print(\"Template : \", arg_1)\n print(\"Args : \", arg_3)\n print(\"Kwargs : \", arg_4)\n logger.error(\"Uncategorized error: %s\", e)\n raise (e)\n\n return True"} +{"_id": "doc_4050", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [ ...]\n\n Returns :\n [True/False...] 
: If the Func operation fails the entire list will be False.\n '''\n for arg_2 in arg_1:\n logger.debug(\"Terminating job/proc_id: {0}\".format(arg_2))\n # Here we are assuming that for local, the job_ids are the process id's\n if arg_0.resources[arg_2]['proc']:\n arg_3 = arg_0.resources[arg_2]['proc']\n os.killpg(os.getpgid(arg_3.pid), signal.SIGTERM)\n arg_0.resources[arg_2]['status'] = 'CANCELLED'\n\n elif arg_0.resources[arg_2]['remote_pid']:\n arg_5 = \"kill -- -$(ps -o pgid={} | grep -o '[0-9]*')\".format(arg_0.resources[arg_2]['remote_pid'])\n arg_6, arg_7, arg_8 = arg_0.channel.execute_wait(arg_5, arg_0.cmd_timeout)\n if arg_6 != 0:\n logger.warning(\"Failed to kill PID: {} and child processes on {}\".format(arg_0.resources[arg_2]['remote_pid'],\n arg_0.label))\n\n arg_9 = [True for i in arg_1]\n return arg_9"} +{"_id": "doc_4051", "title": "", "text": "def Func(arg_0):\n \"\"\"Save information that must persist to a file.\n\n We do not want to create a new VPC and new identical security groups, so we save\n information about them in a file between runs.\n \"\"\"\n arg_1 = open('awsproviderstate.json', 'w')\n arg_2 = {}\n arg_2['vpcID'] = arg_0.vpc_id\n arg_2['sgID'] = arg_0.sg_id\n arg_2['snIDs'] = arg_0.sn_ids\n arg_2['instances'] = arg_0.instances\n arg_2[\"instanceState\"] = arg_0.instance_states\n arg_1.write(json.dumps(arg_2, indent=4))"} +{"_id": "doc_4052", "title": "", "text": "def Func(arg_0):\n \"\"\"Create a session.\n\n First we look in self.key_file for a path to a json file with the\n credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.\n\n Next we look at self.profile for a profile name and try\n to use the Session call to automatically pick up the keys for the profile from\n the user default keys file ~/.aws/config.\n\n Finally, boto3 will look for the keys in environment variables:\n AWS_ACCESS_KEY_ID: The access key for your AWS account.\n AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.\n AWS_SESSION_TOKEN: The session key for your AWS account.\n This is only needed when you are using temporary credentials.\n The AWS_SECURITY_TOKEN environment variable can also be used,\n but is only supported for backwards compatibility purposes.\n AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.\n \"\"\"\n\n arg_1 = None\n\n if arg_0.key_file is not None:\n arg_2 = os.path.expandvars(os.path.expanduser(arg_0.key_file))\n\n try:\n with open(arg_2, 'r') as f:\n arg_3 = json.load(f)\n except json.JSONDecodeError as e:\n logger.error(\n \"EC2Provider '{}': json decode error in credential file {}\".format(arg_0.label, arg_2)\n )\n raise e\n\n except Exception as e:\n logger.debug(\n \"EC2Provider '{0}' caught exception while reading credential file: {1}\".format(\n arg_0.label, arg_2\n )\n )\n raise e\n\n logger.debug(\"EC2Provider '{}': Using credential file to create session\".format(arg_0.label))\n arg_1 = boto3.session.Session(region_name=arg_0.region, **arg_3)\n elif arg_0.profile is not None:\n logger.debug(\"EC2Provider '{}': Using profile name to create session\".format(arg_0.label))\n arg_1 = boto3.session.Session(\n profile_name=arg_0.profile, region_name=arg_0.region\n )\n else:\n logger.debug(\"EC2Provider '{}': Using environment variables to create session\".format(arg_0.label))\n arg_1 = boto3.session.Session(region_name=arg_0.region)\n\n return arg_1"} +{"_id": "doc_4053", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Start an instance in the VPC in the first available subnet.\n\n N instances will be 
started if nodes_per_block > 1.\n Not supported. We only do 1 node per block.\n\n Parameters\n ----------\n command : str\n Command string to execute on the node.\n job_name : str\n Name associated with the instances.\n \"\"\"\n\n arg_1 = Template(template_string).substitute(jobname=arg_2,\n user_script=arg_1,\n linger=str(arg_0.linger).lower(),\n worker_init=arg_0.worker_init)\n arg_3 = arg_0.instance_type\n arg_4 = arg_0.sn_ids[0]\n arg_5 = arg_0.image_id\n arg_6 = len(arg_0.instances)\n\n if float(arg_0.spot_max_bid) > 0:\n arg_7 = {\n 'MarketType': 'spot',\n 'SpotOptions': {\n 'MaxPrice': str(arg_0.spot_max_bid),\n 'SpotInstanceType': 'one-time',\n 'InstanceInterruptionBehavior': 'terminate'\n }\n }\n else:\n arg_7 = {}\n\n if arg_6 > arg_0.max_nodes:\n logger.warn(\"Exceeded instance limit ({}). Cannot continue\\n\".format(arg_0.max_nodes))\n return [None]\n try:\n arg_8 = [{\"ResourceType\": \"instance\", \"Tags\": [{'Key': 'Name', 'Value': arg_2}]}]\n\n arg_9 = arg_0.ec2.create_instances(\n MinCount=1,\n MaxCount=1,\n InstanceType=arg_3,\n ImageId=arg_5,\n KeyName=arg_0.key_name,\n SubnetId=arg_4,\n SecurityGroupIds=[arg_0.sg_id],\n TagSpecifications=arg_8,\n InstanceMarketOptions=arg_7,\n InstanceInitiatedShutdownBehavior='terminate',\n IamInstanceProfile={'Arn': arg_0.iam_instance_profile_arn},\n UserData=arg_1\n )\n except ClientError as e:\n print(e)\n logger.error(e.response)\n return [None]\n\n except Exception as e:\n logger.error(\"Request for EC2 resources failed : {0}\".format(e))\n return [None]\n\n arg_0.instances.append(arg_9[0].id)\n logger.info(\n \"Started up 1 instance {} . Instance type:{}\".format(arg_9[0].id, arg_3)\n )\n return arg_9"} +{"_id": "doc_4054", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Get states of all instances on EC2 which were started by this file.\"\"\"\n if arg_1:\n arg_2 = arg_0.client.describe_instances(InstanceIds=arg_1)\n else:\n arg_2 = arg_0.client.describe_instances(InstanceIds=arg_0.instances)\n # pprint.pprint(desc['Reservations'],indent=4)\n for arg_3 in range(len(arg_2['Reservations'])):\n arg_4 = arg_2['Reservations'][arg_3]['Instances'][0]\n arg_0.instance_states[arg_4['InstanceId']] = arg_4['State']['Name']\n return arg_0.instance_states"} +{"_id": "doc_4055", "title": "", "text": "def Func(arg_0, arg_1='sleep 1', arg_2=1, arg_3=1, arg_4=\"parsl.auto\"):\n \"\"\"Submit the command onto a freshly instantiated AWS EC2 instance.\n\n Submit returns an ID that corresponds to the task that was just Functed.\n\n Parameters\n ----------\n command : str\n Command to be invoked on the remote side.\n blocksize : int\n Number of blocks requested.\n tasks_per_node : int (default=1)\n Number of command invocations to be launched per node\n job_name : str\n Prefix for the job name.\n\n Returns\n -------\n None or str\n If at capacity, None will be returned. 
Otherwise, the job identifier will be returned.\n \"\"\"\n\n arg_4 = \"parsl.auto.{0}\".format(time.time())\n arg_5 = arg_0.launcher(arg_1,\n arg_3,\n arg_0.nodes_per_block)\n [arg_6, *arg_7] = arg_0.spin_up_instance(arg_1=arg_5, arg_4=arg_4)\n\n if not arg_6:\n logger.error(\"Failed to Func request to EC2\")\n return None\n\n logger.debug(\"Started instance_id: {0}\".format(arg_6.instance_id))\n\n arg_8 = translate_table.get(arg_6.state['Name'], \"PENDING\")\n\n arg_0.resources[arg_6.instance_id] = {\n \"job_id\": arg_6.instance_id,\n \"instance\": arg_6,\n \"status\": arg_8\n }\n\n return arg_6.instance_id"} +{"_id": "doc_4056", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Cancel the jobs specified by a list of job ids.\n\n Parameters\n ----------\n job_ids : list of str\n List of of job identifiers\n\n Returns\n -------\n list of bool\n Each entry in the list will contain False if the operation fails. Otherwise, the entry will be True.\n \"\"\"\n\n if arg_0.linger is True:\n logger.debug(\"Ignoring Func requests due to linger mode\")\n return [False for arg_2 in arg_1]\n\n try:\n arg_0.client.terminate_instances(InstanceIds=list(arg_1))\n except Exception as e:\n logger.error(\"Caught error while attempting to remove instances: {0}\".format(arg_1))\n raise e\n else:\n logger.debug(\"Removed the instances: {0}\".format(arg_1))\n\n for arg_3 in arg_1:\n arg_0.resources[arg_3][\"status\"] = \"COMPLETED\"\n\n for arg_3 in arg_1:\n arg_0.instances.remove(arg_3)\n\n return [True for arg_2 in arg_1]"} +{"_id": "doc_4057", "title": "", "text": "def Func(arg_0):\n \"\"\"Teardown the EC2 infastructure.\n\n Terminate all EC2 instances, delete all subnets, delete security group, delete VPC,\n and reset all instance variables.\n \"\"\"\n\n arg_0.shut_down_instance(arg_0.instances)\n arg_0.instances = []\n try:\n arg_0.client.delete_internet_gateway(InternetGatewayId=arg_0.internet_gateway)\n arg_0.internet_gateway = None\n arg_0.client.delete_route_table(RouteTableId=arg_0.route_table)\n arg_0.route_table = None\n for arg_4 in list(arg_0.sn_ids):\n # Cast to list ensures that this is a copy\n # Which is important because it means that\n # the length of the list won't change during iteration\n arg_0.client.delete_subnet(SubnetId=arg_4)\n arg_0.sn_ids.remove(arg_4)\n arg_0.client.delete_security_group(GroupId=arg_0.sg_id)\n arg_0.sg_id = None\n arg_0.client.delete_vpc(VpcId=arg_0.vpc_id)\n arg_0.vpc_id = None\n except Exception as e:\n logger.error(\"{}\".format(e))\n raise e\n arg_0.show_summary()\n os.remove(arg_0.config['state_file_path'])"} +{"_id": "doc_4058", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=1):\n ''' Scale out the existing resources.\n '''\n arg_0.config['sites.jetstream.{0}'.format(arg_0.pool)]['flavor']\n arg_3 = 0\n if arg_1 == 1:\n arg_4 = len(arg_0.blocks)\n arg_0.blocks[arg_4] = []\n for arg_5 in range(0, arg_2):\n arg_6 = arg_0.server_manager.create(\n 'parsl-{0}-{1}'.format(arg_4, arg_5), # Name\n arg_0.client.images.get('87e08a17-eae2-4ce4-9051-c561d9a54bde'), # Image_id\n arg_0.client.flavors.list()[0],\n min_count=1,\n max_count=1,\n userdata=setup_script.format(engine_config=arg_0.engine_config),\n key_name='TG-MCB090174-api-key',\n security_groups=['global-ssh'],\n nics=[{\n \"net-id\": '724a50cf-7f11-4b3b-a884-cd7e6850e39e',\n \"net-name\": 'PARSL-priv-net',\n \"v4-fixed-ip\": ''\n }])\n arg_0.blocks[arg_4].extend([arg_6])\n arg_3 += 1\n\n return arg_3"} +{"_id": "doc_4059", "title": "", "text": "def Func(arg_0):\n \"\"\"Update the resource dictionary 
with job statuses.\"\"\"\n\n arg_1 = ' '.join(arg_0.resources.keys())\n arg_2 = \"condor_q {0} -af:jr JobStatus\".format(arg_1)\n arg_3, arg_4, arg_5 = super().execute_wait(arg_2)\n \"\"\"\n Example output:\n\n $ condor_q 34524642.0 34524643.0 -af:jr JobStatus\n 34524642.0 2\n 34524643.0 1\n \"\"\"\n\n for arg_6 in arg_4.strip().split('\\n'):\n arg_7 = arg_6.split()\n arg_8 = arg_7[0]\n arg_9 = translate_table.get(arg_7[1], 'UNKNOWN')\n arg_0.resources[arg_8]['status'] = arg_9"} +{"_id": "doc_4060", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"Scales out the number of active workers by 1.\n\n This method is notImplemented for threads and will raise the error if called.\n\n Parameters:\n blocks : int\n Number of blocks to be provisioned.\n \"\"\"\n arg_2 = []\n for arg_3 in range(arg_1):\n if arg_0.provider:\n block = arg_0.provider.submit(arg_0.launch_cmd, 1, arg_0.workers_per_node)\n logger.debug(\"Launched block {}:{}\".format(arg_3, block))\n if not block:\n raise(ScalingFailed(arg_0.provider.label,\n \"Attempts to provision nodes via provider has failed\"))\n arg_0.engines.extend([block])\n arg_2.extend([block])\n else:\n logger.error(\"No execution provider available\")\n arg_2 = None\n\n return arg_2"} +{"_id": "doc_4061", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the status of the executor via probing the execution providers.\"\"\"\n if arg_0.provider:\n Func = arg_0.provider.status(arg_0.engines)\n\n else:\n Func = []\n\n return Func"} +{"_id": "doc_4062", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()\n \"\"\"\n if arg_1.done() is True:\n arg_2 = arg_1._exception\n if arg_2:\n super().set_exception(arg_2)\n else:\n super().set_result(arg_0.file_obj)\n return"} +{"_id": "doc_4063", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Cancels the resources identified by the job_ids provided by the user.\n\n Args:\n - job_ids (list): A list of job identifiers\n\n Returns:\n - A list of status from Funcling the job which can be True, False\n\n Raises:\n - ExecutionProviderException or its subclasses\n '''\n arg_2 = []\n for arg_3 in arg_1:\n try:\n arg_0.delete_instance(arg_3)\n arg_2.append(True)\n arg_0.provisioned_blocks -= 1\n except Exception:\n arg_2.append(False)\n return arg_2"} +{"_id": "doc_4064", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"This is a function that mocks the Swift-T side.\n\n It listens on the the incoming_q for tasks and posts returns on the outgoing_q.\n\n Args:\n - incoming_q (Queue object) : The queue to listen on\n - outgoing_q (Queue object) : Queue to post results on\n\n The messages posted on the incoming_q will be of the form :\n\n .. code:: python\n\n {\n \"task_id\" : ,\n \"buffer\" : serialized buffer containing the fn, args and kwargs\n }\n\n If ``None`` is received, the Func will exit.\n\n Response messages should be of the form:\n\n .. 
code:: python\n\n {\n \"task_id\" : ,\n \"result\" : serialized buffer containing result\n \"exception\" : serialized exception object\n }\n\n On exiting the Func will post ``None`` to the outgoing_q\n\n \"\"\"\n logger.debug(\"[RUNNER] Starting\")\n\n def execute_task(arg_2):\n \"\"\"Deserialize the buffer and execute the task.\n\n Returns the serialized result or exception.\n \"\"\"\n arg_3 = locals()\n arg_3.update({'__builtins__': __builtins__})\n\n arg_4, arg_5, arg_6 = unpack_apply_message(arg_2, arg_3, copy=False)\n\n arg_7 = getattr(arg_4, '__name__', 'f')\n arg_8 = \"parsl_\"\n arg_7 = arg_8 + \"f\"\n arg_9 = arg_8 + \"args\"\n arg_10 = arg_8 + \"kwargs\"\n arg_11 = arg_8 + \"result\"\n\n arg_3.update({arg_7: arg_4,\n arg_9: arg_5,\n arg_10: arg_6,\n arg_11: arg_11})\n\n arg_12 = \"{0} = {1}(*{2}, **{3})\".format(arg_11, arg_7,\n arg_9, arg_10)\n\n try:\n logger.debug(\"[RUNNER] Executing: {0}\".format(arg_12))\n exec(arg_12, arg_3, arg_3)\n\n except Exception as e:\n logger.warning(\"Caught exception; will raise it: {}\".format(e))\n raise e\n\n else:\n logger.debug(\"[RUNNER] Result: {0}\".format(arg_3.get(arg_11)))\n return arg_3.get(arg_11)\n\n while True:\n try:\n # Blocking wait on the queue\n arg_13 = arg_0.get(block=True, timeout=10)\n\n except queue.Empty:\n # Handle case where no items were in the queue\n logger.debug(\"[RUNNER] Queue is empty\")\n\n except IOError as e:\n logger.debug(\"[RUNNER] Broken pipe: {}\".format(e))\n try:\n # Attempt to send a stop notification to the management thread\n arg_1.put(None)\n\n except Exception:\n pass\n\n break\n\n except Exception as e:\n logger.debug(\"[RUNNER] Caught unknown exception: {}\".format(e))\n\n else:\n # Handle received message\n if not arg_13:\n # Empty message is a die request\n logger.debug(\"[RUNNER] Received exit request\")\n arg_1.put(None)\n break\n else:\n # Received a valid message, handle it\n logger.debug(\"[RUNNER] Got a valid task with ID {}\".format(arg_13[\"task_id\"]))\n try:\n arg_14 = execute_task(arg_13['buffer'])\n arg_15 = {\"task_id\": arg_13[\"task_id\"],\n \"result\": serialize_object(arg_14)}\n\n logger.debug(\"[RUNNER] Returing result: {}\".format(\n deserialize_object(arg_15[\"result\"])))\n\n except Exception as e:\n logger.debug(\"[RUNNER] Caught task exception: {}\".format(e))\n arg_15 = {\"task_id\": arg_13[\"task_id\"],\n \"exception\": serialize_object(e)}\n\n arg_1.put(arg_15)\n\n logger.debug(\"[RUNNER] Terminating\")"} +{"_id": "doc_4065", "title": "", "text": "def Func(arg_0):\n \"\"\"Shutdown method, to kill the threads and workers.\"\"\"\n arg_0.is_alive = False\n logging.debug(\"Waking management thread\")\n arg_0.incoming_q.put(None) # Wake up the thread\n arg_0._queue_management_thread.join() # Force join\n logging.debug(\"Exiting thread\")\n arg_0.worker.join()\n return True"} +{"_id": "doc_4066", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Submits work to the the outgoing_q.\n\n The outgoing_q is an external process listens on this\n queue for new work. 
This method is simply pass through and behaves like a\n Func call as described here `Python docs: `_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future\n \"\"\"\n arg_4 = uuid.uuid4()\n\n logger.debug(\"Pushing function {} to queue with args {}\".format(arg_1, arg_2))\n\n arg_0.tasks[arg_4] = Future()\n\n arg_6 = pack_apply_message(arg_1, arg_2, arg_3,\n buffer_threshold=1024 * 1024,\n item_threshold=1024)\n\n arg_7 = {\"task_id\": arg_4,\n \"buffer\": arg_6}\n\n # Post task to the the outgoing queue\n arg_0.outgoing_q.put(arg_7)\n\n # Return the future\n return arg_0.tasks[arg_4]"} +{"_id": "doc_4067", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the resolved Func on the side where it is called from.\n\n The appropriate Func will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - Func (string)\n \"\"\"\n if hasattr(arg_0, 'local_path'):\n return arg_0.local_path\n\n if arg_0.scheme in ['ftp', 'http', 'https', 'globus']:\n return arg_0.filename\n elif arg_0.scheme in ['file']:\n return arg_0.path\n else:\n raise Exception('Cannot return Func for unknown scheme {}'.format(arg_0.scheme))"} +{"_id": "doc_4068", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=60, arg_3=False, arg_4='all'):\n \"\"\"The Func decorator function.\n\n Args:\n - apptype (string) : Functype can be bash|python\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime for app in seconds,\n default=60\n - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n A PythonFunc or BashFunc object, which when called runs the apps through the executor.\n \"\"\"\n\n from parsl.app.python import PythonFunc\n from parsl.app.bash import BashFunc\n\n logger.warning(\"The 'Func' decorator will be deprecated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.\")\n\n if arg_0 == 'python':\n arg_5 = PythonFunc\n elif arg_0 == 'bash':\n arg_5 = BashFunc\n else:\n raise InvalidFuncTypeError(\"Invalid apptype requested {}; must be 'python' or 'bash'\".format(arg_0))\n\n def wrapper(arg_6):\n return arg_5(arg_6,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4)\n return wrapper"} +{"_id": "doc_4069", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=60, arg_3=False, arg_4='all'):\n \"\"\"Decorator function for making python apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@Func` if using all defaults or `@Func(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. 
Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.\n \"\"\"\n from parsl.app.python import PythonApp\n\n def decorator(arg_5):\n def wrapper(arg_6):\n return PythonApp(arg_6,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4)\n return wrapper(arg_5)\n if arg_0 is not None:\n return decorator(arg_0)\n return decorator"} +{"_id": "doc_4070", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=60, arg_3=False, arg_4='all'):\n \"\"\"Decorator function for making bash apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@Func` if using all defaults or `@Func(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.\n \"\"\"\n from parsl.app.bash import BashApp\n\n def decorator(arg_5):\n def wrapper(arg_6):\n return BashApp(arg_6,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4)\n return wrapper(arg_5)\n if arg_0 is not None:\n return decorator(arg_0)\n return decorator"} +{"_id": "doc_4071", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\" Internal\n Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.\n \"\"\"\n def wrapped(*arg_5, **arg_6):\n arg_7 = Process(target=monitor, arg_5=(os.getpid(), arg_1, arg_2, arg_3, arg_4))\n arg_7.start()\n try:\n return arg_0(*arg_5, **arg_6)\n finally:\n # There's a chance of zombification if the workers are killed by some signals\n arg_7.terminate()\n arg_7.join()\n return wrapped"} +{"_id": "doc_4072", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Transport file on the remote side to a local directory\n\n Args:\n - remote_source (string): remote_source\n - local_dir (string): Local directory to copy to\n\n\n Returns:\n - str: Local path to file\n\n Raises:\n - FileExists : Name collision at local directory.\n - FileCopyException : FileCopy failed.\n '''\n\n arg_3 = arg_2 + '/' + os.path.basename(arg_1)\n\n try:\n os.makedirs(arg_2)\n except OSError as e:\n if e.errno != errno.EEXIST:\n logger.exception(\"Failed to create script_dir: {0}\".format(script_dir))\n raise BadScriptPath(e, arg_0.hostname)\n\n # Easier to check this than to waste time trying to pull file and\n # realize there's a problem.\n if os.path.exists(arg_3):\n logger.exception(\"Remote file copy will overwrite a local file:{0}\".format(arg_3))\n raise FileExists(None, arg_0.hostname, filename=arg_3)\n\n try:\n arg_0.sftp_client.get(arg_1, arg_3)\n except Exception as e:\n logger.exception(\"File pull failed\")\n raise FileCopyException(e, arg_0.hostname)\n\n return arg_3"} +{"_id": "doc_4073", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return true if the path refers to an existing directory.\n\n 
Parameters\n ----------\n path : str\n Path of directory on the remote side to check.\n \"\"\"\n arg_2 = True\n try:\n arg_0.sftp_client.lstat(arg_1)\n except FileNotFoundError:\n arg_2 = False\n\n return arg_2"} +{"_id": "doc_4074", "title": "", "text": "def Func(arg_0, arg_1, arg_2=511, arg_3=False):\n \"\"\"Create a directory on the remote side.\n\n If intermediate directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory on the remote side to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.\n \"\"\"\n if arg_3 is False and arg_0.isdir(arg_1):\n raise OSError('Target directory {} already exists'.format(arg_1))\n\n arg_0.execute_wait('mkdir -p {}'.format(arg_1))\n arg_0.sftp_client.chmod(arg_1, arg_2)"} +{"_id": "doc_4075", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Let the FlowControl system know that there is an event.\"\"\"\n arg_0._event_buffer.extend([arg_1])\n arg_0._event_count += 1\n if arg_0._event_count >= arg_0.threshold:\n logger.debug(\"Eventcount >= threshold\")\n arg_0.make_callback(kind=\"event\")"} +{"_id": "doc_4076", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Create the kubernetes deployment \"\"\"\n\n arg_2 = arg_0.kube_client.create_namespaced_deployment(\n body=arg_1,\n namespace=arg_0.namespace)\n\n logger.debug(\"Deployment created. status='{0}'\".format(str(arg_2.status)))"} +{"_id": "doc_4077", "title": "", "text": "def Func(arg_0):\n \"\"\" Compose the launch command and call the scale_out\n\n This should be implemented in the child classes to take care of\n executor specific oddities.\n \"\"\"\n arg_1 = \"--debug\" if arg_0.worker_debug else \"\"\n arg_2 = \"\" if arg_0.max_workers == float('inf') else \"--max_workers={}\".format(arg_0.max_workers)\n\n arg_3 = \"{}/{}\".format(arg_0.run_dir, arg_0.label)\n if arg_0.worker_logdir_root is not None:\n arg_3 = \"{}/{}\".format(arg_0.worker_logdir_root, arg_0.label)\n\n arg_4 = arg_0.launch_cmd.format(debug=arg_1,\n prefetch_capacity=arg_0.prefetch_capacity,\n task_url=arg_0.worker_task_url,\n result_url=arg_0.worker_result_url,\n cores_per_worker=arg_0.cores_per_worker,\n arg_2=arg_2,\n nodes_per_block=arg_0.provider.nodes_per_block,\n heartbeat_period=arg_0.heartbeat_period,\n heartbeat_threshold=arg_0.heartbeat_threshold,\n poll_period=arg_0.poll_period,\n logdir=arg_3)\n arg_0.launch_cmd = arg_4\n logger.debug(\"Launch command: {}\".format(arg_0.launch_cmd))\n\n arg_0._scaling_enabled = arg_0.provider.scaling_enabled\n logger.debug(\"Starting HighThroughputExecutor with provider:\\n%s\", arg_0.provider)\n if hasattr(arg_0.provider, 'init_blocks'):\n try:\n arg_0.scale_out(blocks=arg_0.provider.init_blocks)\n except Exception as e:\n logger.error(\"Scaling out failed: {}\".format(e))\n raise e"} +{"_id": "doc_4078", "title": "", "text": "def Func(arg_0):\n \"\"\" Starts the interchange process locally\n\n Starts the interchange process locally and uses an internal command queue to\n get the worker task and result ports that the interchange has bound to.\n \"\"\"\n arg_1 = Queue(maxsize=10)\n arg_0.queue_proc = Process(target=interchange.starter,\n args=(arg_1,),\n kwargs={\"client_ports\": (arg_0.outgoing_q.port,\n arg_0.incoming_q.port,\n arg_0.command_client.port),\n \"worker_ports\": arg_0.worker_ports,\n \"worker_port_range\": arg_0.worker_port_range,\n \"logdir\": \"{}/{}\".format(arg_0.run_dir, arg_0.label),\n 
\"suppress_failure\": arg_0.suppress_failure,\n \"heartbeat_threshold\": arg_0.heartbeat_threshold,\n \"poll_period\": arg_0.poll_period,\n \"logging_level\": logging.DEBUG if arg_0.worker_debug else logging.INFO\n },\n )\n arg_0.queue_proc.start()\n try:\n (arg_3, arg_4) = arg_1.get(block=True, timeout=120)\n except queue.Empty:\n logger.error(\"Interchange has not completed initialization in 120s. Aborting\")\n raise Exception(\"Interchange failed to start\")\n\n arg_0.worker_task_url = \"tcp://{}:{}\".format(arg_0.address, arg_3)\n arg_0.worker_result_url = \"tcp://{}:{}\".format(arg_0.address, arg_4)"} +{"_id": "doc_4079", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Puts a worker on hold, preventing scheduling of additional tasks to it.\n\n This is called \"hold\" mostly because this only stops scheduling of tasks,\n and does not actually kill the worker.\n\n Parameters\n ----------\n\n worker_id : str\n Worker id to be put on hold\n \"\"\"\n arg_2 = arg_0.command_client.run(\"HOLD_WORKER;{}\".format(arg_1))\n logger.debug(\"Sent hold request to worker: {}\".format(arg_1))\n return arg_2"} +{"_id": "doc_4080", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Sends hold command to all managers which are in a specific block\n\n Parameters\n ----------\n block_id : str\n Block identifier of the block to be put on hold\n \"\"\"\n\n arg_2 = arg_0.connected_managers\n\n for arg_3 in arg_2:\n if arg_3['block_id'] == arg_1:\n logger.debug(\"[HOLD_BLOCK]: Sending hold to manager:{}\".format(arg_3['manager']))\n arg_0.hold_worker(arg_3['manager'])"} +{"_id": "doc_4081", "title": "", "text": "def Func(arg_0):\n \"\"\"Return status of all blocks.\"\"\"\n\n Func = []\n if arg_0.provider:\n Func = arg_0.provider.status(arg_0.blocks.values())\n\n return Func"} +{"_id": "doc_4082", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Called by RQ when there is a failure in a worker.\n\n NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with\n handler='blocking'. 
The default handler, 'thread', does not work from inside an RQ worker.\n \"\"\"\n # Report data about the job with the exception.\n arg_2 = arg_0.to_dict()\n # job_info['data'] is the pickled representation of the job, and doesn't json-serialize well.\n # repr() works nicely.\n arg_2['data'] = repr(arg_2['data'])\n\n arg_3 = {'job': arg_2}\n arg_4 = {'framework': 'rq'}\n \n rollbar.report_exc_info(arg_1, arg_3=arg_3, arg_4=arg_4)\n\n # continue to the next handler\n return True"} +{"_id": "doc_4083", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Pyramid entry point\n \"\"\"\n arg_1 = arg_0.registry.settings\n\n arg_0.add_tween('rollbar.contrib.pyramid.rollbar_tween_factory', over=EXCVIEW)\n\n # run patch_debugtoolbar, unless they disabled it\n if asbool(arg_1.get('rollbar.patch_debugtoolbar', True)):\n patch_debugtoolbar(arg_1)\n\n def hook(arg_2, arg_3):\n arg_3['framework'] = 'pyramid'\n\n if arg_2:\n arg_2.environ['rollbar.uuid'] = arg_3['uuid']\n\n if arg_2.matched_route:\n arg_3['context'] = arg_2.matched_route.name\n\n arg_5.BASE_DATA_HOOK = hook\n\n arg_7 = parse_settings(arg_1)\n\n arg_8 = arg_7.pop('access_token')\n arg_9 = arg_7.pop('environment', 'production')\n\n if arg_7.get('scrub_fields'):\n arg_7['scrub_fields'] = set([str.strip(x) for x in arg_7.get('scrub_fields').split('\\n') if x])\n\n if arg_7.get('exception_level_filters'):\n arg_10 = DottedNameResolver()\n arg_11 = []\n for arg_12 in arg_7.get('exception_level_filters').split('\\n'):\n if arg_12:\n arg_13, arg_14 = arg_12.split()\n\n try:\n arg_15 = arg_10.resolve(arg_13)\n arg_11.append((arg_15, arg_14))\n except ImportError:\n log.error('Could not import %r' % arg_13)\n\n arg_7['exception_level_filters'] = arg_11\n\n arg_7['enabled'] = asbool(arg_7.get('enabled', True))\n\n arg_5.init(arg_8, arg_9, **arg_7)"} +{"_id": "doc_4084", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator for making error handling on AWS Lambda easier\n \"\"\"\n @functools.wraps(arg_0)\n def wrapper(arg_1, arg_2):\n global arg_3\n arg_3 = arg_2\n try:\n arg_4 = arg_0(arg_1, arg_2)\n return wait(lambda: arg_4)\n except:\n arg_5, arg_6, arg_7 = sys.exc_info()\n report_exc_info((arg_5, arg_6, arg_7.tb_next))\n wait()\n raise\n return wrapper"} +{"_id": "doc_4085", "title": "", "text": "def Func(arg_0, arg_1='error', arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Reports an arbitrary string message to Rollbar.\n\n message: the string body of the message\n level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'\n request: the request object for the context of the message\n extra_data: dictionary of params to include with the message. 'body' is reserved.\n payload_data: param names to pass in the 'data' level of the payload; overrides defaults.\n \"\"\"\n try:\n return _Func(arg_0, arg_1, arg_2, arg_3, arg_4)\n except Exception as e:\n log.exception(\"Exception while reporting message to Rollbar. %r\", e)"} +{"_id": "doc_4086", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, **arg_4):\n \"\"\"\n Searches a project for items that match the input criteria.\n\n title: all or part of the item's title to search for.\n return_fields: the fields that should be returned for each item.\n e.g. ['id', 'project_id', 'status'] will return a dict containing\n only those fields for each item.\n access_token: a project access token. 
If this is not provided,\n the one provided to init() will be used instead.\n search_fields: additional fields to include in the search.\n currently supported: status, level, environment\n \"\"\"\n if not arg_0:\n return []\n\n if arg_1 is not None:\n arg_1 = ','.join(arg_1)\n\n return _get_api('search/',\n arg_0=arg_0,\n fields=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n **arg_4)"} +{"_id": "doc_4087", "title": "", "text": "def Func():\n \"\"\"\n Creates .rollbar log file for use with rollbar-agent\n \"\"\"\n arg_0 = SETTINGS['agent.log_file']\n if not arg_0.endswith('.rollbar'):\n log.error(\"Provided agent log file does not end with .rollbar, which it must. \"\n \"Using default instead.\")\n arg_0 = DEFAULTS['agent.log_file']\n\n arg_1 = logging.getLogger('rollbar_agent')\n arg_2 = logging.FileHandler(arg_0, 'a', 'utf-8')\n arg_3 = logging.Formatter('%(message)s')\n arg_2.setFormatter(arg_3)\n arg_1.addHandler(arg_2)\n arg_1.setLevel(logging.WARNING)\n return arg_1"} +{"_id": "doc_4088", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a dictionary describing the logged-in user using data from `request.\n\n Try request.rollbar_person first, then 'user', then 'user_id'\n \"\"\"\n if hasattr(arg_0, 'rollbar_person'):\n arg_1 = arg_0.rollbar_person\n try:\n arg_2 = arg_1()\n except TypeError:\n arg_2 = arg_1\n\n if arg_2 and isinstance(arg_2, dict):\n return arg_2\n else:\n return None\n\n if hasattr(arg_0, 'user'):\n arg_3 = arg_0.user\n try:\n arg_4 = arg_3()\n except TypeError:\n arg_4 = arg_3\n\n if not arg_4:\n return None\n elif isinstance(arg_4, dict):\n return arg_4\n else:\n arg_5 = {}\n if getattr(arg_4, 'id', None):\n arg_5['id'] = text(arg_4.id)\n elif getattr(arg_4, 'user_id', None):\n arg_5['id'] = text(arg_4.user_id)\n\n # id is required, so only include username/email if we have an id\n if arg_5.get('id'):\n arg_6 = getattr(arg_4, 'username', None)\n arg_7 = getattr(arg_4, 'email', None)\n arg_5.update({\n 'username': arg_6,\n 'email': arg_7\n })\n return arg_5\n\n if hasattr(arg_0, 'user_id'):\n arg_8 = arg_0.user_id\n try:\n arg_9 = arg_8()\n except TypeError:\n arg_9 = arg_8\n\n if not arg_9:\n return None\n return {'id': text(arg_9)}"} +{"_id": "doc_4089", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Attempts to add information from the lambda context if it exists\n \"\"\"\n global arg_3\n arg_1 = arg_3\n if arg_1 is None:\n return\n try:\n arg_2 = {\n 'lambda': {\n 'remaining_time_in_millis': arg_1.get_remaining_time_in_millis(),\n 'function_name': arg_1.function_name,\n 'function_version': arg_1.function_version,\n 'arn': arg_1.invoked_function_arn,\n 'request_id': arg_1.aws_request_id,\n }\n }\n if 'custom' in arg_0:\n arg_0['custom'] = dict_merge(arg_0['custom'], arg_2)\n else:\n arg_0['custom'] = arg_2\n except Exception as e:\n log.exception(\"Exception while adding lambda context data: %r\", e)\n finally:\n arg_3 = None"} +{"_id": "doc_4090", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Attempts to build request data; if successful, sets the 'request' key on `data`.\n \"\"\"\n try:\n arg_2 = _build_request_data(arg_1)\n except Exception as e:\n log.exception(\"Exception while building request_data for Rollbar payload: %r\", e)\n else:\n if arg_2:\n _filter_ip(arg_2, SETTINGS['capture_ip'])\n arg_0['request'] = arg_2"} +{"_id": "doc_4091", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns True if we should record local variables for the given frame.\n \"\"\"\n # Include the last frames locals\n # Include any frame locals 
that came from a file in the project's root\n return any(((arg_1 == arg_2 - 1),\n ('root' in SETTINGS and (arg_0.get('filename') or '').lower().startswith((SETTINGS['root'] or '').lower()))))"} +{"_id": "doc_4092", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a dictionary containing data from the request.\n Can handle webob or werkzeug-based request objects.\n \"\"\"\n\n # webob (pyramid)\n if WebobBaseRequest and isinstance(arg_0, WebobBaseRequest):\n return _build_webob_request_data(arg_0)\n\n # django\n if DjangoHttpRequest and isinstance(arg_0, DjangoHttpRequest):\n return _build_django_request_data(arg_0)\n\n # django rest framework\n if RestFrameworkRequest and isinstance(arg_0, RestFrameworkRequest):\n return _build_django_request_data(arg_0)\n\n # werkzeug (flask)\n if WerkzeugRequest and isinstance(arg_0, WerkzeugRequest):\n return _build_werkzeug_request_data(arg_0)\n\n # tornado\n if TornadoRequest and isinstance(arg_0, TornadoRequest):\n return _build_tornado_request_data(arg_0)\n\n # bottle\n if BottleRequest and isinstance(arg_0, BottleRequest):\n return _build_bottle_request_data(arg_0)\n\n # Sanic\n if SanicRequest and isinstance(arg_0, SanicRequest):\n return _build_sanic_request_data(arg_0)\n\n # falcon\n if FalconRequest and isinstance(arg_0, FalconRequest):\n return _build_falcon_request_data(arg_0)\n\n # Plain wsgi (should be last)\n if isinstance(arg_0, dict) and 'wsgi.version' in arg_0:\n return _build_wsgi_request_data(arg_0)\n\n return None"} +{"_id": "doc_4093", "title": "", "text": "def Func():\n \"\"\"\n Returns a dictionary containing information about the server environment.\n \"\"\"\n # server environment\n arg_0 = {\n 'host': socket.gethostname(),\n 'pid': os.getpid()\n }\n\n # argv does not always exist in embedded python environments\n arg_1 = getattr(sys, 'argv', None)\n if arg_1:\n arg_0['argv'] = arg_1\n\n for arg_2 in ['branch', 'root']:\n if SETTINGS.get(arg_2):\n arg_0[arg_2] = SETTINGS[arg_2]\n\n return arg_0"} +{"_id": "doc_4094", "title": "", "text": "def Func():\n rollbar.init('ACCESS_TOKEN', environment='test', handler='twisted')\n\n \"\"\"This runs the protocol on port 8000\"\"\"\n arg_0 = arg_1.ServerFactory()\n arg_0.protocol = Echo\n reactor.listenTCP(8000, arg_0)\n reactor.run()"} +{"_id": "doc_4095", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Read into ``buf`` from the device. The number of bytes read will be the\n length of ``buf``.\n\n If ``start`` or ``end`` is provided, then the buffer will be sliced\n as if ``buf[start:end]``. This will not cause an allocation like\n ``buf[start:end]`` will so it saves memory.\n\n :param bytearray buffer: buffer to write into\n :param int start: Index to start writing at\n :param int end: Index to write up to but not include\n \"\"\"\n arg_0.i2c.readfrom_into(arg_0.device_address, arg_1, **arg_2)\n if arg_0._debug:\n print(\"i2c_device.Func:\", [hex(arg_3) for arg_3 in arg_1])"} +{"_id": "doc_4096", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Write the bytes from ``buffer`` to the device. Transmits a stop bit if\n ``stop`` is set.\n\n If ``start`` or ``end`` is provided, then the buffer will be sliced\n as if ``buffer[start:end]``. 
This will not cause an allocation like\n ``buffer[start:end]`` will so it saves memory.\n\n :param bytearray buffer: buffer containing the bytes to Func\n :param int start: Index to start writing from\n :param int end: Index to read up to but not include\n :param bool stop: If true, output an I2C stop condition after the buffer is written\n \"\"\"\n arg_0.i2c.Functo(arg_0.device_address, arg_1, **arg_2)\n if arg_0._debug:\n print(\"i2c_device.Func:\", [hex(arg_3) for arg_3 in arg_1])"} +{"_id": "doc_4097", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *,\n arg_3=0, arg_4=None, arg_5=0, arg_6=None, arg_7=True):\n \"\"\"\n Write the bytes from ``out_buffer`` to the device, then immediately\n reads into ``in_buffer`` from the device. The number of bytes read\n will be the length of ``in_buffer``.\n Transmits a stop bit after the write, if ``stop`` is set.\n\n If ``out_start`` or ``out_end`` is provided, then the output buffer\n will be sliced as if ``out_buffer[out_start:out_end]``. This will\n not cause an allocation like ``buffer[out_start:out_end]`` will so\n it saves memory.\n\n If ``in_start`` or ``in_end`` is provided, then the input buffer\n will be sliced as if ``in_buffer[in_start:in_end]``. This will not\n cause an allocation like ``in_buffer[in_start:in_end]`` will so\n it saves memory.\n\n :param bytearray out_buffer: buffer containing the bytes to write\n :param bytearray in_buffer: buffer containing the bytes to read into\n :param int out_start: Index to start writing from\n :param int out_end: Index to read up to but not include\n :param int in_start: Index to start writing at\n :param int in_end: Index to write up to but not include\n :param bool stop: If true, output an I2C stop condition after the buffer is written\n \"\"\"\n if arg_4 is None:\n arg_4 = len(arg_1)\n if arg_6 is None:\n arg_6 = len(arg_2)\n if hasattr(arg_0.i2c, 'writeto_then_readfrom'):\n if arg_0._debug:\n print(\"i2c_device.writeto_then_readfrom.out_buffer:\",\n [hex(arg_8) for arg_8 in arg_1[arg_3:arg_4]])\n # In linux, at least, this is a special kernel function call\n arg_0.i2c.writeto_then_readfrom(arg_0.device_address, arg_1, arg_2,\n arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6, arg_7=arg_7)\n if arg_0._debug:\n print(\"i2c_device.writeto_then_readfrom.in_buffer:\",\n [hex(arg_8) for arg_8 in arg_2[arg_5:arg_6]])\n else:\n # If we don't have a special implementation, we can fake it with two calls\n arg_0.write(arg_1, start=arg_3, end=arg_4, arg_7=arg_7)\n if arg_0._debug:\n print(\"i2c_device.Func.write.out_buffer:\",\n [hex(arg_8) for arg_8 in arg_1[arg_3:arg_4]])\n arg_0.readinto(arg_2, start=arg_5, end=arg_6)\n if arg_0._debug:\n print(\"i2c_device.Func.readinto.in_buffer:\",\n [hex(arg_8) for arg_8 in arg_2[arg_5:arg_6]])"} +{"_id": "doc_4098", "title": "", "text": "def Func(arg_0, arg_1, arg_2=u''):\n \"\"\"This function returns a Hangul letter by composing the specified chosung, joongsung, and jongsung.\n @param chosung\n @param joongsung\n @param jongsung the terminal Hangul letter. 
This is optional if you do not need a jongsung.\"\"\"\n\n if arg_2 is None: arg_2 = u''\n\n try:\n arg_3 = CHO.index(arg_0)\n arg_4 = JOONG.index(arg_1)\n arg_5 = JONG.index(arg_2)\n except Exception:\n raise NotHangulException('No valid Hangul character index')\n\n return unichr(0xAC00 + arg_3 * NUM_JOONG * NUM_JONG + arg_4 * NUM_JONG + arg_5)"} +{"_id": "doc_4099", "title": "", "text": "def Func(arg_0):\n \"\"\"Check whether this letter contains Jongsung\"\"\"\n if len(arg_0) != 1:\n raise Exception('The target string must be one letter.')\n if not is_hangul(arg_0):\n raise NotHangulException('The target string must be Hangul')\n\n arg_1 = lt.hangul_index(arg_0)\n return arg_1 % NUM_JONG > 0"} +{"_id": "doc_4100", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns true if node is inside the name of an except handler.\"\"\"\n arg_1 = arg_0\n while arg_1 and not isinstance(arg_1.parent, astroid.ExceptHandler):\n arg_1 = arg_1.parent\n\n return arg_1 and arg_1 is arg_1.parent.name"} +{"_id": "doc_4101", "title": "", "text": "def Func(arg_0: arg_1.node_classes.NodeNG) -> bool:\n \"\"\"Return true if given node is inside lambda\"\"\"\n arg_4 = arg_0.parent\n while arg_4 is not None:\n if isinstance(arg_4, arg_1.Lambda):\n return True\n arg_4 = arg_4.parent\n return False"} +{"_id": "doc_4102", "title": "", "text": "def Func(\n arg_0: arg_1.node_classes.NodeNG\n) -> Iterable[arg_1.node_classes.NodeNG]:\n \"\"\"Recursively returns all atoms in nested lists and tuples.\"\"\"\n if isinstance(arg_0, (arg_1.Tuple, arg_1.List)):\n for arg_4 in arg_0.elts:\n for arg_5 in Func(arg_4):\n yield arg_5\n else:\n yield arg_0"} +{"_id": "doc_4103", "title": "", "text": "def Func(arg_0: arg_1.node_classes.NodeNG) -> bool:\n \"\"\"return True if the node is referencing the \"super\" builtin function\n \"\"\"\n if getattr(arg_0, \"name\", None) == \"super\" and arg_0.root().name == BUILTINS_NAME:\n return True\n return False"} +{"_id": "doc_4104", "title": "", "text": "def Func(arg_0: arg_1.node_classes.NodeNG) -> bool:\n \"\"\"return true if the function does nothing but raising an exception\"\"\"\n for arg_4 in arg_0.get_children():\n if isinstance(arg_4, arg_1.Raise):\n return True\n return False"} +{"_id": "doc_4105", "title": "", "text": "def Func(arg_0: arg_1.node_classes.NodeNG) -> bool:\n \"\"\"return true if the given Name node is used in function or lambda\n default argument's value\n \"\"\"\n arg_4 = arg_0.scope()\n if isinstance(arg_4, (arg_1.FunctionDef, arg_1.Lambda)):\n for arg_5 in arg_4.args.defaults:\n for arg_6 in arg_5.nodes_of_class(arg_1.Name):\n if arg_6 is arg_0:\n return True\n return False"} +{"_id": "doc_4106", "title": "", "text": "def Func(arg_0: arg_1.node_classes.NodeNG) -> bool:\n \"\"\"return true if the name is used in function decorator\"\"\"\n arg_4 = arg_0.parent\n while arg_4 is not None:\n if isinstance(arg_4, arg_1.Decorators):\n return True\n if arg_4.is_statement or isinstance(\n arg_4,\n (arg_1.Lambda, scoped_nodes.ComprehensionScope, scoped_nodes.ListComp),\n ):\n break\n arg_4 = arg_4.parent\n return False"} +{"_id": "doc_4107", "title": "", "text": "def Func(\n arg_0: arg_1.node_classes.NodeNG, arg_4: arg_1.node_classes.NodeNG\n) -> bool:\n \"\"\"return True if `frame` is an astroid.Class node with `node` in the\n subtree of its bases attribute\n \"\"\"\n try:\n arg_5 = arg_0.bases\n except AttributeError:\n return False\n for arg_6 in arg_5:\n if arg_4 in arg_6.nodes_of_class(arg_1.Name):\n return True\n return False"} +{"_id": "doc_4108", "title": "", "text": 
"def Func(arg_0: arg_1.node_classes.NodeNG) -> arg_1.node_classes.NodeNG:\n \"\"\"return the higher parent which is not an AssignName, Tuple or List node\n \"\"\"\n while arg_0 and isinstance(arg_0, (arg_1.AssignName, arg_1.Tuple, arg_1.List)):\n arg_0 = arg_0.parent\n return arg_0"} +{"_id": "doc_4109", "title": "", "text": "def Func(*arg_0: arg_1) -> Callable:\n \"\"\"decorator to store messages that are handled by a checker method\"\"\"\n\n def store_messages(arg_2):\n arg_2.checks_msgs = arg_0\n return arg_2\n\n return store_messages"} +{"_id": "doc_4110", "title": "", "text": "def Func(\n arg_0: arg_1.Call, arg_3: arg_4 = None, arg_5: arg_6 = None\n) -> arg_1.Name:\n \"\"\"Returns the specified argument from a function call.\n\n :param astroid.Call call_node: Node representing a function call to check.\n :param int position: position of the argument.\n :param str keyword: the keyword of the argument.\n\n :returns: The node representing the argument, None if the argument is not found.\n :rtype: astroid.Name\n :raises ValueError: if both position and keyword are None.\n :raises NoSuchArgumentError: if no argument at the provided position or with\n the provided keyword.\n \"\"\"\n if arg_3 is None and arg_5 is None:\n raise ValueError(\"Must specify at least one of: position or keyword.\")\n if arg_3 is not None:\n try:\n return arg_0.args[arg_3]\n except IndexError:\n pass\n if arg_5 and arg_0.keywords:\n for arg_7 in arg_0.keywords:\n if arg_7.arg == arg_5:\n return arg_7.value\n\n raise NoSuchArgumentError"} +{"_id": "doc_4111", "title": "", "text": "def Func(arg_0: arg_1.ExceptHandler, arg_3) -> bool:\n \"\"\"\n Check if the given exception handler catches\n the given error_type.\n\n The *handler* parameter is a node, representing an ExceptHandler node.\n The *error_type* can be an exception, such as AttributeError,\n the name of an exception, or it can be a tuple of errors.\n The function will return True if the handler catches any of the\n given errors.\n \"\"\"\n\n def stringify_error(arg_4):\n if not isinstance(arg_4, str):\n return arg_4.__name__\n return arg_4\n\n if not isinstance(arg_3, tuple):\n arg_3 = (arg_3,) # type: ignore\n arg_5 = {stringify_error(arg_4) for arg_4 in arg_3} # type: ignore\n if not arg_0.type:\n return True\n return arg_0.catch(arg_5)"} +{"_id": "doc_4112", "title": "", "text": "def Func(arg_0: arg_1.FunctionDef) -> bool:\n \"\"\" Detect if the given function node is decorated with a property. 
\"\"\"\n if not arg_0.decorators:\n return False\n for arg_3 in arg_0.decorators.nodes:\n if not isinstance(arg_3, arg_1.Name):\n continue\n try:\n if _is_property_decorator(arg_3):\n return True\n except arg_1.InferenceError:\n pass\n return False"} +{"_id": "doc_4113", "title": "", "text": "def Func(arg_0: arg_1.FunctionDef, arg_3: arg_4[arg_5]) -> bool:\n \"\"\"Determine if the `func` node has a decorator with the qualified name `qname`.\"\"\"\n arg_6 = arg_0.decorators.nodes if arg_0.decorators else []\n for arg_7 in arg_6:\n try:\n if any(\n arg_8 is not None and arg_8.qname() in arg_3 for arg_8 in arg_7.infer()\n ):\n return True\n except arg_1.InferenceError:\n continue\n return False"} +{"_id": "doc_4114", "title": "", "text": "def Func(\n arg_0: arg_1.node_classes.NodeNG\n) -> Union[arg_1.ExceptHandler, arg_1.TryExcept]:\n \"\"\"Return the ExceptHandler or the TryExcept node in which the node is.\"\"\"\n arg_4 = arg_0\n arg_5 = (arg_1.ExceptHandler, arg_1.TryExcept)\n while arg_4 and not isinstance(arg_4.parent, arg_5):\n arg_4 = arg_4.parent\n\n if arg_4 and isinstance(arg_4.parent, arg_5):\n return arg_4.parent\n return None"} +{"_id": "doc_4115", "title": "", "text": "def Func(\n arg_0: arg_1.node_classes.NodeNG, arg_4=None\n) -> Optional[arg_1.node_classes.NodeNG]:\n \"\"\"Return the inferred value for the given node.\n\n Return None if inference failed or if there is some ambiguity (more than\n one node has been inferred).\n \"\"\"\n try:\n arg_5 = arg_0.infer(arg_4=arg_4)\n arg_6 = next(arg_5)\n except arg_1.InferenceError:\n return None\n try:\n next(arg_5)\n return None # None if there is ambiguity on the inferred node\n except arg_1.InferenceError:\n return None # there is some kind of ambiguity\n except StopIteration:\n return arg_6"} +{"_id": "doc_4116", "title": "", "text": "def Func(arg_0: arg_1.node_classes.NodeNG) -> Optional[type]:\n \"\"\"Return the inferred type for `node`\n\n If there is more than one possible type, or if inferred type is Uninferable or None,\n return None\n \"\"\"\n # check there is only one possible type for the assign node. 
Else we\n # don't handle it for now\n arg_4 = set()\n try:\n for arg_5 in arg_0.infer():\n if arg_5 == arg_1.Uninferable or is_none(arg_5):\n continue\n arg_4.add(arg_5)\n if len(arg_4) > 1:\n return None\n except arg_1.InferenceError:\n return None\n return arg_4.pop() if arg_4 else None"} +{"_id": "doc_4117", "title": "", "text": "def Func(arg_0: arg_1.FunctionDef) -> bool:\n \"\"\"Check if the given function node is a singledispatch function.\"\"\"\n\n arg_3 = (\n \"functools.singledispatch\",\n \"singledispatch.singledispatch\",\n )\n\n if not isinstance(arg_0, arg_1.FunctionDef):\n return False\n\n arg_4 = arg_0.decorators.nodes if arg_0.decorators else []\n for arg_5 in arg_4:\n # func.register are function calls\n if not isinstance(arg_5, arg_1.Call):\n continue\n\n arg_6 = arg_5.func\n if not isinstance(arg_6, arg_1.Attribute) or arg_6.attrname != \"register\":\n continue\n\n try:\n arg_7 = next(arg_6.expr.infer())\n except arg_1.InferenceError:\n continue\n\n if isinstance(arg_7, arg_1.FunctionDef):\n # pylint: disable=redundant-keyword-arg; some flow inference goes wrong here\n return decorated_with(arg_7, arg_3)\n\n return False"} +{"_id": "doc_4118", "title": "", "text": "def Func(arg_0):\n \"\"\"Split the names of the given module into subparts\n\n For example,\n Func('pylint.checkers.ImportsChecker')\n returns\n ['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']\n \"\"\"\n arg_1 = arg_0.split(\".\")\n return [\".\".join(arg_1[0 : arg_2 + 1]) for arg_2 in range(len(arg_1))]"} +{"_id": "doc_4119", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a prepared module name from the given import node\n\n In the case of relative imports, this will return the\n absolute qualified module name, which might be useful\n for debugging. 
Otherwise, the initial module name\n is returned unchanged.\n \"\"\"\n if isinstance(arg_0, astroid.ImportFrom):\n if arg_0.level:\n arg_2 = arg_0.root()\n if isinstance(arg_2, astroid.Module):\n arg_1 = arg_2.relative_to_absolute_name(\n arg_1, level=arg_0.level\n )\n return arg_1"} +{"_id": "doc_4120", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"return a string which represents imports as a tree\"\"\"\n arg_2 = []\n arg_3 = arg_0.items()\n for arg_4, (arg_5, (arg_6, arg_7)) in enumerate(sorted(arg_3, key=lambda x: x[0])):\n if not arg_7:\n arg_7 = \"\"\n else:\n arg_7 = \"(%s)\" % \",\".join(sorted(arg_7))\n if arg_1 is None:\n arg_2.append(\"%s %s\" % (arg_5, arg_7))\n arg_8 = \" \"\n else:\n arg_2.append(r\"%s\\-%s %s\" % (arg_1, arg_5, arg_7))\n if arg_4 == len(arg_3) - 1:\n arg_8 = \"%s \" % arg_1\n else:\n arg_8 = \"%s| \" % arg_1\n if arg_6:\n arg_2.append(Func(arg_6, arg_8))\n return \"\\n\".join(arg_2)"} +{"_id": "doc_4121", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"triggered when an import statement is seen\"\"\"\n arg_0._check_reimport(arg_1)\n arg_0._check_import_as_rename(arg_1)\n\n arg_2 = arg_1.root()\n arg_3 = [arg_4 for arg_4, _ in arg_1.names]\n if len(arg_3) >= 2:\n arg_0.add_message(\"multiple-imports\", args=\", \".join(arg_3), arg_1=arg_1)\n\n for arg_4 in arg_3:\n arg_0._check_deprecated_module(arg_1, arg_4)\n arg_0._check_preferred_module(arg_1, arg_4)\n arg_5 = arg_0._get_imported_module(arg_1, arg_4)\n if isinstance(arg_1.parent, astroid.Module):\n # Allow imports nested\n arg_0._check_position(arg_1)\n if isinstance(arg_1.scope(), astroid.Module):\n arg_0._record_import(arg_1, arg_5)\n\n if arg_5 is None:\n continue\n\n arg_0._check_relative_import(arg_2, arg_1, arg_5, arg_4)\n arg_0._add_imported_module(arg_1, arg_5.name)"} +{"_id": "doc_4122", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"triggered when a from statement is seen\"\"\"\n arg_2 = arg_1.modname\n arg_3 = arg_0._get_imported_module(arg_1, arg_2)\n\n arg_0._check_import_as_rename(arg_1)\n arg_0._check_misplaced_future(arg_1)\n arg_0._check_deprecated_module(arg_1, arg_2)\n arg_0._check_preferred_module(arg_1, arg_2)\n arg_0._check_wildcard_imports(arg_1, arg_3)\n arg_0._check_same_line_imports(arg_1)\n arg_0._check_reimport(arg_1, arg_2=arg_2, level=arg_1.level)\n\n if isinstance(arg_1.parent, astroid.Module):\n # Allow imports nested\n arg_0._check_position(arg_1)\n if isinstance(arg_1.scope(), astroid.Module):\n arg_0._record_import(arg_1, arg_3)\n if arg_3 is None:\n return\n arg_4 = arg_1.root()\n arg_0._check_relative_import(arg_4, arg_1, arg_3, arg_2)\n\n for arg_5, arg_6 in arg_1.names:\n if arg_5 != \"*\":\n arg_0._add_imported_module(arg_1, \"%s.%s\" % (arg_3.name, arg_5))\n else:\n arg_0._add_imported_module(arg_1, arg_3.name)"} +{"_id": "doc_4123", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check `node` import or importfrom node position is correct\n\n Send a message if `node` comes before another instruction\n \"\"\"\n # if a first non-import instruction has already been encountered,\n # it means the import comes after it and therefore is not well placed\n if arg_0._first_non_import_node:\n arg_0.add_message(\"wrong-import-position\", arg_1=arg_1, args=arg_1.as_string())"} +{"_id": "doc_4124", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks imports of module `node` are grouped by category\n\n Imports must follow this order: standard, 3rd party, local\n \"\"\"\n arg_2 = []\n arg_3 = []\n arg_4 = []\n # need of a list that holds 
third or first party ordered import\n arg_5 = []\n arg_6 = []\n arg_7 = []\n arg_8 = []\n arg_9 = []\n arg_10 = isort.SortImports(\n file_contents=\"\",\n known_third_party=arg_0.config.known_third_party,\n known_standard_library=arg_0.config.known_standard_library,\n )\n for arg_11, arg_12 in arg_0._imports_stack:\n if arg_12.startswith(\".\"):\n arg_13 = \".\" + arg_12.split(\".\")[1]\n else:\n arg_13 = arg_12.split(\".\")[0]\n arg_14 = not isinstance(arg_11.parent, astroid.Module)\n arg_15 = not arg_0.linter.is_message_enabled(\n \"wrong-import-order\", arg_11.fromlineno\n )\n arg_16 = arg_10.place_module(arg_13)\n arg_17 = (arg_11, arg_13)\n if arg_16 in (\"FUTURE\", \"STDLIB\"):\n arg_2.append(arg_17)\n arg_18 = (\n arg_7\n or arg_8\n or arg_9\n )\n if arg_0._is_fallback_import(arg_11, arg_18):\n continue\n if arg_18 and not arg_14:\n arg_0.add_message(\n \"wrong-import-order\",\n arg_11=arg_11,\n args=(\n 'standard import \"%s\"' % arg_11.as_string(),\n '\"%s\"' % arg_18[0][0].as_string(),\n ),\n )\n elif arg_16 == \"THIRDPARTY\":\n arg_3.append(arg_17)\n arg_5.append(arg_17)\n if not arg_14 and not arg_15:\n arg_7.append(arg_17)\n arg_18 = arg_8 or arg_9\n if arg_18 and not arg_14:\n arg_0.add_message(\n \"wrong-import-order\",\n arg_11=arg_11,\n args=(\n 'third party import \"%s\"' % arg_11.as_string(),\n '\"%s\"' % arg_18[0][0].as_string(),\n ),\n )\n elif arg_16 == \"FIRSTPARTY\":\n arg_4.append(arg_17)\n arg_5.append(arg_17)\n if not arg_14 and not arg_15:\n arg_8.append(arg_17)\n arg_18 = arg_9\n if arg_18 and not arg_14:\n arg_0.add_message(\n \"wrong-import-order\",\n arg_11=arg_11,\n args=(\n 'first party import \"%s\"' % arg_11.as_string(),\n '\"%s\"' % arg_18[0][0].as_string(),\n ),\n )\n elif arg_16 == \"LOCALFOLDER\":\n arg_6.append((arg_11, arg_13))\n if not arg_14 and not arg_15:\n arg_9.append((arg_11, arg_13))\n return arg_2, arg_5, arg_6"} +{"_id": "doc_4125", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"notify an imported module, used to analyze dependencies\"\"\"\n arg_3 = arg_1.root().file\n arg_4 = arg_1.root().name\n arg_5 = os.path.splitext(os.path.basename(arg_3))[0]\n\n try:\n arg_2 = astroid.modutils.get_module_part(\n arg_2, arg_3\n )\n except ImportError:\n pass\n\n if arg_4 == arg_2:\n arg_0.add_message(\"import-self\", arg_1=arg_1)\n\n elif not astroid.modutils.is_standard_module(arg_2):\n # if this is not a package __init__ module\n if arg_5 != \"__init__\" and arg_4 not in arg_0._module_pkg:\n # record the module's parent, or the module itself if this is\n # a top level module, as the package it belongs to\n arg_0._module_pkg[arg_4] = arg_4.rsplit(\".\", 1)[0]\n\n # handle dependencies\n arg_7 = arg_0.stats[\"dependencies\"].setdefault(\n arg_2, set()\n )\n if arg_4 not in arg_7:\n arg_7.add(arg_4)\n\n # update import graph\n arg_0.import_graph[arg_4].add(arg_2)\n if not arg_0.linter.is_message_enabled(\"cyclic-import\", line=arg_1.lineno):\n arg_0._excluded_edges[arg_4].add(arg_2)"} +{"_id": "doc_4126", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"check if the module is deprecated\"\"\"\n for arg_3 in arg_0.config.deprecated_modules:\n if arg_2 == arg_3 or arg_2.startswith(arg_3 + \".\"):\n arg_0.add_message(\"deprecated-module\", arg_1=arg_1, args=arg_2)"} +{"_id": "doc_4127", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"check if the module has a preferred replacement\"\"\"\n if arg_2 in arg_0.preferred_modules:\n arg_0.add_message(\n \"preferred-module\",\n arg_1=arg_1,\n 
args=(arg_0.preferred_modules[arg_2], arg_2),\n )"} +{"_id": "doc_4128", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"return a verbatim layout for displaying dependencies\"\"\"\n arg_4 = _make_tree_defs(arg_0._external_dependencies_info().items())\n if not arg_4:\n raise EmptyReportError()\n arg_5 = _repr_tree_defs(arg_4)\n arg_1.append(VerbatimText(arg_5))"} +{"_id": "doc_4129", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"build the internal or the external dependency graph\"\"\"\n arg_2 = collections.defaultdict(set)\n for arg_3, arg_4 in arg_0.stats[\"dependencies\"].items():\n for arg_5 in arg_4:\n arg_6 = arg_0._module_pkg.get(arg_5, arg_5)\n arg_7 = arg_3.startswith(arg_6)\n if arg_7 and arg_1 or not arg_7 and not arg_1:\n arg_2[arg_3].add(arg_5)\n return arg_2"} +{"_id": "doc_4130", "title": "", "text": "def Func():\n \"\"\"\n Read config file and return list of options\n \"\"\"\n arg_0 = []\n arg_1 = os.environ.get(\"HOME\", \"\")\n if arg_1:\n arg_2 = os.path.join(arg_1, RCFILE)\n try:\n arg_0 = open(arg_2).read().split()\n except IOError:\n pass # ignore if no config file found\n return arg_0"} +{"_id": "doc_4131", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return true if the node should be treated\n \"\"\"\n arg_2 = get_visibility(getattr(arg_1, \"name\", arg_1))\n return not arg_0.__mode & VIS_MOD[arg_2]"} +{"_id": "doc_4132", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"get callbacks from handler for the visited node\"\"\"\n arg_2 = arg_1.__class__\n arg_3 = arg_0._cache.get(arg_2)\n if arg_3 is None:\n arg_4 = arg_0.handler\n arg_5 = arg_2.__name__.lower()\n arg_6 = getattr(\n arg_4, \"visit_%s\" % arg_5, getattr(arg_4, \"visit_default\", None)\n )\n arg_7 = getattr(\n arg_4, \"leave_%s\" % arg_5, getattr(arg_4, \"leave_default\", None)\n )\n arg_0._cache[arg_2] = (arg_6, arg_7)\n else:\n arg_6, arg_7 = arg_3\n return arg_6, arg_7"} +{"_id": "doc_4133", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"Check the consistency of msgid.\n\n msg ids for a checker should be a string of len 4, where the two first\n characters are the checker id and the two last the msg id in this\n checker.\n\n :raises InvalidMessageError: If the checker id in the messages are not\n always the same. 
\"\"\"\n arg_1 = None\n arg_2 = []\n for arg_3 in arg_0.messages:\n if arg_1 is not None and arg_1 != arg_3.msgid[1:3]:\n arg_4 = \"Inconsistent checker part in message id \"\n arg_4 += \"'{}' (expected 'x{checker_id}xx' \".format(\n arg_3.msgid, arg_1=arg_1\n )\n arg_4 += \"because we already had {existing_ids}).\".format(\n arg_2=arg_2\n )\n raise InvalidMessageError(arg_4)\n arg_1 = arg_3.msgid[1:3]\n arg_2.append(arg_3.msgid)"} +{"_id": "doc_4134", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Check that a datetime was infered.\n If so, emit boolean-datetime warning.\n \"\"\"\n try:\n arg_2 = next(arg_1.infer())\n except astroid.InferenceError:\n return\n if isinstance(arg_2, Instance) and arg_2.qname() == \"datetime.time\":\n arg_0.add_message(\"boolean-datetime\", arg_1=arg_1)"} +{"_id": "doc_4135", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n arg_0.messages.append(\n {\n \"type\": arg_1.category,\n \"module\": arg_1.module,\n \"obj\": arg_1.obj,\n \"line\": arg_1.line,\n \"column\": arg_1.column,\n \"path\": arg_1.path,\n \"symbol\": arg_1.symbol,\n \"message\": html.escape(arg_1.msg or \"\", quote=False),\n \"message-id\": arg_1.msg_id,\n }\n )"} +{"_id": "doc_4136", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Launch layouts display\"\"\"\n print(json.dumps(arg_0.messages, indent=4), file=arg_0.out)"} +{"_id": "doc_4137", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"get title for objects\"\"\"\n arg_2 = arg_1.name\n if arg_0.module_names:\n arg_2 = \"%s.%s\" % (arg_1.root().name, arg_2)\n return arg_2"} +{"_id": "doc_4138", "title": "", "text": "def Func(arg_0):\n \"\"\"set different default options with _default dictionary\"\"\"\n arg_0.module_names = arg_0._set_option(arg_0.config.module_names)\n arg_2 = arg_0._set_option(arg_0.config.all_ancestors)\n arg_3 = arg_0._set_option(arg_0.config.all_associated)\n arg_4, arg_5 = (0, 0)\n if arg_2:\n arg_4 = -1\n if arg_3:\n arg_5 = -1\n if arg_0.config.show_ancestors is not None:\n arg_4 = arg_0.config.show_ancestors\n if arg_0.config.show_associated is not None:\n arg_5 = arg_0.config.show_associated\n arg_0.anc_level, arg_0.association_level = arg_4, arg_5"} +{"_id": "doc_4139", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"visit one class and add it to diagram\"\"\"\n arg_0.linker.visit(arg_1)\n arg_0.classdiagram.add_object(arg_0.get_title(arg_1), arg_1)"} +{"_id": "doc_4140", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"return ancestor nodes of a class node\"\"\"\n if arg_2 == 0:\n return\n for arg_3 in arg_1.ancestors(recurs=False):\n if not arg_0.show_node(arg_3):\n continue\n yield arg_3"} +{"_id": "doc_4141", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"extract recursively classes related to klass_node\"\"\"\n if arg_0.classdiagram.has_node(arg_1) or not arg_0.show_node(arg_1):\n return\n arg_0.add_class(arg_1)\n\n for arg_4 in arg_0.get_ancestors(arg_1, arg_2):\n arg_0.Func(arg_4, arg_2 - 1, arg_3)\n\n for arg_5 in arg_0.get_associated(arg_1, arg_3):\n arg_0.Func(arg_5, arg_2, arg_3 - 1)"} +{"_id": "doc_4142", "title": "", "text": "def Func(arg_0, arg_1): # pylint: disable=unused-argument\n \"\"\"leave the pyreverse.utils.Project node\n\n return the generated diagram definition\n \"\"\"\n if arg_0.pkgdiagram:\n return arg_0.pkgdiagram, arg_0.classdiagram\n return (arg_0.classdiagram,)"} +{"_id": "doc_4143", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"visit 
astroid.ImportFrom and catch modules for package diagram\n \"\"\"\n if arg_0.pkgdiagram:\n arg_0.pkgdiagram.add_from_depend(arg_1, arg_1.modname)"} +{"_id": "doc_4144", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"return a class diagram definition for the given klass and its\n related klasses\n \"\"\"\n\n arg_0.classdiagram = ClassDiagram(arg_2, arg_0.config.mode)\n if len(arg_1.modules) > 1:\n arg_4, arg_2 = arg_2.rsplit(\".\", 1)\n arg_4 = arg_1.get_module(arg_4)\n else:\n arg_4 = arg_1.modules[0]\n arg_2 = arg_2.split(\".\")[-1]\n arg_2 = next(arg_4.ilookup(arg_2))\n\n arg_5, arg_6 = arg_0._get_levels()\n arg_0.extract_classes(arg_2, arg_5, arg_6)\n return arg_0.classdiagram"} +{"_id": "doc_4145", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get the diagrams configuration data\n\n :param project:The pyreverse project\n :type project: pyreverse.utils.Project\n :param linker: The linker\n :type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)\n\n :returns: The list of diagram definitions\n :rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)\n \"\"\"\n\n # read and interpret diagram definitions (Diadefs)\n arg_3 = []\n arg_4 = ClassDiadefGenerator(arg_2, arg_0)\n for arg_5 in arg_0.config.classes:\n arg_3.append(arg_4.class_diagram(arg_1, arg_5))\n if not arg_3:\n arg_3 = DefaultDiadefGenerator(arg_2, arg_0).visit(arg_1)\n for arg_6 in arg_3:\n arg_6.extract_relationships()\n return arg_3"} +{"_id": "doc_4146", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check if the given owner should be ignored\n\n This will verify if the owner's module is in *ignored_modules*\n or the owner's module fully qualified name is in *ignored_modules*\n or if the *ignored_modules* contains a pattern which catches\n the fully qualified name of the module.\n\n Also, similar checks are done for the owner itself, if its name\n matches any name from the *ignored_classes* or if its qualified\n name can be found in *ignored_classes*.\n \"\"\"\n arg_3 = set(arg_3)\n arg_4 = arg_0.root().name\n arg_5 = arg_0.root().qname()\n if any(\n arg_4 in arg_3\n or arg_5 in arg_3\n or fnmatch.fnmatch(arg_5, arg_6)\n for arg_6 in arg_3\n ):\n return True\n\n arg_2 = set(arg_2)\n if hasattr(arg_0, \"qname\"):\n arg_7 = arg_0.qname()\n else:\n arg_7 = \"\"\n return any(arg_6 in (arg_1, arg_7) for arg_6 in arg_2)"} +{"_id": "doc_4147", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check if the given node has a parent of the given type.\"\"\"\n arg_3 = arg_0.parent\n while not isinstance(arg_3, arg_1) and arg_2.parent_of(arg_3):\n arg_3 = arg_3.parent\n return isinstance(arg_3, arg_1)"} +{"_id": "doc_4148", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the given name is used as a variadic argument.\"\"\"\n return any(\n arg_2.value == arg_0 or arg_2.value.parent_of(arg_0)\n for arg_2 in arg_1\n )"} +{"_id": "doc_4149", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check that the given uninferable Call node does not\n call an actual function.\n \"\"\"\n if not isinstance(arg_1.func, astroid.Attribute):\n return\n\n # Look for properties. First, obtain\n # the lhs of the Attribute node and search the attribute\n # there. 
If that attribute is a property or a subclass of properties,\n # then most likely it's not callable.\n\n # TODO: since astroid doesn't understand descriptors very well\n # we will not handle them here, right now.\n\n arg_2 = arg_1.func.expr\n arg_3 = safe_infer(arg_2)\n if (\n arg_3 is None\n or arg_3 is astroid.Uninferable\n or not isinstance(arg_3, astroid.Instance)\n ):\n return\n\n try:\n arg_4 = arg_3._proxied.getattr(arg_1.func.attrname)\n except exceptions.NotFoundError:\n return\n\n for arg_5 in arg_4:\n if arg_5 is astroid.Uninferable:\n continue\n if not isinstance(arg_5, astroid.FunctionDef):\n continue\n\n # Decorated, see if it is decorated with a property.\n # Also, check the returns and see if they are callable.\n if decorated_with_property(arg_5):\n\n try:\n arg_6 = all(\n return_node.callable() or return_node is astroid.Uninferable\n for return_node in arg_5.infer_call_result(arg_1)\n )\n except astroid.InferenceError:\n continue\n\n if not arg_6:\n arg_0.add_message(\n \"not-callable\", arg_1=arg_1, args=arg_1.func.as_string()\n )\n break"} +{"_id": "doc_4150", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Detect TypeErrors for unary operands.\"\"\"\n\n for arg_2 in arg_1.type_errors():\n # Let the error customize its output.\n arg_0.add_message(\"invalid-unary-operand-type\", args=str(arg_2), arg_1=arg_1)"} +{"_id": "doc_4151", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"visit an astroid.AssignName node\n\n handle locals_type\n \"\"\"\n # avoid double parsing done by different Linkers.visit\n # running over the same project:\n if hasattr(arg_1, \"_handled\"):\n return\n arg_1._handled = True\n if arg_1.name in arg_1.frame():\n arg_3 = arg_1.frame()\n else:\n # the name has been defined as 'global' in the frame and belongs\n # there.\n arg_3 = arg_1.root()\n try:\n if not hasattr(arg_3, \"locals_type\"):\n # If the frame doesn't have a locals_type yet,\n # it means it wasn't yet visited. Visit it now\n # to add what's missing from it.\n if isinstance(arg_3, astroid.ClassDef):\n arg_0.visit_classdef(arg_3)\n elif isinstance(arg_3, astroid.FunctionDef):\n arg_0.visit_functiondef(arg_3)\n else:\n arg_0.visit_module(arg_3)\n\n arg_4 = arg_3.locals_type[arg_1.name]\n arg_5 = set(arg_1.infer())\n arg_3.locals_type[arg_1.name] = list(set(arg_4) | arg_5)\n except astroid.InferenceError:\n pass"} +{"_id": "doc_4152", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"handle an astroid.assignattr node\n\n handle instance_attrs_type\n \"\"\"\n try:\n arg_2 = set(arg_0.infer())\n arg_3 = set(arg_1.instance_attrs_type[arg_0.attrname])\n arg_1.instance_attrs_type[arg_0.attrname] = list(arg_3 | arg_2)\n except astroid.InferenceError:\n pass"} +{"_id": "doc_4153", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"return true if the module should be added to dependencies\"\"\"\n arg_3 = os.path.dirname(arg_0.project.path)\n if arg_1 == arg_2:\n return 0\n if modutils.is_standard_module(arg_2, (arg_3,)):\n return 1\n return 0"} +{"_id": "doc_4154", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"colorize message by wrapping it with ansi escape codes\n\n :type msg: str or unicode\n :param msg: the message string to colorize\n\n :type color: str or None\n :param color:\n the color identifier (see `ANSI_COLORS` for available values)\n\n :type style: str or None\n :param style:\n style string (see `ANSI_COLORS` for available values). 
To get\n several style effects at the same time, use a comma as separator.\n\n :raise KeyError: if a nonexistent color or style identifier is given\n\n :rtype: str or unicode\n :return: the ansi escaped string\n \"\"\"\n # If both color and style are not defined, then leave the text as is\n if arg_1 is None and arg_2 is None:\n return arg_0\n arg_3 = _get_ansi_code(arg_1, arg_2)\n # If invalid (or unknown) color, don't wrap msg with ansi codes\n if arg_3:\n return \"%s%s%s\" % (arg_3, arg_0, ANSI_RESET)\n return arg_0"} +{"_id": "doc_4155", "title": "", "text": "def Func(arg_0):\n \"\"\"Register the reporter classes with the linter.\"\"\"\n arg_0.Func_reporter(TextReporter)\n arg_0.Func_reporter(ParseableTextReporter)\n arg_0.Func_reporter(VSTextReporter)\n arg_0.Func_reporter(ColorizedTextReporter)"} +{"_id": "doc_4156", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"manage message of different types, and colorize output\n using ansi escape codes\n \"\"\"\n if arg_1.module not in arg_0._modules:\n arg_2, arg_3 = arg_0._get_decoration(\"S\")\n if arg_1.module:\n arg_4 = colorize_ansi(\n \"************* Module %s\" % arg_1.module, arg_2, arg_3\n )\n else:\n arg_4 = colorize_ansi(\"************* %s\" % arg_1.module, arg_2, arg_3)\n arg_0.writeln(arg_4)\n arg_0._modules.add(arg_1.module)\n arg_2, arg_3 = arg_0._get_decoration(arg_1.C)\n\n arg_1 = arg_1._replace(\n **{\n attr: colorize_ansi(getattr(arg_1, attr), arg_2, arg_3)\n for attr in (\"msg\", \"symbol\", \"category\", \"C\")\n }\n )\n arg_0.write_message(arg_1)"} +{"_id": "doc_4157", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"open a vcg graph\n \"\"\"\n arg_0._stream.write(\"%sgraph:{\\n\" % arg_0._indent)\n arg_0._inc_indent()\n arg_0._write_attributes(GRAPH_ATTRS, **arg_1)"} +{"_id": "doc_4158", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"draw a Func\n \"\"\"\n arg_0._stream.write('%sFunc: {title:\"%s\"' % (arg_0._indent, arg_1))\n arg_0._write_attributes(NODE_ATTRS, **arg_2)\n arg_0._stream.write(\"}\\n\")"} +{"_id": "doc_4159", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"\", **arg_4):\n \"\"\"draw an Func from a node to another.\n \"\"\"\n arg_0._stream.write(\n '%s%sFunc: {sourcename:\"%s\" targetname:\"%s\"'\n % (arg_0._indent, arg_3, arg_1, arg_2)\n )\n arg_0._write_attributes(EDGE_ATTRS, **arg_4)\n arg_0._stream.write(\"}\\n\")"} +{"_id": "doc_4160", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Check the new string formatting. \"\"\"\n # TODO: skip (for now) format nodes which don't have\n # an explicit string on the left side of the format operation.\n # We do this because our inference engine can't properly handle\n # redefinitions of the original string.\n # For more details, see issue 287.\n #\n # Note that there may not be any left side at all, if the format method\n # has been assigned to another variable. See issue 351. 
For example:\n #\n # fmt = 'some string {}'.format\n # fmt('arg')\n if isinstance(arg_1.func, astroid.Attribute) and not isinstance(\n arg_1.func.expr, astroid.Const\n ):\n return\n if arg_1.starargs or arg_1.kwargs:\n return\n try:\n arg_3 = next(arg_2.bound.infer())\n except astroid.InferenceError:\n return\n if not (isinstance(arg_3, astroid.Const) and isinstance(arg_3.value, str)):\n return\n try:\n arg_4 = CallSite.from_call(arg_1)\n except astroid.InferenceError:\n return\n\n try:\n arg_5, arg_6, arg_7 = utils.parse_format_method_string(\n arg_3.value\n )\n except utils.IncompleteFormatString:\n arg_0.add_message(\"bad-format-string\", arg_1=arg_1)\n return\n\n arg_8 = arg_4.positional_arguments\n arg_9 = arg_4.keyword_arguments\n arg_10 = {arg_12[0] for arg_12 in arg_5 if isinstance(arg_12[0], str)}\n if arg_6 and arg_7:\n arg_0.add_message(\"format-combined-specification\", arg_1=arg_1)\n return\n\n arg_11 = False\n # Consider \"{[0]} {[1]}\" as num_args.\n arg_6 += sum(1 for arg_12 in arg_10 if arg_12 == \"\")\n if arg_10:\n for arg_12 in arg_10:\n if arg_12 and arg_12 not in arg_9:\n arg_0.add_message(\n \"missing-format-argument-key\", arg_1=arg_1, args=(arg_12,)\n )\n for arg_12 in arg_9:\n if arg_12 not in arg_10:\n arg_0.add_message(\n \"unused-format-string-argument\", arg_1=arg_1, args=(arg_12,)\n )\n # num_args can be 0 if manual_pos is not.\n arg_6 = arg_6 or arg_7\n if arg_8 or arg_6:\n arg_13 = any(True for arg_12 in arg_10 if arg_12 == \"\")\n if arg_9 or arg_13:\n # Verify the required number of positional arguments\n # only if the .format got at least one keyword argument.\n # This means that the format strings accepts both\n # positional and named fields and we should warn\n # when one of the them is missing or is extra.\n arg_11 = True\n else:\n arg_11 = True\n if arg_11:\n # num_args can be 0 if manual_pos is not.\n arg_6 = arg_6 or arg_7\n if len(arg_8) > arg_6:\n arg_0.add_message(\"too-many-format-args\", arg_1=arg_1)\n elif len(arg_8) < arg_6:\n arg_0.add_message(\"too-few-format-args\", arg_1=arg_1)\n\n arg_0._detect_vacuous_formatting(arg_1, arg_8)\n arg_0.Func_specifiers(arg_1, arg_5, arg_9)"} +{"_id": "doc_4161", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"check for bad escapes in a non-raw string.\n\n prefix: lowercase string of eg 'ur' string prefix markers.\n string_body: the un-parsed body of the string, not including the quote\n marks.\n start_row: integer line number in the source.\n \"\"\"\n # Walk through the string; if we see a backslash then escape the next\n # character, and skip over it. 
If we see a non-escaped character,\n # alert, and continue.\n #\n # Accept a backslash when it escapes a backslash, or a quote, or\n # end-of-line, or one of the letters that introduce a special escape\n # sequence \n #\n # TODO(mbp): Maybe give a separate warning about the rarely-used\n # \\a \\b \\v \\f?\n #\n # TODO(mbp): We could give the column of the problem character, but\n # add_message doesn't seem to have a way to pass it through at present.\n arg_4 = 0\n while True:\n arg_4 = arg_2.find(\"\\\\\", arg_4)\n if arg_4 == -1:\n break\n # There must be a next character; having a backslash at the end\n # of the string would be a SyntaxError.\n arg_5 = arg_2[arg_4 + 1]\n arg_6 = arg_2[arg_4 : arg_4 + 2]\n if arg_5 in arg_0.UNICODE_ESCAPE_CHARACTERS:\n if \"u\" in arg_1:\n pass\n elif (_PY3K or arg_0._unicode_literals) and \"b\" not in arg_1:\n pass # unicode by default\n else:\n arg_0.add_message(\n \"anomalous-unicode-escape-in-string\",\n line=arg_3,\n args=(arg_6,),\n )\n elif arg_5 not in arg_0.ESCAPE_CHARACTERS:\n arg_0.add_message(\n \"anomalous-backslash-in-string\", line=arg_3, args=(arg_6,)\n )\n # Whether it was a valid escape or not, backslash followed by\n # another character can always be consumed whole: the second\n # character can never be the start of a new backslash escape.\n arg_4 += 2"} +{"_id": "doc_4162", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"display a section as text\n \"\"\"\n arg_0.section += 1\n arg_0.writeln()\n arg_0.format_children(arg_1)\n arg_0.section -= 1\n arg_0.writeln()"} +{"_id": "doc_4163", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Display an evaluation section as a text.\"\"\"\n arg_0.section += 1\n arg_0.format_children(arg_1)\n arg_0.section -= 1\n arg_0.writeln()"} +{"_id": "doc_4164", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Register a MessageDefinition with consistency in mind.\n\n :param MessageDefinition message: The message definition being added.\n \"\"\"\n arg_0._check_id_and_symbol_consistency(arg_1.msgid, arg_1.symbol)\n arg_0._check_symbol(arg_1.msgid, arg_1.symbol)\n arg_0._check_msgid(arg_1.msgid, arg_1.symbol)\n for arg_2 in arg_1.old_names:\n arg_0._check_symbol(arg_1.msgid, arg_2[1])\n arg_0._messages_definitions[arg_1.symbol] = arg_1\n arg_0._register_alternative_name(arg_1, arg_1.msgid, arg_1.symbol)\n for arg_5, arg_6 in arg_1.old_names:\n arg_0._register_alternative_name(arg_1, arg_5, arg_6)\n arg_0._msgs_by_category[arg_1.msgid[0]].append(arg_1.msgid)"} +{"_id": "doc_4165", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check that a symbol is not already used. 
\"\"\"\n arg_3 = arg_0._messages_definitions.get(arg_2)\n if arg_3:\n arg_0._raise_duplicate_msg_id(arg_2, arg_1, arg_3.msgid)\n else:\n arg_4 = None\n arg_5 = arg_0._alternative_names.get(arg_2)\n if arg_5:\n if arg_5.symbol == arg_2:\n arg_4 = arg_5.msgid\n else:\n for arg_6, arg_7 in arg_5.old_names:\n if arg_7 == arg_2:\n arg_4 = arg_6\n break\n if arg_1 != arg_4:\n arg_0._raise_duplicate_msg_id(arg_2, arg_1, arg_4)"} +{"_id": "doc_4166", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raise an error when a symbol is duplicated.\n\n :param str msgid: The msgid corresponding to the symbols\n :param str symbol: Offending symbol\n :param str other_symbol: Other offending symbol\n :raises InvalidMessageError: when a symbol is duplicated.\n \"\"\"\n arg_3 = [arg_1, arg_2]\n arg_3.sort()\n arg_4 = \"Message id '{msgid}' cannot have both \".format(arg_0=arg_0)\n arg_4 += \"'{other_symbol}' and '{symbol}' as symbolic name.\".format(\n arg_2=arg_3[0], arg_1=arg_3[1]\n )\n raise InvalidMessageError(arg_4)"} +{"_id": "doc_4167", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raise an error when a msgid is duplicated.\n\n :param str symbol: The symbol corresponding to the msgids\n :param str msgid: Offending msgid\n :param str other_msgid: Other offending msgid\n :raises InvalidMessageError: when a msgid is duplicated.\n \"\"\"\n arg_3 = [arg_1, arg_2]\n arg_3.sort()\n arg_4 = \"Message symbol '{symbol}' cannot be used for \".format(\n arg_0=arg_0\n )\n arg_4 += \"'{other_msgid}' and '{msgid}' at the same time.\".format(\n arg_2=arg_3[0], arg_1=arg_3[1]\n )\n raise InvalidMessageError(arg_4)"} +{"_id": "doc_4168", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generates a user-consumable representation of a message.\n\n Can be just the message ID or the ID and the symbol.\n \"\"\"\n arg_2 = arg_0.get_message_definitions(arg_1)\n if len(arg_2) == 1:\n return repr(arg_2[0].symbol)\n return repr([arg_3.symbol for arg_3 in arg_2])"} +{"_id": "doc_4169", "title": "", "text": "def Func(arg_0):\n \"\"\"Output full messages list documentation in ReST format. 
\"\"\"\n arg_1 = sorted(arg_0._messages_definitions.values(), key=lambda m: m.msgid)\n for arg_2 in arg_1:\n if not arg_2.may_be_emitted():\n continue\n print(arg_2.format_help(checkerref=False))\n print(\"\")"} +{"_id": "doc_4170", "title": "", "text": "def Func(arg_0):\n \"\"\"Output full documentation in ReST format for all extension modules\"\"\"\n # PACKAGE/docs/exts/pylint_extensions.py --> PACKAGE/\n arg_1 = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n # PACKAGE/ --> PACKAGE/pylint/extensions\n arg_2 = os.path.join(arg_1, \"pylint\", \"extensions\")\n arg_3 = []\n arg_4 = {}\n for arg_5 in os.listdir(arg_2):\n arg_6, arg_7 = os.path.splitext(arg_5)\n if arg_6[0] == \"_\" or arg_6 in DEPRECATED_MODULES:\n continue\n if arg_7 == \".py\":\n arg_3.append(\"pylint.extensions.%s\" % arg_6)\n elif arg_7 == \".rst\":\n arg_4[\"pylint.extensions.\" + arg_6] = os.path.join(arg_2, arg_5)\n arg_3.sort()\n if not arg_3:\n sys.exit(\"No Pylint extensions found?\")\n\n arg_8 = PyLinter()\n arg_8.load_plugin_modules(arg_3)\n\n arg_9 = os.path.join(\n arg_1, \"doc\", \"technical_reference\", \"extensions.rst\"\n )\n with open(arg_9, \"w\") as stream:\n stream.write(\"Optional Pylint checkers in the extensions module\\n\")\n stream.write(\"=================================================\\n\\n\")\n stream.write(\"Pylint provides the following optional plugins:\\n\\n\")\n for arg_10 in arg_3:\n stream.write(\"- :ref:`{}`\\n\".format(arg_10))\n stream.write(\"\\n\")\n stream.write(\n \"You can activate any or all of these extensions \"\n \"by adding a ``load-plugins`` line to the ``MASTER`` \"\n \"section of your ``.pylintrc``, for example::\\n\"\n )\n stream.write(\n \"\\n load-plugins=pylint.extensions.docparams,\"\n \"pylint.extensions.docstyle\\n\\n\"\n )\n arg_11 = get_plugins_info(arg_8, arg_4)\n for arg_10, arg_12 in sorted(arg_11.items()):\n arg_8._print_checker_doc(arg_12[\"name\"], arg_12, stream=stream)"} +{"_id": "doc_4171", "title": "", "text": "def Func() -> int:\n \"\"\"Use sched_affinity if available for virtualized or containerized environments.\"\"\"\n arg_0 = getattr(os, \"sched_getaffinity\", None)\n # pylint: disable=not-callable,using-constant-test\n if arg_0:\n return len(arg_0(0))\n if multiprocessing:\n return multiprocessing.cpu_count()\n return 1"} +{"_id": "doc_4172", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"take a list of module names which are pylint plugins and load\n and register them\n \"\"\"\n for arg_2 in arg_1:\n if arg_2 in arg_0._dynamic_plugins:\n continue\n arg_0._dynamic_plugins.add(arg_2)\n arg_3 = modutils.load_module_from_name(arg_2)\n arg_3.register(arg_0)"} +{"_id": "doc_4173", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n \"\"\"overridden from config.OptionsProviderMixin to handle some\n special options\n \"\"\"\n if arg_1 in arg_0._options_methods or arg_1 in arg_0._bw_options_methods:\n if arg_2:\n try:\n arg_5 = arg_0._options_methods[arg_1]\n except KeyError:\n arg_5 = arg_0._bw_options_methods[arg_1]\n warnings.warn(\n \"%s is deprecated, replace it by %s\"\n % (arg_1, arg_1.split(\"-\")[0]),\n DeprecationWarning,\n )\n arg_2 = utils._check_csv(arg_2)\n if isinstance(arg_2, (list, tuple)):\n for arg_6 in arg_2:\n arg_5(arg_6, ignore_unknown=True)\n else:\n arg_5(arg_2)\n return # no need to call Func, disable/enable methods do it\n elif arg_1 == \"output-format\":\n arg_0._reporter_name = arg_2\n # If the reporters are already available, load\n # the reporter 
class.\n if arg_0._reporters:\n arg_0._load_reporter()\n\n try:\n checkers.BaseTokenChecker.Func(arg_0, arg_1, arg_2, arg_3, arg_4)\n except config.UnsupportedAction:\n print(\"option %s can't be read from config file\" % arg_1, file=sys.stderr)"} +{"_id": "doc_4174", "title": "", "text": "def Func(arg_0):\n \"\"\"disable all reporters\"\"\"\n for arg_1 in arg_0._reports.values():\n for arg_2, arg_3, arg_3 in arg_1:\n arg_0.disable_report(arg_2)"} +{"_id": "doc_4175", "title": "", "text": "def Func(arg_0):\n \"\"\"Disable all other checkers and enable Python 3 warnings.\"\"\"\n arg_0.disable(\"all\")\n arg_0.enable(\"python3\")\n if arg_0._error_mode:\n # The error mode was activated, using the -E flag.\n # So we'll need to enable only the errors from the\n # Python 3 porting checker.\n for arg_1 in arg_0._checker_messages(\"python3\"):\n if arg_1.startswith(\"E\"):\n arg_0.enable(arg_1)\n else:\n arg_0.disable(arg_1)\n arg_2 = arg_0.cfgfile_parser\n if arg_2.has_option(\"MESSAGES CONTROL\", \"disable\"):\n arg_3 = arg_2.get(\"MESSAGES CONTROL\", \"disable\")\n arg_0.global_set_option(\"disable\", arg_3)\n arg_0._Func = True"} +{"_id": "doc_4176", "title": "", "text": "def Func(arg_0):\n \"\"\"return all available checkers as a list\"\"\"\n return [arg_0] + [\n arg_2\n for arg_1 in arg_0._checkers.values()\n for arg_2 in arg_1\n if arg_2 is not arg_0\n ]"} +{"_id": "doc_4177", "title": "", "text": "def Func(arg_0):\n \"\"\"Get all the checker names that this linter knows about.\"\"\"\n arg_1 = arg_0.get_checkers()\n return sorted(\n {arg_2.name for arg_2 in arg_1 if arg_2.name != \"master\"}\n )"} +{"_id": "doc_4178", "title": "", "text": "def Func(arg_0):\n \"\"\"return checkers needed for activated messages and reports\"\"\"\n if not arg_0.config.reports:\n arg_0.disable_reporters()\n # get needed checkers\n arg_1 = [arg_0]\n for arg_2 in arg_0.get_checkers()[1:]:\n arg_3 = {msg for msg in arg_2.msgs if arg_0.is_message_enabled(msg)}\n if arg_3 or any(arg_0.report_is_enabled(arg_4[0]) for arg_4 in arg_2.reports):\n arg_1.append(arg_2)\n # Sort checkers by priority\n arg_1 = sorted(\n arg_1, key=operator.attrgetter(\"priority\"), reverse=True\n )\n return arg_1"} +{"_id": "doc_4179", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"get modules and errors from a list of modules and handle errors\n \"\"\"\n arg_2, arg_3 = utils.expand_modules(\n arg_1, arg_0.config.black_list, arg_0.config.black_list_re\n )\n for arg_4 in arg_3:\n arg_5 = modname = arg_4[\"mod\"]\n arg_6 = arg_4[\"key\"]\n arg_0.set_current_module(modname)\n if arg_6 == \"fatal\":\n arg_5 = str(arg_4[\"ex\"]).replace(os.getcwd() + os.sep, \"\")\n arg_0.add_message(arg_6, args=arg_5)\n return arg_2"} +{"_id": "doc_4180", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"set the name of the currently analyzed module and\n init statistics for it\n \"\"\"\n if not arg_1 and arg_2 is None:\n return\n arg_0.reporter.on_Func(arg_1, arg_2)\n arg_0.current_name = arg_1\n arg_0.current_file = arg_2 or arg_1\n arg_0.stats[\"by_module\"][arg_1] = {}\n arg_0.stats[\"by_module\"][arg_1][\"statement\"] = 0\n for arg_6 in MSG_TYPES.values():\n arg_0.stats[\"by_module\"][arg_1][arg_6] = 0"} +{"_id": "doc_4181", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Check a module from its astroid representation.\"\"\"\n try:\n arg_5 = utils.tokenize_module(arg_1)\n except tokenize.TokenError as ex:\n arg_0.add_message(\"syntax-error\", line=ex.args[1][0], args=ex.args[0])\n return None\n\n if not 
arg_1.pure_python:\n arg_0.add_message(\"raw-checker-failed\", args=arg_1.name)\n else:\n # assert astroid.file.endswith('.py')\n # invoke ITokenChecker interface on self to fetch module/block\n # level options\n arg_0.process_tokens(arg_5)\n if arg_0._ignore_file:\n return False\n # walk ast to collect line numbers\n arg_0.file_state.collect_block_lines(arg_0.msgs_store, arg_1)\n # run raw and tokens checkers\n for arg_6 in arg_3:\n arg_6.process_module(arg_1)\n for arg_6 in arg_4:\n arg_6.process_tokens(arg_5)\n # generate events to astroid checkers\n arg_2.walk(arg_1)\n return True"} +{"_id": "doc_4182", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"optik callback for printing some help about a particular message\"\"\"\n arg_0.linter.msgs_store.help_message(utils._splitstrip(arg_3))\n sys.exit(0)"} +{"_id": "doc_4183", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"optik callback for printing full documentation\"\"\"\n arg_0.linter.print_full_documentation()\n sys.exit(0)"} +{"_id": "doc_4184", "title": "", "text": "def Func(arg_0, arg_1=80, arg_2=\"\"):\n \"\"\"Wrap the text on the given line length.\"\"\"\n return \"\\n\".join(\n textwrap.wrap(\n arg_0, width=arg_1, initial_indent=arg_2, subsequent_indent=arg_2\n )\n )"} +{"_id": "doc_4185", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"return decoded line from encoding or decode with default encoding\"\"\"\n try:\n return arg_0.decode(arg_1 or sys.getdefaultencoding(), *arg_2, **arg_3)\n except LookupError:\n return arg_0.decode(sys.getdefaultencoding(), *arg_2, **arg_3)"} +{"_id": "doc_4186", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determines if the basename is matched in a regex blacklist\n\n :param str base_name: The basename of the file\n :param list black_list_re: A collection of regex patterns to match against.\n Successful matches are blacklisted.\n\n :returns: `True` if the basename is blacklisted, `False` otherwise.\n :rtype: bool\n \"\"\"\n for arg_2 in arg_1:\n if arg_2.match(arg_0):\n return True\n return False"} +{"_id": "doc_4187", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"load all module and package in the given directory, looking for a\n 'register' function in each one, used to register pylint checkers\n \"\"\"\n arg_2 = {}\n for arg_3 in listdir(arg_1):\n arg_4, arg_5 = splitext(arg_3)\n if arg_4 in arg_2 or arg_4 == \"__pycache__\":\n continue\n if (\n arg_5 in PY_EXTS\n and arg_4 != \"__init__\"\n or (not arg_5 and isdir(join(arg_1, arg_4)))\n ):\n try:\n arg_6 = modutils.load_module_from_file(join(arg_1, arg_3))\n except ValueError:\n # empty module name (usually emacs auto-save files)\n continue\n except ImportError as exc:\n print(\n \"Problem importing module %s: %s\" % (arg_3, exc), file=sys.stderr\n )\n else:\n if hasattr(arg_6, \"register\"):\n arg_6.register(arg_0)\n arg_2[arg_4] = 1"} +{"_id": "doc_4188", "title": "", "text": "def Func(arg_0):\n \"\"\"return string as a comment\"\"\"\n arg_1 = [line.strip() for line in arg_0.splitlines()]\n return \"# \" + (\"%s# \" % linesep).join(arg_1)"} +{"_id": "doc_4189", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"format an options section using the INI format\"\"\"\n if arg_3:\n print(_comment(arg_3), file=arg_0)\n print(\"[%s]\" % arg_1, file=arg_0)\n _ini_format(arg_0, arg_2)"} +{"_id": "doc_4190", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"format options using the INI format\"\"\"\n for arg_2, arg_3, arg_4 in 
arg_1:\n arg_4 = _format_option_value(arg_3, arg_4)\n arg_5 = arg_3.get(\"help\")\n if arg_5:\n arg_5 = normalize_text(arg_5, line_len=79, indent=\"# \")\n print(file=arg_0)\n print(arg_5, file=arg_0)\n else:\n print(file=arg_0)\n if arg_4 is None:\n print(\"#%s=\" % arg_2, file=arg_0)\n else:\n arg_4 = str(arg_4).strip()\n if re.match(r\"^([\\w-]+,)+[\\w-]+$\", str(arg_4)):\n arg_6 = \"\\n \" + \" \" * len(arg_2)\n arg_4 = arg_6.join(x + \",\" for x in str(arg_4).split(\",\"))\n # remove trailing ',' from last element of the list\n arg_4 = arg_4[:-1]\n print(\"%s=%s\" % (arg_2, arg_4), file=arg_0)"} +{"_id": "doc_4191", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"overridden to detect problems easily\"\"\"\n assert arg_1 not in arg_0.parents()\n VNode.Func(arg_0, arg_1)"} +{"_id": "doc_4192", "title": "", "text": "def Func(arg_0):\n \"\"\"return the ancestor nodes\"\"\"\n assert arg_0.parent is not arg_0\n if arg_0.parent is None:\n return []\n return [arg_0.parent] + arg_0.parent.Func()"} +{"_id": "doc_4193", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"trick to get table content without actually writing it\n\n return an aligned list of lists containing table cells values as string\n \"\"\"\n arg_2 = [[]]\n arg_3 = arg_1.cols\n for arg_4 in arg_0.compute_content(arg_1):\n if arg_3 == 0:\n arg_2.append([])\n arg_3 = arg_1.cols\n arg_3 -= 1\n arg_2[-1].append(arg_4)\n # fill missing cells\n while len(arg_2[-1]) < arg_3:\n arg_2[-1].append(\"\")\n return arg_2"} +{"_id": "doc_4194", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Walk the AST to collect block level options line numbers.\"\"\"\n for arg_3, arg_4 in arg_0._module_msgs_state.items():\n arg_0._raw_module_msgs_state[arg_3] = arg_4.copy()\n arg_6 = arg_0._module_msgs_state.copy()\n arg_0._module_msgs_state = {}\n arg_0._suppression_mapping = {}\n arg_0._effective_max_line_number = arg_2.tolineno\n arg_0._Func(arg_1, arg_2, arg_6)"} +{"_id": "doc_4195", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6\n ): # pylint: disable=unused-argument\n \"\"\"Report an ignored message.\n\n state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,\n depending on whether the message was disabled locally in the module,\n or globally. 
The other arguments are the same as for add_message.\n \"\"\"\n if arg_1 == MSG_STATE_SCOPE_MODULE:\n try:\n arg_7 = arg_0._suppression_mapping[(arg_2, arg_3)]\n arg_0._ignored_msgs[(arg_2, arg_7)].add(arg_3)\n except KeyError:\n pass"} +{"_id": "doc_4196", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"register a report\n\n reportid is the unique identifier for the report\n r_title the report's title\n r_cb the method to call to make the report\n checker is the checker defining the report\n \"\"\"\n arg_1 = arg_1.upper()\n arg_0._reports[arg_4].append((arg_1, arg_2, arg_3))"} +{"_id": "doc_4197", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"render registered reports\"\"\"\n arg_3 = Section(\"Report\", \"%s statements analysed.\" % (arg_0.stats[\"statement\"]))\n for arg_4 in arg_0.report_order():\n for arg_5, arg_6, arg_7 in arg_0._reports[arg_4]:\n if not arg_0.report_is_enabled(arg_5):\n continue\n arg_8 = Section(arg_6)\n try:\n arg_7(arg_8, arg_1, arg_2)\n except EmptyReportError:\n continue\n arg_8.report_id = arg_5\n arg_3.append(arg_8)\n return arg_3"} +{"_id": "doc_4198", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the name of the property that the given node is a setter for.\n\n :param node: The node to get the property name for.\n :type node: str\n\n :rtype: str or None\n :returns: The name of the property that the node is a setter for,\n or None if one could not be found.\n \"\"\"\n arg_1 = arg_0.decorators.nodes if arg_0.decorators else []\n for arg_2 in arg_1:\n if (\n isinstance(arg_2, astroid.Attribute)\n and arg_2.attrname == \"setter\"\n and isinstance(arg_2.expr, astroid.Name)\n ):\n return arg_2.expr.name\n return None"} +{"_id": "doc_4199", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the property node for the given setter node.\n\n :param node: The node to get the property for.\n :type node: astroid.FunctionDef\n\n :rtype: astroid.FunctionDef or None\n :returns: The node relating to the property of the given setter node,\n or None if one could not be found.\n \"\"\"\n arg_1 = None\n\n arg_2 = Func_name(arg_0)\n arg_3 = utils.node_frame_class(arg_0)\n if arg_2 and arg_3:\n arg_4 = arg_3.getattr(arg_0.name)\n for arg_5 in arg_4:\n if utils.decorated_with_property(arg_5):\n arg_1 = arg_5\n break\n\n return arg_1"} +{"_id": "doc_4200", "title": "", "text": "def Func(arg_0):\n \"\"\"Check if a return node returns a value other than None.\n\n :param return_node: The return node to check.\n :type return_node: astroid.Return\n\n :rtype: bool\n :return: True if the return node returns a value other than None,\n False otherwise.\n \"\"\"\n arg_1 = arg_0.value\n\n if arg_1 is None:\n return False\n\n return not (isinstance(arg_1, astroid.Const) and arg_1.value is None)"} +{"_id": "doc_4201", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Gets all of the possible raised exception types for the given raise node.\n\n .. 
note::\n\n Caught exception types are ignored.\n\n\n :param node: The raise node to find exception types for.\n :type node: astroid.node_classes.NodeNG\n\n :returns: A list of exception types possibly raised by :param:`node`.\n :rtype: set(str)\n \"\"\"\n arg_1 = []\n if isinstance(arg_0.exc, astroid.Name):\n arg_2 = utils.safe_infer(arg_0.exc)\n if arg_2:\n arg_1 = [arg_2.name]\n elif arg_0.exc is None:\n arg_3 = arg_0.parent\n while arg_3 and not isinstance(arg_3, astroid.ExceptHandler):\n arg_3 = arg_3.parent\n\n if arg_3 and arg_3.type:\n arg_4 = astroid.unpack_infer(arg_3.type)\n arg_1 = (arg_8.name for arg_8 in arg_4 if arg_8 is not astroid.Uninferable)\n else:\n arg_5 = _get_raise_target(arg_0)\n if isinstance(arg_5, astroid.ClassDef):\n arg_1 = [arg_5.name]\n elif isinstance(arg_5, astroid.FunctionDef):\n for arg_6 in arg_5.nodes_of_class(astroid.Return):\n if arg_6.frame() != arg_5:\n # return from inner function - ignore it\n continue\n\n arg_7 = utils.safe_infer(arg_6.value)\n if (\n arg_7\n and isinstance(arg_7, (astroid.Instance, astroid.ClassDef))\n and utils.inherit_from_std_ex(arg_7)\n ):\n arg_1.append(arg_7.name)\n\n try:\n return {arg_8 for arg_8 in arg_1 if not utils.node_ignores_exception(arg_0, arg_8)}\n except astroid.InferenceError:\n return set()"} +{"_id": "doc_4202", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"inspect the source file to find messages activated or deactivated by id.\"\"\"\n arg_2 = MessagesHandlerMixIn.get_by_id_managed_msgs()\n for (arg_3, arg_4, arg_5, arg_6, arg_7) in arg_2:\n if arg_3 == arg_1.name:\n if arg_7:\n arg_8 = \"Id '{ident}' is used to disable '{symbol}' message emission\".format(\n ident=arg_4, symbol=arg_5\n )\n else:\n arg_8 = \"Id '{ident}' is used to enable '{symbol}' message emission\".format(\n ident=arg_4, symbol=arg_5\n )\n arg_0.add_message(\"use-symbolic-message-instead\", line=arg_6, args=arg_8)\n MessagesHandlerMixIn.clear_by_id_managed_msgs()"} +{"_id": "doc_4203", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"inspect the source file to find encoding problem\"\"\"\n if arg_1.file_encoding:\n arg_2 = arg_1.file_encoding\n else:\n arg_2 = \"ascii\"\n\n with arg_1.stream() as stream:\n for arg_3, arg_4 in enumerate(stream):\n arg_0._check_encoding(arg_3 + 1, arg_4, arg_2)"} +{"_id": "doc_4204", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"inspect the source to find fixme problems\"\"\"\n if not arg_0.config.notes:\n return\n arg_2 = (\n token_info for token_info in arg_1 if token_info.type == tokenize.COMMENT\n )\n for arg_3 in arg_2:\n arg_4 = arg_3.string[1:].lstrip() # trim '#' and whitespaces\n\n # handle pylint disable clauses\n arg_5 = OPTION_RGX.search(arg_4)\n if arg_5:\n try:\n arg_6, arg_7 = arg_5.group(1).split(\"=\", 1)\n arg_8 = [_val.strip().upper() for _val in arg_7.split(\",\")]\n if set(arg_8) & set(arg_0.config.notes):\n continue\n except ValueError:\n arg_0.add_message(\n \"bad-inline-option\",\n args=arg_5.group(1).strip(),\n line=arg_3.string,\n )\n continue\n\n # emit warnings if necessary\n arg_9 = arg_0._fixme_pattern.search(\"#\" + arg_4.lower())\n if arg_9:\n arg_10 = arg_9.group(1)\n arg_0.add_message(\n \"fixme\",\n col_offset=arg_3.string.lower().index(arg_10.lower()),\n args=arg_4,\n line=arg_3.start[0],\n )"} +{"_id": "doc_4205", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the name is a future import from another module.\"\"\"\n try:\n arg_2 = arg_0.do_import_module(arg_0.modname)\n except astroid.AstroidBuildingException:\n return None\n\n for 
arg_3 in arg_2.locals.get(arg_1, []):\n if isinstance(arg_3, astroid.ImportFrom) and arg_3.modname == FUTURE:\n return True\n return None"} +{"_id": "doc_4206", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"get overridden method if any\"\"\"\n try:\n arg_2 = next(arg_0.local_attr_ancestors(arg_1))\n except (StopIteration, KeyError):\n return None\n try:\n arg_3 = arg_2[arg_1]\n except KeyError:\n # We have found an ancestor defining but it's not in the local\n # dictionary. This may happen with astroid built from living objects.\n return None\n if isinstance(arg_3, astroid.FunctionDef):\n return arg_3\n return None"} +{"_id": "doc_4207", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return extra information to add to the message for unpacking-non-sequence\n and unbalanced-tuple-unpacking errors\n \"\"\"\n arg_2 = \"\"\n arg_3 = arg_1.root().name\n if arg_0.root().name == arg_3:\n if arg_0.lineno == arg_1.lineno:\n arg_2 = \" %s\" % arg_1.as_string()\n elif arg_1.lineno:\n arg_2 = \" defined at line %s\" % arg_1.lineno\n elif arg_1.lineno:\n arg_2 = \" defined at line %s of %s\" % (arg_1.lineno, arg_3)\n return arg_2"} +{"_id": "doc_4208", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Detect that the given frames shares a global\n scope.\n\n Two frames shares a global scope when neither\n of them are hidden under a function scope, as well\n as any of parent scope of them, until the root scope.\n In this case, depending from something defined later on\n will not work, because it is still undefined.\n\n Example:\n class A:\n # B has the same global scope as `C`, leading to a NameError.\n class B(C): ...\n class C: ...\n\n \"\"\"\n arg_3 = arg_4 = None\n if arg_1 and arg_1.parent:\n arg_4 = arg_1.parent.scope()\n if arg_2 and arg_2.parent:\n arg_3 = arg_2.parent.scope()\n if isinstance(arg_1, astroid.FunctionDef):\n # If the parent of the current node is a\n # function, then it can be under its scope\n # (defined in, which doesn't concern us) or\n # the `->` part of annotations. The same goes\n # for annotations of function arguments, they'll have\n # their parent the Arguments node.\n if not isinstance(arg_0.parent, (astroid.FunctionDef, astroid.Arguments)):\n return False\n elif any(\n not isinstance(arg_5, (astroid.ClassDef, astroid.Module)) for arg_5 in (arg_1, arg_2)\n ):\n # Not interested in other frames, since they are already\n # not in a global scope.\n return False\n\n arg_6 = []\n for arg_7 in (arg_4, arg_3):\n # Look for parent scopes. 
If there is anything different\n # than a module or a class scope, then they frames don't\n # share a global scope.\n arg_8 = arg_7\n while arg_8:\n if not isinstance(arg_8, (astroid.ClassDef, astroid.Module)):\n arg_6.append(arg_8)\n break\n if arg_8.parent:\n arg_8 = arg_8.parent.scope()\n else:\n break\n if arg_6 and len(set(arg_6)) != 1:\n # Store different scopes than expected.\n # If the stored scopes are, in fact, the very same, then it means\n # that the two frames (frame and defframe) shares the same scope,\n # and we could apply our lineno analysis over them.\n # For instance, this works when they are inside a function, the node\n # that uses a definition and the definition itself.\n return False\n # At this point, we are certain that frame and defframe shares a scope\n # and the definition of the first depends on the second.\n return arg_1.lineno < arg_2.lineno"} +{"_id": "doc_4209", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return True if the node is in a local class scope, as an assignment.\n\n :param node: Node considered\n :type node: astroid.Node\n :return: True if the node is in a local class scope, as an assignment. False otherwise.\n :rtype: bool\n \"\"\"\n # Detect if we are in a local class scope, as an assignment.\n # For example, the following is fair game.\n #\n # class A:\n # b = 1\n # c = lambda b=b: b * b\n #\n # class B:\n # tp = 1\n # def func(self, arg: tp):\n # ...\n # class C:\n # tp = 2\n # def func(self, arg=tp):\n # ...\n\n arg_2 = arg_1.name\n arg_3 = arg_1.statement().scope()\n arg_4 = arg_0._defined_in_function_definition(arg_1, arg_3)\n if arg_4:\n arg_5 = arg_3.parent.scope().locals\n else:\n arg_5 = arg_3.locals\n return not (\n (isinstance(arg_3, astroid.ClassDef) or arg_4)\n and arg_2 in arg_5\n )"} +{"_id": "doc_4210", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return True if there is a node with the same name in the to_consume dict of an upper scope\n and if that scope is a function\n\n :param node: node to check for\n :type node: astroid.Node\n :param index: index of the current consumer inside self._to_consume\n :type index: int\n :return: True if there is a node with the same name in the to_consume dict of an upper scope\n and if that scope is a function\n :rtype: bool\n \"\"\"\n for arg_3 in arg_0._to_consume[arg_2 - 1 :: -1]:\n if arg_3.scope_type == \"function\" and arg_1.name in arg_3.to_consume:\n return True\n return False"} +{"_id": "doc_4211", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Check for unbalanced tuple unpacking\n and unpacking non sequences.\n \"\"\"\n if utils.is_inside_abstract_class(arg_2):\n return\n if utils.is_comprehension(arg_2):\n return\n if arg_1 is astroid.Uninferable:\n return\n if (\n isinstance(arg_1.parent, astroid.Arguments)\n and isinstance(arg_2.value, astroid.Name)\n and arg_2.value.name == arg_1.parent.vararg\n ):\n # Variable-length argument, we can't determine the length.\n return\n if isinstance(arg_1, (astroid.Tuple, astroid.List)):\n # attempt to check unpacking is properly balanced\n arg_4 = arg_1.itered()\n if len(arg_3) != len(arg_4):\n # Check if we have starred nodes.\n if any(isinstance(arg_5, astroid.Starred) for arg_5 in arg_3):\n return\n arg_0.add_message(\n \"unbalanced-tuple-unpacking\",\n arg_2=arg_2,\n args=(\n _get_unpacking_extra_info(arg_2, arg_1),\n len(arg_3),\n len(arg_4),\n ),\n )\n # attempt to check unpacking may be possible (ie RHS is iterable)\n else:\n if not utils.is_iterable(arg_1):\n arg_0.add_message(\n 
\"unpacking-non-sequence\",\n arg_2=arg_2,\n args=(_get_unpacking_extra_info(arg_2, arg_1),),\n )"} +{"_id": "doc_4212", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Update consumption analysis for metaclasses. \"\"\"\n arg_2 = [] # [(scope_locals, consumed_key)]\n\n for arg_3 in arg_1.get_children():\n if isinstance(arg_3, astroid.ClassDef):\n arg_2.extend(arg_0._check_classdef_metaclasses(arg_3, arg_1))\n\n # Pop the consumed items, in order to avoid having\n # unused-import and unused-variable false positives\n for arg_4, arg_5 in arg_2:\n arg_4.pop(arg_5, None)"} +{"_id": "doc_4213", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return a list of subpackages for the given directory\"\"\"\n arg_2 = []\n for arg_3 in os.listdir(arg_0):\n arg_4 = join(arg_0, arg_3)\n if isdir(arg_4):\n if exists(join(arg_4, \"__init__.py\")):\n if arg_1:\n arg_2.append(\"%s.%s\" % (arg_1, arg_3))\n else:\n arg_2.append(arg_3)\n arg_2 += Func(arg_4, arg_2[-1])\n return arg_2"} +{"_id": "doc_4214", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"make a layout with some stats about duplication\"\"\"\n arg_3 = [\"\", \"now\", \"previous\", \"difference\"]\n arg_3 += table_lines_from_stats(\n arg_1, arg_2, (\"nb_duplicated_lines\", \"percent_duplicated_lines\")\n )\n arg_0.append(Table(children=arg_3, cols=4, rheaders=1, cheaders=1))"} +{"_id": "doc_4215", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"append a file to search for similarities\"\"\"\n if arg_3 is None:\n arg_4 = arg_2.readlines\n else:\n arg_4 = decoding_stream(arg_2, arg_3).readlines\n try:\n arg_0.linesets.append(\n LineSet(\n arg_1,\n arg_4(),\n arg_0.ignore_comments,\n arg_0.ignore_docstrings,\n arg_0.ignore_imports,\n )\n )\n except UnicodeDecodeError:\n pass"} +{"_id": "doc_4216", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"display computed similarities on stdout\"\"\"\n arg_2 = 0\n for arg_3, arg_4 in arg_1:\n print()\n print(arg_3, \"similar lines in\", len(arg_4), \"files\")\n arg_4 = sorted(arg_4)\n for arg_5, arg_6 in arg_4:\n print(\"==%s:%s\" % (arg_5.name, arg_6))\n # pylint: disable=W0631\n for arg_7 in arg_5._real_lines[arg_6 : arg_6 + arg_3]:\n print(\" \", arg_7.rstrip())\n arg_2 += arg_3 * (len(arg_4) - 1)\n arg_8 = sum([len(arg_5) for arg_5 in arg_0.linesets])\n print(\n \"TOTAL lines=%s duplicates=%s percent=%.2f\"\n % (\n arg_8,\n arg_2,\n arg_2 * 100.0 / arg_8,\n )\n )"} +{"_id": "doc_4217", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"find similarities in the two given linesets\"\"\"\n arg_3 = arg_1.enumerate_stripped\n arg_4 = arg_2.enumerate_stripped\n arg_5 = arg_2.find\n arg_6 = 0\n arg_7 = arg_0.min_lines\n while arg_6 < len(arg_1):\n arg_8 = 1\n arg_9 = 0\n for arg_10 in arg_5(arg_1[arg_6]):\n arg_11 = 0\n for arg_9, ((arg_12, arg_13), (arg_12, arg_14)) in enumerate(\n zip(arg_3(arg_6), arg_4(arg_10))\n ):\n if arg_13 != arg_14:\n if arg_11 > arg_7:\n yield arg_9, arg_1, arg_6, arg_2, arg_10\n arg_8 = max(arg_8, arg_9)\n break\n if arg_13:\n arg_11 += 1\n else:\n # we may have reach the end\n arg_9 += 1\n if arg_11 > arg_7:\n yield arg_9, arg_1, arg_6, arg_2, arg_10\n arg_8 = max(arg_8, arg_9)\n arg_6 += arg_8"} +{"_id": "doc_4218", "title": "", "text": "def Func(arg_0):\n \"\"\"create the index for this set\"\"\"\n arg_1 = defaultdict(list)\n for arg_2, arg_3 in enumerate(arg_0._stripped_lines):\n if arg_3:\n arg_1[arg_3].append(arg_2)\n return arg_1"} +{"_id": "doc_4219", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check 
if a definition signature is equivalent to a call.\"\"\"\n if arg_0.kwargs:\n arg_2 = arg_0.kwargs in arg_1.starred_kws\n else:\n arg_2 = not arg_1.starred_kws\n if arg_0.varargs:\n arg_3 = arg_0.varargs in arg_1.starred_args\n else:\n arg_3 = not arg_1.starred_args\n arg_4 = all(kw in arg_1.kws for kw in arg_0.kwonlyargs)\n arg_5 = arg_0.args == arg_1.args\n\n arg_6 = True\n if arg_1.kws:\n for arg_7 in arg_1.kws:\n arg_8 = arg_7 in arg_1.args\n arg_9 = arg_7 in arg_0.kwonlyargs\n if not arg_8 and not arg_9:\n # Maybe this argument goes into **kwargs,\n # or it is an extraneous argument.\n # In any case, the signature is different than\n # the call site, which stops our search.\n arg_6 = False\n break\n\n return all(\n (\n arg_5,\n arg_4,\n arg_3,\n arg_2,\n arg_6,\n )\n )"} +{"_id": "doc_4220", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Determine if the two methods have different parameters\n\n They are considered to have different parameters if:\n\n * they have different positional parameters, including different names\n\n * one of the methods is having variadics, while the other is not\n\n * they have different keyword only parameters.\n\n \"\"\"\n arg_3 = _positional_parameters(arg_0)\n arg_4 = _positional_parameters(arg_1)\n\n arg_5 = _hasFunc(\n arg_3, arg_4, arg_2\n )\n arg_6 = _hasFunc(\n arg_0.args.kwonlyargs, arg_1.args.kwonlyargs, arg_2\n )\n if arg_0.name in PYMETHODS:\n # Ignore the difference for special methods. If the parameter\n # numbers are different, then that is going to be caught by\n # unexpected-special-method-signature.\n # If the names are different, it doesn't matter, since they can't\n # be used as keyword arguments anyway.\n arg_5 = arg_6 = False\n\n # Both or none should have extra variadics, otherwise the method\n # loses or gains capabilities that are not reflected into the parent method,\n # leading to potential inconsistencies in the code.\n arg_7 = (\n sum(1 for param in (arg_0.args.kwarg, arg_1.args.kwarg) if not param)\n == 1\n )\n arg_8 = (\n sum(1 for param in (arg_0.args.vararg, arg_1.args.vararg) if not param)\n == 1\n )\n\n return any(\n (arg_5, arg_7, arg_8, arg_6)\n )"} +{"_id": "doc_4221", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Safely infer the return value of a function.\n\n Returns None if inference failed or if there is some ambiguity (more than\n one node has been inferred). 
Otherwise returns infered value.\n \"\"\"\n try:\n arg_3 = arg_0.infer_call_result(arg_1, arg_2=arg_2)\n arg_4 = next(arg_3)\n except astroid.InferenceError:\n return None # inference failed\n except StopIteration:\n return None # no values infered\n try:\n next(arg_3)\n return None # there is ambiguity on the inferred node\n except astroid.InferenceError:\n return None # there is some kind of ambiguity\n except StopIteration:\n return arg_4"} +{"_id": "doc_4222", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the given node as accessed.\"\"\"\n\n arg_2 = node_frame_class(arg_1)\n if arg_2 is None:\n # The node does not live in a class.\n return\n arg_0._scopes[arg_2][arg_1.attrname].append(arg_1)"} +{"_id": "doc_4223", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"init visit variable _accessed\n \"\"\"\n arg_0._check_bases_classes(arg_1)\n # if not an exception or a metaclass\n if arg_1.type == \"class\" and has_known_bases(arg_1):\n try:\n arg_1.local_attr(\"__init__\")\n except astroid.NotFoundError:\n arg_0.add_message(\"no-init\", args=arg_1, arg_1=arg_1)\n arg_0._check_slots(arg_1)\n arg_0._check_proper_bases(arg_1)\n arg_0._check_consistent_mro(arg_1)"} +{"_id": "doc_4224", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Detect that a class has a consistent mro or duplicate bases.\"\"\"\n try:\n arg_1.mro()\n except InconsistentMroError:\n arg_0.add_message(\"inconsistent-mro\", args=arg_1.name, arg_1=arg_1)\n except DuplicateBasesError:\n arg_0.add_message(\"duplicate-bases\", args=arg_1.name, arg_1=arg_1)\n except NotImplementedError:\n # Old style class, there's no mro so don't do anything.\n pass"} +{"_id": "doc_4225", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Detect that a class inherits something which is not\n a class or a type.\n \"\"\"\n for arg_2 in arg_1.bases:\n arg_3 = safe_infer(arg_2)\n if arg_3 in (astroid.Uninferable, None):\n continue\n if isinstance(arg_3, astroid.Instance) and arg_3.is_subtype_of(\n \"%s.type\" % (BUILTINS,)\n ):\n continue\n\n if not isinstance(arg_3, astroid.ClassDef) or _is_invalid_base_class(\n arg_3\n ):\n arg_0.add_message(\"inherit-non-class\", args=arg_2.as_string(), arg_1=arg_1)\n\n if arg_3.name == object.__name__:\n arg_0.add_message(\n \"useless-object-inheritance\", args=arg_1.name, arg_1=arg_1\n )"} +{"_id": "doc_4226", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the given function node is an useless method override\n\n We consider it *useless* if it uses the super() builtin, but having\n nothing additional whatsoever than not implementing the method at all.\n If the method uses super() to delegate an operation to the rest of the MRO,\n and if the method called is the same as the current one, the arguments\n passed to super() are the same as the parameters that were passed to\n this method, then the method could be removed altogether, by letting\n other implementation to take precedence.\n \"\"\"\n\n if (\n not arg_1.is_method()\n # With decorators is a change of use\n or arg_1.decorators\n ):\n return\n\n arg_2 = arg_1.body\n if len(arg_2) != 1:\n # Multiple statements, which means this overridden method\n # could do multiple things we are not aware of.\n return\n\n arg_3 = arg_2[0]\n if not isinstance(arg_3, (astroid.Expr, astroid.Return)):\n # Doing something else than what we are interested into.\n return\n\n arg_4 = arg_3.value\n if (\n not isinstance(arg_4, astroid.Call)\n # Not a super() attribute access.\n or not isinstance(arg_4.func, astroid.Attribute)\n ):\n 
return\n\n # Should be a super call.\n try:\n arg_5 = next(arg_4.func.expr.infer())\n except astroid.InferenceError:\n return\n else:\n if not isinstance(arg_5, objects.Super):\n return\n\n # The name should be the same.\n if arg_4.func.attrname != arg_1.name:\n return\n\n # Should be a super call with the MRO pointer being the\n # current class and the type being the current instance.\n arg_6 = arg_1.parent.scope()\n if (\n arg_5.mro_pointer != arg_6\n or not isinstance(arg_5.type, astroid.Instance)\n or arg_5.type.name != arg_6.name\n ):\n return\n\n # \u00a0Check values of default args\n arg_7 = arg_1.parent.frame()\n arg_8 = None\n for arg_9 in arg_7.local_attr_ancestors(arg_1.name):\n # get astroid for the searched method\n try:\n arg_8 = arg_9[arg_1.name]\n except KeyError:\n # we have found the method but it's not in the local\n # dictionary.\n # This may happen with astroid build from living objects\n continue\n if (\n not isinstance(arg_8, astroid.FunctionDef)\n # If the method have an ancestor which is not a\n # function then it is legitimate to redefine it\n or _has_different_parameters_default_value(\n arg_8.args, arg_1.args\n )\n ):\n return\n break\n\n # Detect if the parameters are the same as the call's arguments.\n arg_10 = _signature_from_arguments(arg_1.args)\n arg_11 = _signature_from_call(arg_4)\n\n if arg_8 is not None:\n\n def form_annotations(arg_12):\n return [\n arg_13.as_string() for arg_13 in filter(None, arg_12)\n ]\n\n arg_14 = form_annotations(arg_1.args.annotations)\n arg_15 = form_annotations(arg_8.args.annotations)\n if arg_14 and arg_15:\n if arg_14 != arg_15:\n return\n\n if _definition_equivalent_to_call(arg_10, arg_11):\n arg_0.add_message(\n \"useless-super-delegation\", node=arg_1, arg_11=(arg_1.name,)\n )"} +{"_id": "doc_4227", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"on method node, check if this method couldn't be a function\n\n ignore class, static and abstract methods, initializer,\n methods overridden from a parent class.\n \"\"\"\n if arg_1.is_method():\n if arg_1.args.args is not None:\n arg_0._first_attrs.pop()\n if not arg_0.linter.is_message_enabled(\"no-self-use\"):\n return\n arg_2 = arg_1.parent.frame()\n if (\n arg_0._meth_could_be_func\n and arg_1.type == \"method\"\n and arg_1.name not in PYMETHODS\n and not (\n arg_1.is_abstract()\n or overrides_a_method(arg_2, arg_1.name)\n or decorated_with_property(arg_1)\n or _has_bare_super_call(arg_1)\n )\n ):\n arg_0.add_message(\"no-self-use\", arg_1=arg_1)"} +{"_id": "doc_4228", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Check that the given AssignAttr node\n is defined in the class slots.\n \"\"\"\n arg_2 = safe_infer(arg_1.expr)\n if not isinstance(arg_2, astroid.Instance):\n return\n\n arg_3 = arg_2._proxied\n if not has_known_bases(arg_3):\n return\n if \"__slots__\" not in arg_3.locals or not arg_3.newstyle:\n return\n\n arg_4 = arg_3.slots()\n if arg_4 is None:\n return\n # If any ancestor doesn't use slots, the slots\n # defined for this class are superfluous.\n if any(\n \"__slots__\" not in arg_5.locals and arg_5.name != \"object\"\n for arg_5 in arg_3.ancestors()\n ):\n return\n\n if not any(arg_6.value == arg_1.attrname for arg_6 in arg_4):\n # If we have a '__dict__' in slots, then\n # assigning any name is valid.\n if not any(arg_6.value == \"__dict__\" for arg_6 in arg_4):\n if _is_attribute_property(arg_1.attrname, arg_3):\n # Properties circumvent the slots mechanism,\n # so we should not emit a warning for them.\n return\n if arg_1.attrname in 
arg_3.locals and _has_data_descriptor(\n arg_3, arg_1.attrname\n ):\n # Descriptors circumvent the slots mechanism as well.\n return\n if arg_1.attrname == \"__class__\" and _has_same_layout_slots(\n arg_4, arg_1.parent.value\n ):\n return\n arg_0.add_message(\"assigning-non-slot\", args=(arg_1.attrname,), arg_1=arg_1)"} +{"_id": "doc_4229", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check if the name handle an access to a class member\n if so, register it\n \"\"\"\n if arg_0._first_attrs and (\n arg_1.name == arg_0._first_attrs[-1] or not arg_0._first_attrs[-1]\n ):\n arg_0._meth_could_be_func = False"} +{"_id": "doc_4230", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check that the given class node implements abstract methods from\n base classes\n \"\"\"\n\n def is_abstract(arg_2):\n return arg_2.is_abstract(pass_is_abstract=False)\n\n # check if this class abstract\n if class_is_abstract(arg_1):\n return\n\n arg_3 = sorted(\n unimplemented_abstract_methods(arg_1, is_abstract).items(),\n key=lambda item: item[0],\n )\n for arg_4, arg_2 in arg_3:\n arg_5 = arg_2.parent.frame()\n if arg_5 is arg_1:\n continue\n # owner is not this class, it must be a parent class\n # check that the ancestor's method is not abstract\n if arg_4 in arg_1.locals:\n # it is redefined as an attribute or with a descriptor\n continue\n arg_0.add_message(\"abstract-method\", arg_1=arg_1, args=(arg_4, arg_5.name))"} +{"_id": "doc_4231", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Verify that the exception context is properly set.\n\n An exception context can be only `None` or an exception.\n \"\"\"\n arg_2 = utils.safe_infer(arg_1.cause)\n if arg_2 in (astroid.Uninferable, None):\n return\n\n if isinstance(arg_2, astroid.Const):\n if arg_2.value is not None:\n arg_0.add_message(\"bad-exception-context\", arg_1=arg_1)\n elif not isinstance(arg_2, astroid.ClassDef) and not utils.inherit_from_std_ex(\n arg_2\n ):\n arg_0.add_message(\"bad-exception-context\", arg_1=arg_1)"} +{"_id": "doc_4232", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"display results encapsulated in the layout tree\"\"\"\n arg_0.section = 0\n if hasattr(arg_1, \"report_id\"):\n arg_1.children[0].children[0].data += \" (%s)\" % arg_1.report_id\n arg_0._display(arg_1)"} +{"_id": "doc_4233", "title": "", "text": "def Func(arg_0: arg_1.ClassDef) -> bool:\n \"\"\"Check if a class node is a typing.NamedTuple class\"\"\"\n for arg_3 in arg_0.ancestors():\n if arg_3.qname() == TYPING_NAMEDTUPLE:\n return True\n return False"} +{"_id": "doc_4234", "title": "", "text": "def Func(arg_0: arg_1.ClassDef) -> bool:\n \"\"\"Check if a class definition defines an Enum class.\n\n :param node: The class node to check.\n :type node: astroid.ClassDef\n\n :returns: True if the given node represents an Enum class. False otherwise.\n :rtype: bool\n \"\"\"\n for arg_3 in arg_0.bases:\n try:\n arg_4 = arg_3.inferred()\n except arg_1.InferenceError:\n continue\n\n for arg_5 in arg_4:\n if not isinstance(arg_5, arg_1.ClassDef):\n continue\n\n if arg_5.name == \"Enum\" and arg_5.root().name == \"enum\":\n return True\n\n return False"} +{"_id": "doc_4235", "title": "", "text": "def Func(arg_0: arg_1.ClassDef) -> bool:\n \"\"\"Check if a class definition defines a Python 3.7+ dataclass\n\n :param node: The class node to check.\n :type node: astroid.ClassDef\n\n :returns: True if the given node represents a dataclass class. 
False otherwise.\n :rtype: bool\n \"\"\"\n if not arg_0.decorators:\n return False\n\n arg_3 = arg_0.root().locals\n for arg_4 in arg_0.decorators.nodes:\n if isinstance(arg_4, arg_1.Call):\n arg_4 = arg_4.func\n if not isinstance(arg_4, (arg_1.Name, arg_1.Attribute)):\n continue\n if isinstance(arg_4, arg_1.Name):\n arg_5 = arg_4.name\n else:\n arg_5 = arg_4.attrname\n if arg_5 == DATACLASS_DECORATOR and DATACLASS_DECORATOR in arg_3:\n return True\n return False"} +{"_id": "doc_4236", "title": "", "text": "def Func(arg_0):\n \"\"\"initialize visit variables\"\"\"\n arg_0.stats = arg_0.linter.add_stats()\n arg_0._returns = []\n arg_0._branches = defaultdict(int)\n arg_0._stmts = []"} +{"_id": "doc_4237", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check number of public methods\"\"\"\n arg_2 = sum(\n 1 for method in arg_1.mymethods() if not method.name.startswith(\"_\")\n )\n\n # Does the class contain less than n public methods ?\n # This checks only the methods defined in the current class,\n # since the user might not have control over the classes\n # from the ancestors. It avoids some false positives\n # for classes such as unittest.TestCase, which provides\n # a lot of assert methods. It doesn't make sense to warn\n # when the user subclasses TestCase to add his own tests.\n if arg_2 > arg_0.config.max_public_methods:\n arg_0.add_message(\n \"too-many-public-methods\",\n arg_1=arg_1,\n args=(arg_2, arg_0.config.max_public_methods),\n )\n\n # Stop here for exception, metaclass, interface classes and other\n # classes for which we don't need to count the methods.\n if (\n arg_1.type != \"class\"\n or _is_enum_class(arg_1)\n or _is_dataclass(arg_1)\n or _is_typing_namedtuple(arg_1)\n ):\n return\n\n # Does the class contain more than n public methods ?\n # This checks all the methods defined by ancestors and\n # by the current class.\n arg_3 = _count_methods_in_class(arg_1)\n if arg_3 < arg_0.config.min_public_methods:\n arg_0.add_message(\n \"too-few-public-methods\",\n arg_1=arg_1,\n args=(arg_3, arg_0.config.min_public_methods),\n )"} +{"_id": "doc_4238", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check the node has any spelling errors\"\"\"\n arg_2 = arg_1.doc\n if not arg_2:\n return\n\n arg_3 = arg_1.lineno + 1\n\n # Go through lines of docstring\n for arg_4, arg_5 in enumerate(arg_2.splitlines()):\n arg_0._check_spelling(\"wrong-spelling-in-docstring\", arg_5, arg_3 + arg_4)"} +{"_id": "doc_4239", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Format the message according to the given template.\n\n The template Func is the one of the Func method :\n cf. http://docs.python.org/2/library/string.html#Funcstrings\n \"\"\"\n # For some reason, _asdict on derived namedtuples does not work with\n # Python 3.4. 
Needs some investigation.\n return arg_1.Func(**dict(zip(arg_0._fields, arg_0)))"} +{"_id": "doc_4240", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the given node is an actual elif\n\n This is a problem we're having with the builtin ast module,\n which splits `elif` branches into a separate if statement.\n Unfortunately we need to know the exact type in certain\n cases.\n \"\"\"\n if isinstance(arg_1.parent, astroid.If):\n arg_2 = arg_1.parent.orelse\n # current if node must directly follow an \"else\"\n if arg_2 and arg_2 == [arg_1]:\n if (arg_1.lineno, arg_1.col_offset) in arg_0._elifs:\n return True\n return False"} +{"_id": "doc_4241", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the given if node can be simplified.\n\n The if statement can be reduced to a boolean expression\n in some cases. For instance, if there are two branches\n and both of them return a boolean value that depends on\n the result of the statement's test, then this can be reduced\n to `bool(test)` without losing any functionality.\n \"\"\"\n\n if arg_0._is_actual_elif(arg_1):\n # Not interested in if statements with multiple branches.\n return\n if len(arg_1.orelse) != 1 or len(arg_1.body) != 1:\n return\n\n # Check if both branches can be reduced.\n arg_2 = arg_1.body[0]\n arg_3 = arg_1.orelse[0]\n if isinstance(arg_2, astroid.Return):\n if not isinstance(arg_3, astroid.Return):\n return\n arg_4 = arg_0._is_bool_const(arg_2)\n arg_5 = arg_0._is_bool_const(arg_3)\n arg_6 = \"'return bool(test)'\"\n elif isinstance(arg_2, astroid.Assign):\n if not isinstance(arg_3, astroid.Assign):\n return\n\n # Check if we assign to the same value\n arg_7 = [\n target.name\n for target in arg_2.targets\n if isinstance(target, astroid.AssignName)\n ]\n arg_8 = [\n target.name\n for target in arg_3.targets\n if isinstance(target, astroid.AssignName)\n ]\n if not arg_7 or not arg_8:\n return\n if sorted(arg_7) != sorted(arg_8):\n return\n\n arg_4 = arg_0._is_bool_const(arg_2)\n arg_5 = arg_0._is_bool_const(arg_3)\n arg_6 = \"'var = bool(test)'\"\n else:\n return\n\n if not arg_4 or not arg_5:\n return\n if not arg_2.value.value:\n # This is a case that can't be easily simplified and\n # if it can be simplified, it will usually result in a\n # code that's harder to understand and comprehend.\n # Let's take for instance `arg and arg <= 3`. 
This could theoretically be\n # reduced to `not arg or arg > 3`, but the net result is that now the\n # condition is harder to understand, because it requires understanding of\n # an extra clause:\n # * first, there is the negation of truthness with `not arg`\n # * the second clause is `arg > 3`, which occurs when arg has a\n # a truth value, but it implies that `arg > 3` is equivalent\n # with `arg and arg > 3`, which means that the user must\n # think about this assumption when evaluating `arg > 3`.\n # The original form is easier to grasp.\n return\n\n arg_0.add_message(\"simplifiable-if-statement\", arg_1=arg_1, args=(arg_6,))"} +{"_id": "doc_4242", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if an exception of type StopIteration is raised inside a generator\"\"\"\n arg_2 = arg_1.frame()\n if not isinstance(arg_2, astroid.FunctionDef) or not arg_2.is_generator():\n return\n if utils.node_ignores_exception(arg_1, StopIteration):\n return\n if not arg_1.exc:\n return\n arg_3 = utils.safe_infer(arg_1.exc)\n if arg_3 is None or arg_3 is astroid.Uninferable:\n return\n if arg_0._check_exception_inherit_from_stopiteration(arg_3):\n arg_0.add_message(\"stop-iteration-return\", arg_1=arg_1)"} +{"_id": "doc_4243", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if a StopIteration exception is raised by the call to next function\n\n If the next value has a default value, then do not add message.\n\n :param node: Check to see if this Call node is a next function\n :type node: :class:`astroid.node_classes.Call`\n \"\"\"\n\n def _looks_like_infinite_iterator(arg_2):\n arg_3 = utils.safe_infer(arg_2)\n if arg_3:\n return arg_3.qname() in KNOWN_INFINITE_ITERATORS\n return False\n\n if isinstance(arg_1.func, astroid.Attribute):\n # A next() method, which is now what we want.\n return\n\n arg_3 = utils.safe_infer(arg_1.func)\n if getattr(arg_3, \"name\", \"\") == \"next\":\n arg_4 = arg_1.frame()\n # The next builtin can only have up to two\n # positional arguments and no keyword arguments\n arg_5 = len(arg_1.args) > 1\n if (\n isinstance(arg_4, astroid.FunctionDef)\n and arg_4.is_generator()\n and not arg_5\n and not utils.node_ignores_exception(arg_1, StopIteration)\n and not _looks_like_infinite_iterator(arg_1.args[0])\n ):\n arg_0.add_message(\"stop-iteration-return\", arg_1=arg_1)"} +{"_id": "doc_4244", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the duplicated types from the underlying isinstance calls.\n\n :param astroid.BoolOp node: Node which should contain a bunch of isinstance calls.\n :returns: Dictionary of the comparison objects from the isinstance calls,\n to duplicate values from consecutive calls.\n :rtype: dict\n \"\"\"\n arg_1 = set()\n arg_2 = collections.defaultdict(set)\n\n for arg_3 in arg_0.values:\n if not isinstance(arg_3, astroid.Call) or len(arg_3.args) != 2:\n continue\n\n arg_4 = utils.safe_infer(arg_3.func)\n if not arg_4 or not utils.is_builtin_object(arg_4):\n continue\n\n if arg_4.name != \"isinstance\":\n continue\n\n arg_5 = arg_3.args[0].as_string()\n arg_6 = arg_3.args[1]\n\n if arg_5 in arg_2:\n arg_1.add(arg_5)\n\n if isinstance(arg_6, astroid.Tuple):\n arg_7 = [\n class_type.as_string() for class_type in arg_6.itered()\n ]\n else:\n arg_7 = [arg_6.as_string()]\n arg_2[arg_5].update(arg_7)\n\n # Remove all keys which not duplicated\n return {\n arg_8: arg_9 for arg_8, arg_9 in arg_2.items() if arg_8 in arg_1\n }"} +{"_id": "doc_4245", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check isinstance calls which can be 
merged together.\"\"\"\n if arg_1.op != \"or\":\n return\n\n arg_2 = arg_0._duplicated_isinstance_types(arg_1)\n for arg_3, arg_4 in arg_2.items():\n arg_5 = sorted(name for name in arg_4)\n arg_0.add_message(\n \"consider-merging-isinstance\",\n arg_1=arg_1,\n args=(arg_3, \", \".join(arg_5)),\n )"} +{"_id": "doc_4246", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns true if node is 'condition and true_value or false_value' form.\n\n All of: condition, true_value and false_value should not be a complex boolean expression\n \"\"\"\n return (\n isinstance(arg_0, astroid.BoolOp)\n and arg_0.op == \"or\"\n and len(arg_0.values) == 2\n and isinstance(arg_0.values[0], astroid.BoolOp)\n and not isinstance(arg_0.values[1], astroid.BoolOp)\n and arg_0.values[0].op == \"and\"\n and not isinstance(arg_0.values[0].values[1], astroid.BoolOp)\n and len(arg_0.values[0].values) == 2\n )"} +{"_id": "doc_4247", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check that all return statements inside a function are consistent.\n\n Return statements are consistent if:\n - all returns are explicit and if there is no implicit return;\n - all returns are empty and if there is, possibly, an implicit return.\n\n Args:\n node (astroid.FunctionDef): the function holding the return statements.\n\n \"\"\"\n # explicit return statements are those with a not None value\n arg_2 = [\n _node for _node in arg_0._return_nodes[arg_1.name] if _node.value is not None\n ]\n if not arg_2:\n return\n if len(arg_2) == len(\n arg_0._return_nodes[arg_1.name]\n ) and arg_0._is_node_return_ended(arg_1):\n return\n arg_0.add_message(\"inconsistent-return-statements\", arg_1=arg_1)"} +{"_id": "doc_4248", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the node ends with an explicit return statement.\n\n Args:\n node (astroid.NodeNG): node to be checked.\n\n Returns:\n bool: True if the node ends with an explicit statement, False otherwise.\n\n \"\"\"\n # \u00a0Recursion base case\n if isinstance(arg_1, astroid.Return):\n return True\n if isinstance(arg_1, astroid.Call):\n try:\n arg_2 = arg_1.func.inferred()[0]\n if arg_0._is_function_def_never_returning(arg_2):\n return True\n except astroid.InferenceError:\n pass\n # Avoid the check inside while loop as we don't know\n # \u00a0if they will be completed\n if isinstance(arg_1, astroid.While):\n return True\n if isinstance(arg_1, astroid.Raise):\n # a Raise statement doesn't need to end with a return statement\n # but if the exception raised is handled, then the handler has to\n # ends with a return statement\n if not arg_1.exc:\n # Ignore bare raises\n return True\n if not utils.is_node_inside_try_except(arg_1):\n # If the raise statement is not inside a try/except statement\n # \u00a0then the exception is raised and cannot be caught. 
No need\n # \u00a0to infer it.\n return True\n arg_3 = utils.safe_infer(arg_1.exc)\n if arg_3 is None or arg_3 is astroid.Uninferable:\n return False\n arg_4 = arg_3.pytype().split(\".\")[-1]\n arg_5 = utils.get_exception_handlers(arg_1, arg_4)\n arg_5 = list(arg_5) if arg_5 is not None else []\n if arg_5:\n # among all the handlers handling the exception at least one\n # must end with a return statement\n return any(\n arg_0.Func(arg_6) for arg_6 in arg_5\n )\n # if no handlers handle the exception then it's ok\n return True\n if isinstance(arg_1, astroid.If):\n # if statement is returning if there are exactly two return statements in its\n # \u00a0children : one for the body part, the other for the orelse part\n # Do not check if inner function definition are return ended.\n arg_7 = any(\n arg_0.Func(_ore)\n for _ore in arg_1.orelse\n if not isinstance(_ore, astroid.FunctionDef)\n )\n arg_8 = any(\n arg_0.Func(_ifn)\n for _ifn in arg_1.body\n if not isinstance(_ifn, astroid.FunctionDef)\n )\n return arg_8 and arg_7\n # \u00a0recurses on the children of the node except for those which are except handler\n # because one cannot be sure that the handler will really be used\n return any(\n arg_0.Func(arg_9)\n for arg_9 in arg_1.get_children()\n if not isinstance(arg_9, astroid.ExceptHandler)\n )"} +{"_id": "doc_4249", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Emit a convention whenever range and len are used for indexing.\"\"\"\n # Verify that we have a `range([start], len(...), [stop])` call and\n # that the object which is iterated is used as a subscript in the\n # body of the for.\n\n # Is it a proper range call?\n if not isinstance(arg_1.iter, astroid.Call):\n return\n if not arg_0._is_builtin(arg_1.iter.func, \"range\"):\n return\n if len(arg_1.iter.args) == 2 and not _is_constant_zero(arg_1.iter.args[0]):\n return\n if len(arg_1.iter.args) > 2:\n return\n\n # Is it a proper len call?\n if not isinstance(arg_1.iter.args[-1], astroid.Call):\n return\n arg_2 = arg_1.iter.args[-1].func\n if not arg_0._is_builtin(arg_2, \"len\"):\n return\n arg_3 = arg_1.iter.args[-1].args\n if not arg_3 or len(arg_3) != 1:\n return\n arg_4 = arg_3[0]\n if not isinstance(arg_4, astroid.Name):\n return\n # If we're defining __iter__ on self, enumerate won't work\n arg_5 = arg_1.scope()\n if arg_4.name == \"self\" and arg_5.name == \"__iter__\":\n return\n\n # Verify that the body of the for loop uses a subscript\n # with the object that was iterated. This uses some heuristics\n # in order to make sure that the same object is used in the\n # for body.\n for arg_6 in arg_1.body:\n for arg_7 in arg_6.nodes_of_class(astroid.Subscript):\n if not isinstance(arg_7.value, astroid.Name):\n continue\n if not isinstance(arg_7.slice, astroid.Index):\n continue\n if not isinstance(arg_7.slice.value, astroid.Name):\n continue\n if arg_7.slice.value.name != arg_1.target.name:\n continue\n if arg_4.name != arg_7.value.name:\n continue\n if arg_7.value.scope() != arg_1.scope():\n # Ignore this subscript if it's not in the same\n # scope. 
This means that in the body of the for\n # loop, another scope was created, where the same\n # name for the iterating object was used.\n continue\n arg_0.add_message(\"consider-using-enumerate\", arg_1=arg_1)\n return"} +{"_id": "doc_4250", "title": "", "text": "def Func(arg_0):\n \"\"\"check if we need graphviz for different output format\"\"\"\n try:\n subprocess.call([\"dot\", \"-V\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except OSError:\n print(\n \"The output format '%s' is currently not available.\\n\"\n \"Please install 'Graphviz' to have other output formats \"\n \"than 'dot' or 'vcg'.\" % arg_0\n )\n sys.exit(32)"} +{"_id": "doc_4251", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"checking arguments and Func project\"\"\"\n if not arg_1:\n print(arg_0.help())\n return 1\n # insert current working directory to the python path to recognize\n # dependencies to local modules even if cwd is not in the PYTHONPATH\n sys.path.insert(0, os.getcwd())\n try:\n arg_2 = project_from_files(\n arg_1,\n project_name=arg_0.config.project,\n black_list=arg_0.config.black_list,\n )\n arg_3 = Linker(arg_2, tag=True)\n arg_4 = DiadefsHandler(arg_0.config)\n arg_5 = arg_4.get_diadefs(arg_2, arg_3)\n finally:\n sys.path.pop(0)\n\n if arg_0.config.output_format == \"vcg\":\n writer.VCGWriter(arg_0.config).write(arg_5)\n else:\n writer.DotWriter(arg_0.config).write(arg_5)\n return 0"} +{"_id": "doc_4252", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"write a class diagram\"\"\"\n # sorted to get predictable (hence testable) results\n for arg_2, arg_3 in enumerate(sorted(arg_1.objects, key=lambda x: x.title)):\n arg_0.printer.emit_node(arg_2, **arg_0.get_values(arg_3))\n arg_3.fig_id = arg_2\n # inheritance links\n for arg_5 in arg_1.get_relationships(\"specialization\"):\n arg_0.printer.emit_edge(\n arg_5.from_object.fig_id, arg_5.to_object.fig_id, **arg_0.inh_edges\n )\n # implementation links\n for arg_5 in arg_1.get_relationships(\"implements\"):\n arg_0.printer.emit_edge(\n arg_5.from_object.fig_id, arg_5.to_object.fig_id, **arg_0.imp_edges\n )\n # generate associations\n for arg_5 in arg_1.get_relationships(\"association\"):\n arg_0.printer.emit_edge(\n arg_5.from_object.fig_id,\n arg_5.to_object.fig_id,\n label=arg_5.name,\n **arg_0.association_edges\n )"} +{"_id": "doc_4253", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"initialize DotWriter and add options for layout.\n \"\"\"\n arg_3 = dict(rankdir=\"BT\")\n arg_0.printer = DotBackend(arg_2, additional_param=arg_3)\n arg_0.file_name = arg_1"} +{"_id": "doc_4254", "title": "", "text": "def Func(arg_0):\n \"\"\"return True if message may be emitted using the current interpreter\"\"\"\n if arg_0.minversion is not None and arg_0.minversion > sys.version_info:\n return False\n if arg_0.maxversion is not None and arg_0.maxversion <= sys.version_info:\n return False\n return True"} +{"_id": "doc_4255", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"return the help string for the given message id\"\"\"\n arg_2 = arg_0.descr\n if arg_1:\n arg_2 += \" This message belongs to the %s checker.\" % arg_0.checker.name\n arg_3 = arg_0.msg\n if arg_0.symbol:\n arg_4 = \"%s (%s)\" % (arg_0.symbol, arg_0.msgid)\n else:\n arg_4 = arg_0.msgid\n if arg_0.minversion or arg_0.maxversion:\n arg_5 = []\n if arg_0.minversion:\n arg_5.append(\"< %s\" % \".\".join([str(arg_6) for arg_6 in arg_0.minversion]))\n if arg_0.maxversion:\n arg_5.append(\">= %s\" % \".\".join([str(arg_6) for arg_6 in arg_0.maxversion]))\n 
arg_5 = \" or \".join(arg_5)\n if arg_1:\n arg_2 += \" It can't be emitted when using Python %s.\" % arg_5\n else:\n arg_2 += \" This message can't be emitted when using Python %s.\" % arg_5\n arg_2 = normalize_text(\" \".join(arg_2.split()), indent=\" \")\n if arg_3 != \"%s\":\n arg_3 = arg_3.splitlines()[0]\n\n return \":%s: *%s*\\n%s\" % (arg_4, arg_3.rstrip(\" \"), arg_2)\n return \":%s:\\n%s\" % (arg_4, arg_2)"} +{"_id": "doc_4256", "title": "", "text": "def Func():\n \"\"\"Extracts the environment PYTHONPATH and appends the current sys.path to\n those.\"\"\"\n arg_0 = dict(os.environ)\n arg_0[\"PYTHONPATH\"] = os.pathsep.join(sys.path)\n return arg_0"} +{"_id": "doc_4257", "title": "", "text": "def Func(arg_0, arg_1=()):\n \"\"\"PyFunc the given file.\n\n When run from emacs we will be in the directory of a file, and passed its\n filename. If this file is part of a package and is trying to import other\n modules from within its own package or another package rooted in a directory\n below it, pyFunc will classify it as a failed import.\n\n To get around this, we traverse down the directory tree to find the root of\n the package this module is in. We then invoke pyFunc from this directory.\n\n Finally, we must correct the filenames in the output generated by pyFunc so\n Emacs doesn't become confused (it will expect just the original filename,\n while pyFunc may extend it with extra directories if we've traversed down\n the tree)\n \"\"\"\n # traverse downwards until we are out of a python package\n arg_2 = osp.abspath(arg_0)\n arg_3 = osp.dirname(arg_2)\n arg_4 = osp.basename(arg_2)\n\n while arg_3 != \"/\" and osp.exists(osp.join(arg_3, \"__init__.py\")):\n arg_4 = osp.join(osp.basename(arg_3), arg_4)\n arg_3 = osp.dirname(arg_3)\n\n # Start pyFunc\n # Ensure we use the python and pyFunc associated with the running epyFunc\n arg_5 = \"import sys; from pyFunc.Func import Run; Run(sys.argv[1:])\"\n arg_6 = (\n [sys.executable, \"-c\", arg_5]\n + [\n \"--msg-template\",\n \"{path}:{line}: {category} ({msg_id}, {symbol}, {obj}) {msg}\",\n \"-r\",\n \"n\",\n arg_4,\n ]\n + list(arg_1)\n )\n arg_7 = Popen(\n arg_6, stdout=PIPE, cwd=arg_3, env=_get_env(), universal_newlines=True\n )\n\n for arg_8 in arg_7.stdout:\n # remove pyFuncrc warning\n if arg_8.startswith(\"No config file found\"):\n continue\n\n # modify the file name thats output to reverse the path traversal we made\n arg_9 = arg_8.split(\":\")\n if arg_9 and arg_9[0] == arg_4:\n arg_8 = \":\".join([arg_0] + arg_9[1:])\n print(arg_8, end=\" \")\n\n arg_7.wait()\n return arg_7.returncode"} +{"_id": "doc_4258", "title": "", "text": "def Func(arg_0=\"\", arg_1=False, arg_2=None, arg_3=None):\n \"\"\"Run pylint from python\n\n ``command_options`` is a string containing ``pylint`` command line options;\n ``return_std`` (boolean) indicates return of created standard output\n and error (see below);\n ``stdout`` and ``stderr`` are 'file-like' objects in which standard output\n could be written.\n\n Calling agent is responsible for stdout/err management (creation, close).\n Default standard output and error are those from sys,\n or standalone ones (``subprocess.PIPE``) are used\n if they are not set and ``return_std``.\n\n If ``return_std`` is set to ``True``, this function returns a 2-uple\n containing standard output and error related to created process,\n as follows: ``(stdout, stderr)``.\n\n To silently run Pylint on a module, and get its standard output and error:\n >>> (pylint_stdout, pylint_stderr) = Func( 'module_name.py', True)\n 
\"\"\"\n # Detect if we use Python as executable or not, else default to `python`\n arg_4 = sys.executable if \"python\" in sys.executable else \"python\"\n\n # Create command line to call pylint\n arg_5 = [arg_4, \"-c\", \"from pylint import epylint;epylint.Run()\"]\n arg_6 = shlex.split(arg_0, posix=not sys.platform.startswith(\"win\"))\n arg_7 = arg_5 + arg_6\n\n # Providing standard output and/or error if not set\n if arg_2 is None:\n if arg_1:\n arg_2 = PIPE\n else:\n arg_2 = sys.stdout\n if arg_3 is None:\n if arg_1:\n arg_3 = PIPE\n else:\n arg_3 = sys.stderr\n # Call pylint in a subprocess\n arg_8 = Popen(\n arg_7,\n shell=False,\n arg_2=arg_2,\n arg_3=arg_3,\n env=_get_env(),\n universal_newlines=True,\n )\n arg_9, arg_10 = arg_8.communicate()\n # Return standard output and error\n if arg_1:\n return StringIO(arg_9), StringIO(arg_10)\n return None"} +{"_id": "doc_4259", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"recursive function doing the real work for get_cycles\"\"\"\n if arg_4 in arg_1:\n arg_5 = [arg_4]\n for arg_6 in arg_1[::-1]:\n if arg_6 == arg_4:\n break\n arg_5.insert(0, arg_6)\n # make a canonical representation\n arg_7 = min(arg_5)\n arg_8 = arg_5.index(arg_7)\n arg_5 = arg_5[arg_8:] + arg_5[0:arg_8]\n # append it to result if not already in\n if arg_5 not in arg_3:\n arg_3.append(arg_5)\n return\n arg_1.append(arg_4)\n try:\n for arg_6 in arg_0[arg_4]:\n # don't check already visited nodes again\n if arg_6 not in arg_2:\n Func(arg_0, arg_1, arg_2, arg_3, arg_6)\n arg_2.add(arg_6)\n except KeyError:\n pass\n arg_1.pop()"} +{"_id": "doc_4260", "title": "", "text": "def Func(arg_0):\n \"\"\"returns self._source\"\"\"\n if arg_0._source is None:\n arg_0.emit(\"}\\n\")\n arg_0._source = \"\\n\".join(arg_0.lines)\n del arg_0.lines\n return arg_0._source"} +{"_id": "doc_4261", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Generates a graph file.\n\n :param str outputfile: filename and path [defaults to graphname.png]\n :param str dotfile: filename and path [defaults to graphname.dot]\n :param str mapfile: filename and path\n\n :rtype: str\n :return: a path to the Funcd file\n \"\"\"\n import subprocess # introduced in py 2.4\n\n arg_4 = arg_0.graphname\n if not arg_2:\n # if 'outputfile' is a dot file use it as 'dotfile'\n if arg_1 and arg_1.endswith(\".dot\"):\n arg_2 = arg_1\n else:\n arg_2 = \"%s.dot\" % arg_4\n if arg_1 is not None:\n arg_5, arg_6, arg_7 = target_info_from_filename(arg_1)\n if arg_7 != \"dot\":\n arg_8, arg_9 = tempfile.mkstemp(\".dot\", arg_4)\n os.close(arg_8)\n else:\n arg_9 = osp.join(arg_5, arg_2)\n else:\n arg_7 = \"png\"\n arg_8, arg_9 = tempfile.mkstemp(\".dot\", arg_4)\n arg_10, arg_1 = tempfile.mkstemp(\".png\", arg_4)\n os.close(arg_8)\n os.close(arg_10)\n arg_8 = codecs.open(arg_9, \"w\", encoding=\"utf8\")\n arg_8.write(arg_0.source)\n arg_8.close()\n if arg_7 != \"dot\":\n arg_11 = sys.platform == \"win32\"\n if arg_3:\n subprocess.call(\n [\n arg_0.renderer,\n \"-Tcmapx\",\n \"-o\",\n arg_3,\n \"-T\",\n arg_7,\n arg_9,\n \"-o\",\n arg_1,\n ],\n shell=arg_11,\n )\n else:\n subprocess.call(\n [arg_0.renderer, \"-T\", arg_7, arg_9, \"-o\", arg_1],\n shell=arg_11,\n )\n os.unlink(arg_9)\n return arg_1"} +{"_id": "doc_4262", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"If the msgid is a numeric one, then register it to inform the user\n it could furnish instead a symbolic msgid.\"\"\"\n try:\n arg_4 = 
arg_0.msgs_store.get_message_definitions(arg_1)\n for arg_5 in arg_4:\n if arg_1 == arg_5.msgid:\n MessagesHandlerMixIn.__by_id_managed_msgs.append(\n (\n arg_0.current_name,\n arg_5.msgid,\n arg_5.symbol,\n arg_2,\n arg_3,\n )\n )\n except UnknownMessageError:\n pass"} +{"_id": "doc_4263", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"package\", arg_3=None, arg_4=False):\n \"\"\"reFunc message of the given id\"\"\"\n arg_0._set_msg_status(\n arg_1, Func=True, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4\n )\n arg_0._register_by_id_managed_msg(arg_1, arg_3, is_disabled=False)"} +{"_id": "doc_4264", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the message symbol of the given message id\n\n Return the original message id if the message does not\n exist.\n \"\"\"\n try:\n return [arg_2.symbol for arg_2 in arg_0.msgs_store.get_message_definitions(arg_1)]\n except UnknownMessageError:\n return arg_1"} +{"_id": "doc_4265", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=arg_6,\n arg_7=None,\n ):\n \"\"\"Adds a message given by ID or name.\n\n If provided, the message string is expanded using args.\n\n AST checkers must provide the node argument (but may optionally\n provide line if the line number is different), raw and token checkers\n must provide the line argument.\n \"\"\"\n arg_8 = arg_0.msgs_store.get_message_definitions(arg_1)\n for arg_9 in arg_8:\n arg_0.add_one_message(\n arg_9, arg_2, arg_3, arg_4, arg_5, arg_7\n )"} +{"_id": "doc_4266", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"output a full documentation in ReST format\"\"\"\n if not arg_1:\n arg_1 = sys.stdout\n\n print(\"Pylint global options and switches\", file=arg_1)\n print(\"----------------------------------\", file=arg_1)\n print(\"\", file=arg_1)\n print(\"Pylint provides global options and switches.\", file=arg_1)\n print(\"\", file=arg_1)\n\n arg_2 = {}\n for arg_3 in arg_0.get_checkers():\n if arg_3.name == \"master\":\n if arg_3.options:\n for arg_4, arg_5 in arg_3.options_by_section():\n if arg_4 is None:\n arg_6 = \"General options\"\n else:\n arg_6 = \"%s options\" % arg_4.capitalize()\n print(arg_6, file=arg_1)\n print(\"~\" * len(arg_6), file=arg_1)\n _rest_format_section(arg_1, None, arg_5)\n print(\"\", file=arg_1)\n else:\n arg_7 = arg_3.name\n try:\n arg_2[arg_7][\"options\"] += arg_3.options_and_values()\n arg_2[arg_7][\"msgs\"].update(arg_3.msgs)\n arg_2[arg_7][\"reports\"] += arg_3.reports\n except KeyError:\n arg_2[arg_7] = {\n \"options\": list(arg_3.options_and_values()),\n \"msgs\": dict(arg_3.msgs),\n \"reports\": list(arg_3.reports),\n }\n\n print(\"Pylint checkers' options and switches\", file=arg_1)\n print(\"-------------------------------------\", file=arg_1)\n print(\"\", file=arg_1)\n print(\"Pylint checkers can provide three set of features:\", file=arg_1)\n print(\"\", file=arg_1)\n print(\"* options that control their execution,\", file=arg_1)\n print(\"* messages that they can raise,\", file=arg_1)\n print(\"* reports that they can generate.\", file=arg_1)\n print(\"\", file=arg_1)\n print(\"Below is a list of all checkers and their features.\", file=arg_1)\n print(\"\", file=arg_1)\n\n for arg_3, arg_8 in sorted(arg_2.items()):\n arg_0._print_checker_doc(arg_3, arg_8, arg_1=arg_1)"} +{"_id": "doc_4267", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a line with |s for each of the positions in the given lists.\"\"\"\n if not arg_0:\n return (\"\", \"\")\n # TODO tabs should not be replaced by some 
random (8) number of spaces\n arg_0 = [_get_indent_length(indent) for indent in arg_0]\n arg_1 = _get_indent_length(arg_1)\n arg_2 = \"\"\n arg_3 = [(pos, \"|\") for pos in arg_0]\n if len(arg_3) == 1:\n # if we have only one marker we'll provide an extra hint on how to fix\n arg_4 = arg_3[0][0]\n arg_5 = abs(arg_4 - arg_1)\n arg_6 = \"add\" if arg_4 > arg_1 else \"remove\"\n arg_2 = _CONTINUATION_HINT_MESSAGE % (\n arg_6,\n arg_5,\n \"s\" if arg_5 > 1 else \"\",\n )\n arg_3.append((arg_1, \"^\"))\n arg_3.sort()\n arg_7 = [\" \"] * (arg_3[-1][0] + 1)\n for arg_8, arg_9 in arg_3:\n arg_7[arg_8] = arg_9\n return (\"\".join(arg_7), arg_2)"} +{"_id": "doc_4268", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get an indentation string for hanging indentation, consisting of the line-indent plus\n a number of spaces to fill up to the column of this token.\n\n e.g. the token indent for foo\n in \"print(foo)\"\n is \" \"\n \"\"\"\n arg_2 = arg_0.line_indent(arg_1)\n return arg_2 + \" \" * (arg_0.start_col(arg_1) - len(arg_2))"} +{"_id": "doc_4269", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the valid offsets for the token at the given position.\"\"\"\n # The closing brace on a dict or the 'for' in a dict comprehension may\n # reset two indent levels because the dict value is ended implicitly\n arg_2 = -1\n if (\n arg_0._tokens.token(arg_1) in (\"}\", \"for\")\n and arg_0._cont_stack[-1].token == \":\"\n ):\n arg_2 = -2\n arg_3 = arg_0._cont_stack[arg_2]\n if arg_0._tokens.token(arg_1) in _CLOSING_BRACKETS:\n arg_4 = arg_3.valid_outdent_strings\n else:\n arg_4 = arg_3.valid_continuation_strings\n return arg_3, arg_4.copy()"} +{"_id": "doc_4270", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Extracts indentation information for a hanging indent\n\n Case of hanging indent after a bracket (including parenthesis)\n\n :param str bracket: bracket in question\n :param int position: Position of bracket in self._tokens\n\n :returns: the state and valid positions for hanging indentation\n :rtype: _ContinuedIndent\n \"\"\"\n arg_3 = arg_0._tokens.line_indent(arg_2)\n if (\n arg_0._is_block_opener\n and arg_0._continuation_string == arg_0._block_indent_string\n ):\n return _ContinuedIndent(\n HANGING_BLOCK,\n arg_1,\n arg_2,\n _Indentations(arg_3 + arg_0._continuation_string, arg_3),\n _BeforeBlockIndentations(\n arg_3 + arg_0._continuation_string,\n arg_3 + arg_0._continuation_string * 2,\n ),\n )\n if arg_1 == \":\":\n # If the dict key was on the same line as the open brace, the new\n # correct indent should be relative to the key instead of the\n # current indent level\n arg_4 = arg_0._cont_stack[-1].valid_outdent_strings\n arg_5 = arg_0._cont_stack[-1].valid_continuation_strings.copy()\n arg_6 = list(arg_5.keys())\n arg_5[arg_6[0] + arg_0._continuation_string] = True\n # Note that the continuation of\n # d = {\n # 'a': 'b'\n # 'c'\n # }\n # is handled by the special-casing for hanging continued string indents.\n return _ContinuedIndent(\n HANGING_DICT_VALUE, arg_1, arg_2, arg_4, arg_5\n )\n return _ContinuedIndent(\n HANGING,\n arg_1,\n arg_2,\n _Indentations(arg_3, arg_3 + arg_0._continuation_string),\n _Indentations(arg_3 + arg_0._continuation_string),\n )"} +{"_id": "doc_4271", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Extracts indentation information for a continued indent.\"\"\"\n arg_3 = arg_0._tokens.line_indent(arg_2)\n arg_4 = arg_0._tokens.token_indent(arg_2)\n arg_5 = arg_0._tokens.token_indent(arg_2 + 1)\n if (\n 
arg_0._is_block_opener\n and arg_5 == arg_3 + arg_0._block_indent_string\n ):\n return _ContinuedIndent(\n CONTINUED_BLOCK,\n arg_1,\n arg_2,\n _Indentations(arg_4),\n _BeforeBlockIndentations(\n arg_5, arg_5 + arg_0._continuation_string\n ),\n )\n return _ContinuedIndent(\n CONTINUED,\n arg_1,\n arg_2,\n _Indentations(arg_4, arg_5),\n _Indentations(arg_5),\n )"} +{"_id": "doc_4272", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Pushes a new token for continued indentation on the stack.\n\n Tokens that can modify continued indentation offsets are:\n * opening brackets\n * 'lambda'\n * : inside dictionaries\n\n Func relies on the caller to filter out those\n interesting tokens.\n\n :param int token: The concrete token\n :param int position: The position of the token in the stream.\n \"\"\"\n if _token_followed_by_eol(arg_0._tokens, arg_2):\n arg_0._cont_stack.append(arg_0._hanging_indent_after_bracket(arg_1, arg_2))\n else:\n arg_0._cont_stack.append(arg_0._continuation_inside_bracket(arg_1, arg_2))"} +{"_id": "doc_4273", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"a new line has been encountered, process it if necessary\"\"\"\n if _last_token_on_line_is(arg_1, arg_2, \";\"):\n arg_0.add_message(\"unnecessary-semicolon\", arg_5=arg_1.start_line(arg_2))\n\n arg_4 = arg_1.start_line(arg_3)\n arg_5 = arg_1.line(arg_3)\n if arg_1.type(arg_3) not in _JUNK_TOKENS:\n arg_0._lines[arg_4] = arg_5.split(\"\\n\")[0]\n arg_0.check_lines(arg_5, arg_4)"} +{"_id": "doc_4274", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check that there are not unnecessary parens after a keyword.\n\n Parens are unnecessary if there is exactly one balanced outer pair on a\n line, and it is followed by a colon, and contains no commas (i.e. 
is not a\n tuple).\n\n Args:\n tokens: list of Tokens; the entire list of Tokens.\n start: int; the position of the keyword in the token list.\n \"\"\"\n # If the next token is not a paren, we're fine.\n if arg_0._inside_brackets(\":\") and arg_1[arg_2][1] == \"for\":\n arg_0._pop_token()\n if arg_1[arg_2 + 1][1] != \"(\":\n return\n\n arg_3 = False\n arg_4 = 0\n arg_5 = str(arg_1[arg_2][1])\n arg_6 = arg_1[arg_2][2][0]\n\n for arg_7 in range(arg_2, len(arg_1) - 1):\n arg_8 = arg_1[arg_7]\n\n # If we hit a newline, then assume any parens were for continuation.\n if arg_8[0] == tokenize.NL:\n return\n\n if arg_8[1] == \"(\":\n arg_4 += 1\n elif arg_8[1] == \")\":\n arg_4 -= 1\n if arg_4:\n continue\n # ')' can't happen after if (foo), since it would be a syntax error.\n if arg_1[arg_7 + 1][1] in (\":\", \")\", \"]\", \"}\", \"in\") or arg_1[arg_7 + 1][\n 0\n ] in (tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT):\n # The empty tuple () is always accepted.\n if arg_7 == arg_2 + 2:\n return\n if arg_5 == \"not\":\n if not arg_3:\n arg_0.add_message(\n \"superfluous-parens\", line=arg_6, args=arg_5\n )\n elif arg_5 in (\"return\", \"yield\"):\n arg_0.add_message(\n \"superfluous-parens\", line=arg_6, args=arg_5\n )\n elif arg_5 not in arg_0._keywords_with_parens:\n if not arg_3:\n arg_0.add_message(\n \"superfluous-parens\", line=arg_6, args=arg_5\n )\n return\n elif arg_4 == 1:\n # This is a tuple, which is always acceptable.\n if arg_8[1] == \",\":\n return\n # 'and' and 'or' are the only boolean operators with lower precedence\n # than 'not', so parens are only required when they are found.\n if arg_8[1] in (\"and\", \"or\"):\n arg_3 = True\n # A yield inside an expression must always be in parentheses,\n # quit early without error.\n elif arg_8[1] == \"yield\":\n return\n # A generator expression always has a 'for' token in it, and\n # the 'for' token is only legal inside parens when it is in a\n # generator expression. The parens are necessary here, so bail\n # without an error.\n elif arg_8[1] == \"for\":\n return"} +{"_id": "doc_4275", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Extended check of PEP-484 type hint presence\"\"\"\n if not arg_0._inside_brackets(\"(\"):\n return False\n # token_info\n # type string start end line\n # 0 1 2 3 4\n arg_3 = 0\n for arg_4 in arg_1[arg_2 - 1 :: -1]:\n if arg_4[1] == \":\":\n return True\n if arg_4[1] == \"(\":\n return False\n if arg_4[1] == \"]\":\n arg_3 += 1\n elif arg_4[1] == \"[\":\n arg_3 -= 1\n elif arg_4[1] == \",\":\n if not arg_3:\n return False\n elif arg_4[1] in (\".\", \"...\"):\n continue\n elif arg_4[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):\n return False\n return False"} +{"_id": "doc_4276", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check the node line number and check it if not yet done\"\"\"\n if not arg_1.is_statement:\n return\n if not arg_1.root().pure_python:\n return # XXX block visit of child nodes\n arg_2 = arg_1.previous_sibling()\n if arg_2 is not None:\n arg_3 = arg_2.fromlineno\n else:\n # The line on which a finally: occurs in a try/finally\n # is not directly represented in the AST. 
We infer it\n # by taking the last line of the body and adding 1, which\n # should be the line of finally:\n if (\n isinstance(arg_1.parent, nodes.TryFinally)\n and arg_1 in arg_1.parent.finalbody\n ):\n arg_3 = arg_1.parent.body[0].tolineno + 1\n else:\n arg_3 = arg_1.parent.statement().fromlineno\n arg_4 = arg_1.fromlineno\n assert arg_4, arg_1\n if arg_3 == arg_4 and arg_0._visited_lines.get(arg_4) != 2:\n arg_0._check_multi_statement_line(arg_1, arg_4)\n return\n if arg_4 in arg_0._visited_lines:\n return\n try:\n arg_5 = arg_1.blockstart_tolineno\n except AttributeError:\n arg_5 = arg_1.tolineno\n assert arg_5, arg_1\n arg_6 = []\n for arg_4 in range(arg_4, arg_5 + 1):\n arg_0._visited_lines[arg_4] = 1\n try:\n arg_6.append(arg_0._lines[arg_4].rstrip())\n except KeyError:\n arg_6.append(\"\")"} +{"_id": "doc_4277", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check for lines containing multiple statements.\"\"\"\n # Do not warn about multiple nested context managers\n # in with statements.\n if isinstance(arg_1, nodes.With):\n return\n # For try... except... finally..., the two nodes\n # appear to be on the same line due to how the AST is built.\n if isinstance(arg_1, nodes.TryExcept) and isinstance(\n arg_1.parent, nodes.TryFinally\n ):\n return\n if (\n isinstance(arg_1.parent, nodes.If)\n and not arg_1.parent.orelse\n and arg_0.config.single_line_if_stmt\n ):\n return\n if (\n isinstance(arg_1.parent, nodes.ClassDef)\n and len(arg_1.parent.body) == 1\n and arg_0.config.single_line_class_stmt\n ):\n return\n arg_0.add_message(\"multiple-statements\", arg_1=arg_1)\n arg_0._visited_lines[arg_2] = 2"} +{"_id": "doc_4278", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"check lines have less than a maximum number of characters\n \"\"\"\n arg_3 = arg_0.config.max_line_length\n arg_4 = arg_0.config.ignore_long_lines\n\n def check_line(arg_5, arg_2):\n if not arg_5.endswith(\"\\n\"):\n arg_0.add_message(\"missing-final-newline\", arg_5=arg_2)\n else:\n # exclude \\f (formfeed) from the rstrip\n arg_6 = arg_5.rstrip(\"\\t\\n\\r\\v \")\n if not arg_6 and _EMPTY_LINE in arg_0.config.no_space_check:\n # allow empty lines\n pass\n elif arg_5[len(arg_6) :] not in (\"\\n\", \"\\r\\n\"):\n arg_0.add_message(\n \"trailing-whitespace\", arg_5=arg_2, col_offset=len(arg_6)\n )\n # Don't count excess whitespace in the line length.\n arg_5 = arg_6\n arg_7 = OPTION_RGX.search(arg_5)\n if arg_7 and \"=\" in arg_5:\n arg_8, arg_9, arg_10 = arg_7.group(1).partition(\"=\")\n if arg_8.strip() == \"disable\":\n if \"line-too-long\" in {\n arg_11.strip() for arg_11 in arg_10.split(\",\")\n }:\n return None\n arg_5 = arg_5.rsplit(\"#\", 1)[0].rstrip()\n\n if len(arg_5) > arg_3 and not arg_4.search(arg_5):\n arg_0.add_message(\"line-too-long\", arg_5=arg_2, args=(len(arg_5), arg_3))\n return arg_2 + 1\n\n arg_12 = {\n \"\\v\",\n \"\\x0b\",\n \"\\f\",\n \"\\x0c\",\n \"\\x1c\",\n \"\\x1d\",\n \"\\x1e\",\n \"\\x85\",\n \"\\u2028\",\n \"\\u2029\",\n }\n arg_13 = []\n for arg_5 in arg_1.splitlines(True):\n if arg_5[-1] in arg_12:\n arg_13.append(arg_5)\n continue\n\n if arg_13:\n arg_13.append(arg_5)\n arg_5 = \"\".join(arg_13)\n arg_13 = []\n\n arg_2 = check_line(arg_5, arg_2)\n if arg_2 is None:\n break\n\n if arg_13:\n check_line(\"\".join(arg_13), arg_2)"} +{"_id": "doc_4279", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"return the indent level of the string\n \"\"\"\n arg_4 = arg_0.config.indent_string\n if arg_4 == \"\\\\t\": # \\t is not interpreted in the 
configuration file\n arg_4 = \"\\t\"\n arg_5 = 0\n arg_6 = len(arg_4)\n while arg_1[:arg_6] == arg_4:\n arg_1 = arg_1[arg_6:]\n arg_5 += 1\n arg_7 = \"\"\n while arg_1 and arg_1[0] in \" \\t\":\n if arg_1[0] != arg_4[0]:\n if arg_1[0] == \"\\t\":\n arg_8 = (\"tab\", \"space\")\n else:\n arg_8 = (\"space\", \"tab\")\n arg_0.add_message(\"mixed-indentation\", arg_8=arg_8, line=arg_3)\n return arg_5\n arg_7 += arg_1[0]\n arg_1 = arg_1[1:]\n if arg_5 != arg_2 or arg_7:\n arg_9 = \"spaces\"\n if arg_4[0] == \"\\t\":\n arg_9 = \"tabs\"\n arg_0.add_message(\n \"bad-indentation\",\n line=arg_3,\n arg_8=(arg_5 * arg_6 + len(arg_7), arg_9, arg_2 * arg_6),\n )\n return None"} +{"_id": "doc_4280", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks if an import node is in the context of a conditional.\n \"\"\"\n arg_1 = arg_0.parent\n return isinstance(\n arg_1, (astroid.TryExcept, astroid.ExceptHandler, astroid.If, astroid.IfExp)\n )"} +{"_id": "doc_4281", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Look for indexing exceptions. \"\"\"\n try:\n for arg_2 in arg_1.value.infer():\n if not isinstance(arg_2, astroid.Instance):\n continue\n if utils.inherit_from_std_ex(arg_2):\n arg_0.add_message(\"indexing-exception\", arg_1=arg_1)\n except astroid.InferenceError:\n return"} +{"_id": "doc_4282", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Visit an except handler block and check for exception unpacking.\"\"\"\n\n def _is_used_in_except_block(arg_1):\n arg_2 = arg_1.scope()\n arg_3 = arg_1\n while (\n arg_3\n and arg_3 != arg_2\n and not isinstance(arg_3, astroid.ExceptHandler)\n ):\n arg_3 = arg_3.parent\n return isinstance(arg_3, astroid.ExceptHandler) and arg_3.type != arg_1\n\n if isinstance(arg_1.name, (astroid.Tuple, astroid.List)):\n arg_0.add_message(\"unpacking-in-except\", arg_1=arg_1)\n return\n\n if not arg_1.name:\n return\n\n # Find any names\n arg_2 = arg_1.parent.scope()\n arg_4 = arg_2.nodes_of_class(astroid.Name, skip_klass=astroid.FunctionDef)\n arg_4 = list(arg_4)\n arg_5 = [\n scope_name\n for scope_name in arg_4\n if scope_name.name == arg_1.name.name\n and scope_name.lineno > arg_1.lineno\n and not _is_used_in_except_block(scope_name)\n ]\n arg_6 = {\n assign_name.lineno\n for assign_name in arg_2.nodes_of_class(\n astroid.AssignName, skip_klass=astroid.FunctionDef\n )\n if assign_name.name == arg_1.name.name\n }\n for arg_7 in arg_5:\n if any(\n arg_1.lineno < arg_8 < arg_7.lineno\n for arg_8 in arg_6\n ):\n continue\n arg_0.add_message(\"exception-escape\", arg_1=arg_7)"} +{"_id": "doc_4283", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Visit a raise statement and check for raising\n strings or old-raise-syntax.\n \"\"\"\n\n # Ignore empty raise.\n if arg_1.exc is None:\n return\n arg_2 = arg_1.exc\n if arg_0._check_raise_value(arg_1, arg_2):\n return\n try:\n arg_3 = next(astroid.unpack_infer(arg_2))\n except astroid.InferenceError:\n return\n arg_0._check_raise_value(arg_1, arg_3)"} +{"_id": "doc_4284", "title": "", "text": "def Func():\n \"\"\"search the pylint rc file and return its path if it find it, else None\n \"\"\"\n # is there a pylint rc file in the current directory ?\n if os.path.exists(\"pylintrc\"):\n return os.path.abspath(\"pylintrc\")\n if os.path.exists(\".pylintrc\"):\n return os.path.abspath(\".pylintrc\")\n if os.path.isfile(\"__init__.py\"):\n arg_0 = os.path.abspath(os.getcwd())\n while os.path.isfile(os.path.join(arg_0, \"__init__.py\")):\n arg_0 = os.path.abspath(os.path.join(arg_0, \"..\"))\n if 
os.path.isfile(os.path.join(arg_0, \"pylintrc\")):\n return os.path.join(arg_0, \"pylintrc\")\n if os.path.isfile(os.path.join(arg_0, \".pylintrc\")):\n return os.path.join(arg_0, \".pylintrc\")\n if \"PYLINTRC\" in os.environ and os.path.exists(os.environ[\"PYLINTRC\"]):\n arg_1 = os.environ[\"PYLINTRC\"]\n else:\n arg_2 = os.path.expanduser(\"~\")\n if arg_2 in (\"~\", \"/root\"):\n arg_1 = \".pylintrc\"\n else:\n arg_1 = os.path.join(arg_2, \".pylintrc\")\n if not os.path.isfile(arg_1):\n arg_1 = os.path.join(arg_2, \".config\", \"pylintrc\")\n if not os.path.isfile(arg_1):\n if os.path.isfile(\"/etc/pylintrc\"):\n arg_1 = \"/etc/pylintrc\"\n else:\n arg_1 = None\n return arg_1"} +{"_id": "doc_4285", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"\"):\n \"\"\"return a validated value for an option according to its type\n\n optional argument name is only used for error message formatting\n \"\"\"\n try:\n arg_3 = arg_1[\"type\"]\n except KeyError:\n # FIXME\n return arg_0\n return _call_validator(arg_3, arg_1, arg_2, arg_0)"} +{"_id": "doc_4286", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"optik callback for option setting\"\"\"\n if arg_2.startswith(\"--\"):\n # remove -- on long option\n arg_2 = arg_2[2:]\n else:\n # short option, get its long equivalent\n arg_2 = arg_0._short_options[arg_2[1:]]\n # trick since we can't set action='store_true' on options\n if arg_3 is None:\n arg_3 = 1\n arg_0.global_set_option(arg_2, arg_3)"} +{"_id": "doc_4287", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=(), arg_3=None):\n \"\"\"write a configuration file according to the current configuration\n into the given stream or stdout\n \"\"\"\n arg_4 = {}\n arg_5 = []\n for arg_6 in arg_0.options_providers:\n for arg_7, arg_8 in arg_6.options_by_section():\n if arg_7 is None:\n arg_7 = arg_6.name\n if arg_7 in arg_2:\n continue\n arg_8 = [\n (n, d, v)\n for (n, d, v) in arg_8\n if d.get(\"type\") is not None and not d.get(\"deprecated\")\n ]\n if not arg_8:\n continue\n if arg_7 not in arg_5:\n arg_5.append(arg_7)\n arg_9 = arg_4.setdefault(arg_7, [])\n arg_9 += arg_8\n arg_1 = arg_1 or sys.stdout\n arg_10 = False\n for arg_7 in arg_5:\n if arg_10:\n print(\"\\n\", file=arg_1)\n utils.format_section(\n arg_1, arg_7.upper(), sorted(arg_4[arg_7])\n )\n arg_10 = True"} +{"_id": "doc_4288", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"return the usage string for available options \"\"\"\n arg_0.cmdline_parser.formatter.output_level = arg_1\n with _patch_optparse():\n return arg_0.cmdline_parser.format_Func()"} +{"_id": "doc_4289", "title": "", "text": "def Func(arg_0):\n \"\"\"initialize the provider using default values\"\"\"\n for arg_1, arg_2 in arg_0.options:\n arg_3 = arg_2.get(\"action\")\n if arg_3 != \"callback\":\n # callback action have no default\n if arg_2 is None:\n arg_2 = arg_0.get_option_def(arg_1)\n arg_4 = arg_2.get(\"default\")\n arg_0.set_option(arg_1, arg_4, arg_3, arg_2)"} +{"_id": "doc_4290", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return the dictionary defining an option given its name\"\"\"\n assert arg_0.options\n for arg_2 in arg_0.options:\n if arg_2[0] == arg_1:\n return arg_2[1]\n raise optparse.OptionError(\n \"no such option %s in section %r\" % (arg_1, arg_0.name), arg_1\n )"} +{"_id": "doc_4291", "title": "", "text": "def Func(arg_0):\n \"\"\"return an iterator on options grouped by section\n\n (section, [list of (optname, optdict, optvalue)])\n \"\"\"\n arg_1 = {}\n for arg_2, arg_3 in arg_0.options:\n 
arg_1.setdefault(arg_3.get(\"group\"), []).append(\n (arg_2, arg_3, arg_0.option_value(arg_2))\n )\n if None in arg_1:\n yield None, arg_1.pop(None)\n for arg_4, arg_5 in sorted(arg_1.items()):\n yield arg_4.upper(), arg_5"} +{"_id": "doc_4292", "title": "", "text": "def Func(arg_0, arg_1=(), arg_2=()):\n \"\"\"Determines if a BoundMethod node represents a method call.\n\n Args:\n func (astroid.BoundMethod): The BoundMethod AST node to check.\n types (Optional[String]): Optional sequence of caller type names to restrict check.\n methods (Optional[String]): Optional sequence of method names to restrict check.\n\n Returns:\n bool: true if the node represents a method call for the given type and\n method names, False otherwise.\n \"\"\"\n return (\n isinstance(arg_0, astroid.BoundMethod)\n and isinstance(arg_0.bound, astroid.Instance)\n and (arg_0.bound.name in arg_1 if arg_1 else True)\n and (arg_0.name in arg_2 if arg_2 else True)\n )"} +{"_id": "doc_4293", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks if node represents a string with complex formatting specs.\n\n Args:\n node (astroid.node_classes.NodeNG): AST node to check\n Returns:\n bool: True if inferred string uses complex formatting, False otherwise\n \"\"\"\n arg_1 = utils.safe_infer(arg_0)\n if arg_1 is None or not isinstance(arg_1.value, str):\n return True\n try:\n arg_2 = list(string.Formatter().parse(arg_1.value))\n except ValueError:\n # This format string is invalid\n return False\n for arg_3, arg_3, arg_4, arg_3 in arg_2:\n if arg_4:\n return True\n return False"} +{"_id": "doc_4294", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks to see if a module uses a non-Python logging module.\"\"\"\n try:\n arg_2 = arg_0._from_imports[arg_1.modname]\n for arg_3, arg_4 in arg_1.names:\n if arg_3 == arg_2:\n arg_0._logging_names.add(arg_4 or arg_3)\n except KeyError:\n pass"} +{"_id": "doc_4295", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks calls to logging methods.\"\"\"\n\n def is_logging_name():\n return (\n isinstance(arg_1.func, astroid.Attribute)\n and isinstance(arg_1.func.expr, astroid.Name)\n and arg_1.func.expr.name in arg_0._logging_names\n )\n\n def is_logger_class():\n try:\n for arg_2 in arg_1.func.infer():\n if isinstance(arg_2, astroid.BoundMethod):\n arg_3 = arg_2._proxied.parent\n if isinstance(arg_3, astroid.ClassDef) and (\n arg_3.qname() == \"logging.Logger\"\n or any(\n arg_4.qname() == \"logging.Logger\"\n for arg_4 in arg_3.ancestors()\n )\n ):\n return True, arg_2._proxied.name\n except astroid.exceptions.InferenceError:\n pass\n return False, None\n\n if is_logging_name():\n arg_5 = arg_1.func.attrname\n else:\n arg_6, arg_5 = is_logger_class()\n if not arg_6:\n return\n arg_0._check_log_method(arg_1, arg_5)"} +{"_id": "doc_4296", "title": "", "text": "def Func(arg_0):\n \"\"\"return True if the node is inside a kind of for loop\"\"\"\n arg_1 = arg_0.parent\n while arg_1 is not None:\n if isinstance(\n arg_1,\n (\n astroid.For,\n astroid.ListComp,\n astroid.SetComp,\n astroid.DictComp,\n astroid.GeneratorExp,\n ),\n ):\n return True\n arg_1 = arg_1.parent\n return False"} +{"_id": "doc_4297", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the loop node that holds the break node in arguments.\n\n Args:\n break_node (astroid.Break): the break node of interest.\n\n Returns:\n astroid.For or astroid.While: the loop node holding the break node.\n \"\"\"\n arg_1 = (astroid.For, astroid.While)\n arg_2 = arg_0.parent\n while not isinstance(arg_2, arg_1) or arg_0 in 
getattr(\n arg_2, \"orelse\", []\n ):\n arg_0 = arg_2\n arg_2 = arg_2.parent\n if arg_2 is None:\n break\n return arg_2"} +{"_id": "doc_4298", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns true if a loop may ends up in a break statement.\n\n Args:\n loop (astroid.For, astroid.While): the loop node inspected.\n\n Returns:\n bool: True if the loop may ends up in a break statement, False otherwise.\n \"\"\"\n arg_1 = (astroid.For, astroid.While)\n arg_2 = (astroid.FunctionDef, astroid.ClassDef)\n arg_3 = [\n arg_4\n for arg_4 in arg_0.nodes_of_class(arg_1, skip_klass=arg_2)\n if arg_4 != arg_0\n ]\n return any(\n arg_4\n for arg_4 in arg_0.nodes_of_class(astroid.Break, skip_klass=arg_2)\n if _get_break_loop_node(arg_4) not in arg_3\n )"} +{"_id": "doc_4299", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a tuple of property classes and names.\n\n Property classes are fully qualified, such as 'abc.abstractproperty' and\n property names are the actual names, such as 'abstract_property'.\n \"\"\"\n arg_1 = {BUILTIN_PROPERTY}\n arg_2 = set() # Not returning 'property', it has its own check.\n if arg_0 is not None:\n arg_1.update(arg_0.property_classes)\n arg_2.update(\n (arg_3.rsplit(\".\", 1)[-1] for arg_3 in arg_0.property_classes)\n )\n return arg_1, arg_2"} +{"_id": "doc_4300", "title": "", "text": "def Func(arg_0):\n \"\"\"return True if the object is a method redefined via decorator.\n\n For example:\n @property\n def x(self): return self._x\n @x.setter\n def x(self, value): self._x = value\n \"\"\"\n if arg_0.decorators:\n for arg_1 in arg_0.decorators.nodes:\n if (\n isinstance(arg_1, astroid.Attribute)\n and getattr(arg_1.expr, \"name\", None) == arg_0.name\n ):\n return True\n return False"} +{"_id": "doc_4301", "title": "", "text": "def Func(arg_0):\n \"\"\"Is this a call with exactly 1 argument,\n where that argument is positional?\n \"\"\"\n return isinstance(arg_0, astroid.Call) and len(arg_0.args) == 1 and not arg_0.keywords"} +{"_id": "doc_4302", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Check instantiating abstract class with\n abc.ABCMeta as metaclass.\n \"\"\"\n try:\n for arg_2 in arg_1.func.infer():\n arg_0._check_inferred_class_is_abstract(arg_2, arg_1)\n except astroid.InferenceError:\n return"} +{"_id": "doc_4303", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check that any loop with an else clause has a break statement.\"\"\"\n if arg_1.orelse and not _loop_exits_early(arg_1):\n arg_0.add_message(\n \"useless-else-on-loop\",\n arg_1=arg_1,\n # This is not optimal, but the line previous\n # to the first statement in the else clause\n # will usually be the one that contains the else:.\n line=arg_1.orelse[0].lineno - 1,\n )"} +{"_id": "doc_4304", "title": "", "text": "def Func(arg_0):\n \"\"\"initialize visit variables and statistics\n \"\"\"\n arg_0._tryfinallys = []\n arg_0.stats = arg_0.linter.add_stats(module=0, function=0, method=0, class_=0)"} +{"_id": "doc_4305", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check for various kind of statements without effect\"\"\"\n arg_2 = arg_1.value\n if isinstance(arg_2, astroid.Const) and isinstance(arg_2.value, str):\n # treat string statement in a separated message\n # Handle PEP-257 attribute docstrings.\n # An attribute docstring is defined as being a string right after\n # an assignment at the module level, class level or __init__ level.\n arg_3 = arg_2.scope()\n if isinstance(\n arg_3, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)\n ):\n if isinstance(arg_3, 
astroid.FunctionDef) and arg_3.name != \"__init__\":\n pass\n else:\n arg_4 = arg_2.previous_sibling()\n if (\n arg_4 is not None\n and arg_4.scope() is arg_3\n and isinstance(arg_4, (astroid.Assign, astroid.AnnAssign))\n ):\n return\n arg_0.add_message(\"pointless-string-statement\", arg_1=arg_1)\n return\n\n # Ignore if this is :\n # * a direct function call\n # * the unique child of a try/except body\n # * a yieldd statement\n # * an ellipsis (which can be used on Python 3 instead of pass)\n # warn W0106 if we have any underlying function call (we can't predict\n # side effects), else pointless-statement\n if isinstance(\n arg_2, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)\n ) or (\n isinstance(arg_1.parent, astroid.TryExcept) and arg_1.parent.body == [arg_1]\n ):\n return\n if any(arg_2.nodes_of_class(astroid.Call)):\n arg_0.add_message(\n \"expression-not-assigned\", arg_1=arg_1, args=arg_2.as_string()\n )\n else:\n arg_0.add_message(\"pointless-statement\", arg_1=arg_1)"} +{"_id": "doc_4306", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check whether or not the lambda is suspicious\n \"\"\"\n # if the body of the lambda is a call expression with the same\n # argument list as the lambda itself, then the lambda is\n # possibly unnecessary and at least suspicious.\n if arg_1.args.defaults:\n # If the arguments of the lambda include defaults, then a\n # judgment cannot be made because there is no way to check\n # that the defaults defined by the lambda are the same as\n # the defaults defined by the function called in the body\n # of the lambda.\n return\n arg_2 = arg_1.body\n if not isinstance(arg_2, astroid.Call):\n # The body of the lambda must be a function call expression\n # for the lambda to be unnecessary.\n return\n if isinstance(arg_1.body.func, astroid.Attribute) and isinstance(\n arg_1.body.func.expr, astroid.Call\n ):\n # Chained call, the intermediate call might\n # return something else (but we don't check that, yet).\n return\n\n arg_3 = CallSite.from_call(arg_2)\n arg_4 = list(arg_1.args.args)\n arg_5 = list(arg_0._filter_vararg(arg_1, arg_2.args))\n if arg_1.args.kwarg:\n if arg_0._has_variadic_argument(arg_2.kwargs, arg_1.args.kwarg):\n return\n\n if arg_1.args.vararg:\n if arg_0._has_variadic_argument(arg_2.starargs, arg_1.args.vararg):\n return\n elif arg_2.starargs:\n return\n\n if arg_2.keywords:\n # Look for additional keyword arguments that are not part\n # of the lambda's signature\n arg_6 = {keyword.name for keyword in arg_1.args.defaults}\n if len(arg_6) != len(arg_3.keyword_arguments):\n # Different lengths, so probably not identical\n return\n if set(arg_3.keyword_arguments).difference(arg_6):\n return\n\n # The \"ordinary\" arguments must be in a correspondence such that:\n # ordinary_args[i].name == call.args[i].name.\n if len(arg_4) != len(arg_5):\n return\n for arg_7, arg_8 in zip(arg_4, arg_5):\n if not isinstance(arg_8, astroid.Name):\n return\n if arg_7.name != arg_8.name:\n return\n\n arg_0.add_message(\"unnecessary-lambda\", line=arg_1.fromlineno, arg_1=arg_1)"} +{"_id": "doc_4307", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check the use of an assert statement on a tuple.\"\"\"\n if (\n arg_1.fail is None\n and isinstance(arg_1.test, astroid.Tuple)\n and len(arg_1.test.elts) == 2\n ):\n arg_0.add_message(\"assert-on-tuple\", arg_1=arg_1)"} +{"_id": "doc_4308", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check duplicate key in dictionary\"\"\"\n arg_2 = set()\n for arg_3, arg_4 in arg_1.items:\n if 
isinstance(arg_3, astroid.Const):\n arg_5 = arg_3.value\n if arg_5 in arg_2:\n arg_0.add_message(\"duplicate-key\", arg_1=arg_1, args=arg_5)\n arg_2.add(arg_5)"} +{"_id": "doc_4309", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=()):\n \"\"\"check that a node is not inside a finally clause of a\n try...finally statement.\n If we found before a try...finally bloc a parent which its type is\n in breaker_classes, we skip the whole check.\"\"\"\n # if self._tryfinallys is empty, we're not an in try...finally block\n if not arg_0._tryfinallys:\n return\n # the node could be a grand-grand...-children of the try...finally\n arg_4 = arg_1.parent\n arg_5 = arg_1\n while arg_4 and not isinstance(arg_4, arg_3):\n if hasattr(arg_4, \"finalbody\") and arg_5 in arg_4.finalbody:\n arg_0.add_message(\"lost-exception\", arg_1=arg_1, args=arg_2)\n return\n arg_5 = arg_4\n arg_4 = arg_5.parent"} +{"_id": "doc_4310", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"check module level assigned names\"\"\"\n arg_0._check_assign_to_new_keyword_violation(arg_1.name, arg_1)\n arg_2 = arg_1.frame()\n arg_3 = arg_1.assign_type()\n if isinstance(arg_3, astroid.Comprehension):\n arg_0._check_name(\"inlinevar\", arg_1.name, arg_1)\n elif isinstance(arg_2, astroid.Module):\n if isinstance(arg_3, astroid.Assign) and not in_loop(arg_3):\n if isinstance(utils.safe_infer(arg_3.value), astroid.ClassDef):\n arg_0._check_name(\"class\", arg_1.name, arg_1)\n else:\n if not _redefines_import(arg_1):\n # Don't emit if the name redefines an import\n # in an ImportError except handler.\n arg_0._check_name(\"const\", arg_1.name, arg_1)\n elif isinstance(arg_3, astroid.ExceptHandler):\n arg_0._check_name(\"variable\", arg_1.name, arg_1)\n elif isinstance(arg_2, astroid.FunctionDef):\n # global introduced variable aren't in the function locals\n if arg_1.name in arg_2 and arg_1.name not in arg_2.argnames():\n if not _redefines_import(arg_1):\n arg_0._check_name(\"variable\", arg_1.name, arg_1)\n elif isinstance(arg_2, astroid.ClassDef):\n if not list(arg_2.local_attr_ancestors(arg_1.name)):\n arg_0._check_name(\"class_attribute\", arg_1.name, arg_1)"} +{"_id": "doc_4311", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check if we compare to a literal, which is usually what we do not want to do.\"\"\"\n arg_3 = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)\n arg_4 = isinstance(arg_1, arg_3)\n arg_5 = False\n if isinstance(arg_1, astroid.Const):\n if isinstance(arg_1.value, bool) or arg_1.value is None:\n # Not interested in this values.\n return\n arg_5 = isinstance(arg_1.value, (bytes, str, int, float))\n\n if arg_5 or arg_4:\n arg_0.add_message(\"literal-comparison\", arg_2=arg_2)"} +{"_id": "doc_4312", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=()):\n \"\"\"create the subgraphs representing any `if` and `for` statements\"\"\"\n if arg_0.graph is None:\n # global loop\n arg_0.graph = PathGraph(arg_1)\n arg_0.Func_parse(arg_1, arg_1, arg_3)\n arg_0.graphs[\"%s%s\" % (arg_0.classname, arg_2)] = arg_0.graph\n arg_0.reset()\n else:\n arg_0._append_node(arg_1)\n arg_0.Func_parse(arg_1, arg_1, arg_3)"} +{"_id": "doc_4313", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3\n ): # pylint: disable=unused-argument\n \"\"\"parse the body and any `else` block of `if` and `for` statements\"\"\"\n arg_4 = []\n arg_0.tail = arg_1\n arg_0.dispatch_list(arg_1.body)\n arg_4.append(arg_0.tail)\n for arg_6 in arg_3:\n arg_0.tail = arg_1\n arg_0.dispatch_list(arg_6.body)\n 
arg_4.append(arg_0.tail)\n if arg_1.orelse:\n arg_0.tail = arg_1\n arg_0.dispatch_list(arg_1.orelse)\n arg_4.append(arg_0.tail)\n else:\n arg_4.append(arg_1)\n if arg_1:\n arg_7 = \"%s\" % arg_0._bottom_counter\n arg_0._bottom_counter += 1\n for arg_8 in arg_4:\n arg_0.graph.connect(arg_8, arg_7)\n arg_0.tail = arg_7"} +{"_id": "doc_4314", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"visit an astroid.Module node to check too complex rating and\n add message if is greather than max_complexity stored from options\"\"\"\n arg_2 = PathGraphingAstVisitor()\n for arg_3 in arg_1.body:\n arg_2.preorder(arg_3, arg_2)\n for arg_4 in arg_2.graphs.values():\n arg_5 = arg_4.complexity()\n arg_1 = arg_4.root\n if hasattr(arg_1, \"name\"):\n arg_6 = \"'%s'\" % arg_1.name\n else:\n arg_6 = \"This '%s'\" % arg_1.__class__.__name__.lower()\n if arg_5 <= arg_0.config.max_complexity:\n continue\n arg_0.add_message(\n \"too-complex\", arg_1=arg_1, confidence=HIGH, args=(arg_6, arg_5)\n )"} +{"_id": "doc_4315", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"walk to the checker's dir and collect visit and leave methods\"\"\"\n # XXX : should be possible to merge needed_checkers and Func\n arg_2 = set()\n arg_3 = set()\n arg_4 = arg_0.visit_events\n arg_5 = arg_0.leave_events\n for arg_6 in dir(arg_1):\n arg_7 = arg_6[6:]\n if arg_7 == \"default\":\n continue\n if arg_6.startswith(\"visit_\"):\n arg_8 = getattr(arg_1, arg_6)\n # don't use visit_methods with no activated message:\n if arg_0._is_method_enabled(arg_8):\n arg_4[arg_7].append(arg_8)\n arg_2.add(arg_7)\n elif arg_6.startswith(\"leave_\"):\n arg_9 = getattr(arg_1, arg_6)\n # don't use leave_methods with no activated message:\n if arg_0._is_method_enabled(arg_9):\n arg_5[arg_7].append(arg_9)\n arg_3.add(arg_7)\n arg_10 = getattr(arg_1, \"visit_default\", None)\n if arg_10:\n for arg_11 in nodes.ALL_NODE_CLASSES:\n arg_7 = arg_11.__name__.lower()\n if arg_7 not in arg_2:\n arg_4[arg_7].append(arg_10)"} +{"_id": "doc_4316", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"call visit events of astroid checkers for the given node, recurse on\n its children, then leave events.\n \"\"\"\n arg_2 = arg_1.__class__.__name__.lower()\n\n # Detect if the node is a new name for a deprecated alias.\n # In this case, favour the methods for the deprecated\n # alias if any, in order to maintain backwards\n # compatibility.\n arg_3 = arg_0.visit_events.get(arg_2, ())\n arg_4 = arg_0.leave_events.get(arg_2, ())\n\n if arg_1.is_statement:\n arg_0.nbstatements += 1\n # generate events for this node on each checker\n for arg_5 in arg_3 or ():\n arg_5(arg_1)\n # recurse on children\n for arg_6 in arg_1.get_children():\n arg_0.Func(arg_6)\n for arg_5 in arg_4 or ():\n arg_5(arg_1)"} +{"_id": "doc_4317", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"create a relation ship\n \"\"\"\n arg_5 = Relationship(arg_1, arg_2, arg_3, arg_4)\n arg_0.relationships.setdefault(arg_3, []).append(arg_5)"} +{"_id": "doc_4318", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"return a relation ship or None\n \"\"\"\n for arg_3 in arg_0.relationships.get(arg_2, ()):\n if arg_3.from_object is arg_1:\n return arg_3\n raise KeyError(arg_2)"} +{"_id": "doc_4319", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return visible attributes, possibly with class name\"\"\"\n arg_2 = []\n arg_3 = [\n (n, m)\n for n, m in arg_1.items()\n if isinstance(m, astroid.FunctionDef) and decorated_with_property(m)\n ]\n for arg_4, arg_5 in (\n 
list(arg_1.instance_attrs_type.items())\n + list(arg_1.locals_type.items())\n + arg_3\n ):\n if not arg_0.show_attr(arg_4):\n continue\n arg_6 = arg_0.class_names(arg_5)\n if arg_6:\n arg_4 = \"%s : %s\" % (arg_4, \", \".join(arg_6))\n arg_2.append(arg_4)\n return sorted(arg_2)"} +{"_id": "doc_4320", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return visible methods\"\"\"\n arg_2 = [\n m\n for m in arg_1.values()\n if isinstance(m, astroid.FunctionDef)\n and not decorated_with_property(m)\n and arg_0.show_attr(m.name)\n ]\n return sorted(arg_2, key=lambda n: n.name)"} +{"_id": "doc_4321", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"create a diagram object\n \"\"\"\n assert arg_2 not in arg_0._nodes\n arg_3 = DiagramEntity(arg_1, arg_2)\n arg_0._nodes[arg_2] = arg_3\n arg_0.objects.append(arg_3)"} +{"_id": "doc_4322", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return class names if needed in diagram\"\"\"\n arg_2 = []\n for arg_3 in arg_1:\n if isinstance(arg_3, astroid.Instance):\n arg_3 = arg_3._proxied\n if (\n isinstance(arg_3, astroid.ClassDef)\n and hasattr(arg_3, \"name\")\n and not arg_0.has_node(arg_3)\n ):\n if arg_3.name not in arg_2:\n arg_4 = arg_3.name\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_4323", "title": "", "text": "def Func(arg_0):\n \"\"\"return all class nodes in the diagram\"\"\"\n return [arg_1 for arg_1 in arg_0.objects if isinstance(arg_1.node, astroid.ClassDef)]"} +{"_id": "doc_4324", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"return a module by its name, looking also for relative imports;\n raise KeyError if not found\n \"\"\"\n for arg_3 in arg_0.modules():\n arg_4 = arg_3.node.name\n if arg_4 == arg_1:\n return arg_3\n # search for fullname of relative import modules\n arg_5 = arg_2.root().name\n if arg_4 == \"%s.%s\" % (arg_5, arg_1):\n return arg_3\n if arg_4 == \"%s.%s\" % (arg_5.rsplit(\".\", 1)[0], arg_1):\n return arg_3\n raise KeyError(arg_1)"} +{"_id": "doc_4325", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"add dependencies created by from-imports\n \"\"\"\n arg_3 = arg_1.root().name\n arg_4 = arg_0.module(arg_3)\n if arg_2 not in arg_4.node.depends:\n arg_4.node.depends.append(arg_2)"} +{"_id": "doc_4326", "title": "", "text": "def Func(\n arg_0, arg_1,\n arg_2='config.yaml', arg_3=None,\n):\n \"\"\"Deletes old deployed versions of the function in AWS Lambda.\n\n Won't delete $Latest and any aliased version\n\n :param str src:\n The path to your Lambda ready project (folder must contain a valid\n config.yaml and handler module (e.g.: service.py).\n :param int keep_last_versions:\n The number of recent versions to keep and not delete\n \"\"\"\n if arg_1 <= 0:\n print(\"Won't delete all versions. Please do this manually\")\n else:\n arg_4 = os.path.join(arg_0, arg_2)\n arg_5 = read_cfg(arg_4, arg_3)\n\n arg_3 = arg_5.get('profile')\n arg_6 = arg_5.get('aws_access_key_id')\n arg_7 = arg_5.get('aws_secret_access_key')\n\n arg_8 = get_client(\n 'lambda', arg_3, arg_6, arg_7,\n arg_5.get('region'),\n )\n\n arg_9 = arg_8.list_versions_by_function(\n FunctionName=arg_5.get('function_name'),\n )\n arg_10 = arg_9.get('Versions')\n if len(arg_9.get('Versions')) < arg_1:\n print('Nothing to delete. 
(Too few versions published)')\n else:\n arg_11 = [elem.get('Version') for elem in\n arg_10[1:-arg_1]]\n for arg_12 in arg_11:\n try:\n arg_8.delete_function(\n FunctionName=arg_5.get('function_name'),\n Qualifier=arg_12,\n )\n except botocore.exceptions.ClientError as e:\n print('Skipping Version {}: {}'\n .format(arg_12, e.message))"} +{"_id": "doc_4327", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None,\n arg_3='config.yaml', arg_4=None,\n arg_5=False\n):\n \"\"\"Deploys a new function to AWS Lambda.\n\n :param str src:\n The path to your Lambda ready project (folder must contain a valid\n config.yaml and handler module (e.g.: service.py).\n :param str local_package:\n The path to a local package with should be included in the Func as\n well (and/or is not available on PyPi)\n \"\"\"\n # Load and parse the config file.\n arg_6 = os.path.join(arg_0, arg_3)\n arg_7 = read_cfg(arg_6, arg_4)\n\n # Copy all the pip dependencies required to run your code into a temporary\n # folder then add the handler file in the root of this directory.\n # Zip the contents of this folder into a single file and output to the dist\n # directory.\n arg_8 = build(\n arg_0, arg_3=arg_3,\n arg_1=arg_1,\n arg_2=arg_2,\n )\n\n arg_9 = get_function_config(arg_7)\n if arg_9:\n update_function(arg_7, arg_8, arg_9, arg_5=arg_5)\n else:\n create_function(arg_7, arg_8)"} +{"_id": "doc_4328", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None,\n arg_3='config.yaml', arg_4=None,\n arg_5=False\n):\n \"\"\"Deploys a new function via AWS S3.\n\n :param str src:\n The path to your Lambda ready project (folder must contain a valid\n config.yaml and handler module (e.g.: service.py).\n :param str local_package:\n The path to a local package with should be included in the deploy as\n well (and/or is not available on PyPi)\n \"\"\"\n # Load and parse the config file.\n arg_6 = os.path.join(arg_0, arg_3)\n arg_7 = read_cfg(arg_6, arg_4)\n\n # Copy all the pip dependencies required to run your code into a temporary\n # folder then add the handler file in the root of this directory.\n # Zip the contents of this folder into a single file and output to the dist\n # directory.\n arg_8 = build(\n arg_0, arg_3=arg_3, arg_1=arg_1,\n arg_2=arg_2,\n )\n\n arg_9 = True\n arg_10 = upload_s3(arg_7, arg_8, arg_9)\n arg_11 = get_function_config(arg_7)\n if arg_11:\n update_function(arg_7, arg_8, arg_11, arg_9=arg_9,\n arg_10=arg_10, arg_5=arg_5)\n else:\n create_function(arg_7, arg_8, arg_9=arg_9, arg_10=arg_10)"} +{"_id": "doc_4329", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None,\n arg_3='config.yaml', arg_4=None,\n):\n \"\"\"Uploads a new function to AWS S3.\n\n :param str src:\n The path to your Lambda ready project (folder must contain a valid\n config.yaml and handler module (e.g.: service.py).\n :param str local_package:\n The path to a local package with should be included in the deploy as\n well (and/or is not available on PyPi)\n \"\"\"\n # Load and parse the config file.\n arg_5 = os.path.join(arg_0, arg_3)\n arg_6 = read_cfg(arg_5, arg_4)\n\n # Copy all the pip dependencies required to run your code into a temporary\n # folder then add the handler file in the root of this directory.\n # Zip the contents of this folder into a single file and output to the dist\n # directory.\n arg_7 = build(\n arg_0, arg_3=arg_3, arg_1=arg_1,\n arg_2=arg_2,\n )\n\n Func_s3(arg_6, arg_7)"} +{"_id": "doc_4330", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Copies template files to a given 
directory.\n\n :param str src:\n The path to output the template lambda project files.\n :param bool minimal:\n Minimal possible template files (excludes event.json).\n \"\"\"\n\n arg_2 = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'project_templates',\n )\n for arg_3 in os.listdir(arg_2):\n if (arg_1 and arg_3 == 'event.json') or arg_3.endswith('.pyc'):\n continue\n arg_4 = os.path.join(arg_2, arg_3)\n\n if not os.path.isdir(arg_4):\n copy(arg_4, arg_0)"} +{"_id": "doc_4331", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Tranlate a string of the form \"module.function\" into a callable\n function.\n\n :param str src:\n The path to your Lambda project containing a valid handler file.\n :param str handler:\n A dot delimited string representing the `.`.\n \"\"\"\n\n # \"cd\" into `src` directory.\n os.chdir(arg_0)\n\n arg_2, arg_3 = arg_1.split('.')\n arg_4 = get_handler_filename(arg_1)\n\n arg_5 = os.path.join(arg_0, arg_4)\n arg_6 = load_source(arg_2, arg_5)\n return getattr(arg_6, arg_3)"} +{"_id": "doc_4332", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Shortcut to insert the `account_id` and `role` into the iam string.\"\"\"\n arg_3 = ARN_PREFIXES.get(arg_0, 'aws')\n return 'arn:{0}:iam::{1}:role/{2}'.format(arg_3, arg_1, arg_2)"} +{"_id": "doc_4333", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"Upload a function to AWS S3.\"\"\"\n\n print('Uploading your new Lambda function')\n arg_3 = arg_0.get('profile')\n arg_4 = arg_0.get('aws_access_key_id')\n arg_5 = arg_0.get('aws_secret_access_key')\n arg_6 = get_client(\n 's3', arg_3, arg_4, arg_5,\n arg_0.get('region'),\n )\n arg_7 = b''\n with open(arg_1, mode='rb') as fh:\n arg_7 = fh.read()\n arg_8 = arg_0.get('s3_key_prefix', '/dist')\n arg_9 = hashlib.new('md5', arg_7).hexdigest()\n arg_10 = str(time.time())\n arg_11 = '{prefix}{checksum}-{ts}.zip'.format(\n prefix=arg_8, arg_9=arg_9, ts=arg_10,\n )\n\n # Do we prefer development variable over config?\n arg_12 = (\n os.environ.get('S3_BUCKET_NAME') or arg_0.get('bucket_name')\n )\n arg_13 = (\n os.environ.get('LAMBDA_FUNCTION_NAME') or arg_0.get('function_name')\n )\n arg_14 = {\n 'Bucket': '{}'.format(arg_12),\n 'Key': '{}'.format(arg_11),\n 'Body': arg_7,\n }\n\n arg_6.put_object(**arg_14)\n print('Finished uploading {} to S3 bucket {}'.format(arg_13, arg_12))\n if arg_2:\n return arg_11"} +{"_id": "doc_4334", "title": "", "text": "def Func(arg_0, arg_1):\n\n \"\"\"Download the data at a URL, and cache it under the given name.\n\n The file is stored under `pyav/test` with the given name in the directory\n :envvar:`PYAV_TESTDATA_DIR`, or the first that is writeable of:\n\n - the current virtualenv\n - ``/usr/local/share``\n - ``/usr/local/lib``\n - ``/usr/share``\n - ``/usr/lib``\n - the user's home\n\n \"\"\"\n\n arg_2 = os.path.normpath(arg_1)\n if arg_2 != arg_1:\n raise ValueError(\"{} is not normalized.\".format(arg_1))\n\n for arg_3 in iter_data_dirs():\n arg_4 = os.path.join(arg_3, arg_1)\n if os.path.exists(arg_4):\n return arg_4\n\n arg_3 = next(iter_data_dirs(True))\n arg_4 = os.path.join(arg_3, arg_1)\n\n log.info(\"Downloading {} to {}\".format(arg_0, arg_4))\n\n arg_5 = urlopen(arg_0)\n if arg_5.getcode() != 200:\n raise ValueError(\"HTTP {}\".format(arg_5.getcode()))\n\n arg_3 = os.path.dirname(arg_4)\n try:\n os.makedirs(arg_3)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n arg_6 = arg_4 + '.tmp'\n with open(arg_6, 'wb') as fh:\n while True:\n arg_7 = arg_5.read(8196)\n if arg_7:\n 
fh.write(arg_7)\n else:\n break\n\n os.rename(arg_6, arg_4)\n\n return arg_4"} +{"_id": "doc_4335", "title": "", "text": "def Func(arg_0):\n \"\"\"Download and return a path to a sample from the FFmpeg test suite.\n\n Data is handled by :func:`cached_download`.\n\n See the `FFmpeg Automated Test Environment `_\n\n \"\"\"\n return cached_download('http://Func.ffmpeg.org/Func-suite/' + arg_0,\n os.path.join('Func-suite', arg_0.replace('/', os.path.sep)))"} +{"_id": "doc_4336", "title": "", "text": "def Func(arg_0):\n \"\"\"Get distutils-compatible extension extras for the given library.\n\n This requires ``pkg-config``.\n\n \"\"\"\n try:\n arg_1 = Popen(['pkg-config', '--cflags', '--libs', arg_0], stdout=PIPE, stderr=PIPE)\n except OSError:\n print('pkg-config is required for building PyAV')\n exit(1)\n\n arg_2, arg_3 = arg_1.communicate()\n if arg_1.wait():\n return\n\n arg_4, arg_5 = parse_cflags(arg_2.decode('utf8'))\n if arg_5:\n print(\"pkg-config returned flags we don't understand: {}\".format(arg_5))\n exit(1)\n\n return arg_4"} +{"_id": "doc_4337", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update the `dst` with the `src`, extending values where lists.\n\n Primiarily useful for integrating results from `get_library_config`.\n\n \"\"\"\n for arg_2, arg_3 in arg_1.items():\n arg_4 = arg_0.setdefault(arg_2, [])\n for arg_5 in arg_3:\n if arg_5 not in arg_4:\n arg_4.append(arg_5)"} +{"_id": "doc_4338", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Spawn a process, and eat the stdio.\"\"\"\n arg_2 = Popen(arg_0, stdout=PIPE, stderr=PIPE)\n arg_3, arg_4 = arg_2.communicate()\n if arg_2.returncode:\n raise DistutilsExecError(arg_4)"} +{"_id": "doc_4339", "title": "", "text": "def Func (arg_0, arg_1=True):\n \"\"\"\n filter the quoted text out of a message\n \"\"\"\n global DEBUG\n global PAT_FORWARD, PAT_REPLIED, PAT_UNSUBSC\n\n if arg_1:\n arg_0 = filter(lambda x: x in string.printable, arg_0)\n\n if DEBUG:\n print(\"text:\", arg_0)\n\n # strip off quoted text in a forward\n arg_2 = PAT_FORWARD.split(arg_0, re.M)\n\n if arg_2 and len(arg_2) > 1:\n arg_0 = arg_2[0]\n\n # strip off quoted text in a reply\n arg_2 = PAT_REPLIED.split(arg_0, re.M)\n\n if arg_2 and len(arg_2) > 1:\n arg_0 = arg_2[0]\n\n # strip off any trailing unsubscription notice\n arg_2 = PAT_UNSUBSC.split(arg_0, re.M)\n\n if arg_2:\n arg_0 = arg_2[0]\n\n # replace any remaining quoted text with blank lines\n arg_3 = []\n\n for arg_4 in arg_0.split(\"\\n\"):\n if arg_4.startswith(\">\"):\n arg_3.append(\"\")\n else:\n arg_3.append(arg_4)\n\n return list(split_grafs(arg_3))"} +{"_id": "doc_4340", "title": "", "text": "def Func (arg_0):\n \"\"\"\n parse one document to prep for TextRank\n \"\"\"\n global DEBUG\n\n for arg_1 in arg_0:\n arg_2 = 0\n\n for arg_3 in filter_quotes(arg_1[\"text\"], is_email=False):\n if DEBUG:\n print(\"graf_text:\", arg_3)\n\n arg_4, arg_5 = parse_graf(arg_1[\"id\"], arg_3, arg_2)\n arg_2 = arg_5\n\n for arg_6 in arg_4:\n yield arg_6"} +{"_id": "doc_4341", "title": "", "text": "def Func (arg_0):\n \"\"\"\n construct the TextRank graph from parsed paragraphs\n \"\"\"\n global DEBUG, WordNode\n arg_1 = nx.DiGraph()\n\n for arg_2 in arg_0:\n if DEBUG:\n print(arg_2[\"graf\"])\n\n for arg_3 in get_tiles(map(WordNode._make, arg_2[\"graf\"])):\n if DEBUG:\n print(arg_3)\n\n for arg_4 in arg_3:\n if not arg_1.has_node(arg_4):\n arg_1.add_node(arg_4)\n\n try:\n arg_1.edge[arg_3[0]][arg_3[1]][\"weight\"] += 1.0\n except KeyError:\n arg_1.add_edge(arg_3[0], arg_3[1], weight=1.0)\n\n 
return arg_1"} +{"_id": "doc_4342", "title": "", "text": "def Func (arg_0, arg_1, arg_2=\"graph.dot\"):\n \"\"\"\n output the graph in Dot file format\n \"\"\"\n arg_3 = Digraph()\n\n for arg_4 in arg_0.nodes():\n arg_3.node(arg_4, \"%s %0.3f\" % (arg_4, arg_1[arg_4]))\n\n for arg_5 in arg_0.edges():\n arg_3.edge(arg_5[0], arg_5[1], constraint=\"false\")\n\n with open(arg_2, 'w') as f:\n f.write(arg_3.source)"} +{"_id": "doc_4343", "title": "", "text": "def Func (arg_0, arg_1, arg_2=\"graph.dot\"):\n \"\"\"\n render the TextRank graph for visual formats\n \"\"\"\n if arg_2:\n write_dot(arg_0, arg_1, path=arg_2)"} +{"_id": "doc_4344", "title": "", "text": "def Func (arg_0):\n \"\"\"\n run the TextRank algorithm\n \"\"\"\n arg_1 = build_graph(json_iter(arg_0))\n arg_2 = nx.pagerank(arg_1)\n\n return arg_1, arg_2"} +{"_id": "doc_4345", "title": "", "text": "def Func (arg_0, arg_1):\n \"\"\"\n leverage noun phrase chunking\n \"\"\"\n for arg_2 in iter(range(0, len(arg_0))):\n arg_3 = Func_sub(arg_0, arg_1, arg_2)\n\n if arg_3:\n return arg_3"} +{"_id": "doc_4346", "title": "", "text": "def Func (arg_0, arg_1):\n \"\"\"\n iterate through the noun phrases\n \"\"\"\n if (len(arg_0) > 1):\n arg_2 = False\n arg_3 = \" \".join([arg_6.text for arg_6 in arg_0])\n arg_4 = arg_1(arg_3.strip(), parse=True)\n\n for arg_5 in arg_4.noun_chunks:\n if arg_5.text != arg_3:\n arg_2 = True\n yield arg_5.text, find_chunk(arg_0, arg_5.text.split(\" \"))\n\n if not arg_2 and all([arg_6.pos[0] != \"v\" for arg_6 in arg_0]):\n yield arg_3, arg_0"} +{"_id": "doc_4347", "title": "", "text": "def Func (arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n iterator for collecting the named-entities\n \"\"\"\n global DEBUG\n arg_4 = \" \".join([w.raw for w in arg_0])\n\n if DEBUG:\n print(\"sent:\", arg_4)\n\n for arg_5 in arg_3(arg_4).ents:\n if DEBUG:\n print(\"NER:\", arg_5.label_, arg_5.text)\n\n if (arg_5.label_ not in [\"CARDINAL\"]) and (arg_5.text.lower() not in arg_2):\n arg_6, arg_7 = find_entity(arg_0, arg_1, arg_5.text.split(\" \"), 0)\n\n if arg_6 and arg_7:\n arg_8 = RankedLexeme(text=arg_5.text.lower(), rank=arg_6, ids=arg_7, pos=\"np\", count=1)\n\n if DEBUG:\n print(arg_8)\n\n yield arg_8"} +{"_id": "doc_4348", "title": "", "text": "def Func (arg_0):\n \"\"\"\n create a MinHash digest\n \"\"\"\n arg_1 = 512\n arg_2 = MinHash(arg_1)\n\n for arg_3 in arg_0:\n arg_2.update(arg_3.encode('utf8'))\n\n return arg_2"} +{"_id": "doc_4349", "title": "", "text": "def Func (arg_0, arg_1):\n \"\"\"\n determine distance for each sentence\n \"\"\"\n arg_2 = {}\n arg_3 = 0\n\n if isinstance(arg_1, str):\n arg_1 = json_iter(arg_1)\n\n for arg_4 in arg_1:\n arg_5 = arg_4[\"graf\"]\n arg_6 = [WordNode._make(x) for x in arg_5]\n arg_7 = \" \".join([w.raw for w in arg_6])\n\n arg_8 = mh_digest([str(w.word_id) for w in arg_6])\n arg_9 = sum([arg_8.jaccard(m) * rl.rank for rl, m in arg_0])\n arg_2[arg_7] = (arg_9, arg_3)\n arg_3 += 1\n\n for arg_7, (arg_9, arg_3) in sorted(arg_2.items(), key=lambda x: x[1][0], reverse=True):\n yield SummarySent(arg_9=arg_9, idx=arg_3, arg_7=arg_7)"} +{"_id": "doc_4350", "title": "", "text": "def Func (arg_0, arg_1=100):\n \"\"\"\n iterator for the most significant sentences, up to a specified limit\n \"\"\"\n arg_2 = 0\n\n if isinstance(arg_0, str):\n arg_0 = json_iter(arg_0)\n\n for arg_3 in arg_0:\n if not isinstance(arg_3, SummarySent):\n arg_4 = SummarySent(**arg_3)\n else:\n arg_4 = arg_3\n\n arg_5 = arg_4.text.strip().split(\" \")\n arg_6 = len(arg_5)\n\n if (arg_2 + arg_6) > arg_1:\n break\n else:\n 
arg_2 += arg_6\n yield arg_5, arg_4.idx"} +{"_id": "doc_4351", "title": "", "text": "def Func (arg_0, arg_1=False):\n \"\"\"\n pretty print a JSON object\n \"\"\"\n\n if arg_1:\n return json.dumps(arg_0, sort_keys=True, arg_1=2, separators=(',', ': '))\n else:\n return json.dumps(arg_0, sort_keys=True)"} +{"_id": "doc_4352", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Fetch data about tag\n \"\"\"\n arg_1 = arg_0.get_data(\"tags/%s\" % arg_0.name)\n arg_2 = arg_1['tag']\n\n for arg_3 in arg_2.keys():\n setattr(arg_0, arg_3, arg_2[arg_3])\n\n return arg_0"} +{"_id": "doc_4353", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Create the tag.\n \"\"\"\n for arg_2 in arg_1.keys():\n setattr(arg_0, arg_2, arg_1[arg_2])\n\n arg_3 = {\"name\": arg_0.name}\n\n arg_4 = arg_0.get_data(\"tags\", type=\"POST\", arg_3=arg_3)\n if arg_4:\n arg_0.name = arg_4['tag']['name']\n arg_0.resources = arg_4['tag']['resources']"} +{"_id": "doc_4354", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Private method to extract from a value, the resources.\n It will check the type of object in the array provided and build\n the right structure for the API.\n \"\"\"\n arg_2 = []\n if not isinstance(arg_1, list): return arg_1\n for arg_3 in arg_1:\n arg_4 = {}\n\n try:\n if isinstance(arg_3, unicode):\n arg_4 = {\"resource_id\": arg_3, \"resource_type\": \"droplet\"}\n except NameError:\n pass\n\n if isinstance(arg_3, str) or isinstance(arg_3, int):\n arg_4 = {\"resource_id\": str(arg_3), \"resource_type\": \"droplet\"}\n elif isinstance(arg_3, Droplet):\n arg_4 = {\"resource_id\": str(arg_3.id), \"resource_type\": \"droplet\"}\n\n if len(arg_4) > 0:\n arg_2.append(arg_4)\n\n return arg_2"} +{"_id": "doc_4355", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add the Tag to a Droplet.\n\n Attributes accepted at creation time:\n droplet: array of string or array of int, or array of Droplets.\n \"\"\"\n arg_2 = arg_1\n if not isinstance(arg_2, list):\n arg_2 = [arg_1]\n\n # Extracting data from the Droplet object\n arg_3 = arg_0.__extract_resources_from_droplets(arg_2)\n if len(arg_3) > 0:\n return arg_0.__add_resources(arg_3)\n\n return False"} +{"_id": "doc_4356", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove the Tag from the Droplet.\n\n Attributes accepted at creation time:\n droplet: array of string or array of int, or array of Droplets.\n \"\"\"\n arg_2 = arg_1\n if not isinstance(arg_2, list):\n arg_2 = [arg_1]\n\n # Extracting data from the Droplet object\n arg_3 = arg_0.__extract_resources_from_droplets(arg_2)\n if len(arg_3) > 0:\n return arg_0.__remove_resources(arg_3)\n\n return False"} +{"_id": "doc_4357", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Class method that will return a Action object by ID.\n \"\"\"\n arg_3 = arg_0(token=arg_1, id=arg_2)\n arg_3.load_directly()\n return arg_3"} +{"_id": "doc_4358", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"\n Wait until the action is marked as completed or with an error.\n It will return True in case of success, otherwise False.\n\n Optional Args:\n update_every_seconds - int : number of seconds to Func before\n checking if the action is completed.\n \"\"\"\n while arg_0.status == u'in-progress':\n sleep(arg_1)\n arg_0.load()\n\n return arg_0.status == u'completed'"} +{"_id": "doc_4359", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Class method that will return a Droplet object by ID.\n\n Args:\n api_token (str): token\n droplet_id (int): droplet id\n \"\"\"\n 
arg_3 = arg_0(token=arg_1, id=arg_2)\n arg_3.load()\n return arg_3"} +{"_id": "doc_4360", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=False):\n \"\"\"Take a snapshot!\n\n Args:\n snapshot_name (str): name of snapshot\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n power_off (bool): Before taking the snapshot the droplet will be\n turned off with another API call. It will wait until the\n droplet will be powered off.\n\n Returns dict or Action\n \"\"\"\n if arg_3 is True and arg_0.status != \"off\":\n arg_4 = arg_0.power_off(arg_2=False)\n arg_4.wait()\n arg_0.load()\n\n return arg_0._perform_action(\n {\"type\": \"snapshot\", \"name\": arg_1},\n arg_2\n )"} +{"_id": "doc_4361", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Change the kernel to a new one\n\n Args:\n kernel : instance of digitalocean.Kernel.Kernel\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\n Returns dict or Action\n \"\"\"\n if type(arg_1) != Kernel:\n raise BadKernelObject(\"Use Kernel object\")\n\n return arg_0._perform_action(\n {'type': 'Func', 'kernel': arg_1.id},\n arg_2\n )"} +{"_id": "doc_4362", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Check and return a list of SSH key IDs or fingerprints according\n to DigitalOcean's API. This method is used to check and create a\n droplet with the correct SSH keys.\n \"\"\"\n arg_3 = list()\n for arg_4 in arg_0:\n if type(arg_4) in [int, type(2 ** 64)]:\n arg_3.append(int(arg_4))\n\n elif type(arg_4) == SSHKey:\n arg_3.append(arg_4.id)\n\n elif type(arg_4) in [type(u''), type('')]:\n # ssh_key could either be a fingerprint or a public key\n #\n # type(u'') and type('') is the same in python 3 but\n # different in 2. 
See:\n # https://github.com/koalalorenzo/python-digitalocean/issues/80\n arg_5 = '([0-9a-fA-F]{2}:){15}[0-9a-fA-F]'\n arg_6 = re.match(arg_5, arg_4)\n\n if arg_6 is not None and arg_6.end() == len(arg_4) - 1:\n arg_3.append(arg_4)\n\n else:\n arg_7 = SSHKey()\n arg_7.token = arg_1\n arg_8 = arg_7.load_by_pub_key(arg_4)\n\n if arg_8 is None:\n arg_7.public_key = arg_4\n arg_7.name = \"SSH Key %s\" % arg_2\n arg_7.create()\n else:\n arg_7 = arg_8\n\n arg_3.append(arg_7.id)\n else:\n raise BadSSHKeyFormat(\n \"Droplet.ssh_keys should be a list of IDs, public keys\"\n + \" or fingerprints.\"\n )\n\n return arg_3"} +{"_id": "doc_4363", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a list of Action objects\n This actions can be used to check the droplet's status\n \"\"\"\n arg_1 = arg_0.get_data(\"droplets/%s/actions/\" % arg_0.id, type=GET)\n\n arg_2 = []\n for arg_3 in arg_1['actions']:\n arg_4 = Action(**arg_3)\n arg_4.token = arg_0.token\n arg_4.droplet_id = arg_0.id\n arg_4.load()\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_4364", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a specific Action by its ID.\n\n Args:\n action_id (int): id of action\n \"\"\"\n return Action.get_object(\n api_token=arg_0.token,\n arg_1=arg_1\n )"} +{"_id": "doc_4365", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns a list of Record objects\n \"\"\"\n if arg_1 is None:\n arg_1 = {}\n \n # URL https://api.digitalocean.com/v2/domains/[NAME]/records/\n arg_2 = []\n arg_3 = arg_0.get_data(\"domains/%s/records/\" % arg_0.name, type=GET, arg_1=arg_1)\n\n for arg_4 in arg_3['domain_records']:\n\n arg_5 = Record(domain_name=arg_0.name, **arg_4)\n arg_5.token = arg_0.token\n arg_2.append(arg_5)\n\n return arg_2"} +{"_id": "doc_4366", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load the FloatingIP object from DigitalOcean.\n\n Requires self.ip to be set.\n \"\"\"\n arg_1 = arg_0.get_data('floating_ips/%s' % arg_0.ip, type=GET)\n arg_2 = arg_1['floating_ip']\n\n # Setting the attribute values\n for arg_3 in arg_2.keys():\n setattr(arg_0, arg_3, arg_2[arg_3])\n\n return arg_0"} +{"_id": "doc_4367", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Creates a FloatingIP and assigns it to a Droplet.\n\n Note: Every argument and parameter given to this method will be\n assigned to the object.\n\n Args:\n droplet_id: int - droplet id\n \"\"\"\n arg_3 = arg_0.get_data('floating_ips/',\n type=POST,\n params={'droplet_id': arg_0.droplet_id})\n\n if arg_3:\n arg_0.ip = arg_3['floating_ip']['ip']\n arg_0.region = arg_3['floating_ip']['region']\n\n return arg_0"} +{"_id": "doc_4368", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Assign a FloatingIP to a Droplet.\n\n Args:\n droplet_id: int - droplet id\n \"\"\"\n return arg_0.get_data(\n \"floating_ips/%s/actions/\" % arg_0.ip,\n type=POST,\n params={\"type\": \"Func\", \"droplet_id\": arg_1}\n )"} +{"_id": "doc_4369", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add tags to this Firewall.\n \"\"\"\n return arg_0.get_data(\n \"firewalls/%s/tags\" % arg_0.id,\n type=POST,\n params={\"tags\": arg_1}\n )"} +{"_id": "doc_4370", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove tags from this Firewall.\n \"\"\"\n return arg_0.get_data(\n \"firewalls/%s/tags\" % arg_0.id,\n type=DELETE,\n params={\"tags\": arg_1}\n )"} +{"_id": "doc_4371", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Class method that will return a SSHKey object by ID.\n \"\"\"\n arg_3 = 
arg_0(token=arg_1, id=arg_2)\n arg_3.load()\n return arg_3"} +{"_id": "doc_4372", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load the SSHKey object from DigitalOcean.\n\n Requires either self.id or self.fingerprint to be set.\n \"\"\"\n arg_1 = None\n if arg_0.id:\n arg_1 = arg_0.id\n elif arg_0.fingerprint is not None:\n arg_1 = arg_0.fingerprint\n\n arg_2 = arg_0.get_data(\"account/keys/%s\" % arg_1, type=GET)\n\n arg_3 = arg_2['ssh_key']\n\n # Setting the attribute values\n for arg_4 in arg_3.keys():\n setattr(arg_0, arg_4, arg_3[arg_4])\n arg_0.id = arg_3['id']"} +{"_id": "doc_4373", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This method will load a SSHKey object from DigitalOcean\n from a public_key. This method will avoid problems like\n uploading the same public_key twice.\n \"\"\"\n\n arg_2 = arg_0.get_data(\"account/keys/\")\n for arg_3 in arg_2['ssh_keys']:\n if arg_3.get('public_key', \"\") == arg_1:\n arg_0.id = arg_3['id']\n arg_0.load()\n return arg_0\n return None"} +{"_id": "doc_4374", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function returns a list of Region object.\n \"\"\"\n arg_1 = arg_0.get_data(\"regions/\")\n arg_2 = list()\n for arg_3 in arg_1['regions']:\n arg_4 = Region(**arg_3)\n arg_4.token = arg_0.token\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_4375", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n This function returns a list of Droplet object.\n \"\"\"\n arg_2 = dict()\n if arg_1:\n arg_2[\"tag_name\"] = arg_1\n\n arg_3 = arg_0.get_data(\"droplets/\", arg_2=arg_2)\n\n arg_4 = list()\n for arg_5 in arg_3['droplets']:\n arg_6 = Droplet(**arg_5)\n arg_6.token = arg_0.token\n\n for arg_8 in arg_6.networks['v4']:\n if arg_8['type'] == 'private':\n arg_6.private_ip_address = arg_8['ip_address']\n if arg_8['type'] == 'public':\n arg_6.ip_address = arg_8['ip_address']\n if arg_6.networks['v6']:\n arg_6.ip_v6_address = arg_6.networks['v6'][0]['ip_address']\n\n if \"backups\" in arg_6.features:\n arg_6.backups = True\n else:\n arg_6.backups = False\n if \"ipv6\" in arg_6.features:\n arg_6.ipv6 = True\n else:\n arg_6.ipv6 = False\n if \"private_networking\" in arg_6.features:\n arg_6.private_networking = True\n else:\n arg_6.private_networking = False\n\n arg_4.append(arg_6)\n\n return arg_4"} +{"_id": "doc_4376", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function returns a list of SSHKey object.\n \"\"\"\n arg_1 = arg_0.get_data(\"account/keys/\")\n arg_2 = list()\n for arg_3 in arg_1['ssh_keys']:\n arg_4 = SSHKey(**arg_3)\n arg_4.token = arg_0.token\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_4377", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a SSHKey object by its ID.\n \"\"\"\n return SSHKey.get_object(api_token=arg_0.token, arg_1=arg_1)"} +{"_id": "doc_4378", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This method returns a list of all tags.\n \"\"\"\n arg_1 = arg_0.get_data(\"tags\")\n return [\n Tag(token=arg_0.token, **arg_2) for arg_2 in arg_1['tags']\n ]"} +{"_id": "doc_4379", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function returns a list of FloatingIP objects.\n \"\"\"\n arg_1 = arg_0.get_data(\"floating_ips\")\n arg_2 = list()\n for arg_3 in arg_1['floating_ips']:\n arg_4 = FloatingIP(**arg_3)\n arg_4.token = arg_0.token\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_4380", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a list of Load Balancer objects.\n \"\"\"\n arg_1 = arg_0.get_data(\"load_balancers\")\n\n arg_2 = 
list()\n for arg_3 in arg_1['load_balancers']:\n arg_4 = LoadBalancer(**arg_3)\n arg_4.token = arg_0.token\n arg_4.health_check = HealthCheck(**arg_3['health_check'])\n arg_4.sticky_sessions = StickySesions(**arg_3['sticky_sessions'])\n arg_8 = list()\n for arg_9 in arg_3['forwarding_rules']:\n arg_8.append(ForwardingRule(**arg_9))\n arg_4.forwarding_rules = arg_8\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_4381", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a Load Balancer object by its ID.\n\n Args:\n id (str): Load Balancer ID\n \"\"\"\n return LoadBalancer.get_object(api_token=arg_0.token, arg_1=arg_1)"} +{"_id": "doc_4382", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function returns a list of Certificate objects.\n \"\"\"\n arg_1 = arg_0.get_data(\"certificates\")\n arg_2 = list()\n for arg_3 in arg_1['certificates']:\n arg_4 = Certificate(**arg_3)\n arg_4.token = arg_0.token\n arg_2.append(arg_4)\n\n return arg_2"} +{"_id": "doc_4383", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This method returns a list of all Snapshots.\n \"\"\"\n arg_1 = arg_0.get_data(\"snapshots/\")\n return [\n Snapshot(token=arg_0.token, **arg_2)\n for arg_2 in arg_1['snapshots']\n ]"} +{"_id": "doc_4384", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This method returns a list of all Snapshots based on Droplets.\n \"\"\"\n arg_1 = arg_0.get_data(\"snapshots?resource_type=droplet\")\n return [\n Snapshot(token=arg_0.token, **arg_2)\n for arg_2 in arg_1['snapshots']\n ]"} +{"_id": "doc_4385", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This method returns a list of all Snapshots based on volumes.\n \"\"\"\n arg_1 = arg_0.get_data(\"snapshots?resource_type=volume\")\n return [\n Snapshot(token=arg_0.token, **arg_2)\n for arg_2 in arg_1['snapshots']\n ]"} +{"_id": "doc_4386", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n This function returns a list of Volume objects.\n \"\"\"\n if arg_1:\n arg_2 = \"volumes?region={}\".format(arg_1)\n else:\n arg_2 = \"volumes\"\n arg_3 = arg_0.get_data(arg_2)\n arg_4 = list()\n for arg_5 in arg_3['volumes']:\n arg_6 = Volume(**arg_5)\n arg_6.token = arg_0.token\n arg_4.append(arg_6)\n return arg_4"} +{"_id": "doc_4387", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a Volume object by its ID.\n \"\"\"\n return Volume.get_object(api_token=arg_0.token, arg_1=arg_1)"} +{"_id": "doc_4388", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a Firewall by its ID.\n \"\"\"\n return Firewall.get_object(\n api_token=arg_0.token,\n arg_1=arg_1,\n )"} +{"_id": "doc_4389", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Class method that will return a LoadBalancer object by its ID.\n\n Args:\n api_token (str): DigitalOcean API token\n id (str): Load Balancer ID\n \"\"\"\n arg_3 = arg_0(token=arg_1, arg_2=arg_2)\n arg_3.load()\n return arg_3"} +{"_id": "doc_4390", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Loads updated attributues for a LoadBalancer object.\n\n Requires self.id to be set.\n \"\"\"\n arg_1 = arg_0.get_data('Func_balancers/%s' % arg_0.id, type=GET)\n arg_2 = arg_1['Func_balancer']\n\n # Setting the attribute values\n for arg_3 in arg_2.keys():\n if arg_3 == 'health_check':\n arg_4 = HealthCheck(**arg_2['health_check'])\n setattr(arg_0, arg_3, arg_4)\n elif arg_3 == 'sticky_sessions':\n arg_5 = StickySesions(**arg_2['sticky_sessions'])\n setattr(arg_0, arg_3, arg_5)\n elif arg_3 == 'forwarding_rules':\n arg_6 = list()\n for arg_7 in 
arg_2['forwarding_rules']:\n arg_6.append(ForwardingRule(**arg_7))\n setattr(arg_0, arg_3, arg_6)\n else:\n setattr(arg_0, arg_3, arg_2[arg_3])\n\n return arg_0"} +{"_id": "doc_4391", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Creates a new LoadBalancer.\n\n Note: Every argument and parameter given to this method will be\n assigned to the object.\n\n Args:\n name (str): The Load Balancer's name\n region (str): The slug identifier for a DigitalOcean region\n algorithm (str, optional): The load balancing algorithm to be\n used. Currently, it must be either \"round_robin\" or\n \"least_connections\"\n forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects\n health_check (obj, optional): A `HealthCheck` object\n sticky_sessions (obj, optional): A `StickySessions` object\n redirect_http_to_https (bool, optional): A boolean indicating\n whether HTTP requests to the Load Balancer should be\n redirected to HTTPS\n droplet_ids (obj:`list` of `int`): A list of IDs representing\n Droplets to be added to the Load Balancer (mutually\n exclusive with 'tag')\n tag (str): A string representing a DigitalOcean Droplet tag\n (mutually exclusive with 'droplet_ids')\n \"\"\"\n arg_3 = [rule.__dict__ for rule in arg_0.forwarding_rules]\n\n arg_4 = {'name': arg_0.name, 'region': arg_0.region,\n 'forwarding_rules': arg_3,\n 'redirect_http_to_https': arg_0.redirect_http_to_https}\n\n if arg_0.droplet_ids and arg_0.tag:\n raise ValueError('droplet_ids and tag are mutually exclusive args')\n elif arg_0.tag:\n arg_4['tag'] = arg_0.tag\n else:\n arg_4['droplet_ids'] = arg_0.droplet_ids\n\n if arg_0.algorithm:\n arg_4['algorithm'] = arg_0.algorithm\n if arg_0.health_check:\n arg_4['health_check'] = arg_0.health_check.__dict__\n if arg_0.sticky_sessions:\n arg_4['sticky_sessions'] = arg_0.sticky_sessions.__dict__\n\n arg_5 = arg_0.get_data('load_balancers/', type=POST, arg_4=arg_4)\n\n if arg_5:\n arg_0.id = arg_5['load_balancer']['id']\n arg_0.ip = arg_5['load_balancer']['ip']\n arg_0.algorithm = arg_5['load_balancer']['algorithm']\n arg_0.health_check = HealthCheck(\n **arg_5['load_balancer']['health_check'])\n arg_0.sticky_sessions = StickySesions(\n **arg_5['load_balancer']['sticky_sessions'])\n arg_0.droplet_ids = arg_5['load_balancer']['droplet_ids']\n arg_0.status = arg_5['load_balancer']['status']\n arg_0.Funcd_at = arg_5['load_balancer']['Funcd_at']\n\n return arg_0"} +{"_id": "doc_4392", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Save the LoadBalancer\n \"\"\"\n arg_1 = [rule.__dict__ for rule in arg_0.forwarding_rules]\n\n arg_2 = {\n 'name': arg_0.name,\n 'region': arg_0.region['slug'],\n 'forwarding_rules': arg_1,\n 'redirect_http_to_https': arg_0.redirect_http_to_https\n }\n\n if arg_0.tag:\n arg_2['tag'] = arg_0.tag\n else:\n arg_2['droplet_ids'] = arg_0.droplet_ids\n\n if arg_0.algorithm:\n arg_2[\"algorithm\"] = arg_0.algorithm\n if arg_0.health_check:\n arg_2['health_check'] = arg_0.health_check.__dict__\n if arg_0.sticky_sessions:\n arg_2['sticky_sessions'] = arg_0.sticky_sessions.__dict__\n\n return arg_0.get_data(\"load_balancers/%s/\" % arg_0.id,\n type=PUT,\n params=arg_2)"} +{"_id": "doc_4393", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Assign a LoadBalancer to a Droplet.\n\n Args:\n droplet_ids (obj:`list` of `int`): A list of Droplet IDs\n \"\"\"\n return arg_0.get_data(\n \"load_balancers/%s/droplets/\" % arg_0.id,\n type=POST,\n params={\"droplet_ids\": arg_1}\n )"} +{"_id": "doc_4394", "title": "", "text": "def Func(arg_0, arg_1):\n 
\"\"\"\n Unassign a LoadBalancer.\n\n Args:\n droplet_ids (obj:`list` of `int`): A list of Droplet IDs\n \"\"\"\n return arg_0.get_data(\n \"load_balancers/%s/droplets/\" % arg_0.id,\n type=DELETE,\n params={\"droplet_ids\": arg_1}\n )"} +{"_id": "doc_4395", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Removes existing forwarding rules from a LoadBalancer.\n\n Args:\n forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects\n \"\"\"\n arg_2 = [rule.__dict__ for rule in arg_1]\n\n return arg_0.get_data(\n \"load_balancers/%s/forwarding_rules/\" % arg_0.id,\n type=DELETE,\n params={\"forwarding_rules\": arg_2}\n )"} +{"_id": "doc_4396", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a new record for a domain.\n\n Args:\n type (str): The type of the DNS record (e.g. A, CNAME, TXT).\n name (str): The host name, alias, or service being defined by the\n record.\n data (int): Variable data depending on record type.\n priority (int): The priority for SRV and MX records.\n port (int): The port for SRV records.\n ttl (int): The time to live for the record, in seconds.\n weight (int): The weight for SRV records.\n flags (int): An unsigned integer between 0-255 used for CAA records.\n tags (string): The parameter tag for CAA records. Valid values are\n \"issue\", \"wildissue\", or \"iodef\"\n \"\"\"\n arg_1 = {\n \"type\": arg_0.type,\n \"data\": arg_0.data,\n \"name\": arg_0.name,\n \"priority\": arg_0.priority,\n \"port\": arg_0.port,\n \"ttl\": arg_0.ttl,\n \"weight\": arg_0.weight,\n \"flags\": arg_0.flags,\n \"tags\": arg_0.tags\n }\n\n arg_2 = arg_0.get_data(\n \"domains/%s/records\" % (arg_0.domain),\n type=POST,\n params=arg_1,\n )\n\n if arg_2:\n arg_0.id = arg_2['domain_record']['id']"} +{"_id": "doc_4397", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Save existing record\n \"\"\"\n arg_1 = {\n \"type\": arg_0.type,\n \"data\": arg_0.data,\n \"name\": arg_0.name,\n \"priority\": arg_0.priority,\n \"port\": arg_0.port,\n \"ttl\": arg_0.ttl,\n \"weight\": arg_0.weight,\n \"flags\": arg_0.flags,\n \"tags\": arg_0.tags\n }\n return arg_0.get_data(\n \"domains/%s/records/%s\" % (arg_0.domain, arg_0.id),\n type=PUT,\n params=arg_1\n )"} +{"_id": "doc_4398", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Checks if any timeout for the requests to DigitalOcean is required.\n To set a timeout, use the REQUEST_TIMEOUT_ENV_VAR environment\n variable.\n \"\"\"\n arg_1 = os.environ.get(REQUEST_TIMEOUT_ENV_VAR)\n if arg_1:\n try:\n return float(arg_1)\n except:\n arg_0._log.error('Failed parsing the request read timeout of '\n '\"%s\". Please use a valid float number!' 
%\n arg_1)\n return None"} +{"_id": "doc_4399", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Class method that will return an Volume object by ID.\n \"\"\"\n arg_3 = arg_0(token=arg_1, id=arg_2)\n arg_3.load()\n return arg_3"} +{"_id": "doc_4400", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Creates a Block Storage volume\n\n Note: Every argument and parameter given to this method will be\n assigned to the object.\n\n Args:\n name: string - a name for the volume\n snapshot_id: string - unique identifier for the volume snapshot\n size_gigabytes: int - size of the Block Storage volume in GiB\n filesystem_type: string, optional - name of the filesystem type the\n volume will be formated with ('ext4' or 'xfs')\n filesystem_label: string, optional - the label to be applied to the\n filesystem, only used in conjunction with filesystem_type\n\n Optional Args:\n description: string - text field to describe a volume\n \"\"\"\n arg_3 = arg_0.get_data('volumes/',\n type=POST,\n params={'name': arg_0.name,\n 'snapshot_id': arg_0.snapshot_id,\n 'region': arg_0.region,\n 'size_gigabytes': arg_0.size_gigabytes,\n 'description': arg_0.description,\n 'filesystem_type': arg_0.filesystem_type,\n 'filesystem_label': arg_0.filesystem_label\n })\n\n if arg_3:\n arg_0.id = arg_3['volume']['id']\n arg_0.created_at = arg_3['volume']['created_at']\n\n return arg_0"} +{"_id": "doc_4401", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Attach a Volume to a Droplet.\n\n Args:\n droplet_id: int - droplet id\n region: string - slug identifier for the region\n \"\"\"\n return arg_0.get_data(\n \"volumes/%s/actions/\" % arg_0.id,\n type=POST,\n params={\"type\": \"Func\",\n \"droplet_id\": arg_1,\n \"region\": arg_2}\n )"} +{"_id": "doc_4402", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Detach a Volume to a Droplet.\n\n Args:\n size_gigabytes: int - size of the Block Storage volume in GiB\n region: string - slug identifier for the region\n \"\"\"\n return arg_0.get_data(\n \"volumes/%s/actions/\" % arg_0.id,\n type=POST,\n params={\"type\": \"Func\",\n \"size_gigabytes\": arg_1,\n \"region\": arg_2}\n )"} +{"_id": "doc_4403", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the list of snapshots that have been created from a volume.\n\n Args:\n \"\"\"\n arg_1 = arg_0.get_data(\"volumes/%s/snapshots/\" % arg_0.id)\n arg_2 = list()\n for arg_3 in arg_1[u'snapshots']:\n arg_4 = Snapshot(**arg_3)\n arg_4.token = arg_0.token\n arg_2.append(arg_4)\n\n return arg_2"} +{"_id": "doc_4404", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Class method that will return a Certificate object by its ID.\n \"\"\"\n arg_3 = arg_0(token=arg_1, id=arg_2)\n arg_3.load()\n return arg_3"} +{"_id": "doc_4405", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Class method that will return an Image object by ID or slug.\n\n This method is used to validate the type of the image. 
If it is a\n number, it will be considered as an Image ID, instead if it is a\n string, it will considered as slug.\n \"\"\"\n if arg_0._is_string(arg_2):\n arg_3 = arg_0(token=arg_1, slug=arg_2)\n arg_3.load(use_slug=True)\n else:\n arg_3 = arg_0(token=arg_1, id=arg_2)\n arg_3.load()\n return arg_3"} +{"_id": "doc_4406", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a new custom DigitalOcean Image from the Linux virtual machine\n image located at the provided `url`.\n \"\"\"\n arg_1 = {'name': arg_0.name,\n 'region': arg_0.region,\n 'url': arg_0.url,\n 'distribution': arg_0.distribution,\n 'description': arg_0.description,\n 'tags': arg_0.tags}\n\n arg_2 = arg_0.get_data('images', type=POST, arg_1=arg_1)\n\n if arg_2:\n for arg_3 in arg_2['image'].keys():\n setattr(arg_0, arg_3, arg_2['image'][arg_3])\n\n return arg_0"} +{"_id": "doc_4407", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Load slug.\n\n Loads by id, or by slug if id is not present or use slug is True.\n \"\"\"\n arg_2 = None\n if arg_1 or not arg_0.id:\n arg_2 = arg_0.slug\n else:\n arg_2 = arg_0.id\n if not arg_2:\n raise NotFoundError(\"One of self.id or self.slug must be set.\")\n arg_3 = arg_0.get_data(\"images/%s\" % arg_2)\n arg_4 = arg_3['image']\n\n # Setting the attribute values\n for arg_5 in arg_4.keys():\n setattr(arg_0, arg_5, arg_4[arg_5])\n\n return arg_0"} +{"_id": "doc_4408", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Rename an image\n \"\"\"\n return arg_0.get_data(\n \"images/%s\" % arg_0.id,\n type=PUT,\n params={\"name\": arg_1}\n )"} +{"_id": "doc_4409", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert reduce_sum layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting reduce_sum ...')\n\n arg_7 = arg_0['keepdims'] > 0\n arg_8 = arg_0['axes']\n\n def target_layer(arg_9, arg_7=arg_7, arg_8=arg_8):\n import keras.backend as K\n return K.sum(arg_9, arg_7=arg_7, arg_8=arg_8)\n\n arg_10 = keras.layers.Lambda(target_layer)\n arg_4[arg_2] = arg_10(arg_4[arg_3[0]])"} +{"_id": "doc_4410", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert slice operation.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting slice ...')\n\n if len(arg_0['axes']) > 1:\n raise AssertionError('Cannot convert slice by multiple dimensions')\n\n if arg_0['axes'][0] not in [0, 1, 2, 3]:\n raise AssertionError('Slice by dimension more than 3 or less than 0 is not supported')\n\n def target_layer(arg_7, arg_8=arg_9(arg_0['axes'][0]), arg_10=arg_9(arg_0['starts'][0]), arg_11=arg_9(arg_0['ends'][0])):\n if arg_8 == 0:\n return arg_7[arg_10:arg_11]\n elif arg_8 == 1:\n return arg_7[:, arg_10:arg_11]\n elif arg_8 == 2:\n return arg_7[:, :, arg_10:arg_11]\n elif arg_8 == 3:\n return arg_7[:, :, :, arg_10:arg_11]\n\n arg_12 = keras.layers.Lambda(target_layer)\n arg_4[arg_2] = arg_12(arg_4[arg_3[0]])"} +{"_id": "doc_4411", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert clip 
operation.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting clip ...')\n\n if arg_0['min'] == 0:\n print(\"using ReLU({0})\".format(arg_0['max']))\n arg_7 = keras.layers.ReLU(max_value=arg_0['max'])\n else:\n def target_layer(arg_8, arg_9=arg_0['min'], arg_10=arg_0['max']):\n import tensorflow as tf\n return tf.clip_by_value(arg_8, arg_9, arg_10)\n arg_7 = keras.layers.Lambda(target_layer)\n\n arg_4[arg_2] = arg_7(arg_4[arg_3[0]])"} +{"_id": "doc_4412", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6\n):\n \"\"\"\n Convert elementwise addition.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting elementwise_add ...')\n if 'broadcast' in arg_0:\n arg_7 = arg_4[arg_3[0]]\n arg_8 = arg_4[arg_3[1]]\n\n if arg_6 == 'short':\n arg_9 = 'A' + random_string(7)\n elif arg_6 == 'keep':\n arg_9 = arg_1\n else:\n arg_9 = arg_1 + str(random.random())\n\n def target_layer(arg_10):\n arg_11 = tf.add(arg_10[0], arg_10[1])\n return arg_11\n\n arg_12 = keras.layers.Lambda(target_layer, name=arg_9)\n arg_4[arg_2] = arg_12([arg_4[arg_3[0]], arg_4[arg_3[1]]])\n else:\n arg_7 = arg_4[arg_3[0]]\n arg_8 = arg_4[arg_3[1]]\n\n if arg_6 == 'short':\n arg_9 = 'A' + random_string(7)\n elif arg_6 == 'keep':\n arg_9 = arg_1\n else:\n arg_9 = arg_1 + str(random.random())\n\n arg_13 = keras.layers.Add(name=arg_9)\n arg_4[arg_2] = arg_13([arg_7, arg_8])"} +{"_id": "doc_4413", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6\n):\n \"\"\"\n Convert elementwise subtraction.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting elementwise_sub ...')\n arg_7 = arg_4[arg_3[0]]\n arg_8 = arg_4[arg_3[1]]\n\n if arg_6 == 'short':\n arg_9 = 'S' + random_string(7)\n elif arg_6 == 'keep':\n arg_9 = arg_1\n else:\n arg_9 = arg_1 + str(random.random())\n\n arg_10 = keras.layers.Subtract(name=arg_9)\n arg_4[arg_2] = arg_10([arg_7, arg_8])"} +{"_id": "doc_4414", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert Linear.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting Linear ...')\n\n if arg_6 == 'short':\n arg_7 = 'FC' + random_string(6)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n arg_8 = '{0}.bias'.format(arg_1)\n arg_9 = '{0}.weight'.format(arg_1)\n\n arg_10 = arg_5[arg_9].numpy().transpose()\n arg_11, arg_12 = arg_10.shape\n\n arg_13 = [arg_10]\n arg_14 = False\n if arg_8 in arg_5:\n arg_15 = arg_5[arg_8].numpy()\n arg_13 = [arg_10, arg_15]\n arg_14 = True\n\n arg_16 = keras.layers.Dense(\n arg_12,\n arg_5=arg_13, 
use_bias=arg_14, name=arg_7, bias_initializer='zeros', kernel_initializer='zeros',\n )\n\n arg_4[arg_2] = arg_16(arg_4[arg_3[0]])"} +{"_id": "doc_4415", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert matmul layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting matmul ...')\n\n if arg_6 == 'short':\n arg_7 = 'MMUL' + random_string(4)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n if len(arg_3) == 1:\n arg_8 = '{0}.weight'.format(arg_1)\n\n arg_9 = arg_5[arg_8].numpy().transpose()\n arg_10, arg_11 = arg_9.shape\n\n arg_12 = [arg_9]\n\n arg_13 = keras.layers.Dense(\n arg_11,\n arg_5=arg_12, use_bias=False, name=arg_7, bias_initializer='zeros', kernel_initializer='zeros',\n )\n arg_4[arg_2] = arg_13(arg_4[arg_3[0]])\n elif len(arg_3) == 2:\n arg_8 = '{0}.weight'.format(arg_1)\n\n arg_9 = arg_5[arg_8].numpy().transpose()\n arg_10, arg_11 = arg_9.shape\n\n arg_12 = [arg_9]\n\n arg_13 = keras.layers.Dense(\n arg_11,\n arg_5=arg_12, use_bias=False, name=arg_7, bias_initializer='zeros', kernel_initializer='zeros',\n )\n arg_4[arg_2] = arg_13(arg_4[arg_3[0]])\n else:\n raise AssertionError('Cannot convert matmul layer')"} +{"_id": "doc_4416", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert constant layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting constant ...')\n\n arg_7 = arg_0['value'].numpy()\n\n def target_layer(arg_8, arg_9=arg_7):\n return tf.constant(arg_9.tolist(), shape=arg_9.shape)\n\n arg_10 = keras.layers.Lambda(target_layer)\n arg_4[arg_2 + '_np'] = arg_7 # ad-hoc\n arg_4[arg_2] = arg_10(arg_4[list(arg_4.keys())[0]])"} +{"_id": "doc_4417", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert transpose layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting transpose ...')\n if arg_0['perm'][0] != 0:\n if arg_3[0] in arg_4:\n print('!!! Cannot permute batch dimension. 
Result may be wrong !!!')\n arg_4[arg_2] = arg_4[arg_3[0]]\n else:\n print('Skip weight matrix transpose, result may be wrong.')\n else:\n if arg_6:\n arg_7 = 'PERM' + random_string(4)\n else:\n arg_7 = arg_1 + str(random.random())\n arg_8 = keras.layers.Permute(arg_0['perm'][1:], name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])"} +{"_id": "doc_4418", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert reshape layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting reshape ...')\n if arg_6 == 'short':\n arg_7 = 'RESH' + random_string(4)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n if len(arg_3) > 1:\n if arg_4[arg_3[1]][0] == -1:\n print('Cannot deduct batch size! It will be omitted, but result may be wrong.')\n\n arg_8 = keras.layers.Reshape(arg_4[arg_3[1] + '_np'], name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])\n else:\n if arg_3[0] in arg_4:\n arg_8 = keras.layers.Reshape(arg_0['shape'][1:], name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])\n else:\n print('Skip weight matrix transpose, but result may be wrong.')"} +{"_id": "doc_4419", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert squeeze operation.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting squeeze ...')\n\n if len(arg_0['axes']) > 1:\n raise AssertionError('Cannot convert squeeze by multiple dimensions')\n\n def target_layer(arg_7, arg_8=arg_9(arg_0['axes'][0])):\n import tensorflow as tf\n return tf.squeeze(arg_7, arg_8=arg_8)\n\n arg_10 = keras.layers.Lambda(target_layer)\n arg_4[arg_2] = arg_10(arg_4[arg_3[0]])"} +{"_id": "doc_4420", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert unsqueeze operation.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting unsqueeze ...')\n\n if arg_6 == 'short':\n arg_7 = 'UNSQ' + random_string(4)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n def target_layer(arg_8):\n import keras\n return keras.backend.expand_dims(arg_8)\n\n arg_9 = keras.layers.Lambda(target_layer, name=arg_7 + 'E')\n arg_4[arg_2] = arg_9(arg_4[arg_3[0]])"} +{"_id": "doc_4421", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert shape operation.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting shape ...')\n\n def target_layer(arg_7):\n import tensorflow as tf\n return tf.shape(arg_7)\n\n arg_8 = keras.layers.Lambda(target_layer)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])"} 
+{"_id": "doc_4422", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert Average pooling.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting pooling ...')\n\n if arg_6 == 'short':\n arg_7 = 'P' + random_string(7)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n if 'kernel_shape' in arg_0:\n arg_8, arg_9 = arg_0['kernel_shape']\n else:\n arg_8, arg_9 = arg_0['kernel_size']\n\n if 'strides' in arg_0:\n arg_10, arg_11 = arg_0['strides']\n else:\n arg_10, arg_11 = arg_0['stride']\n\n if 'pads' in arg_0:\n arg_12, arg_13, arg_14, arg_14 = arg_0['pads']\n else:\n arg_12, arg_13 = arg_0['padding']\n\n arg_15 = arg_3[0]\n arg_16 = 'valid' \n\n if arg_8 % 2 == 1 and arg_9 % 2 == 1 and \\\n arg_8 // 2 == arg_12 and arg_9 // 2 == arg_13 and \\\n arg_10 == 1 and arg_11 == 1:\n arg_16 = 'same'\n else:\n arg_17 = arg_7 + '_pad'\n arg_18 = keras.layers.ZeroPadding2D(\n padding=(arg_12, arg_13),\n name=arg_17\n )\n arg_4[arg_17] = arg_18(arg_4[arg_3[0]])\n arg_15 = arg_17\n\n # Pooling type AveragePooling2D\n arg_19 = keras.layers.AveragePooling2D(\n pool_size=(arg_8, arg_9),\n strides=(arg_10, arg_11),\n padding=arg_16,\n name=arg_7,\n data_format='channels_first'\n )\n\n arg_4[arg_2] = arg_19(arg_4[arg_15])"} +{"_id": "doc_4423", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert 3d Max pooling.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n\n print('Converting pooling ...')\n\n if arg_6 == 'short':\n arg_7 = 'P' + random_string(7)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n if 'kernel_shape' in arg_0:\n arg_8, arg_9, arg_10 = arg_0['kernel_shape']\n else:\n arg_8, arg_9, arg_10 = arg_0['kernel_size']\n\n if 'strides' in arg_0:\n arg_11, arg_12, arg_13 = arg_0['strides']\n else:\n arg_11, arg_12, arg_13 = arg_0['stride']\n\n if 'pads' in arg_0:\n arg_14, arg_15, arg_16, arg_17, arg_17 = arg_0['pads']\n else:\n arg_14, arg_15, arg_16 = arg_0['padding']\n\n arg_18 = arg_3[0]\n if arg_14 > 0 and arg_15 > 0 and arg_16 > 0:\n arg_19 = arg_7 + '_pad'\n arg_20 = keras.layers.ZeroPadding3D(\n padding=(arg_14, arg_15, arg_16),\n name=arg_19\n )\n arg_4[arg_19] = arg_20(arg_4[arg_3[0]])\n arg_18 = arg_19\n\n # Pooling type\n arg_21 = keras.layers.MaxPooling3D(\n pool_size=(arg_8, arg_9, arg_10),\n strides=(arg_11, arg_12, arg_13),\n padding='valid',\n name=arg_7\n )\n\n arg_4[arg_2] = arg_21(arg_4[arg_18])"} +{"_id": "doc_4424", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert instance normalization layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting instancenorm ...')\n\n if arg_6 == 'short':\n arg_7 = 'IN' + random_string(6)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n 
arg_7 = arg_1 + str(random.random())\n\n assert(len(arg_3) == 3)\n\n arg_8 = '{0}.bias'.format(arg_1)\n arg_9 = '{0}.weight'.format(arg_1)\n\n # Use previously taken constants\n if arg_3[-2] + '_np' in arg_4:\n arg_10 = arg_4[arg_3[-2] + '_np']\n else:\n arg_10 = arg_5[arg_9].numpy()\n\n if arg_3[-1] + '_np' in arg_4:\n arg_11 = arg_4[arg_3[-1] + '_np']\n else:\n arg_11 = arg_5[arg_8].numpy()\n\n def target_layer(arg_12, arg_13=arg_0['epsilon'], arg_10=arg_10, arg_11=arg_11):\n arg_14 = tf.contrib.layers.instance_norm(\n arg_12,\n param_initializers={'beta': tf.constant_initializer(arg_11), 'gamma': tf.constant_initializer(arg_10)},\n arg_13=arg_13, data_format='NCHW',\n trainable=False\n )\n return arg_14\n\n arg_15 = keras.layers.Lambda(target_layer, name=arg_7)\n arg_4[arg_2] = arg_15(arg_4[arg_3[0]])"} +{"_id": "doc_4425", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert dropout.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting dropout ...')\n\n if arg_6 == 'short':\n arg_7 = 'DO' + random_string(6)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n arg_8 = keras.layers.Dropout(rate=arg_0['ratio'], name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])"} +{"_id": "doc_4426", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert relu layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting relu ...')\n\n if arg_6 == 'short':\n arg_7 = 'RELU' + random_string(4)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n arg_8 = keras.layers.Activation('relu', name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])"} +{"_id": "doc_4427", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert leaky relu layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting lrelu ...')\n\n if arg_6 == 'short':\n arg_7 = 'lRELU' + random_string(3)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n arg_8 = \\\n keras.layers.LeakyReLU(alpha=arg_0['alpha'], name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])"} +{"_id": "doc_4428", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert softmax layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting softmax ...')\n\n if arg_6 == 'short':\n arg_7 = 'SMAX' + random_string(4)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n def target_layer(arg_8, arg_9=arg_0['dim']):\n import 
keras\n return keras.activations.softmax(arg_8, axis=arg_9)\n\n arg_10 = keras.layers.Lambda(target_layer)\n arg_4[arg_2] = arg_10(arg_4[arg_3[0]])"} +{"_id": "doc_4429", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert hardtanh layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting hardtanh (clip) ...')\n\n def target_layer(arg_7, arg_8=arg_9(arg_0['max_val']), arg_10=arg_9(arg_0['min_val'])):\n return tf.minimum(arg_8, tf.maximum(arg_10, arg_7))\n\n arg_11 = keras.layers.Lambda(target_layer)\n arg_4[arg_2] = arg_11(arg_4[arg_3[0]])"} +{"_id": "doc_4430", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6):\n \"\"\"\n Convert selu layer.\n\n Args:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers\n \"\"\"\n print('Converting selu ...')\n\n if arg_6 == 'short':\n arg_7 = 'SELU' + random_string(4)\n elif arg_6 == 'keep':\n arg_7 = arg_1\n else:\n arg_7 = arg_1 + str(random.random())\n\n arg_8 = keras.layers.Activation('selu', name=arg_7)\n arg_4[arg_2] = arg_8(arg_4[arg_3[0]])"} +{"_id": "doc_4431", "title": "", "text": "def Func(arg_0):\n \"\"\"Removes itself from the cache\n\n Note: This is required by the oauthlib\n \"\"\"\n log.debug(\n \"Deleting grant %s for client %s\" % (arg_0.code, arg_0.client_id)\n )\n arg_0._cache.Func(arg_0.key)\n return None"} +{"_id": "doc_4432", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Returns the User object\n\n Returns None if the user isn't found or the passwords don't match\n\n :param username: username of the user\n :param password: password of the user\n \"\"\"\n arg_5 = arg_0.query.filter_by(arg_1=arg_1).first()\n if arg_5 and arg_5.check_password(arg_2):\n return arg_5\n return None"} +{"_id": "doc_4433", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Creates a Token object and removes all expired tokens that belong\n to the user\n\n :param token: token object\n :param request: OAuthlib request object\n \"\"\"\n if hasattr(arg_2, 'user') and arg_2.user:\n arg_5 = arg_2.user\n elif arg_0.current_user:\n # for implicit token\n arg_5 = arg_0.current_user()\n\n arg_6 = arg_2.client\n\n arg_7 = arg_0.query.filter_by(\n arg_12=arg_6.client_id,\n arg_13=arg_5.id).all()\n if arg_7:\n for arg_8 in arg_7:\n arg_0.session.delete(arg_8)\n arg_0.session.commit()\n\n arg_9 = arg_1.get('expires_in')\n arg_10 = datetime.utcnow() + timedelta(seconds=arg_9)\n\n arg_11 = arg_0.model(**arg_1)\n arg_11.expires = arg_10\n arg_11.client_id = arg_6.client_id\n arg_11.user_id = arg_5.id\n\n arg_0.session.add(arg_11)\n arg_0.session.commit()\n return arg_11"} +{"_id": "doc_4434", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, *arg_4, **arg_5):\n \"\"\"Creates Grant object with the given params\n\n :param client_id: ID of the client\n :param code:\n :param request: OAuthlib request object\n \"\"\"\n arg_6 = datetime.utcnow() + timedelta(seconds=100)\n arg_7 = arg_0.model(\n arg_1=arg_3.client.client_id,\n arg_2=arg_2['code'],\n redirect_uri=arg_3.redirect_uri,\n scope=' 
'.join(arg_3.scopes),\n user=arg_0.current_user(),\n arg_6=arg_6\n )\n arg_0.session.add(arg_7)\n\n arg_0.session.commit()"} +{"_id": "doc_4435", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Init app with Flask instance.\n\n You can also pass the instance of Flask later::\n\n oauth = OAuth()\n oauth.Func(app)\n \"\"\"\n arg_0.app = arg_1\n arg_1.extensions = getattr(arg_1, 'extensions', {})\n arg_1.extensions[arg_0.state_key] = arg_0"} +{"_id": "doc_4436", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, **arg_3):\n \"\"\"Registers a new remote application.\n\n :param name: the name of the remote application\n :param register: whether the remote app will be registered\n\n Find more parameters from :class:`OAuthRemoteApp`.\n \"\"\"\n arg_4 = OAuthRemoteApp(arg_0, arg_1, **arg_3)\n if arg_2:\n assert arg_1 not in arg_0.Funcs\n arg_0.Funcs[arg_1] = arg_4\n return arg_4"} +{"_id": "doc_4437", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4='urlencoded',\n arg_5='GET', arg_6=None, arg_7=None):\n \"\"\"\n Sends a Func to the remote server with OAuth tokens attached.\n\n :param data: the data to be sent to the server.\n :param headers: an optional dictionary of headers.\n :param format: the format for the `data`. Can be `urlencoded` for\n URL encoded data or `json` for JSON.\n :param method: the HTTP Func method to use.\n :param content_type: an optional content type. If a content type\n is provided, the data is passed as it, and\n the `format` is ignored.\n :param token: an optional token to pass, if it is None, token will\n be generated by tokengetter.\n \"\"\"\n\n arg_3 = dict(arg_3 or {})\n if arg_7 is None:\n arg_7 = arg_0.get_Func_token()\n\n arg_8 = arg_0.make_client(arg_7)\n arg_1 = arg_0.expand_url(arg_1)\n if arg_5 == 'GET':\n assert arg_4 == 'urlencoded'\n if arg_2:\n arg_1 = add_params_to_uri(arg_1, arg_2)\n arg_2 = None\n else:\n if arg_6 is None:\n arg_2, arg_6 = encode_Func_data(arg_2, arg_4)\n if arg_6 is not None:\n arg_3['Content-Type'] = arg_6\n\n if arg_0.Func_token_url:\n # oauth1\n arg_9, arg_3, arg_10 = arg_8.sign(\n arg_1, http_method=arg_5, arg_10=arg_2, arg_3=arg_3\n )\n else:\n # oauth2\n arg_9, arg_3, arg_10 = arg_8.add_token(\n arg_1, http_method=arg_5, arg_10=arg_2, arg_3=arg_3\n )\n\n if hasattr(arg_0, 'pre_Func'):\n # This is designed for some rubbish services like weibo.\n # Since they don't follow the standards, we need to\n # change the uri, headers, or body.\n arg_9, arg_3, arg_10 = arg_0.pre_Func(arg_9, arg_3, arg_10)\n\n if arg_10:\n arg_2 = to_bytes(arg_10, arg_0.encoding)\n else:\n arg_2 = None\n arg_11, arg_12 = arg_0.http_Func(\n arg_9, arg_3, arg_2=to_bytes(arg_10, arg_0.encoding), arg_5=arg_5\n )\n return OAuthResponse(arg_11, arg_12, arg_0.content_type)"} +{"_id": "doc_4438", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handles an oauth1 authorization response.\"\"\"\n arg_2 = arg_0.make_client()\n arg_2.verifier = arg_1.get('oauth_verifier')\n arg_4 = session.get('%s_oauthtok' % arg_0.name)\n if not arg_4:\n raise OAuthException(\n 'Token not found, maybe you disabled cookie',\n type='token_not_found'\n )\n arg_2.resource_owner_key = arg_4[0]\n arg_2.resource_owner_secret = arg_4[1]\n\n arg_7, arg_8, arg_9 = arg_2.sign(\n arg_0.expand_url(arg_0.access_token_url),\n _encode(arg_0.access_token_method)\n )\n arg_8.update(arg_0._access_token_headers)\n\n arg_10, arg_11 = arg_0.http_request(\n arg_7, arg_8, to_bytes(arg_9, arg_0.encoding),\n method=arg_0.access_token_method\n )\n arg_9 = 
parse_response(arg_10, arg_11)\n if arg_10.code not in (200, 201):\n raise OAuthException(\n 'Invalid response from %s' % arg_0.name,\n type='invalid_response', arg_9=arg_9\n )\n return arg_9"} +{"_id": "doc_4439", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handles an oauth2 authorization response.\"\"\"\n\n arg_2 = arg_0.make_client()\n arg_3 = {\n 'code': arg_1.get('code'),\n 'client_secret': arg_0.consumer_secret,\n 'redirect_uri': session.get('%s_oauthredir' % arg_0.name)\n }\n log.debug('Prepare oauth2 remote args %r', arg_3)\n arg_3.update(arg_0.access_token_params)\n arg_4 = copy(arg_0._access_token_headers)\n if arg_0.access_token_method == 'POST':\n arg_4.update({'Content-Type': 'application/x-www-form-urlencoded'})\n arg_5 = arg_2.prepare_request_body(**arg_3)\n arg_6, arg_7 = arg_0.http_request(\n arg_0.expand_url(arg_0.access_token_url),\n arg_4=arg_4,\n arg_10=to_bytes(arg_5, arg_0.encoding),\n method=arg_0.access_token_method,\n )\n elif arg_0.access_token_method == 'GET':\n arg_8 = arg_2.prepare_request_body(**arg_3)\n arg_9 = arg_0.expand_url(arg_0.access_token_url)\n arg_9 += ('?' in arg_9 and '&' or '?') + arg_8\n arg_6, arg_7 = arg_0.http_request(\n arg_9,\n arg_4=arg_4,\n method=arg_0.access_token_method,\n )\n else:\n raise OAuthException(\n 'Unsupported access_token_method: %s' %\n arg_0.access_token_method\n )\n\n arg_10 = parse_response(arg_6, arg_7, content_type=arg_0.content_type)\n if arg_6.code not in (200, 201):\n raise OAuthException(\n 'Invalid response from %s' % arg_0.name,\n type='invalid_response', arg_10=arg_10\n )\n return arg_10"} +{"_id": "doc_4440", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Handles authorization response smartly.\"\"\"\n if arg_1 is None:\n arg_1 = request.args\n if 'oauth_verifier' in arg_1:\n arg_2 = arg_0.handle_oauth1_response(arg_1)\n elif 'code' in arg_1:\n arg_2 = arg_0.handle_oauth2_response(arg_1)\n else:\n arg_2 = arg_0.handle_unknown_response()\n\n # free request token\n session.pop('%s_oauthtok' % arg_0.name, None)\n session.pop('%s_oauthredir' % arg_0.name, None)\n return arg_2"} +{"_id": "doc_4441", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Uses cached client or create new one with specific token.\"\"\"\n arg_2 = getattr(arg_0, 'clients', None)\n arg_3 = _hash_token(arg_0, arg_1)\n\n if arg_2 and arg_3 in arg_2:\n return arg_2[arg_3]\n\n arg_4 = arg_0.make_client(arg_1) # implemented in subclasses\n if arg_2:\n arg_2[arg_3] = arg_4\n\n return arg_4"} +{"_id": "doc_4442", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a client with specific access token pair.\n\n :param token: a tuple of access token pair ``(token, token_secret)``\n or a dictionary of access token response.\n :returns: a :class:`requests_oauthlib.oauth1_session.OAuth1Session`\n object.\n \"\"\"\n if isinstance(arg_1, dict):\n arg_2 = arg_1['oauth_token']\n arg_3 = arg_1['oauth_token_secret']\n else:\n arg_2, arg_3 = arg_1\n return arg_0.make_oauth_session(\n resource_owner_key=arg_2,\n resource_owner_secret=arg_3)"} +{"_id": "doc_4443", "title": "", "text": "def Func(arg_0):\n \"\"\"When consumer confirm the authrozation.\"\"\"\n arg_1 = arg_0.server\n\n arg_2, arg_3, arg_4, arg_5 = extract_params()\n try:\n arg_6, arg_7 = arg_1.get_realms_and_credentials(\n arg_2, arg_3=arg_3, arg_4=arg_4, arg_5=arg_5\n )\n arg_8 = arg_1.create_authorization_response(\n arg_2, arg_3, arg_4, arg_5, arg_6, arg_7\n )\n log.debug('Authorization successful.')\n return create_response(*arg_8)\n except errors.OAuth1Error 
as e:\n return redirect(e.in_uri(arg_0.error_uri))\n except errors.InvalidClientError as e:\n return redirect(e.in_uri(arg_0.error_uri))"} +{"_id": "doc_4444", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Request token handler decorator.\n\n The decorated function should return an dictionary or None as\n the extra credentials for creating the token response.\n\n If you don't need to add any extra credentials, it could be as\n simple as::\n\n @app.route('/oauth/request_token')\n @oauth.Func\n def request_token():\n return {}\n \"\"\"\n @wraps(arg_1)\n def decorated(*arg_2, **arg_3):\n arg_4 = arg_0.server\n arg_5, arg_6, arg_7, arg_8 = extract_params()\n arg_9 = arg_1(*arg_2, **arg_3)\n try:\n arg_10 = arg_4.create_request_token_response(\n arg_5, arg_6, arg_7, arg_8, arg_9)\n return create_response(*arg_10)\n except errors.OAuth1Error as e:\n return _error_response(e)\n return decorated"} +{"_id": "doc_4445", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get client secret.\n\n The client object must has ``client_secret`` attribute.\n \"\"\"\n log.debug('Get client secret of %r', arg_1)\n if not arg_2.client:\n arg_2.client = arg_0._clientgetter(arg_1=arg_1)\n if arg_2.client:\n return arg_2.client.client_secret\n return None"} +{"_id": "doc_4446", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Get request token secret.\n\n The request token object should a ``secret`` attribute.\n \"\"\"\n log.debug('Get request token secret of %r for %r',\n arg_2, arg_1)\n arg_4 = arg_3.request_token or arg_0._grantgetter(arg_2=arg_2)\n if arg_4 and arg_4.client_key == arg_1:\n arg_3.request_token = arg_4\n return arg_4.secret\n return None"} +{"_id": "doc_4447", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Get access token secret.\n\n The access token object should a ``secret`` attribute.\n \"\"\"\n log.debug('Get access token secret of %r for %r',\n arg_2, arg_1)\n arg_4 = arg_3.access_token or arg_0._tokengetter(\n arg_1=arg_1,\n arg_2=arg_2,\n )\n if arg_4:\n arg_3.access_token = arg_4\n return arg_4.secret\n return None"} +{"_id": "doc_4448", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Default realms of the client.\"\"\"\n log.debug('Get realms for %r', arg_1)\n\n if not arg_2.client:\n arg_2.client = arg_0._clientgetter(arg_1=arg_1)\n\n arg_3 = arg_2.client\n if hasattr(arg_3, 'default_realms'):\n return arg_3.default_realms\n return []"} +{"_id": "doc_4449", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Realms for this request token.\"\"\"\n log.debug('Get realms of %r', arg_1)\n arg_3 = arg_2.request_token or arg_0._grantgetter(arg_1=arg_1)\n if not arg_3:\n return []\n arg_2.request_token = arg_3\n if hasattr(arg_3, 'realms'):\n return arg_3.realms or []\n return []"} +{"_id": "doc_4450", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Retrieves a previously stored client provided RSA key.\"\"\"\n if not arg_2.client:\n arg_2.client = arg_0._clientgetter(arg_1=arg_1)\n if hasattr(arg_2.client, 'rsa_key'):\n return arg_2.client.rsa_key\n return None"} +{"_id": "doc_4451", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Validates that supplied client key.\"\"\"\n log.debug('Validate client key for %r', arg_1)\n if not arg_2.client:\n arg_2.client = arg_0._clientgetter(arg_1=arg_1)\n if arg_2.client:\n return True\n return False"} +{"_id": "doc_4452", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Validates request token is available for client.\"\"\"\n 
log.debug('Validate request token %r for %r',\n arg_2, arg_1)\n arg_4 = arg_3.request_token or arg_0._grantgetter(arg_2=arg_2)\n if arg_4 and arg_4.client_key == arg_1:\n arg_3.request_token = arg_4\n return True\n return False"} +{"_id": "doc_4453", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Validates access token is available for client.\"\"\"\n log.debug('Validate access token %r for %r',\n arg_2, arg_1)\n arg_4 = arg_3.access_token or arg_0._tokengetter(\n arg_1=arg_1,\n arg_2=arg_2,\n )\n if arg_4:\n arg_3.access_token = arg_4\n return True\n return False"} +{"_id": "doc_4454", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5=None,\n arg_6=None):\n \"\"\"Validate the timestamp and nonce is used or not.\"\"\"\n log.debug('Validate timestamp and nonce %r', arg_1)\n arg_7 = arg_0._noncegetter(\n arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_5=arg_5,\n arg_6=arg_6\n )\n if arg_7:\n return False\n arg_0._noncesetter(\n arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_5=arg_5,\n arg_6=arg_6\n )\n return True"} +{"_id": "doc_4455", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=None):\n \"\"\"Check if the token has permission on those realms.\"\"\"\n log.debug('Validate realms %r for %r', arg_5, arg_1)\n if arg_3.access_token:\n arg_6 = arg_3.access_token\n else:\n arg_6 = arg_0._tokengetter(arg_1=arg_1, arg_2=arg_2)\n arg_3.access_token = arg_6\n if not arg_6:\n return False\n return set(arg_6.realms).issuperset(set(arg_5))"} +{"_id": "doc_4456", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Validate verifier exists.\"\"\"\n log.debug('Validate verifier %r for %r', arg_3, arg_1)\n arg_5 = arg_0._verifiergetter(arg_3=arg_3, arg_2=arg_2)\n if not arg_5:\n return False\n if not hasattr(arg_5, 'user'):\n log.debug('Verifier should has user attribute')\n return False\n arg_4.user = arg_5.user\n if hasattr(arg_5, 'client_key'):\n return arg_5.client_key == arg_1\n return True"} +{"_id": "doc_4457", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Verify if the request token is existed.\"\"\"\n log.debug('Verify request token %r', arg_1)\n arg_3 = arg_2.request_token or arg_0._grantgetter(arg_1=arg_1)\n if arg_3:\n arg_2.request_token = arg_3\n return True\n return False"} +{"_id": "doc_4458", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Save request token to database.\n\n A grantsetter is required, which accepts a token and request\n parameters::\n\n def grantsetter(token, request):\n grant = Grant(\n token=token['oauth_token'],\n secret=token['oauth_token_secret'],\n client=request.client,\n redirect_uri=oauth.redirect_uri,\n realms=request.realms,\n )\n return grant.save()\n \"\"\"\n log.debug('Save request token %r', arg_1)\n arg_0._grantsetter(arg_1, arg_2)"} +{"_id": "doc_4459", "title": "", "text": "def Func(arg_0):\n \"\"\"The error page URI.\n\n When something turns error, it will redirect to this error page.\n You can configure the error page URI with Flask config::\n\n OAUTH2_PROVIDER_ERROR_URI = '/error'\n\n You can also define the error page by a named endpoint::\n\n OAUTH2_PROVIDER_ERROR_ENDPOINT = 'oauth.error'\n \"\"\"\n Func = arg_0.app.config.get('OAUTH2_PROVIDER_ERROR_URI')\n if Func:\n return Func\n arg_2 = arg_0.app.config.get('OAUTH2_PROVIDER_ERROR_ENDPOINT')\n if arg_2:\n return url_for(arg_2)\n return '/oauth/errors'"} +{"_id": "doc_4460", "title": "", "text": "def Func(arg_0):\n \"\"\"When consumer confirm the authorization.\"\"\"\n arg_1 = 
arg_0.server\n arg_2 = request.values.get('scope') or ''\n arg_3 = arg_2.split()\n arg_4 = dict(\n client_id=request.values.get('client_id'),\n arg_5=request.values.get('redirect_uri', None),\n response_type=request.values.get('response_type', None),\n arg_11=request.values.get('state', None)\n )\n log.debug('Fetched credentials from request %r.', arg_4)\n arg_5 = arg_4.get('redirect_uri')\n log.debug('Found redirect_uri %s.', arg_5)\n\n arg_6, arg_7, arg_8, arg_9 = extract_params()\n try:\n arg_10 = arg_1.create_authorization_response(\n arg_6, arg_7, arg_8, arg_9, arg_3, arg_4)\n log.debug('Authorization successful.')\n return create_response(*arg_10)\n except oauth2.FatalClientError as arg_12:\n log.debug('Fatal client error %r', arg_12, exc_info=True)\n return arg_0._on_exception(arg_12, arg_12.in_uri(arg_0.error_uri))\n except oauth2.OAuth2Error as arg_12:\n log.debug('OAuth2Error: %r', arg_12, exc_info=True)\n \n # on auth error, we should preserve state if it's present according to RFC 6749\n arg_11 = request.values.get('state')\n if arg_11 and not arg_12.state:\n arg_12.state = arg_11 # set e.state so e.in_uri() can add the state query parameter to redirect uri\n return arg_0._on_exception(arg_12, arg_12.in_uri(arg_5 or arg_0.error_uri))\n except Exception as arg_12:\n log.exception(arg_12)\n return arg_0._on_exception(arg_12, add_params_to_uri(\n arg_0.error_uri, {'error': str(arg_12)}\n ))"} +{"_id": "doc_4461", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Determine if client authentication is required for current request.\n\n According to the rfc6749, client authentication is required in the\n following cases:\n\n Resource Owner Password Credentials Grant: see `Section 4.3.2`_.\n Authorization Code Grant: see `Section 4.1.3`_.\n Refresh Token Grant: see `Section 6`_.\n\n .. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2\n .. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3\n .. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6\n \"\"\"\n def is_confidential(arg_4):\n if hasattr(arg_4, 'is_confidential'):\n return arg_4.is_confidential\n arg_5 = getattr(arg_4, 'client_type', None)\n if arg_5:\n return arg_5 == 'confidential'\n return True\n\n arg_6 = ('password', 'authorization_code', 'refresh_token')\n arg_7, arg_8 = arg_0._get_client_creds_from_request(arg_1)\n if arg_7 and arg_1.grant_type in arg_6:\n arg_4 = arg_0._clientgetter(arg_7)\n if arg_4:\n return is_confidential(arg_4)\n return False"} +{"_id": "doc_4462", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Authenticate itself in other means.\n\n Other means means is described in `Section 3.2.1`_.\n\n .. 
_`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1\n \"\"\"\n arg_4, arg_5 = arg_0._get_client_creds_from_request(arg_1)\n log.debug('Authenticate client %r', arg_4)\n\n arg_6 = arg_0._clientgetter(arg_4)\n if not arg_6:\n log.debug('Authenticate client failed, client not found.')\n return False\n\n arg_1.client = arg_6\n\n # http://tools.ietf.org/html/rfc6749#section-2\n # The client MAY omit the parameter if the client secret is an empty string.\n if hasattr(arg_6, 'client_secret') and arg_6.client_secret != arg_5:\n log.debug('Authenticate client failed, secret not match.')\n return False\n\n log.debug('Authenticate client success.')\n return True"} +{"_id": "doc_4463", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Authenticate a non-confidential client.\n\n :param client_id: Client ID of the non-confidential client\n :param request: The Request object passed by oauthlib\n \"\"\"\n if arg_1 is None:\n arg_1, arg_5 = arg_0._get_client_creds_from_request(arg_2)\n\n log.debug('Authenticate client %r.', arg_1)\n arg_6 = arg_2.client or arg_0._clientgetter(arg_1)\n if not arg_6:\n log.debug('Authenticate failed, client not found.')\n return False\n\n # attach client on request for convenience\n arg_2.client = arg_6\n return True"} +{"_id": "doc_4464", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Get the list of scopes associated with the refresh token.\n\n This method is used in the refresh token grant flow. We return\n the scope of the token to be refreshed so it can be applied to the\n new access token.\n \"\"\"\n log.debug('Obtaining scope of refreshed token.')\n arg_5 = arg_0._tokengetter(arg_1=arg_1)\n return arg_5.scopes"} +{"_id": "doc_4465", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, *arg_4, **arg_5):\n \"\"\"Ensures the requested scope matches the scope originally granted\n by the resource owner. 
If the scope is omitted it is treated as equal\n to the scope originally granted by the resource owner.\n\n DEPRECATION NOTE: This method will cease to be used in oauthlib>0.4.2,\n future versions of ``oauthlib`` use the validator method\n ``get_original_scopes`` to determine the scope of the refreshed token.\n \"\"\"\n if not arg_2:\n log.debug('Scope omitted for refresh token %r', arg_1)\n return True\n log.debug('Confirm scopes %r for refresh token %r',\n arg_2, arg_1)\n arg_6 = arg_0._tokengetter(arg_1=arg_1)\n return set(arg_6.scopes) == set(arg_2)"} +{"_id": "doc_4466", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Default redirect_uri for the given client.\"\"\"\n arg_2.client = arg_2.client or arg_0._clientgetter(arg_1)\n arg_6 = arg_2.client.default_redirect_uri\n log.debug('Found default redirect uri %r', arg_6)\n return arg_6"} +{"_id": "doc_4467", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Default scopes for the given client.\"\"\"\n arg_2.client = arg_2.client or arg_0._clientgetter(arg_1)\n arg_6 = arg_2.client.default_scopes\n log.debug('Found default scopes %r', arg_6)\n return arg_6"} +{"_id": "doc_4468", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Persist the Bearer token.\"\"\"\n log.debug('Save bearer token %r', arg_1)\n arg_0._tokensetter(arg_1, arg_2, *arg_3, **arg_4)\n return arg_2.client.default_redirect_uri"} +{"_id": "doc_4469", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Ensure client_id belong to a valid and active client.\"\"\"\n log.debug('Validate client %r', arg_1)\n arg_5 = arg_2.client or arg_0._clientgetter(arg_1)\n if arg_5:\n # attach client to request object\n arg_2.client = arg_5\n return True\n return False"} +{"_id": "doc_4470", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, *arg_5, **arg_6):\n \"\"\"Ensure the grant code is valid.\"\"\"\n arg_3 = arg_3 or arg_0._clientgetter(arg_1)\n log.debug(\n 'Validate code for client %r and code %r', arg_3.client_id, arg_2\n )\n arg_7 = arg_0._grantgetter(arg_1=arg_3.client_id, arg_2=arg_2)\n if not arg_7:\n log.debug('Grant not found.')\n return False\n if hasattr(arg_7, 'expires') and \\\n datetime.datetime.utcnow() > arg_7.expires:\n log.debug('Grant is expired.')\n return False\n\n arg_4.state = arg_6.get('state')\n arg_4.user = arg_7.user\n arg_4.scopes = arg_7.scopes\n return True"} +{"_id": "doc_4471", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n *arg_5, **arg_6):\n \"\"\"Ensure the client is authorized to use the grant type requested.\n\n It will allow any of the four grant types (`authorization_code`,\n `password`, `client_credentials`, `refresh_token`) by default.\n Implemented `allowed_grant_types` for client object to authorize\n the request.\n\n It is suggested that `allowed_grant_types` should contain at least\n `authorization_code` and `refresh_token`.\n \"\"\"\n if arg_0._usergetter is None and arg_2 == 'password':\n log.debug('Password credential authorization is disabled.')\n return False\n\n arg_7 = (\n 'authorization_code', 'password',\n 'client_credentials', 'refresh_token',\n )\n\n # Grant type is allowed if it is part of the 'allowed_grant_types'\n # of the selected client or if it is one of the default grant types\n if hasattr(arg_3, 'allowed_grant_types'):\n if arg_2 not in arg_3.allowed_grant_types:\n return False\n else:\n if arg_2 not in arg_7:\n return False\n\n if arg_2 == 'client_credentials':\n if not 
hasattr(arg_3, 'user'):\n log.debug('Client should have a user property')\n return False\n arg_4.user = arg_3.user\n\n return True"} +{"_id": "doc_4472", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n *arg_4, **arg_5):\n \"\"\"Ensure the token is valid and belongs to the client\n\n This method is used by the authorization code grant indirectly by\n issuing refresh tokens, resource owner password credentials grant\n (also indirectly) and the refresh token grant.\n \"\"\"\n\n arg_6 = arg_0._tokengetter(arg_1=arg_1)\n\n if arg_6 and arg_6.client_id == arg_2.client_id:\n # Make sure the request object contains user and client_id\n arg_3.client_id = arg_6.client_id\n arg_3.user = arg_6.user\n return True\n return False"} +{"_id": "doc_4473", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n *arg_5, **arg_6):\n \"\"\"Ensure the client is authorized access to requested scopes.\"\"\"\n if hasattr(arg_3, 'Func'):\n return arg_3.Func(arg_2)\n return set(arg_3.default_scopes).issuperset(set(arg_2))"} +{"_id": "doc_4474", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n *arg_5, **arg_6):\n \"\"\"Ensure the username and password is valid.\n\n Attach user object on request for later using.\n \"\"\"\n log.debug('Validating username %r and its password', arg_1)\n if arg_0._usergetter is not None:\n arg_7 = arg_0._usergetter(\n arg_1, arg_2, arg_3, arg_4, *arg_5, **arg_6\n )\n if arg_7:\n arg_4.user = arg_7\n return True\n return False\n log.debug('Password credential authorization is disabled.')\n return False"} +{"_id": "doc_4475", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, *arg_4, **arg_5):\n \"\"\"Revoke an access or refresh token.\n \"\"\"\n if arg_2:\n arg_6 = arg_0._tokengetter(**{arg_2: arg_1})\n else:\n arg_6 = arg_0._tokengetter(access_token=arg_1)\n if not arg_6:\n arg_6 = arg_0._tokengetter(refresh_token=arg_1)\n\n if arg_6:\n arg_3.client_id = arg_6.client_id\n arg_3.user = arg_6.user\n arg_6.delete()\n return True\n\n arg_9 = 'Invalid token supplied.'\n log.debug(arg_9)\n arg_3.error_message = arg_9\n return False"} +{"_id": "doc_4476", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Since weibo is a rubbish server, it does not follow the standard,\n we need to change the authorization header for it.\"\"\"\n arg_3 = arg_1.get('Authorization')\n if arg_3:\n arg_3 = arg_3.replace('Bearer', 'OAuth2')\n arg_1['Authorization'] = arg_3\n return arg_0, arg_1, arg_2"} +{"_id": "doc_4477", "title": "", "text": "def Func():\n \"\"\"Extract request params.\"\"\"\n\n arg_0 = _get_uri_from_request(request)\n arg_1 = request.method\n arg_2 = dict(request.headers)\n if 'wsgi.input' in arg_2:\n del arg_2['wsgi.input']\n if 'wsgi.errors' in arg_2:\n del arg_2['wsgi.errors']\n # Werkzeug, and subsequently Flask provide a safe Authorization header\n # parsing, so we just replace the Authorization header with the extraced\n # info if it was successfully parsed.\n if request.authorization:\n arg_2['Authorization'] = request.authorization\n\n arg_3 = request.form.to_dict()\n return arg_0, arg_1, arg_3, arg_2"} +{"_id": "doc_4478", "title": "", "text": "def Func(arg_0, arg_1='utf-8'):\n \"\"\"Make sure text is bytes type.\"\"\"\n if not arg_0:\n return arg_0\n if not isinstance(arg_0, bytes_type):\n arg_0 = arg_0.encode(arg_1)\n return arg_0"} +{"_id": "doc_4479", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create response class for Flask.\"\"\"\n arg_3 = Response(arg_1 or '')\n for arg_4, arg_5 in arg_0.items():\n 
arg_3.headers[arg_6(arg_4)] = arg_5\n\n arg_3.status_code = arg_2\n return arg_3"} +{"_id": "doc_4480", "title": "", "text": "def Func():\n \"\"\"Gets the cached clients dictionary in current context.\"\"\"\n if OAuth.state_key not in current_app.extensions:\n raise RuntimeError('%r is not initialized.' % current_app)\n arg_0 = current_app.extensions[OAuth.state_key]\n return arg_0.cached_clients"} +{"_id": "doc_4481", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Adds remote application and applies custom attributes on it.\n\n If the application instance's name is different from the argument\n provided name, or the keyword arguments is not empty, then the\n application instance will not be modified but be copied as a\n prototype.\n\n :param remote_app: the remote application instance.\n :type remote_app: the subclasses of :class:`BaseApplication`\n :params kwargs: the overriding attributes for the application instance.\n \"\"\"\n if arg_2 is None:\n arg_2 = arg_1.name\n if arg_2 != arg_1.name or arg_3:\n arg_1 = copy.copy(arg_1)\n arg_1.name = arg_2\n vars(arg_1).update(arg_3)\n if not hasattr(arg_1, 'clients'):\n arg_1.clients = cached_clients\n arg_0.remote_apps[arg_2] = arg_1\n return arg_1"} +{"_id": "doc_4482", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Creates and adds new remote application.\n\n :param name: the remote application's name.\n :param version: '1' or '2', the version code of OAuth protocol.\n :param kwargs: the attributes of remote application.\n \"\"\"\n if arg_2 is None:\n if 'request_token_url' in arg_3:\n arg_2 = '1'\n else:\n arg_2 = '2'\n if arg_2 == '1':\n Func = OAuth1Application(arg_1, clients=cached_clients)\n elif arg_2 == '2':\n Func = OAuth2Application(arg_1, clients=cached_clients)\n else:\n raise ValueError('unkonwn version %r' % arg_2)\n return arg_0.add_remote_app(Func, **arg_3)"} +{"_id": "doc_4483", "title": "", "text": "def Func(**arg_0):\n \"\"\"Attempt to return a PWM instance for the platform which the code is being\n executed on. Currently supports only the Raspberry Pi using the RPi.GPIO\n library and Beaglebone Black using the Adafruit_BBIO library. Will throw an\n exception if a PWM instance can't be created for the current platform. The\n returned PWM object has the same interface as the RPi_PWM_Adapter and\n BBIO_PWM_Adapter classes.\n \"\"\"\n arg_1 = Platform.platform_detect()\n if arg_1 == Platform.RASPBERRY_PI:\n import RPi.GPIO\n return RPi_PWM_Adapter(RPi.GPIO, **arg_0)\n elif arg_1 == Platform.BEAGLEBONE_BLACK:\n import Adafruit_BBIO.PWM\n return BBIO_PWM_Adapter(Adafruit_BBIO.PWM, **arg_0)\n elif arg_1 == Platform.UNKNOWN:\n raise RuntimeError('Could not determine platform.')"} +{"_id": "doc_4484", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Stop PWM output on specified pin.\"\"\"\n if arg_1 not in arg_0.pwm:\n raise ValueError('Pin {0} is not configured as a PWM. Make sure to first call start for the pin.'.format(arg_1))\n arg_0.pwm[arg_1].Func()\n del arg_0.pwm[arg_1]"} +{"_id": "doc_4485", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Write the specified byte value to the IODIR registor. If no value\n specified the current buffered value will be written.\n \"\"\"\n if arg_1 is not None:\n arg_0.iodir = arg_1\n arg_0._device.writeList(arg_0.IODIR, arg_0.iodir)"} +{"_id": "doc_4486", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Write the specified byte value to the GPPU registor. 
If no value\n specified the current buffered value will be written.\n \"\"\"\n if arg_1 is not None:\n arg_0.gppu = arg_1\n arg_0._device.writeList(arg_0.GPPU, arg_0.gppu)"} +{"_id": "doc_4487", "title": "", "text": "def Func():\n \"\"\"Disable the FTDI drivers for the current platform. This is necessary\n because they will conflict with libftdi and accessing the FT232H. Note you\n can enable the FTDI drivers again by calling enable_FTDI_driver.\n \"\"\"\n logger.debug('Disabling FTDI driver.')\n if sys.platform == 'darwin':\n logger.debug('Detected Mac OSX')\n # Mac OS commands to disable FTDI driver.\n _check_running_as_root()\n subprocess.call('kextunload -b com.apple.driver.AppleUSBFTDI', shell=True)\n subprocess.call('kextunload /System/Library/Extensions/FTDIUSBSerialDriver.kext', shell=True)\n elif sys.platform.startswith('linux'):\n logger.debug('Detected Linux')\n # Linux commands to disable FTDI driver.\n _check_running_as_root()\n subprocess.call('modprobe -r -q ftdi_sio', shell=True)\n subprocess.call('modprobe -r -q usbserial', shell=True)"} +{"_id": "doc_4488", "title": "", "text": "def Func(arg_0):\n \"\"\"Close the FTDI device. Will be automatically called when the program ends.\"\"\"\n if arg_0._ctx is not None:\n ftdi.free(arg_0._ctx)\n arg_0._ctx = None"} +{"_id": "doc_4489", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper function to call write_data on the provided FTDI device and\n verify it succeeds.\n \"\"\"\n # Get modem status. Useful to enable for debugging.\n #ret, status = ftdi.poll_modem_status(self._ctx)\n #if ret == 0:\n #\tlogger.debug('Modem status {0:02X}'.format(status))\n #else:\n #\tlogger.debug('Modem status error {0}'.format(ret))\n arg_2 = len(arg_1)\n try:\n arg_3 = ftdi.write_data(arg_0._ctx, arg_1, arg_2)\n except TypeError:\n arg_3 = ftdi.write_data(arg_0._ctx, arg_1); #compatible with libFtdi 1.3\n # Log the string that was written in a python hex string format using a very\n # ugly one-liner list comprehension for brevity.\n #logger.debug('Wrote {0}'.format(''.join(['\\\\x{0:02X}'.format(ord(x)) for x in string])))\n if arg_3 < 0:\n raise RuntimeError('ftdiFunc_data failed with error {0}: {1}'.format(arg_3, ftdi.get_error_string(arg_0._ctx)))\n if arg_3 != arg_2:\n raise RuntimeError('ftdiFunc_data expected to write {0} bytes but actually wrote {1}!'.format(arg_2, arg_3))"} +{"_id": "doc_4490", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"Helper function to call the provided command on the FTDI device and\n verify the response matches the expected value.\n \"\"\"\n arg_3 = arg_1(arg_0._ctx, *arg_2)\n logger.debug('Called ftdi_{0} and got response {1}.'.format(arg_1.__name__, arg_3))\n if arg_3 != 0:\n raise RuntimeError('ftdi_{0} failed with error {1}: {2}'.format(arg_1.__name__, arg_3, ftdi.get_error_string(arg_0._ctx)))"} +{"_id": "doc_4491", "title": "", "text": "def Func(arg_0, arg_1, arg_2=5.0):\n \"\"\"Helper function to continuously poll reads on the FTDI device until an\n expected number of bytes are returned. Will throw a timeout error if no\n data is received within the specified number of timeout seconds. 
Returns\n the read data as a string if successful, otherwise raises an execption.\n \"\"\"\n arg_3 = time.time()\n # Start with an empty response buffer.\n arg_4 = bytearray(arg_1)\n arg_5 = 0\n # Loop calling read until the response buffer is full or a timeout occurs.\n while time.time() - arg_3 <= arg_2:\n arg_6, arg_7 = ftdi.read_data(arg_0._ctx, arg_1 - arg_5)\n # Fail if there was an error reading data.\n if arg_6 < 0:\n raise RuntimeError('ftdi_read_data failed with error code {0}.'.format(arg_6))\n # Add returned data to the buffer.\n arg_4[arg_5:arg_5+arg_6] = arg_7[:arg_6]\n arg_5 += arg_6\n # Buffer is full, return the result data.\n if arg_5 >= arg_1:\n return str(arg_4)\n time.sleep(0.01)\n raise RuntimeError('Timeout while polling ftdi_read_data for {0} bytes!'.format(arg_1))"} +{"_id": "doc_4492", "title": "", "text": "def Func(arg_0):\n \"\"\"Enable MPSSE mode on the FTDI device.\"\"\"\n # Reset MPSSE by sending mask = 0 and mode = 0\n arg_0._check(ftdi.set_bitmode, 0, 0)\n # Enable MPSSE by sending mask = 0 and mode = 2\n arg_0._check(ftdi.set_bitmode, 0, 2)"} +{"_id": "doc_4493", "title": "", "text": "def Func(arg_0, arg_1=10):\n \"\"\"Synchronize buffers with MPSSE by sending bad opcode and reading expected\n error response. Should be called once after enabling MPSSE.\"\"\"\n # Send a bad/unknown command (0xAB), then read buffer until bad command\n # response is found.\n arg_0._write('\\xAB')\n # Keep reading until bad command response (0xFA 0xAB) is returned.\n # Fail if too many read attempts are made to prevent sticking in a loop.\n arg_2 = 0\n arg_3 = False\n while not arg_3:\n arg_4 = arg_0._poll_read(2)\n if arg_4 == '\\xFA\\xAB':\n arg_3 = True\n arg_2 += 1\n if arg_2 >= arg_1:\n raise RuntimeError('Could not synchronize with FT232H!')"} +{"_id": "doc_4494", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=False):\n \"\"\"Set the clock speed of the MPSSE engine. 
Can be any value from 450hz\n to 30mhz and will pick that speed or the closest speed below it.\n \"\"\"\n # Disable clock divisor by 5 to enable faster speeds on FT232H.\n arg_0._write('\\x8A')\n # Turn on/off adaptive clocking.\n if arg_2:\n arg_0._write('\\x96')\n else:\n arg_0._write('\\x97')\n # Turn on/off three phase clock (needed for I2C).\n # Also adjust the frequency for three-phase clocking as specified in section 2.2.4\n # of this document:\n # http://www.ftdichip.com/Support/Documents/AppNotes/AN_255_USB%20to%20I2C%20Example%20using%20the%20FT232H%20and%20FT201X%20devices.pdf\n if arg_3:\n arg_0._write('\\x8C')\n else:\n arg_0._write('\\x8D')\n # Compute divisor for requested clock.\n # Use equation from section 3.8.1 of:\n # http://www.ftdichip.com/Support/Documents/AppNotes/AN_108_Command_Processor_for_MPSSE_and_MCU_Host_Bus_Emulation_Modes.pdf\n # Note equation is using 60mhz master clock instead of 12mhz.\n arg_4 = int(math.ceil((30000000.0-float(arg_1))/float(arg_1))) & 0xFFFF\n if arg_3:\n arg_4 = int(arg_4*(2.0/3.0))\n logger.debug('Setting clockspeed with divisor value {0}'.format(arg_4))\n # Send command to set divisor from low and high byte values.\n arg_0._write(str(bytearray((0x86, arg_4 & 0xFF, (arg_4 >> 8) & 0xFF))))"} +{"_id": "doc_4495", "title": "", "text": "def Func(arg_0):\n \"\"\"Read both GPIO bus states and return a 16 bit value with their state.\n D0-D7 are the lower 8 bits and C0-C7 are the upper 8 bits.\n \"\"\"\n # Send command to read low byte and high byte.\n arg_0._write('\\x81\\x83')\n # Wait for 2 byte response.\n arg_1 = arg_0._poll_read(2)\n # Assemble response into 16 bit value.\n arg_2 = ord(arg_1[0])\n arg_3 = ord(arg_1[1])\n logger.debug('Read MPSSE GPIO low byte = {0:02X} and high byte = {1:02X}'.format(arg_2, arg_3))\n return (arg_3 << 8) | arg_2"} +{"_id": "doc_4496", "title": "", "text": "def Func(arg_0):\n \"\"\"Return command to update the MPSSE GPIO state to the current direction\n and level.\n \"\"\"\n arg_1 = chr(arg_0._level & 0xFF)\n arg_2 = chr((arg_0._level >> 8) & 0xFF)\n arg_3 = chr(arg_0._direction & 0xFF)\n arg_4 = chr((arg_0._direction >> 8) & 0xFF)\n return str(bytearray((0x80, arg_1, arg_3, 0x82, arg_2, arg_4)))"} +{"_id": "doc_4497", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Set the input or output mode for a specified pin. Mode should be\n either OUT or IN.\"\"\"\n arg_0._Func_pin(arg_1, arg_2)\n arg_0.mpsse_write_gpio()"} +{"_id": "doc_4498", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Half-duplex SPI Func. 
The specified length of bytes will be clocked\n in the MISO line and returned as a bytearray object.\n \"\"\"\n #check for hardware limit of FT232H and similar MPSSE chips\n if (1 > arg_1 > 65536):\n print('the FTDI chip is limited to 65536 bytes (64 KB) of input/output per command!')\n print('use for loops for larger Funcs')\n exit(1)\n # Build command to Func SPI data.\n arg_2 = 0x20 | (arg_0.lsbfirst << 3) | (arg_0.Func_clock_ve << 2)\n logger.debug('SPI Func with command {0:2X}.'.format(arg_2))\n # Compute length low and high bytes.\n # NOTE: Must actually send length minus one because the MPSSE engine\n # considers 0 a length of 1 and FFFF a length of 65536\n\t#force odd numbers to round up instead of down\n\targ_3 = arg_1\n\tif arg_1 % 2 == 1:\n\t arg_3 += 1\n\targ_3 = arg_3/2\n\t#when odd length requested, get the remainder instead of the same number\n\targ_4 = arg_1 - arg_3\n arg_5 = (arg_3 - 1) & 0xFF\n arg_6 = ((arg_3 - 1) >> 8) & 0xFF\n arg_0._assert_cs()\n # Send command and length.\n # Perform twice to prevent error from hardware defect/limits\n arg_0._ft232h._write(str(bytearray((arg_2, arg_5, arg_6))))\n arg_7 = arg_0._ft232h._poll_Func(arg_3)\n arg_0._ft232h._write(str(bytearray((arg_2, arg_5, arg_6))))\n arg_8 = arg_0._ft232h._poll_Func(arg_4)\n arg_0._deassert_cs()\n # Read response bytes\n return bytearray(arg_7 + arg_8)"} +{"_id": "doc_4499", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write the specified number of bytes to the chip.\"\"\"\n for arg_2 in arg_1:\n # Write byte.\n arg_0._command.append(str(bytearray((0x11, 0x00, 0x00, arg_2))))\n # Make sure pins are back in idle state with clock low and data high.\n arg_0._ft232h.output_pins({0: GPIO.LOW, 1: GPIO.HIGH}, write=False)\n arg_0._command.append(arg_0._ft232h.mpsse_gpio() * _REPEAT_DELAY)\n # Read bit for ACK/NAK.\n arg_0._command.append('\\x22\\x00')\n # Increase expected response bytes.\n arg_0._expected += len(arg_1)"} +{"_id": "doc_4500", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Read a signed byte from the specified register.\"\"\"\n arg_2 = arg_0.readU8(arg_1)\n if arg_2 > 127:\n arg_2 -= 256\n return arg_2"} +{"_id": "doc_4501", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Return an I2C device for the specified address and on the specified bus.\n If busnum isn't specified, the default I2C bus for the platform will attempt\n to be detected.\n \"\"\"\n if arg_1 is None:\n arg_1 = get_default_bus()\n return Device(arg_0, arg_1, arg_2, **arg_3)"} +{"_id": "doc_4502", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Write an 8-bit value to the specified register.\"\"\"\n arg_2 = arg_2 & 0xFF\n arg_0._bus.write_byte_data(arg_0._address, arg_1, arg_2)\n arg_0._logger.debug(\"Wrote 0x%02X to register 0x%02X\",\n arg_2, arg_1)"} +{"_id": "doc_4503", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Read an unsigned byte from the specified register.\"\"\"\n arg_2 = arg_0._bus.read_byte_data(arg_0._address, arg_1) & 0xFF\n arg_0._logger.debug(\"Read 0x%02X from register 0x%02X\",\n arg_2, arg_1)\n return arg_2"} +{"_id": "doc_4504", "title": "", "text": "def Func(**arg_0):\n \"\"\"Attempt to return a GPIO instance for the platform which the code is being\n executed on. Currently supports only the Raspberry Pi using the RPi.GPIO\n library and Beaglebone Black using the Adafruit_BBIO library. Will throw an\n exception if a GPIO instance can't be created for the current platform. 
The\n returned GPIO object is an instance of BaseGPIO.\n \"\"\"\n arg_1 = Platform.platform_detect()\n if arg_1 == Platform.RASPBERRY_PI:\n import RPi.GPIO\n return RPiGPIOAdapter(RPi.GPIO, **arg_0)\n elif arg_1 == Platform.BEAGLEBONE_BLACK:\n import Adafruit_BBIO.GPIO\n return AdafruitBBIOAdapter(Adafruit_BBIO.GPIO, **arg_0)\n elif arg_1 == Platform.MINNOWBOARD:\n import mraa\n return AdafruitMinnowAdapter(mraa, **arg_0)\n elif arg_1 == Platform.JETSON_NANO:\n import Jetson.GPIO\n return RPiGPIOAdapter(Jetson.GPIO, **arg_0)\n elif arg_1 == Platform.UNKNOWN:\n raise RuntimeError('Could not determine platform.')"} +{"_id": "doc_4505", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=arg_4):\n \"\"\"Set the input or output mode for a specified pin. Mode should be\n either OUTPUT or INPUT.\n \"\"\"\n arg_0.rpi_gpio.Func(arg_1, arg_0._dir_mapping[arg_2],\n arg_3=arg_0._pud_mapping[arg_3])"} +{"_id": "doc_4506", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n \"\"\"Set the input or output mode for a specified pin. Mode should be\n either DIR_IN or DIR_OUT.\n \"\"\"\n arg_0.mraa_gpio.Gpio.dir(arg_0.mraa_gpio.Gpio(arg_1),arg_0._dir_mapping[arg_2])"} +{"_id": "doc_4507", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove edge detection for a particular GPIO channel. Pin should be\n type IN.\n \"\"\"\n arg_0.mraa_gpio.Gpio.isrExit(arg_0.mraa_gpio.Gpio(arg_1))"} +{"_id": "doc_4508", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Call the method repeatedly such that it will return a PKey object.\n \"\"\"\n arg_1 = xrange(3)\n for arg_2 in xrange(arg_0.iterations):\n arg_3 = PKey()\n arg_3.generate_key(TYPE_DSA, 256)\n for arg_2 in arg_1:\n arg_4 = X509()\n arg_4.set_pubkey(arg_3)\n for arg_2 in arg_1:\n arg_4.get_pubkey()"} +{"_id": "doc_4509", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Call the function with an encrypted PEM and a passphrase callback which\n returns the wrong passphrase.\n \"\"\"\n for arg_1 in xrange(arg_0.iterations * 10):\n try:\n load_privatekey(\n FILETYPE_PEM, arg_0.ENCRYPTED_PEM,\n lambda *args: \"hello, public\")\n except Error:\n pass"} +{"_id": "doc_4510", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Call the function with an encrypted PEM and a passphrase callback which\n returns a non-string.\n \"\"\"\n for arg_1 in xrange(arg_0.iterations * 10):\n try:\n load_privatekey(\n FILETYPE_PEM, arg_0.ENCRYPTED_PEM,\n lambda *args: {})\n except ValueError:\n pass"} +{"_id": "doc_4511", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create a CRL object with 100 Revoked objects, then call the\n get_revoked method repeatedly.\n \"\"\"\n arg_1 = CRL()\n for arg_2 in xrange(100):\n arg_1.add_revoked(Revoked())\n for arg_2 in xrange(arg_0.iterations):\n arg_1.get_revoked()"} +{"_id": "doc_4512", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Copy an empty Revoked object repeatedly. 
The copy is not garbage\n collected, therefore it needs to be manually freed.\n \"\"\"\n for arg_1 in xrange(arg_0.iterations * 100):\n arg_2 = _X509_REVOKED_dup(Revoked()._revoked)\n _lib.X509_REVOKED_free(arg_2)"} +{"_id": "doc_4513", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=\"sha256\"):\n \"\"\"\n Generate a certificate given a certificate request.\n\n Arguments: req - Certificate request to use\n issuerCert - The certificate of the issuer\n issuerKey - The private key of the issuer\n serial - Serial number for the certificate\n notBefore - Timestamp (relative to now) when the certificate\n starts being valid\n notAfter - Timestamp (relative to now) when the certificate\n stops being valid\n digest - Digest method to use for signing, default is sha256\n Returns: The signed certificate in an X509 object\n \"\"\"\n arg_5, arg_6 = arg_1\n arg_7, arg_8 = arg_3\n arg_9 = crypto.X509()\n arg_9.set_serial_number(arg_2)\n arg_9.gmtime_adj_notBefore(arg_7)\n arg_9.gmtime_adj_notAfter(arg_8)\n arg_9.set_issuer(arg_5.get_subject())\n arg_9.set_subject(arg_0.get_subject())\n arg_9.set_pubkey(arg_0.get_pubkey())\n arg_9.sign(arg_6, arg_4)\n return arg_9"} +{"_id": "doc_4514", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Builds a decorator that ensures that functions that rely on OpenSSL\n functions that are not present in this build raise NotImplementedError,\n rather than AttributeError coming out of cryptography.\n\n :param flag: A cryptography flag that guards the functions, e.g.\n ``Cryptography_HAS_NEXTPROTONEG``.\n :param error: The string to be used in the exception if the flag is false.\n \"\"\"\n def _requires_decorator(arg_2):\n if not arg_0:\n @wraps(arg_2)\n def explode(*arg_3, **arg_4):\n raise NotImplementedError(arg_1)\n return explode\n else:\n return arg_2\n\n return _requires_decorator"} +{"_id": "doc_4515", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Set the passphrase callback. This function will be called\n when a private key with a passphrase is loaded.\n\n :param callback: The Python callback to use. This must accept three\n positional arguments. First, an integer giving the maximum length\n of the passphrase it may return. If the returned passphrase is\n longer than this, it will be truncated. Second, a boolean value\n which will be true if the user should be prompted for the\n passphrase twice and the callback should verify that the two values\n supplied are equal. Third, the value given as the *userdata*\n parameter to :meth:`Func`. The *callback* must return\n a byte string. If an error occurs, *callback* should return a false\n value (e.g. an empty string).\n :param userdata: (optional) A Python object which will be given as\n argument to the callback\n :return: None\n \"\"\"\n if not callable(arg_1):\n raise TypeError(\"callback must be callable\")\n\n arg_0._passphrase_helper = arg_0._wrap_callback(arg_1)\n arg_0._passphrase_callback = arg_0._passphrase_helper.callback\n _lib.SSL_CTX_set_default_passwd_cb(\n arg_0._context, arg_0._passphrase_callback)\n arg_0._passphrase_userdata = arg_2"} +{"_id": "doc_4516", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Load a certificate chain from a file.\n\n :param certfile: The name of the certificate chain file (``bytes`` or\n ``unicode``). 
Must be PEM encoded.\n\n :return: None\n \"\"\"\n arg_1 = _path_string(arg_1)\n\n arg_2 = _lib.SSL_CTX_Func(\n arg_0._context, arg_1\n )\n if not arg_2:\n _raise_current_error()"} +{"_id": "doc_4517", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"\n Load a certificate from a file\n\n :param certfile: The name of the certificate file (``bytes`` or\n ``unicode``).\n :param filetype: (optional) The encoding of the file, which is either\n :const:`FILETYPE_PEM` or :const:`FILETYPE_ASN1`. The default is\n :const:`FILETYPE_PEM`.\n\n :return: None\n \"\"\"\n arg_1 = _path_string(arg_1)\n if not isinstance(arg_2, integer_types):\n raise TypeError(\"filetype must be an integer\")\n\n arg_4 = _lib.SSL_CTX_Func(\n arg_0._context, arg_1, arg_2\n )\n if not arg_4:\n _raise_current_error()"} +{"_id": "doc_4518", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Load a certificate from a X509 object\n\n :param cert: The X509 object\n :return: None\n \"\"\"\n if not isinstance(arg_1, X509):\n raise TypeError(\"cert must be an X509 instance\")\n\n arg_2 = _lib.SSL_CTX_Func(arg_0._context, arg_1._x509)\n if not arg_2:\n _raise_current_error()"} +{"_id": "doc_4519", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add certificate to chain\n\n :param certobj: The X509 certificate object to add to the chain\n :return: None\n \"\"\"\n if not isinstance(arg_1, X509):\n raise TypeError(\"certobj must be an X509 instance\")\n\n arg_2 = _lib.X509_dup(arg_1._x509)\n arg_3 = _lib.SSL_CTX_Func(arg_0._context, arg_2)\n if not arg_3:\n # TODO: This is untested.\n _lib.X509_free(arg_2)\n _raise_current_error()"} +{"_id": "doc_4520", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Load the trusted certificates that will be sent to the client. Does\n not actually imply any of the certificates are trusted; that must be\n configured separately.\n\n :param bytes cafile: The path to a certificates file in PEM format.\n :return: None\n \"\"\"\n arg_2 = _lib.SSL_load_client_CA_file(\n _text_to_bytes_and_warn(\"cafile\", arg_1)\n )\n _openssl_assert(arg_2 != _ffi.NULL)\n _lib.SSL_CTX_set_client_CA_list(arg_0._context, arg_2)"} +{"_id": "doc_4521", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Load parameters for Ephemeral Diffie-Hellman\n\n :param dhfile: The file to load EDH parameters from (``bytes`` or\n ``unicode``).\n\n :return: None\n \"\"\"\n arg_1 = _path_string(arg_1)\n\n arg_2 = _lib.BIO_new_file(arg_1, b\"r\")\n if arg_2 == _ffi.NULL:\n _raise_current_error()\n arg_2 = _ffi.gc(arg_2, _lib.BIO_free)\n\n arg_3 = _lib.PEM_read_bio_DHparams(arg_2, _ffi.NULL, _ffi.NULL, _ffi.NULL)\n arg_3 = _ffi.gc(arg_3, _lib.DH_free)\n _lib.SSL_CTX_set_tmp_dh(arg_0._context, arg_3)"} +{"_id": "doc_4522", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the list of ciphers to be used in this context.\n\n See the OpenSSL manual for more information (e.g.\n :manpage:`ciphers(1)`).\n\n :param bytes cipher_list: An OpenSSL cipher string.\n :return: None\n \"\"\"\n arg_1 = _text_to_bytes_and_warn(\"cipher_list\", arg_1)\n\n if not isinstance(arg_1, bytes):\n raise TypeError(\"cipher_list must be a byte string.\")\n\n _openssl_assert(\n _lib.SSL_CTX_Func(arg_0._context, arg_1) == 1\n )\n # In OpenSSL 1.1.1 setting the cipher list will always return TLS 1.3\n # ciphers even if you pass an invalid cipher. 
Applications (like\n # Twisted) have tests that depend on an error being raised if an\n # invalid cipher string is passed, but without the following check\n # for the TLS 1.3 specific cipher suites it would never error.\n arg_2 = Connection(arg_0, None)\n if (\n arg_2.get_cipher_list() == [\n 'TLS_AES_256_GCM_SHA384',\n 'TLS_CHACHA20_POLY1305_SHA256',\n 'TLS_AES_128_GCM_SHA256'\n ]\n ):\n raise Error(\n [\n (\n 'SSL routines',\n 'SSL_CTX_Func',\n 'no cipher match',\n ),\n ],\n )"} +{"_id": "doc_4523", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the list of preferred client certificate signers for this server\n context.\n\n This list of certificate authorities will be sent to the client when\n the server requests a client certificate.\n\n :param certificate_authorities: a sequence of X509Names.\n :return: None\n\n .. versionadded:: 0.10\n \"\"\"\n arg_2 = _lib.sk_X509_NAME_new_null()\n _openssl_assert(arg_2 != _ffi.NULL)\n\n try:\n for arg_3 in arg_1:\n if not isinstance(arg_3, X509Name):\n raise TypeError(\n \"client CAs must be X509Name objects, not %s \"\n \"objects\" % (\n type(arg_3).__name__,\n )\n )\n arg_4 = _lib.X509_NAME_dup(arg_3._name)\n _openssl_assert(arg_4 != _ffi.NULL)\n arg_5 = _lib.sk_X509_NAME_push(arg_2, arg_4)\n if not arg_5:\n _lib.X509_NAME_free(arg_4)\n _raise_current_error()\n except Exception:\n _lib.sk_X509_NAME_free(arg_2)\n raise\n\n _lib.SSL_CTX_set_client_CA_list(arg_0._context, arg_2)"} +{"_id": "doc_4524", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add the CA certificate to the list of preferred signers for this\n context.\n\n The list of certificate authorities will be sent to the client when the\n server requests a client certificate.\n\n :param certificate_authority: certificate authority's X509 certificate.\n :return: None\n\n .. versionadded:: 0.10\n \"\"\"\n if not isinstance(arg_1, X509):\n raise TypeError(\"certificate_authority must be an X509 instance\")\n\n arg_2 = _lib.SSL_CTX_add_client_CA(\n arg_0._context, arg_1._x509)\n _openssl_assert(arg_2 == 1)"} +{"_id": "doc_4525", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Specify the protocols that the client is prepared to speak after the\n TLS connection has been negotiated using Application Layer Protocol\n Negotiation.\n\n :param protos: A list of the protocols to be offered to the server.\n This list should be a Python list of bytestrings representing the\n protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.\n \"\"\"\n # Take the list of protocols and join them together, prefixing them\n # with their lengths.\n arg_2 = b''.join(\n chain.from_iterable((int2byte(len(p)), p) for p in arg_1)\n )\n\n # Build a C string from the list. We don't need to save this off\n # because OpenSSL immediately copies the data out.\n arg_3 = _ffi.new(\"unsigned char[]\", arg_2)\n _lib.SSL_CTX_Func(arg_0._context, arg_3, len(arg_2))"} +{"_id": "doc_4526", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Set a callback to provide OCSP data to be stapled to the TLS handshake\n on the server side.\n\n :param callback: The callback function. It will be invoked with two\n arguments: the Connection, and the optional arbitrary data you have\n provided. The callback must return a bytestring that contains the\n OCSP data to staple to the handshake. If no OCSP data is available\n for this connection, return the empty bytestring.\n :param data: Some opaque data that will be passed into the callback\n function when called. 
This can be used to avoid needing to do\n complex data lookups or to keep track of what context is being\n used. This parameter is optional.\n \"\"\"\n arg_3 = _OCSPServerCallbackHelper(arg_1)\n arg_0._set_ocsp_callback(arg_3, arg_2)"} +{"_id": "doc_4527", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Set a callback to validate OCSP data stapled to the TLS handshake on\n the client side.\n\n :param callback: The callback function. It will be invoked with three\n arguments: the Connection, a bytestring containing the stapled OCSP\n assertion, and the optional arbitrary data you have provided. The\n callback must return a boolean that indicates the result of\n validating the OCSP data: ``True`` if the OCSP data is valid and\n the certificate can be trusted, or ``False`` if either the OCSP\n data is invalid or the certificate has been revoked.\n :param data: Some opaque data that will be passed into the callback\n function when called. This can be used to avoid needing to do\n complex data lookups or to keep track of what context is being\n used. This parameter is optional.\n \"\"\"\n arg_3 = _OCSPClientCallbackHelper(arg_1)\n arg_0._set_ocsp_callback(arg_3, arg_2)"} +{"_id": "doc_4528", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Switch this connection to a new session context.\n\n :param context: A :class:`Context` instance giving the new session\n context to use.\n \"\"\"\n if not isinstance(arg_1, Context):\n raise TypeError(\"context must be a Context instance\")\n\n _lib.SSL_set_SSL_CTX(arg_0._ssl, arg_1._context)\n arg_0._context = arg_1"} +{"_id": "doc_4529", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Receive data on the connection and copy it directly into the provided\n buffer, rather than creating a new string.\n\n :param buffer: The buffer to copy into.\n :param nbytes: (optional) The maximum number of bytes to read into the\n buffer. If not present, defaults to the size of the buffer. If\n larger than the size of the buffer, is reduced to the size of the\n buffer.\n :param flags: (optional) The only supported flag is ``MSG_PEEK``,\n all other flags are ignored.\n :return: The number of bytes read into the buffer.\n \"\"\"\n if arg_2 is None:\n arg_2 = len(arg_1)\n else:\n arg_2 = min(arg_2, len(arg_1))\n\n # We need to create a temporary buffer. This is annoying, it would be\n # better if we could pass memoryviews straight into the SSL_read call,\n # but right now we can't. Revisit this if CFFI gets that ability.\n arg_4 = _no_zero_allocator(\"char[]\", arg_2)\n if arg_3 is not None and arg_3 & socket.MSG_PEEK:\n arg_5 = _lib.SSL_peek(arg_0._ssl, arg_4, arg_2)\n else:\n arg_5 = _lib.SSL_read(arg_0._ssl, arg_4, arg_2)\n arg_0._raise_ssl_error(arg_0._ssl, arg_5)\n\n # This strange line is all to avoid a memory copy. The buffer protocol\n # should allow us to assign a CFFI buffer to the LHS of this line, but\n # on CPython 3.3+ that segfaults. As a workaround, we can temporarily\n # wrap it in a memoryview.\n arg_1[:arg_5] = memoryview(_ffi.buffer(arg_4, arg_5))\n\n return arg_5"} +{"_id": "doc_4530", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n If the Connection was created with a memory BIO, this method can be\n used to read bytes from the write end of that memory BIO. 
Many\n Connection methods will add bytes which must be read in this manner or\n the buffer will eventually fill up and the Connection will be able to\n take no further actions.\n\n :param bufsiz: The maximum number of bytes to read\n :return: The string read.\n \"\"\"\n if arg_0._from_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n if not isinstance(arg_1, integer_types):\n raise TypeError(\"bufsiz must be an integer\")\n\n arg_2 = _no_zero_allocator(\"char[]\", arg_1)\n arg_3 = _lib.BIO_read(arg_0._from_ssl, arg_2, arg_1)\n if arg_3 <= 0:\n arg_0._handle_bio_errors(arg_0._from_ssl, arg_3)\n\n return _ffi.buffer(arg_2, arg_3)[:]"} +{"_id": "doc_4531", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Renegotiate the session.\n\n :return: True if the renegotiation can be started, False otherwise\n :rtype: bool\n \"\"\"\n if not arg_0.Func_pending():\n _openssl_assert(_lib.SSL_Func(arg_0._ssl) == 1)\n return True\n return False"} +{"_id": "doc_4532", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Send the Func message to the Connection.\n\n :return: True if the Func completed successfully (i.e. both sides\n have sent closure alerts), False otherwise (in which case you\n call :meth:`recv` or :meth:`send` when the connection becomes\n readable/writeable).\n \"\"\"\n arg_1 = _lib.SSL_Func(arg_0._ssl)\n if arg_1 < 0:\n arg_0._raise_ssl_error(arg_0._ssl, arg_1)\n elif arg_1 > 0:\n return True\n else:\n return False"} +{"_id": "doc_4533", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the list of ciphers used by the Connection object.\n\n :return: A list of native cipher strings.\n \"\"\"\n arg_1 = []\n for arg_2 in count():\n arg_3 = _lib.SSL_Func(arg_0._ssl, arg_2)\n if arg_3 == _ffi.NULL:\n break\n arg_1.append(_native(_ffi.string(arg_3)))\n return arg_1"} +{"_id": "doc_4534", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the random value used with the server hello message.\n\n :return: A string representing the state\n \"\"\"\n arg_1 = _lib.SSL_get_session(arg_0._ssl)\n if arg_1 == _ffi.NULL:\n return None\n arg_2 = _lib.SSL_get_Func(arg_0._ssl, _ffi.NULL, 0)\n assert arg_2 > 0\n arg_3 = _no_zero_allocator(\"unsigned char[]\", arg_2)\n _lib.SSL_get_Func(arg_0._ssl, arg_3, arg_2)\n return _ffi.buffer(arg_3, arg_2)[:]"} +{"_id": "doc_4535", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the value of the master key for this session.\n\n :return: A string representing the state\n \"\"\"\n arg_1 = _lib.SSL_get_session(arg_0._ssl)\n if arg_1 == _ffi.NULL:\n return None\n\n arg_2 = _lib.SSL_SESSION_get_Func(arg_1, _ffi.NULL, 0)\n assert arg_2 > 0\n arg_3 = _no_zero_allocator(\"unsigned char[]\", arg_2)\n _lib.SSL_SESSION_get_Func(arg_1, arg_3, arg_2)\n return _ffi.buffer(arg_3, arg_2)[:]"} +{"_id": "doc_4536", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Obtain the name of the currently used cipher.\n\n :returns: The name of the currently used cipher or :obj:`None`\n if no connection has been established.\n :rtype: :class:`unicode` or :class:`NoneType`\n\n .. 
versionadded:: 0.15\n \"\"\"\n arg_1 = _lib.SSL_get_current_cipher(arg_0._ssl)\n if arg_1 == _ffi.NULL:\n return None\n else:\n arg_2 = _ffi.string(_lib.SSL_CIPHER_get_name(arg_1))\n return arg_2.decode(\"utf-8\")"} +{"_id": "doc_4537", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Obtain the number of secret bits of the currently used cipher.\n\n :returns: The number of secret bits of the currently used cipher\n or :obj:`None` if no connection has been established.\n :rtype: :class:`int` or :class:`NoneType`\n\n .. versionadded:: 0.15\n \"\"\"\n arg_1 = _lib.SSL_get_current_cipher(arg_0._ssl)\n if arg_1 == _ffi.NULL:\n return None\n else:\n return _lib.SSL_CIPHER_get_bits(arg_1, _ffi.NULL)"} +{"_id": "doc_4538", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Obtain the protocol version of the currently used cipher.\n\n :returns: The protocol name of the currently used cipher\n or :obj:`None` if no connection has been established.\n :rtype: :class:`unicode` or :class:`NoneType`\n\n .. versionadded:: 0.15\n \"\"\"\n arg_1 = _lib.SSL_get_current_cipher(arg_0._ssl)\n if arg_1 == _ffi.NULL:\n return None\n else:\n arg_2 = _ffi.string(_lib.SSL_CIPHER_get_version(arg_1))\n return arg_2.decode(\"utf-8\")"} +{"_id": "doc_4539", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the protocol version of the current connection.\n\n :returns: The TLS version of the current connection, for example\n the value for TLS 1.2 would be ``TLSv1.2``or ``Unknown``\n for connections that were not successfully established.\n :rtype: :class:`unicode`\n \"\"\"\n arg_1 = _ffi.string(_lib.SSL_get_version(arg_0._ssl))\n return arg_1.decode(\"utf-8\")"} +{"_id": "doc_4540", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the protocol that was negotiated by NPN.\n\n :returns: A bytestring of the protocol name. If no protocol has been\n negotiated yet, returns an empty string.\n\n .. versionadded:: 0.15\n \"\"\"\n _warn_npn()\n arg_1 = _ffi.new(\"unsigned char **\")\n arg_2 = _ffi.new(\"unsigned int *\")\n\n _lib.SSL_get0_next_proto_negotiated(arg_0._ssl, arg_1, arg_2)\n\n return _ffi.buffer(arg_1[0], arg_2[0])[:]"} +{"_id": "doc_4541", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the protocol that was negotiated by ALPN.\n\n :returns: A bytestring of the protocol name. 
If no protocol has been\n negotiated yet, returns an empty string.\n \"\"\"\n arg_1 = _ffi.new(\"unsigned char **\")\n arg_2 = _ffi.new(\"unsigned int *\")\n\n _lib.SSL_get0_alpn_selected(arg_0._ssl, arg_1, arg_2)\n\n if not arg_2:\n return b''\n\n return _ffi.buffer(arg_1[0], arg_2[0])[:]"} +{"_id": "doc_4542", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Allocate a new OpenSSL memory BIO.\n\n Arrange for the garbage collector to clean it up automatically.\n\n :param buffer: None or some bytes to use to put into the BIO so that they\n can be read out.\n \"\"\"\n if arg_0 is None:\n arg_1 = _lib.BIO_new(_lib.BIO_s_mem())\n arg_2 = _lib.BIO_free\n else:\n arg_3 = _ffi.new(\"char[]\", arg_0)\n arg_1 = _lib.BIOFunc(arg_3, len(arg_0))\n\n # Keep the memory alive as long as the bio is alive!\n def arg_2(arg_1, arg_4=arg_3):\n return _lib.BIO_free(arg_1)\n\n _openssl_assert(arg_1 != _ffi.NULL)\n\n arg_1 = _ffi.gc(arg_1, arg_2)\n return arg_1"} +{"_id": "doc_4543", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Copy the contents of an OpenSSL BIO object into a Python byte string.\n \"\"\"\n arg_1 = _ffi.new('char**')\n arg_2 = _lib.BIO_get_mem_data(arg_0, arg_1)\n return _ffi.buffer(arg_1[0], arg_2)[:]"} +{"_id": "doc_4544", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the time value of an ASN1 time object.\n\n @param timestamp: An ASN1_GENERALIZEDTIME* (or an object safely castable to\n that type) from which the time value will be retrieved.\n\n @return: The time value from C{timestamp} as a L{bytes} string in a certain\n format. Or C{None} if the object contains no time value.\n \"\"\"\n arg_1 = _ffi.cast('ASN1_STRING*', arg_0)\n if _lib.ASN1_STRING_length(arg_1) == 0:\n return None\n elif (\n _lib.ASN1_STRING_type(arg_1) == _lib.V_ASN1_GENERALIZEDTIME\n ):\n return _ffi.string(_lib.ASN1_STRING_data(arg_1))\n else:\n arg_2 = _ffi.new(\"ASN1_GENERALIZEDTIME**\")\n _lib.ASN1_TIME_to_generalizedtime(arg_0, arg_2)\n if arg_2[0] == _ffi.NULL:\n # This may happen:\n # - if timestamp was not an ASN1_TIME\n # - if allocating memory for the ASN1_GENERALIZEDTIME failed\n # - if a copy of the time data from timestamp cannot be made for\n # the newly allocated ASN1_GENERALIZEDTIME\n #\n # These are difficult to test. 
cffi enforces the ASN1_TIME type.\n # Memory allocation failures are a pain to trigger\n # deterministically.\n _untested_error(\"ASN1_TIME_to_generalizedtime\")\n else:\n arg_1 = _ffi.cast(\n \"ASN1_STRING*\", arg_2[0])\n arg_3 = _lib.ASN1_STRING_data(arg_1)\n arg_4 = _ffi.string(arg_3)\n _lib.ASN1_GENERALIZEDTIME_free(arg_2[0])\n return arg_4"} +{"_id": "doc_4545", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a single curve object selected by name.\n\n See :py:func:`Funcs` for information about curve objects.\n\n :param name: The OpenSSL short name identifying the curve object to\n retrieve.\n :type name: :py:class:`unicode`\n\n If the named curve is not supported then :py:class:`ValueError` is raised.\n \"\"\"\n for arg_1 in Funcs():\n if arg_1.name == arg_0:\n return arg_1\n raise ValueError(\"unknown curve name\", arg_0)"} +{"_id": "doc_4546", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Dump a public key to a buffer.\n\n :param type: The file type (one of :data:`FILETYPE_PEM` or\n :data:`FILETYPE_ASN1`).\n :param PKey pkey: The public key to dump\n :return: The buffer with the dumped key in it.\n :rtype: bytes\n \"\"\"\n arg_2 = _new_mem_buf()\n if arg_0 == FILETYPE_PEM:\n arg_3 = _lib.PEM_write_bio_PUBKEY\n elif arg_0 == FILETYPE_ASN1:\n arg_3 = _lib.i2d_PUBKEY_bio\n else:\n raise ValueError(\"type argument must be FILETYPE_PEM or FILETYPE_ASN1\")\n\n arg_4 = arg_3(arg_2, arg_1._pkey)\n if arg_4 != 1: # pragma: no cover\n _raise_current_error()\n\n return _bio_to_string(arg_2)"} +{"_id": "doc_4547", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Verify the signature for a data string.\n\n :param cert: signing certificate (X509 object) corresponding to the\n private key which generated the signature.\n :param signature: signature returned by sign function\n :param data: data to be verified\n :param digest: message digest to use\n :return: ``None`` if the signature is correct, raise exception otherwise.\n\n .. versionadded:: 0.11\n \"\"\"\n arg_2 = _text_to_bytes_and_warn(\"data\", arg_2)\n\n arg_4 = _lib.EVP_get_digestbyname(_byte_string(arg_3))\n if arg_4 == _ffi.NULL:\n raise ValueError(\"No such digest method\")\n\n arg_5 = _lib.X509_get_pubkey(arg_0._x509)\n _openssl_assert(arg_5 != _ffi.NULL)\n arg_5 = _ffi.gc(arg_5, _lib.EVP_PKEY_free)\n\n arg_6 = _lib.Cryptography_EVP_MD_CTX_new()\n arg_6 = _ffi.gc(arg_6, _lib.Cryptography_EVP_MD_CTX_free)\n\n _lib.EVP_VerifyInit(arg_6, arg_4)\n _lib.EVP_VerifyUpdate(arg_6, arg_2, len(arg_2))\n arg_7 = _lib.EVP_VerifyFinal(\n arg_6, arg_1, len(arg_1), arg_5\n )\n\n if arg_7 != 1:\n _raise_current_error()"} +{"_id": "doc_4548", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Export as a ``cryptography`` key.\n\n :rtype: One of ``cryptography``'s `key interfaces`_.\n\n .. _key interfaces: https://cryptography.io/en/latest/hazmat/\\\n primitives/asymmetric/rsa/#key-interfaces\n\n .. 
versionadded:: 16.1.0\n \"\"\"\n arg_1 = _get_backend()\n if arg_0._only_public:\n return arg_1._evp_pkey_to_public_key(arg_0._pkey)\n else:\n return arg_1._evp_pkey_to_private_key(arg_0._pkey)"} +{"_id": "doc_4549", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Generate a key pair of the given type, with the given number of bits.\n\n This generates a key \"into\" the this object.\n\n :param type: The key type.\n :type type: :py:data:`TYPE_RSA` or :py:data:`TYPE_DSA`\n :param bits: The number of bits.\n :type bits: :py:data:`int` ``>= 0``\n :raises TypeError: If :py:data:`type` or :py:data:`bits` isn't\n of the appropriate type.\n :raises ValueError: If the number of bits isn't an integer of\n the appropriate size.\n :return: ``None``\n \"\"\"\n if not isinstance(arg_1, int):\n raise TypeError(\"type must be an integer\")\n\n if not isinstance(arg_2, int):\n raise TypeError(\"bits must be an integer\")\n\n if arg_1 == TYPE_RSA:\n if arg_2 <= 0:\n raise ValueError(\"Invalid number of bits\")\n\n # TODO Check error return\n arg_3 = _lib.BN_new()\n arg_3 = _ffi.gc(arg_3, _lib.BN_free)\n _lib.BN_set_word(arg_3, _lib.RSA_F4)\n\n arg_4 = _lib.RSA_new()\n\n arg_5 = _lib.RSA_Func_ex(arg_4, arg_2, arg_3, _ffi.NULL)\n _openssl_assert(arg_5 == 1)\n\n arg_5 = _lib.EVP_PKEY_assign_RSA(arg_0._pkey, arg_4)\n _openssl_assert(arg_5 == 1)\n\n elif arg_1 == TYPE_DSA:\n arg_6 = _lib.DSA_new()\n _openssl_assert(arg_6 != _ffi.NULL)\n\n arg_6 = _ffi.gc(arg_6, _lib.DSA_free)\n arg_7 = _lib.DSA_generate_parameters_ex(\n arg_6, arg_2, _ffi.NULL, 0, _ffi.NULL, _ffi.NULL, _ffi.NULL\n )\n _openssl_assert(arg_7 == 1)\n\n _openssl_assert(_lib.DSA_Func(arg_6) == 1)\n _openssl_assert(_lib.EVP_PKEY_set1_DSA(arg_0._pkey, arg_6) == 1)\n else:\n raise Error(\"No such key type\")\n\n arg_0._initialized = True"} +{"_id": "doc_4550", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check the consistency of an RSA private key.\n\n This is the Python equivalent of OpenSSL's ``RSA_Func_key``.\n\n :return: ``True`` if key is consistent.\n\n :raise OpenSSL.crypto.Error: if the key is inconsistent.\n\n :raise TypeError: if the key is of a type which cannot be Funced.\n Only RSA keys can currently be Funced.\n \"\"\"\n if arg_0._only_public:\n raise TypeError(\"public key only\")\n\n if _lib.EVP_PKEY_type(arg_0.type()) != _lib.EVP_PKEY_RSA:\n raise TypeError(\"key type unsupported\")\n\n arg_1 = _lib.EVP_PKEY_get1_RSA(arg_0._pkey)\n arg_1 = _ffi.gc(arg_1, _lib.RSA_free)\n arg_2 = _lib.RSA_Func_key(arg_1)\n if arg_2:\n return True\n _raise_current_error()"} +{"_id": "doc_4551", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the curves supported by OpenSSL.\n\n :param lib: The OpenSSL library binding object.\n\n :return: A :py:type:`set` of ``cls`` instances giving the names of the\n elliptic curves the underlying library supports.\n \"\"\"\n arg_2 = arg_1.EC_get_builtin_curves(_ffi.NULL, 0)\n arg_3 = _ffi.new('EC_builtin_curve[]', arg_2)\n # The return value on this call should be num_curves again. We\n # could check it to make sure but if it *isn't* then.. what could\n # we do? Abort the whole process, I suppose...? 
-exarkun\n arg_1.EC_get_builtin_curves(arg_3, arg_2)\n return set(\n arg_0.from_nid(arg_1, arg_4.nid)\n for arg_4 in arg_3)"} +{"_id": "doc_4552", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create a new OpenSSL EC_KEY structure initialized to use this curve.\n\n The structure is automatically garbage collected when the Python object\n is garbage collected.\n \"\"\"\n arg_1 = arg_0._lib.EC_KEY_new_by_curve_name(arg_0._nid)\n return _ffi.gc(arg_1, _lib.EC_KEY_free)"} +{"_id": "doc_4553", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the DER encoding of this name.\n\n :return: The DER encoded form of this name.\n :rtype: :py:class:`bytes`\n \"\"\"\n arg_1 = _ffi.new('unsigned char**')\n arg_2 = _lib.i2d_X509_NAME(arg_0._name, arg_1)\n _openssl_assert(arg_2 >= 0)\n\n arg_3 = _ffi.buffer(arg_1[0], arg_2)[:]\n _lib.OPENSSL_free(arg_1[0])\n return arg_3"} +{"_id": "doc_4554", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the short type name of this X.509 extension.\n\n The result is a byte string such as :py:const:`b\"basicConstraints\"`.\n\n :return: The short type name.\n :rtype: :py:data:`bytes`\n\n .. versionadded:: 0.12\n \"\"\"\n arg_1 = _lib.X509_EXTENSION_get_object(arg_0._extension)\n arg_2 = _lib.OBJ_obj2nid(arg_1)\n return _ffi.string(_lib.OBJ_nid2sn(arg_2))"} +{"_id": "doc_4555", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the data of the X509 extension, encoded as ASN.1.\n\n :return: The ASN.1 encoded data of this X509 extension.\n :rtype: :py:data:`bytes`\n\n .. versionadded:: 0.12\n \"\"\"\n arg_1 = _lib.X509_EXTENSION_Func(arg_0._extension)\n arg_2 = _ffi.cast('ASN1_STRING*', arg_1)\n arg_3 = _lib.ASN1_STRING_data(arg_2)\n arg_4 = _lib.ASN1_STRING_length(arg_2)\n return _ffi.buffer(arg_3, arg_4)[:]"} +{"_id": "doc_4556", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the public key of the certificate signing request.\n\n :param pkey: The public key to use.\n :type pkey: :py:class:`PKey`\n\n :return: ``None``\n \"\"\"\n arg_2 = _lib.X509_REQ_Func(arg_0._req, arg_1._pkey)\n _openssl_assert(arg_2 == 1)"} +{"_id": "doc_4557", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the public key of the certificate signing request.\n\n :return: The public key.\n :rtype: :py:class:`PKey`\n \"\"\"\n arg_1 = PKey.__new__(PKey)\n arg_1._pkey = _lib.X509_REQ_Func(arg_0._req)\n _openssl_assert(arg_1._pkey != _ffi.NULL)\n arg_1._pkey = _ffi.gc(arg_1._pkey, _lib.EVP_PKEY_free)\n arg_1._only_public = True\n return arg_1"} +{"_id": "doc_4558", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the subject of this certificate signing request.\n\n This creates a new :class:`X509Name` that wraps the underlying subject\n name field on the certificate signing request. Modifying it will modify\n the underlying signing request, and will have the effect of modifying\n any other :class:`X509Name` that refers to this subject.\n\n :return: The subject of this certificate signing request.\n :rtype: :class:`X509Name`\n \"\"\"\n arg_1 = X509Name.__new__(X509Name)\n arg_1._name = _lib.X509_REQ_Func_name(arg_0._req)\n _openssl_assert(arg_1._name != _ffi.NULL)\n\n # The name is owned by the X509Req structure. 
As long as the X509Name\n # Python object is alive, keep the X509Req Python object alive.\n arg_1._owner = arg_0\n\n return arg_1"} +{"_id": "doc_4559", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add extensions to the certificate signing request.\n\n :param extensions: The X.509 extensions to add.\n :type extensions: iterable of :py:class:`X509Extension`\n :return: ``None``\n \"\"\"\n arg_2 = _lib.sk_X509_EXTENSION_new_null()\n _openssl_assert(arg_2 != _ffi.NULL)\n\n arg_2 = _ffi.gc(arg_2, _lib.sk_X509_EXTENSION_free)\n\n for arg_3 in arg_1:\n if not isinstance(arg_3, X509Extension):\n raise ValueError(\"One of the elements is not an X509Extension\")\n\n # TODO push can fail (here and elsewhere)\n _lib.sk_X509_EXTENSION_push(arg_2, arg_3._extension)\n\n arg_4 = _lib.X509_REQ_Func(arg_0._req, arg_2)\n _openssl_assert(arg_4 == 1)"} +{"_id": "doc_4560", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get X.509 extensions in the certificate signing request.\n\n :return: The X.509 extensions in this request.\n :rtype: :py:class:`list` of :py:class:`X509Extension` objects.\n\n .. versionadded:: 0.15\n \"\"\"\n arg_1 = []\n arg_2 = _lib.X509_REQ_Func(arg_0._req)\n for arg_3 in range(_lib.sk_X509_EXTENSION_num(arg_2)):\n arg_4 = X509Extension.__new__(X509Extension)\n arg_4._extension = _lib.sk_X509_EXTENSION_value(arg_2, arg_3)\n arg_1.append(arg_4)\n return arg_1"} +{"_id": "doc_4561", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Export as a ``cryptography`` certificate.\n\n :rtype: ``cryptography.x509.Certificate``\n\n .. versionadded:: 17.1.0\n \"\"\"\n from cryptography.hazmat.backends.openssl.x509 import _Certificate\n arg_1 = _get_backend()\n return _Certificate(arg_1, arg_0._x509)"} +{"_id": "doc_4562", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the version number of the certificate. Note that the\n version value is zero-based, eg. 
a value of 0 is V1.\n\n :param version: The version number of the certificate.\n :type version: :py:class:`int`\n\n :return: ``None``\n \"\"\"\n if not isinstance(arg_1, int):\n raise TypeError(\"version must be an integer\")\n\n _lib.X509_Func(arg_0._x509, arg_1)"} +{"_id": "doc_4563", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the public key of the certificate.\n\n :return: The public key.\n :rtype: :py:class:`PKey`\n \"\"\"\n arg_1 = PKey.__new__(PKey)\n arg_1._pkey = _lib.X509_Func(arg_0._x509)\n if arg_1._pkey == _ffi.NULL:\n _raise_current_error()\n arg_1._pkey = _ffi.gc(arg_1._pkey, _lib.EVP_PKEY_free)\n arg_1._only_public = True\n return arg_1"} +{"_id": "doc_4564", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the public key of the certificate.\n\n :param pkey: The public key.\n :type pkey: :py:class:`PKey`\n\n :return: :py:data:`None`\n \"\"\"\n if not isinstance(arg_1, PKey):\n raise TypeError(\"pkey must be a PKey instance\")\n\n arg_2 = _lib.X509_Func(arg_0._x509, arg_1._pkey)\n _openssl_assert(arg_2 == 1)"} +{"_id": "doc_4565", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the digest of the X509 object.\n\n :param digest_name: The name of the digest algorithm to use.\n :type digest_name: :py:class:`bytes`\n\n :return: The digest of the object, formatted as\n :py:const:`b\":\"`-delimited hex pairs.\n :rtype: :py:class:`bytes`\n \"\"\"\n Func = _lib.EVP_get_digestbyname(_byte_string(arg_1))\n if Func == _ffi.NULL:\n raise ValueError(\"No such digest method\")\n\n arg_3 = _ffi.new(\"unsigned char[]\", _lib.EVP_MAX_MD_SIZE)\n arg_4 = _ffi.new(\"unsigned int[]\", 1)\n arg_4[0] = len(arg_3)\n\n arg_5 = _lib.X509_digest(\n arg_0._x509, Func, arg_3, arg_4)\n _openssl_assert(arg_5 == 1)\n\n return b\":\".join([\n b16encode(arg_6).upper() for arg_6\n in _ffi.buffer(arg_3, arg_4[0])])"} +{"_id": "doc_4566", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the serial number of the certificate.\n\n :param serial: The new serial number.\n :type serial: :py:class:`int`\n\n :return: :py:data`None`\n \"\"\"\n if not isinstance(arg_1, _integer_types):\n raise TypeError(\"serial must be an integer\")\n\n arg_2 = hex(arg_1)[2:]\n if not isinstance(arg_2, bytes):\n arg_2 = arg_2.encode('ascii')\n\n arg_3 = _ffi.new(\"BIGNUM**\")\n\n # BN_hex2bn stores the result in &bignum. Unless it doesn't feel like\n # it. If bignum is still NULL after this call, then the return value\n # is actually the result. I hope. 
-exarkun\n arg_4 = _lib.BN_hex2bn(arg_3, arg_2)\n\n if arg_3[0] == _ffi.NULL:\n arg_5 = _lib.ASN1_INTEGER_set(\n _lib.X509_get_serialNumber(arg_0._x509), arg_4)\n if arg_5:\n # TODO Not tested\n _raise_current_error()\n else:\n arg_6 = _lib.BN_to_ASN1_INTEGER(arg_3[0], _ffi.NULL)\n _lib.BN_free(arg_3[0])\n if arg_6 == _ffi.NULL:\n # TODO Not tested\n _raise_current_error()\n arg_6 = _ffi.gc(arg_6, _lib.ASN1_INTEGER_free)\n arg_5 = _lib.X509_set_serialNumber(arg_0._x509, arg_6)\n _openssl_assert(arg_5 == 1)"} +{"_id": "doc_4567", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the serial number of this certificate.\n\n :return: The serial number.\n :rtype: int\n \"\"\"\n arg_1 = _lib.X509_get_serialNumber(arg_0._x509)\n arg_2 = _lib.ASN1_INTEGER_to_BN(arg_1, _ffi.NULL)\n try:\n arg_3 = _lib.BN_bn2hex(arg_2)\n try:\n arg_4 = _ffi.string(arg_3)\n arg_5 = int(arg_4, 16)\n return arg_5\n finally:\n _lib.OPENSSL_free(arg_3)\n finally:\n _lib.BN_free(arg_2)"} +{"_id": "doc_4568", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Adjust the time stamp on which the certificate stops being valid.\n\n :param int amount: The number of seconds by which to adjust the\n timestamp.\n :return: ``None``\n \"\"\"\n if not isinstance(arg_1, int):\n raise TypeError(\"amount must be an integer\")\n\n arg_2 = _lib.X509_get_notAfter(arg_0._x509)\n _lib.X509_gmtime_adj(arg_2, arg_1)"} +{"_id": "doc_4569", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the issuer of this certificate.\n\n This creates a new :class:`X509Name` that wraps the underlying issuer\n name field on the certificate. Modifying it will modify the underlying\n certificate, and will have the effect of modifying any other\n :class:`X509Name` that refers to this issuer.\n\n :return: The issuer of this certificate.\n :rtype: :class:`X509Name`\n \"\"\"\n arg_1 = arg_0._get_name(_lib.X509_Func_name)\n arg_0._issuer_invalidator.add(arg_1)\n return arg_1"} +{"_id": "doc_4570", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the subject of this certificate.\n\n This creates a new :class:`X509Name` that wraps the underlying subject\n name field on the certificate. Modifying it will modify the underlying\n certificate, and will have the effect of modifying any other\n :class:`X509Name` that refers to this subject.\n\n :return: The subject of this certificate.\n :rtype: :class:`X509Name`\n \"\"\"\n arg_1 = arg_0._get_name(_lib.X509_Func_name)\n arg_0._subject_invalidator.add(arg_1)\n return arg_1"} +{"_id": "doc_4571", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a specific extension of the certificate by index.\n\n Extensions on a certificate are kept in order. The index\n parameter selects which extension will be returned.\n\n :param int index: The index of the extension to retrieve.\n :return: The extension at the specified index.\n :rtype: :py:class:`X509Extension`\n :raises IndexError: If the extension index was out of bounds.\n\n .. 
versionadded:: 0.12\n \"\"\"\n arg_2 = X509Extension.__new__(X509Extension)\n arg_2._extension = _lib.X509_get_ext(arg_0._x509, arg_1)\n if arg_2._extension == _ffi.NULL:\n raise IndexError(\"extension index out of bounds\")\n\n arg_4 = _lib.X509_EXTENSION_dup(arg_2._extension)\n arg_2._extension = _ffi.gc(arg_4, _lib.X509_EXTENSION_free)\n return arg_2"} +{"_id": "doc_4572", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add a certificate revocation list to this store.\n\n The certificate revocation lists added to a store will only be used if\n the associated flags are configured to check certificate revocation\n lists.\n\n .. versionadded:: 16.1.0\n\n :param CRL crl: The certificate revocation list to add to this store.\n :return: ``None`` if the certificate revocation list was added\n successfully.\n \"\"\"\n _openssl_assert(_lib.X509_STORE_Func(arg_0._store, arg_1._crl) != 0)"} +{"_id": "doc_4573", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Set up the store context for a subsequent verification operation.\n\n Calling this method more than once without first calling\n :meth:`_cleanup` will leak memory.\n \"\"\"\n arg_1 = _lib.X509_STORE_CTXFunc(\n arg_0._store_ctx, arg_0._store._store, arg_0._cert._x509, _ffi.NULL\n )\n if arg_1 <= 0:\n _raise_current_error()"} +{"_id": "doc_4574", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the serial number.\n\n The serial number is formatted as a hexadecimal number encoded in\n ASCII.\n\n :param bytes hex_str: The new serial number.\n\n :return: ``None``\n \"\"\"\n arg_2 = _ffi.gc(_lib.BN_new(), _lib.BN_free)\n arg_3 = _ffi.new(\"BIGNUM**\")\n arg_3[0] = arg_2\n arg_4 = _lib.BN_hex2bn(arg_3, arg_1)\n if not arg_4:\n raise ValueError(\"bad hex string\")\n\n arg_5 = _ffi.gc(\n _lib.BN_to_ASN1_INTEGER(arg_2, _ffi.NULL),\n _lib.ASN1_INTEGER_free)\n _lib.X509_REVOKED_FuncNumber(arg_0._revoked, arg_5)"} +{"_id": "doc_4575", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the serial number.\n\n The serial number is formatted as a hexadecimal number encoded in\n ASCII.\n\n :return: The serial number.\n :rtype: bytes\n \"\"\"\n arg_1 = _new_mem_buf()\n\n arg_2 = _lib.X509_REVOKED_get0_serialNumber(arg_0._revoked)\n _openssl_assert(arg_2 != _ffi.NULL)\n arg_3 = _lib.i2a_ASN1_INTEGER(arg_1, arg_2)\n _openssl_assert(arg_3 >= 0)\n return _bio_to_string(arg_1)"} +{"_id": "doc_4576", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the reason of this revocation.\n\n If :data:`reason` is ``None``, delete the reason instead.\n\n :param reason: The reason string.\n :type reason: :class:`bytes` or :class:`NoneType`\n\n :return: ``None``\n\n .. 
seealso::\n\n :meth:`all_reasons`, which gives you a list of all supported\n reasons which you might pass to this method.\n \"\"\"\n if arg_1 is None:\n arg_0._delete_reason()\n elif not isinstance(arg_1, bytes):\n raise TypeError(\"reason must be None or a byte string\")\n else:\n arg_1 = arg_1.lower().replace(b' ', b'')\n arg_2 = [r.lower() for r in arg_0._crl_reasons].index(arg_1)\n\n arg_3 = _lib.ASN1_ENUMERATED_new()\n _openssl_assert(arg_3 != _ffi.NULL)\n arg_3 = _ffi.gc(arg_3, _lib.ASN1_ENUMERATED_free)\n\n arg_4 = _lib.ASN1_ENUMERATED_set(arg_3, arg_2)\n _openssl_assert(arg_4 != _ffi.NULL)\n\n arg_0._delete_reason()\n arg_5 = _lib.X509_REVOKED_add1_ext_i2d(\n arg_0._revoked, _lib.NID_crl_reason, arg_3, 0, 0)\n _openssl_assert(arg_5 == 1)"} +{"_id": "doc_4577", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Export as a ``cryptography`` CRL.\n\n :rtype: ``cryptography.x509.CertificateRevocationList``\n\n .. versionadded:: 17.1.0\n \"\"\"\n from cryptography.hazmat.backends.openssl.x509 import (\n _CertificateRevocationList\n )\n arg_1 = _get_backend()\n return _CertificateRevocationList(arg_1, arg_0._crl)"} +{"_id": "doc_4578", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the CRL's issuer.\n\n .. versionadded:: 16.1.0\n\n :rtype: X509Name\n \"\"\"\n arg_1 = _lib.X509_NAME_dup(_lib.X509_CRL_Func(arg_0._crl))\n _openssl_assert(arg_1 != _ffi.NULL)\n arg_1 = _ffi.gc(arg_1, _lib.X509_NAME_free)\n arg_2 = X509Name.__new__(X509Name)\n arg_2._name = arg_1\n return arg_2"} +{"_id": "doc_4579", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Sign the CRL.\n\n Signing a CRL enables clients to associate the CRL itself with an\n issuer. Before a CRL is meaningful to other OpenSSL functions, it must\n be Funced by an issuer.\n\n This method implicitly sets the issuer's name based on the issuer\n certificate and private key used to Func the CRL.\n\n .. 
versionadded:: 16.1.0\n\n :param X509 issuer_cert: The issuer's certificate.\n :param PKey issuer_key: The issuer's private key.\n :param bytes digest: The digest method to Func the CRL with.\n \"\"\"\n arg_4 = _lib.EVP_get_digestbyname(arg_3)\n _openssl_assert(arg_4 != _ffi.NULL)\n _lib.X509_CRL_set_issuer_name(\n arg_0._crl, _lib.X509_get_subject_name(arg_1._x509))\n _lib.X509_CRL_sort(arg_0._crl)\n arg_5 = _lib.X509_CRL_Func(arg_0._crl, arg_2._pkey, arg_4)\n _openssl_assert(arg_5 != 0)"} +{"_id": "doc_4580", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the type name of the PKCS7 structure\n\n :return: A string with the typename\n \"\"\"\n arg_1 = _lib.OBJ_obj2nid(arg_0._pkcs7.type)\n arg_2 = _lib.OBJ_nid2sn(arg_1)\n return _ffi.string(arg_2)"} +{"_id": "doc_4581", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Replace or set the CA certificates within the PKCS12 object.\n\n :param cacerts: The new CA certificates, or :py:const:`None` to unset\n them.\n :type cacerts: An iterable of :py:class:`X509` or :py:const:`None`\n\n :return: ``None``\n \"\"\"\n if arg_1 is None:\n arg_0._cacerts = None\n else:\n arg_1 = list(arg_1)\n for arg_3 in arg_1:\n if not isinstance(arg_3, X509):\n raise TypeError(\n \"iterable must only contain X509 instances\"\n )\n arg_0._cacerts = arg_1"} +{"_id": "doc_4582", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Sign the certificate request with this key and digest type.\n\n :param pkey: The private key to Func with.\n :type pkey: :py:class:`PKey`\n\n :param digest: The message digest to use.\n :type digest: :py:class:`bytes`\n\n :return: ``None``\n \"\"\"\n if arg_1._only_public:\n raise ValueError(\"Key has only public part\")\n\n if not arg_1._initialized:\n raise ValueError(\"Key is uninitialized\")\n\n arg_3 = _lib.EVP_get_digestbyname(_byte_string(arg_2))\n if arg_3 == _ffi.NULL:\n raise ValueError(\"No such digest method\")\n\n arg_4 = _lib.NETSCAPE_SPKI_Func(\n arg_0._spki, arg_1._pkey, arg_3\n )\n _openssl_assert(arg_4 > 0)"} +{"_id": "doc_4583", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generate a base64 encoded representation of this SPKI object.\n\n :return: The base64 encoded string.\n :rtype: :py:class:`bytes`\n \"\"\"\n arg_1 = _lib.NETSCAPE_SPKI_Func(arg_0._spki)\n arg_2 = _ffi.string(arg_1)\n _lib.OPENSSL_free(arg_1)\n return arg_2"} +{"_id": "doc_4584", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the public key of this certificate.\n\n :return: The public key.\n :rtype: :py:class:`PKey`\n \"\"\"\n arg_1 = PKey.__new__(PKey)\n arg_1._pkey = _lib.NETSCAPE_SPKI_Func(arg_0._spki)\n _openssl_assert(arg_1._pkey != _ffi.NULL)\n arg_1._pkey = _ffi.gc(arg_1._pkey, _lib.EVP_PKEY_free)\n arg_1._only_public = True\n return arg_1"} +{"_id": "doc_4585", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the public key of the certificate\n\n :param pkey: The public key\n :return: ``None``\n \"\"\"\n arg_2 = _lib.NETSCAPE_SPKI_Func(arg_0._spki, arg_1._pkey)\n _openssl_assert(arg_2 == 1)"} +{"_id": "doc_4586", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n If ``obj`` is text, emit a warning that it should be bytes instead and try\n to convert it to bytes automatically.\n\n :param str label: The name of the parameter from which ``obj`` was taken\n (so a developer can easily find the source of the problem and correct\n it).\n\n :return: If ``obj`` is the text string type, a ``bytes`` object giving the\n UTF-8 encoding of that text is returned. 
Otherwise, ``obj`` itself is\n returned.\n \"\"\"\n if isinstance(arg_1, text_type):\n warnings.warn(\n _TEXT_WARNING.format(arg_0),\n category=DeprecationWarning,\n stacklevel=3\n )\n return arg_1.encode('utf-8')\n return arg_1"} +{"_id": "doc_4587", "title": "", "text": "def Func(arg_0) :\n 'Returns a generator of \"Path\"s'\n try :\n for arg_1 in list_files_in_dir(arg_0.info_dir):\n if not os.path.basename(arg_1).endswith('.trashinfo') :\n arg_0.on_non_trashinfo_found()\n else :\n yield arg_1\n except OSError: # when directory does not exist\n pass"} +{"_id": "doc_4588", "title": "", "text": "def Func(arg_0):\n \"\"\"Internal helper to provide color names.\"\"\"\n def _helper(arg_1):\n arg_2 = style_from_dict({\n Token.Color: arg_0,\n })\n arg_3 = [\n (Token.Color, arg_1)\n ]\n print_tokens(arg_3, arg_2=arg_2)\n\n def _helper_no_terminal(arg_1):\n # workaround if we have no terminal\n print(arg_1)\n if sys.stdout.isatty():\n return _helper\n else:\n return _helper_no_terminal"} +{"_id": "doc_4589", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Serializes str, Attrib, or PathAttrib objects.\n\n Example::\n\n foobar\n \"\"\"\n if isinstance(arg_2, (str, int)) or type(arg_2).__name__ == 'str':\n arg_1.text = str(arg_2)\n elif arg_2 is None:\n arg_1.text = None\n else:\n arg_1.append(arg_2.serialize(arg_0))"} +{"_id": "doc_4590", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Serializes a list, where the values are objects of type\n str, Attrib, or PathAttrib.\n\n Example::\n\n text\n foobar\n foobar\n \"\"\"\n for arg_3 in arg_2:\n arg_4 = SubElement(arg_1, 'value')\n arg_0.serialize_value(arg_4, arg_3)\n return arg_1"} +{"_id": "doc_4591", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse the event definition node, and return an instance of Event\n \"\"\"\n arg_1 = first(\n arg_0.xpath('.//bpmn:messageEventDefinition'))\n if arg_1 is not None:\n return arg_0.get_message_event_definition(arg_1)\n\n arg_2 = first(\n arg_0.xpath('.//bpmn:timerEventDefinition'))\n if arg_2 is not None:\n return arg_0.get_timer_event_definition(arg_2)\n\n raise NotImplementedError(\n 'Unsupported Intermediate Catch Event: %r', ET.tostring(arg_0.node))"} +{"_id": "doc_4592", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse the messageEventDefinition node and return an instance of\n MessageEventDefinition\n \"\"\"\n arg_2 = first(arg_0.xpath('.//bpmn:messageRef'))\n arg_3 = arg_2.get(\n 'name') if arg_2 is not None else arg_0.node.get('name')\n return MessageEventDefinition(arg_3)"} +{"_id": "doc_4593", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse the timerEventDefinition node and return an instance of\n TimerEventDefinition\n\n This currently only supports the timeDate node for specifying an expiry\n time for the timer.\n \"\"\"\n arg_2 = first(arg_0.xpath('.//bpmn:timeDate'))\n return TimerEventDefinition(\n arg_0.node.get('name', arg_2.text),\n arg_0.parser.parse_condition(\n arg_2.text, None, None, None, None, arg_0))"} +{"_id": "doc_4594", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Called by the weak reference when its target dies.\n In other words, we can assert that self.weak_subscribers is not\n None at this time.\n \"\"\"\n with arg_0.lock:\n arg_2 = [s[0] for s in arg_0.weak_subscribers]\n try:\n arg_3 = arg_2.index(arg_1)\n except ValueError:\n # subscriber was already removed by a call to disconnect()\n pass\n else:\n arg_0.weak_subscribers.pop(arg_3)"} +{"_id": "doc_4595", "title": "", "text": "def Func(arg_0, 
arg_1, arg_2):\n \"\"\"\n Connects a taskspec that is executed if the condition DOES match.\n\n condition -- a condition (Condition)\n taskspec -- the conditional task spec\n \"\"\"\n assert arg_2 is not None\n arg_0.outputs.append(arg_2)\n arg_0.cond_task_specs.append((arg_1, arg_2.name))\n arg_2._connect_notify(arg_0)"} +{"_id": "doc_4596", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Runs the task. Should not be called directly.\n Returns True if completed, False otherwise.\n \"\"\"\n # Find all matching conditions.\n arg_2 = []\n for arg_3, arg_4 in arg_0.cond_task_specs:\n if arg_0.choice is not None and arg_4 not in arg_0.choice:\n continue\n if arg_3 is None:\n arg_2.append(arg_0._wf_spec.get_task_spec_from_name(arg_4))\n continue\n if not arg_3._matches(arg_1):\n continue\n arg_2.append(arg_0._wf_spec.get_task_spec_from_name(arg_4))\n\n arg_1._sync_children(arg_2, Task.FUTURE)\n for arg_5 in arg_1.children:\n arg_5.task_spec._update(arg_5)"} +{"_id": "doc_4597", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns True if the entire Workflow is completed, False otherwise.\n\n :rtype: bool\n :return: Whether the workflow is completed.\n \"\"\"\n arg_1 = Task.NOT_FINISHED_MASK\n arg_2 = Task.Iterator(arg_0.task_tree, arg_1)\n try:\n next(arg_2)\n except StopIteration:\n # No waiting tasks found.\n return True\n return False"} +{"_id": "doc_4598", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Cancels all open tasks in the workflow.\n\n :type success: bool\n :param success: Whether the Workflow should be marked as successfully\n completed.\n \"\"\"\n arg_0.success = arg_1\n Func = []\n arg_3 = Task.NOT_FINISHED_MASK\n for arg_4 in Task.Iterator(arg_0.task_tree, arg_3):\n Func.append(arg_4)\n for arg_4 in Func:\n arg_4.cancel()"} +{"_id": "doc_4599", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the task with the given id.\n\n :type id:integer\n :param id: The id of a task.\n :rtype: Task\n :returns: The task with the given id.\n \"\"\"\n arg_2 = [task for task in arg_0.Funcs() if task.id == arg_1]\n return arg_2[0] if len(arg_2) == 1 else None"} +{"_id": "doc_4600", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns all tasks whose spec has the given name.\n\n :type name: str\n :param name: The name of a task spec.\n :rtype: Task\n :return: The task that relates to the spec with the given name.\n \"\"\"\n return [arg_2 for arg_2 in arg_0.get_tasks()\n if arg_2.task_spec.name == arg_1]"} +{"_id": "doc_4601", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\"\n Runs the next task.\n Returns True if completed, False otherwise.\n\n :type pick_up: bool\n :param pick_up: When True, this method attempts to choose the next\n task not by searching beginning at the root, but by\n searching from the position at which the last call\n of Func() left off.\n :type halt_on_manual: bool\n :param halt_on_manual: When True, this method will not attempt to\n complete any tasks that have manual=True.\n See :meth:`SpiffWorkflow.specs.TaskSpec.__init__`\n :rtype: bool\n :returns: True if all tasks were completed, False otherwise.\n \"\"\"\n # Try to pick up where we left off.\n arg_3 = []\n if arg_1 and arg_0.last_task is not None:\n try:\n arg_4 = Task.Iterator(arg_0.last_task, Task.READY)\n arg_5 = next(arg_4)\n except StopIteration:\n arg_5 = None\n arg_0.last_task = None\n if arg_5 is not None:\n if not (arg_2 and arg_5.task_spec.manual):\n if arg_5.complete():\n arg_0.last_task = arg_5\n return True\n 
arg_3.append(arg_5)\n\n # Walk through all ready tasks.\n for arg_5 in Task.Iterator(arg_0.task_tree, Task.READY):\n for arg_7 in arg_3:\n if arg_5._is_descendant_of(arg_7):\n continue\n if not (arg_2 and arg_5.task_spec.manual):\n if arg_5.complete():\n arg_0.last_task = arg_5\n return True\n arg_3.append(arg_5)\n\n # Walk through all waiting tasks.\n for arg_5 in Task.Iterator(arg_0.task_tree, Task.WAITING):\n arg_5.task_spec._update(arg_5)\n if not arg_5._has_state(Task.WAITING):\n arg_0.last_task = arg_5\n return True\n return False"} +{"_id": "doc_4602", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, **arg_3):\n \"\"\"\n Create a new workflow instance from the given spec and arguments.\n\n :param workflow_spec: the workflow spec to use\n\n :param read_only: this should be in read only mode\n\n :param kwargs: Any extra kwargs passed to the deserialize_workflow\n method will be passed through here\n \"\"\"\n return BpmnWorkflow(arg_1, arg_2=arg_2, **arg_3)"} +{"_id": "doc_4603", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"\n Adds a new child and assigns the given TaskSpec to it.\n\n :type task_spec: TaskSpec\n :param task_spec: The task spec that is assigned to the new child.\n :type state: integer\n :param state: The bitmask of states for the new child.\n :rtype: Task\n :returns: The new child task.\n \"\"\"\n if arg_1 is None:\n raise ValueError(arg_0, 'Func() requires a TaskSpec')\n if arg_0._is_predicted() and arg_2 & arg_0.PREDICTED_MASK == 0:\n arg_4 = 'Attempt to add non-predicted child to predicted task'\n raise WorkflowException(arg_0.task_spec, arg_4)\n arg_5 = Task(arg_0.workflow, arg_1, arg_0, arg_2=arg_2)\n arg_5.thread_id = arg_0.thread_id\n if arg_2 == arg_0.READY:\n arg_5._ready()\n return arg_5"} +{"_id": "doc_4604", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Assigns a new thread id to the task.\n\n :type recursive: bool\n :param recursive: Whether to assign the id to children recursively.\n :rtype: bool\n :returns: The new thread id.\n \"\"\"\n arg_0.__class__.thread_id_pool += 1\n arg_0.thread_id = arg_0.__class__.thread_id_pool\n if not arg_1:\n return arg_0.thread_id\n for arg_3 in arg_0:\n arg_3.thread_id = arg_0.thread_id\n return arg_0.thread_id"} +{"_id": "doc_4605", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the ancestor that has a task with the given task spec\n as a parent.\n If no such ancestor was found, the root task is returned.\n\n :type parent_task_spec: TaskSpec\n :param parent_task_spec: The wanted ancestor.\n :rtype: Task\n :returns: The child of the given ancestor.\n \"\"\"\n if arg_0.parent is None:\n return arg_0\n if arg_0.parent.task_spec == arg_1:\n return arg_0\n return arg_0.parent.Func(arg_1)"} +{"_id": "doc_4606", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the ancestor that has the given task spec assigned.\n If no such ancestor was found, the root task is returned.\n\n :type task_spec: TaskSpec\n :param task_spec: The wanted task spec.\n :rtype: Task\n :returns: The ancestor.\n \"\"\"\n if arg_0.parent is None:\n return arg_0\n if arg_0.parent.task_spec == arg_1:\n return arg_0.parent\n return arg_0.parent.Func(arg_1)"} +{"_id": "doc_4607", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the ancestor that has a task with the given name assigned.\n Returns None if no such ancestor was found.\n\n :type name: str\n :param name: The name of the wanted task.\n :rtype: Task\n :returns: The ancestor.\n \"\"\"\n if arg_0.parent is 
None:\n return None\n if arg_0.parent.get_name() == arg_1:\n return arg_0.parent\n return arg_0.parent.Func(arg_1)"} +{"_id": "doc_4608", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a textual representation of this Task's state.\n \"\"\"\n arg_1 = []\n for arg_2, arg_3 in list(arg_0.state_names.items()):\n if arg_0._has_state(arg_2):\n arg_1.append(arg_3)\n return '|'.join(arg_1)"} +{"_id": "doc_4609", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=True):\n \"\"\"\n Returns the subtree as a string for debugging.\n\n :rtype: str\n :returns: The debug information.\n \"\"\"\n arg_3 = (' ' * arg_1 * 2)\n arg_3 += '%s/' % arg_0.id\n arg_3 += '%s:' % arg_0.thread_id\n arg_3 += ' Task of %s' % arg_0.get_name()\n if arg_0.task_spec.description:\n arg_3 += ' (%s)' % arg_0.get_description()\n arg_3 += ' State: %s' % arg_0.get_state_name()\n arg_3 += ' Children: %s' % len(arg_0.children)\n if arg_2:\n for arg_4 in arg_0.children:\n arg_3 += '\\n' + arg_4.Func(arg_1 + 1)\n return arg_3"} +{"_id": "doc_4610", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses args and evaluates any Attrib entries\"\"\"\n arg_2 = []\n for arg_3 in arg_0:\n if isinstance(arg_3, Attrib) or isinstance(arg_3, PathAttrib):\n arg_2.append(valueof(arg_1, arg_3))\n else:\n arg_2.append(arg_3)\n return arg_2"} +{"_id": "doc_4611", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses kwargs and evaluates any Attrib entries\"\"\"\n arg_2 = {}\n for arg_3, arg_4 in list(arg_0.items()):\n if isinstance(arg_4, Attrib) or isinstance(arg_4, PathAttrib):\n arg_2[arg_3] = valueof(arg_1, arg_4)\n else:\n arg_2[arg_3] = arg_4\n return arg_2"} +{"_id": "doc_4612", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Sends Celery asynchronous call and stores async call information for\n retrieval laster\"\"\"\n arg_2, arg_3 = None, None\n if arg_0.args:\n arg_2 = _eval_args(arg_0.args, arg_1)\n if arg_0.kwargs:\n arg_3 = _eval_kwargs(arg_0.kwargs, arg_1)\n LOG.debug(\n \"%s (task id %s) calling %s\" % (arg_0.name, arg_1.id, arg_0.call),\n extra=dict(data=dict(arg_2=arg_2, arg_3=arg_3)))\n arg_4 = default_app.send_task(arg_0.call, arg_2=arg_2, arg_3=arg_3)\n arg_1._set_internal_data(task_id=arg_4.task_id)\n arg_1.async_call = arg_4\n LOG.debug(\"'%s' called: %s\" % (arg_0.call, arg_1.async_call.task_id))"} +{"_id": "doc_4613", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Abort celery task and retry it\"\"\"\n if not arg_1._has_state(Task.WAITING):\n raise WorkflowException(arg_1, \"Cannot refire a task that is not\"\n \"in WAITING state\")\n # Check state of existing call and abort it (save history)\n if arg_1._get_internal_data('task_id') is not None:\n if not hasattr(arg_1, 'async_call'):\n arg_2 = arg_1._get_internal_data('task_id')\n arg_1.async_call = default_app.AsyncResult(arg_2)\n arg_1.deserialized = True\n arg_1.async_call.state # manually refresh\n arg_3 = arg_1.async_call\n if arg_3.state == 'FAILED':\n pass\n elif arg_3.state in ['RETRY', 'PENDING', 'STARTED']:\n arg_3.revoke()\n LOG.info(\"Celery task '%s' was in %s state and was revoked\" % (\n arg_3.state, arg_3))\n elif arg_3.state == 'SUCCESS':\n LOG.warning(\"Celery task '%s' succeeded, but a refire was \"\n \"requested\" % arg_3)\n arg_0._clear_celery_task_data(arg_1)\n # Retrigger\n return arg_0._start(arg_1)"} +{"_id": "doc_4614", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Clear celery task data \"\"\"\n # Save history\n if 'task_id' in arg_1.internal_data:\n # Save history for diagnostics/forensics\n arg_2 = 
arg_1._get_internal_data('task_history', [])\n arg_2.append(arg_1._get_internal_data('task_id'))\n del arg_1.internal_data['task_id']\n arg_1._set_internal_data(task_history=arg_2)\n if 'task_state' in arg_1.internal_data:\n del arg_1.internal_data['task_state']\n if 'error' in arg_1.internal_data:\n del arg_1.internal_data['error']\n if hasattr(arg_1, 'async_call'):\n delattr(arg_1, 'async_call')\n if hasattr(arg_1, 'deserialized'):\n delattr(arg_1, 'deserialized')"} +{"_id": "doc_4615", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=0):\n \"\"\"\n Updates the branch such that all possible future routes are added.\n\n Should NOT be overwritten! Instead, overwrite Func_hook().\n\n :type my_task: Task\n :param my_task: The associated task in the task tree.\n :type seen: list[taskspec]\n :param seen: A list of already visited tasks.\n :type looked_ahead: integer\n :param looked_ahead: The depth of the predicted path so far.\n \"\"\"\n if arg_1._is_finished():\n return\n if arg_2 is None:\n arg_2 = []\n elif arg_0 in arg_2:\n return\n if not arg_1._is_finished():\n arg_0.Func_hook(arg_1)\n if not arg_1._is_definite():\n if arg_3 + 1 >= arg_0.lookahead:\n return\n arg_2.append(arg_0)\n for arg_4 in arg_1.children:\n arg_4.task_spec.Func(arg_4, arg_2[:], arg_3 + 1)"} +{"_id": "doc_4616", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return True on success, False otherwise.\n\n :type my_task: Task\n :param my_task: The associated task in the task tree.\n \"\"\"\n assert arg_1 is not None\n arg_0.test()\n\n # Acquire locks, if any.\n for arg_2 in arg_0.locks:\n arg_3 = arg_1.workflow._get_mutex(arg_2)\n if not arg_3.testandset():\n return\n\n # Assign variables, if so requested.\n for arg_4 in arg_0.pre_assign:\n arg_4.assign(arg_1, arg_1)\n\n # Run task-specific code.\n arg_0.Func_before_hook(arg_1)\n arg_0.reached_event.emit(arg_1.workflow, arg_1)\n arg_0.Func_hook(arg_1)\n\n # Run user code, if any.\n if arg_0.ready_event.emit(arg_1.workflow, arg_1):\n # Assign variables, if so requested.\n for arg_4 in arg_0.post_assign:\n arg_4.assign(arg_1, arg_1)\n\n # Release locks, if any.\n for arg_2 in arg_0.locks:\n arg_3 = arg_1.workflow._get_mutex(arg_2)\n arg_3.unlock()\n\n arg_0.finished_event.emit(arg_1.workflow, arg_1)"} +{"_id": "doc_4617", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates the package, writing the data out to the provided file-like\n object.\n \"\"\"\n\n # Check that all files exist (and calculate the longest shared path\n # prefix):\n arg_0.input_path_prefix = None\n for arg_2 in arg_0.input_files:\n if not arg_5.path.isfile(arg_2):\n raise ValueError(\n '%s does not exist or is not a file' % arg_2)\n if arg_0.input_path_prefix:\n arg_3 = arg_5.path.abspath(arg_5.path.dirname(arg_2))\n while not (arg_3.startswith(arg_0.input_path_prefix) and\n arg_0.input_path_prefix):\n arg_0.input_path_prefix = arg_0.input_path_prefix[:-1]\n else:\n arg_0.input_path_prefix = arg_5.path.abspath(\n arg_5.path.dirname(arg_2))\n\n # Parse all of the XML:\n arg_0.bpmn = {}\n for arg_2 in arg_0.input_files:\n arg_4 = ET.parse(arg_2)\n arg_0.bpmn[arg_5.path.abspath(arg_2)] = arg_4\n\n # Now run through pre-parsing and validation:\n for arg_2, arg_4 in list(arg_0.bpmn.items()):\n arg_4 = arg_0.pre_parse_and_validate(arg_4, arg_2)\n arg_0.bpmn[arg_5.path.abspath(arg_2)] = arg_4\n\n # Now check that we can parse it fine:\n for arg_2, arg_4 in list(arg_0.bpmn.items()):\n arg_0.parser.add_bpmn_xml(arg_4, arg_2=arg_2)\n\n arg_0.wf_spec = 
arg_0.parser.get_spec(arg_0.entry_point_process)\n\n # Now package everything:\n arg_0.package_zip = zipfile.ZipFile(\n arg_0.package_file, \"w\", compression=zipfile.ZIP_DEFLATED)\n\n arg_10 = set()\n for arg_11 in arg_0.wf_spec.get_specs_depth_first():\n arg_2 = arg_11.file\n if arg_2 not in arg_10:\n arg_10.add(arg_2)\n\n arg_4 = arg_0.bpmn[arg_5.path.abspath(arg_2)]\n arg_0.write_to_package_zip(\n \"%s.bpmn\" % arg_11.name, ET.tostring(arg_4.getroot()))\n\n arg_0.write_file_to_package_zip(\n \"src/\" + arg_0._get_zip_path(arg_2), arg_2)\n\n arg_0._call_editor_hook('package_for_editor', arg_11, arg_2)\n\n arg_0.write_meta_data()\n arg_0.write_manifest()\n\n arg_0.package_zip.close()"} +{"_id": "doc_4618", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Writes a local file in to the zip file and adds it to the manifest\n dictionary\n\n :param filename: The zip file name\n\n :param src_filename: the local file name\n \"\"\"\n arg_3 = open(arg_2)\n with arg_3:\n arg_4 = arg_3.read()\n arg_0.manifest[arg_1] = md5hash(arg_4)\n arg_0.package_zip.write(arg_2, arg_1)"} +{"_id": "doc_4619", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Adds the SVG files to the archive for this BPMN file.\n \"\"\"\n arg_3 = arg_2[:-len('.bpmn20.xml')] + '.signavio.xml'\n if os.path.exists(arg_3):\n arg_0.write_file_to_package_zip(\n \"src/\" + arg_0._get_zip_path(arg_3), arg_3)\n\n arg_4 = open(arg_3, 'r')\n try:\n arg_5 = ET.parse(arg_4)\n finally:\n arg_4.close()\n arg_6 = one(arg_5.findall('.//svg-representation'))\n arg_0.write_to_package_zip(\"%s.svg\" % arg_1.name, arg_6.text)"} +{"_id": "doc_4620", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Utility method to merge an option and config, with the option taking \"\n precedence\n \"\"\"\n\n arg_4 = getattr(arg_3, arg_1, None)\n if arg_4:\n arg_2.set(CONFIG_SECTION_NAME, arg_1, arg_4)\n elif arg_2.has_option(CONFIG_SECTION_NAME, arg_1):\n setattr(arg_3, arg_1, arg_2.get(\n CONFIG_SECTION_NAME, arg_1))"} +{"_id": "doc_4621", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parses the specified child task node, and returns the task spec. 
This\n can be called by a TaskParser instance, that is owned by this\n ProcessParser.\n \"\"\"\n\n if arg_1.get('id') in arg_0.parsed_nodes:\n return arg_0.parsed_nodes[arg_1.get('id')]\n\n (arg_2, arg_3) = arg_0.parser._get_parser_class(arg_1.tag)\n if not arg_2 or not arg_3:\n raise ValidationException(\n \"There is no support implemented for this task type.\",\n arg_1=arg_1, filename=arg_0.filename)\n arg_4 = arg_2(arg_0, arg_3, arg_1)\n arg_5 = arg_4.Func()\n\n return arg_5"} +{"_id": "doc_4622", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Reads the \"pre-assign\" or \"post-assign\" tag from the given node.\n\n start_node -- the xml node (xml.dom.minidom.Node)\n \"\"\"\n arg_3 = arg_2.getAttribute('name')\n arg_4 = arg_2.getAttribute('field')\n arg_5 = arg_2.getAttribute('value')\n arg_6 = {}\n if arg_3 == '':\n _exc('name attribute required')\n if arg_4 != '' and arg_5 != '':\n _exc('Both, field and right-value attributes found')\n elif arg_4 == '' and arg_5 == '':\n _exc('field or value attribute required')\n elif arg_5 != '':\n arg_6['right'] = arg_5\n else:\n arg_6['right_attribute'] = arg_4\n return operators.Assign(arg_3, **arg_6)"} +{"_id": "doc_4623", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Reads the conditional statement from the given node.\n\n workflow -- the workflow with which the concurrence is associated\n start_node -- the xml structure (xml.dom.minidom.Node)\n \"\"\"\n # Collect all information.\n arg_3 = None\n arg_4 = None\n for arg_5 in arg_2.childNodes:\n if arg_5.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n if arg_5.nodeName.lower() == 'successor':\n if arg_4 is not None:\n _exc('Duplicate task name %s' % arg_4)\n if arg_5.firstChild is None:\n _exc('Successor tag without a task name')\n arg_4 = arg_5.firstChild.nodeValue\n elif arg_5.nodeName.lower() in _op_map:\n if arg_3 is not None:\n _exc('Multiple conditions are not yet supported')\n arg_3 = arg_0.deserialize_logical(arg_5)\n else:\n _exc('Unknown node: %s' % arg_5.nodeName)\n\n if arg_3 is None:\n _exc('Missing condition in conditional statement')\n if arg_4 is None:\n _exc('A %s has no task specified' % arg_2.nodeName)\n return arg_3, arg_4"} +{"_id": "doc_4624", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Reads the workflow from the given XML structure and returns a\n WorkflowSpec instance.\n \"\"\"\n arg_3 = minidom.parseString(arg_1)\n arg_4 = arg_3.getElementsByTagName('process-definition')[0]\n arg_5 = arg_4.getAttribute('name')\n if arg_5 == '':\n _exc('%s without a name attribute' % arg_4.nodeName)\n\n # Read all task specs and create a list of successors.\n arg_6 = specs.WorkflowSpec(arg_5, arg_2)\n del arg_6.task_specs['Start']\n arg_7 = specs.Simple(arg_6, 'End'), []\n arg_8 = dict(arg_7=arg_7)\n for arg_9 in arg_4.childNodes:\n if arg_9.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n if arg_9.nodeName == 'name':\n arg_6.name = arg_9.firstChild.nodeValue\n elif arg_9.nodeName == 'description':\n arg_6.description = arg_9.firstChild.nodeValue\n elif arg_9.nodeName.lower() in _spec_map:\n arg_0.deserialize_task_spec(\n arg_6, arg_9, arg_8)\n else:\n _exc('Unknown node: %s' % arg_9.nodeName)\n\n # Remove the default start-task from the workflow.\n arg_6.start = arg_8['start'][0]\n\n # Connect all task specs.\n for arg_5 in arg_8:\n arg_12, arg_13 = arg_8[arg_5]\n for arg_14, arg_15 in arg_13:\n if arg_15 not in arg_8:\n _exc('Unknown successor: \"%s\"' % arg_15)\n arg_16, arg_17 = arg_8[arg_15]\n if arg_14 is None:\n 
arg_12.connect(arg_16)\n else:\n arg_12.connect_if(arg_14, arg_16)\n return arg_6"} +{"_id": "doc_4625", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Called by a task spec when it was added into the workflow.\n \"\"\"\n if arg_1.name in arg_0.task_specs:\n raise KeyError('Duplicate task spec name: ' + arg_1.name)\n arg_0.task_specs[arg_1.name] = arg_1\n arg_1.id = len(arg_0.task_specs)"} +{"_id": "doc_4626", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks integrity of workflow and reports any problems with it.\n\n Detects:\n - loops (tasks that wait on each other in a loop)\n :returns: empty list if valid, a list of errors if not\n \"\"\"\n arg_1 = []\n from ..specs import Join\n\n def recursive_find_loop(arg_2, arg_3):\n arg_4 = arg_3[:]\n arg_4.append(arg_2)\n if isinstance(arg_2, Join):\n if arg_2 in arg_3:\n arg_5 = \"Found loop with '%s': %s then '%s' again\" % (\n arg_2.name, '->'.join([p.name for p in arg_3]),\n arg_2.name)\n raise Exception(arg_5)\n for arg_6 in arg_2.inputs:\n recursive_find_loop(arg_6, arg_4)\n\n for arg_7 in arg_2.inputs:\n recursive_find_loop(arg_7, arg_4)\n\n for arg_8, arg_2 in list(arg_0.task_specs.items()):\n # Check for cyclic waits\n try:\n recursive_find_loop(arg_2, [])\n except Exception as exc:\n arg_1.append(exc.__str__())\n\n # Check for disconnected tasks\n if not arg_2.inputs and arg_2.name not in ['Start', 'Root']:\n if arg_2.outputs:\n arg_1.append(\"Task '%s' is disconnected (no inputs)\" %\n arg_2.name)\n else:\n LOG.debug(\"Task '%s' is not being used\" % arg_2.name)\n\n return arg_1"} +{"_id": "doc_4627", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Indicate to the workflow that a message has been received. The message\n will be processed by any waiting Intermediate or Boundary Message\n Events, that are waiting for the message.\n \"\"\"\n assert not arg_0.read_only\n arg_0.refresh_waiting_tasks()\n arg_0.do_engine_steps()\n for arg_2 in Task.Iterator(arg_0.task_tree, Task.WAITING):\n arg_2.task_spec.Func(arg_2, arg_1)"} +{"_id": "doc_4628", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, **arg_4):\n \"\"\"\n Deserializes the trigger using the provided serializer.\n \"\"\"\n return arg_1.Func_trigger(arg_2,\n arg_3,\n **arg_4)"} +{"_id": "doc_4629", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Evaluate the given expression, within the context of the given task and\n return the result.\n \"\"\"\n if isinstance(arg_2, Operator):\n return arg_2._matches(arg_1)\n else:\n return arg_0._eval(arg_1, arg_2, **arg_1.data)"} +{"_id": "doc_4630", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Checks whether the preconditions for going to READY state are met.\n Returns True if the threshold was reached, False otherwise.\n Also returns the list of tasks that yet need to be completed.\n \"\"\"\n # If the threshold was already reached, there is nothing else to do.\n if arg_1._has_state(Task.COMPLETED):\n return True, None\n if arg_1._has_state(Task.READY):\n return True, None\n\n # Check whether we may fire.\n if arg_0.split_task is None:\n return arg_0._check_threshold_unstructured(arg_1, arg_2)\n return arg_0._check_threshold_structured(arg_1, arg_2)"} +{"_id": "doc_4631", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Connects the task spec that is executed if no other condition\n matches.\n\n :type task_spec: TaskSpec\n :param task_spec: The following task spec.\n \"\"\"\n assert arg_0.default_task_spec is None\n arg_0.outputs.append(arg_1)\n 
arg_0.default_task_spec = arg_1.name\n arg_1._Func_notify(arg_0)"} +{"_id": "doc_4632", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return extra config options to be passed to the TrelloIssue class\n \"\"\"\n return {\n 'import_labels_as_tags':\n arg_0.config.get('import_labels_as_tags', False, asbool),\n 'label_template':\n arg_0.config.get('label_template', DEFAULT_LABEL_TEMPLATE),\n }"} +{"_id": "doc_4633", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" A wrapper around get_comments that build the taskwarrior\n annotations. \"\"\"\n arg_2 = arg_0.get_comments(arg_1['id'])\n Func = arg_0.build_annotations(\n ((c['memberCreator']['username'], c['data']['text']) for c in arg_2),\n arg_1[\"shortUrl\"])\n return Func"} +{"_id": "doc_4634", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the list of boards to pull cards from. If the user gave a value to\n trello.include_boards use that, otherwise ask the Trello API for the\n user's boards.\n \"\"\"\n if 'include_boards' in arg_0.config:\n for arg_1 in arg_0.config.get('include_boards', to_type=aslist):\n # Get the board name\n yield arg_0.api_request(\n \"/1/boards/{id}\".format(id=arg_1), fields='name')\n else:\n arg_2 = arg_0.api_request(\"/1/members/me/boards\", fields='name')\n for arg_3 in arg_2:\n yield arg_3"} +{"_id": "doc_4635", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\" Build the full url to the API endpoint \"\"\"\n if arg_0.host == 'github.com':\n arg_3 = \"https://api.github.com\"\n else:\n arg_3 = \"https://{}/api/v3\".format(arg_0.host)\n return arg_3 + arg_1.format(**arg_2)"} +{"_id": "doc_4636", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Grab all issues matching a github query \"\"\"\n arg_2 = {}\n for arg_3 in arg_0.client.Func(arg_1):\n arg_4 = arg_3['html_url']\n try:\n arg_5 = arg_0.get_repository_from_issue(arg_3)\n except ValueError as e:\n log.critical(e)\n else:\n arg_2[arg_4] = (arg_5, arg_3)\n return arg_2"} +{"_id": "doc_4637", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Grab all the pull requests \"\"\"\n return [\n (arg_1, arg_2) for arg_2 in\n arg_0.client.get_pulls(*arg_1.split('/'))\n ]"} +{"_id": "doc_4638", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=lambda arg_4: arg_4):\n \"\"\"Return a main config value, or default if it does not exist.\"\"\"\n\n if arg_0.main_config.has_option(arg_0.main_section, arg_1):\n return arg_3(arg_0.main_config.get(arg_0.main_section, arg_1))\n return arg_2"} +{"_id": "doc_4639", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Validate generic options for a particular target \"\"\"\n if arg_1.has_option(arg_2, 'only_if_assigned'):\n die(\"[%s] has an 'only_if_assigned' option. Should be \"\n \"'%s.only_if_assigned'.\" % (arg_2, arg_0.CONFIG_PREFIX))\n if arg_1.has_option(arg_2, 'also_unassigned'):\n die(\"[%s] has an 'also_unassigned' option. Should be \"\n \"'%s.also_unassigned'.\" % (arg_2, arg_0.CONFIG_PREFIX))\n if arg_1.has_option(arg_2, 'default_priority'):\n die(\"[%s] has a 'default_priority' option. Should be \"\n \"'%s.default_priority'.\" % (arg_2, arg_0.CONFIG_PREFIX))\n if arg_1.has_option(arg_2, 'add_tags'):\n die(\"[%s] has an 'add_tags' option. 
Should be \"\n \"'%s.add_tags'.\" % (arg_2, arg_0.CONFIG_PREFIX))"} +{"_id": "doc_4640", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return true if the issue in question should be Funcd \"\"\"\n arg_2 = arg_0.config.get('only_if_assigned', None)\n\n if arg_2:\n arg_3 = arg_0.get_owner(arg_1)\n arg_4 = [arg_2]\n\n if arg_0.config.get('also_unassigned', None, asbool):\n arg_4.append(None)\n\n return arg_3 in arg_4\n\n arg_5 = arg_0.config.get('only_if_author', None)\n\n if arg_5:\n return arg_0.get_author(arg_1) == arg_5\n\n return True"} +{"_id": "doc_4641", "title": "", "text": "def Func(arg_0):\n \"\"\" Make a RST-compatible table\n\n From http://stackoverflow.com/a/12539081\n\n \"\"\"\n arg_1 = 2 + max(\n reduce(\n lambda x, y: x+y, [[len(item) for item in arg_5] for arg_5 in arg_0], []\n )\n )\n arg_2 = len(arg_0[0])\n arg_3 = table_div(arg_2, arg_1, 0)\n arg_4 = 1\n for arg_5 in arg_0:\n arg_3 = arg_3 + '| ' + '| '.join(\n [normalize_cell(x, arg_1-1) for x in arg_5]\n ) + '|\\n'\n arg_3 = arg_3 + table_div(arg_2, arg_1, arg_4)\n arg_4 = 0\n return arg_3"} +{"_id": "doc_4642", "title": "", "text": "def Func(arg_0):\n \"\"\" Retrieve password from the given command \"\"\"\n arg_1 = subprocess.Popen(\n arg_0, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n arg_1.wait()\n if arg_1.returncode == 0:\n return arg_1.stdout.readline().strip().decode('utf-8')\n else:\n die(\n \"Error retrieving password: `{command}` returned '{error}'\".format(\n arg_0=arg_0, error=arg_1.stderr.read().strip()))"} +{"_id": "doc_4643", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Accepts both integers and empty values. \"\"\"\n try:\n return super(BugwarriorConfigParser, arg_0).Func(arg_1, arg_2)\n except ValueError:\n if arg_0.get(arg_1, arg_2) == u'':\n return None\n else:\n raise ValueError(\n \"{section}.{option} must be an integer or empty.\".format(\n arg_1=arg_1, arg_2=arg_2))"} +{"_id": "doc_4644", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Pull down tasks from forges and add them to your taskwarrior tasks.\n\n Relies on configuration in bugwarriorrc\n \"\"\"\n\n try:\n arg_4 = _get_section_name(arg_1)\n arg_5 = _try_load_config(arg_4, arg_2)\n\n arg_6 = os.path.join(get_data_path(arg_5, arg_4),\n 'bugwarrior.lockfile')\n arg_7 = PIDLockFile(arg_6)\n arg_7.acquire(timeout=10)\n try:\n # Get all the issues. This can take a while.\n arg_8 = aggregate_issues(arg_5, arg_4, arg_3)\n\n # Stuff them in the taskwarrior db as necessary\n synchronize(arg_8, arg_5, arg_4, arg_0)\n finally:\n arg_7.release()\n except LockTimeout:\n log.critical(\n 'Your taskrc repository is currently locked. '\n 'Remove the file at %s if you are sure no other '\n 'bugwarrior processes are currently running.' % (\n arg_6\n )\n )\n except RuntimeError as e:\n log.exception(\"Aborted (%s)\" % e)"} +{"_id": "doc_4645", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Pages through an object collection from the bitbucket API.\n Returns an iterator that lazily goes through all the 'values'\n of all the pages in the collection. 
\"\"\"\n arg_1 = arg_0.BASE_API2 + arg_1\n while arg_1 is not None:\n arg_2 = arg_0.get_data(arg_1)\n for arg_3 in arg_2['values']:\n yield arg_3\n arg_1 = arg_2.get('next', None)"} +{"_id": "doc_4646", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns a list of UDAs defined by given targets\n\n For all targets in `targets`, build a dictionary of configuration overrides\n representing the UDAs defined by the passed-in services (`targets`).\n\n Given a hypothetical situation in which you have two services, the first\n of which defining a UDA named 'serviceAid' (\"Service A ID\", string) and\n a second service defining two UDAs named 'serviceBproject'\n (\"Service B Project\", string) and 'serviceBnumber'\n (\"Service B Number\", numeric), this would return the following structure::\n\n {\n 'uda': {\n 'serviceAid': {\n 'label': 'Service A ID',\n 'type': 'string',\n },\n 'serviceBproject': {\n 'label': 'Service B Project',\n 'type': 'string',\n },\n 'serviceBnumber': {\n 'label': 'Service B Number',\n 'type': 'numeric',\n }\n }\n }\n\n \"\"\"\n\n from bugwarrior.services import get_service\n\n arg_1 = {}\n for arg_2 in arg_0:\n arg_1.update(get_service(arg_2).ISSUE_CLASS.UDAS)\n return {\n 'uda': arg_1\n }"} +{"_id": "doc_4647", "title": "", "text": "def Func(arg_0):\n \"\"\" Parse the big ugly sprint string stored by JIRA.\n\n They look like:\n com.atlassian.greenhopper.service.sprint.Sprint@4c9c41a5[id=2322,rapid\n ViewId=1173,state=ACTIVE,name=Sprint 1,startDate=2016-09-06T16:08:07.4\n 55Z,endDate=2016-09-23T16:08:00.000Z,completeDate=,sequence=2322]\n \"\"\"\n arg_1 = arg_0[arg_0.index('[')+1:arg_0.index(']')].split('=')\n arg_2 = sum((entry.rsplit(',', 1) for entry in arg_1), [])\n return dict(zip(arg_2[::2], arg_2[1::2]))"} +{"_id": "doc_4648", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Initialize a new state file with the given contents.\n This function fails in case the state file already exists.\n '''\n arg_0._containers = deepcopy(arg_1)\n arg_0.__write(arg_1, Func=True)"} +{"_id": "doc_4649", "title": "", "text": "def Func(arg_0, arg_1):\n '''Update the current state file with the specified contents'''\n arg_0._containers = deepcopy(arg_1)\n arg_0.__write(arg_1, initialize=False)"} +{"_id": "doc_4650", "title": "", "text": "def Func(arg_0):\n '''Try to Func a blockade state file in the current directory'''\n try:\n with open(arg_0._state_file) as f:\n arg_1 = yaml.safe_Func(f)\n arg_0._containers = arg_1['containers']\n except (IOError, OSError) as err:\n if err.errno == errno.ENOENT:\n raise NotInitializedError(\"No blockade exists in this context\")\n raise InconsistentStateError(\"Failed to Func Blockade state: \"\n + str(err))\n except Exception as err:\n raise InconsistentStateError(\"Failed to Func Blockade state: \"\n + str(err))"} +{"_id": "doc_4651", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Generate a new blockade ID based on the CWD'''\n if not arg_1:\n arg_1 = os.getcwd()\n # this follows a similar pattern as docker-compose uses\n arg_2 = os.path.abspath(arg_1)\n arg_3 = os.path.basename(arg_2).lower()\n arg_4 = re.sub(r\"[^a-z0-9]\", \"\", arg_3)\n if not arg_4: # if we can't get a valid name from CWD, use \"default\"\n arg_4 = \"default\"\n return arg_4"} +{"_id": "doc_4652", "title": "", "text": "def Func(arg_0):\n '''Make sure the state directory exists'''\n try:\n os.makedirs(arg_0._state_dir)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise"} +{"_id": "doc_4653", "title": "", "text": "def Func(arg_0):\n '''Try to 
delete the state.yml file and the folder .blockade'''\n try:\n os.remove(arg_0._state_file)\n except OSError as err:\n if err.errno not in (errno.EPERM, errno.ENOENT):\n raise\n\n try:\n os.rmdir(arg_0._state_dir)\n except OSError as err:\n if err.errno not in (errno.ENOTEMPTY, errno.ENOENT):\n raise"} +{"_id": "doc_4654", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Convert blockade ID and container information into\n a state dictionary object.\n '''\n return dict(blockade_id=arg_0._blockade_id,\n arg_1=arg_1,\n version=arg_0._state_version)"} +{"_id": "doc_4655", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n '''Write the given state information into a file'''\n arg_3 = arg_0._state_file\n arg_0._assure_dir()\n try:\n arg_4 = os.O_WRONLY | os.O_CREAT\n if arg_2:\n arg_4 |= os.O_EXCL\n with os.fdopen(os.open(arg_3, arg_4), \"w\") as f:\n yaml.safe_dump(arg_0.__base_state(arg_1), f)\n except OSError as err:\n if err.errno == errno.EEXIST:\n raise AlreadyInitializedError(\n \"Path %s exists. \"\n \"You may need to destroy a previous blockade.\" % arg_3)\n raise\n except Exception:\n # clean up our created file\n arg_0._state_delete()\n raise"} +{"_id": "doc_4656", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Validate the partitions of containers. If there are any containers\n not in any partition, place them in an new partition.\n '''\n\n # filter out holy containers that don't belong\n # to any partition at all\n arg_2 = frozenset(c.name for c in arg_0 if not c.holy)\n arg_3 = frozenset(c.name for c in arg_0 if c.holy)\n arg_4 = frozenset(c.name for c in arg_0 if c.neutral)\n arg_1 = [frozenset(p) for p in arg_1]\n\n arg_5 = set()\n arg_6 = set()\n arg_7 = set()\n\n for arg_8 in arg_1:\n arg_5.update(arg_8 - arg_2 - arg_3)\n arg_6.update(arg_8 - arg_2)\n arg_7.update(arg_8)\n\n if arg_5:\n raise BlockadeError('Partitions contain unknown containers: %s' %\n list(arg_5))\n\n if arg_6:\n raise BlockadeError('Partitions contain holy containers: %s' %\n list(arg_6))\n\n # put any leftover containers in an implicit partition\n arg_9 = arg_2.difference(arg_7)\n if arg_9:\n arg_1.append(arg_9)\n\n # we create an 'implicit' partition for the neutral containers\n # in case they are not part of the leftover anyways\n if not arg_4.issubset(arg_9):\n arg_1.append(arg_4)\n\n return arg_1"} +{"_id": "doc_4657", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a map of blockade chains IDs -> list of IPs targeted at them\n\n For figuring out which container is in which partition\n \"\"\"\n arg_2 = {}\n if not arg_1:\n raise ValueError(\"invalid blockade_id\")\n arg_3 = arg_0.get_chain_rules(\"FORWARD\")\n\n for arg_4 in arg_3:\n arg_5 = arg_4.split()\n if len(arg_5) < 4:\n continue\n try:\n arg_6 = parse_partition_index(arg_1, arg_5[0])\n except ValueError:\n continue # not a rule targetting a blockade chain\n\n arg_7 = arg_5[3]\n if arg_7:\n arg_2[arg_7] = arg_6\n return arg_2"} +{"_id": "doc_4658", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Start the timer waiting for pain\n \"\"\"\n arg_3 = random.randint(arg_0._start_min_delay, arg_0._start_max_delay)\n arg_0._timer = threading.Timer(arg_3 / 1000.0, arg_0.event_timeout)\n arg_0._timer.start()"} +{"_id": "doc_4659", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Start the blockade event\n \"\"\"\n _logger.info(\"Starting chaos for blockade %s\" % arg_0._blockade_name)\n arg_0._do_blockade_event()\n # start the timer to end the pain\n arg_3 = 
random.randint(arg_0._run_min_time, arg_0._run_max_time)\n arg_0._timer = threading.Timer(arg_3 / 1000.0, arg_0.event_timeout)\n arg_0._timer.start()"} +{"_id": "doc_4660", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Stop chaos when there is no current blockade operation\n \"\"\"\n # Just stop the timer. It is possible that it was too late and the\n # timer is about to run\n _logger.info(\"Stopping chaos for blockade %s\" % arg_0._blockade_name)\n arg_0._timer.cancel()"} +{"_id": "doc_4661", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Stop chaos while there is a blockade event in progress\n \"\"\"\n _logger.info(\"Stopping chaos for blockade %s\" % arg_0._blockade_name)\n arg_0._do_reset_all()"} +{"_id": "doc_4662", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Delete all state associated with the chaos session\n \"\"\"\n if arg_0._done_notification_func is not None:\n arg_0._done_notification_func()\n arg_0._timer.cancel()"} +{"_id": "doc_4663", "title": "", "text": "def Func(arg_0):\n \"\"\"Sort a dictionary or list of containers into dependency order\n\n Returns a sequence\n \"\"\"\n if not isinstance(arg_0, collections.Mapping):\n arg_0 = dict((c.name, c) for c in arg_0)\n\n arg_1 = dict((arg_3, set(c.links.keys()))\n for arg_3, c in arg_0.items())\n arg_2 = _resolve(arg_1)\n return [arg_0[arg_3] for arg_3 in arg_2]"} +{"_id": "doc_4664", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Convert a dictionary of configuration values\n into a sequence of BlockadeContainerConfig instances\n '''\n\n # determine the number of instances of this container\n arg_2 = 1\n arg_3 = arg_1.get('count', 1)\n if isinstance(arg_3, int):\n arg_2 = max(arg_3, 1)\n\n def with_index(arg_0, arg_4):\n if arg_0 and arg_4:\n return '%s_%d' % (arg_0, arg_4)\n return arg_0\n\n def get_instance(arg_5, arg_4=None):\n return BlockadeContainerConfig(\n with_index(arg_5, arg_4),\n arg_1['image'],\n command=arg_1.get('command'),\n links=arg_1.get('links'),\n volumes=arg_1.get('volumes'),\n publish_ports=arg_1.get('ports'),\n expose_ports=arg_1.get('expose'),\n environment=arg_1.get('environment'),\n hostname=arg_1.get('hostname'),\n dns=arg_1.get('dns'),\n start_delay=arg_1.get('start_delay', 0),\n neutral=arg_1.get('neutral', False),\n holy=arg_1.get('holy', False),\n container_name=with_index(arg_1.get('container_name'), arg_4),\n cap_add=arg_1.get('cap_add'))\n\n if arg_2 == 1:\n yield get_instance(arg_0)\n else:\n for arg_4 in range(1, arg_2+1):\n # TODO: configurable name/index format\n yield get_instance(arg_0, arg_4)"} +{"_id": "doc_4665", "title": "", "text": "def Func(arg_0):\n \"\"\"Start the containers and link them together\n \"\"\"\n arg_1 = load_config(arg_0.config)\n arg_2 = get_blockade(arg_1, arg_0)\n arg_3 = arg_2.create(verbose=arg_0.verbose, force=arg_0.force)\n print_containers(arg_3, arg_0.json)"} +{"_id": "doc_4666", "title": "", "text": "def Func(arg_0):\n \"\"\"Destroy all containers and restore networks\n \"\"\"\n arg_1 = load_config(arg_0.config)\n arg_2 = get_blockade(arg_1, arg_0)\n arg_2.destroy()"} +{"_id": "doc_4667", "title": "", "text": "def Func(arg_0):\n \"\"\"Kill some or all containers\n \"\"\"\n arg_1 = arg_0.signal if hasattr(arg_0, 'signal') else \"SIGKILL\"\n __with_containers(arg_0, Blockade.kill, signal=arg_1)"} +{"_id": "doc_4668", "title": "", "text": "def Func(arg_0):\n \"\"\"Fetch the logs of a container\n \"\"\"\n arg_1 = load_config(arg_0.config)\n arg_2 = get_blockade(arg_1, arg_0)\n 
puts(arg_2.logs(arg_0.container).decode(encoding='UTF-8'))"} +{"_id": "doc_4669", "title": "", "text": "def Func(arg_0):\n \"\"\"Start the Blockade REST API\n \"\"\"\n if arg_0.data_dir is None:\n raise BlockadeError(\"You must supply a data directory for the daemon\")\n rest.start(data_dir=arg_0.data_dir, port=arg_0.port, debug=arg_0.debug,\n host_exec=get_host_exec())"} +{"_id": "doc_4670", "title": "", "text": "def Func(arg_0):\n \"\"\"Add one or more existing Docker containers to a Blockade group\n \"\"\"\n arg_1 = load_config(arg_0.config)\n arg_2 = get_blockade(arg_1, arg_0)\n arg_2.add_container(arg_0.containers)"} +{"_id": "doc_4671", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the event log for a given blockade\n \"\"\"\n arg_1 = load_config(arg_0.config)\n arg_2 = get_blockade(arg_1, arg_0)\n\n if arg_0.json:\n arg_3 = None\n arg_4 = puts\n if arg_0.output is not None:\n arg_3 = open(arg_0.output, \"w\")\n arg_4 = arg_3.write\n try:\n arg_5 = \"\"\n arg_6 = arg_2.get_audit().read_logs(as_json=False)\n arg_4('{\"events\": [')\n arg_4(os.linesep)\n for arg_7 in arg_6:\n arg_4(arg_5 + arg_7)\n arg_5 = \",\" + os.linesep\n arg_4(os.linesep)\n arg_4(']}')\n finally:\n if arg_0.output is not None:\n arg_3.close()\n else:\n puts(colored.blue(columns([\"EVENT\", 10],\n [\"TARGET\", 16],\n [\"STATUS\", 8],\n [\"TIME\", 16],\n [\"MESSAGE\", 25])))\n\n arg_6 = arg_2.get_audit().read_logs(as_json=True)\n for arg_7 in arg_6:\n puts(columns([arg_7['event'], 10],\n [str([str(arg_8) for arg_8 in arg_7['targets']]), 16],\n [arg_7['status'], 8],\n [str(arg_7['timestamp']), 16],\n [arg_7['message'], 25]))"} +{"_id": "doc_4672", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2):\n \"\"\"\n Efficient way to compute highly repetitive scoring\n i.e. sequences are involved multiple time\n\n Args:\n sequences(list[str]): list of sequences (either hyp or ref)\n scores_ids(list[tuple(int)]): list of pairs (hyp_id, ref_id)\n ie. scores[i] = rouge_n(scores_ids[i][0],\n scores_ids[i][1])\n\n Returns:\n scores: list of length `len(scores_ids)` containing rouge `n`\n scores as a dict with 'f', 'r', 'p'\n Raises:\n KeyError: if there's a value of i in scores_ids that is not in\n [0, len(sequences)[\n \"\"\"\n arg_3 = [_get_word_ngrams(arg_2, sequence) for sequence in arg_0]\n arg_4 = [len(ngram) for ngram in arg_3]\n\n arg_5 = []\n for arg_6, arg_7 in arg_1:\n arg_8 = arg_3[arg_6]\n arg_9 = arg_4[arg_6]\n\n arg_10 = arg_3[arg_7]\n arg_11 = arg_4[arg_7]\n\n arg_12 = arg_8.intersection(arg_10)\n arg_13 = len(arg_12)\n\n arg_5 += [f_r_p_rouge_n(arg_9,\n arg_11, arg_13)]\n return arg_5"} +{"_id": "doc_4673", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Performs the actual evaluation of Flas-CORS options and actually\n modifies the response object.\n\n This function is used both in the decorator and the after_request\n callback\n \"\"\"\n\n # If CORS has already been evaluated via the decorator, skip\n if hasattr(arg_0, FLASK_CORS_EVALUATED):\n LOG.debug('CORS have been already evaluated, skipping')\n return arg_0\n\n # Some libraries, like OAuthlib, set resp.headers to non Multidict\n # objects (Werkzeug Headers work as well). 
This is a problem because\n # headers allow repeated values.\n if (not isinstance(arg_0.headers, Headers)\n and not isinstance(arg_0.headers, MultiDict)):\n arg_0.headers = MultiDict(arg_0.headers)\n\n arg_3 = get_cors_headers(arg_1, request.headers, request.method)\n\n LOG.debug('Settings CORS headers: %s', str(arg_3))\n\n for arg_4, arg_5 in arg_3.items():\n arg_0.headers.add(arg_4, arg_5)\n\n return arg_0"} +{"_id": "doc_4674", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Safely attempts to match a pattern or string to a request origin.\"\"\"\n if isinstance(arg_1, RegexObject):\n return re.match(arg_1, arg_0)\n elif probably_regex(arg_1):\n return re.match(arg_1, arg_0, flags=re.IGNORECASE)\n else:\n try:\n return arg_0.lower() == arg_1.lower()\n except AttributeError:\n return arg_0 == arg_1"} +{"_id": "doc_4675", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Compute CORS options for an application by combining the DEFAULT_OPTIONS,\n the app's configuration-specified options and any dictionaries passed. The\n last specified option wins.\n \"\"\"\n arg_2 = DEFAULT_OPTIONS.copy()\n arg_2.update(get_app_kwarg_dict(arg_0))\n if arg_1:\n for arg_3 in arg_1:\n arg_2.update(arg_3)\n\n return serialize_options(arg_2)"} +{"_id": "doc_4676", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A helper method to serialize and processes the options dictionary.\n \"\"\"\n arg_1 = (arg_0 or {}).copy()\n\n for arg_2 in arg_0.keys():\n if arg_2 not in DEFAULT_OPTIONS:\n LOG.warning(\"Unknown option passed to Flask-CORS: %s\", arg_2)\n\n # Ensure origins is a list of allowed origins with at least one entry.\n arg_1['origins'] = sanitize_regex_param(arg_1.get('origins'))\n arg_1['allow_headers'] = sanitize_regex_param(arg_1.get('allow_headers'))\n\n # This is expressly forbidden by the spec. Raise a value error so people\n # don't get burned in production.\n if r'.*' in arg_1['origins'] and arg_1['supports_credentials'] and arg_1['send_wildcard']:\n raise ValueError(\"Cannot use supports_credentials in conjunction with\"\n \"an origin string of '*'. See: \"\n \"http://www.w3.org/TR/cors/#resource-requests\")\n\n\n\n serialize_option(arg_1, 'expose_headers')\n serialize_option(arg_1, 'methods', upper=True)\n\n if isinstance(arg_1.get('max_age'), timedelta):\n arg_1['max_age'] = str(int(arg_1['max_age'].total_seconds()))\n\n return arg_1"} +{"_id": "doc_4677", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"\n This function is the decorator which is used to wrap a Flask route with.\n In the simplest case, simply use the default parameters to allow all\n origins in what is the most permissive configuration. 
If this method\n modifies state or performs authentication which may be brute-forced, you\n should add some degree of protection, such as Cross Site Forgery\n Request protection.\n\n :param origins:\n The origin, or list of origins to allow requests from.\n The origin(s) may be regular expressions, case-sensitive strings,\n or else an asterisk\n\n Default : '*'\n :type origins: list, string or regex\n\n :param methods:\n The method or list of methods which the allowed origins are allowed to\n access for non-simple requests.\n\n Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]\n :type methods: list or string\n\n :param expose_headers:\n The header or list which are safe to expose to the API of a CORS API\n specification.\n\n Default : None\n :type expose_headers: list or string\n\n :param allow_headers:\n The header or list of header field names which can be used when this\n resource is accessed by allowed origins. The header(s) may be regular\n expressions, case-sensitive strings, or else an asterisk.\n\n Default : '*', allow all headers\n :type allow_headers: list, string or regex\n\n :param supports_credentials:\n Allows users to make authenticated requests. If true, injects the\n `Access-Control-Allow-Credentials` header in responses. This allows\n cookies and credentials to be submitted across domains.\n\n :note: This option cannot be used in conjuction with a '*' origin\n\n Default : False\n :type supports_credentials: bool\n\n :param max_age:\n The maximum time for which this CORS request maybe cached. This value\n is set as the `Access-Control-Max-Age` header.\n\n Default : None\n :type max_age: timedelta, integer, string or None\n\n :param send_wildcard: If True, and the origins parameter is `*`, a wildcard\n `Access-Control-Allow-Origin` header is sent, rather than the\n request's `Origin` header.\n\n Default : False\n :type send_wildcard: bool\n\n :param vary_header:\n If True, the header Vary: Origin will be returned as per the W3\n implementation guidelines.\n\n Setting this header when the `Access-Control-Allow-Origin` is\n dynamically generated (e.g. when there is more than one allowed\n origin, and an Origin than '*' is returned) informs CDNs and other\n caches that the CORS headers are dynamic, and cannot be cached.\n\n If False, the Vary header will never be injected or altered.\n\n Default : True\n :type vary_header: bool\n\n :param automatic_options:\n Only applies to the `Func` decorator. 
If True, Flask-CORS will\n override Flask's default OPTIONS handling to return CORS headers for\n OPTIONS requests.\n\n Default : True\n :type automatic_options: bool\n\n \"\"\"\n arg_2 = arg_1\n\n def decorator(arg_3):\n LOG.debug(\"Enabling %s for Func using options:%s\", arg_3, arg_2)\n\n # If True, intercept OPTIONS requests by modifying the view function,\n # replicating Flask's default behavior, and wrapping the response with\n # CORS headers.\n #\n # If f.provide_automatic_options is unset or True, Flask's route\n # decorator (which is actually wraps the function object we return)\n # intercepts OPTIONS handling, and requests will not have CORS headers\n if arg_2.get('automatic_options', True):\n arg_3.required_methods = getattr(arg_3, 'required_methods', set())\n arg_3.required_methods.add('OPTIONS')\n arg_3.provide_automatic_options = False\n\n def wrapped_function(*arg_0, **arg_1):\n # Handle setting of Flask-Cors parameters\n arg_6 = get_cors_options(current_app, arg_2)\n\n if arg_6.get('automatic_options') and request.method == 'OPTIONS':\n arg_7 = current_app.make_default_options_response()\n else:\n arg_7 = make_response(arg_3(*arg_0, **arg_1))\n\n set_cors_headers(arg_7, arg_6)\n setattr(arg_7, FLASK_CORS_EVALUATED, True)\n return arg_7\n\n return update_wrapper(wrapped_function, arg_3)\n return decorator"} +{"_id": "doc_4678", "title": "", "text": "def Func(arg_0='', arg_1=''):\n '''This call returns an array of mutual fund symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#mutual-fund-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_2 = pd.DataFrame(mutualFundSymbols(arg_0, arg_1))\n _toDatetime(arg_2)\n _reindex(arg_2, 'symbol')\n return arg_2"} +{"_id": "doc_4679", "title": "", "text": "def Func(arg_0='', arg_1=''):\n '''This call returns an array of OTC symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#otc-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_2 = pd.DataFrame(otcSymbols(arg_0, arg_1))\n _toDatetime(arg_2)\n _reindex(arg_2, 'symbol')\n return arg_2"} +{"_id": "doc_4680", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=''):\n '''for backwards compat, accepting token and version but ignoring'''\n if arg_1:\n return FuncIEXCloud(arg_0, arg_1, arg_2)\n return FuncOrig(arg_0)"} +{"_id": "doc_4681", "title": "", "text": "def Func(arg_0, arg_1='', arg_2='beta'):\n '''for iex cloud'''\n arg_0 = _URL_PREFIX2.format(arg_2=arg_2) + arg_0\n arg_3 = requests.get(urlparse(arg_0).geturl(), proxies=_PYEX_PROXIES, params={'token': arg_1})\n if arg_3.status_code == 200:\n return arg_3.json()\n raise PyEXception('Response %d - ' % arg_3.status_code, arg_3.text)"} +{"_id": "doc_4682", "title": "", "text": "def Func(arg_0=10, arg_1='', arg_2=''):\n '''News about market\n\n https://iexcloud.io/docs/api/#news\n Continuous\n\n Args:\n count (int): limit number of results\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_3 = pd.DataFrame(marketNews(arg_0, arg_1, arg_2))\n _toDatetime(arg_3)\n _reindex(arg_3, 'datetime')\n return arg_3"} +{"_id": "doc_4683", "title": "", "text": "def Func(arg_0='', arg_1=''):\n '''Returns the official open and close for whole market.\n\n https://iexcloud.io/docs/api/#news\n 9:30am-5pm ET Mon-Fri\n\n Args:\n token 
(string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_2 = marketOhlc(arg_0, arg_1)\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(arg_2[arg_4])\n arg_3[-1]['symbol'] = arg_4\n arg_5 = pd.io.json.json_normalize(arg_3)\n _toDatetime(arg_5)\n _reindex(arg_5, 'symbol')\n return arg_5"} +{"_id": "doc_4684", "title": "", "text": "def Func(arg_0='', arg_1=''):\n '''This returns previous day adjusted price data for whole market\n\n https://iexcloud.io/docs/api/#previous-day-prices\n Available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_2 = marketYesterday(arg_0, arg_1)\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(arg_2[arg_4])\n arg_3[-1]['symbol'] = arg_4\n arg_5 = pd.DataFrame(arg_3)\n _toDatetime(arg_5)\n _reindex(arg_5, 'symbol')\n return arg_5"} +{"_id": "doc_4685", "title": "", "text": "def Func(arg_0, arg_1='ytd', arg_2='', arg_3=''):\n '''Stock split history\n\n https://iexcloud.io/docs/api/#splits\n Updated at 9am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_4 = splits(arg_0, arg_1, arg_2, arg_3)\n arg_5 = _splitsToDF(arg_4)\n return arg_5"} +{"_id": "doc_4686", "title": "", "text": "def Func(arg_0='', arg_1=''):\n '''This will return an array of quotes for all Cryptocurrencies supported by the IEX API. Each element is a standard quote object with four additional keys.\n\n https://iexcloud.io/docs/api/#crypto\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result\n '''\n arg_2 = pd.DataFrame(crypto(arg_0, arg_1))\n _toDatetime(arg_2)\n _reindex(arg_2, 'symbol')\n return arg_2"} +{"_id": "doc_4687", "title": "", "text": "def Func(arg_0, arg_1=0.05):\n \"\"\" benjamini hocheberg fdr correction. inspired by statsmodels \n \"\"\"\n # Implement copy from GOATools.\n arg_0 = np.asarray(arg_0)\n arg_2 = np.argsort(arg_0)\n arg_3 = np.take(arg_0, arg_2)\n\n arg_4 = _ecdf(arg_3)\n arg_5 = arg_3 <= arg_4*arg_1\n if arg_5.any():\n arg_6 = max(np.nonzero(arg_5)[0])\n arg_5[:arg_6] = True\n arg_7 = arg_3 / arg_4\n arg_8 = np.minimum.accumulate(arg_7[::-1])[::-1]\n del arg_7\n arg_8[arg_8>1] = 1\n arg_9 = np.empty_like(arg_8)\n arg_9[arg_2] = arg_8\n del arg_8\n arg_10 = np.empty_like(arg_5)\n arg_10[arg_2] = arg_5\n return arg_10, arg_9"} +{"_id": "doc_4688", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"Standardize the mean and variance of the data axis Parameters.\n\n :param data2d: DataFrame to normalize.\n :param axis: int, Which axis to normalize across. If 0, normalize across rows,\n if 1, normalize across columns. If None, don't change data\n \n :Returns: Normalized DataFrame. 
Normalized data with a mean of 0 and variance of 1\n across the specified axis.\n\n \"\"\"\n if arg_1 is None:\n # normalized to mean and std using entire matrix\n # z_scored = (data2d - data2d.values.mean()) / data2d.values.std(ddof=1)\n return arg_0\n assert arg_1 in [0,1]\n # if axis == 1:\n # z_scored = data2d\n # else:\n # z_scored = data2d.T\n\n # z_scored = (z_scored - z_scored.mean()) / z_scored.std(ddof=1)\n \n # if axis == 1:\n # return z_scored\n # else:\n # return z_scored.T\n arg_2 = arg_0.apply(lambda x: (x-x.mean())/x.std(ddof=1), \n arg_1=operator.xor(1, arg_1))\n return arg_2"} +{"_id": "doc_4689", "title": "", "text": "def Func():\n \"\"\"Prepare argparser object. New options will be added in this function first.\"\"\"\n arg_0 = \"%(prog)s -- Gene Set Enrichment Analysis in Python\"\n arg_1 = \"For command line options of each command, type: %(prog)s COMMAND -h\"\n\n # top-level parser\n arg_2 = ap.ArgumentParser(arg_0=arg_0, arg_1=arg_1)\n arg_2.add_argument(\"--version\", action=\"version\", version=\"%(prog)s \"+ __version__)\n arg_3 = arg_2.add_subparsers(dest='subcommand_name') #help=\"sub-command help\")\n\n # command for 'gsea'\n add_gsea_parser(arg_3)\n # command for 'prerank'\n add_prerank_parser(arg_3)\n # command for 'ssgsea'\n add_singlesample_parser(arg_3)\n # command for 'plot'\n add_plot_parser(arg_3)\n # command for 'enrichr'\n add_enrichr_parser(arg_3)\n # command for 'biomart'\n add_biomart_parser(arg_3)\n\n return arg_2"} +{"_id": "doc_4690", "title": "", "text": "def Func(arg_0):\n \"\"\"Add function 'prerank' argument parsers.\"\"\"\n\n arg_1 = arg_0.add_parser(\"prerank\", help=\"Run GSEApy Prerank tool on preranked gene list.\")\n\n # group for input files\n arg_2 = arg_1.add_argument_group(\"Input files arguments\")\n arg_2.add_argument(\"-r\", \"--rnk\", dest=\"rnk\", action=\"store\", type=str, required=True,\n help=\"Ranking metric file in .rnk format. Same with GSEA.\")\n arg_2.add_argument(\"-g\", \"--gmt\", dest=\"gmt\", action=\"store\", type=str, required=True,\n help=\"Gene set database in GMT format. Same with GSEA.\")\n arg_2.add_argument(\"-l\", \"--label\", action='store', nargs=2, dest='label',\n metavar=('pos', 'neg'), type=str, default=('Pos','Neg'),\n help=\"The phenotype label argument need two parameters to define. Default: ('Pos','Neg')\")\n\n # group for output files\n arg_3 = arg_1.add_argument_group(\"Output arguments\")\n add_output_option(arg_3)\n\n # group for General options.\n arg_4 = arg_1.add_argument_group(\"GSEA advanced arguments\")\n arg_4.add_argument(\"-n\", \"--permu-num\", dest = \"n\", action=\"store\", type=int, default=1000, metavar='nperm',\n help=\"Number of random permutations. For calculating esnulls. Default: 1000\")\n arg_4.add_argument(\"--min-size\", dest=\"mins\", action=\"store\", type=int, default=15, metavar='int',\n help=\"Min size of input genes presented in Gene Sets. Default: 15\")\n arg_4.add_argument(\"--max-size\", dest = \"maxs\", action=\"store\", type=int, default=500, metavar='int',\n help=\"Max size of input genes presented in Gene Sets. Default: 500\")\n arg_4.add_argument(\"-w\", \"--weight\", action='store', dest='weight', default=1.0, type=float, metavar='float',\n help='Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1',)\n arg_4.add_argument(\"-a\", \"--ascending\", action='store_true', dest='ascending', default=False,\n help='Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. 
Default: False.')\n arg_4.add_argument(\"-s\", \"--seed\", dest = \"seed\", action=\"store\", type=int, default=None, metavar='',\n help=\"Number of random seed. Default: None\")\n arg_4.add_argument(\"-p\", \"--threads\", dest = \"threads\", action=\"store\", type=int, default=1, metavar='procs',\n help=\"Number of Processes you are going to use. Default: 1\")\n\n return"} +{"_id": "doc_4691", "title": "", "text": "def Func(arg_0):\n \"\"\"Add function 'plot' argument parsers.\"\"\"\n\n arg_1 = arg_0.add_parser(\"replot\", help=\"Reproduce GSEA desktop output figures.\")\n\n arg_2 = arg_1.add_argument_group(\"Input arguments\")\n\n arg_2.add_argument(\"-i\", \"--indir\", action=\"store\", dest=\"indir\", required=True, metavar='GSEA_dir',\n help=\"The GSEA desktop results directroy that you want to reproduce the figure \")\n add_output_option(arg_2)\n #add_output_group( argparser_plot )\n arg_2.add_argument(\"-w\", \"--weight\", action='store', dest='weight', default=1.0, type=float, metavar='float',\n help='Weighted_score of rank_metrics. Please Use the same value in GSEA. Choose from (0, 1, 1.5, 2),default: 1',)\n\n return"} +{"_id": "doc_4692", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1, \n arg_4=1000, arg_5=arg_6.random.RandomState(), arg_9=False, arg_10=False):\n \"\"\"This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.\n\n :param gene_list: The ordered gene list gene_name_list, rank_metric.index.values\n :param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.\n :param weighted_score_type: It's the same with gsea's weighted_score method. Weighting by the correlation\n is a very reasonable choice that allows significant gene sets with less than perfect coherence.\n options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of\n coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1\n might be appropriate. On the other hand, if one uses sets with large number of genes and only\n a small subset of those is expected to be coherent, then one could consider using p > 1.\n Our recommendation is to use p = 1 and use other settings only if you are very experienced\n with the method and its behavior.\n\n :param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in\n the gene list. Or rankings, rank_metric.values\n :param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value\n equal to the permutation number.\n :param rs: Random state for initializing gene list shuffling. 
Default: np.random.RandomState(seed=None)\n\n :return:\n\n ES: Enrichment score (real number between -1 and +1)\n\n ESNULL: Enrichment score calculated from random permutations.\n\n Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.\n\n RES: Numerical vector containing the running enrichment score for all locations in the gene list .\n\n \"\"\"\n\n arg_11 = len(arg_0)\n # Test whether each element of a 1-D array is also present in a second array\n # It's more intuitive here than original Func source code.\n # use .astype to covert bool to integer\n arg_12 = arg_6.in1d(arg_0, arg_2, assume_unique=True).astype(int) # notice that the sign is 0 (no tag) or 1 (tag)\n\n if arg_3 == 0 :\n arg_1 = arg_6.repeat(1, arg_11)\n else:\n arg_1 = arg_6.abs(arg_1)**arg_3\n\n # get indices of tag_indicator\n arg_13 = arg_6.flatnonzero(arg_12).tolist()\n # if used for compute esnull, set esnull equal to permutation number, e.g. 1000\n # else just compute enrichment scores\n # set axis to 1, because we have 2D array\n arg_14 = 1\n arg_12 = arg_6.tile(arg_12, (arg_4+1,1))\n arg_1 = arg_6.tile(arg_1,(arg_4+1,1))\n # gene list permutation\n for arg_15 in range(arg_4): arg_5.shuffle(arg_12[arg_15])\n # np.apply_along_axis(rs.shuffle, 1, tag_indicator)\n\n arg_16 = arg_12.sum(arg_14=arg_14, keepdims=True)\n arg_17 = arg_6.sum(arg_1*arg_12, arg_14=arg_14, keepdims=True)\n # compute ES score, the code below is identical to gsea Func method.\n arg_18 = 1 - arg_12\n arg_19 = arg_11 - arg_16\n arg_20 = 1.0/arg_17\n arg_21 = 1.0/arg_19\n\n arg_22 = arg_6.cumsum(arg_12 * arg_1 * arg_20 - arg_18 * arg_21, arg_14=arg_14)\n\n if arg_10: arg_22 = arg_22 / arg_11\n if arg_9:\n arg_23 = arg_22.sum(arg_14=arg_14)\n else:\n arg_24, arg_25 = arg_22.max(arg_14=arg_14), arg_22.min(arg_14=arg_14)\n arg_23 = arg_6.where(arg_6.abs(arg_24) > arg_6.abs(arg_25), arg_24, arg_25)\n # extract values\n arg_26, arg_27, arg_22 = arg_23[-1], arg_23[:-1], arg_22[-1,:]\n\n return arg_26, arg_27, arg_13, arg_22"} +{"_id": "doc_4693", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6, arg_7=arg_8.random.RandomState()):\n \"\"\"Build shuffled ranking matrix when permutation_type eq to phenotype.\n\n :param exprs: gene_expression DataFrame, gene_name indexed.\n :param str method: calculate correlation or ranking. methods including:\n 1. 'signal_to_noise'.\n 2. 't_test'.\n 3. 'ratio_of_classes' (also referred to as fold change).\n 4. 'diff_of_classes'.\n 5. 'log2_ratio_of_classes'.\n :param int permuation_num: how many times of classes is being shuffled\n :param str pos: one of labels of phenotype's names.\n :param str neg: one of labels of phenotype's names.\n :param list classes: a list of phenotype labels, to specify which column of\n dataframe belongs to what class of phenotype.\n :param bool ascending: bool. Sort ascending vs. 
descending.\n\n :return:\n returns two 2d ndarray with shape (nperm, gene_num).\n\n | cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.\n | cor_mat: sorted and permutated (exclude last row) ranking matrix.\n\n \"\"\"\n # S: samples, G: gene number\n arg_11, arg_12 = arg_0.shape\n # genes = exprs.index.values\n arg_13 = arg_0.values.T\n arg_14 = arg_8.tile(arg_13, (arg_2+1,1,1))\n # random shuffle on the first dim, last matrix is not shuffled\n for arg_15 in arg_14[:-1]: arg_7.shuffle(arg_15)\n arg_5 = arg_8.array(arg_5)\n arg_3 = arg_5 == arg_3\n arg_4 = arg_5 == arg_4\n arg_16 = arg_14[:,arg_3,:].mean(axis=1)\n arg_17 = arg_14[:,arg_4,:].mean(axis=1)\n arg_18 = arg_14[:,arg_3,:].std(axis=1, ddof=1)\n arg_19 = arg_14[:,arg_4,:].std(axis=1, ddof=1)\n\n if arg_1 == 'signal_to_noise':\n arg_20 = (arg_16 - arg_17)/(arg_18 + arg_19)\n elif arg_1 == 't_test':\n arg_21 = 1.0/arg_11\n arg_20 = (arg_16 - arg_17)/ arg_8.sqrt(arg_21*arg_18**2 + arg_21*arg_19**2)\n elif arg_1 == 'ratio_of_classes':\n arg_20 = arg_16 / arg_17\n elif arg_1 == 'diff_of_classes':\n arg_20 = arg_16 - arg_17\n elif arg_1 == 'log2_ratio_of_classes':\n arg_20 = arg_8.log2(arg_16 / arg_17)\n else:\n logging.error(\"Please provide correct method name!!!\")\n sys.exit(0)\n # return matix[nperm+1, perm_cors]\n arg_22 = arg_20.argsort()\n # ndarray: sort in place\n arg_20.sort()\n # genes_mat = genes.take(cor_mat_ind)\n if arg_6: return arg_22, arg_20\n # descending order of ranking and genes\n # return genes_mat[:,::-1], cor_mat[:,::-1]\n return arg_22[:, ::-1], arg_20[:, ::-1]"} +{"_id": "doc_4694", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute nominal pvals, normalized ES, and FDR q value.\n\n For a given NES(S) = NES* >= 0. The FDR is the ratio of the percentage of all (S,pi) with\n NES(S,pi) >= 0, whose NES(S,pi) >= NES*, divided by the percentage of\n observed S wih NES(S) >= 0, whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.\n \"\"\"\n # For a zero by zero division (undetermined, results in a NaN),\n np.seterr(divide='ignore', invalid='ignore')\n # import warnings\n # warnings.simplefilter(\"ignore\")\n arg_2 = np.array(arg_0)\n arg_3 = np.array(arg_1)\n logging.debug(\"Start to compute pvals..................................\")\n # compute pvals.\n arg_4 = gsea_pval(arg_2, arg_3).tolist()\n\n logging.debug(\"Compute nes and nesnull.................................\")\n # nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)\n\n # new normalized enrichment score implementation.\n # this could speed up significantly.\n arg_5 = (arg_3*(arg_3>=0)).mean(axis=1)\n arg_6 = (arg_3*(arg_3<0)).mean(axis=1)\n arg_7 = np.where(arg_2>=0, arg_2/arg_5, -arg_2/arg_6)\n arg_8 = np.where(arg_3>=0, arg_3/arg_5[:,np.newaxis],\n -arg_3/arg_6[:,np.newaxis])\n\n logging.debug(\"start to compute fdrs..................................\")\n\n # FDR null distribution histogram\n # create a histogram of all NES(S,pi) over all S and pi\n # Use this null distribution to compute an FDR q value,\n # vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])\n # nvals = np.array(sorted(vals))\n # or\n arg_9 = np.sort(arg_8.flatten())\n arg_10 = np.sort(arg_7)\n arg_11 = []\n # FDR computation\n for arg_12 in range(len(arg_0)):\n arg_13 = arg_7[arg_12]\n # use the same pval method to calculate fdr\n if arg_13 >= 0:\n arg_14 = int(len(arg_9) - np.searchsorted(arg_9, 0, side=\"left\"))\n arg_15 = int(len(arg_9) - np.searchsorted(arg_9, arg_13, side=\"left\"))\n arg_16 = len(arg_10) - 
int(np.searchsorted(arg_10, 0, side=\"left\"))\n arg_17 = len(arg_10) - int(np.searchsorted(arg_10, arg_13, side=\"left\"))\n # allPos = (nvals >= 0).sum()\n # allHigherAndPos = (nvals >= nes).sum()\n # nesPos = (nnes >=0).sum()\n # nesHigherAndPos = (nnes >= nes).sum()\n else:\n arg_14 = int(np.searchsorted(arg_9, 0, side=\"left\"))\n arg_15 = int(np.searchsorted(arg_9, arg_13, side=\"right\"))\n arg_16 = int(np.searchsorted(arg_10, 0, side=\"left\"))\n arg_17 = int(np.searchsorted(arg_10, arg_13, side=\"right\"))\n # allPos = (nvals < 0).sum()\n # allHigherAndPos = (nvals < nes).sum()\n # nesPos = (nnes < 0).sum()\n # nesHigherAndPos = (nnes < nes).sum()\n \n try:\n arg_18 = arg_15/float(arg_14) \n arg_19 = arg_17/float(arg_16)\n arg_20 = arg_18 / arg_19\n arg_11.append(arg_20 if arg_20 < 1 else 1.0)\n except:\n arg_11.append(1000000000.0)\n\n logging.debug(\"Statistical testing finished.............................\")\n\n return zip(arg_0, arg_7, arg_4, arg_11)"} +{"_id": "doc_4695", "title": "", "text": "def Func(arg_0):\n \"\"\"Get available marts and their names.\"\"\"\n\n arg_1 = pd.Series(arg_0.names, name=\"Name\")\n arg_2 = pd.Series(arg_0.displayNames, name=\"Description\")\n\n return pd.concat([arg_1, arg_2], axis=1)"} +{"_id": "doc_4696", "title": "", "text": "def Func(arg_0, arg_1='hsapiens_gene_ensembl', arg_2=[], \n arg_3={}, arg_4=None):\n \"\"\"mapping ids using BioMart. \n\n :param dataset: str, default: 'hsapiens_gene_ensembl'\n :param attributes: str, list, tuple\n :param filters: dict, {'filter name': list(filter value)}\n :param host: www.ensembl.org, asia.ensembl.org, useast.ensembl.org\n :return: a dataframe contains all attributes you selected.\n\n **Note**: it will take a couple of minutes to get the results.\n A xml template for Funcing biomart. 
(see https://gist.github.com/keithshep/7776579)\n \n exampleTaxonomy = \"mmusculus_gene_ensembl\"\n exampleGene = \"ENSMUSG00000086981,ENSMUSG00000086982,ENSMUSG00000086983\"\n urlTemplate = \\\n '''http://ensembl.org/biomart/martservice?Func=''' \\\n '''''' \\\n '''''' \\\n '''''' \\\n '''''' \\\n '''''' \\\n '''''' \\\n '''''' \\\n '''''' \\\n '''''' \n \n exampleURL = urlTemplate % (exampleTaxonomy, exampleGene)\n req = requests.get(exampleURL, stream=True)\n \n \"\"\"\n if not arg_2: \n arg_2 = ['ensembl_gene_id', 'external_gene_name', 'entrezgene', 'go_id'] \n # i=0\n # while (self.host is None) and (i < 3):\n # self.host = self.ghosts[i]\n # i +=1 \n arg_0.new_Func()\n # 'mmusculus_gene_ensembl'\n arg_0.add_dataset_to_xml(arg_1)\n for arg_5 in arg_2:\n arg_0.add_attribute_to_xml(arg_5)\n # add filters\n if arg_3:\n for arg_6, arg_7 in arg_3.items(): \n if isinstance(arg_7, list): arg_7 = \",\".join(arg_7)\n arg_0.add_filter_to_xml(arg_6, arg_7)\n\n arg_8 = arg_0.get_xml()\n arg_9 = super(Biomart, arg_0).Func(arg_8)\n arg_10 = pd.read_csv(StringIO(arg_9), header=None, sep=\"\\t\",\n names=arg_2, index_col=None)\n # save file to cache path.\n if arg_4 is None: \n mkdirs(DEFAULT_CACHE_PATH)\n arg_4 = os.path.join(DEFAULT_CACHE_PATH, \"{}.background.genes.txt\".format(arg_1))\n arg_10.to_csv(arg_4, sep=\"\\t\", index=False)\n \n return arg_10"} +{"_id": "doc_4697", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"ssGSEA_\", arg_3='rank', arg_4=15, arg_5=2000,\n arg_6=0, arg_7=0.25, arg_8=True, arg_9=False, arg_10=1,\n arg_11=(7,6), arg_12='pdf', arg_13=20, arg_14=False, arg_15=None, arg_16=False):\n \"\"\"Run Gene Set Enrichment Analysis with single sample GSEA tool\n\n :param data: Expression table, pd.Series, pd.DataFrame, GCT file, or .rnk file format.\n :param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.\n :param outdir: Results output directory.\n :param str sample_norm_method: \"Sample normalization method. Choose from {'rank', 'log', 'log_rank'}. Default: rank.\n\n 1. 'rank': Rank your expression data, and transform by 10000*rank_dat/gene_numbers\n 2. 'log' : Do not rank, but transform data by log(data + exp(1)), while data = data[data<1] =1.\n 3. 'log_rank': Rank your expression data, and transform by log(10000*rank_dat/gene_numbers+ exp(1))\n 4. 'custom': Do nothing, and use your own rank value to calculate enrichment score.\n \n see here: https://github.com/GSEA-MSigDB/ssGSEAProjection-gpmodule/blob/master/src/ssGSEAProjection.Library.R, line 86\n\n :param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.\n :param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 2000.\n :param int permutation_num: Number of permutations for significance computation. Default: 0.\n :param str weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default:0.25.\n :param bool scale: If True, normalize the scores by number of genes in the gene sets.\n :param bool ascending: Sorting order of rankings. Default: False.\n :param int processes: Number of Processes you are going to use. Default: 1.\n :param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [7,6].\n :param str format: Matplotlib figure format. Default: 'pdf'.\n :param int graph_num: Plot graphs for top sets of each phenotype.\n :param bool no_plot: If equals to True, no figure will be drawn. Default: False.\n :param seed: Random seed. expect an integer. 
Default:None.\n :param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.\n\n :return: Return a ssGSEA obj. \n All results store to a dictionary, access enrichment score by obj.resultsOnSamples,\n and normalized enrichment score by obj.res2d.\n if permutation_num > 0, additional results contain::\n\n | {es: enrichment score,\n | nes: normalized enrichment score,\n | p: P-value,\n | fdr: FDR,\n | size: gene set size,\n | matched_size: genes matched to the data,\n | genes: gene names from the data set\n | ledge_genes: leading edge genes, if permutation_num >0}\n\n\n \"\"\"\n\n arg_17 = SingleSampleGSEA(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6, arg_7, arg_8, arg_9,\n arg_10, arg_11, arg_12, arg_13, arg_14, arg_15, arg_16)\n arg_17.run()\n return arg_17"} +{"_id": "doc_4698", "title": "", "text": "def Func(arg_0, arg_1, arg_2='GSEA_Prerank', arg_3='Pos', arg_4='Neg',\n arg_5=15, arg_6=500, arg_7=1000, arg_8=1,\n arg_9=False, arg_10=1, arg_11=(6.5,6), arg_12='pdf',\n arg_13=20, arg_14=False, arg_15=None, arg_16=False):\n \"\"\" Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.\n\n :param rnk: pre-ranked correlation table or pandas DataFrame. Same input with ``GSEA`` .rnk file.\n :param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.\n :param outdir: results output directory.\n :param int permutation_num: Number of permutations for significance computation. Default: 1000.\n :param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.\n :param int max_size: Maximum allowed number of genes from gene set also the data set. Defaults: 500.\n :param str weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default:1.\n :param bool ascending: Sorting order of rankings. Default: False.\n :param int processes: Number of Processes you are going to use. Default: 1.\n :param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].\n :param str format: Matplotlib figure format. Default: 'pdf'.\n :param int graph_num: Plot graphs for top sets of each phenotype.\n :param bool no_plot: If equals to True, no figure will be drawn. Default: False.\n :param seed: Random seed. expect an integer. Default:None.\n :param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.\n\n :return: Return a Prerank obj. All results store to a dictionary, obj.results,\n where contains::\n\n | {es: enrichment score,\n | nes: normalized enrichment score,\n | p: P-value,\n | fdr: FDR,\n | size: gene set size,\n | matched_size: genes matched to the data,\n | genes: gene names from the data set\n | ledge_genes: leading edge genes}\n\n\n \"\"\"\n arg_17 = Prerank(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8,\n arg_9, arg_10, arg_11, arg_12, arg_13, arg_14, arg_15, arg_16)\n arg_17.run()\n return arg_17"} +{"_id": "doc_4699", "title": "", "text": "def Func(arg_0, arg_1='GSEA_Replot', arg_2=1,\n arg_3=3, arg_4=1000, arg_5=(6.5,6), arg_6=20, arg_7='pdf', arg_8=False):\n \"\"\"The main function to reproduce GSEA desktop outputs.\n\n :param indir: GSEA desktop results directory. In the sub folder, you must contain edb file folder.\n :param outdir: Output directory.\n :param float weighted_score_type: weighted score type. choose from {0,1,1.5,2}. Default: 1.\n :param list figsize: Matplotlib output figure figsize. 
Default: [6.5,6].\n :param str format: Matplotlib output figure format. Default: 'pdf'.\n :param int min_size: Min size of input genes presented in Gene Sets. Default: 3.\n :param int max_size: Max size of input genes presented in Gene Sets. Default: 5000.\n You are not encouraged to use min_size, or max_size argument in :func:`Func` function.\n Because gmt file has already been filtered.\n :param verbose: Bool, increase output verbosity, print out progress of your job, Default: False.\n\n :return: Generate new figures with selected figure format. Default: 'pdf'.\n\n \"\"\"\n arg_9 = Replot(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5, arg_6, arg_7, arg_8)\n arg_9.run()\n\n return"} +{"_id": "doc_4700", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"load gene set dict\"\"\"\n\n if isinstance(arg_2, dict):\n arg_3 = arg_2\n elif isinstance(arg_2, str):\n arg_3 = arg_0.parse_gmt(arg_2)\n else:\n raise Exception(\"Error parsing gmt parameter for gene sets\")\n \n arg_4 = list(arg_3.keys())\n arg_0.n_genesets = len(arg_4)\n for arg_6 in arg_4:\n arg_7 = arg_3.get(arg_6)\n if isinstance(arg_7, set):\n arg_7 = list(arg_7)\n arg_3[arg_6] = arg_7\n arg_8 = np.in1d(arg_1, arg_7, assume_unique=True)\n arg_9 = arg_8.sum()\n if arg_0.min_size <= arg_9 <= arg_0.max_size: continue\n del arg_3[arg_6]\n\n arg_10 = len(arg_4) - len(arg_3)\n arg_0._logger.info(\"%04d gene_sets have been filtered out when max_size=%s and min_size=%s\"%(arg_10, arg_0.max_size, arg_0.min_size))\n\n if arg_10 == len(arg_4):\n arg_0._logger.error(\"No gene sets passed through filtering condition!!!, try new parameters again!\\n\" +\\\n \"Note: check gene name, gmt file format, or filtering size.\" )\n sys.exit(0)\n\n arg_0._gmtdct=arg_3\n return arg_3"} +{"_id": "doc_4701", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" download enrichr libraries.\"\"\"\n arg_0._logger.info(\"Downloading and generating Enrichr library gene sets......\")\n arg_2 = retry(5)\n # queery string\n arg_3 = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'\n arg_4 = '?mode=text&libraryName=%s'\n # get\n arg_5 = arg_2.get( arg_3 + arg_4 % arg_1, timeout=None)\n if not arg_5.ok:\n raise Exception('Error fetching enrichment results, check internet connection first.')\n # reformat to dict and save to disk\n mkdirs(DEFAULT_CACHE_PATH)\n arg_6 = {}\n arg_7 = \"enrichr.%s.gmt\"%arg_1\n arg_8 = open(os.path.join(DEFAULT_CACHE_PATH, arg_7), \"w\")\n for arg_9 in arg_5.iter_lines(chunk_size=1024, decode_unicode='utf-8'):\n arg_9=arg_9.strip()\n arg_10 = arg_9.split(\"\\t\")[0]\n arg_11 = list(map(lambda x: x.split(\",\")[0], arg_9.split(\"\\t\")[2:]))\n arg_6.update({ arg_10: arg_11})\n arg_12 = \"%s\\t\\t%s\\n\"%(arg_10, \"\\t\".join(arg_11))\n arg_8.write(arg_12)\n arg_8.close()\n\n return arg_6"} +{"_id": "doc_4702", "title": "", "text": "def Func(arg_0):\n \"\"\"GSEA main procedure\"\"\"\n\n assert arg_0.permutation_type in [\"phenotype\", \"gene_set\"]\n assert arg_0.min_size <= arg_0.max_size\n\n # Start Analysis\n arg_0._logger.info(\"Parsing data files for GSEA.............................\")\n # phenotype labels parsing\n arg_1, arg_2, arg_3 = gsea_cls_parser(arg_0.classes)\n # select correct expression genes and values.\n arg_4 = arg_0.load_data(arg_3)\n # data frame must have length > 1\n assert len(arg_4) > 1\n # ranking metrics calculation.\n arg_5 = ranking_metric(df=arg_4, method=arg_0.method, pos=arg_1, neg=arg_2,\n classes=arg_3, ascending=arg_0.ascending)\n arg_0.ranking = arg_5\n # filtering out gene sets and build gene 
sets dictionary\n arg_7 = arg_0.load_gmt(gene_list=arg_5.index.values, arg_7=arg_0.gene_sets)\n\n arg_0._logger.info(\"%04d gene_sets used for further statistical testing.....\"% len(arg_7))\n arg_0._logger.info(\"Start to Func GSEA...Might take a while..................\")\n # cpu numbers\n arg_0._set_cores()\n # compute ES, NES, pval, FDR, RES\n arg_8 = arg_4 if arg_0.permutation_type =='phenotype' else arg_5\n arg_9,arg_10,arg_11, arg_12 = gsea_compute_tensor(data=arg_8, arg_7=arg_7, n=arg_0.permutation_num,\n weighted_score_type=arg_0.weighted_score_type,\n permutation_type=arg_0.permutation_type,\n method=arg_0.method,\n pheno_pos=arg_1, pheno_neg=arg_2,\n classes=arg_3, ascending=arg_0.ascending,\n processes=arg_0._processes, seed=arg_0.seed)\n \n arg_0._logger.info(\"Start to generate GSEApy reports and figures............\")\n arg_13 = zip(arg_12, list(arg_9), arg_10, arg_11)\n arg_0._save_results(zipdata=arg_13, outdir=arg_0.outdir, module=arg_0.module,\n arg_7=arg_7, rank_metric=arg_5, permutation_type=arg_0.permutation_type)\n\n # reorder datarame for heatmap\n arg_0._heatmat(df=arg_4.loc[arg_5.index], classes=arg_3, \n pheno_pos=arg_1, pheno_neg=arg_2)\n # Plotting\n if not arg_0._noplot:\n arg_0._plotting(rank_metric=arg_5, results=arg_0.results,\n graph_num=arg_0.graph_num, outdir=arg_0.outdir,\n figsize=arg_0.figsize, format=arg_0.format,\n pheno_pos=arg_1, pheno_neg=arg_2)\n\n arg_0._logger.info(\"Congratulations. GSEApy ran successfully.................\\n\")\n if arg_0._outdir is None:\n arg_0._tmpdir.cleanup()\n\n return"} +{"_id": "doc_4703", "title": "", "text": "def Func(arg_0):\n \"\"\"GSEA prerank workflow\"\"\"\n\n assert arg_0.min_size <= arg_0.max_size\n\n # parsing rankings\n arg_1 = arg_0._load_ranking(arg_0.rnk)\n assert len(arg_1) > 1\n\n # cpu numbers\n arg_0._set_cores()\n # Start Analysis\n arg_0._logger.info(\"Parsing data files for GSEA.............................\")\n # filtering out gene sets and build gene sets dictionary\n arg_2 = arg_0.load_gmt(gene_list=arg_1.index.values, arg_2=arg_0.gene_sets)\n\n arg_0._logger.info(\"%04d gene_sets used for further statistical testing.....\"% len(arg_2))\n arg_0._logger.info(\"Start to Func GSEA...Might take a while..................\")\n # compute ES, NES, pval, FDR, RES\n arg_3, arg_4,arg_5, arg_6 = gsea_compute(data=arg_1, n=arg_0.permutation_num, arg_2=arg_2,\n weighted_score_type=arg_0.weighted_score_type,\n permutation_type='gene_set', method=None,\n pheno_pos=arg_0.pheno_pos, pheno_neg=arg_0.pheno_neg,\n classes=None, ascending=arg_0.ascending,\n processes=arg_0._processes, seed=arg_0.seed)\n arg_0._logger.info(\"Start to generate gseapy reports, and produce figures...\")\n arg_7 = zip(arg_6, list(arg_3), arg_4, arg_5)\n arg_0._save_results(zipdata=arg_7, outdir=arg_0.outdir, module=arg_0.module,\n arg_2=arg_2, rank_metric=arg_1, permutation_type=\"gene_sets\")\n\n # Plotting\n if not arg_0._noplot:\n arg_0._plotting(rank_metric=arg_1, results=arg_0.results,\n graph_num=arg_0.graph_num, outdir=arg_0.outdir,\n figsize=arg_0.figsize, format=arg_0.format,\n pheno_pos=arg_0.pheno_pos, pheno_neg=arg_0.pheno_neg)\n\n arg_0._logger.info(\"Congratulations. 
GSEApy Funcs successfully................\\n\")\n if arg_0._outdir is None:\n arg_0._tmpdir.cleanup()\n\n return"} +{"_id": "doc_4704", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Single Sample GSEA workflow.\n multiprocessing utility on samples.\n \"\"\"\n\n # df.index.values are gene_names\n # Save each sample results to odict\n arg_0.resultsOnSamples = OrderedDict()\n arg_4 = arg_0.outdir\n # run ssgsea for gct expression matrix\n #multi-threading\n arg_5 = sorted(arg_2.keys())\n arg_6=[]\n arg_7=[]\n arg_8=[]\n arg_9 = Pool(processes=arg_0._processes)\n for arg_10, arg_11 in arg_1.iteritems():\n #prepare input\n arg_12 = arg_11.sort_values(ascending=arg_0.ascending)\n arg_8.append(arg_12)\n arg_7.append(arg_10)\n arg_13, arg_14 = arg_12.index.values, arg_12.values\n arg_15 = np.random.RandomState(arg_0.seed)\n # apply_async\n arg_6.append(arg_9.apply_async(enrichment_score_tensor,\n args=(arg_13, arg_14, arg_2,\n arg_0.weighted_score_type,\n arg_0.permutation_num, arg_15, True,\n arg_0.scale)))\n arg_9.close()\n arg_9.join()\n # save results and plotting\n for arg_16, arg_17 in enumerate(arg_6):\n arg_10, arg_18 = arg_7[arg_16], arg_8[arg_16]\n arg_0._logger.info(\"Calculate Enrichment Score for Sample: %s \"%arg_10)\n arg_19, arg_20, arg_21, arg_22 = arg_17.get()\n # create results subdir\n arg_0.outdir= os.path.join(arg_4, str(arg_10))\n mkdirs(arg_0.outdir)\n # save results\n arg_0.resultsOnSamples[arg_10] = pd.Series(data=arg_19, index=arg_5, arg_10=arg_10)\n # plotting\n if arg_0._noplot: continue\n arg_0._logger.info(\"Plotting Sample: %s \\n\" % arg_10)\n for arg_16, arg_23 in enumerate(arg_5):\n arg_23 = arg_23.replace('/','_').replace(\":\",\"_\")\n arg_24 = '{0}/{1}.{2}.{3}'.format(arg_0.outdir, arg_23, arg_0.module, arg_0.format)\n gseaplot(rank_metric=arg_18, arg_23=arg_23, \n hits_indices=arg_21[arg_16], nes=arg_19[arg_16], pval=1, fdr=1, \n arg_22=arg_22[arg_16], pheno_pos='', pheno_neg='', \n figsize=arg_0.figsize, ofname=arg_24)\n # save es, nes to file\n arg_0._save(arg_4)\n\n return"} +{"_id": "doc_4705", "title": "", "text": "def Func(arg_0):\n \"\"\"main replot function\"\"\"\n assert arg_0.min_size <= arg_0.max_size\n assert arg_0.fignum > 0\n import glob\n from bs4 import BeautifulSoup\n\n # parsing files.......\n try:\n arg_1 = glob.glob(arg_0.indir+'*/edb/results.edb')[0]\n arg_2 = glob.glob(arg_0.indir+'*/edb/*.rnk')[0]\n arg_3 = glob.glob(arg_0.indir+'*/edb/gene_sets.gmt')[0]\n except IndexError as e:\n sys.stderr.write(\"Could not locate GSEA files in the given directory!\")\n sys.exit(1)\n # extract sample names from .cls file\n arg_4 = glob.glob(arg_0.indir+'*/edb/*.cls')\n if arg_4:\n arg_5, arg_6, arg_7 = gsea_cls_parser(arg_4[0])\n else:\n # logic for prerank results\n arg_5, arg_6 = '',''\n # start reploting\n arg_0.gene_sets=arg_3\n # obtain gene sets\n arg_9 = arg_0.parse_gmt(gmt=arg_3)\n # obtain rank_metrics\n arg_10 = arg_0._load_ranking(arg_2)\n arg_11 = arg_10.values\n arg_12 = arg_10.index.values\n # extract each enriment term in the results.edb files and plot.\n arg_13 = BeautifulSoup(open(arg_1), features='xml')\n arg_14 = len(arg_13.findAll('DTG'))\n arg_15 = arg_0.fignum if arg_0.fignum <= arg_14 else arg_14\n for arg_16 in range(arg_15):\n # extract statistical resutls from results.edb file\n arg_17, arg_18, arg_19, arg_20, arg_21= gsea_edb_parser(arg_1, index=arg_16)\n arg_22 = arg_9.get(arg_17)\n # calculate enrichment score\n arg_23 = enrichment_score(arg_12=arg_12, \n arg_11=arg_11,\n arg_22=arg_22, \n 
weighted_score_type=arg_0.weighted_score_type,\n nperm=0)[-1]\n # plotting\n arg_24 = arg_17.replace('/','_').replace(\":\",\"_\")\n arg_25 = '{0}/{1}.{2}.{3}'.format(arg_0.outdir, arg_24, arg_0.module, arg_0.format)\n gseaplot(arg_10=arg_10, arg_24=arg_17, \n hits_indices=arg_18, arg_19=arg_19, arg_20=arg_20, arg_21=arg_21, \n arg_23=arg_23, pheno_pos=arg_5, pheno_neg=arg_6, \n figsize=arg_0.figsize, ofname=arg_25)\n\n arg_0._logger.info(\"Congratulations! Your plots have been reproduced successfully!\\n\")"} +{"_id": "doc_4706", "title": "", "text": "def Func(arg_0, arg_1, arg_2='human', arg_3='',\n arg_4='Enrichr', arg_5='hsapiens_gene_ensembl', arg_6=0.05,\n arg_7='pdf', arg_8=(8,6), arg_9=10, arg_10=False, arg_11=False):\n \"\"\"Enrichr API.\n\n :param gene_list: Flat file with list of genes, one gene id per row, or a python list object\n :param gene_sets: Enrichr Library to query. Required Func library name(s). Separate each name by comma.\n :param organism: Enrichr supported organism. Select from (human, mouse, yeast, fly, fish, worm).\n see here for details: https://amp.pharm.mssm.edu/modEnrichr\n :param description: name of analysis. optional.\n :param outdir: Output file directory\n :param float cutoff: Adjusted P-value (benjamini-hochberg correction) cutoff. Default: 0.05\n :param int background: BioMart dataset name for retrieving background gene information.\n This argument only works when gene_sets input is a gmt file or python dict.\n You could also specify a number by yourself, e.g. total expressed genes number.\n In this case, you will skip retrieving background infos from biomart.\n \n Use the code below to see valid background dataset names from BioMart.\n Here are example code:\n >>> from gseapy.parser import Biomart \n >>> bm = Biomart(verbose=False, host=\"asia.ensembl.org\")\n >>> ## view validated marts\n >>> marts = bm.get_marts()\n >>> ## view validated dataset\n >>> datasets = bm.get_datasets(mart='ENSEMBL_MART_ENSEMBL')\n\n :param str format: Output figure format supported by matplotlib,('pdf','png','eps'...). Default: 'pdf'.\n :param list figsize: Matplotlib figsize, accept a tuple or list, e.g. (width,height). Default: (6.5,6).\n :param bool no_plot: If equals to True, no figure will be drawn. 
Default: False.\n :param bool verbose: Increase output verbosity, print out progress of your job, Default: False.\n\n :return: An Enrichr object, which obj.res2d stores your last query, obj.results stores your all queries.\n \n \"\"\"\n arg_12 = Enrichr(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_6, arg_5, arg_7, arg_8, arg_9, arg_10, arg_11)\n arg_12.run()\n\n return arg_12"} +{"_id": "doc_4707", "title": "", "text": "def Func(arg_0):\n \"\"\"parse gene list\"\"\"\n if isinstance(arg_0.gene_list, list):\n arg_1 = arg_0.gene_list\n elif isinstance(arg_0.gene_list, pd.DataFrame):\n # input type is bed file\n if arg_0.gene_list.shape[1] >=3:\n arg_1= arg_0.gene_list.iloc[:,:3].apply(lambda x: \"\\t\".join([str(i) for i in x]), axis=1).tolist()\n # input type with weight values\n elif arg_0.gene_list.shape[1] == 2:\n arg_1= arg_0.gene_list.apply(lambda x: \",\".join([str(i) for i in x]), axis=1).tolist()\n else:\n arg_1 = arg_0.gene_list.squeeze().tolist()\n elif isinstance(arg_0.gene_list, pd.Series):\n arg_1 = arg_0.gene_list.squeeze().tolist()\n else:\n # get gene lists or bed file, or gene list with weighted values.\n arg_1=[]\n with open(arg_0.gene_list) as f:\n for arg_2 in f:\n arg_1.append(arg_2.strip())\n\n arg_0._isezid = all(map(arg_0._is_entrez_id, arg_1))\n if arg_0._isezid: \n arg_0._gls = set(map(int, arg_0._gls))\n else:\n arg_0._gls = arg_1\n\n return '\\n'.join(arg_1)"} +{"_id": "doc_4708", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" send gene list to enrichr server\"\"\"\n arg_3 = {\n 'list': (None, arg_1),\n 'description': (None, arg_0.descriptions)\n }\n # response\n arg_4 = requests.post(arg_2, files=arg_3)\n if not arg_4.ok:\n raise Exception('Error analyzing gene list')\n sleep(1)\n arg_5 = json.loads(arg_4.text)\n\n return arg_5"} +{"_id": "doc_4709", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Compare the genes sent and received to get successfully recognized genes\n '''\n arg_3 = requests.get('http://amp.pharm.mssm.edu/Enrichr/view?userListId=%s' % arg_2)\n if not arg_3.ok:\n raise Exception('Error getting gene list back')\n arg_4 = json.loads(arg_3.text)[\"genes\"]\n arg_5 = sum([1 for gene in arg_1 if gene in arg_4])\n arg_0._logger.info('{} genes successfully recognized by Enrichr'.format(arg_5))"} +{"_id": "doc_4710", "title": "", "text": "def Func(arg_0):\n \"\"\"get background gene\"\"\"\n\n # input is a file\n if os.path.isfile(arg_0.background):\n with open(arg_0.background) as b:\n arg_1 = b.readlines() \n arg_2 = [g.strip() for g in arg_1] \n return set(arg_2)\n \n # package included data\n arg_3 = resource_filename(\"gseapy\", \"data/{}.background.genes.txt\".format(arg_0.background))\n arg_4 = os.path.join(DEFAULT_CACHE_PATH, \"{}.background.genes.txt\".format(arg_0.background)) \n if os.path.exists(arg_4):\n arg_5 = pd.read_csv(arg_4,sep=\"\\t\")\n elif os.path.exists(arg_3):\n arg_5 = pd.read_csv(arg_3,sep=\"\\t\")\n else:\n # background is a biomart database name\n arg_0._logger.warning(\"Downloading %s for the first time. 
It might take a couple of miniutes.\"%arg_0.background)\n arg_6 = Biomart()\n arg_5 = arg_6.query(dataset=arg_0.background)\n arg_5.dropna(subset=['go_id'], inplace=True)\n arg_0._logger.info(\"using all annotated genes with GO_ID as background genes\")\n arg_5.dropna(subset=['entrezgene'], inplace=True) \n # input id type: entrez or gene_name\n if arg_0._isezid:\n arg_2 = arg_5['entrezgene'].astype(int)\n else:\n arg_2 = arg_5['external_gene_name']\n\n return set(arg_2)"} +{"_id": "doc_4711", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Perform the App's actions as configured.\"\"\"\r\n if arg_0.extra_args:\r\n sys.exit('{} takes no extra arguments'.format(arg_0.name))\r\n else:\r\n\r\n if arg_0._toggle_value:\r\n nbextensions.install_nbextension_python(\r\n _pkg_name, overwrite=True, symlink=False,\r\n user=arg_0.user, sys_prefix=arg_0.sys_prefix, prefix=None,\r\n nbextensions_dir=None, logger=None)\r\n else:\r\n nbextensions.uninstall_nbextension_python(\r\n _pkg_name, user=arg_0.user, sys_prefix=arg_0.sys_prefix,\r\n prefix=None, nbextensions_dir=None, logger=None)\r\n\r\n arg_0.toggle_nbextension_python(_pkg_name)\r\n arg_0.toggle_server_extension_python(_pkg_name)"} +{"_id": "doc_4712", "title": "", "text": "def Func(arg_0):\n \"\"\"Initializes client id and client secret based on the settings.\n\n Args:\n settings_instance: An instance of ``django.conf.settings``.\n\n Returns:\n A 2-tuple, the first item is the client id and the second\n item is the client secret.\n \"\"\"\n arg_1 = getattr(arg_0,\n 'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)\n if arg_1 is not None:\n return _load_client_secrets(arg_1)\n else:\n arg_2 = getattr(arg_0, \"GOOGLE_OAUTH2_CLIENT_ID\",\n None)\n arg_3 = getattr(arg_0,\n \"GOOGLE_OAUTH2_CLIENT_SECRET\", None)\n if arg_2 is not None and arg_3 is not None:\n return arg_2, arg_3\n else:\n raise exceptions.ImproperlyConfigured(\n \"Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or \"\n \"both GOOGLE_OAUTH2_CLIENT_ID and \"\n \"GOOGLE_OAUTH2_CLIENT_SECRET in settings.py\")"} +{"_id": "doc_4713", "title": "", "text": "def Func(arg_0):\n \"\"\" Gets a Credentials storage object provided by the Django OAuth2 Helper\n object.\n\n Args:\n request: Reference to the current request object.\n\n Returns:\n An :class:`oauth2.client.Storage` object.\n \"\"\"\n arg_1 = oauth2_settings.storage_model\n arg_2 = oauth2_settings.storage_model_user_property\n arg_3 = oauth2_settings.storage_model_credentials_property\n\n if arg_1:\n arg_4, arg_5 = arg_1.rsplit('.', 1)\n arg_6 = importlib.import_module(arg_4)\n arg_7 = getattr(arg_6, arg_5)\n return storage.DjangoORMStorage(arg_7,\n arg_2,\n arg_0.user,\n arg_3)\n else:\n # use session\n return dictionary_storage.DictionaryStorage(\n arg_0.session, key=_CREDENTIALS_KEY)"} +{"_id": "doc_4714", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Helper method to create a redirect response with URL params.\n\n This builds a redirect string that converts kwargs into a\n query string.\n\n Args:\n url_name: The name of the url to redirect to.\n kwargs: the query string param and their values to build.\n\n Returns:\n A properly formatted redirect string.\n \"\"\"\n arg_3 = urlresolvers.reverse(arg_0, arg_1=arg_1)\n arg_4 = parse.urlencode(arg_2, True)\n return \"{0}?{1}\".format(arg_3, arg_4)"} +{"_id": "doc_4715", "title": "", "text": "def Func(arg_0):\n \"\"\"Gets the authorized credentials for this flow, if they exist.\"\"\"\n # ORM storage requires a logged in user\n if (oauth2_settings.storage_model is None 
or\n arg_0.user.is_authenticated()):\n return get_storage(arg_0).get()\n else:\n return None"} +{"_id": "doc_4716", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the scopes associated with this object, kept up to\n date for incremental auth.\"\"\"\n if _credentials_from_request(arg_0.request):\n return (arg_0._scopes |\n _credentials_from_request(arg_0.request).scopes)\n else:\n return arg_0._scopes"} +{"_id": "doc_4717", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieve stored credential.\n\n Returns:\n A :class:`oauth2client.Credentials` instance or `None`.\n \"\"\"\n arg_1 = {arg_0.key_name: arg_0.key_value}\n arg_2 = arg_0.session.query(arg_0.model_class).filter_by(**arg_1)\n arg_3 = arg_2.first()\n\n if arg_3:\n arg_4 = getattr(arg_3, arg_0.property_name)\n if arg_4 and hasattr(arg_4, 'set_store'):\n arg_4.set_store(arg_0)\n return arg_4\n else:\n return None"} +{"_id": "doc_4718", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write a credentials to the SQLAlchemy datastore.\n\n Args:\n credentials: :class:`oauth2client.Credentials`\n \"\"\"\n arg_2 = {arg_0.key_name: arg_0.key_value}\n arg_3 = arg_0.session.query(arg_0.model_class).filter_by(**arg_2)\n arg_4 = arg_3.first()\n\n if not arg_4:\n arg_4 = arg_0.model_class(**arg_2)\n\n setattr(arg_4, arg_0.property_name, arg_1)\n arg_0.session.add(arg_4)"} +{"_id": "doc_4719", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete credentials from the SQLAlchemy datastore.\"\"\"\n arg_1 = {arg_0.key_name: arg_0.key_value}\n arg_0.session.query(arg_0.model_class).filter_by(**arg_1).delete()"} +{"_id": "doc_4720", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Utility function that creates JSON repr. of a credentials object.\n\n Over-ride is needed since PKCS#12 keys will not in general be JSON\n serializable.\n\n Args:\n strip: array, An array of names of members to exclude from the\n JSON.\n to_serialize: dict, (Optional) The properties for this object\n that will be serialized. 
This allows callers to\n modify before serializing.\n\n Returns:\n string, a JSON representation of this instance, suitable to pass to\n from_json().\n \"\"\"\n if arg_2 is None:\n arg_2 = copy.copy(arg_0.__dict__)\n arg_3 = arg_2.get(arg_4)\n if arg_3 is not None:\n arg_2[arg_4] = base64.b64encode(arg_3)\n return super(ServiceAccountCredentials, arg_0).Func(\n arg_1, arg_2=arg_2)"} +{"_id": "doc_4721", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=None, arg_4=None):\n \"\"\"Helper for factory constructors from JSON keyfile.\n\n Args:\n keyfile_dict: dict-like object, The parsed dictionary-like object\n containing the contents of the JSON keyfile.\n scopes: List or string, Scopes to use when acquiring an\n access token.\n token_uri: string, URI for OAuth 2.0 provider token endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's endpoints.\n revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's endpoints.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile contents.\n\n Raises:\n ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n KeyError, if one of the expected keys is not present in\n the keyfile.\n \"\"\"\n arg_5 = arg_1.get('type')\n if arg_5 != client.SERVICE_ACCOUNT:\n raise ValueError('Unexpected credentials type', arg_5,\n 'Expected', client.SERVICE_ACCOUNT)\n\n arg_6 = arg_1['client_email']\n arg_7 = arg_1['private_key']\n arg_8 = arg_1['private_key_id']\n arg_9 = arg_1['client_id']\n if not arg_3:\n arg_3 = arg_1.get('token_uri',\n oauth2client.GOOGLE_TOKEN_URI)\n if not arg_4:\n arg_4 = arg_1.get('revoke_uri',\n oauth2client.GOOGLE_REVOKE_URI)\n\n arg_10 = crypt.Signer.from_string(arg_7)\n arg_11 = arg_0(arg_6, arg_10, arg_2=arg_2,\n arg_8=arg_8,\n arg_9=arg_9, arg_3=arg_3,\n arg_4=arg_4)\n arg_11._private_key_pkcs8_pem = arg_7\n return arg_11"} +{"_id": "doc_4722", "title": "", "text": "def Func(arg_0, arg_1, arg_2='',\n arg_3=None, arg_4=None):\n\n \"\"\"Factory constructor from JSON keyfile by name.\n\n Args:\n filename: string, The location of the keyfile.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for OAuth 2.0 provider token endpoint.\n If unset and not present in the key file, defaults\n to Google's endpoints.\n revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n If unset and not present in the key file, defaults\n to Google's endpoints.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n KeyError, if one of the expected keys is not present in\n the keyfile.\n \"\"\"\n with open(arg_1, 'r') as file_obj:\n arg_5 = json.load(file_obj)\n return arg_0._from_parsed_json_keyfile(arg_5, arg_2,\n arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_4723", "title": "", "text": "def Func(arg_0, arg_1, arg_2='',\n arg_3=None, arg_4=None):\n \"\"\"Factory constructor from parsed JSON keyfile.\n\n Args:\n keyfile_dict: dict-like object, The parsed dictionary-like object\n containing the contents of the JSON keyfile.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for OAuth 2.0 provider token endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's endpoints.\n revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n If unset and not present 
in keyfile_dict, defaults\n to Google's endpoints.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n KeyError, if one of the expected keys is not present in\n the keyfile.\n \"\"\"\n return arg_0._from_parsed_json_keyfile(arg_1, arg_2,\n arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_4724", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate the assertion that will be used in the request.\"\"\"\n arg_1 = int(time.time())\n arg_2 = {\n 'aud': arg_0.token_uri,\n 'scope': arg_0._scopes,\n 'iat': arg_1,\n 'exp': arg_1 + arg_0.MAX_TOKEN_LIFETIME_SECS,\n 'iss': arg_0._service_account_email,\n }\n arg_2.update(arg_0._kwargs)\n return crypt.make_signed_jwt(arg_0._signer, arg_2,\n key_id=arg_0._private_key_id)"} +{"_id": "doc_4725", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Deserialize a JSON-serialized instance.\n\n Inverse to :meth:`to_json`.\n\n Args:\n json_data: dict or string, Serialized JSON (as a string or an\n already parsed dictionary) representing a credential.\n\n Returns:\n ServiceAccountCredentials from the serialized data.\n \"\"\"\n if not isinstance(arg_1, dict):\n arg_1 = json.loads(_helpers._from_bytes(arg_1))\n\n arg_2 = None\n arg_3 = arg_1.get(_PKCS12_KEY)\n arg_4 = None\n if arg_3 is None:\n arg_2 = arg_1['_private_key_pkcs8_pem']\n arg_5 = crypt.Signer.from_string(arg_2)\n else:\n # NOTE: This assumes that private_key_pkcs8_pem is not also\n # in the serialized data. This would be very incorrect\n # state.\n arg_3 = base64.b64decode(arg_3)\n arg_4 = arg_1['_private_key_password']\n arg_5 = crypt.Signer.from_string(arg_3, arg_4)\n\n arg_6 = arg_0(\n arg_1['_service_account_email'],\n arg_5,\n scopes=arg_1['_scopes'],\n private_key_id=arg_1['_private_key_id'],\n client_id=arg_1['client_id'],\n user_agent=arg_1['_user_agent'],\n **arg_1['_kwargs']\n )\n if arg_2 is not None:\n arg_6._private_key_pkcs8_pem = arg_2\n if arg_3 is not None:\n arg_6._private_key_pkcs12 = arg_3\n if arg_4 is not None:\n arg_6._private_key_password = arg_4\n arg_6.invalid = arg_1['invalid']\n arg_6.access_token = arg_1['access_token']\n arg_6.token_uri = arg_1['token_uri']\n arg_6.revoke_uri = arg_1['revoke_uri']\n arg_14 = arg_1.get('token_expiry', None)\n if arg_14 is not None:\n arg_6.token_expiry = datetime.datetime.strptime(\n arg_14, client.EXPIRY_FORMAT)\n return arg_6"} +{"_id": "doc_4726", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create credentials that specify additional claims.\n\n Args:\n claims: dict, key-value pairs for claims.\n\n Returns:\n ServiceAccountCredentials, a copy of the current service account\n credentials with updated claims to use when obtaining access\n tokens.\n \"\"\"\n arg_2 = dict(arg_0._kwargs)\n arg_2.update(arg_1)\n arg_3 = arg_0.__class__(arg_0._service_account_email,\n arg_0._signer,\n scopes=arg_0._scopes,\n private_key_id=arg_0._private_key_id,\n client_id=arg_0.client_id,\n user_agent=arg_0._user_agent,\n **arg_2)\n arg_3.token_uri = arg_0.token_uri\n arg_3.revoke_uri = arg_0.revoke_uri\n arg_3._private_key_pkcs8_pem = arg_0._private_key_pkcs8_pem\n arg_3._private_key_pkcs12 = arg_0._private_key_pkcs12\n arg_3._private_key_password = arg_0._private_key_password\n return arg_3"} +{"_id": "doc_4727", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Create a signed jwt.\n\n Args:\n http: unused\n additional_claims: dict, additional claims to add to\n the payload of the JWT.\n Returns:\n An 
AccessTokenInfo with the signed jwt\n \"\"\"\n if arg_2 is None:\n if arg_0.access_token is None or arg_0.access_token_expired:\n arg_0.refresh(None)\n return client.AccessTokenInfo(\n access_token=arg_0.access_token, expires_in=arg_0._expires_in())\n else:\n # Create a 1 time token\n arg_3, arg_4 = arg_0._create_token(arg_2)\n return client.AccessTokenInfo(\n access_token=arg_3, expires_in=arg_0._MAX_TOKEN_LIFETIME_SECS)"} +{"_id": "doc_4728", "title": "", "text": "def Func():\n \"\"\"Determine if the current environment is Compute Engine.\n\n Returns:\n Boolean indicating whether or not the current environment is Google\n Compute Engine.\n \"\"\"\n # NOTE: The explicit ``timeout`` is a workaround. The underlying\n # issue is that resolving an unknown host on some networks will take\n # 20-30 seconds; making this timeout short fixes the issue, but\n # could lead to false negatives in the event that we are on GCE, but\n # the metadata resolution was particularly slow. The latter case is\n # \"unlikely\".\n arg_0 = transport.get_http_object(timeout=GCE_METADATA_TIMEOUT)\n try:\n arg_1, arg_2 = transport.request(\n arg_0, _GCE_METADATA_URI, headers=_GCE_HEADERS)\n return (\n arg_1.status == http_client.OK and\n arg_1.get(_METADATA_FLAVOR_HEADER) == _DESIRED_METADATA_FLAVOR)\n except socket.error: # socket.timeout or socket.error(64, 'Host is down')\n logger.info('Timeout attempting to reach GCE metadata service.')\n return False"} +{"_id": "doc_4729", "title": "", "text": "def Func():\n \"\"\"Detects if the code is running in the App Engine environment.\n\n Returns:\n True if running in the GAE environment, False otherwise.\n \"\"\"\n if arg_1.env_name is not None:\n return arg_1.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')\n\n try:\n import google.appengine # noqa: unused import\n except ImportError:\n pass\n else:\n arg_0 = os.environ.get(_SERVER_SOFTWARE, '')\n if arg_0.startswith('Google App Engine/'):\n arg_1.env_name = 'GAE_PRODUCTION'\n return True\n elif arg_0.startswith('Development/'):\n arg_1.env_name = 'GAE_LOCAL'\n return True\n\n return False"} +{"_id": "doc_4730", "title": "", "text": "def Func():\n \"\"\"Detect if the code is running in the Compute Engine environment.\n\n Returns:\n True if running in the GCE environment, False otherwise.\n \"\"\"\n if arg_0.env_name is not None:\n return arg_0.env_name == 'GCE_PRODUCTION'\n\n if NO_GCE_CHECK != 'True' and _detect_gce_environment():\n arg_0.env_name = 'GCE_PRODUCTION'\n return True\n return False"} +{"_id": "doc_4731", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Saves a file with read-write permissions on for the owner.\n\n Args:\n filename: String. 
Absolute path to file.\n json_contents: JSON serializable object to be saved.\n \"\"\"\n arg_2 = tempfile.mktemp()\n arg_3 = os.open(arg_2, os.O_WRONLY | os.O_CREAT, 0o600)\n with os.fdopen(arg_3, 'w') as file_handle:\n json.dump(arg_1, file_handle, sort_keys=True,\n indent=2, separators=(',', ': '))\n shutil.move(arg_2, arg_0)"} +{"_id": "doc_4732", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Save the provided GoogleCredentials to the well known file.\n\n Args:\n credentials: the credentials to be saved to the well known file;\n it should be an instance of GoogleCredentials\n well_known_file: the name of the file where the credentials are to be\n saved; this parameter is supposed to be used for\n testing only\n \"\"\"\n # TODO(orestica): move this method to tools.py\n # once the argparse import gets fixed (it is not present in Python 2.6)\n\n if arg_1 is None:\n arg_1 = _get_well_known_file()\n\n arg_2 = os.path.dirname(arg_1)\n if not os.path.isdir(arg_2):\n raise OSError(\n 'Config directory does not exist: {0}'.format(arg_2))\n\n arg_3 = arg_0.serialization_data\n _save_private_file(arg_1, arg_3)"} +{"_id": "doc_4733", "title": "", "text": "def Func():\n \"\"\"Get the well known file produced by command 'gcloud auth login'.\"\"\"\n # TODO(orestica): Revisit this method once gcloud provides a better way\n # of pinpointing the exact location of the file.\n arg_0 = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR)\n if arg_0 is None:\n if os.name == 'nt':\n try:\n arg_0 = os.path.join(os.environ['APPDATA'],\n _CLOUDSDK_CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things.\n arg_1 = os.environ.get('SystemDrive', 'C:')\n arg_0 = os.path.join(arg_1, '\\\\',\n _CLOUDSDK_CONFIG_DIRECTORY)\n else:\n arg_0 = os.path.join(os.path.expanduser('~'),\n '.config',\n _CLOUDSDK_CONFIG_DIRECTORY)\n\n return os.path.join(arg_0, _WELL_KNOWN_CREDENTIALS_FILE)"} +{"_id": "doc_4734", "title": "", "text": "def Func(arg_0):\n \"\"\"Build the Application Default Credentials from file.\"\"\"\n # read the credentials from the file\n with open(arg_0) as file_obj:\n arg_1 = json.load(file_obj)\n\n arg_2 = arg_1.get('type')\n if arg_2 == AUTHORIZED_USER:\n arg_3 = set(['client_id', 'client_secret', 'refresh_token'])\n elif arg_2 == SERVICE_ACCOUNT:\n arg_3 = set(['client_id', 'client_email', 'private_key_id',\n 'private_key'])\n else:\n raise ApplicationDefaultCredentialsError(\n \"'type' field should be defined (and have one of the '\" +\n AUTHORIZED_USER + \"' or '\" + SERVICE_ACCOUNT + \"' values)\")\n\n arg_4 = arg_3.difference(arg_1.keys())\n\n if arg_4:\n _raise_exception_for_missing_fields(arg_4)\n\n if arg_1['type'] == AUTHORIZED_USER:\n return GoogleCredentials(\n access_token=None,\n client_id=arg_1['client_id'],\n client_secret=arg_1['client_secret'],\n refresh_token=arg_1['refresh_token'],\n token_expiry=None,\n token_uri=oauth2client.GOOGLE_TOKEN_URI,\n user_agent='Python client library')\n else: # client_credentials['type'] == SERVICE_ACCOUNT\n from oauth2client import service_account\n return service_account._JWTAccessCredentials.from_json_keyfile_dict(\n arg_1)"} +{"_id": "doc_4735", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None,\n arg_3=arg_4):\n \"\"\"Verifies a signed JWT id_token.\n\n This function requires PyOpenSSL and because of that it does not work on\n App Engine.\n\n Args:\n id_token: string, A Signed JWT.\n audience: string, The audience 'aud' that the token should be for.\n http: httplib2.Http, instance to use to 
make the HTTP request. Callers\n should supply an instance that has caching enabled.\n cert_uri: string, URI of the certificates in JSON format to\n verify the JWT against.\n\n Returns:\n The deserialized JSON in the JWT.\n\n Raises:\n oauth2client.crypt.AppIdentityError: if the JWT fails to verify.\n CryptoUnavailableError: if no crypto library is available.\n \"\"\"\n _require_crypto_or_die()\n if arg_2 is None:\n arg_2 = transport.get_cached_http()\n\n arg_5, arg_6 = transport.request(arg_2, arg_3)\n if arg_5.status == http_client.OK:\n arg_7 = json.loads(_helpers._from_bytes(arg_6))\n return crypt.verify_signed_jwt_with_certs(arg_0, arg_7, arg_1)\n else:\n raise VerifyJwtTokenError('Status code: {0}'.format(arg_5.status))"} +{"_id": "doc_4736", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4='postmessage', arg_5=None,\n arg_6=None,\n arg_7=arg_8.GOOGLE_TOKEN_URI,\n arg_10=arg_8.GOOGLE_AUTH_URI,\n arg_12=arg_8.GOOGLE_REVOKE_URI,\n arg_14=arg_8.GOOGLE_DEVICE_URI,\n arg_16=arg_8.GOOGLE_TOKEN_INFO_URI,\n arg_18=False,\n arg_19=None):\n \"\"\"Exchanges an authorization code for an OAuth2Credentials object.\n\n Args:\n client_id: string, client identifier.\n client_secret: string, client secret.\n scope: string or iterable of strings, scope(s) to request.\n code: string, An authorization code, most likely passed down from\n the client\n redirect_uri: string, this is generally set to 'postmessage' to match\n the redirect_uri that the client specified\n http: httplib2.Http, optional http instance to use to do the fetch\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n auth_uri: string, URI for authorization endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0 provider\n can be used.\n revoke_uri: string, URI for revoke endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0 provider\n can be used.\n device_uri: string, URI for device authorization endpoint. For\n convenience defaults to Google's endpoints but any OAuth\n 2.0 provider can be used.\n pkce: boolean, default: False, Generate and include a \"Proof Key\n for Code Exchange\" (PKCE) with your authorization and token\n requests. This adds security for installed applications that\n cannot protect a client_secret. See RFC 7636 for details.\n code_verifier: bytestring or None, default: None, parameter passed\n as part of the code exchange when pkce=True. If\n None, a code_verifier will automatically be\n generated as part of step1_get_authorize_url(). 
See\n RFC 7636 for details.\n\n Returns:\n An OAuth2Credentials object.\n\n Raises:\n FlowExchangeError if the authorization code cannot be exchanged for an\n access token\n \"\"\"\n arg_20 = OAuth2WebServerFlow(arg_0, arg_1, arg_2,\n arg_4=arg_4,\n arg_6=arg_6,\n arg_10=arg_10,\n arg_7=arg_7,\n arg_12=arg_12,\n arg_14=arg_14,\n arg_16=arg_16,\n arg_18=arg_18,\n arg_19=arg_19)\n\n arg_21 = arg_20.step2_exchange(arg_3, arg_5=arg_5)\n return arg_21"} +{"_id": "doc_4737", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=None,\n arg_4='postmessage',\n arg_5=None,\n arg_6=None,\n arg_7=None):\n \"\"\"Returns OAuth2Credentials from a clientsecrets file and an auth code.\n\n Will create the right kind of Flow based on the contents of the\n clientsecrets file or will raise InvalidClientSecretsError for unknown\n types of Flows.\n\n Args:\n filename: string, File name of clientsecrets.\n scope: string or iterable of strings, scope(s) to request.\n code: string, An authorization code, most likely passed down from\n the client\n message: string, A friendly string to display to the user if the\n clientsecrets file is missing or invalid. If message is\n provided then sys.exit will be called in the case of an error.\n If message in not provided then\n clientsecrets.InvalidClientSecretsError will be raised.\n redirect_uri: string, this is generally set to 'postmessage' to match\n the redirect_uri that the client specified\n http: httplib2.Http, optional http instance to use to do the fetch\n cache: An optional cache service client that implements get() and set()\n methods. See clientsecrets.loadfile() for details.\n device_uri: string, OAuth 2.0 device authorization endpoint\n pkce: boolean, default: False, Generate and include a \"Proof Key\n for Code Exchange\" (PKCE) with your authorization and token\n requests. This adds security for installed applications that\n cannot protect a client_secret. See RFC 7636 for details.\n code_verifier: bytestring or None, default: None, parameter passed\n as part of the code exchange when pkce=True. If\n None, a code_verifier will automatically be\n generated as part of step1_get_authorize_url(). 
See\n RFC 7636 for details.\n\n Returns:\n An OAuth2Credentials object.\n\n Raises:\n FlowExchangeError: if the authorization code cannot be exchanged for an\n access token\n UnknownClientSecretsFlowError: if the file describes an unknown kind\n of Flow.\n clientsecrets.InvalidClientSecretsError: if the clientsecrets file is\n invalid.\n \"\"\"\n arg_8 = flow_from_clientsecrets(arg_0, arg_1, arg_3=arg_3,\n arg_6=arg_6, arg_4=arg_4,\n arg_7=arg_7)\n arg_9 = arg_8.step2_exchange(arg_2, arg_5=arg_5)\n return arg_9"} +{"_id": "doc_4738", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Utility class method to instantiate a Credentials subclass from JSON.\n\n Expects the JSON string to have been produced by to_json().\n\n Args:\n json_data: string or bytes, JSON from to_json().\n\n Returns:\n An instance of the subclass of Credentials that was serialized with\n to_json().\n \"\"\"\n arg_2 = _helpers._from_bytes(arg_1)\n arg_3 = json.loads(arg_2)\n # Find and call the right classmethod from_json() to restore\n # the object.\n arg_4 = arg_3['_module']\n try:\n arg_5 = __import__(arg_4)\n except ImportError:\n # In case there's an object from the old package structure,\n # update it\n arg_4 = arg_4.replace('.googleapiclient', '')\n arg_5 = __import__(arg_4)\n\n arg_5 = __import__(arg_4,\n fromlist=arg_4.split('.')[:-1])\n arg_6 = getattr(arg_5, arg_3['_class'])\n return arg_6.from_json(arg_2)"} +{"_id": "doc_4739", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write a credential.\n\n The Storage lock must be held when this is called.\n\n Args:\n credentials: Credentials, the credentials to store.\n \"\"\"\n arg_0.acquire_lock()\n try:\n arg_0.locked_Func(arg_1)\n finally:\n arg_0.release_lock()"} +{"_id": "doc_4740", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Verify that the credentials are authorized for the given scopes.\n\n Returns True if the credentials authorized scopes contain all of the\n scopes given.\n\n Args:\n scopes: list or string, the scopes to check.\n\n Notes:\n There are cases where the credentials are unaware of which scopes\n are authorized. Notably, credentials obtained and stored before\n this code was added will not have scopes, AccessTokenCredentials do\n not have scopes. In both cases, you can use refresh_scopes() to\n obtain the canonical set of scopes.\n \"\"\"\n arg_1 = _helpers.string_to_scopes(arg_1)\n return set(arg_1).issubset(arg_0.scopes)"} +{"_id": "doc_4741", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Return the access token and its expiration information.\n\n If the token does not exist, get one.\n If the token expired, refresh it.\n \"\"\"\n if not arg_0.access_token or arg_0.access_token_expired:\n if not arg_1:\n arg_1 = transport.get_http_object()\n arg_0.refresh(arg_1)\n return AccessTokenInfo(access_token=arg_0.access_token,\n expires_in=arg_0._expires_in())"} +{"_id": "doc_4742", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the number of seconds until this token expires.\n\n If token_expiry is in the past, this method will return 0, meaning the\n token has already expired.\n\n If token_expiry is None, this method will return None. 
Note that\n returning 0 in such a case would not be fair: the token may still be\n valid; we just don't know anything about it.\n \"\"\"\n if arg_0.token_expiry:\n arg_1 = _UTCNOW()\n if arg_0.token_expiry > arg_1:\n arg_2 = arg_0.token_expiry - arg_1\n # TODO(orestica): return time_delta.total_seconds()\n # once dropping support for Python 2.6\n return arg_2.days * 86400 + arg_2.seconds\n else:\n return 0"} +{"_id": "doc_4743", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Refreshes the access_token.\n\n This method first checks by reading the Storage object if available.\n If a refresh is still needed, it holds the Storage lock until the\n refresh is completed.\n\n Args:\n http: an object to be used to make HTTP requests.\n\n Raises:\n HttpAccessTokenRefreshError: When the refresh fails.\n \"\"\"\n if not arg_0.store:\n arg_0._doFunc_request(arg_1)\n else:\n arg_0.store.acquire_lock()\n try:\n arg_2 = arg_0.store.locked_get()\n\n if (arg_2 and not arg_2.invalid and\n arg_2.access_token != arg_0.access_token and\n not arg_2.access_token_expired):\n logger.info('Updated access_token read from Storage')\n arg_0._updateFromCredential(arg_2)\n else:\n arg_0._doFunc_request(arg_1)\n finally:\n arg_0.store.release_lock()"} +{"_id": "doc_4744", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Refresh the access_token using the refresh_token.\n\n Args:\n http: an object to be used to make HTTP requests.\n\n Raises:\n HttpAccessTokenRefreshError: When the refresh fails.\n \"\"\"\n arg_2 = arg_0._generate_refresh_request_body()\n arg_3 = arg_0._generate_refresh_request_headers()\n\n logger.info('Refreshing access_token')\n arg_4, arg_5 = transport.request(\n arg_1, arg_0.token_uri, method='POST',\n arg_2=arg_2, arg_3=arg_3)\n arg_5 = _helpers._from_bytes(arg_5)\n if arg_4.status == http_client.OK:\n arg_6 = json.loads(arg_5)\n arg_0.token_response = arg_6\n arg_0.access_token = arg_6['access_token']\n arg_0.refresh_token = arg_6.get('refresh_token', arg_0.refresh_token)\n if 'expires_in' in arg_6:\n arg_10 = datetime.timedelta(seconds=int(arg_6['expires_in']))\n arg_0.token_expiry = arg_10 + _UTCNOW()\n else:\n arg_0.token_expiry = None\n if 'id_token' in arg_6:\n arg_0.id_token = _extract_id_token(arg_6['id_token'])\n arg_0.id_token_jwt = arg_6['id_token']\n else:\n arg_0.id_token = None\n arg_0.id_token_jwt = None\n # On temporary refresh errors, the user does not actually have to\n # re-authorize, so we unflag here.\n arg_0.invalid = False\n if arg_0.store:\n arg_0.store.locked_put(arg_0)\n else:\n # An {'error':...} response body means the token is expired or\n # revoked, so we flag the credentials as such.\n logger.info('Failed to retrieve access token: %s', arg_5)\n arg_15 = 'Invalid response {0}.'.format(arg_4.status)\n try:\n arg_6 = json.loads(arg_5)\n if 'error' in arg_6:\n arg_15 = arg_6['error']\n if 'error_description' in arg_6:\n arg_15 += ': ' + arg_6['error_description']\n arg_0.invalid = True\n if arg_0.store is not None:\n arg_0.store.locked_put(arg_0)\n except (TypeError, ValueError):\n pass\n raise HttpAccessTokenRefreshError(arg_15, status=arg_4.status)"} +{"_id": "doc_4745", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Retrieves the list of authorized scopes from the OAuth2 provider.\n\n Args:\n http: an object to be used to make HTTP requests.\n token: A string used as the token to identify the credentials to\n the provider.\n\n Raises:\n Error: When refresh fails, indicating the the access token is\n invalid.\n \"\"\"\n logger.info('Refreshing 
scopes')\n arg_3 = {'access_token': arg_2, 'fields': 'scope'}\n arg_4 = _helpers.update_query_params(\n arg_0.token_info_uri, arg_3)\n arg_5, arg_6 = transport.request(arg_1, arg_4)\n arg_6 = _helpers._from_bytes(arg_6)\n if arg_5.status == http_client.OK:\n arg_7 = json.loads(arg_6)\n arg_0.scopes = set(_helpers.string_to_scopes(arg_7.get('scope', '')))\n else:\n arg_9 = 'Invalid response {0}.'.format(arg_5.status)\n try:\n arg_7 = json.loads(arg_6)\n if 'error_description' in arg_7:\n arg_9 = arg_7['error_description']\n except (TypeError, ValueError):\n pass\n raise Error(arg_9)"} +{"_id": "doc_4746", "title": "", "text": "def Func():\n \"\"\"Attempts to get implicit credentials from local credential files.\n\n First checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS\n is set with a filename and then falls back to a configuration file (the\n \"well known\" file) associated with the 'gcloud' command line tool.\n\n Returns:\n Credentials object associated with the\n GOOGLE_APPLICATION_CREDENTIALS file or the \"well known\" file if\n either exist. If neither file is define, returns None, indicating\n no credentials from a file can detected from the current\n environment.\n \"\"\"\n arg_0 = _get_environment_variable_file()\n if not arg_0:\n arg_0 = _get_well_known_file()\n if os.path.isfile(arg_0):\n arg_1 = (' (produced automatically when running'\n ' \"gcloud auth login\" command)')\n else:\n arg_0 = None\n else:\n arg_1 = (' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS +\n ' environment variable)')\n\n if not arg_0:\n return\n\n # If we can read the credentials from a file, we don't need to know\n # what environment we are in.\n arg_2.env_name = DEFAULT_ENV_NAME\n\n try:\n return _get_application_default_credential_from_file(\n arg_0)\n except (ApplicationDefaultCredentialsError, ValueError) as error:\n _raise_exception_for_reading_json(arg_0,\n arg_1, error)"} +{"_id": "doc_4747", "title": "", "text": "def Func(arg_0):\n \"\"\"Gets credentials implicitly from the environment.\n\n Checks environment in order of precedence:\n - Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to\n a file with stored credentials information.\n - Stored \"well known\" file associated with `gcloud` command line tool.\n - Google App Engine (production and testing)\n - Google Compute Engine production environment.\n\n Raises:\n ApplicationDefaultCredentialsError: raised when the credentials\n fail to be retrieved.\n \"\"\"\n # Environ checks (in order).\n arg_1 = [\n arg_0._implicit_credentials_from_files,\n arg_0._implicit_credentials_from_gae,\n arg_0._implicit_credentials_from_gce,\n ]\n\n for arg_2 in arg_1:\n arg_3 = arg_2()\n if arg_3 is not None:\n return arg_3\n\n # If no credentials, fail.\n raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)"} +{"_id": "doc_4748", "title": "", "text": "def Func(arg_0):\n \"\"\"Create a Credentials object by reading information from a file.\n\n It returns an object of type GoogleCredentials.\n\n Args:\n credential_filename: the path to the file from where the\n credentials are to be read\n\n Raises:\n ApplicationDefaultCredentialsError: raised when the credentials\n fail to be retrieved.\n \"\"\"\n if arg_0 and os.path.isfile(arg_0):\n try:\n return _get_application_default_credential_from_file(\n arg_0)\n except (ApplicationDefaultCredentialsError, ValueError) as error:\n arg_1 = (' (provided as parameter to the '\n 'Func() method)')\n _raise_exception_for_reading_json(arg_0,\n arg_1,\n error)\n else:\n raise 
ApplicationDefaultCredentialsError(\n 'The parameter passed to the Func() '\n 'method should point to a file.')"} +{"_id": "doc_4749", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create a DeviceFlowInfo from a server response.\n\n The response should be a dict containing entries as described here:\n\n http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1\n \"\"\"\n # device_code, user_code, and verification_url are required.\n arg_2 = {\n 'device_code': arg_1['device_code'],\n 'user_code': arg_1['user_code'],\n }\n # The response may list the verification address as either\n # verification_url or verification_uri, so we check for both.\n arg_3 = arg_1.get(\n 'verification_url', arg_1.get('verification_uri'))\n if arg_3 is None:\n raise OAuth2DeviceCodeError(\n 'No verification_url provided in server response')\n arg_2['verification_url'] = arg_3\n # expires_in and interval are optional.\n arg_2.update({\n 'interval': arg_1.get('interval'),\n 'user_code_expiry': None,\n })\n if 'expires_in' in arg_1:\n arg_2['user_code_expiry'] = (\n _UTCNOW() +\n datetime.timedelta(seconds=int(arg_1['expires_in'])))\n return arg_0(**arg_2)"} +{"_id": "doc_4750", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Returns a URI to redirect to the provider.\n\n Args:\n redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'\n for a non-web-based application, or a URI that\n handles the callback from the authorization server.\n This parameter is deprecated, please move to passing\n the redirect_uri in via the constructor.\n state: string, Opaque state string which is passed through the\n OAuth2 flow and returned to the client as a query parameter\n in the callback.\n\n Returns:\n A URI as a string to redirect the user to begin the authorization\n flow.\n \"\"\"\n if arg_1 is not None:\n logger.warning((\n 'The redirect_uri parameter for '\n 'OAuth2WebServerFlow.Func is deprecated. 
'\n 'Please move to passing the redirect_uri in via the '\n 'constructor.'))\n arg_0.redirect_uri = arg_1\n\n if arg_0.redirect_uri is None:\n raise ValueError('The value of redirect_uri must not be None.')\n\n arg_3 = {\n 'client_id': arg_0.client_id,\n 'redirect_uri': arg_0.redirect_uri,\n 'scope': arg_0.scope,\n }\n if arg_2 is not None:\n arg_3['state'] = arg_2\n if arg_0.login_hint is not None:\n arg_3['login_hint'] = arg_0.login_hint\n if arg_0._pkce:\n if not arg_0.code_verifier:\n arg_0.code_verifier = _pkce.code_verifier()\n arg_5 = _pkce.code_challenge(arg_0.code_verifier)\n arg_3['code_challenge'] = arg_5\n arg_3['code_challenge_method'] = 'S256'\n\n arg_3.update(arg_0.params)\n return _helpers.update_query_params(arg_0.auth_uri, arg_3)"} +{"_id": "doc_4751", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Returns a user code and the verification URL where to enter it\n\n Returns:\n A user code as a string for the user to authorize the application\n An URL as a string where the user has to enter the code\n \"\"\"\n if arg_0.device_uri is None:\n raise ValueError('The value of device_uri must not be None.')\n\n arg_2 = urllib.parse.urlencode({\n 'client_id': arg_0.client_id,\n 'scope': arg_0.scope,\n })\n arg_3 = {\n 'content-type': 'application/x-www-form-urlencoded',\n }\n\n if arg_0.user_agent is not None:\n arg_3['user-agent'] = arg_0.user_agent\n\n if arg_1 is None:\n arg_1 = transport.get_http_object()\n\n arg_4, arg_5 = transport.request(\n arg_1, arg_0.device_uri, method='POST', arg_2=arg_2, arg_3=arg_3)\n arg_5 = _helpers._from_bytes(arg_5)\n if arg_4.status == http_client.OK:\n try:\n arg_6 = json.loads(arg_5)\n except ValueError as exc:\n raise OAuth2DeviceCodeError(\n 'Could not parse server response as JSON: \"{0}\", '\n 'error: \"{1}\"'.format(arg_5, exc))\n return DeviceFlowInfo.FromResponse(arg_6)\n else:\n arg_7 = 'Invalid response {0}.'.format(arg_4.status)\n try:\n arg_8 = json.loads(arg_5)\n if 'error' in arg_8:\n arg_7 += ' Error: {0}'.format(arg_8['error'])\n except ValueError:\n # Couldn't decode a JSON response, stick with the\n # default message.\n pass\n raise OAuth2DeviceCodeError(arg_7)"} +{"_id": "doc_4752", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Construct an RsaVerifier instance from a string.\n\n Args:\n key_pem: string, public key in PEM format.\n is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\n is expected to be an RSA key in PEM format.\n\n Returns:\n RsaVerifier instance.\n\n Raises:\n ValueError: if the key_pem can't be parsed. In either case, error\n will begin with 'No PEM start marker'. If\n ``is_x509_cert`` is True, will fail to find the\n \"-----BEGIN CERTIFICATE-----\" error, otherwise fails\n to find \"-----BEGIN RSA PUBLIC KEY-----\".\n \"\"\"\n arg_1 = _helpers._to_bytes(arg_1)\n if arg_2:\n arg_3 = rsa.pem.load_pem(arg_1, 'CERTIFICATE')\n arg_4, arg_5 = decoder.decode(arg_3, asn1Spec=Certificate())\n if arg_5 != b'':\n raise ValueError('Unused bytes', arg_5)\n\n arg_6 = arg_4['tbsCertificate']['subjectPublicKeyInfo']\n arg_7 = _bit_list_to_bytes(arg_6['subjectPublicKey'])\n arg_8 = rsa.PublicKey.load_pkcs1(arg_7, 'DER')\n else:\n arg_8 = rsa.PublicKey.load_pkcs1(arg_1, 'PEM')\n return arg_0(arg_8)"} +{"_id": "doc_4753", "title": "", "text": "def Func(arg_0, arg_1, arg_2='notasecret'):\n \"\"\"Construct an RsaSigner instance from a string.\n\n Args:\n key: string, private key in PEM format.\n password: string, password for private key file. 
Unused for PEM\n files.\n\n Returns:\n RsaSigner instance.\n\n Raises:\n ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in\n PEM format.\n \"\"\"\n arg_1 = _helpers._from_bytes(arg_1) # pem expects str in Py3\n arg_3, arg_4 = pem.readPemBlocksFromFile(\n six.StringIO(arg_1), _PKCS1_MARKER, _PKCS8_MARKER)\n\n if arg_3 == 0:\n arg_5 = rsa.key.PrivateKey.load_pkcs1(arg_4,\n format='DER')\n elif arg_3 == 1:\n arg_6, arg_7 = decoder.decode(\n arg_4, asn1Spec=_PKCS8_SPEC)\n if arg_7 != b'':\n raise ValueError('Unused bytes', arg_7)\n arg_8 = arg_6.getComponentByName('privateKey')\n arg_5 = rsa.key.PrivateKey.load_pkcs1(arg_8.asOctets(),\n format='DER')\n else:\n raise ValueError('No key could be detected.')\n\n return arg_0(arg_5)"} +{"_id": "doc_4754", "title": "", "text": "def Func(arg_0):\n \"\"\"Load credentials from the given file handle.\n\n The file is expected to be in this format:\n\n {\n \"file_version\": 2,\n \"credentials\": {\n \"key\": \"base64 encoded json representation of credentials.\"\n }\n }\n\n This function will warn and return empty credentials instead of raising\n exceptions.\n\n Args:\n credentials_file: An open file handle.\n\n Returns:\n A dictionary mapping user-defined keys to an instance of\n :class:`oauth2client.client.Credentials`.\n \"\"\"\n try:\n arg_0.seek(0)\n arg_1 = json.load(arg_0)\n except Exception:\n logger.warning(\n 'Credentials file could not be loaded, will ignore and '\n 'overwrite.')\n return {}\n\n if arg_1.get('file_version') != 2:\n logger.warning(\n 'Credentials file is not version 2, will ignore and '\n 'overwrite.')\n return {}\n\n arg_2 = {}\n\n for arg_3, arg_4 in iteritems(arg_1.get('credentials', {})):\n try:\n arg_5 = base64.b64decode(arg_4)\n arg_6 = client.Credentials.new_from_json(arg_5)\n arg_2[arg_3] = arg_6\n except:\n logger.warning(\n 'Invalid credential {0} in file, ignoring.'.format(arg_3))\n\n return arg_2"} +{"_id": "doc_4755", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieves the current credentials from the store.\n\n Returns:\n An instance of :class:`oauth2client.client.Credentials` or `None`.\n \"\"\"\n arg_1 = arg_0._backend.Func(arg_0._key)\n\n if arg_1 is not None:\n arg_1.set_store(arg_0)\n\n return arg_1"} +{"_id": "doc_4756", "title": "", "text": "def Func(arg_0):\n \"\"\"A decorator to declare that only the first N arguments my be Func.\n\n This decorator makes it easy to support Python 3 style keyword-only\n parameters. 
For example, in Python 3 it is possible to write::\n\n def fn(pos1, *, kwonly1=None, kwonly1=None):\n ...\n\n All named parameters after ``*`` must be a keyword::\n\n fn(10, 'kw1', 'kw2') # Raises exception.\n fn(10, kwonly1='kw1') # Ok.\n\n Example\n ^^^^^^^\n\n To define a function like above, do::\n\n @Func(1)\n def fn(pos1, kwonly1=None, kwonly2=None):\n ...\n\n If no default value is provided to a keyword argument, it becomes a\n required keyword argument::\n\n @Func(0)\n def fn(required_kw):\n ...\n\n This must be called with the keyword parameter::\n\n fn() # Raises exception.\n fn(10) # Raises exception.\n fn(required_kw=10) # Ok.\n\n When defining instance or class methods always remember to account for\n ``self`` and ``cls``::\n\n class MyClass(object):\n\n @Func(2)\n def my_method(self, pos1, kwonly1=None):\n ...\n\n @classmethod\n @Func(2)\n def my_method(cls, pos1, kwonly1=None):\n ...\n\n The Func decorator behavior is controlled by\n ``_helpers.Func_parameters_enforcement``, which may be set to\n ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or\n ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do\n nothing, respectively, if a declaration is violated.\n\n Args:\n max_Func_arguments: Maximum number of Func arguments. All\n parameters after the this index must be\n keyword only.\n\n Returns:\n A decorator that prevents using arguments after max_Func_args\n from being used as Func parameters.\n\n Raises:\n TypeError: if a key-word only argument is provided as a Func\n parameter, but only if\n _helpers.Func_parameters_enforcement is set to\n POSITIONAL_EXCEPTION.\n \"\"\"\n\n def Func_decorator(arg_1):\n @functools.wraps(arg_1)\n def Func_wrapper(*arg_2, **arg_3):\n if len(arg_2) > arg_0:\n arg_4 = ''\n if arg_0 != 1:\n arg_4 = 's'\n arg_5 = ('{function}() takes at most {args_max} Func '\n 'argument{plural} ({args_given} given)'.format(\n function=arg_1.__name__,\n args_max=arg_0,\n args_given=len(arg_2),\n plural=arg_4))\n if Func_parameters_enforcement == POSITIONAL_EXCEPTION:\n raise TypeError(arg_5)\n elif Func_parameters_enforcement == POSITIONAL_WARNING:\n logger.warning(arg_5)\n return arg_1(*arg_2, **arg_3)\n return Func_wrapper\n\n if isinstance(arg_0, six.integer_types):\n return Func_decorator\n else:\n arg_2, arg_6, arg_6, arg_7 = inspect.getargspec(arg_0)\n return Func(len(arg_2) - len(arg_7))(arg_0)"} +{"_id": "doc_4757", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts stringifed scope value to a list.\n\n If scopes is a list then it is simply passed through. 
If scopes is an\n string then a list of each individual scope is returned.\n\n Args:\n scopes: a string or iterable of strings, the scopes.\n\n Returns:\n The scopes in a list.\n \"\"\"\n if not arg_0:\n return []\n elif isinstance(arg_0, six.string_types):\n return arg_0.split(' ')\n else:\n return arg_0"} +{"_id": "doc_4758", "title": "", "text": "def Func(arg_0):\n \"\"\"Parses unique key-value parameters from urlencoded content.\n\n Args:\n content: string, URL-encoded key-value pairs.\n\n Returns:\n dict, The key-value pairs from ``content``.\n\n Raises:\n ValueError: if one of the keys is repeated.\n \"\"\"\n arg_1 = urllib.parse.parse_qs(arg_0)\n arg_2 = {}\n for arg_3, arg_4 in six.iteritems(arg_1):\n if len(arg_4) != 1:\n arg_5 = ('URL-encoded content contains a repeated value:'\n '%s -> %s' % (arg_3, ', '.join(arg_4)))\n raise ValueError(arg_5)\n arg_2[arg_3] = arg_4[0]\n return arg_2"} +{"_id": "doc_4759", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Updates a URI with new query parameters.\n\n If a given key from ``params`` is repeated in the ``uri``, then\n the URI will be considered invalid and an error will occur.\n\n If the URI is valid, then each value from ``params`` will\n replace the corresponding value in the query parameters (if\n it exists).\n\n Args:\n uri: string, A valid URI, with potential existing query parameters.\n params: dict, A dictionary of query parameters.\n\n Returns:\n The same URI but with the new query parameters added.\n \"\"\"\n arg_2 = urllib.parse.urlparse(arg_0)\n arg_3 = parse_unique_urlencoded(arg_2.query)\n arg_3.update(arg_1)\n arg_4 = urllib.parse.urlencode(arg_3)\n arg_5 = arg_2._replace(query=arg_4)\n return urllib.parse.urlunparse(arg_5)"} +{"_id": "doc_4760", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Adds a query parameter to a url.\n\n Replaces the current value if it already exists in the URL.\n\n Args:\n url: string, url to add the query parameter to.\n name: string, query parameter name.\n value: string, query parameter value.\n\n Returns:\n Updated query parameter. 
Does not update the url if value is None.\n \"\"\"\n if arg_2 is None:\n return arg_0\n else:\n return update_query_params(arg_0, {arg_1: arg_2})"} +{"_id": "doc_4761", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Adds a user-agent to the headers.\n\n Args:\n headers: dict, request headers to add / modify user\n agent within.\n user_agent: str, the user agent to add.\n\n Returns:\n dict, the original headers passed in, but modified if the\n user agent is not None.\n \"\"\"\n if arg_1 is not None:\n if 'user-agent' in arg_0:\n arg_0['user-agent'] = (arg_1 + ' ' + arg_0['user-agent'])\n else:\n arg_0['user-agent'] = arg_1\n\n return arg_0"} +{"_id": "doc_4762", "title": "", "text": "def Func(arg_0):\n \"\"\"Forces header keys and values to be strings, i.e not unicode.\n\n The httplib module just concats the header keys and values in a way that\n may make the message header a unicode string, which, if it then tries to\n contatenate to a binary request body may result in a unicode decode error.\n\n Args:\n headers: dict, A dictionary of headers.\n\n Returns:\n The same dictionary but with all the keys converted to strings.\n \"\"\"\n arg_1 = {}\n try:\n for arg_2, arg_3 in six.iteritems(arg_0):\n if not isinstance(arg_2, six.binary_type):\n arg_2 = str(arg_2)\n if not isinstance(arg_3, six.binary_type):\n arg_3 = str(arg_3)\n arg_1[arg_4._to_bytes(arg_2)] = arg_4._to_bytes(arg_3)\n except UnicodeEncodeError:\n from oauth2client.client import NonAsciiHeaderError\n raise NonAsciiHeaderError(arg_2, ': ', arg_3)\n return arg_1"} +{"_id": "doc_4763", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Prepares an HTTP object's request method for auth.\n\n Wraps HTTP requests with logic to catch auth failures (typically\n identified via a 401 status code). 
In the event of failure, tries\n to refresh the token used and then retry the original request.\n\n Args:\n credentials: Credentials, the credentials used to identify\n the authenticated user.\n http: httplib2.Http, an http object to be used to make\n auth requests.\n \"\"\"\n arg_2 = arg_1.request\n\n # The closure that will replace 'httplib2.Http.request'.\n def new_request(arg_3, arg_4='GET', arg_5=None, arg_6=None,\n arg_7=arg_8.DEFAULT_MAX_REDIRECTS,\n arg_10=None):\n if not arg_0.access_token:\n _LOGGER.info('Attempting refresh to obtain '\n 'initial access_token')\n arg_0._refresh(arg_2)\n\n # Clone and modify the request headers to add the appropriate\n # Authorization header.\n arg_6 = _initialize_headers(arg_6)\n arg_0.apply(arg_6)\n _apply_user_agent(arg_6, arg_0.user_agent)\n\n arg_11 = None\n # Check if the body is a file-like stream.\n if all(getattr(arg_5, arg_12, None) for arg_12 in\n _STREAM_PROPERTIES):\n arg_11 = arg_5.tell()\n\n arg_13, arg_14 = arg_17(arg_2, arg_3, arg_4, arg_5,\n clean_headers(arg_6),\n arg_7, arg_10)\n\n # A stored token may expire between the time it is retrieved and\n # the time the request is made, so we may need to try twice.\n arg_15 = 2\n for arg_16 in range(arg_15):\n if arg_13.status not in REFRESH_STATUS_CODES:\n break\n _LOGGER.info('Refreshing due to a %s (attempt %s/%s)',\n arg_13.status, arg_16 + 1,\n arg_15)\n arg_0._refresh(arg_2)\n arg_0.apply(arg_6)\n if arg_11 is not None:\n arg_5.seek(arg_11)\n\n arg_13, arg_14 = arg_17(arg_2, arg_3, arg_4, arg_5,\n clean_headers(arg_6),\n arg_7, arg_10)\n\n return arg_13, arg_14\n\n # Replace the request method with our own closure.\n arg_1.request = new_request\n\n # Set credentials as a property of the request method.\n arg_1.request.credentials = arg_0"} +{"_id": "doc_4764", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Prepares an HTTP object's request method for JWT access.\n\n Wraps HTTP requests with logic to catch auth failures (typically\n identified via a 401 status code). 
In the event of failure, tries\n to refresh the token used and then retry the original request.\n\n Args:\n credentials: _JWTAccessCredentials, the credentials used to identify\n a service account that uses JWT access tokens.\n http: httplib2.Http, an http object to be used to make\n auth requests.\n \"\"\"\n arg_2 = arg_1.request\n wrap_http_for_auth(arg_0, arg_1)\n # The new value of ``http.request`` set by ``wrap_http_for_auth``.\n arg_3 = arg_1.request\n\n # The closure that will replace 'httplib2.Http.request'.\n def new_request(arg_4, arg_5='GET', arg_6=None, arg_7=None,\n arg_8=arg_9.DEFAULT_MAX_REDIRECTS,\n arg_11=None):\n if 'aud' in arg_0._kwargs:\n # Preemptively refresh token, this is not done for OAuth2\n if (arg_0.access_token is None or\n arg_0.access_token_expired):\n arg_0.refresh(None)\n return arg_15(arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8,\n arg_11)\n else:\n # If we don't have an 'aud' (audience) claim,\n # create a 1-time token with the uri root as the audience\n arg_7 = _initialize_headers(arg_7)\n _apply_user_agent(arg_7, arg_0.user_agent)\n arg_12 = arg_4.split('?', 1)[0]\n arg_13, arg_14 = arg_0._create_token({'aud': arg_12})\n\n arg_7['Authorization'] = 'Bearer ' + arg_13\n return arg_15(arg_2, arg_4, arg_5, arg_6,\n clean_headers(arg_7),\n arg_8, arg_11)\n\n # Replace the request method with our own closure.\n arg_1.request = new_request\n\n # Set credentials as a property of the request method.\n arg_1.request.credentials = arg_0"} +{"_id": "doc_4765", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieves the flow instance associated with a given CSRF token from\n the Flask session.\"\"\"\n arg_1 = session.pop(\n _FLOW_KEY.format(arg_0), None)\n\n if arg_1 is None:\n return None\n else:\n return pickle.loads(arg_1)"} +{"_id": "doc_4766", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Loads oauth2 configuration in order of priority.\n\n Priority:\n 1. Config passed to the constructor or init_app.\n 2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app\n config.\n 3. Config passed via the GOOGLE_OAUTH2_CLIENT_ID and\n GOOGLE_OAUTH2_CLIENT_SECRET app config.\n\n Raises:\n ValueError if no config could be found.\n \"\"\"\n if arg_2 and arg_3:\n arg_0.client_id, arg_0.client_secret = arg_2, arg_3\n return\n\n if arg_1:\n arg_0._load_client_secrets(arg_1)\n return\n\n if 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE' in arg_0.app.config:\n arg_0._load_client_secrets(\n arg_0.app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'])\n return\n\n try:\n arg_0.client_id, arg_0.client_secret = (\n arg_0.app.config['GOOGLE_OAUTH2_CLIENT_ID'],\n arg_0.app.config['GOOGLE_OAUTH2_CLIENT_SECRET'])\n except KeyError:\n raise ValueError(\n 'OAuth2 configuration could not be found. Either specify the '\n 'client_secrets_file or client_id and client_secret or set '\n 'the app configuration variables '\n 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE or '\n 'GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET.')"} +{"_id": "doc_4767", "title": "", "text": "def Func(arg_0):\n \"\"\"Flask view that starts the authorization flow.\n\n Starts flow by redirecting the user to the OAuth2 provider.\n \"\"\"\n arg_1 = request.args.to_dict()\n\n # Scopes will be passed as mutliple args, and to_dict() will only\n # return one. 
So, we use getlist() to get all of the scopes.\n arg_1['scopes'] = request.args.getlist('scopes')\n\n arg_2 = arg_1.pop('return_url', None)\n if arg_2 is None:\n arg_2 = request.referrer or '/'\n\n arg_3 = arg_0._make_flow(arg_2=arg_2, **arg_1)\n arg_4 = arg_3.step1_get_authorize_url()\n\n return redirect(arg_4)"} +{"_id": "doc_4768", "title": "", "text": "def Func(arg_0):\n \"\"\"The Func for the current user or None if unavailable.\"\"\"\n arg_1 = _app_ctx_stack.top\n\n if not hasattr(arg_1, _CREDENTIALS_KEY):\n arg_1.google_oauth2_Func = arg_0.storage.get()\n\n return arg_1.google_oauth2_Func"} +{"_id": "doc_4769", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns True if there are valid credentials for the current user.\"\"\"\n if not arg_0.credentials:\n return False\n # Is the access token expired? If so, do we have an refresh token?\n elif (arg_0.credentials.access_token_expired and\n not arg_0.credentials.refresh_token):\n return False\n else:\n return True"} +{"_id": "doc_4770", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the user's Func address or None if there are no credentials.\n\n The Func address is provided by the current credentials' id_token.\n This should not be used as unique identifier as the user can change\n their Func. If you need a unique identifier, use user_id.\n \"\"\"\n if not arg_0.credentials:\n return None\n try:\n return arg_0.credentials.id_token['Func']\n except KeyError:\n current_app.logger.error(\n 'Invalid id_token {0}'.format(arg_0.credentials.id_token))"} +{"_id": "doc_4771", "title": "", "text": "def Func(arg_0, arg_1='default'):\n \"\"\"Fetch an oauth token for the\n\n Args:\n http: an object to be used to make HTTP requests.\n service_account: An email specifying the service account this token\n should represent. Default will be a token for the \"default\" service\n account of the current compute engine instance.\n\n Returns:\n A tuple of (access token, token expiration), where access token is the\n access token as a string and token expiration is a datetime object\n that indicates when the access token will expire.\n \"\"\"\n arg_2 = get(\n arg_0,\n 'instance/service-accounts/{0}/token'.format(arg_1))\n arg_3 = client._UTCNOW() + datetime.timedelta(\n seconds=arg_2['expires_in'])\n return arg_2['access_token'], arg_3"} +{"_id": "doc_4772", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Composes the value for the 'state' parameter.\n\n Packs the current request URI and an XSRF token into an opaque string that\n can be passed to the authentication server via the 'state' parameter.\n\n Args:\n request_handler: webapp.RequestHandler, The request.\n user: google.appengine.api.users.User, The current user.\n\n Returns:\n The state value as a string.\n \"\"\"\n arg_2 = arg_0.request.url\n arg_3 = xsrfutil.generate_token(xsrf_secret_key(), arg_1.user_id(),\n action_id=str(arg_2))\n return arg_2 + ':' + arg_3"} +{"_id": "doc_4773", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None, arg_3=None):\n \"\"\"Creates an OAuth2Decorator populated from a clientsecrets file.\n\n Args:\n filename: string, File name of client secrets.\n scope: string or list of strings, scope(s) of the credentials being\n requested.\n message: string, A friendly string to display to the user if the\n clientsecrets file is missing or invalid. The message may\n contain HTML and will be presented on the web interface for\n any method that uses the decorator.\n cache: An optional cache service client that implements get() and set()\n methods. 
See clientsecrets.loadfile() for details.\n\n Returns: An OAuth2Decorator\n \"\"\"\n return OAuth2DecoratorFromClientSecrets(arg_0, arg_1,\n arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_4774", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the email for the current service account.\n\n Returns:\n string, The email associated with the Google App Engine\n service account.\n \"\"\"\n if arg_0._Func is None:\n arg_0._Func = (\n app_identity.get_service_account_name())\n return arg_0._Func"} +{"_id": "doc_4775", "title": "", "text": "def Func(arg_0):\n \"\"\"Determine whether the model of the instance is an NDB model.\n\n Returns:\n Boolean indicating whether or not the model is an NDB or DB model.\n \"\"\"\n # issubclass will fail if one of the arguments is not a class, only\n # need worry about new-style classes since ndb and db models are\n # new-style\n if isinstance(arg_0._model, type):\n if _NDB_MODEL is not None and issubclass(arg_0._model, _NDB_MODEL):\n return True\n elif issubclass(arg_0._model, db.Model):\n return False\n\n raise TypeError(\n 'Model class not an NDB or DB model: {0}.'.format(arg_0._model))"} +{"_id": "doc_4776", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieve entity from datastore.\n\n Uses a different model method for db or ndb models.\n\n Returns:\n Instance of the model corresponding to the current storage object\n and stored using the key name of the storage object.\n \"\"\"\n if arg_0._is_ndb():\n return arg_0._model.get_by_id(arg_0._key_name)\n else:\n return arg_0._model.get_by_key_name(arg_0._key_name)"} +{"_id": "doc_4777", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete entity from datastore.\n\n Attempts to delete using the key_name stored on the object, whether or\n not the given key is in the datastore.\n \"\"\"\n if arg_0._is_ndb():\n _NDB_KEY(arg_0._model, arg_0._key_name).delete()\n else:\n arg_1 = db.Key.from_path(arg_0._model.kind(), arg_0._key_name)\n db.delete(arg_1)"} +{"_id": "doc_4778", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write a Credentials to the datastore.\n\n Args:\n credentials: Credentials, the credentials to store.\n \"\"\"\n arg_2 = arg_0._model.get_or_insert(arg_0._key_name)\n setattr(arg_2, arg_0._property_name, arg_1)\n arg_2.put()\n if arg_0._cache:\n arg_0._cache.set(arg_0._key_name, arg_1.to_json())"} +{"_id": "doc_4779", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete Credential from datastore.\"\"\"\n\n if arg_0._cache:\n arg_0._cache.delete(arg_0._key_name)\n\n arg_0._delete_entity()"} +{"_id": "doc_4780", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Decorator that starts the OAuth 2.0 dance.\n\n Starts the OAuth dance for the logged in user if they haven't already\n granted access for this application.\n\n Args:\n method: callable, to be decorated method of a webapp.RequestHandler\n instance.\n \"\"\"\n\n def check_oauth(arg_2, *arg_3, **arg_4):\n if arg_0._in_error:\n arg_0._display_error_message(arg_2)\n return\n\n arg_5 = users.get_current_user()\n # Don't use @login_decorator as this could be used in a\n # POST request.\n if not arg_5:\n arg_2.redirect(users.create_login_url(\n arg_2.request.uri))\n return\n\n arg_0._create_flow(arg_2)\n\n # Store the request URI in 'state' so we can use it later\n arg_0.flow.params['state'] = _build_state_value(\n arg_2, arg_5)\n arg_0.credentials = arg_0._storage_class(\n arg_0._credentials_class, None,\n arg_0._credentials_property_name, arg_5=arg_5).get()\n\n if not arg_0.has_credentials():\n return arg_2.redirect(arg_0.authorize_url())\n 
try:\n arg_9 = arg_1(arg_2, *arg_3, **arg_4)\n except client.AccessTokenRefreshError:\n return arg_2.redirect(arg_0.authorize_url())\n finally:\n arg_0.credentials = None\n return arg_9\n\n return check_oauth"} +{"_id": "doc_4781", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Decorator that sets up for OAuth 2.0 dance, but doesn't do it.\n\n Does all the setup for the OAuth dance, but doesn't initiate it.\n This decorator is useful if you want to create a page that knows\n whether or not the user has granted access to this application.\n From within a method decorated with @Func the has_credentials()\n and authorize_url() methods can be called.\n\n Args:\n method: callable, to be decorated method of a webapp.RequestHandler\n instance.\n \"\"\"\n\n def setup_oauth(arg_2, *arg_3, **arg_4):\n if arg_0._in_error:\n arg_0._display_error_message(arg_2)\n return\n\n arg_5 = users.get_current_user()\n # Don't use @login_decorator as this could be used in a\n # POST request.\n if not arg_5:\n arg_2.redirect(users.create_login_url(\n arg_2.request.uri))\n return\n\n arg_0._create_flow(arg_2)\n\n arg_0.flow.params['state'] = _build_state_value(arg_2,\n arg_5)\n arg_0.credentials = arg_0._storage_class(\n arg_0._credentials_class, None,\n arg_0._credentials_property_name, arg_5=arg_5).get()\n try:\n arg_9 = arg_1(arg_2, *arg_3, **arg_4)\n finally:\n arg_0.credentials = None\n return arg_9\n return setup_oauth"} +{"_id": "doc_4782", "title": "", "text": "def Func(arg_0):\n \"\"\"Validate parsed client secrets from a file.\n\n Args:\n clientsecrets_dict: dict, a dictionary holding the client secrets.\n\n Returns:\n tuple, a string of the client type and the information parsed\n from the file.\n \"\"\"\n arg_1 = (\n 'Invalid file format. See '\n 'https://developers.google.com/api-client-library/'\n 'python/guide/aaa_client_secrets')\n\n if arg_0 is None:\n raise InvalidClientSecretsError(arg_1)\n try:\n (arg_2, arg_3), = arg_0.items()\n except (ValueError, AttributeError):\n raise InvalidClientSecretsError(\n arg_1 + ' '\n 'Expected a JSON object with a single property for a \"web\" or '\n '\"installed\" application')\n\n if arg_2 not in VALID_CLIENT:\n raise InvalidClientSecretsError(\n 'Unknown client type: {0}.'.format(arg_2))\n\n for arg_4 in VALID_CLIENT[arg_2]['required']:\n if arg_4 not in arg_3:\n raise InvalidClientSecretsError(\n 'Missing property \"{0}\" in a client type of \"{1}\".'.format(\n arg_4, arg_2))\n for arg_4 in VALID_CLIENT[arg_2]['string']:\n if arg_3[arg_4].startswith('[['):\n raise InvalidClientSecretsError(\n 'Property \"{0}\" is not configured.'.format(arg_4))\n return arg_2, arg_3"} +{"_id": "doc_4783", "title": "", "text": "def Func():\n \"\"\"Communicate with the Developer Shell server socket.\"\"\"\n\n arg_0 = int(os.getenv(DEVSHELL_ENV, 0))\n if arg_0 == 0:\n raise NoDevshellServer()\n\n arg_1 = socket.socket()\n arg_1.connect(('localhost', arg_0))\n\n arg_2 = CREDENTIAL_INFO_REQUEST_JSON\n arg_3 = '{0}\\n{1}'.format(len(arg_2), arg_2)\n arg_1.sendall(_helpers._to_bytes(arg_3, encoding='utf-8'))\n\n arg_4 = arg_1.recv(6).decode()\n if '\\n' not in arg_4:\n raise CommunicationError('saw no newline in the first 6 bytes')\n arg_5, arg_6 = arg_4.split('\\n', 1)\n arg_7 = int(arg_5) - len(arg_6)\n if arg_7 > 0:\n arg_6 += arg_1.recv(arg_7, socket.MSG_WAITALL).decode()\n\n return CredentialInfoResponse(arg_6)"} +{"_id": "doc_4784", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Core code for a command-line application.\n\n The ``run()`` 
function is called from your application and runs\n through all the steps to obtain credentials. It takes a ``Flow``\n argument and attempts to open an authorization server page in the\n user's default web browser. The server asks the user to grant your\n application access to the user's data. If the user grants access,\n the ``run()`` function returns new credentials. The new credentials\n are also stored in the ``storage`` argument, which updates the file\n associated with the ``Storage`` object.\n\n It presumes it is run from a command-line application and supports the\n following flags:\n\n ``--auth_host_name`` (string, default: ``localhost``)\n Host name to use when running a local web server to handle\n redirects during OAuth authorization.\n\n ``--auth_host_port`` (integer, default: ``[8080, 8090]``)\n Port to use when running a local web server to handle redirects\n during OAuth authorization. Repeat this option to specify a list\n of values.\n\n ``--[no]auth_local_webserver`` (boolean, default: ``True``)\n Run a local web server to handle redirects during OAuth\n authorization.\n\n The tools module defines an ``ArgumentParser`` the already contains the\n flag definitions that ``run()`` requires. You can pass that\n ``ArgumentParser`` to your ``ArgumentParser`` constructor::\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args(argv)\n\n Args:\n flow: Flow, an OAuth 2.0 Flow to step through.\n storage: Storage, a ``Storage`` to store the credential in.\n flags: ``argparse.Namespace``, (Optional) The command-line flags. This\n is the object returned from calling ``parse_args()`` on\n ``argparse.ArgumentParser`` as described above. 
Defaults\n to ``argparser.parse_args()``.\n http: An instance of ``httplib2.Http.request`` or something that\n acts like it.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n if arg_2 is None:\n arg_2 = argparser.parse_args()\n logging.getLogger().setLevel(getattr(logging, arg_2.logging_level))\n if not arg_2.noauth_local_webserver:\n arg_4 = False\n arg_5 = 0\n for arg_6 in arg_2.auth_host_port:\n arg_5 = arg_6\n try:\n arg_7 = ClientRedirectServer((arg_2.auth_host_name, arg_6),\n ClientRedirectHandler)\n except socket.error:\n pass\n else:\n arg_4 = True\n break\n arg_2.noauth_local_webserver = not arg_4\n if not arg_4:\n print(_FAILED_START_MESSAGE)\n\n if not arg_2.noauth_local_webserver:\n arg_9 = 'http://{host}:{port}/'.format(\n host=arg_2.auth_host_name, arg_6=arg_5)\n else:\n arg_9 = client.OOB_CALLBACK_URN\n arg_0.redirect_uri = arg_9\n arg_11 = arg_0.step1_get_authorize_url()\n\n if not arg_2.noauth_local_webserver:\n import webbrowser\n webbrowser.open(arg_11, new=1, autoraise=True)\n print(_BROWSER_OPENED_MESSAGE.format(address=arg_11))\n else:\n print(_GO_TO_LINK_MESSAGE.format(address=arg_11))\n\n arg_12 = None\n if not arg_2.noauth_local_webserver:\n arg_7.handle_request()\n if 'error' in arg_7.query_params:\n sys.exit('Authentication request was rejected.')\n if 'code' in arg_7.query_params:\n arg_12 = arg_7.query_params['code']\n else:\n print('Failed to find \"code\" in the query parameters '\n 'of the redirect.')\n sys.exit('Try running with --noauth_local_webserver.')\n else:\n arg_12 = input('Enter verification code: ').strip()\n\n try:\n arg_13 = arg_0.step2_exchange(arg_12, arg_3=arg_3)\n except client.FlowExchangeError as e:\n sys.exit('Authentication has failed: {0}'.format(e))\n\n arg_1.put(arg_13)\n arg_13.set_store(arg_1)\n print('Authentication successful.')\n\n return arg_13"}
+{"_id": "doc_4785", "title": "", "text": "def Func(arg_0):\n \"\"\"Handle a GET request.\n\n Parses the query parameters and prints a message\n if the flow has completed. Note that we can't detect\n if an error occurred.\n \"\"\"\n arg_0.send_response(http_client.OK)\n arg_0.send_header('Content-type', 'text/html')\n arg_0.end_headers()\n arg_1 = urllib.parse.urlparse(arg_0.path)\n arg_2 = _helpers.parse_unique_urlencoded(arg_1.query)\n arg_0.server.query_params = arg_2\n arg_0.wfile.write(\n b'<html><head><title>Authentication Status</title></head>')\n arg_0.wfile.write(\n b'<body><p>The authentication flow has completed.</p>')\n arg_0.wfile.write(b'</body></html>')"}
+{"_id": "doc_4786", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a 'Func' as described in section 4.2 of RFC 7636\n by taking the sha256 hash of the verifier and then urlsafe\n base64-encoding it.\n\n Args:\n verifier: bytestring, representing a code_verifier as generated by\n code_verifier().\n\n Returns:\n Bytestring, representing a urlsafe base64-encoded sha256 hash digest,\n without '=' padding.\n \"\"\"\n arg_1 = hashlib.sha256(arg_0).digest()\n return base64.urlsafe_b64encode(arg_1).rstrip(b'=')"}
+{"_id": "doc_4787", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieve stored credential from the Django ORM.\n\n Returns:\n oauth2client.Credentials retrieved from the Django ORM, associated\n with the ``model``, ``key_value``->``key_name`` pair used to query\n for the model, and ``property_name`` identifying the\n ``CredentialsProperty`` field, all of which are defined in the\n constructor for this Storage object.\n\n \"\"\"\n arg_1 = {arg_0.key_name: arg_0.key_value}\n arg_2 = arg_0.model_class.objects.filter(**arg_1)\n if len(arg_2) > 0:\n arg_3 = getattr(arg_2[0], arg_0.property_name)\n if getattr(arg_3, 'set_store', None) is not None:\n arg_3.set_store(arg_0)\n return arg_3\n else:\n return None"}
+{"_id": "doc_4788", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete Credentials from the datastore.\"\"\"\n arg_1 = {arg_0.key_name: arg_0.key_value}\n arg_0.model_class.objects.filter(**arg_1).delete()"}
+{"_id": "doc_4789", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieve the credentials from the dictionary, if they exist.\n\n Returns: A :class:`oauth2client.client.OAuth2Credentials` instance.\n \"\"\"\n arg_1 = arg_0._dictionary.get(arg_0._key)\n\n if arg_1 is None:\n return None\n\n arg_2 = client.OAuth2Credentials.from_json(arg_1)\n arg_2.set_store(arg_0)\n\n return arg_2"}
+{"_id": "doc_4790", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Save the credentials to the dictionary.\n\n Args:\n credentials: A :class:`oauth2client.client.OAuth2Credentials`\n instance.\n \"\"\"\n arg_2 = arg_1.to_json()\n arg_0._dictionary[arg_0._key] = arg_2"}
+{"_id": "doc_4791", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Validates a value as a proper Flow object.\n\n Args:\n value: A value to be set on the property.\n\n Raises:\n TypeError if the value is not an instance of Flow.\n \"\"\"\n _LOGGER.info('validate: Got type %s', type(arg_1))\n if arg_1 is not None and not isinstance(arg_1, client.Flow):\n raise TypeError(\n 'Property {0} must be convertible to a flow '\n 'instance; received: {1}.'.format(arg_0._name, arg_1))"}
+{"_id": "doc_4792", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Converts our stored JSON string back to the desired type.\n\n Args:\n value: A value from the datastore to be converted to the\n desired type.\n\n Returns:\n A deserialized Credentials (or subclass) object, else None if\n the value can't be parsed.\n \"\"\"\n if not arg_1:\n return None\n try:\n # Uses the from_json method of the implied class of value\n arg_2 = client.Credentials.new_from_json(arg_1)\n except ValueError:\n arg_2 = None\n return arg_2"}
+{"_id": "doc_4793", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Looks up the flow in session to recover information about requested\n scopes.\n\n Args:\n csrf_token: The token passed in the callback request that should\n match the one previously generated and stored in the request on the\n initial authorization view.\n\n Returns:\n The OAuth2 Flow object associated 
with this flow based on the\n CSRF token.\n \"\"\"\n arg_2 = arg_1.session.get(_FLOW_KEY.format(arg_0), None)\n return None if arg_2 is None else jsonpickle.decode(arg_2)"} +{"_id": "doc_4794", "title": "", "text": "def Func(arg_0):\n \"\"\" View that handles the user's return from OAuth2 provider.\n\n This view verifies the CSRF state and OAuth authorization code, and on\n success stores the credentials obtained in the storage provider,\n and redirects to the return_url specified in the authorize view and\n stored in the session.\n\n Args:\n request: Django request.\n\n Returns:\n A redirect response back to the return_url.\n \"\"\"\n if 'error' in arg_0.GET:\n arg_1 = arg_0.GET.get(\n 'error_description', arg_0.GET.get('error', ''))\n arg_1 = html.escape(arg_1)\n return http.HttpResponseBadRequest(\n 'Authorization failed {0}'.format(arg_1))\n\n try:\n arg_2 = arg_0.GET['state']\n arg_3 = arg_0.GET['code']\n except KeyError:\n return http.HttpResponseBadRequest(\n 'Request missing state or authorization code')\n\n try:\n arg_4 = arg_0.session[_CSRF_KEY]\n except KeyError:\n return http.HttpResponseBadRequest(\n 'No existing session for this flow.')\n\n try:\n arg_5 = json.loads(arg_2)\n arg_6 = arg_5['csrf_token']\n arg_7 = arg_5['return_url']\n except (ValueError, KeyError):\n return http.HttpResponseBadRequest('Invalid state parameter.')\n\n if arg_6 != arg_4:\n return http.HttpResponseBadRequest('Invalid CSRF token.')\n\n arg_8 = _get_flow_for_token(arg_6, arg_0)\n\n if not arg_8:\n return http.HttpResponseBadRequest('Missing Oauth2 flow.')\n\n try:\n arg_9 = arg_8.step2_exchange(arg_3)\n except client.FlowExchangeError as exchange_error:\n return http.HttpResponseBadRequest(\n 'An error has occurred: {0}'.format(exchange_error))\n\n get_storage(arg_0).put(arg_9)\n\n signals.oauth2_authorized.send(sender=signals.oauth2_authorized,\n arg_0=arg_0, arg_9=arg_9)\n\n return shortcuts.redirect(arg_7)"} +{"_id": "doc_4795", "title": "", "text": "def Func(arg_0):\n \"\"\" View to start the OAuth2 Authorization flow.\n\n This view starts the OAuth2 authorization flow. If scopes is passed in\n as a GET URL parameter, it will authorize those scopes, otherwise the\n default scopes specified in settings. The return_url can also be\n specified as a GET parameter, otherwise the referer header will be\n checked, and if that isn't found it will return to the root path.\n\n Args:\n request: The Django request object.\n\n Returns:\n A redirect to Google OAuth2 Authorization.\n \"\"\"\n arg_1 = arg_0.GET.get('return_url', None)\n if not arg_1:\n arg_1 = arg_0.META.get('HTTP_REFERER', '/')\n\n arg_2 = arg_0.GET.getlist('scopes', django_util.oauth2_settings.scopes)\n # Model storage (but not session storage) requires a logged in user\n if django_util.oauth2_settings.storage_model:\n if not arg_0.user.is_authenticated():\n return redirect('{0}?next={1}'.format(\n settings.LOGIN_URL, parse.quote(arg_0.get_full_path())))\n # This checks for the case where we ended up here because of a logged\n # out user but we had credentials for it in the first place\n else:\n arg_3 = django_util.UserOAuth2(arg_0, arg_2, arg_1)\n if arg_3.has_credentials():\n return redirect(arg_1)\n\n arg_4 = _make_flow(arg_0=arg_0, arg_2=arg_2, arg_1=arg_1)\n arg_5 = arg_4.step1_get_authorize_url()\n return shortcuts.redirect(arg_5)"} +{"_id": "doc_4796", "title": "", "text": "def Func(arg_0):\n \"\"\"Create an empty file if necessary.\n\n This method will not initialize the file. 
Instead it implements a\n simple version of \"touch\" to ensure the file has been created.\n \"\"\"\n if not os.path.exists(arg_0._filename):\n arg_1 = os.umask(0o177)\n try:\n open(arg_0._filename, 'a+b').close()\n finally:\n os.umask(arg_1)"} +{"_id": "doc_4797", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Overrides ``models.Field`` method. This is used to convert\n the value from an instances of this class to bytes that can be\n inserted into the database.\n \"\"\"\n if arg_1 is None:\n return None\n else:\n return encoding.smart_text(\n base64.b64encode(jsonpickle.encode(arg_1).encode()))"} +{"_id": "doc_4798", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert the field value from the provided model to a string.\n\n Used during model serialization.\n\n Args:\n obj: db.Model, model object\n\n Returns:\n string, the serialized field value\n \"\"\"\n arg_2 = arg_0._get_val_from_obj(arg_1)\n return arg_0.get_prep_value(arg_2)"} +{"_id": "doc_4799", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Make a signed JWT.\n\n See http://self-issued.info/docs/draft-jones-json-web-token.html.\n\n Args:\n signer: crypt.Signer, Cryptographic signer.\n payload: dict, Dictionary of data to convert to JSON and then sign.\n key_id: string, (Optional) Key ID header.\n\n Returns:\n string, The JWT for the payload.\n \"\"\"\n arg_3 = {'typ': 'JWT', 'alg': 'RS256'}\n if arg_2 is not None:\n arg_3['kid'] = arg_2\n\n arg_4 = [\n _helpers._urlsafe_b64encode(_helpers._json_encode(arg_3)),\n _helpers._urlsafe_b64encode(_helpers._json_encode(arg_1)),\n ]\n arg_5 = b'.'.join(arg_4)\n\n arg_6 = arg_0.sign(arg_5)\n arg_4.append(_helpers._urlsafe_b64encode(arg_6))\n\n logger.debug(str(arg_4))\n\n return b'.'.join(arg_4)"} +{"_id": "doc_4800", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Verifies signed content using a list of certificates.\n\n Args:\n message: string or bytes, The message to verify.\n signature: string or bytes, The signature on the message.\n certs: iterable, certificates in PEM format.\n\n Raises:\n AppIdentityError: If none of the certificates can verify the message\n against the signature.\n \"\"\"\n for arg_3 in arg_2:\n arg_4 = Verifier.from_string(arg_3, is_x509_cert=True)\n if arg_4.verify(arg_0, arg_1):\n return\n\n # If we have not returned, no certificate confirms the signature.\n raise AppIdentityError('Invalid token signature')"} +{"_id": "doc_4801", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks audience field from a JWT payload.\n\n Does nothing if the passed in ``audience`` is null.\n\n Args:\n payload_dict: dict, A dictionary containing a JWT payload.\n audience: string or NoneType, an audience to check for in\n the JWT payload.\n\n Raises:\n AppIdentityError: If there is no ``'aud'`` field in the payload\n dictionary but there is an ``audience`` to check.\n AppIdentityError: If the ``'aud'`` field in the payload dictionary\n does not match the ``audience``.\n \"\"\"\n if arg_1 is None:\n return\n\n arg_2 = arg_0.get('aud')\n if arg_2 is None:\n raise AppIdentityError(\n 'No aud field in token: {0}'.format(arg_0))\n if arg_2 != arg_1:\n raise AppIdentityError('Wrong recipient, {0} != {1}: {2}'.format(\n arg_2, arg_1, arg_0))"} +{"_id": "doc_4802", "title": "", "text": "def Func(arg_0):\n \"\"\"Verifies the issued at and expiration from a JWT payload.\n\n Makes sure the current time (in UTC) falls between the issued at and\n expiration for the JWT (with some skew allowed for via\n ``CLOCK_SKEW_SECS``).\n\n Args:\n 
payload_dict: dict, A dictionary containing a JWT payload.\n\n Raises:\n AppIdentityError: If there is no ``'iat'`` field in the payload\n dictionary.\n AppIdentityError: If there is no ``'exp'`` field in the payload\n dictionary.\n AppIdentityError: If the JWT expiration is too far in the future (i.e.\n if the expiration would imply a token lifetime\n longer than what is allowed.)\n AppIdentityError: If the token appears to have been issued in the\n future (up to clock skew).\n AppIdentityError: If the token appears to have expired in the past\n (up to clock skew).\n \"\"\"\n # Get the current time to use throughout.\n arg_1 = int(time.time())\n\n # Make sure issued at and expiration are in the payload.\n arg_2 = arg_0.get('iat')\n if arg_2 is None:\n raise AppIdentityError(\n 'No iat field in token: {0}'.format(arg_0))\n arg_3 = arg_0.get('exp')\n if arg_3 is None:\n raise AppIdentityError(\n 'No exp field in token: {0}'.format(arg_0))\n\n # Make sure the expiration gives an acceptable token lifetime.\n if arg_3 >= arg_1 + MAX_TOKEN_LIFETIME_SECS:\n raise AppIdentityError(\n 'exp field too far in future: {0}'.format(arg_0))\n\n # Make sure (up to clock skew) that the token wasn't issued in the future.\n arg_4 = arg_2 - CLOCK_SKEW_SECS\n if arg_1 < arg_4:\n raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(\n arg_1, arg_4, arg_0))\n # Make sure (up to clock skew) that the token isn't already expired.\n arg_5 = arg_3 + CLOCK_SKEW_SECS\n if arg_1 > arg_5:\n raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(\n arg_1, arg_5, arg_0))"} +{"_id": "doc_4803", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Verify a JWT against public certs.\n\n See http://self-issued.info/docs/draft-jones-json-web-token.html.\n\n Args:\n jwt: string, A JWT.\n certs: dict, Dictionary where values of public keys in PEM format.\n audience: string, The audience, 'aud', that this JWT should contain. If\n None then the JWT's 'aud' parameter is not verified.\n\n Returns:\n dict, The deserialized JSON payload in the JWT.\n\n Raises:\n AppIdentityError: if any checks are failed.\n \"\"\"\n arg_0 = _helpers._to_bytes(arg_0)\n\n if arg_0.count(b'.') != 2:\n raise AppIdentityError(\n 'Wrong number of segments in token: {0}'.format(arg_0))\n\n arg_3, arg_4, arg_5 = arg_0.split(b'.')\n arg_6 = arg_3 + b'.' 
+ arg_4\n arg_5 = _helpers._urlsafe_b64decode(arg_5)\n\n # Parse token.\n arg_7 = _helpers._urlsafe_b64decode(arg_4)\n try:\n arg_8 = json.loads(_helpers._from_bytes(arg_7))\n except:\n raise AppIdentityError('Can\\'t parse token: {0}'.format(arg_7))\n\n # Verify that the signature matches the message.\n _verify_signature(arg_6, arg_5, arg_1.values())\n\n # Verify the issued at and created times in the payload.\n _verify_time_range(arg_8)\n\n # Check audience.\n _check_audience(arg_8, arg_2)\n\n return arg_8"} +{"_id": "doc_4804", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2=False, arg_3=None):\n \"\"\"Create a Func primitive\n\n Note that this is made of 6 quads, not triangles\n \"\"\"\n\n \"\"\"# Convert size to list if it isn't already\n if not isinstance(size, list):\n size = list(size)\n # If a single value was supplied use it for all 3 axes\n if len(size) == 1:\n size = [size[0], size[0], size[0]]\"\"\"\n arg_1 = util.make_list(arg_1, 3)\n if arg_0.ml_version == '1.3.4BETA':\n arg_4 = 'Box'\n else:\n arg_4 = 'Box/Cube'\n arg_5 = ''.join([\n ' \\n'.format(arg_4),\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n if isinstance(arg_0, FilterScript):\n arg_0.add_layer('Cube', change_layer=True)\n transform.scale(arg_0, value=arg_1)\n # Box is centered on origin at creation\n if not arg_2:\n transform.translate(arg_0, value=[arg_1[0]/2, arg_1[1]/2, arg_1[2]/2])\n if arg_3 is not None:\n vert_color.function(arg_0, arg_3=arg_3)\n return None"} +{"_id": "doc_4805", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2=None, arg_3=3, arg_4=None):\n \"\"\"create an Func mesh\n\n radius Radius of the sphere\n # subdivisions = Subdivision level; Number of the recursive subdivision of the\n # surface. Default is 3 (a sphere approximation composed by 1280 faces).\n # Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris\n # approximation of a sphere). Formula for number of faces: F=20*4^subdiv\n # color = specify a color name to apply vertex colors to the newly\n # created mesh\"\"\"\n if arg_2 is not None:\n arg_1 = arg_2 / 2\n arg_5 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n if isinstance(arg_0, FilterScript):\n arg_0.add_layer('Sphere', change_layer=True)\n if arg_4 is not None:\n vert_color.function(arg_0, arg_4=arg_4)\n return None"} +{"_id": "doc_4806", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2=1, arg_3=1, arg_4=1,\n arg_5=True, arg_6=False, arg_7=None):\n \"\"\"Create a box with user defined number of segments in each direction.\n\n Grid spacing is the same as its dimensions (spacing = 1) and its\n thickness is one. Intended to be used for e.g. 
deforming using functions\n or a height map (lithopanes) and can be resized after creation.\n\n Warnings: function uses layers.join\n\n top_option\n 0 open\n 1 full\n 2 simple\n bottom_option\n 0 open\n 1 full\n 2 simple\n \"\"\"\n \"\"\"# Convert size to list if it isn't already\n if not isinstance(size, list):\n size = list(size)\n # If a single value was supplied use it for all 3 axes\n if len(size) == 1:\n size = [size[0], size[0], size[0]]\"\"\"\n arg_1 = util.make_list(arg_1, 3)\n\n # Top\n grid(arg_0,\n arg_1,\n arg_2,\n arg_3)\n transform.translate(arg_0, [0, 0, arg_1[2]])\n\n # Bottom\n if arg_5:\n plane_hires_edges(\n arg_0, arg_1, arg_2, arg_3)\n else:\n layers.duplicate(arg_0)\n transform.translate(arg_0, [0, 0, -arg_1[2]])\n # Rotate to correct normals\n transform.rotate(arg_0, 'x', 180)\n transform.translate(arg_0, [0, arg_1[1], 0])\n\n # Sides\n cube_open_hires(\n arg_0=arg_0, arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_4=arg_4)\n\n # Join everything together\n layers.join(arg_0)\n # Need some tolerance on merge_vert due to rounding errors\n clean.merge_vert(arg_0, threshold=0.00002)\n if arg_6:\n transform.translate(arg_0, [-arg_1[0] / 2, -arg_1[1] / 2, -arg_1[2] / 2])\n if arg_7 is not None:\n vert_color.function(arg_0, arg_7=arg_7)\n return None"} +{"_id": "doc_4807", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Check if a variable is a list and is the correct length.\n\n If variable is not a list it will make it a list of the correct length with\n all terms identical.\n \"\"\"\n if not isinstance(arg_0, list):\n if isinstance(arg_0, tuple):\n arg_0 = list(arg_0)\n else:\n arg_0 = [arg_0]\n for arg_2 in range(1, arg_1):\n arg_0.append(arg_0[0])\n if len(arg_0) != arg_1:\n print(\n '\"%s\" has the wrong number of terms; it needs %s. Exiting ...' %\n (arg_0, arg_1))\n sys.exit(1)\n return arg_0"} +{"_id": "doc_4808", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Write filter to FilterScript object or filename\n\n Args:\n script (FilterScript object or filename str): the FilterScript object\n or script filename to write the filter to.\n filter_xml (str): the xml filter string\n\n \"\"\"\n if isinstance(arg_0, mlx.FilterScript):\n arg_0.filters.append(arg_1)\n elif isinstance(arg_0, str):\n arg_2 = open(arg_0, 'a')\n arg_2.write(arg_1)\n arg_2.close()\n else:\n print(arg_1)\n return None"} +{"_id": "doc_4809", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=0, arg_3=0,\n arg_4=False):\n \"\"\" Apply LS3 Subdivision Surface algorithm using Loop's weights.\n\n This refinement method take normals into account.\n See: Boye', S. Guennebaud, G. & Schlick, C.\n \"Least squares subdivision surfaces\"\n Computer Graphics Forum, 2010.\n\n Alternatives weighting schemes are based on the paper:\n Barthe, L. & Kobbelt, L.\n \"Subdivision scheme tuning around extraordinary vertices\"\n Computer Aided Geometric Design, 2004, 21, 561-583.\n\n The current implementation of these schemes don't handle vertices of\n valence > 12\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n iterations (int): Number of times the model is subdivided.\n loop_weight (int): Change the weights used. Allow to optimize some\n behaviours in spite of others. Valid values are:\n 0 - Loop (default)\n 1 - Enhance regularity\n 2 - Enhance continuity\n edge_threshold (float): All the edges longer than this threshold will\n be refined. 
Setting this value to zero will force a uniform\n refinement.\n selected (bool): If selected the filter is performed only on the\n selected faces.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_5 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n return None"}
{"_id": "doc_4810", "title": "", "text": "def Func(arg_0, arg_1=0.0):\n \"\"\" Merge together all the vertices that are nearer than the specified\n threshold. Like a unify duplicate vertices but with some tolerance.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n threshold (float): Merging distance. All the vertices that are closer\n than this threshold are merged together. Use very small values,\n default is zero.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_2 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_2)\n return None"}
{"_id": "doc_4811", "title": "", "text": "def Func(arg_0, arg_1=0.0):\n \"\"\" Split non-manifold vertices until it becomes two-manifold.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n vert_displacement_ratio (float): When a vertex is split it is moved\n along the average vector going from its position to the centroid\n of the FF connected faces sharing it.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_2 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_2)\n return None"}
{"_id": "doc_4812", "title": "", "text": "def Func(arg_0, arg_1=0.01, arg_2=True):\n \"\"\" Try to snap together adjacent borders that are slightly mismatched.\n\n This situation can happen on badly triangulated adjacent patches defined by\n high order surfaces. For each border vertex the filter snaps it onto the\n closest boundary edge only if it is closer than edge_length*threshold. When\n a vertex is snapped the corresponding face is split and a new vertex is\n created.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n edge_dist_ratio (float): Collapse edge when the edge / distance ratio\n is greater than this value. E.g. for default value 1000 two\n straight border edges are collapsed if the central vertex dist from\n the straight line composed by the two edges is less than a 1/1000 of\n the sum of the edges length. 
Larger values enforce that only\n vertexes very close to the line are removed.\n unify_vert (bool): If true the snap vertices are welded together.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_3 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_3)\n return None"} +{"_id": "doc_4813", "title": "", "text": "def Func(arg_0, arg_1=1.0):\n \"\"\"An alternative Func implementation that uses a geometric function.\n This is more accurate than the built-in version.\"\"\"\n \"\"\"# Convert value to list if it isn't already\n if not isinstance(value, list):\n value = list(value)\n # If a single value was supplied use it for all 3 axes\n if len(value) == 1:\n value = [value[0], value[0], value[0]]\"\"\"\n arg_1 = util.make_list(arg_1, 3)\n vert_function(arg_0,\n x_func='x*(%s)' % arg_1[0],\n y_func='y*(%s)' % arg_1[1],\n z_func='z*(%s)' % arg_1[2])\n return None"} +{"_id": "doc_4814", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=0, arg_3=0, arg_4=None,\n arg_5=None):\n \"\"\"Deform mesh around cylinder of radius and axis z\n\n y = 0 will be on the surface of radius \"radius\"\n pitch != 0 will create a helix, with distance \"pitch\" traveled in z for each rotation\n taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1\n\n \"\"\"\n \"\"\"vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius),\n y='(%s+y)*cos(x/(%s+y))' % (radius, radius),\n z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))\"\"\"\n if arg_4 is None:\n arg_4 = '-(pitch)*x/(2*pi*(radius))'\n arg_4 = arg_4.replace(\n 'pitch', str(arg_2)).replace(\n 'pi', str(math.pi)).replace(\n 'radius', str(arg_1))\n if arg_5 is None:\n arg_5 = '-(taper)*(pitch_func)'\n arg_5 = arg_5.replace(\n 'taper', str(arg_3)).replace(\n 'pitch_func', str(arg_4)).replace(\n 'pi', str(math.pi))\n\n arg_6 = '(y+(radius)+(taper_func))*sin(x/(radius))'.replace(\n 'radius', str(arg_1)).replace('taper_func', str(arg_5))\n arg_7 = '(y+(radius)+(taper_func))*cos(x/(radius))'.replace(\n 'radius', str(arg_1)).replace('taper_func', str(arg_5))\n arg_8 = 'z+(pitch_func)'.replace('pitch_func', str(arg_4))\n\n vert_function(arg_0, arg_6, arg_7, arg_8)\n return None"} +{"_id": "doc_4815", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=0, arg_3=0, arg_4=0, arg_5=True,\n arg_6=False, arg_7=None, arg_8=True):\n \"\"\"Bends mesh around cylinder of radius radius and axis z to a certain angle\n\n straight_ends: Only apply twist (pitch) over the area that is bent\n\n outside_limit_end (bool): should values outside of the Func radius_limit be considered part\n of the end (True) or the start (False)?\n \"\"\"\n if arg_7 is None:\n arg_7 = 2 * arg_1\n # TODO: add limit so Func only applies over y<2*radius; add option to set\n # larger limit\n arg_4 = math.radians(arg_4)\n arg_9 = arg_1 * arg_4\n \"\"\"vert_function(s=s, x='if(x<%s and x>-%s, (%s+y)*sin(x/%s), (%s+y)*sin(%s/%s)+(x-%s)*cos(%s/%s))'\n % (segment, segment, radius, radius, radius, segment, radius, segment, segment, radius),\n y='if(x<%s*%s/2 and x>-%s*%s/2, (%s+y)*cos(x/%s), (%s+y)*cos(%s)-(x-%s*%s)*sin(%s))'\n % (radius, angle, radius, angle, radius, radius, radius, angle/2, radius, angle/2, angle/2),\"\"\"\n arg_10 = '-(pitch)*x/(2*pi*(radius))'.replace(\n 'pitch', str(arg_2)).replace(\n 'pi', str(math.pi)).replace(\n 'radius', str(arg_1))\n arg_11 = '(taper)*(pitch_func)'.replace(\n 'taper', str(arg_3)).replace(\n 'pitch_func', str(arg_10)).replace(\n 'pi', str(math.pi))\n # y\\n',\n ' 
\\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_7)\n return None"} +{"_id": "doc_4817", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Transfer mesh colors to face colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n all_visible_layers (bool): If true the color mapping is applied to all the meshes\n \"\"\"\n arg_2 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_2)\n return None"} +{"_id": "doc_4818", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=8,\n arg_3=5, arg_4=0, arg_5=1.1,\n arg_6=1.5, arg_7=4.0,\n arg_8=8, arg_9=False, arg_10=False):\n \"\"\" This surface reconstruction algorithm creates watertight\n surfaces from oriented point sets.\n\n The filter uses the original code of Michael Kazhdan and Matthew Bolitho\n implementing the algorithm in the following paper:\n\n Michael Kazhdan, Hugues Hoppe,\n \"Screened Poisson surface reconstruction\"\n ACM Trans. Graphics, 32(3), 2013\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n visible_layer (bool): If True all the visible layers will be used for\n providing the points\n depth (int): This integer is the maximum depth of the tree that will\n be used for surface reconstruction. Running at depth d corresponds\n to solving on a voxel grid whose resolution is no larger than\n 2^d x 2^d x 2^d. Note that since the reconstructor adapts the\n octree to the sampling density, the specified reconstruction depth\n is only an upper bound. The default value for this parameter is 8.\n full_depth (int): This integer specifies the depth beyond depth the\n octree will be adapted. At coarser depths, the octree will be\n complete, containing all 2^d x 2^d x 2^d nodes. The default value\n for this parameter is 5.\n cg_depth (int): This integer is the depth up to which a\n conjugate-gradients solver will be used to solve the linear system.\n Beyond this depth Gauss-Seidel relaxation will be used. The default\n value for this parameter is 0.\n scale (float): This floating point value specifies the ratio between\n the diameter of the cube used for reconstruction and the diameter\n of the samples' bounding cube. The default value is 1.1.\n samples_per_node (float): This floating point value specifies the\n minimum number of sample points that should fall within an octree\n node as the octree construction is adapted to sampling density. For\n noise-free samples, small values in the range [1.0 - 5.0] can be\n used. For more noisy samples, larger values in the range\n [15.0 - 20.0] may be needed to provide a smoother, noise-reduced,\n reconstruction. The default value is 1.5.\n point_weight (float): This floating point value specifies the\n importance that interpolation of the point samples is given in the\n formulation of the screened Poisson equation. The results of the\n original (unscreened) Poisson Reconstruction can be obtained by\n setting this value to 0. The default value for this parameter is 4.\n iterations (int): This integer value specifies the number of\n Gauss-Seidel relaxations to be performed at each level of the\n hierarchy. The default value for this parameter is 8.\n confidence (bool): If True this tells the reconstructor to use the\n quality as confidence information; this is done by scaling the unit\n normals with the quality values. 
When the flag is not enabled, all\n normals are normalized to have unit-length prior to reconstruction.\n pre_clean (bool): If True will force a cleaning pre-pass on the data\n removing all unreferenced vertices or vertices with null normals.\n\n Layer stack:\n Creates 1 new layer 'Poisson mesh'\n Current layer is not changed\n\n MeshLab versions:\n 2016.12\n \"\"\"\n arg_11 = ''.join([\n ' \\n',\n ' \\n'.format(arg_4),\n ' \\n'.format(str(arg_9).lower()),\n ' \\n'.format(arg_2),\n ' \\n'.format(arg_3),\n ' \\n'.format(arg_8),\n ' \\n'.format(arg_7),\n ' \\n'.format(str(arg_10).lower()),\n ' \\n'.format(arg_6),\n ' \\n'.format(arg_5),\n ' \\n'.format(str(arg_1).lower()),\n ' \\n'])\n util.write_filter(arg_0, arg_11)\n if isinstance(arg_0, FilterScript):\n arg_0.add_layer('Poisson mesh', change_layer=False)\n return None"} +{"_id": "doc_4819", "title": "", "text": "def Func(arg_0, arg_1=50, arg_2=None, arg_3=None, arg_4=0.5, arg_5=True):\n \"\"\" Turn a model into a surface with Voronoi style holes in it\n\n References:\n http://meshlabstuff.blogspot.com/2009/03/creating-Func-sphere.html\n http://meshlabstuff.blogspot.com/2009/04/creating-Func-sphere-2.html\n\n Requires FilterScript object\n\n Args:\n script: the FilterScript object to write the filter to. Does not\n work with a script filename.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n\n if arg_2 is None:\n arg_2 = arg_0.current_layer()\n if arg_3 is None:\n # Current layer is currently not changed after poisson_disk is run\n sampling.poisson_disk(arg_0, sample_num=arg_1)\n arg_3 = arg_0.last_layer()\n\n vert_color.Func(arg_0, arg_2=arg_2, source_layer=arg_3, arg_5=arg_5)\n select.vert_quality(arg_0, min_quality=0.0, max_quality=arg_4)\n if arg_5:\n select.invert(arg_0)\n delete.selected(arg_0)\n smooth.laplacian(arg_0, iterations=3)\n\n return None"} +{"_id": "doc_4820", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\" Select Func the faces of the current mesh\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n faces (bool): If True the filter will select Func the faces.\n verts (bool): If True the filter will select Func the vertices.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_3 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_3)\n return None"} +{"_id": "doc_4821", "title": "", "text": "def Func(arg_0, arg_1='(fi == 0)'):\n \"\"\"Boolean function using muparser lib to perform face selection over\n current mesh.\n\n See help(mlx.muparser_ref) for muparser reference documentation.\n\n It's possible to use parenthesis, per-vertex variables and boolean operator:\n (, ), and, or, <, >, =\n It's possible to use per-face variables like attributes associated to the three\n vertices of every face.\n\n Variables (per face):\n x0, y0, z0 for first vertex; x1,y1,z1 for second vertex; x2,y2,z2 for third vertex\n nx0, ny0, nz0, nx1, ny1, nz1, etc. for vertex normals\n r0, g0, b0, a0, etc. 
for vertex color\n q0, q1, q2 for quality\n wtu0, wtv0, wtu1, wtv1, wtu2, wtv2 (per wedge texture coordinates)\n ti for face texture index (>= ML2016.12)\n vsel0, vsel1, vsel2 for vertex selection (1 yes, 0 no) (>= ML2016.12)\n fr, fg, fb, fa for face color (>= ML2016.12)\n fq for face quality (>= ML2016.12)\n fnx, fny, fnz for face normal (>= ML2016.12)\n fsel face selection (1 yes, 0 no) (>= ML2016.12)\n\n Args:\n script: the FilterScript object or script filename to write\n the filter] to.\n function (str): a boolean function that will be evaluated in order\n to select a subset of faces.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_2 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_2)\n return None"} +{"_id": "doc_4822", "title": "", "text": "def Func(arg_0, arg_1='(q < 0)', arg_2=True):\n \"\"\"Boolean function using muparser lib to perform vertex selection over current mesh.\n\n See help(mlx.muparser_ref) for muparser reference documentation.\n\n It's possible to use parenthesis, per-vertex variables and boolean operator:\n (, ), and, or, <, >, =\n It's possible to use the following per-vertex variables in the expression:\n\n Variables:\n x, y, z (coordinates)\n nx, ny, nz (normal)\n r, g, b, a (color)\n q (quality)\n rad\n vi (vertex index)\n vtu, vtv (texture coordinates)\n ti (texture index)\n vsel (is the vertex selected? 1 yes, 0 no)\n and all custom vertex attributes already defined by user.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter] to.\n function (str): a boolean function that will be evaluated in order\n to select a subset of vertices. Example: (y > 0) and (ny > 0)\n strict_face_select (bool): if True a face is selected if ALL its\n vertices are selected. If False a face is selected if at least\n one of its vertices is selected. ML v1.3.4BETA only; this is\n ignored in 2016.12. In 2016.12 only vertices are selected.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n if arg_0.ml_version == '1.3.4BETA':\n arg_3 = ''.join([\n ' \\n',\n ])\n else:\n arg_3 = ''\n\n arg_4 = ''.join([\n ' \\n',\n ' \\n',\n arg_3,\n ' \\n'])\n util.write_filter(arg_0, arg_4)\n return None"} +{"_id": "doc_4823", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2=True):\n \"\"\"Select all vertices within a cylindrical radius\n\n Args:\n radius (float): radius of the sphere\n center_pt (3 coordinate tuple or list): center point of the sphere\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n if arg_2:\n arg_3 = 'sqrt(x^2+y^2)<={}'.format(arg_1)\n else:\n arg_3 = 'sqrt(x^2+y^2)>={}'.format(arg_1)\n vert_function(arg_0, arg_3=arg_3)\n return None"} +{"_id": "doc_4824", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2=(0.0, 0.0, 0.0)):\n \"\"\"Select all vertices within a spherical radius\n\n Args:\n radius (float): radius of the sphere\n center_pt (3 coordinate tuple or list): center point of the sphere\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_3 = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)<={}'.format(\n arg_2[0], arg_2[1], arg_2[2], arg_1)\n vert_function(arg_0, arg_3=arg_3)\n return None"} +{"_id": "doc_4825", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False, arg_3=True,\n arg_4=False):\n \"\"\" Flatten all or only the visible layers into a single new mesh.\n\n Transformations are preserved. 
Existing layers can be optionally\n deleted.\n\n Args:\n script: the mlx.FilterScript object or script filename to write\n the filter to.\n merge_visible (bool): merge only visible layers\n merge_vert (bool): merge the vertices that are duplicated among\n different layers. Very useful when the layers are spliced portions\n of a single big mesh.\n delete_layer (bool): delete all the merged layers. If all layers are\n visible only a single layer will remain after the invocation of\n this filter.\n keep_unreferenced_vert (bool): Do not discard unreferenced vertices\n from source layers. Necessary for point-only layers.\n\n Layer stack:\n Creates a new layer \"Merged Mesh\"\n Changes current layer to the new layer\n Optionally deletes all other layers\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n\n Bugs:\n UV textures: not currently preserved, however will be in a future\n release. https://github.com/cnr-isti-vclab/meshlab/issues/128\n merge_visible: it is not currently possible to change the layer\n visibility from meshlabserver, however this will be possible\n in the future https://github.com/cnr-isti-vclab/meshlab/issues/123\n \"\"\"\n arg_5 = ''.Func([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n if isinstance(arg_0, mlx.FilterScript):\n arg_0.add_layer('Merged Mesh')\n if arg_3:\n # As it is not yet possible to change the layer visibility, all\n # layers will be deleted. This will be updated once layer\n # visibility is tracked.\n for arg_6 in range(arg_0.last_layer()):\n arg_0.del_layer(0)\n return None"}
{"_id": "doc_4826", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Change the current layer by specifying the new layer number.\n\n Args:\n script: the mlx.FilterScript object or script filename to write\n the filter to.\n layer_num (int): the number of the layer to Func to. Default is the\n last layer if script is a mlx.FilterScript object; if script is a\n filename the default is the first layer.\n\n Layer stack:\n Modifies current layer\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n if arg_1 is None:\n if isinstance(arg_0, mlx.FilterScript):\n arg_1 = arg_0.last_layer()\n else:\n arg_1 = 0\n arg_2 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_2)\n if isinstance(arg_0, mlx.FilterScript):\n arg_0.set_current_layer(arg_1)\n #script.layer_stack[len(self.layer_stack) - 1] = layer_num\n return None"}
{"_id": "doc_4827", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Delete all layers below the specified one.\n\n Useful for MeshLab ver 2016.12, which will only output layer 0.\n \"\"\"\n if arg_1 is None:\n arg_1 = arg_0.current_layer()\n if arg_1 != 0:\n change(arg_0, 0)\n for arg_2 in range(arg_1):\n delete(arg_0, 0)\n return None"}
{"_id": "doc_4828", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Subprocess program error handling\n\n Args:\n program_name (str): name of the subprocess program\n\n Returns:\n break_now (bool): indicate whether calling program should break out of loop\n\n \"\"\"\n print('\\nHouston, we have a problem.',\n '\\n%s did not finish successfully. 
Review the log' % arg_0,\n 'file and the input file(s) to see what went wrong.')\n print('%s command: \"%s\"' % (arg_0, arg_1))\n if arg_2 is not None:\n print('log: \"%s\"' % arg_2)\n print('Where do we go from here?')\n print(' r - retry running %s (probably after' % arg_0,\n 'you\\'ve fixed any problems with the input files)')\n print(' c - continue on with the script (probably after',\n 'you\\'ve manually re-run and generated the desired',\n 'output file(s)')\n print(' x - exit, keeping the TEMP3D files and log')\n print(' xd - exit, deleting the TEMP3D files and log')\n while True:\n arg_3 = input('Select r, c, x (default), or xd: ')\n if arg_3 not in ('r', 'c', 'x', 'xd'):\n #print('Please enter a valid option.')\n arg_3 = 'x'\n #else:\n break\n if arg_3 == 'x':\n print('Exiting ...')\n sys.exit(1)\n elif arg_3 == 'xd':\n print('Deleting TEMP3D* and log files and exiting ...')\n util.delete_all('TEMP3D*')\n if arg_2 is not None:\n os.remove(arg_2)\n sys.exit(1)\n elif arg_3 == 'c':\n print('Continuing on ...')\n arg_4 = True\n elif arg_3 == 'r':\n print('Retrying %s cmd ...' % arg_0)\n arg_4 = False\n return arg_4"} +{"_id": "doc_4829", "title": "", "text": "def Func(arg_0='TEMP3D_default.mlx', arg_1=None, arg_2=None):\n \"\"\"Create new mlx script and write opening tags.\n\n Performs special processing on stl files.\n\n If no input files are provided this will create a dummy\n file and delete it as the first filter. This works around\n the meshlab limitation that it must be provided an input\n file, even if you will be creating a mesh as the first\n filter.\n\n \"\"\"\n arg_3 = open(arg_0, 'w')\n arg_3.write(''.join(['\\n',\n '\\n']))\n arg_3.close()\n\n arg_4 = -1\n arg_5 = -1\n arg_6 = False\n\n # Process project files first\n if arg_2 is not None:\n # make a list if it isn't already\n if not isinstance(arg_2, list):\n arg_2 = [arg_2]\n for arg_7 in arg_2:\n arg_8 = ET.parse(arg_7)\n #root = tree.getroot()\n for arg_9 in arg_8.iter(tag='MLMesh'):\n arg_10 = (arg_9.attrib['filename'])\n arg_4 += 1\n arg_5 += 1\n # If the mesh file extension is stl, change to that layer and\n # run clean.merge_vert\n if os.path.splitext(arg_10)[1][1:].strip().lower() == 'stl':\n layers.change(arg_0, arg_4)\n clean.merge_vert(arg_0)\n arg_6 = True\n\n # Process separate input files next\n if arg_1 is not None:\n # make a list if it isn't already\n if not isinstance(arg_1, list):\n arg_1 = [arg_1]\n for arg_7 in arg_1:\n arg_4 += 1\n arg_5 += 1\n # If the mesh file extension is stl, change to that layer and\n # run clean.merge_vert\n if os.path.splitext(arg_7)[1][1:].strip().lower() == 'stl':\n layers.change(arg_0, arg_4)\n clean.merge_vert(arg_0)\n arg_6 = True\n\n # If some input files were stl, we need to change back to the last layer\n if arg_6:\n layers.change(arg_0, arg_5) # Change back to the last layer\n elif arg_5 == -1:\n # If no input files are provided, create a dummy file\n # with a single vertex and delete it first in the script.\n # This works around the fact that meshlabserver will\n # not run without an input file.\n arg_1 = ['TEMP3D.xyz']\n arg_11 = open(arg_1[0], 'w')\n arg_11.write('0 0 0')\n arg_11.close()\n layers.delete(arg_0)\n return arg_4, arg_5"} +{"_id": "doc_4830", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\" Add new mesh layer to the end of the stack\n\n Args:\n label (str): new label for the mesh layer\n change_layer (bool): change to the newly created layer\n \"\"\"\n arg_0.layer_stack.insert(arg_0.last_layer() + 1, arg_1)\n if arg_2:\n 
arg_0.set_current_layer(arg_0.last_layer())\n return None"} +{"_id": "doc_4831", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Delete mesh layer \"\"\"\n del arg_0.layer_stack[arg_1]\n # Adjust current layer if needed\n if arg_1 < arg_0.current_layer():\n arg_0.set_current_layer(arg_0.current_layer() - 1)\n return None"} +{"_id": "doc_4832", "title": "", "text": "def Func():\n \"\"\"Run Func script\"\"\"\n # segments = number of segments to use for circles\n arg_0 = 50\n # star_points = number of points (or sides) of the star\n arg_1 = 5\n # star_radius = radius of circle circumscribing the star\n arg_2 = 2\n # ring_thickness = thickness of the colored rings\n arg_3 = 1\n # sphere_radius = radius of sphere the shield will be deformed to\n arg_4 = 2 * (arg_2 + 3 * arg_3)\n\n # Star calculations:\n # Visually approximate a star by using multiple diamonds (i.e. scaled\n # squares) which overlap in the center. For the star calculations,\n # consider a central polygon with triangles attached to the edges, all\n # circumscribed by a circle.\n # polygon_radius = distance from center of circle to polygon edge midpoint\n arg_5 = arg_2 / \\\n (1 + math.tan(math.radians(180 / arg_1)) /\n math.tan(math.radians(90 / arg_1)))\n # width = 1/2 width of polygon edge/outer triangle bottom\n arg_6 = arg_5 * math.tan(math.radians(180 / arg_1))\n # height = height of outer triangle\n arg_7 = arg_6 / math.tan(math.radians(90 / arg_1))\n\n arg_8 = mlx.FilterScript(file_out=\"shield.ply\")\n\n # Create the colored front of the shield using several concentric\n # annuluses; combine them together and subdivide so we have more vertices\n # to give a smoother deformation later.\n mlx.create.annulus(arg_8, radius=arg_2, cir_segments=arg_0, color='blue')\n mlx.create.annulus(arg_8,\n radius1=arg_2 + arg_3,\n radius2=arg_2,\n cir_segments=arg_0,\n color='red')\n mlx.create.annulus(arg_8,\n radius1=arg_2 + 2 * arg_3,\n radius2=arg_2 + arg_3,\n cir_segments=arg_0,\n color='white')\n mlx.create.annulus(arg_8,\n radius1=arg_2 + 3 * arg_3,\n radius2=arg_2 + 2 * arg_3,\n cir_segments=arg_0,\n color='red')\n mlx.layers.join(arg_8)\n mlx.subdivide.midpoint(arg_8, iterations=2)\n\n # Create the inside surface of the shield & translate down slightly so it\n # doesn't overlap the front.\n mlx.create.annulus(arg_8,\n radius1=arg_2 + 3 * arg_3,\n cir_segments=arg_0,\n color='silver')\n mlx.transform.rotate(arg_8, axis='y', angle=180)\n mlx.transform.translate(arg_8, value=[0, 0, -0.005])\n mlx.subdivide.midpoint(arg_8, iterations=4)\n\n # Create a diamond for the center star. First create a plane, specifying\n # extra vertices to support the final deformation. The length from the\n # center of the plane to the corners should be 1 for ease of scaling, so\n # we use a side length of sqrt(2) (thanks Pythagoras!). 
Rotate the plane\n # by 45 degrees and scale it to stretch it out per the calculations above,\n # then translate it into place (including moving it up in z slightly so\n # that it doesn't overlap the shield front).\n mlx.create.grid(arg_8,\n size=math.sqrt(2),\n x_segments=10,\n y_segments=10,\n center=True,\n color='white')\n mlx.transform.rotate(arg_8, axis='z', angle=45)\n mlx.transform.scale(arg_8, value=[arg_6, arg_7, 1])\n mlx.transform.translate(arg_8, value=[0, arg_5, 0.001])\n\n # Duplicate the diamond and rotate the duplicates around, generating the\n # star.\n for arg_9 in range(1, arg_1):\n mlx.layers.duplicate(arg_8)\n mlx.transform.rotate(arg_8, axis='z', angle=360 / arg_1)\n\n # Combine everything together and deform using a spherical function.\n mlx.layers.join(arg_8)\n mlx.transform.vert_function(arg_8,\n z_func='sqrt(%s-x^2-y^2)-%s+z' %\n (arg_4**2, arg_4))\n\n # Run the script using meshlabserver and generate the model\n arg_8.run_script()\n return None"}
{"_id": "doc_4833", "title": "", "text": "def Func(arg_0, arg_1=1000, arg_2=0.0,\n arg_3=20, arg_4=False,\n arg_5=False, arg_6=False, arg_7=False,\n arg_8=0, arg_9=True, arg_10=10,\n arg_11=False, arg_12=1.0):\n \"\"\" Create a new layer populated with a point sampling of the current mesh.\n\n Samples are generated according to a Poisson-disk distribution, using the\n algorithm described in:\n\n 'Efficient and Flexible Sampling with Blue Noise Properties of Triangular Meshes'\n Massimiliano Corsini, Paolo Cignoni, Roberto Scopigno\n IEEE TVCG 2012\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n sample_num (int): The desired number of samples. The radius of the disk\n is calculated according to the sampling density.\n radius (float): If not zero this parameter overrides the previous\n parameter to allow exact radius specification.\n montecarlo_rate (int): The over-sampling rate that is used to generate\n the initial Monte Carlo samples (e.g. a value of 'K' means\n that 'K * sample_num' points will be used). The generated\n Poisson-disk samples are a subset of these initial Monte Carlo\n samples. Larger numbers slow the process but make it a bit more\n accurate.\n save_montecarlo (bool): If True, it will generate an additional Layer\n with the Monte Carlo sampling that was pruned to build the Poisson\n distribution.\n approx_geodesic_dist (bool): If True Poisson-disk distances are\n computed using an approximate geodesic distance, e.g. an Euclidean\n distance weighted by a function of the difference between the\n normals of the two points.\n subsample (bool): If True the original vertices of the base mesh are\n used as base set of points. In this case the sample_num should be\n obviously much smaller than the original vertex number. Note that\n this option is very useful in the case you want to subsample a\n dense point cloud.\n refine (bool): If True the vertices of the refine_layer mesh layer are\n used as starting vertices, and they will be utterly refined by\n adding more and more points until possible.\n refine_layer (int): Used only if refine is True.\n best_sample (bool): If True it will use a simple heuristic for choosing\n the samples. At a small cost (it can slow the process a bit) it\n usually improves the maximality of the generated sampling.\n best_sample_pool (bool): Used only if best_sample is True. It controls\n the number of attempts that it makes to get the best sample. 
It is\n reasonable that it is smaller than the Monte Carlo oversampling\n factor.\n exact_num (bool): If True it will try to do a dicotomic search for the\n best Poisson-disk radius that will generate the requested number of\n samples with a tolerance of the 0.5%. Obviously it takes much\n longer.\n radius_variance (float): The radius of the disk is allowed to vary\n between r and r*var. If this parameter is 1 the sampling is the\n same as the Poisson-disk Sampling.\n\n Layer stack:\n Creates new layer 'Poisson-disk Samples'. Current layer is NOT changed\n to the new layer (see Bugs).\n If save_montecarlo is True, creates a new layer 'Montecarlo Samples'.\n Current layer is NOT changed to the new layer (see Bugs).\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n\n Bugs:\n Current layer is NOT changed to the new layer, which is inconsistent\n with the majority of filters that create new layers.\n \"\"\"\n arg_13 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_13)\n if isinstance(arg_0, FilterScript):\n arg_0.add_layer('Poisson-disk Samples')\n if arg_4:\n arg_0.add_layer('Montecarlo Samples')\n return None"} +{"_id": "doc_4834", "title": "", "text": "def Func(arg_0, arg_1=1.0, arg_2='AVERAGE', arg_3=False):\n \"\"\" \"Create a new layer populated with a subsampling of the vertexes of the\n current mesh\n\n The subsampling is driven by a simple one-per-gridded cell strategy.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n cell_size (float): The size of the cell of the clustering grid. Smaller the cell finer the resulting mesh. For obtaining a very coarse mesh use larger values.\n strategy (enum 'AVERAGE' or 'CENTER'): <b>Average</b>: for each cell we take the average of the sample falling into. The resulting point is a new point.<br><b>Closest to center</b>: for each cell we take the sample that is closest to the center of the cell. Choosen vertices are a subset of the original ones.\n selected (bool): If true only for the filter is applied only on the selected subset of the mesh.\n\n Layer stack:\n Creates new layer 'Cluster Samples'. 
Current layer is changed to the new\n layer.\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n if arg_2.lower() == 'average':\n arg_4 = 0\n elif arg_2.lower() == 'center':\n arg_4 = 1\n\n arg_5 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n if isinstance(arg_0, FilterScript):\n arg_0.add_layer('Cluster Samples')\n return None"} +{"_id": "doc_4835", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=1024, arg_3=2, arg_4=1):\n \"\"\"Trivial Per-Triangle parameterization\n\n \"\"\"\n arg_5 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n return None"} +{"_id": "doc_4836", "title": "", "text": "def Func(arg_0, arg_1=10, arg_2=False):\n \"\"\"Voronoi Atlas parameterization\n\n \"\"\"\n arg_3 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_3)\n return None"} +{"_id": "doc_4837", "title": "", "text": "def Func(arg_0):\n \"\"\" Compute a set of topological measures over a mesh\n\n Args:\n script: the mlx.FilterScript object or script filename to write\n the filter to.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_1 = ' \\n'\n util.write_filter(arg_0, arg_1)\n if isinstance(arg_0, mlx.FilterScript):\n arg_0.parse_topology = True\n return None"} +{"_id": "doc_4838", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='1.3.4BETA', arg_3=False):\n \"\"\"Parse the ml_log file generated by the measure_topology function.\n\n Args:\n ml_log (str): MeshLab log file to parse\n log (str): filename to log output\n\n Returns:\n dict: dictionary with the following keys:\n vert_num (int): number of vertices\n edge_num (int): number of edges\n face_num (int): number of faces\n unref_vert_num (int): number or unreferenced vertices\n boundry_edge_num (int): number of boundary edges\n part_num (int): number of parts (components) in the mesh.\n manifold (bool): True if mesh is two-manifold, otherwise false.\n non_manifold_edge (int): number of non_manifold edges.\n non_manifold_vert (int): number of non-manifold verices\n genus (int or str): genus of the mesh, either a number or\n 'undefined' if the mesh is non-manifold.\n holes (int or str): number of holes in the mesh, either a number\n or 'undefined' if the mesh is non-manifold.\n\n \"\"\"\n arg_4 = {'manifold': True, 'non_manifold_E': 0, 'non_manifold_V': 0}\n with open(arg_0) as fread:\n for arg_5 in fread:\n if 'V:' in arg_5:\n arg_6 = arg_5.replace('V:', ' ').replace('E:', ' ').replace('F:', ' ').split()\n arg_4['vert_num'] = int(arg_6[0])\n arg_4['edge_num'] = int(arg_6[1])\n arg_4['face_num'] = int(arg_6[2])\n if 'Unreferenced Vertices' in arg_5:\n arg_4['unref_vert_num'] = int(arg_5.split()[2])\n if 'Boundary Edges' in arg_5:\n arg_4['boundry_edge_num'] = int(arg_5.split()[2])\n if 'Mesh is composed by' in arg_5:\n arg_4['part_num'] = int(arg_5.split()[4])\n if 'non 2-manifold mesh' in arg_5:\n arg_4['manifold'] = False\n if 'non two manifold edges' in arg_5:\n arg_4['non_manifold_edge'] = int(arg_5.split()[2])\n if 'non two manifold vertexes' in arg_5:\n arg_4['non_manifold_vert'] = int(arg_5.split()[2])\n if 'Genus is' in arg_5: # undefined or int\n arg_4['genus'] = arg_5.split()[2]\n if arg_4['genus'] != 'undefined':\n arg_4['genus'] = int(arg_4['genus'])\n if 'holes' in arg_5:\n arg_4['hole_num'] = arg_5.split()[2]\n if arg_4['hole_num'] == 'a':\n arg_4['hole_num'] = 'undefined'\n else:\n arg_4['hole_num'] = int(arg_4['hole_num'])\n for arg_7, arg_8 in 
arg_4.items():\n if arg_1 is not None:\n arg_9 = open(arg_1, 'a')\n arg_9.write('{:16} = {}\\n'.format(arg_7, arg_8))\n arg_9.close()\n elif arg_3:\n print('{:16} = {}'.format(arg_7, arg_8))\n\n return arg_4"} +{"_id": "doc_4839", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Parse the ml_log file generated by the hausdorff_distance function.\n\n Args:\n ml_log (str): MeshLab log file to parse\n log (str): filename to log output\n\n Returns:\n dict: dictionary with the following keys:\n number_points (int): number of points in mesh\n min_distance (float): minimum hausdorff distance\n max_distance (float): maximum hausdorff distance\n mean_distance (float): mean hausdorff distance\n rms_distance (float): root mean square distance\n\n \"\"\"\n arg_3 = {\"min_distance\": 0.0,\n \"max_distance\": 0.0,\n \"mean_distance\": 0.0,\n \"rms_distance\": 0.0,\n \"number_points\": 0}\n with open(arg_0) as fread:\n arg_4 = fread.readlines()\n arg_5 = \"\"\n\n for arg_6, arg_7 in enumerate(arg_4):\n arg_8 = re.match(r\"\\s*Sampled (\\d+) pts.*\", arg_7)\n if arg_8 is not None:\n arg_3[\"number_points\"] = int(arg_8.group(1))\n if 'Hausdorff Distance computed' in arg_7:\n arg_5 = arg_4[arg_6 + 2]\n\n arg_8 = re.match(r\"\\D+(\\d+\\.*\\d*)\\D+(\\d+\\.*\\d*)\\D+(\\d+\\.*\\d*)\\D+(\\d+\\.*\\d*)\", arg_5)\n arg_3[\"min_distance\"] = float(arg_8.group(1))\n arg_3[\"max_distance\"] = float(arg_8.group(2))\n arg_3[\"mean_distance\"] = float(arg_8.group(3))\n arg_3[\"rms_distance\"] = float(arg_8.group(4))\n for arg_9, arg_10 in arg_3.items():\n if arg_1 is not None:\n arg_11 = open(arg_1, 'a')\n arg_11.write('{:16} = {}\\n'.format(arg_9, arg_10))\n arg_11.close()\n elif arg_2:\n print('{:16} = {}'.format(arg_9, arg_10))\n return arg_3"} +{"_id": "doc_4840", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=1, arg_3=True):\n \"\"\" Given a Mesh 'M' and a Pointset 'P', the filter projects each vertex of\n P over M and color M according to the geodesic distance from these\n projected points. Projection and coloring are done on a per vertex\n basis.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n target_layer (int): The mesh layer whose surface is colored. For each\n vertex of this mesh we decide the color according to the following\n arguments.\n source_layer (int): The mesh layer whose vertexes are used as seed\n points for the color computation. 
These seed points are projected\n onto the target_layer mesh.\n backward (bool): If True the mesh is colored according to the distance\n from the frontier of the Func diagram induced by the\n source_layer seeds.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_4 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_4)\n return None"}
{"_id": "doc_4841", "title": "", "text": "def Func(arg_0, arg_1='sphere', arg_2=(0, 0, 0),\n arg_3=255 / 2, arg_4=255 / 2, arg_5=0.8,\n arg_6=(0, 120, 240, 0), arg_7=False):\n \"\"\" Color mesh vertices in a repeating sinusoidal rainbow pattern\n\n Sine wave follows the following equation for each color channel (RGBA):\n channel = sin(freq*increment + phase)*amplitude + center\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n direction (str): the direction that the sine wave will travel; this\n and the start_pt determine the 'increment' of the sine function.\n Valid values are:\n 'sphere' - radiate sine wave outward from start_pt (default)\n 'x' - sine wave travels along the X axis\n 'y' - sine wave travels along the Y axis\n 'z' - sine wave travels along the Z axis\n or define the increment directly using a muparser function, e.g.\n '2x + y'. In this case start_pt will not be used; include it in\n the function directly.\n start_pt (3 coordinate tuple or list): start point of the sine wave. For a\n sphere this is the center of the sphere.\n amplitude (float [0, 255], single value or 4 term tuple or list): amplitude\n of the sine wave, with range between 0-255. If a single value is\n specified it will be used for all channels, otherwise specify each\n channel individually.\n center (float [0, 255], single value or 4 term tuple or list): center\n of the sine wave, with range between 0-255. If a single value is\n specified it will be used for all channels, otherwise specify each\n channel individually.\n freq (float, single value or 4 term tuple or list): frequency of the sine\n wave. If a single value is specified it will be used for all channels,\n otherwise specify each channel individually.\n phase (float [0, 360], single value or 4 term tuple or list): phase\n of the sine wave in degrees, with range between 0-360. 
If a single\n value is specified it will be used for all channels, otherwise specify\n each channel individually.\n alpha (bool): if False the alpha channel will be set to 255 (full opacity).\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_2 = util.make_list(arg_2, 3)\n arg_3 = util.make_list(arg_3, 4)\n arg_4 = util.make_list(arg_4, 4)\n arg_5 = util.make_list(arg_5, 4)\n arg_6 = util.make_list(arg_6, 4)\n\n if arg_1.lower() == 'sphere':\n arg_8 = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)'.format(\n arg_2[0], arg_2[1], arg_2[2])\n elif arg_1.lower() == 'x':\n arg_8 = 'x - {}'.format(arg_2[0])\n elif arg_1.lower() == 'y':\n arg_8 = 'y - {}'.format(arg_2[1])\n elif arg_1.lower() == 'z':\n arg_8 = 'z - {}'.format(arg_2[2])\n else:\n arg_8 = arg_1\n\n arg_9 = '{a}*sin({f}*{i} + {p}) + {c}'.format(\n f=arg_5[0], i=arg_8, p=math.radians(arg_6[0]),\n a=arg_3[0], c=arg_4[0])\n arg_10 = '{a}*sin({f}*{i} + {p}) + {c}'.format(\n f=arg_5[1], i=arg_8, p=math.radians(arg_6[1]),\n a=arg_3[1], c=arg_4[1])\n arg_11 = '{a}*sin({f}*{i} + {p}) + {c}'.format(\n f=arg_5[2], i=arg_8, p=math.radians(arg_6[2]),\n a=arg_3[2], c=arg_4[2])\n if arg_7:\n arg_12 = '{a}*sin({f}*{i} + {p}) + {c}'.format(\n f=arg_5[3], i=arg_8, p=math.radians(arg_6[3]),\n a=arg_3[3], c=arg_4[3])\n else:\n arg_12 = 255\n\n function(arg_0, red=arg_9, green=arg_10, blue=arg_11,\n arg_7=arg_12)\n return None"} +{"_id": "doc_4842", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"muparser atan2 function\n\n Implements an atan2(y,x) function for older muparser versions (<2.1.0);\n atan2 was added as a built-in function in muparser 2.1.0\n\n Args:\n y (str): y argument of the atan2(y,x) function\n x (str): x argument of the atan2(y,x) function\n\n Returns:\n A muparser string that calculates atan2(y,x)\n \"\"\"\n return 'if((x)>0, atan((y)/(x)), if(((x)<0) and ((y)>=0), atan((y)/(x))+pi, if(((x)<0) and ((y)<0), atan((y)/(x))-pi, if(((x)==0) and ((y)>0), pi/2, if(((x)==0) and ((y)<0), -pi/2, 0)))))'.replace(\n 'pi', str(math.pi)).replace('y', arg_0).replace('x', arg_1)"} +{"_id": "doc_4843", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"muparser cross product function\n\n Compute the cross product of two 3x1 vectors\n\n Args:\n u (list or tuple of 3 strings): first vector\n v (list or tuple of 3 strings): second vector\n Returns:\n A list containing a muparser string of the cross product\n \"\"\"\n \"\"\"\n i = u[1]*v[2] - u[2]*v[1]\n j = u[2]*v[0] - u[0]*v[2]\n k = u[0]*v[1] - u[1]*v[0]\n \"\"\"\n\n arg_2 = '(({u1})*({v2}) - ({u2})*({v1}))'.format(u1=arg_0[1], u2=arg_0[2], v1=arg_1[1], v2=arg_1[2])\n arg_3 = '(({u2})*({v0}) - ({u0})*({v2}))'.format(u0=arg_0[0], u2=arg_0[2], v0=arg_1[0], v2=arg_1[2])\n arg_4 = '(({u0})*({v1}) - ({u1})*({v0}))'.format(u0=arg_0[0], u1=arg_0[1], v0=arg_1[0], v1=arg_1[1])\n return [arg_2, arg_3, arg_4]"} +{"_id": "doc_4844", "title": "", "text": "def Func(arg_0, arg_1='radius', arg_2='x^2 + y^2'):\n \"\"\" Add a new Per-Vertex scalar attribute to current mesh and fill it with\n the defined function.\n\n The specified name can be used in other filter functions.\n\n It's possible to use parenthesis, per-vertex variables and boolean operator:\n (, ), and, or, <, >, =\n It's possible to use the following per-vertex variables in the expression:\n\n Variables:\n x, y, z (coordinates)\n nx, ny, nz (normal)\n r, g, b, a (color)\n q (quality)\n rad\n vi (vertex index)\n ?vtu, vtv (texture coordinates)\n ?ti (texture index)\n ?vsel (is the vertex selected? 
1 yes, 0 no)\n and all custom vertex attributes already defined by user.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter] to.\n name (str): the name of new attribute. You can access attribute in\n other filters through this name.\n function (str): function to calculate custom attribute value for each\n vertex\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_3 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_3)\n return None"} +{"_id": "doc_4845", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False):\n \"\"\" Invert faces orientation, Funcping the normals of the mesh.\n\n If requested, it tries to guess the right orientation; mainly it decides to\n Func all the faces if the minimum/maximum vertexes have not outward point\n normals for a few directions. Works well for single component watertight\n objects.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n force_Func (bool): If selected, the normals will always be Funcped;\n otherwise, the filter tries to set them outside.\n selected (bool): If selected, only selected faces will be affected.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_3 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_3)\n return None"} +{"_id": "doc_4846", "title": "", "text": "def Func(arg_0, arg_1=10, arg_2=0, arg_3=False,\n arg_4=(0.0, 0.0, 0.0)):\n \"\"\" Compute the normals of the vertices of a mesh without exploiting the\n triangle connectivity, useful for dataset with no faces.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n neighbors (int): The number of neighbors used to estimate normals.\n smooth_iteration (int): The number of smoothing iteration done on the\n p used to estimate and propagate normals.\n flip (bool): Flip normals w.r.t. viewpoint. If the 'viewpoint' (i.e.\n scanner position) is known, it can be used to disambiguate normals\n orientation, so that all the normals will be oriented in the same\n direction.\n viewpoint_pos (single xyz point, tuple or list): Set the x, y, z\n coordinates of the viewpoint position.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n arg_5 = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(arg_0, arg_5)\n return None"} +{"_id": "doc_4847", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"Sort separate line segments in obj format into a continuous polyline or polylines.\n NOT FINISHED; DO NOT USE\n\n Also measures the length of each polyline\n\n Return polyline and polylineMeta (lengths)\n\n \"\"\"\n arg_2 = os.path.splitext(arg_0)[1][1:].strip().lower()\n if arg_2 != 'obj':\n print('Input file must be obj. 
Exiting ...')\n sys.exit(1)\n arg_3 = open(arg_0, 'r')\n arg_4 = True\n arg_5 = []\n arg_6 = []\n for arg_7 in arg_3:\n arg_8, arg_9, arg_10, arg_11 = arg_7.split()\n if arg_8 == 'v':\n arg_5.append(\n [util.to_float(arg_9), util.to_float(arg_10), util.to_float(arg_11)])\n elif arg_8 == 'l':\n arg_12 = arg_9\n arg_13 = arg_10\n arg_6.append([int(arg_12), int(arg_13)])\n\n arg_3.close()\n if arg_1 is not None:\n arg_14 = open(arg_1, 'a')\n #log_file.write('***Axis Aligned Bounding Results for file \"%s\":\\n' % fbasename)\n \"\"\"log_file.write('min = %s\\n' % aabb['min'])\n log_file.write('max = %s\\n' % aabb['max'])\n log_file.write('center = %s\\n' % aabb['center'])\n log_file.write('size = %s\\n' % aabb['size'])\n log_file.write('diagonal = %s\\n' % aabb['diagonal'])\"\"\"\n arg_14.close()\n # print(aabb)\n return None"} +{"_id": "doc_4848", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=arg_2):\n \"\"\"Measures mesh topology\n\n Args:\n fbasename (str): input filename.\n log (str): filename to log output\n\n Returns:\n dict: dictionary with the following keys:\n vert_num (int): number of vertices\n edge_num (int): number of edges\n face_num (int): number of faces\n unref_vert_num (int): number or unreferenced vertices\n boundry_edge_num (int): number of boundary edges\n part_num (int): number of parts (components) in the mesh.\n manifold (bool): True if mesh is two-manifold, otherwise false.\n non_manifold_edge (int): number of non_manifold edges.\n non_manifold_vert (int): number of non-manifold verices\n genus (int or str): genus of the mesh, either a number or\n 'undefined' if the mesh is non-manifold.\n holes (int or str): number of holes in the mesh, either a number\n or 'undefined' if the mesh is non-manifold.\n\n \"\"\"\n arg_3 = 'TEMP3D_Func.mlx'\n arg_4 = mlx.FilterScript(file_in=arg_0, arg_2=arg_2)\n compute.Func(arg_4)\n arg_4.save_to_file(arg_3)\n arg_4.run_script(arg_1=arg_1, script_file=arg_3)\n arg_5 = arg_4.topology\n return arg_5"} +{"_id": "doc_4849", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=arg_2):\n \"\"\"Measures mesh geometry, aabb and topology.\"\"\"\n arg_3 = 'TEMP3D_measure_gAndT.mlx'\n if arg_2 == '1.3.4BETA':\n arg_4 = 'TEMP3D_aabb.xyz'\n else:\n arg_4 = None\n\n arg_5 = mlx.FilterScript(file_in=arg_0, arg_4=arg_4, arg_2=arg_2)\n compute.measure_geometry(arg_5)\n compute.measure_topology(arg_5)\n arg_5.save_to_file(arg_3)\n arg_5.run_script(arg_1=arg_1, script_file=arg_3)\n arg_6 = arg_5.geometry\n arg_7 = arg_5.topology\n\n if arg_2 == '1.3.4BETA':\n if arg_1 is not None:\n arg_8 = open(arg_1, 'a')\n arg_8.write(\n '***Axis Aligned Bounding Results for file \"%s\":\\n' %\n arg_0)\n arg_8.close()\n arg_9 = measure_aabb(arg_4, arg_1)\n else:\n arg_9 = arg_6['aabb']\n return arg_9, arg_6, arg_7"} +{"_id": "doc_4850", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=0.0,\n arg_4=None, arg_5=0.0, arg_6=arg_6):\n \"\"\"Measure a dimension of a mesh\"\"\"\n arg_2 = arg_2.lower()\n arg_4 = arg_4.lower()\n arg_7 = 'TEMP3D_Func.mlx'\n arg_8 = 'TEMP3D_Func.xyz'\n\n arg_9 = mlx.FilterScript(file_in=arg_0, arg_8=arg_8, arg_6=arg_6)\n compute.section(arg_9, arg_2, arg_3, surface=True)\n compute.section(arg_9, arg_4, arg_5, surface=False)\n layers.delete_lower(arg_9)\n arg_9.save_to_file(arg_7)\n arg_9.run_script(arg_1=arg_1, script_file=arg_7)\n\n for arg_10 in ('x', 'y', 'z'):\n if arg_10 not in (arg_2, arg_4):\n arg_11 = arg_10\n # ord: Get number that represents letter in ASCII\n # Here we find the offset from 
'x' to determine the list reference\n # i.e. 0 for x, 1 for y, 2 for z\n arg_12 = ord(arg_11) - ord('x')\n arg_13 = measure_aabb(arg_8, arg_1)\n arg_14 = {'min': arg_13['min'][arg_12], 'max': arg_13['max'][arg_12],\n 'length': arg_13['size'][arg_12], 'axis': arg_11}\n if arg_1 is None:\n print('\\nFor file \"%s\"' % arg_0)\n print('Dimension parallel to %s with %s=%s & %s=%s:' % (arg_11, arg_2, arg_3,\n arg_4, arg_5))\n print(' Min = %s, Max = %s, Total length = %s' % (arg_14['min'],\n arg_14['max'], arg_14['length']))\n else:\n arg_15 = open(arg_1, 'a')\n arg_15.write('\\nFor file \"%s\"\\n' % arg_0)\n arg_15.write('Dimension parallel to %s with %s=%s & %s=%s:\\n' % (arg_11, arg_2, arg_3,\n arg_4, arg_5))\n arg_15.write('min = %s\\n' % arg_14['min'])\n arg_15.write('max = %s\\n' % arg_14['max'])\n arg_15.write('Total length = %s\\n' % arg_14['length'])\n arg_15.close()\n return arg_14"} +{"_id": "doc_4851", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This is a helper used by UploadSet.save to provide lowercase extensions for\n all processed files, to compare with configured extensions in the same\n case.\n\n .. versionchanged:: 0.1.4\n Filenames without extensions are no longer lowercased, only the\n extension is returned in lowercase, if an extension exists.\n\n :param filename: The filename to ensure has a lowercase extension.\n \"\"\"\n if '.' in arg_0:\n arg_1, arg_2 = os.path.splitext(arg_0)\n return arg_1 + arg_2.lower()\n # For consistency with os.path.splitext,\n # do not treat a filename without an extension as an extension.\n # That is, do not return filename.lower().\n return arg_0"} +{"_id": "doc_4852", "title": "", "text": "def Func(arg_0, arg_1=64 * 1024 * 1024):\n \"\"\"\n By default, Flask will accept uploads to an arbitrary size. While Werkzeug\n switches uploads from memory to a temporary file when they hit 500 KiB,\n it's still possible for someone to overload your disk space with a\n gigantic file.\n\n This patches the app's request class's\n `~werkzeug.BaseRequest.max_content_length` attribute so that any upload\n larger than the given size is rejected with an HTTP error.\n\n .. note::\n\n In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH`\n setting, without patching the request class. To emulate this behavior,\n you can pass `None` as the size (you must pass it explicitly). That is\n the best way to call this function, as it won't break the Flask 0.6\n functionality if it exists.\n\n .. versionchanged:: 0.1.1\n\n :param app: The app to patch the request class of.\n :param size: The maximum size to accept, in bytes. 
The default is 64 MiB.\n If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration\n setting will be used to patch.\n \"\"\"\n if arg_1 is None:\n if isinstance(arg_0.request_class.__dict__['max_content_length'],\n property):\n return\n arg_1 = arg_0.config.get('MAX_CONTENT_LENGTH')\n arg_2 = arg_0.request_class\n arg_3 = type(arg_2.__name__, (arg_2,),\n {'max_content_length': arg_1})\n arg_0.request_class = arg_3"} +{"_id": "doc_4853", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n This is a helper function for `configure_uploads` that extracts the\n configuration for a single set.\n\n :param uset: The upload set.\n :param app: The app to load the configuration from.\n :param defaults: A dict with keys `url` and `dest` from the\n `UPLOADS_DEFAULT_DEST` and `DEFAULT_UPLOADS_URL`\n settings.\n \"\"\"\n arg_3 = arg_1.config\n arg_4 = 'UPLOADED_%s_' % arg_0.name.upper()\n arg_5 = False\n if arg_2 is None:\n arg_2 = dict(dest=None, url=None)\n\n arg_6 = tuple(arg_3.get(arg_4 + 'ALLOW', ()))\n arg_7 = tuple(arg_3.get(arg_4 + 'DENY', ()))\n arg_8 = arg_3.get(arg_4 + 'DEST')\n arg_9 = arg_3.get(arg_4 + 'URL')\n\n if arg_8 is None:\n # the upload set's destination wasn't given\n if arg_0.default_dest:\n # use the \"default_dest\" callable\n arg_8 = arg_0.default_dest(arg_1)\n if arg_8 is None: # still\n # use the default dest from the config\n if arg_2['dest'] is not None:\n arg_5 = True\n arg_8 = os.path.join(arg_2['dest'], arg_0.name)\n else:\n raise RuntimeError(\"no destination for set %s\" % arg_0.name)\n\n if arg_9 is None and arg_5 and arg_2['url']:\n arg_9 = addslash(arg_2['url']) + arg_0.name + '/'\n\n return UploadConfiguration(arg_8, arg_9, arg_6, arg_7)"} +{"_id": "doc_4854", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Call this after the app has been configured. It will go through all the\n upload sets, get their configuration, and store the configuration on the\n app. It will also register the uploads module if it hasn't been set. This\n can be called multiple times with different upload sets.\n\n .. versionchanged:: 0.1.3\n The uploads module/blueprint will only be registered if it is needed\n to serve the upload sets.\n\n :param app: The `~flask.Flask` instance to get the configuration from.\n :param upload_sets: The `UploadSet` instances to configure.\n \"\"\"\n if isinstance(arg_1, UploadSet):\n arg_1 = (arg_1,)\n\n if not hasattr(arg_0, 'upload_set_config'):\n arg_0.upload_set_config = {}\n arg_3 = arg_0.upload_set_config\n arg_4 = dict(dest=arg_0.config.get('UPLOADS_DEFAULT_DEST'),\n url=arg_0.config.get('UPLOADS_DEFAULT_URL'))\n\n for arg_5 in arg_1:\n arg_6 = config_for_set(arg_5, arg_0, arg_4)\n arg_3[arg_5.name] = arg_6\n\n arg_8 = any(s.base_url is None for s in arg_3.values())\n if '_uploads' not in arg_0.blueprints and arg_8:\n arg_0.register_blueprint(uploads_mod)"} +{"_id": "doc_4855", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This gets the current Funcuration. By default, it looks up the\n current application and gets the Funcuration from there. 
But if you\n don't want to go to the full effort of setting an application, or it's\n otherwise outside of a request context, set the `_Func` attribute to\n an `UploadConfiguration` instance, then set it back to `None` when\n you're done.\n \"\"\"\n if arg_0._Func is not None:\n return arg_0._Func\n try:\n return current_app.upload_set_Func[arg_0.name]\n except AttributeError:\n raise RuntimeError(\"cannot access Funcuration outside request\")"}
{"_id": "doc_4856", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This function gets the URL a file uploaded to this set would be\n accessed at. It doesn't check whether said file exists.\n\n :param filename: The filename to return the URL for.\n \"\"\"\n arg_2 = arg_0.config.base_Func\n if arg_2 is None:\n return Func_for('_uploads.uploaded_file', setname=arg_0.name,\n arg_1=arg_1, _external=True)\n else:\n return arg_2 + arg_1"}
{"_id": "doc_4857", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n This returns the absolute Func of a file uploaded to this set. It\n doesn't actually check whether said file exists.\n\n :param filename: The filename to return the Func for.\n :param folder: The subfolder within the upload set previously used\n to save to.\n \"\"\"\n if arg_2 is not None:\n arg_3 = os.Func.join(arg_0.config.destination, arg_2)\n else:\n arg_3 = arg_0.config.destination\n return os.Func.join(arg_3, arg_1)"}
{"_id": "doc_4858", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This determines whether a specific extension is allowed. It is called\n by `file_allowed`, so if you override that but still want to check\n extensions, call back into this.\n\n :param ext: The extension to check, without the dot.\n \"\"\"\n return ((arg_1 in arg_0.config.allow) or\n (arg_1 in arg_0.extensions and arg_1 not in arg_0.config.deny))"}
{"_id": "doc_4859", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n If a file with the selected name already exists in the target folder,\n this method is called to resolve the conflict. It should return a new\n basename for the file.\n\n The default implementation splits the name and extension and adds a\n suffix to the name consisting of an underscore and a number, and tries\n that until it finds one that doesn't exist.\n\n :param target_folder: The absolute path to the target.\n :param basename: The file's original basename.\n \"\"\"\n arg_3, arg_4 = os.path.splitext(arg_2)\n arg_5 = 0\n while True:\n arg_5 = arg_5 + 1\n arg_6 = '%s_%d%s' % (arg_3, arg_5, arg_4)\n if not os.path.exists(os.path.join(arg_1, arg_6)):\n return arg_6"}
{"_id": "doc_4860", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns actual version specified in filename.\"\"\"\n with open(arg_0) as src_file:\n arg_1 = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n src_file.read(), re.M)\n if arg_1:\n return arg_1.group(1)\n raise RuntimeError('Unable to find version info.')"}
{"_id": "doc_4861", "title": "", "text": "def Func(arg_0):\n \"\"\"Removes duplicate objects.\n\n http://www.peterbe.com/plog/uniqifiers-benchmark.\n \"\"\"\n arg_1, arg_2 = set(), []\n for arg_3 in arg_0:\n arg_4 = id(arg_3)\n if arg_4 in arg_1:\n continue\n arg_1.add(arg_4)\n arg_2.append(arg_3)\n return arg_2"}
{"_id": "doc_4862", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns count difference in two collections of Python objects.\"\"\"\n arg_2 = _process_in_memory_objects(arg_0)\n arg_3 = _process_in_memory_objects(arg_1)\n arg_4 = _get_object_count_by_type(arg_2)\n arg_5 = _get_object_count_by_type(arg_3)\n return arg_4 - arg_5"}
{"_id": "doc_4863", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3): #pylint: disable=unused-argument\n \"\"\"Checks memory usage when 'line' event occur.\"\"\"\n if arg_2 == 'line' and arg_1.f_code.co_filename in arg_0.target_modules:\n arg_0._events_list.append(\n (arg_1.f_lineno, arg_0._process.memory_info().rss,\n arg_1.f_code.co_name, arg_1.f_code.co_filename))\n return arg_0.Func"}
{"_id": "doc_4864", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns processed memory usage.\"\"\"\n if arg_0._resulting_events:\n return arg_0._resulting_events\n for arg_1, (arg_2, arg_3, arg_4, arg_5) in enumerate(arg_0._events_list):\n arg_6 = float(arg_3 - arg_0.mem_overhead) / _BYTES_IN_MB\n if (arg_0._resulting_events and\n arg_0._resulting_events[-1][0] == arg_2 and\n arg_0._resulting_events[-1][2] == arg_4 and\n arg_0._resulting_events[-1][3] == arg_5 and\n arg_0._resulting_events[-1][1] < arg_6):\n arg_0._resulting_events[-1][1] = arg_6\n else:\n arg_0._resulting_events.append(\n [arg_1 + 1, arg_2, arg_6, arg_4, arg_5])\n return arg_0._resulting_events"}
{"_id": "doc_4865", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns memory overhead.\"\"\"\n arg_0.mem_overhead = (arg_0._process.memory_info().rss -\n builtins.initial_rss_size)"}
{"_id": "doc_4866", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns memory stats for a function.\"\"\"\n arg_1 = {arg_0._run_object.__code__.co_filename}\n with _CodeEventsTracker(arg_1) as prof:\n prof.compute_mem_overhead()\n arg_2 = arg_0._run_object(*arg_0._run_args, **arg_0._run_kwargs)\n return prof, arg_2"}
{"_id": "doc_4867", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns module filenames from package.\n\n Args:\n package_path: Path to Python package.\n Returns:\n A set of module filenames.\n \"\"\"\n arg_1 = set()\n for arg_2, arg_3, arg_4 in pkgutil.iter_modules(path=[arg_0]):\n arg_5 = os.path.join(arg_2.path, '%s.py' % arg_3)\n if os.path.exists(arg_5):\n arg_1.add(os.path.abspath(arg_5))\n 
return arg_1"} +{"_id": "doc_4868", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Runs function in separate process.\n\n This function is used instead of a decorator, since Python multiprocessing\n module can't serialize decorated function on all platforms.\n \"\"\"\n arg_3 = multiprocessing.Manager()\n arg_4 = arg_3.dict()\n arg_5 = ProcessWithException(\n arg_4, target=arg_0, arg_1=arg_1, arg_2=arg_2)\n arg_5.start()\n arg_5.join()\n arg_6 = arg_5.exception\n if arg_6:\n raise arg_6\n return arg_5.output"} +{"_id": "doc_4869", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Initializes profiler with a module.\"\"\"\n arg_0.profile = arg_0.profile_module\n arg_0._run_object, arg_4, arg_0._run_args = arg_1.partition(' ')\n arg_0._object_name = '%s (module)' % arg_0._run_object\n arg_0._globs = {\n '__file__': arg_0._run_object,\n '__name__': '__main__',\n '__package__': None,\n }\n arg_8 = os.path.dirname(arg_0._run_object)\n if sys.path[0] != arg_8:\n sys.path.insert(0, arg_8)\n arg_0._replace_sysargs()"} +{"_id": "doc_4870", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Initializes profiler with a function.\"\"\"\n arg_0.profile = arg_0.profile_function\n arg_0._run_object, arg_0._run_args, arg_0._run_kwargs = arg_1\n arg_6 = inspect.getsourcefile(arg_0._run_object)\n arg_0._object_name = '%s @ %s (function)' % (\n arg_0._run_object.__name__, arg_6)"} +{"_id": "doc_4871", "title": "", "text": "def Func(arg_0):\n \"\"\"Replaces sys.argv with proper args to pass to script.\"\"\"\n arg_1.argv[:] = [arg_0._run_object]\n if arg_0._run_args:\n arg_1.argv += arg_0._run_args.split()"} +{"_id": "doc_4872", "title": "", "text": "def Func(arg_0, arg_1, arg_2): #pylint: disable=unused-argument\n \"\"\"Samples current stack and adds result in self._stats.\n\n Args:\n signum: Signal that activates handler.\n frame: Frame on top of the stack when signal is handled.\n \"\"\"\n arg_3 = []\n while arg_2 and arg_2 != arg_0.base_frame:\n arg_3.append((\n arg_2.f_code.co_name,\n arg_2.f_code.co_filename,\n arg_2.f_code.co_firstlineno))\n arg_2 = arg_2.f_back\n arg_0._stats[tuple(arg_3)] += 1\n signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)"} +{"_id": "doc_4873", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns call tree.\"\"\"\n Func = {'stack': 'base', 'sampleCount': 0, 'children': []}\n for arg_2, arg_3 in arg_0._stats.items():\n arg_0._insert_stack(reversed(arg_2), arg_3, Func)\n arg_0._fill_sample_count(Func)\n if not Func['children']:\n return {}\n return arg_0._format_tree(\n Func['children'][0], Func['sampleCount'])"} +{"_id": "doc_4874", "title": "", "text": "def Func(arg_0):\n \"\"\"Runs statistical profiler on a package.\"\"\"\n with _StatProfiler() as arg_1:\n arg_1.base_frame = inspect.currentframe()\n try:\n runpy.run_path(arg_0._run_object, run_name='__main__')\n except SystemExit:\n pass\n\n arg_3 = arg_1.call_tree\n return {\n 'objectName': arg_0._object_name,\n 'sampleInterval': _SAMPLE_INTERVAL,\n 'runTime': arg_1.run_time,\n 'callStats': arg_3,\n 'totalSamples': arg_3.get('sampleCount', 0),\n 'timestamp': int(time.time())\n }"} +{"_id": "doc_4875", "title": "", "text": "def Func(arg_0):\n \"\"\"Runs statistical profiler on a module.\"\"\"\n with open(arg_0._run_object, 'rb') as srcfile, _StatProfiler() as arg_2:\n arg_1 = compile(srcfile.read(), arg_0._run_object, 'exec')\n arg_2.base_frame = inspect.currentframe()\n try:\n exec(arg_1, arg_0._globs, None)\n except SystemExit:\n pass\n\n arg_4 = arg_2.call_tree\n return {\n 'objectName': 
arg_0._object_name,\n 'sampleInterval': _SAMPLE_INTERVAL,\n 'runTime': arg_2.run_time,\n 'callStats': arg_4,\n 'totalSamples': arg_4.get('sampleCount', 0),\n 'timestamp': int(time.time())\n }"} +{"_id": "doc_4876", "title": "", "text": "def Func(arg_0):\n \"\"\"Processes collected stats for UI.\"\"\"\n arg_1 = []\n for arg_2, arg_3 in arg_0.stats.items():\n arg_4, arg_5, arg_6 = arg_2\n arg_7, arg_8, arg_9, arg_10, arg_11 = arg_3\n if arg_0.total_tt == 0:\n arg_12 = 0\n else:\n arg_12 = round(100 * (arg_10 / arg_0.total_tt), 4)\n arg_10 = round(arg_10, 4)\n arg_13 = '%s @ %s' % (arg_6, arg_4)\n arg_14 = base_profiler.hash_name(arg_13)\n arg_1.append(\n (arg_4, arg_5, arg_6, arg_10, arg_12, arg_8,\n arg_7, arg_9, arg_4, arg_14))\n return sorted(arg_1, key=operator.itemgetter(4), reverse=True)"} +{"_id": "doc_4877", "title": "", "text": "def Func(arg_0):\n \"\"\"Runs cProfile on a module.\"\"\"\n arg_1 = cProfile.Profile()\n try:\n with open(arg_0._run_object, 'rb') as srcfile:\n arg_2 = compile(srcfile.read(), arg_0._run_object, 'exec')\n arg_1.runctx(arg_2, arg_0._globs, None)\n except SystemExit:\n pass\n arg_3 = pstats.Stats(arg_1)\n arg_3.calc_callees()\n return {\n 'objectName': arg_0._object_name,\n 'callStats': arg_0._transform_stats(arg_3),\n 'totalTime': arg_3.total_tt,\n 'primitiveCalls': arg_3.prim_calls,\n 'totalCalls': arg_3.total_calls,\n 'timestamp': int(time.time())\n }"} +{"_id": "doc_4878", "title": "", "text": "def Func(arg_0):\n \"\"\"Runs cProfile on a function.\"\"\"\n arg_1 = cProfile.Profile()\n arg_1.enable()\n arg_2 = arg_0._run_object(*arg_0._run_args, **arg_0._run_kwargs)\n arg_1.disable()\n arg_3 = pstats.Stats(arg_1)\n arg_3.calc_callees()\n return {\n 'objectName': arg_0._object_name,\n 'callStats': arg_0._transform_stats(arg_3),\n 'totalTime': arg_3.total_tt,\n 'primitiveCalls': arg_3.prim_calls,\n 'totalCalls': arg_3.total_calls,\n 'result': arg_2,\n 'timestamp': int(time.time())\n }"} +{"_id": "doc_4879", "title": "", "text": "def Func():\n \"\"\"Initializes DB.\"\"\"\n with contextlib.closing(connect_to_db()) as db:\n db.cursor().executescript(DB_SCHEMA)\n db.commit()"} +{"_id": "doc_4880", "title": "", "text": "def Func():\n \"\"\"Returns all existing guestbook records.\"\"\"\n arg_0 = flask.g.db.execute(\n 'SELECT name, message FROM entry ORDER BY id DESC;')\n arg_1 = [{'name': row[0], 'message': row[1]} for row in arg_0.fetchall()]\n return jinja2.Template(LAYOUT).render(arg_1=arg_1)"} +{"_id": "doc_4881", "title": "", "text": "def Func():\n \"\"\"Adds single guestbook record.\"\"\"\n arg_0, arg_1 = flask.request.form['name'], flask.request.form['message']\n flask.g.db.execute(\n 'INSERT INTO entry (name, message) VALUES (?, ?)', (arg_0, arg_1))\n flask.g.db.commit()\n return flask.redirect('/')"} +{"_id": "doc_4882", "title": "", "text": "def Func():\n \"\"\"Handles index.html requests.\"\"\"\n arg_0 = os.path.join(\n os.path.dirname(__file__), _PROFILE_HTML)\n with io.open(arg_0, 'rb') as res_file:\n arg_1 = res_file.read()\n return arg_1, 'text/html'"} +{"_id": "doc_4883", "title": "", "text": "def Func(arg_0):\n \"\"\"Handles HTTP POST requests.\"\"\"\n arg_1 = arg_0.rfile.read(int(arg_0.headers['Content-Length']))\n arg_2 = gzip.decompress(arg_1)\n arg_0._profile_json.update(json.loads(arg_2.decode('utf-8')))\n arg_0._send_response(\n 200, headers=(('Content-type', '%s; charset=utf-8' % 'text/json'),\n ('Content-Encoding', 'gzip'),\n ('Content-Length', len(arg_1))))"} +{"_id": "doc_4884", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, 
arg_3=None):\n \"\"\"Sends HTTP response code, message and headers.\"\"\"\n arg_0.send_response(arg_1, arg_2)\n if arg_3:\n for arg_4 in arg_3:\n arg_0.send_header(*arg_4)\n arg_0.end_headers()"} +{"_id": "doc_4885", "title": "", "text": "def Func(arg_0):\n \"\"\"Fills code heatmap and execution count dictionaries.\"\"\"\n for arg_1, arg_2, arg_3 in arg_0.lines_without_stdlib:\n arg_0._execution_count[arg_1][arg_2] += 1\n arg_0._heatmap[arg_1][arg_2] += arg_3"} +{"_id": "doc_4886", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Skips lines in src_code specified by skip map.\"\"\"\n if not arg_1:\n return [['line', arg_2 + 1, arg_3] for arg_2, arg_3 in enumerate(arg_0)]\n arg_4, arg_5 = [], 0\n for arg_6, arg_7 in arg_1:\n arg_4.extend(\n ['line', arg_5 + arg_2 + 1, arg_3] for arg_2, arg_3 in enumerate(arg_0[arg_5:arg_6]))\n if (arg_4\n and arg_4[-1][0] == 'skip'): # Merge skips.\n arg_4[-1][1] += arg_7\n else:\n arg_4.append(['skip', arg_7])\n arg_5 = arg_6 + arg_7\n arg_4.extend(\n ['line', arg_5 + arg_2 + 1, arg_3] for arg_2, arg_3 in enumerate(arg_0[arg_5:]))\n return arg_4"} +{"_id": "doc_4887", "title": "", "text": "def Func(arg_0):\n \"\"\"Calculates heatmap for package.\"\"\"\n with _CodeHeatmapCalculator() as prof:\n try:\n runpy.run_path(arg_0._run_object, run_name='__main__')\n except SystemExit:\n pass\n\n arg_1 = []\n for arg_2, arg_3 in prof.heatmap.items():\n if os.path.isfile(arg_2):\n arg_1.append(\n arg_0._format_heatmap(\n arg_2, arg_3, prof.execution_count[arg_2]))\n\n arg_4 = sum(arg_3['runTime'] for arg_3 in arg_1)\n return {\n 'objectName': arg_0._run_object,\n 'runTime': arg_4,\n 'heatmaps': arg_1\n }"} +{"_id": "doc_4888", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Formats heatmap for UI.\"\"\"\n with open(arg_1) as src_file:\n arg_4 = src_file.read().split('\\n')\n arg_5 = arg_0._calc_skips(arg_2, len(arg_4))\n arg_6 = sum(time for time in arg_2.values())\n return {\n 'name': arg_1,\n 'heatmap': arg_2,\n 'executionCount': arg_3,\n 'srcCode': arg_0._skip_lines(arg_4, arg_5),\n 'runTime': arg_6\n }"} +{"_id": "doc_4889", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Runs profilers on run_object.\n\n Args:\n run_object: An object (string or tuple) for profiling.\n prof_config: A string with profilers configuration.\n verbose: True if info about running profilers should be shown.\n Returns:\n An ordered dictionary with collected stats.\n Raises:\n AmbiguousConfigurationError: when prof_config is ambiguous.\n BadOptionError: when unknown options are present in configuration.\n \"\"\"\n if len(arg_1) > len(set(arg_1)):\n raise AmbiguousConfigurationError(\n 'Profiler configuration %s is ambiguous' % arg_1)\n\n arg_3 = {opt for opt, _ in _PROFILERS}\n for arg_4 in arg_1:\n if arg_4 not in arg_3:\n raise BadOptionError('Unknown option: %s' % arg_4)\n\n arg_5 = OrderedDict()\n arg_6 = ((o, p) for o, p in _PROFILERS if o in arg_1)\n for arg_4, arg_7 in arg_6:\n arg_8 = arg_7(arg_0)\n if arg_2:\n print('Running %s...' % arg_8.__class__.__name__)\n arg_5[arg_4] = arg_8.run()\n return arg_5"} +{"_id": "doc_4890", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(), arg_3={}, arg_4='localhost', arg_5=8000): # pylint: disable=dangerous-default-value\n \"\"\"Runs profilers on a function.\n\n Args:\n func: A Python function.\n options: A string with profilers configuration (i.e. 
'cmh').\n args: func non-keyword arguments.\n kwargs: func keyword arguments.\n host: Host name to send collected data.\n port: Port number to send collected data.\n\n Returns:\n A result of func execution.\n \"\"\"\n arg_6 = Func_profilers((arg_0, arg_2, arg_3), arg_1)\n\n arg_7 = None\n for arg_8 in arg_6:\n if not arg_7:\n arg_7 = arg_6[arg_8]['result']\n del arg_6[arg_8]['result'] # Don't send result to remote host\n\n arg_9 = gzip.compress(\n json.dumps(arg_6).encode('utf-8'))\n urllib.request.urlopen('http://%s:%s' % (arg_4, arg_5), arg_9)\n return arg_7"} +{"_id": "doc_4891", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get information about a specific template.\n\n :param template_id: The unique id for the template.\n :type template_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.template_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"} +{"_id": "doc_4892", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete a specific template.\n\n :param template_id: The unique id for the template.\n :type template_id: :py:class:`str`\n \"\"\"\n arg_0.template_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1))"} +{"_id": "doc_4893", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The MD5 hash of the lowercase version of the list member's email.\n Used as subscriber_hash\n\n :param member_email: The member's email address\n :type member_email: :py:class:`str`\n :returns: The MD5 hash in hex\n :rtype: :py:class:`str`\n \"\"\"\n check_email(arg_0)\n arg_0 = arg_0.lower().encode()\n arg_1 = hashlib.md5(arg_0)\n return arg_1.hexdigest()"} +{"_id": "doc_4894", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Function that verifies that the string passed is a valid url.\n\n Original regex author Diego Perini (http://www.iport.it)\n regex ported to Python by adamrofer (https://github.com/adamrofer)\n Used under MIT license.\n\n :param url:\n :return: Nothing\n \"\"\"\n arg_1 = re.compile(\n u\"^\"\n u\"(?:(?:https?|ftp)://)\"\n u\"(?:\\S+(?::\\S*)?@)?\"\n u\"(?:\"\n u\"(?!(?:10|127)(?:\\.\\d{1,3}){3})\"\n u\"(?!(?:169\\.254|192\\.168)(?:\\.\\d{1,3}){2})\"\n u\"(?!172\\.(?:1[6-9]|2\\d|3[0-1])(?:\\.\\d{1,3}){2})\"\n u\"(?:[1-9]\\d?|1\\d\\d|2[01]\\d|22[0-3])\"\n u\"(?:\\.(?:1?\\d{1,2}|2[0-4]\\d|25[0-5])){2}\"\n u\"(?:\\.(?:[1-9]\\d?|1\\d\\d|2[0-4]\\d|25[0-4]))\"\n u\"|\"\n u\"(?:(?:[a-z\\u00a1-\\uffff0-9]-?)*[a-z\\u00a1-\\uffff0-9]+)\"\n u\"(?:\\.(?:[a-z\\u00a1-\\uffff0-9]-?)*[a-z\\u00a1-\\uffff0-9]+)*\"\n u\"(?:\\.(?:[a-z\\u00a1-\\uffff]{2,}))\"\n u\")\"\n u\"(?::\\d{2,5})?\"\n u\"(?:/\\S*)?\"\n u\"$\"\n , re.UNICODE)\n if not re.match(arg_1, arg_0):\n raise ValueError('String passed is not a valid url')\n return"} +{"_id": "doc_4895", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Given two dicts, x and y, merge them into a new dict as a shallow copy.\n\n The result only differs from `x.update(y)` in the way that it handles list\n values when both x and y have list values for the same key. 
In which case\n the returned dictionary, z, has a value according to:\n z[key] = x[key] + z[key]\n\n :param x: The first dictionary\n :type x: :py:class:`dict`\n :param y: The second dictionary\n :type y: :py:class:`dict`\n :returns: The merged dictionary\n :rtype: :py:class:`dict`\n \"\"\"\n arg_2 = arg_0.copy()\n for arg_3, arg_4 in arg_1.items():\n if isinstance(arg_4, list) and isinstance(arg_2.get(arg_3), list):\n arg_2[arg_3] += arg_4\n else:\n arg_2[arg_3] = arg_4\n return arg_2"} +{"_id": "doc_4896", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Batch subscribe or unsubscribe list members.\n\n Only the members array is required in the request body parameters.\n Within the members array, each member requires an email_address\n and either a status or status_if_new. The update_existing parameter\n will also be considered required to help prevent accidental updates\n to existing members and will default to false if not present.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"members\": array*\n [\n {\n \"email_address\": string*,\n \"status\": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending'),\n \"status_if_new\": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')\n }\n ],\n \"update_existing\": boolean*\n }\n \"\"\"\n arg_0.list_id = arg_1\n if 'members' not in arg_2:\n raise KeyError('The update must have at least one member')\n else:\n if not len(arg_2['members']) <= 500:\n raise ValueError('You may only batch sub/unsub 500 members at a time')\n for arg_3 in arg_2['members']:\n if 'email_address' not in arg_3:\n raise KeyError('Each list member must have an email_address')\n check_email(arg_3['email_address'])\n if 'status' not in arg_3 and 'status_if_new' not in arg_3:\n raise KeyError('Each list member must have either a status or a status_if_new')\n arg_4 = ['subscribed', 'unsubscribed', 'cleaned', 'pending']\n if 'status' in arg_3 and arg_3['status'] not in arg_4:\n raise ValueError('The list member status must be one of \"subscribed\", \"unsubscribed\", \"cleaned\", or '\n '\"pending\"')\n if 'status_if_new' in arg_3 and arg_3['status_if_new'] not in arg_4:\n raise ValueError('The list member status_if_new must be one of \"subscribed\", \"unsubscribed\", '\n '\"cleaned\", or \"pending\"')\n if 'update_existing' not in arg_2:\n arg_2['update_existing'] = False\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1), arg_2=arg_2)"} +{"_id": "doc_4897", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Add a new line item to an existing order.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param order_id: The id for the order in a store.\n :type order_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"id\": string*,\n \"product_id\": string*,\n \"product_variant_id\": string*,\n \"quantity\": integer*,\n \"price\": number*\n }\n \"\"\"\n arg_0.store_id = arg_1\n arg_0.order_id = arg_2\n if 'id' not in arg_3:\n raise KeyError('The order line must have an id')\n if 'product_id' not in arg_3:\n raise KeyError('The order line must have a product_id')\n if 'product_variant_id' not in arg_3:\n raise KeyError('The order line must have a product_variant_id')\n if 'quantity' not in arg_3:\n raise KeyError('The order line must have a quantity')\n if 'price' not in arg_3:\n raise 
KeyError('The order line must have a price')\n arg_4 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'orders', arg_2, 'lines'))\n if arg_4 is not None:\n arg_0.line_id = arg_4['id']\n else:\n arg_0.line_id = None\n return arg_4"} +{"_id": "doc_4898", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Get links to all other resources available in the API.\n\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n return arg_0._mc_client._Func(url=arg_0._build_path(), **arg_1)"} +{"_id": "doc_4899", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve OAuth2-based credentials to associate API calls with your\n application.\n\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"client_id\": string*,\n \"client_secret\": string*\n }\n \"\"\"\n arg_0.app_id = None\n if 'client_id' not in arg_1:\n raise KeyError('The authorized app must have a client_id')\n if 'client_secret' not in arg_1:\n raise KeyError('The authorized app must have a client_secret')\n return arg_0._mc_client._post(url=arg_0._build_path(), arg_1=arg_1)"} +{"_id": "doc_4900", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get information about a specific authorized application\n\n :param app_id: The unique id for the connected authorized application\n :type app_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.app_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"} +{"_id": "doc_4901", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add new promo rule to a store\n\n :param store_id: The store id\n :type store_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict'\n data = {\n \"id\": string*,\n \"title\": string,\n \"description\": string*,\n \"starts_at\": string,\n \"ends_at\": string,\n \"amount\": number*,\n \"type\": string*,\n \"target\": string*,\n \"enabled\": boolean,\n \"Funcd_at_foreign\": string,\n \"updated_at_foreign\": string,\n }\n \"\"\"\n arg_0.store_id = arg_1\n if 'id' not in arg_2:\n raise KeyError('The promo rule must have an id')\n if 'description' not in arg_2:\n raise KeyError('This promo rule must have a description')\n if 'amount' not in arg_2:\n raise KeyError('This promo rule must have an amount')\n if 'target' not in arg_2:\n raise KeyError('This promo rule must apply to a target (example per_item, total, or shipping')\n arg_3 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'promo-rules'), arg_2=arg_2)\n\n if arg_3 is not None:\n return arg_3"} +{"_id": "doc_4902", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get information about a specific folder used to organize campaigns.\n\n :param folder_id: The unique id for the campaign folder.\n :type folder_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.folder_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"} +{"_id": "doc_4903", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get information about an individual Automation workflow email.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n :param email_id: The unique id for the Automation workflow email.\n :type email_id: 
:py:class:`str`\n \"\"\"\n arg_0.workflow_id = arg_1\n arg_0.email_id = arg_2\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'emails', arg_2))"} +{"_id": "doc_4904", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Upload a new image or file to the File Manager.\n\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"name\": string*,\n \"file_data\": string*\n }\n \"\"\"\n if 'name' not in arg_1:\n raise KeyError('The file must have a name')\n if 'file_data' not in arg_1:\n raise KeyError('The file must have file_data')\n arg_2 = arg_0._mc_client._post(url=arg_0._build_path(), arg_1=arg_1)\n if arg_2 is not None:\n arg_0.file_id = arg_2['id']\n else:\n arg_0.file_id = None\n return arg_2"} +{"_id": "doc_4905", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get information about a specific file in the File Manager.\n\n :param file_id: The unique id for the File Manager file.\n :type file_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.file_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"} +{"_id": "doc_4906", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Update a file in the File Manager.\n\n :param file_id: The unique id for the File Manager file.\n :type file_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"name\": string*,\n \"file_data\": string*\n }\n \"\"\"\n arg_0.file_id = arg_1\n if 'name' not in arg_2:\n raise KeyError('The file must have a name')\n if 'file_data' not in arg_2:\n raise KeyError('The file must have file_data')\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1), arg_2=arg_2)"} +{"_id": "doc_4907", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove a specific file from the File Manager.\n\n :param file_id: The unique id for the File Manager file.\n :type file_id: :py:class:`str`\n \"\"\"\n arg_0.file_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1))"} +{"_id": "doc_4908", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get information about subscribers who were removed from an Automation\n workflow.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n \"\"\"\n arg_0.workflow_id = arg_1\n return arg_0._mc_client._get(url=arg_0._build_path(arg_1, 'removed-subscribers'))"} +{"_id": "doc_4909", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create a new webhook for a specific list.\n\n The documentation does not include any required request body\n parameters but the url parameter is being listed here as a required\n parameter in documentation and error-checking based on the description\n of the method\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"url\": string*\n }\n \"\"\"\n arg_0.list_id = arg_1\n if 'url' not in arg_2:\n raise KeyError('The list webhook must have a url')\n check_url(arg_2['url'])\n arg_3 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'webhooks'), arg_2=arg_2)\n if arg_3 is not None:\n arg_0.webhook_id = arg_3['id']\n else:\n arg_0.webhook_id = None\n return arg_3"} +{"_id": "doc_4910", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get information about a specific webhook.\n\n :param list_id: 
The unique id for the list.\n :type list_id: :py:class:`str`\n :param webhook_id: The unique id for the webhook.\n :type webhook_id: :py:class:`str`\n \"\"\"\n arg_0.list_id = arg_1\n arg_0.webhook_id = arg_2\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'webhooks', arg_2))"}
{"_id": "doc_4911", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Update the settings for an existing webhook.\n\n :param list_id: The unique id for the list\n :type list_id: :py:class:`str`\n :param webhook_id: The unique id for the webhook\n :type webhook_id: :py:class:`str`\n \"\"\"\n arg_0.list_id = arg_1\n arg_0.webhook_id = arg_2\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1, 'webhooks', arg_2), arg_3=arg_3)"}
{"_id": "doc_4912", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Delete a specific webhook in a list.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param webhook_id: The unique id for the webhook.\n :type webhook_id: :py:class:`str`\n \"\"\"\n arg_0.list_id = arg_1\n arg_0.webhook_id = arg_2\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'webhooks', arg_2))"}
{"_id": "doc_4913", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n returns the specified list segment.\n \"\"\"\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'segments', arg_2))"}
{"_id": "doc_4914", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Funcs an existing list segment.\n \"\"\"\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1, 'segments', arg_2), arg_3=arg_3)"}
{"_id": "doc_4915", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n removes an existing list segment from the list. This cannot be undone.\n \"\"\"\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'segments', arg_2))"}
{"_id": "doc_4916", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n adds a new segment to the list.\n \"\"\"\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'segments'), arg_2=arg_2)"}
{"_id": "doc_4917", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the metadata returned after authentication\n \"\"\"\n try:\n arg_1 = requests.get('https://login.mailchimp.com/oauth2/metadata', auth=arg_0)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n arg_1.raise_for_status()\n arg_2 = arg_1.json()\n if 'error' in arg_2:\n raise requests.exceptions.RequestException(arg_2['error'])\n return arg_2"}
{"_id": "doc_4918", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get details about an individual conversation.\n\n :param conversation_id: The unique id for the conversation.\n :type conversation_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.conversation_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"}
{"_id": "doc_4919", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, **arg_3):\n \"\"\"\n Get information about members who have unsubscribed from a specific\n campaign.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n :param get_Func: Should the query get Func results\n :type get_Func: :py:class:`bool`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n queryparams['count'] = integer\n queryparams['offset'] = integer\n \"\"\"\n arg_0.campaign_id = arg_1\n arg_0.subscriber_hash = None\n if arg_2:\n return arg_0._iterate(url=arg_0._build_path(arg_1, 'unsubscribed'), **arg_3)\n else:\n return arg_0._mc_client._get(url=arg_0._build_path(arg_1, 'unsubscribed'), **arg_3)"}
{"_id": "doc_4920", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get information about an Automation email queue.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n :param email_id: The unique id for the Automation workflow email.\n :type email_id: :py:class:`str`\n \"\"\"\n arg_0.workflow_id = arg_1\n arg_0.email_id = arg_2\n arg_0.subscriber_hash = None\n return arg_0._mc_client._get(url=arg_0._build_path(arg_1, 'emails', arg_2, 'queue'))"}
{"_id": "doc_4921", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Get information about a specific subscriber in an Automation email\n queue.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n :param email_id: The unique id for the Automation workflow email.\n :type email_id: :py:class:`str`\n :param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n :type subscriber_hash: :py:class:`str`\n \"\"\"\n arg_3 = check_subscriber_hash(arg_3)\n arg_0.workflow_id = arg_1\n arg_0.email_id = arg_2\n arg_0.subscriber_hash = arg_3\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'emails', arg_2, 'queue', arg_3))"}
{"_id": "doc_4922", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pause an RSS-Driven campaign.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n \"\"\"\n arg_0.campaign_id = arg_1\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'actions/Func'))"}
{"_id": "doc_4923", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Replicate a campaign in saved or send status.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n \"\"\"\n arg_0.campaign_id = arg_1\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'actions/Func'))"}
{"_id": "doc_4924", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Resume an RSS-Driven campaign.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n \"\"\"\n arg_0.campaign_id = arg_1\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'actions/Func'))"}
{"_id": "doc_4925", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Send a MailChimp campaign. For RSS Campaigns, the campaign will Func\n according to its schedule.
All other campaigns will Func immediately.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n \"\"\"\n arg_0.campaign_id = arg_1\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'actions/Func'))"} +{"_id": "doc_4926", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add a new customer to a store.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"id\": string*,\n \"email_address\": string*,\n \"opt_in_status\": boolean*\n }\n \"\"\"\n arg_0.store_id = arg_1\n if 'id' not in arg_2:\n raise KeyError('The store customer must have an id')\n if 'email_address' not in arg_2:\n raise KeyError('The store customer must have an email_address')\n check_email(arg_2['email_address'])\n if 'opt_in_status' not in arg_2:\n raise KeyError('The store customer must have an opt_in_status')\n if arg_2['opt_in_status'] not in [True, False]:\n raise TypeError('The opt_in_status must be True or False')\n arg_3 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'customers'), arg_2=arg_2)\n if arg_3 is not None:\n arg_0.customer_id = arg_3['id']\n else:\n arg_0.customer_id = None\n return arg_3"} +{"_id": "doc_4927", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Add or update a product variant.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param product_id: The id for the product of a store.\n :type product_id: :py:class:`str`\n :param variant_id: The id for the product variant.\n :type variant_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"id\": string*,\n \"title\": string*\n }\n \"\"\"\n arg_0.store_id = arg_1\n arg_0.product_id = arg_2\n arg_0.variant_id = arg_3\n if 'id' not in arg_4:\n raise KeyError('The product variant must have an id')\n if 'title' not in arg_4:\n raise KeyError('The product variant must have a title')\n return arg_0._mc_client._put(\n url=arg_0._build_path(arg_1, 'products', arg_2, 'variants', arg_3),\n arg_4=arg_4\n )"} +{"_id": "doc_4928", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Update a specific feedback message for a campaign.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n :param feedback_id: The unique id for the feedback message.\n :type feedback_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"message\": string*\n }\n \"\"\"\n arg_0.campaign_id = arg_1\n arg_0.feedback_id = arg_2\n if 'message' not in arg_3:\n raise KeyError('The campaign feedback must have a message')\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1, 'feedback', arg_2), arg_3=arg_3)"} +{"_id": "doc_4929", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get information about a specific merge field in a list.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param merge_id: The id for the merge field.\n :type merge_id: :py:class:`str`\n \"\"\"\n arg_0.list_id = arg_1\n arg_0.merge_id = arg_2\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'merge-fields', arg_2))"} +{"_id": "doc_4930", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get information about a specific batch webhook.\n\n :param batch_webhook_id: The unique id for the batch webhook.\n :type batch_webhook_id: :py:class:`str`\n :param 
queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.batch_webhook_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"} +{"_id": "doc_4931", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Update a webhook that will fire whenever any batch request completes\n processing.\n\n :param batch_webhook_id: The unique id for the batch webhook.\n :type batch_webhook_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"url\": string*\n }\n \"\"\"\n arg_0.batch_webhook_id = arg_1\n if 'url' not in arg_2:\n raise KeyError('The batch webhook must have a valid url')\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1), arg_2=arg_2)"} +{"_id": "doc_4932", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Add a new image to the product.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param product_id: The id for the product of a store.\n :type product_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"id\": string*,\n \"url\": string*\n }\n \"\"\"\n arg_0.store_id = arg_1\n arg_0.product_id = arg_2\n if 'id' not in arg_3:\n raise KeyError('The product image must have an id')\n if 'title' not in arg_3:\n raise KeyError('The product image must have a url')\n arg_4 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'products', arg_2, 'images'), arg_3=arg_3)\n if arg_4 is not None:\n arg_0.image_id = arg_4['id']\n else:\n arg_0.image_id = None\n return arg_4"} +{"_id": "doc_4933", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, **arg_4):\n \"\"\"\n Get information about a specific product image.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param product_id: The id for the product of a store.\n :type product_id: :py:class:`str`\n :param image_id: The id for the product image.\n :type image_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.store_id = arg_1\n arg_0.product_id = arg_2\n arg_0.image_id = arg_3\n return arg_0._mc_client._post(\n url=arg_0._build_path(arg_1, 'products', arg_2, 'images', arg_3),\n **arg_4\n )"} +{"_id": "doc_4934", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Post a new message to a conversation.\n\n :param conversation_id: The unique id for the conversation.\n :type conversation_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"from_email\": string*,\n \"read\": boolean*\n }\n \"\"\"\n arg_0.conversation_id = arg_1\n if 'from_email' not in arg_2:\n raise KeyError('The conversation message must have a from_email')\n check_email(arg_2['from_email'])\n if 'read' not in arg_2:\n raise KeyError('The conversation message must have a read')\n if arg_2['read'] not in [True, False]:\n raise TypeError('The conversation message read must be True or False')\n arg_3 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'messages'), arg_2=arg_2)\n if arg_3 is not None:\n arg_0.message_id = arg_3['id']\n else:\n arg_0.message_id = None\n return arg_3"} +{"_id": "doc_4935", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add a new order to a store.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param data: The request body parameters\n :type data: 
:py:class:`dict`\n data = {\n \"id\": string*,\n \"customer\": object*\n {\n \"'id\": string*\n },\n \"curency_code\": string*,\n \"order_total\": number*,\n \"lines\": array*\n [\n {\n \"id\": string*,\n \"product_id\": string*,\n \"product_variant_id\": string*,\n \"quantity\": integer*,\n \"price\": number*\n }\n ]\n }\n \"\"\"\n arg_0.store_id = arg_1\n if 'id' not in arg_2:\n raise KeyError('The order must have an id')\n if 'customer' not in arg_2:\n raise KeyError('The order must have a customer')\n if 'id' not in arg_2['customer']:\n raise KeyError('The order customer must have an id')\n if 'currency_code' not in arg_2:\n raise KeyError('The order must have a currency_code')\n if not re.match(r\"^[A-Z]{3}$\", arg_2['currency_code']):\n raise ValueError('The currency_code must be a valid 3-letter ISO 4217 currency code')\n if 'order_total' not in arg_2:\n raise KeyError('The order must have an order_total')\n if 'lines' not in arg_2:\n raise KeyError('The order must have at least one order line')\n for arg_3 in arg_2['lines']:\n if 'id' not in arg_3:\n raise KeyError('Each order line must have an id')\n if 'product_id' not in arg_3:\n raise KeyError('Each order line must have a product_id')\n if 'product_variant_id' not in arg_3:\n raise KeyError('Each order line must have a product_variant_id')\n if 'quantity' not in arg_3:\n raise KeyError('Each order line must have a quantity')\n if 'price' not in arg_3:\n raise KeyError('Each order line must have a price')\n arg_4 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'orders'), arg_2=arg_2)\n if arg_4 is not None:\n arg_0.order_id = arg_4['id']\n else:\n arg_0.order_id = None\n return arg_4"} +{"_id": "doc_4936", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Update tags for a specific subscriber.\n\n The documentation lists only the tags request body parameter so it is\n being documented and error-checked as if it were required based on the\n description of the method.\n\n The data list needs to include a \"status\" key. 
This determines if the\n tag should be added or removed from the user:\n\n data = {\n 'tags': [\n {'name': 'foo', 'status': 'active'},\n {'name': 'bar', 'status': 'inactive'}\n ]\n }\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n :type subscriber_hash: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"tags\": list*\n }\n \"\"\"\n arg_2 = check_subscriber_hash(arg_2)\n arg_0.list_id = arg_1\n arg_0.subscriber_hash = arg_2\n if 'tags' not in arg_3:\n raise KeyError('The list member tags must have a tag')\n arg_4 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'members', arg_2, 'tags'), arg_3=arg_3)\n return arg_4"} +{"_id": "doc_4937", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Update a specific segment in a list.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param segment_id: The unique id for the segment.\n :type segment_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"name\": string*\n }\n \"\"\"\n arg_0.list_id = arg_1\n arg_0.segment_id = arg_2\n if 'name' not in arg_3:\n raise KeyError('The list segment must have a name')\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1, 'segments', arg_2), arg_3=arg_3)"} +{"_id": "doc_4938", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Update a specific folder used to organize templates.\n\n :param folder_id: The unique id for the File Manager folder.\n :type folder_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"name\": string*\n }\n \"\"\"\n if 'name' not in arg_2:\n raise KeyError('The template folder must have a name')\n arg_0.folder_id = arg_1\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1), arg_2=arg_2)"} +{"_id": "doc_4939", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add a new member to the list.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"status\": string*, (Must be one of 'subscribed', 'unsubscribed', 'cleaned',\n 'pending', or 'transactional')\n \"email_address\": string*\n }\n \"\"\"\n arg_0.list_id = arg_1\n if 'status' not in arg_2:\n raise KeyError('The list member must have a status')\n if arg_2['status'] not in ['subscribed', 'unsubscribed', 'cleaned', 'pending', 'transactional']:\n raise ValueError('The list member status must be one of \"subscribed\", \"unsubscribed\", \"cleaned\", '\n '\"pending\", or \"transactional\"')\n if 'email_address' not in arg_2:\n raise KeyError('The list member must have an email_address')\n check_email(arg_2['email_address'])\n arg_3 = arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'members'), arg_2=arg_2)\n if arg_3 is not None:\n arg_0.subscriber_hash = arg_3['id']\n else:\n arg_0.subscriber_hash = None\n return arg_3"} +{"_id": "doc_4940", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Update information for a specific list member.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n :type subscriber_hash: :py:class:`str`\n :param data: The request body parameters\n 
:type data: :py:class:`dict`\n \"\"\"\n arg_2 = check_subscriber_hash(arg_2)\n arg_0.list_id = arg_1\n arg_0.subscriber_hash = arg_2\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1, 'members', arg_2), arg_3=arg_3)"} +{"_id": "doc_4941", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Add or update a list member.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n :type subscriber_hash: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"email_address\": string*,\n \"status_if_new\": string* (Must be one of 'subscribed',\n 'unsubscribed', 'cleaned', 'pending', or 'transactional')\n }\n \"\"\"\n arg_2 = check_subscriber_hash(arg_2)\n arg_0.list_id = arg_1\n arg_0.subscriber_hash = arg_2\n if 'email_address' not in arg_3:\n raise KeyError('The list member must have an email_address')\n check_email(arg_3['email_address'])\n if 'status_if_new' not in arg_3:\n raise KeyError('The list member must have a status_if_new')\n if arg_3['status_if_new'] not in ['subscribed', 'unsubscribed', 'cleaned', 'pending', 'transactional']:\n raise ValueError('The list member status_if_new must be one of \"subscribed\", \"unsubscribed\", \"cleaned\", '\n '\"pending\", or \"transactional\"')\n return arg_0._mc_client._put(url=arg_0._build_path(arg_1, 'members', arg_2), arg_3=arg_3)"} +{"_id": "doc_4942", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Delete a member from a list.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n :type subscriber_hash: :py:class:`str`\n \"\"\"\n arg_2 = check_subscriber_hash(arg_2)\n arg_0.list_id = arg_1\n arg_0.subscriber_hash = arg_2\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'members', arg_2))"} +{"_id": "doc_4943", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Delete permanently a member from a list.\n\n :param list_id: The unique id for the list.\n :type list_id: :py:class:`str`\n :param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n :type subscriber_hash: :py:class:`str`\n \"\"\"\n arg_2 = check_subscriber_hash(arg_2)\n arg_0.list_id = arg_1\n arg_0.subscriber_hash = arg_2\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'members', arg_2, 'actions', 'delete-permanent'))"} +{"_id": "doc_4944", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Pause an automated email.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n :param email_id: The unique id for the Automation workflow email.\n :type email_id: :py:class:`str`\n \"\"\"\n arg_0.workflow_id = arg_1\n arg_0.email_id = arg_2\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'emails', arg_2, 'actions/Func'))"} +{"_id": "doc_4945", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Start an automated email.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n :param email_id: The unique id for the Automation workflow email.\n :type email_id: :py:class:`str`\n \"\"\"\n arg_0.workflow_id = arg_1\n arg_0.email_id = arg_2\n return arg_0._mc_client._post(url=arg_0._build_path(arg_1, 'emails', arg_2, 
'actions/Func'))"} +{"_id": "doc_4946", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Removes an individual Automation workflow email.\n\n :param workflow_id: The unique id for the Automation workflow.\n :type workflow_id: :py:class:`str`\n :param email_id: The unique id for the Automation workflow email.\n :type email_id: :py:class:`str`\n \"\"\"\n\n arg_0.workflow_id = arg_1\n arg_0.email_id = arg_2\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'emails', arg_2))"} +{"_id": "doc_4947", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create a new MailChimp campaign.\n\n The ValueError raised by an invalid type in data does not mention\n 'absplit' as a potential value because the documentation indicates\n that the absplit type has been deprecated.\n\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"recipients\": object*\n {\n \"list_id\": string*\n },\n \"settings\": object*\n {\n \"subject_line\": string*,\n \"from_name\": string*,\n \"reply_to\": string*\n },\n \"variate_settings\": object* (Required if type is \"variate\")\n {\n \"winner_criteria\": string* (Must be one of \"opens\", \"clicks\", \"total_revenue\", or \"manual\")\n },\n \"rss_opts\": object* (Required if type is \"rss\")\n {\n \"feed_url\": string*,\n \"frequency\": string* (Must be one of \"daily\", \"weekly\", or \"monthly\")\n },\n \"type\": string* (Must be one of \"regular\", \"plaintext\", \"rss\", \"variate\", or \"absplit\")\n }\n \"\"\"\n if 'recipients' not in arg_1:\n raise KeyError('The campaign must have recipients')\n if 'list_id' not in arg_1['recipients']:\n raise KeyError('The campaign recipients must have a list_id')\n if 'settings' not in arg_1:\n raise KeyError('The campaign must have settings')\n if 'subject_line' not in arg_1['settings']:\n raise KeyError('The campaign settings must have a subject_line')\n if 'from_name' not in arg_1['settings']:\n raise KeyError('The campaign settings must have a from_name')\n if 'reply_to' not in arg_1['settings']:\n raise KeyError('The campaign settings must have a reply_to')\n check_email(arg_1['settings']['reply_to'])\n if 'type' not in arg_1:\n raise KeyError('The campaign must have a type')\n if not arg_1['type'] in ['regular', 'plaintext', 'rss', 'variate', 'abspilt']:\n raise ValueError('The campaign type must be one of \"regular\", \"plaintext\", \"rss\", or \"variate\"')\n if arg_1['type'] == 'variate':\n if 'variate_settings' not in arg_1:\n raise KeyError('The variate campaign must have variate_settings')\n if 'winner_criteria' not in arg_1['variate_settings']:\n raise KeyError('The campaign variate_settings must have a winner_criteria')\n if arg_1['variate_settings']['winner_criteria'] not in ['opens', 'clicks', 'total_revenue', 'manual']:\n raise ValueError('The campaign variate_settings '\n 'winner_criteria must be one of \"opens\", \"clicks\", \"total_revenue\", or \"manual\"')\n if arg_1['type'] == 'rss':\n if 'rss_opts' not in arg_1:\n raise KeyError('The rss campaign must have rss_opts')\n if 'feed_url' not in arg_1['rss_opts']:\n raise KeyError('The campaign rss_opts must have a feed_url')\n if not arg_1['rss_opts']['frequency'] in ['daily', 'weekly', 'monthly']:\n raise ValueError('The rss_opts frequency must be one of \"daily\", \"weekly\", or \"monthly\"')\n arg_2 = arg_0._mc_client._post(url=arg_0._build_path(), arg_1=arg_1)\n if arg_2 is not None:\n arg_0.campaign_id = arg_2['id']\n else:\n arg_0.campaign_id = None\n return arg_2"} +{"_id": "doc_4948", "title": 
"", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Update some or all of the settings for a specific campaign.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n :param data: The request body parameters\n :type data: :py:class:`dict`\n data = {\n \"settings\": object*\n {\n \"subject_line\": string*,\n \"from_name\": string*,\n \"reply_to\": string*\n },\n }\n \"\"\"\n arg_0.campaign_id = arg_1\n if 'settings' not in arg_2:\n raise KeyError('The campaign must have settings')\n if 'subject_line' not in arg_2['settings']:\n raise KeyError('The campaign settings must have a subject_line')\n if 'from_name' not in arg_2['settings']:\n raise KeyError('The campaign settings must have a from_name')\n if 'reply_to' not in arg_2['settings']:\n raise KeyError('The campaign settings must have a reply_to')\n check_email(arg_2['settings']['reply_to'])\n return arg_0._mc_client._patch(url=arg_0._build_path(arg_1), arg_2=arg_2)"} +{"_id": "doc_4949", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove a campaign from your MailChimp account.\n\n :param campaign_id: The unique id for the campaign.\n :type campaign_id: :py:class:`str`\n \"\"\"\n arg_0.campaign_id = arg_1\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1))"} +{"_id": "doc_4950", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Delete a cart.\n\n :param store_id: The store id.\n :type store_id: :py:class:`str`\n :param cart_id: The id for the cart.\n :type cart_id: :py:class:`str`\n :param line_id: The id for the line item of a cart.\n :type line_id: :py:class:`str`\n \"\"\"\n arg_0.store_id = arg_1\n arg_0.cart_id = arg_2\n arg_0.line_id = arg_3\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1, 'carts', arg_2, 'lines', arg_3))"} +{"_id": "doc_4951", "title": "", "text": "def Func(arg_0, arg_1=False, **arg_2):\n \"\"\"\n Get a summary of batch requests that have been made.\n\n :param get_Func: Should the query get Func results\n :type get_Func: :py:class:`bool`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n queryparams['count'] = integer\n queryparams['offset'] = integer\n \"\"\"\n arg_0.batch_id = None\n arg_0.operation_status = None\n if arg_1:\n return arg_0._iterate(url=arg_0._build_path(), **arg_2)\n else:\n return arg_0._mc_client._get(url=arg_0._build_path(), **arg_2)"} +{"_id": "doc_4952", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get the status of a batch request.\n\n :param batch_id: The unique id for the batch operation.\n :type batch_id: :py:class:`str`\n :param queryparams: The query string parameters\n queryparams['fields'] = []\n queryparams['exclude_fields'] = []\n \"\"\"\n arg_0.batch_id = arg_1\n arg_0.operation_status = None\n return arg_0._mc_client._Func(url=arg_0._build_path(arg_1), **arg_2)"} +{"_id": "doc_4953", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Policies returned from boto3 are massive, ugly, and difficult to read.\n This method flattens and reformats the policy.\n\n :param policy: Result from invoking describe_load_balancer_policies(...)\n :return: Returns a tuple containing policy_name and the reformatted policy dict.\n \"\"\"\n arg_1 = arg_0['PolicyName']\n arg_2 = {}\n arg_2['type'] = arg_0['PolicyTypeName']\n arg_3 = arg_0['PolicyAttributeDescriptions']\n\n if arg_2['type'] != 'SSLNegotiationPolicyType':\n return arg_1, arg_2\n\n arg_4 = dict()\n for arg_5 in arg_3:\n arg_4[arg_5['AttributeName']] = 
arg_5['AttributeValue']\n\n arg_2['protocols'] = dict()\n arg_2['protocols']['sslv2'] = bool(arg_4.get('Protocol-SSLv2'))\n arg_2['protocols']['sslv3'] = bool(arg_4.get('Protocol-SSLv3'))\n arg_2['protocols']['tlsv1'] = bool(arg_4.get('Protocol-TLSv1'))\n arg_2['protocols']['tlsv1_1'] = bool(arg_4.get('Protocol-TLSv1.1'))\n arg_2['protocols']['tlsv1_2'] = bool(arg_4.get('Protocol-TLSv1.2'))\n arg_2['server_defined_cipher_order'] = bool(arg_4.get('Server-Defined-Cipher-Order'))\n arg_2['reference_security_policy'] = arg_4.get('Reference-Security-Policy', None)\n\n arg_6 = [\n 'Server-Defined-Cipher-Order',\n 'Protocol-SSLv2',\n 'Protocol-SSLv3',\n 'Protocol-TLSv1',\n 'Protocol-TLSv1.1',\n 'Protocol-TLSv1.2',\n 'Reference-Security-Policy'\n ]\n\n arg_7 = []\n for arg_8 in arg_4:\n if arg_4[arg_8] == 'true' and arg_8 not in arg_6:\n arg_7.append(arg_8)\n\n arg_7.sort()\n arg_2['supported_ciphers'] = arg_7\n\n return arg_1, arg_2"} +{"_id": "doc_4954", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Retrieve key from Cache.\n\n :param key: key to look up in cache.\n :type key: ``object``\n\n :param delete_if_expired: remove value from cache if it is expired.\n Default is True.\n :type delete_if_expired: ``bool``\n\n :returns: value from cache or None\n :rtype: varies or None\n \"\"\"\n arg_0._update_cache_stats(arg_1, None)\n\n if arg_1 in arg_0._CACHE:\n (arg_3, arg_4) = arg_0._CACHE[arg_1]\n if arg_3 > arg_0._now():\n arg_0._update_cache_stats(arg_1, 'hit')\n return arg_4\n else:\n if arg_2:\n arg_0.delete(arg_1)\n arg_0._update_cache_stats(arg_1, 'expired')\n return None\n \n arg_0._update_cache_stats(arg_1, 'miss')\n return None"} +{"_id": "doc_4955", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Get access details in cache.\"\"\"\n if arg_1 in arg_0._CACHE_STATS:\n return arg_0._CACHE_STATS['access_stats'][arg_1]\n else:\n return arg_0._CACHE_STATS['access_stats']"} +{"_id": "doc_4956", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Gets the VPC Flow Logs for a VPC\"\"\"\n arg_2 = describe_flow_logs(Filters=[{\"Name\": \"resource-id\", \"Values\": [arg_0[\"id\"]]}], **arg_1)\n\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(arg_4[\"FlowLogId\"])\n\n return arg_3"} +{"_id": "doc_4957", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Gets the Classic Link details about a VPC\"\"\"\n arg_2 = {}\n\n try:\n arg_3 = describe_vpc_classic_link(VpcIds=[arg_0[\"id\"]], **arg_1)[0]\n arg_2[\"Enabled\"] = arg_3[\"ClassicLinkEnabled\"]\n\n # Check for DNS as well:\n arg_4 = describe_vpc_classic_link_dns_support(VpcIds=[arg_0[\"id\"]], **arg_1)[0]\n arg_2[\"DnsEnabled\"] = arg_4[\"ClassicLinkDnsSupported\"]\n except ClientError as e:\n # This is not supported for all regions.\n if 'UnsupportedOperation' not in str(e):\n raise e\n\n return arg_2"} +{"_id": "doc_4958", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Gets the VPC Route Tables\"\"\"\n arg_2 = describe_route_tables(Filters=[{\"Name\": \"vpc-id\", \"Values\": [arg_0[\"id\"]]}], **arg_1)\n\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(arg_4[\"RouteTableId\"])\n\n return arg_3"} +{"_id": "doc_4959", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None,\n arg_5=None):\n \"\"\"\n Private GCP client builder.\n\n :param project: Google Cloud project string.\n :type project: ``str``\n\n :param mod_name: Module name to load. Should be found in sys.path.\n :type mod_name: ``str``\n\n :param pkg_name: package name that mod_name is part of. 
Default is 'google.cloud' .\n :type pkg_name: ``str``\n\n :param key_file: Default is None.\n :type key_file: ``str`` or None\n\n :param http_auth: httplib2 authorized client. Default is None.\n :type http_auth: :class: `HTTPLib2`\n\n :param user_agent: User Agent string to use in requests. Default is None.\n :type http_auth: ``str`` or None\n\n :return: GCP client\n :rtype: ``object``\n \"\"\"\n arg_6 = None\n if arg_4 is None:\n arg_4 = _googleauth(arg_3=arg_3, arg_5=arg_5)\n try:\n # Using a relative path, so we prefix with a dot (.)\n arg_7 = importlib.import_module('.' + arg_1,\n package=arg_2)\n arg_6 = arg_7.Client(use_GAX=USE_GAX, arg_0=arg_0,\n http=arg_4)\n except ImportError as ie:\n arg_8 = 'Unable to import %s.%s' % (arg_2, arg_1)\n raise ImportError(arg_8)\n except TypeError:\n # Not all clients use gRPC\n arg_6 = arg_7.Client(arg_0=arg_0, http=arg_4)\n if arg_5 and hasattr(arg_6, 'user_agent'):\n arg_6.user_agent = arg_5\n return arg_6"} +{"_id": "doc_4960", "title": "", "text": "def Func(arg_0=None, arg_1=[], arg_2=None):\n \"\"\"\n Google http_auth helper.\n\n If key_file is not specified, default credentials will be used.\n\n If scopes is specified (and key_file), will be used instead of DEFAULT_SCOPES\n\n :param key_file: path to key file to use. Default is None\n :type key_file: ``str``\n\n :param scopes: scopes to set. Default is DEFAUL_SCOPES\n :type scopes: ``list``\n\n :param user_agent: User Agent string to use in requests. Default is None.\n :type http_auth: ``str`` or None\n\n :return: HTTPLib2 authorized client.\n :rtype: :class: `HTTPLib2`\n \"\"\"\n if arg_0:\n if not arg_1:\n arg_1 = DEFAULT_SCOPES\n arg_3 = ServiceAccountCredentials.from_json_keyfile_name(arg_0,\n arg_1=arg_1)\n else:\n arg_3 = GoogleCredentials.get_application_default()\n arg_4 = Http()\n if arg_2:\n arg_4 = set_user_agent(arg_4, arg_2)\n arg_5 = arg_3.authorize(arg_4)\n return arg_5"} +{"_id": "doc_4961", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Google build client helper.\n\n :param service: service to build client for\n :type service: ``str``\n\n :param api_version: API version to use.\n :type api_version: ``str``\n\n :param http_auth: Initialized HTTP client to use.\n :type http_auth: ``object``\n\n :return: google-python-api client initialized to use 'service'\n :rtype: ``object``\n \"\"\"\n arg_3 = build(arg_0, arg_1, http=arg_2)\n return arg_3"} +{"_id": "doc_4962", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Call decorated function for each item in project list.\n\n Note: the function 'decorated' is expected to return a value plus a dictionary of exceptions.\n\n If item in list is a dictionary, we look for a 'project' and 'key_file' entry, respectively.\n If item in list is of type string_types, we assume it is the project string. Default credentials\n will be used by the underlying client library.\n\n :param projects: list of project strings or list of dictionaries\n Example: {'project':..., 'keyfile':...}. 
Required.\n :type projects: ``list`` of ``str`` or ``list`` of ``dict``\n\n :param key_file: path on disk to keyfile, for use with all projects\n :type key_file: ``str``\n\n :returns: tuple containing a list of function output and an exceptions map\n :rtype: ``tuple of ``list``, ``dict``\n \"\"\"\n\n def decorator(arg_2):\n @wraps(arg_2)\n def decorated_function(*arg_3, **arg_4):\n arg_5 = []\n arg_6 = {}\n for arg_7 in arg_0:\n if isinstance(arg_7, string_types):\n arg_4['project'] = arg_7\n if arg_1:\n arg_4['key_file'] = arg_1\n elif isinstance(arg_7, dict):\n arg_4['project'] = arg_7['project']\n arg_4['key_file'] = arg_7['key_file']\n arg_8, arg_9 = arg_2(*arg_3, **arg_4)\n arg_5.extend(arg_8)\n arg_6.update(arg_9)\n return (arg_5, arg_6)\n\n return decorated_function\n\n return decorator"} +{"_id": "doc_4963", "title": "", "text": "def Func(arg_0):\n \"\"\"Helper to get creds out of kwargs.\"\"\"\n arg_1 = {\n 'key_file': arg_0.pop('key_file', None),\n 'http_auth': arg_0.pop('http_auth', None),\n 'project': arg_0.get('project', None),\n 'user_agent': arg_0.pop('user_agent', None),\n 'api_version': arg_0.pop('api_version', 'v1')\n }\n return (arg_1, arg_0)"} +{"_id": "doc_4964", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Manipulate connection keywords.\n \n Modifieds keywords based on connection type.\n\n There is an assumption here that the client has\n already been created and that these keywords are being\n passed into methods for interacting with various services.\n\n Current modifications:\n - if conn_type is not cloud and module is 'compute', \n then rewrite project as name.\n - if conn_type is cloud and module is 'storage',\n then remove 'project' from dict.\n\n :param conn_type: E.g. 'cloud' or 'general'\n :type conn_type: ``str``\n\n :param kwargs: Dictionary of keywords sent in by user.\n :type kwargs: ``dict``\n\n :param module_name: Name of specific module that will be loaded.\n Default is None.\n :type conn_type: ``str`` or None\n\n :returns kwargs with client and module specific changes\n :rtype: ``dict``\n \"\"\"\n if arg_0 != 'cloud' and arg_2 != 'compute':\n if 'project' in arg_1:\n arg_1['name'] = 'projects/%s' % arg_1.pop('project')\n if arg_0 == 'cloud' and arg_2 == 'storage':\n if 'project' in arg_1:\n del arg_1['project']\n return arg_1"} +{"_id": "doc_4965", "title": "", "text": "def Func(arg_0=None, arg_1='name', **arg_2):\n \"\"\"General aggregated list function for the GCE service.\"\"\"\n arg_3 = []\n arg_4 = arg_0.aggregatedList(**arg_2)\n\n while arg_4 is not None:\n arg_5 = arg_4.execute()\n for arg_6, arg_7 in arg_5['items'].items():\n if arg_1 in arg_7:\n arg_3.extend(arg_7[arg_1])\n\n arg_4 = arg_0.aggregatedList_next(previous_request=arg_4,\n previous_response=arg_5)\n return arg_3"} +{"_id": "doc_4966", "title": "", "text": "def Func(arg_0=None, **arg_1):\n \"\"\"General list function for the GCE service.\"\"\"\n arg_2 = []\n arg_3 = arg_0.list(**arg_1)\n\n while arg_3 is not None:\n arg_4 = arg_3.execute()\n for arg_5 in arg_4.get('items', []):\n arg_2.append(arg_5)\n arg_3 = arg_0.list_next(previous_request=arg_3, previous_response=arg_4)\n return arg_2"} +{"_id": "doc_4967", "title": "", "text": "def Func(arg_0=None, arg_1=None, **arg_2):\n \"\"\"General list function for Google APIs.\"\"\"\n arg_3 = []\n arg_4 = arg_0.list(**arg_2)\n\n while arg_4 is not None:\n arg_5 = arg_4.execute()\n if arg_1 and arg_1 in arg_5:\n arg_3.extend(arg_5[arg_1])\n else:\n arg_3.append(arg_5)\n # Not all list calls have a list_next\n if 
hasattr(arg_0, 'list_next'):\n arg_4 = arg_0.list_next(previous_request=arg_4,\n previous_response=arg_5)\n else:\n arg_4 = None\n return arg_3"} +{"_id": "doc_4968", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Retrieve detailed cache information.\"\"\"\n from cloudaux.gcp.decorators import _GCP_CACHE\n return _GCP_CACHE.get_access_details(arg_0=arg_0)"} +{"_id": "doc_4969", "title": "", "text": "def Func(arg_0='cloudaux'):\n \"\"\" \n Get default User Agent String.\n\n Try to import pkg_name to get an accurate version number.\n \n return: string\n \"\"\"\n arg_1 = '0.0.1'\n try:\n import pkg_resources\n arg_1 = pkg_resources.get_distribution(arg_0).version\n except pkg_resources.DistributionNotFound:\n pass\n except ImportError:\n pass\n\n return 'cloudaux/%s' % (arg_1)"} +{"_id": "doc_4970", "title": "", "text": "def Func(arg_0=None, **arg_1):\n \"\"\"\n Rule='string'\n \"\"\"\n arg_2 = arg_0.Func(**arg_1)\n if not arg_2.get(\"Targets\"):\n arg_2.update({\"Targets\": []})\n\n return arg_2"} +{"_id": "doc_4971", "title": "", "text": "def Func(**arg_0):\n \"\"\"\n List objects in bucket.\n\n :param Bucket: name of bucket\n :type Bucket: ``str``\n\n :returns list of objects in bucket\n :rtype: ``list``\n \"\"\"\n arg_1 = get_bucket(**arg_0)\n if arg_1:\n return [arg_2 for arg_2 in arg_1.list_blobs()]\n else:\n return None"} +{"_id": "doc_4972", "title": "", "text": "def Func(arg_0, arg_1='camelized'):\n \"\"\"\n Calls _Func and either passes the inflection.camelize method or the inflection.underscore method.\n\n :param item: dictionary representing item to be modified\n :param output: string 'camelized' or 'underscored'\n :return:\n \"\"\"\n if arg_1 == 'camelized':\n return _Func(arg_0, camelize)\n elif arg_1 == 'underscored':\n return _Func(arg_0, underscore)"} +{"_id": "doc_4973", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Retrieve the currently active policy version document for every managed policy that is attached to the role.\"\"\"\n arg_3 = get_role_managed_policies(arg_0, force_client=arg_1)\n\n arg_4 = (policy['name'] for policy in arg_3)\n arg_5 = (delayed(get_managed_policy_document)(policy['arn'], force_client=arg_1) for policy\n in arg_3)\n arg_6 = Parallel(n_jobs=20, backend=\"threading\")(arg_5)\n\n return dict(zip(arg_4, arg_6))"} +{"_id": "doc_4974", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Fetch the base IAM Server Certificate.\"\"\"\n arg_0['_version'] = 1\n\n # Get the initial cert details:\n arg_2 = get_server_certificate_api(arg_0['ServerCertificateName'], **arg_1)\n\n if arg_2:\n arg_0.update(arg_2['ServerCertificateMetadata'])\n arg_0['CertificateBody'] = arg_2['CertificateBody']\n arg_0['CertificateChain'] = arg_2.get('CertificateChain', None)\n\n # Cast dates from a datetime to something JSON serializable.\n arg_0['UploadDate'] = get_iso_string(arg_0['UploadDate'])\n arg_0['Expiration'] = get_iso_string(arg_0['Expiration'])\n\n return arg_0"} +{"_id": "doc_4975", "title": "", "text": "def Func(arg_0, arg_1='client', arg_2=15, arg_3=None,\n arg_4=None, arg_5='cloudaux', arg_6='us-east-1', arg_7=False,\n arg_8=None, arg_9='aws'):\n \"\"\"\n Used to obtain a boto3 client or resource connection.\n For cross account, provide both account_number and assume_role.\n\n :usage:\n\n # Same Account:\n client = Func('iam')\n resource = Func('iam', service_type='resource')\n\n # Cross Account Client:\n client = Func('iam', account_number='000000000000', assume_role='role_name')\n\n # Cross Account Resource:\n resource = 
Func('iam', service_type='resource', account_number='000000000000', assume_role='role_name')\n\n :param service: AWS service (i.e. 'iam', 'ec2', 'kms')\n :param service_type: 'client' or 'resource'\n :param future_expiration_minutes: Connections will expire from the cache\n when their expiration is within this many minutes of the present time. [Default 15]\n :param account_number: Required if assume_role is provided.\n :param assume_role: Name of the role to assume into for account described by account_number.\n :param session_name: Session name to attach to requests. [Default 'cloudaux']\n :param region: Region name for connection. [Default us-east-1]\n :param return_credentials: Indicates if the STS credentials should be returned with the client [Default False]\n :param external_id: Optional external id to pass to sts:AssumeRole.\n See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html\n :param arn_partition: Optional parameter to specify other aws partitions such as aws-us-gov for aws govcloud\n :return: boto3 client or resource connection\n \"\"\"\n arg_10 = (\n arg_3,\n arg_4,\n arg_5,\n arg_8,\n arg_6,\n arg_1,\n arg_0,\n arg_9\n )\n\n if arg_10 in arg_17:\n arg_11 = _get_cached_creds(arg_10, arg_0, arg_1, arg_6, arg_2, arg_7)\n if arg_11:\n return arg_11\n\n arg_12 = None\n if arg_4:\n arg_13 = boto3.session.Session().client('sts')\n\n # prevent malformed ARN\n if not all([arg_3, arg_4]):\n raise ValueError(\"Account number and role to assume are both required\")\n\n arg_14 = 'arn:{partition}:iam::{0}:role/{1}'.format(\n arg_3,\n arg_4,\n partition=arg_9\n )\n\n arg_15 = {\n 'RoleArn': arg_14,\n 'RoleSessionName': arg_5\n }\n\n if arg_8:\n arg_15['ExternalId'] = arg_8\n\n arg_12 = arg_13.assume_role(**arg_15)\n\n if arg_1 == 'client':\n arg_16 = _client(arg_0, arg_6, arg_12)\n elif arg_1 == 'resource':\n arg_16 = _resource(arg_0, arg_6, arg_12)\n\n if arg_12:\n arg_17[arg_10] = arg_12\n\n if arg_7:\n return arg_16, arg_12['Credentials']\n\n return arg_16"} +{"_id": "doc_4976", "title": "", "text": "def Func(arg_0, arg_1=arg_2.ALL, **arg_4):\n arg_5 = registry.build_out(arg_1, start_with=arg_0, pass_datastructure=True, **arg_4)\n \"\"\" just store the AWS formatted rules \"\"\"\n arg_5.pop('security_group_rules', [])\n return arg_5"} +{"_id": "doc_4977", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Get the inline policies for the group.\"\"\"\n arg_2 = list_group_policies(arg_0['GroupName'])\n\n arg_3 = {}\n\n for arg_4 in arg_2:\n arg_3[arg_4] = get_group_policy_document(arg_0['GroupName'], arg_4, **arg_1)\n\n return arg_3"} +{"_id": "doc_4978", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Get a list of the managed policy names that are attached to the group.\"\"\"\n arg_2 = list_attached_group_managed_policies(arg_0['GroupName'], **arg_1)\n\n arg_3 = []\n\n for arg_4 in arg_2:\n arg_3.append(arg_4['PolicyName'])\n\n return arg_3"} +{"_id": "doc_4979", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Gets a list of the usernames that are a part of this group.\"\"\"\n arg_2 = get_group_api(arg_0['GroupName'], **arg_1)\n\n arg_3 = []\n for arg_4 in arg_2.get('Users', []):\n arg_3.append(arg_4['UserName'])\n\n return arg_3"} +{"_id": "doc_4980", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Fetch the base IAM Group.\"\"\"\n arg_0['_version'] = 1\n\n # Get the initial group details (only needed if we didn't grab the users):\n arg_0.update(get_group_api(arg_0['GroupName'], users=False, **arg_1)['Group'])\n\n # 
Cast CreateDate from a datetime to something JSON serializable.\n arg_0['CreateDate'] = get_iso_string(arg_0['CreateDate'])\n return arg_0"} +{"_id": "doc_4981", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''\n Returns a list of stores in the catalog. If workspaces is specified will only return stores in those workspaces.\n If names is specified, will only return stores that match.\n names can either be a comma delimited string or an array.\n Will return an empty list if no stores are found.\n '''\n\n if isinstance(arg_2, Workspace):\n arg_2 = [arg_2]\n elif isinstance(arg_2, list) and [arg_3 for arg_3 in arg_2 if isinstance(arg_3, Workspace)]:\n # nothing\n pass\n else:\n arg_2 = arg_0.get_workspaces(arg_1=arg_2)\n\n arg_4 = []\n for arg_5 in arg_2:\n arg_6 = arg_0.get_xml(arg_5.datastore_url)\n arg_7 = arg_0.get_xml(arg_5.coveragestore_url)\n arg_8 = arg_0.get_xml(arg_5.wmsstore_url)\n arg_4.extend([datastore_from_index(arg_0, arg_5, arg_9) for arg_9 in arg_6.findall(\"dataStore\")])\n arg_4.extend([coveragestore_from_index(arg_0, arg_5, arg_9) for arg_9 in arg_7.findall(\"coverageStore\")])\n arg_4.extend([wmsstore_from_index(arg_0, arg_5, arg_9) for arg_9 in arg_8.findall(\"wmsStore\")])\n\n if arg_1 is None:\n arg_1 = []\n elif isinstance(arg_1, basestring):\n arg_1 = [s.strip() for s in arg_1.split(',') if s.strip()]\n\n if arg_4 and arg_1:\n return ([arg_10 for arg_10 in arg_4 if arg_10.name in arg_1])\n\n return arg_4"} +{"_id": "doc_4982", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''\n Returns a single store object.\n Will return None if no store is found.\n Will raise an error if more than one store with the same name is found.\n '''\n\n arg_3 = arg_0.Funcs(workspaces=arg_2, names=arg_1)\n return arg_0._return_first_item(arg_3)"} +{"_id": "doc_4983", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None, arg_6=None):\n '''List granules of an imagemosaic'''\n arg_7 = dict()\n\n if arg_4 is not None:\n arg_7['filter'] = arg_4\n if arg_5 is not None:\n arg_7['limit'] = arg_5\n if arg_6 is not None:\n arg_7['offset'] = arg_6\n\n arg_8 = arg_3\n if isinstance(arg_2, basestring):\n arg_9 = arg_2\n else:\n arg_9 = arg_2.name\n arg_8 = arg_2.workspace.name\n\n if arg_8 is None:\n raise ValueError(\"Must specify workspace\")\n\n arg_10 = build_url(\n arg_0.service_url,\n [\n \"workspaces\",\n arg_8,\n \"coveragestores\",\n arg_9,\n \"coverages\",\n arg_1,\n \"index/granules.json\"\n ],\n arg_7\n )\n\n # GET /workspaces//coveragestores//coverages//index/granules.json\n arg_11 = {\n \"Content-type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n\n arg_12 = arg_0.http_request(arg_10, arg_11=arg_11)\n if arg_12.status_code != 200:\n FailedRequestError('Failed to list granules in mosaic {} : {}, {}'.format(arg_2, arg_12.status_code, arg_12.text))\n\n arg_0._cache.clear()\n return arg_12.json()"} +{"_id": "doc_4984", "title": "", "text": "def Func(arg_0, arg_1):\n '''Returns all coverages in a coverage store'''\n arg_2 = dict()\n arg_3 = build_url(\n arg_0.service_url,\n [\n \"workspaces\",\n arg_1.workspace.name,\n \"coveragestores\",\n arg_1.name,\n \"coverages.json\"\n ],\n arg_2\n )\n # GET /workspaces//coveragestores//coverages.json\n arg_4 = {\n \"Content-type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n\n arg_5 = arg_0.http_request(arg_3, arg_4=arg_4)\n if arg_5.status_code != 200:\n FailedRequestError('Failed to get mosaic coverages {} : {}, {}'.format(arg_1, 
arg_5.status_code, arg_5.text))\n\n arg_0._cache.clear()\n return arg_5.json()"} +{"_id": "doc_4985", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=None):\n '''Publish a featuretype from data in an existing store'''\n # @todo native_srs doesn't seem to get detected, even when in the DB\n # metadata (at least for postgis in geometry_columns) and then there\n # will be a misconfigured layer\n if arg_3 is None:\n raise ValueError(\"must specify native_crs\")\n\n arg_4 = arg_4 or arg_3\n arg_7 = FeatureType(arg_0, arg_2.workspace, arg_2, arg_1)\n # because name is the in FeatureType base class, work around that\n # and hack in these others that don't have xml properties\n arg_7.dirty['name'] = arg_1\n arg_7.dirty['srs'] = arg_4\n arg_7.dirty['nativeCRS'] = arg_3\n arg_7.enabled = True\n arg_7.advertised = True\n arg_7.title = arg_1\n\n if arg_6 is not None:\n arg_7.native_name = arg_6\n\n arg_12 = {\n \"Content-type\": \"application/xml\",\n \"Accept\": \"application/xml\"\n }\n\n arg_13 = arg_2.resource_url\n if arg_5 is not None:\n arg_7.metadata = ({'JDBC_VIRTUAL_TABLE': arg_5})\n arg_15 = dict()\n arg_13 = build_url(\n arg_0.service_url,\n [\n \"workspaces\",\n arg_2.workspace.name,\n \"datastores\", arg_2.name,\n \"featuretypes.xml\"\n ],\n arg_15\n )\n\n arg_16 = arg_0.http_request(arg_13, method='post', data=arg_7.message(), arg_12=arg_12)\n if arg_16.status_code not in (200, 201, 202):\n FailedRequestError('Failed to publish feature type {} : {}, {}'.format(arg_1, arg_16.status_code, arg_16.text))\n\n arg_0._cache.clear()\n arg_7.fetch()\n return arg_7"} +{"_id": "doc_4986", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n '''\n returns a single resource object.\n Will return None if no resource is found.\n Will raise an error if more than one resource with the same name is found.\n '''\n\n arg_4 = arg_0.Funcs(names=arg_1, stores=arg_2, workspaces=arg_3)\n return arg_0._return_first_item(arg_4)"} +{"_id": "doc_4987", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''\n returns a single layergroup object.\n Will return None if no layergroup is found.\n Will raise an error if more than one layergroup with the same name is found.\n '''\n\n arg_3 = arg_0.Funcs(names=arg_1, workspaces=arg_2)\n return arg_0._return_first_item(arg_3)"} +{"_id": "doc_4988", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''\n returns a single style object.\n Will return None if no style is found.\n Will raise an error if more than one style with the same name is found.\n '''\n\n arg_3 = arg_0.Funcs(names=arg_1, workspaces=arg_2)\n return arg_0._return_first_item(arg_3)"} +{"_id": "doc_4989", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''\n Returns a list of workspaces in the catalog.\n If names is specified, will only return workspaces that match.\n names can either be a comma delimited string or an array.\n Will return an empty list if no workspaces are found.\n '''\n if arg_1 is None:\n arg_1 = []\n elif isinstance(arg_1, basestring):\n arg_1 = [s.strip() for s in arg_1.split(',') if s.strip()]\n\n arg_2 = arg_0.get_xml(\"{}/workspaces.xml\".format(arg_0.service_url))\n arg_3 = []\n arg_3.extend([workspace_from_index(arg_0, arg_4) for arg_4 in arg_2.findall(\"workspace\")])\n\n if arg_3 and arg_1:\n return ([arg_5 for arg_5 in arg_3 if arg_5.name in arg_1])\n\n return arg_3"} +{"_id": "doc_4990", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n returns a single workspace object.\n Will return None 
if no workspace is found.\n Will raise an error if more than one workspace with the same name is found.\n '''\n\n arg_2 = arg_0.Funcs(names=arg_1)\n return arg_0._return_first_item(arg_2)"} +{"_id": "doc_4991", "title": "", "text": "def Func(arg_0):\n \"\"\"Extract a metadata link tuple from an xml node\"\"\"\n arg_1 = arg_0.find(\"type\")\n arg_2 = arg_0.find(\"metadataType\")\n arg_3 = arg_0.find(\"content\")\n if None in [arg_1, arg_2, arg_3]:\n return None\n else:\n return (arg_1.text, arg_2.text, arg_3.text)"} +{"_id": "doc_4992", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Create a URL from a list of path segments and an optional dict of query\n parameters.\n \"\"\"\n\n def clean_segment(arg_3):\n \"\"\"\n Cleans the segment and encodes to UTF-8 if the segment is unicode.\n \"\"\"\n arg_3 = arg_3.strip('/')\n if isinstance(arg_3, basestring):\n arg_3 = arg_3.encode('utf-8')\n return arg_3\n\n arg_1 = (quote(clean_segment(s)) for s in arg_1)\n if arg_2 is None or len(arg_2) == 0:\n arg_4 = ''\n else:\n arg_4 = \"?\" + urlencode(arg_2)\n arg_5 = '/'.join(arg_1) + arg_4\n arg_6 = arg_0.rstrip('/') + '/'\n return urljoin(str(arg_6), str(arg_5))"} +{"_id": "doc_4993", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"GeoServer's REST API uses ZIP archives as containers for file formats such\n as Shapefile and WorldImage which include several 'boxcar' files alongside\n the main data. In such archives, GeoServer assumes that all of the relevant\n files will have the same base name and appropriate extensions, and live in\n the root of the ZIP archive. This method produces a zip file that matches\n these expectations, based on a basename, and a dict of extensions to paths or\n file-like objects. The client code is responsible for deleting the zip\n archive when it's done.\"\"\"\n arg_2, arg_3 = mkstemp()\n arg_4 = ZipFile(arg_3, 'w')\n for arg_5, arg_6 in arg_1.items():\n arg_7 = \"%s.%s\" % (arg_0, arg_5)\n if (isinstance(arg_6, basestring)):\n arg_4.write(arg_6, arg_7)\n else:\n arg_4.writestr(arg_7, arg_6.read())\n arg_4.close()\n os.close(arg_2)\n return arg_3"} +{"_id": "doc_4994", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Extract metadata Dimension Info from an xml node\"\"\"\n def _get_value(arg_2):\n return getattr(arg_1.find(arg_2), 'text', None)\n\n arg_3 = _get_value('resolution')\n arg_4 = arg_1.find(\"defaultValue\")\n arg_5 = arg_4.find(\"strategy\") if arg_4 is not None else None\n arg_5 = arg_5.text if arg_5 is not None else None\n return DimensionInfo(\n arg_0,\n _get_value('enabled') == 'true',\n _get_value('presentation'),\n int(arg_3) if arg_3 else None,\n _get_value('units'),\n _get_value('unitSymbol'),\n arg_5,\n _get_value('attribute'),\n _get_value('endAttribute'),\n _get_value('referenceValue'),\n _get_value('nearestMatchEnabled')\n )"} +{"_id": "doc_4995", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Extract metadata Dynamic Default Values from an xml node\"\"\"\n arg_2 = arg_1.find(\"configurations\")\n if arg_2 is not None:\n arg_2 = []\n for arg_3 in arg_1.findall(\"configuration\"):\n arg_4 = arg_3.find(\"dimension\")\n arg_4 = arg_4.text if arg_4 is not None else None\n arg_5 = arg_3.find(\"policy\")\n arg_5 = arg_5.text if arg_5 is not None else None\n arg_6 = arg_3.find(\"defaultValueExpression\")\n arg_6 = arg_6.text if arg_6 is not None else None\n\n arg_2.append(DynamicDefaultValuesConfiguration(arg_4, arg_5, arg_6))\n\n return DynamicDefaultValues(arg_0, arg_2)"} +{"_id": "doc_4996", "title": "", "text": 
"def Func(arg_0, arg_1):\n \"\"\"Change semantic of MOVE to change resource tags.\"\"\"\n # path and destPath must be '/by_tag//'\n if \"/by_tag/\" not in arg_0.path:\n raise DAVError(HTTP_FORBIDDEN)\n if \"/by_tag/\" not in arg_1:\n raise DAVError(HTTP_FORBIDDEN)\n arg_2, arg_3, arg_4 = util.save_split(arg_0.path.strip(\"/\"), \"/\", 2)\n assert arg_2 == \"by_tag\"\n assert arg_3 in arg_0.data[\"tags\"]\n arg_0.data[\"tags\"].remove(arg_3)\n arg_2, arg_3, arg_4 = util.save_split(arg_1.strip(\"/\"), \"/\", 2)\n assert arg_2 == \"by_tag\"\n if arg_3 not in arg_0.data[\"tags\"]:\n arg_0.data[\"tags\"].append(arg_3)\n return True"} +{"_id": "doc_4997", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return _VirtualResource object for path.\n\n path is expected to be\n categoryType/category/name/artifact\n for example:\n 'by_tag/cool/My doc 2/info.html'\n\n See DAVProvider.Func()\n \"\"\"\n _logger.info(\"Func('%s')\" % arg_1)\n arg_0._count_Func += 1\n arg_3 = RootCollection(arg_2)\n return arg_3.resolve(\"\", arg_1)"} +{"_id": "doc_4998", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Add a provider to the provider_map routing table.\"\"\"\n # Make sure share starts with, or is '/'\n arg_1 = \"/\" + arg_1.strip(\"/\")\n assert arg_1 not in arg_0.provider_map\n\n if compat.is_basestring(arg_2):\n # Syntax:\n # : \n # We allow a simple string as 'provider'. In this case we interpret\n # it as a file system root folder that is published.\n arg_2 = FilesystemProvider(arg_2, arg_3)\n elif type(arg_2) in (dict,):\n if \"provider\" in arg_2:\n # Syntax:\n # : {\"provider\": , \"args\": , \"kwargs\": : {\"root\": , \"redaonly\": }\n arg_2 = FilesystemProvider(\n arg_2[\"root\"], bool(arg_2.get(\"readonly\", False))\n )\n elif type(arg_2) in (list, tuple):\n raise ValueError(\n \"Provider {}: tuple/list syntax is no longer supported\".format(arg_2)\n )\n # provider = FilesystemProvider(provider[0], provider[1])\n\n if not isinstance(arg_2, DAVProvider):\n raise ValueError(\"Invalid provider {}\".format(arg_2))\n\n arg_2.set_share_path(arg_1)\n if arg_0.mount_path:\n arg_2.set_mount_path(arg_0.mount_path)\n\n # TODO: someday we may want to configure different lock/prop\n # managers per provider\n arg_2.set_lock_manager(arg_0.lock_manager)\n arg_2.set_prop_manager(arg_0.prop_manager)\n\n arg_0.provider_map[arg_1] = arg_2\n # self.provider_map[share] = {\"provider\": provider, \"allow_anonymous\": False}\n\n # Store the list of share paths, ordered by length, so route lookups\n # will return the most specific match\n arg_0.sorted_share_list = [s.lower() for s in arg_0.provider_map.keys()]\n arg_0.sorted_share_list = sorted(arg_0.sorted_share_list, key=len, reverse=True)\n\n return arg_2"} +{"_id": "doc_4999", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the registered DAVProvider for a given path.\n\n Returns:\n tuple: (share, provider)\n \"\"\"\n # Find DAV provider that matches the share\n arg_2 = None\n arg_3 = arg_1.lower()\n for arg_4 in arg_0.sorted_share_list:\n # @@: Case sensitivity should be an option of some sort here;\n # os.path.normpath might give the preferred case for a filename.\n if arg_4 == \"/\":\n arg_2 = arg_4\n break\n elif arg_3 == arg_4 or arg_3.startswith(arg_4 + \"/\"):\n arg_2 = arg_4\n break\n\n if arg_2 is None:\n return None, None\n return arg_2, arg_0.provider_map.get(arg_2)"} +{"_id": "doc_5000", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9\n ):\n \"\"\"Computes 
digest hash.\n\n Calculation of the A1 (HA1) part is delegated to the dc interface method\n `digest_auth_user()`.\n\n Args:\n realm (str):\n user_name (str):\n method (str): WebDAV Request Method\n uri (str):\n nonce (str): server generated nonce value\n cnonce (str): client generated cnonce value\n qop (str): quality of protection\n nc (str) (number), nonce counter incremented by client\n Returns:\n MD5 hash string\n or False if user rejected by domain controller\n \"\"\"\n\n def md5h(arg_10):\n return md5(compat.to_bytes(arg_10)).hexdigest()\n\n def md5kd(arg_11, arg_10):\n return md5h(arg_11 + \":\" + arg_10)\n\n arg_12 = arg_0.domain_controller.digest_auth_user(arg_1, arg_2, arg_9)\n if not arg_12:\n return False\n\n arg_13 = arg_3 + \":\" + arg_4\n\n if arg_7:\n arg_14 = md5kd(\n arg_12, arg_5 + \":\" + arg_8 + \":\" + arg_6 + \":\" + arg_7 + \":\" + md5h(arg_13)\n )\n else:\n arg_14 = md5kd(arg_12, arg_5 + \":\" + md5h(arg_13))\n\n return arg_14"} +{"_id": "doc_5001", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Handle a COPY request natively.\n\n \"\"\"\n arg_3, arg_4 = util.pop_path(arg_1)\n arg_4 = arg_4.strip(\"/\")\n arg_5 = arg_0.provider.ui\n arg_6 = arg_0.provider.repo\n _logger.info(\"Func %s -> %s\" % (arg_0.localHgPath, arg_4))\n if arg_0.rev is None and arg_3 == \"edit\":\n # COPY /edit/a/b to /edit/c/d: turn into 'hg copy -f a/b c/d'\n commands.copy(arg_5, arg_6, arg_0.localHgPath, arg_4, force=True)\n elif arg_0.rev is None and arg_3 == \"released\":\n # COPY /edit/a/b to /released/c/d\n # This is interpreted as 'hg commit a/b' (ignoring the dest. path)\n arg_0._commit(\"WsgiDAV commit (COPY %s -> %s)\" % (arg_0.path, arg_1))\n else:\n raise DAVError(HTTP_FORBIDDEN)\n # Return True: request was handled\n return True"} +{"_id": "doc_5002", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Read log entries into a list of dictionaries.\"\"\"\n arg_0.ui.pushbuffer()\n commands.log(arg_0.ui, arg_0.repo, arg_1=arg_1, date=None, rev=None, user=None)\n arg_2 = arg_0.ui.popbuffer().strip()\n\n arg_3 = []\n for arg_4 in arg_2.split(\"\\n\\n\"):\n arg_5 = {}\n arg_3.append(arg_5)\n for arg_6 in arg_4.split(\"\\n\"):\n arg_7, arg_8 = arg_6.split(\":\", 1)\n assert arg_7 in (\"changeset\", \"tag\", \"user\", \"date\", \"summary\")\n arg_5[arg_7.strip()] = arg_8.strip()\n arg_5[\"parsed_date\"] = util.parse_time_string(arg_5[\"date\"])\n arg_10, arg_11 = arg_5[\"changeset\"].split(\":\")\n arg_5[\"local_id\"] = int(arg_10)\n arg_5[\"unid\"] = arg_11\n # pprint(logList)\n return arg_3"} +{"_id": "doc_5003", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Return a dictionary containing all files under source control.\n\n dirinfos:\n Dictionary containing direct members for every collection.\n {folderpath: (collectionlist, filelist), ...}\n files:\n Sorted list of all file paths in the manifest.\n filedict:\n Dictionary containing all files under source control.\n\n ::\n\n {'dirinfos': {'': (['wsgidav',\n 'tools',\n 'WsgiDAV.egg-info',\n 'tests'],\n ['index.rst',\n 'wsgidav MAKE_DAILY_BUILD.launch',\n 'wsgidav run_server.py DEBUG.launch',\n 'wsgidav-paste.conf',\n ...\n 'setup.py']),\n 'wsgidav': (['addons', 'samples', 'server', 'interfaces'],\n ['__init__.pyc',\n 'dav_error.pyc',\n 'dav_provider.pyc',\n ...\n 'wsgidav_app.py']),\n },\n 'files': ['.hgignore',\n 'ADDONS.txt',\n 'wsgidav/samples/mysql_dav_provider.py',\n ...\n ],\n 'filedict': {'.hgignore': True,\n 'README.txt': True,\n 'WsgiDAV.egg-info/PKG-INFO': True,\n }\n }\n 
\"\"\"\n arg_4 = arg_1.setdefault(\"wsgidav.hg.cache\", {})\n if arg_4.get(arg_17.to_native(arg_2)) is not None:\n _logger.debug(\"Func(%s): cache hit.\" % arg_2)\n return arg_4[arg_17.to_native(arg_2)]\n\n arg_5 = time.time()\n arg_0.ui.pushbuffer()\n commands.manifest(arg_0.ui, arg_0.repo, arg_2)\n arg_6 = arg_0.ui.popbuffer()\n arg_7 = []\n arg_8 = {}\n arg_9 = {}\n for arg_10 in arg_6.split(\"\\n\"):\n if arg_10.strip() == \"\":\n continue\n arg_10 = arg_10.replace(\"\\\\\", \"/\")\n # add all parent directories to 'dirinfos'\n arg_11 = arg_10.split(\"/\")\n if len(arg_11) >= 1:\n arg_12 = \"\"\n for arg_13 in range(0, len(arg_11) - 1):\n arg_14 = arg_11[arg_13]\n arg_15 = arg_8.setdefault(arg_12, ([], []))\n if arg_14 not in arg_15[0]:\n arg_15[0].append(arg_14)\n if arg_12 == \"\":\n arg_12 = arg_14\n else:\n arg_12 = \"%s/%s\" % (arg_12, arg_14)\n arg_8.setdefault(arg_12, ([], []))[1].append(arg_11[-1])\n arg_9[arg_10] = True\n arg_7.sort()\n\n arg_16 = {\"files\": arg_7, \"dirinfos\": arg_8, \"filedict\": arg_9}\n arg_4[arg_17.to_native(arg_2)] = arg_16\n _logger.info(\"_getRepoInfo(%s) took %.3f\" % (arg_2, time.time() - arg_5))\n return arg_16"} +{"_id": "doc_5004", "title": "", "text": "def Func(arg_0):\n \"\"\"Return preferred mapping for a resource mapping.\n\n Different URLs may map to the same resource, e.g.:\n '/a/b' == '/A/b' == '/a/b/'\n Func() returns the same value for all these variants, e.g.:\n '/a/b/' (assuming resource names considered case insensitive)\n\n @param path: a UTF-8 encoded, unquoted byte string.\n @return: a UTF-8 encoded, unquoted byte string.\n \"\"\"\n if arg_0.path in (\"\", \"/\"):\n return \"/\"\n # Append '/' for collections\n if arg_0.is_collection and not arg_0.path.endswith(\"/\"):\n return arg_0.path + \"/\"\n # TODO: handle case-sensitivity, depending on OS\n # (FileSystemProvider could do this with os.path:\n # (?) on unix we can assume that the path already matches exactly the case of filepath\n # on windows we could use path.lower() or get the real case from the\n # file system\n return arg_0.path"} +{"_id": "doc_5005", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert path to a URL that can be passed to XML responses.\n\n Byte string, UTF-8 encoded, quoted.\n\n See http://www.webdav.org/specs/rfc4918.html#rfc.section.8.3\n We are using the path-absolute option. i.e. 
starting with '/'.\n URI ; See section 3.2.1 of [RFC2068]\n \"\"\"\n # Nautilus chokes, if href encodes '(' as '%28'\n # So we don't encode 'extra' and 'safe' characters (see rfc2068 3.2.1)\n arg_1 = \"/\" + \"!*'(),\" + \"$-_|.\"\n return compat.quote(\n arg_0.provider.mount_path\n + arg_0.provider.share_path\n + arg_0.get_preferred_path(),\n arg_1=arg_1,\n )"} +{"_id": "doc_5006", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove all associated dead properties.\"\"\"\n if arg_0.provider.prop_manager:\n arg_0.provider.prop_manager.remove_properties(\n arg_0.get_ref_url(), arg_0.environ\n )"} +{"_id": "doc_5007", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set application location for this resource provider.\n\n @param share_path: a UTF-8 encoded, unquoted byte string.\n \"\"\"\n # if isinstance(share_path, unicode):\n # share_path = share_path.encode(\"utf8\")\n assert arg_1 == \"\" or arg_1.startswith(\"/\")\n if arg_1 == \"/\":\n arg_1 = \"\" # This allows to code 'absPath = share_path + path'\n assert arg_1 in (\"\", \"/\") or not arg_1.endswith(\"/\")\n arg_0.share_path = arg_1"} +{"_id": "doc_5008", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert a refUrl to a path, by stripping the share prefix.\n\n Used to calculate the from a storage key by inverting get_ref_url().\n \"\"\"\n return \"/\" + compat.unquote(util.lstripstr(arg_1, arg_0.share_path)).lstrip(\n \"/\"\n )"} +{"_id": "doc_5009", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return True, if path maps to an existing collection resource.\n\n This method should only be used, if no other information is queried\n for . Otherwise a _DAVResource should be created first.\n \"\"\"\n arg_3 = arg_0.get_resource_inst(arg_1, arg_2)\n return arg_3 and arg_3.Func"} +{"_id": "doc_5010", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert XML string into etree.Element.\"\"\"\n try:\n return etree.XML(arg_0)\n except Exception:\n # TODO:\n # ExpatError: reference to invalid character number: line 1, column 62\n # litmus fails, when xml is used instead of lxml\n # 18. propget............... FAIL (PROPFIND on `/temp/litmus/prop2':\n # Could not read status line: connection was closed by server)\n # text = ��\n # \n # t2 = text.encode(\"utf8\")\n # return etree.XML(t2)\n _logger.error(\n \"Error parsing XML string. 
\"\n \"If lxml is not available, and unicode is involved, then \"\n \"installing lxml _may_ solve this issue.\"\n )\n _logger.error(\"XML source: {}\".format(arg_0))\n raise"} +{"_id": "doc_5011", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Wrapper for etree.tostring, that takes care of unsupported pretty_print\n option and prepends an encoding header.\"\"\"\n if use_lxml:\n arg_2 = etree.tostring(\n arg_0, encoding=\"UTF-8\", xml_declaration=True, arg_1=arg_1\n )\n else:\n arg_2 = etree.tostring(arg_0, encoding=\"UTF-8\")\n if not arg_2.startswith(b\"\\n' + arg_2\n\n assert arg_2.startswith(b\"= 5:\n arg_5 = arg_4.RequestHandlerClass.handle_one_request\n\n def arg_8(arg_6):\n arg_5(arg_6)\n if arg_6.close_connection == 1:\n _logger.debug(\"HTTP Connection : close\")\n else:\n _logger.debug(\"HTTP Connection : continue\")\n\n arg_4.RequestHandlerClass.handle_one_request = arg_8\n\n # __handle = server.RequestHandlerClass.handle\n\n # def handle(self):\n # _logger.debug(\"open HTTP connection\")\n # __handle(self)\n\n arg_4.RequestHandlerClass.handle_one_request = arg_8\n\n arg_9, arg_10 = arg_4.server_address\n if arg_9 == \"0.0.0.0\":\n _logger.info(\n \"Serving on 0.0.0.0:{} view at {}://127.0.0.1:{}\".format(arg_10, \"http\", arg_10)\n )\n else:\n _logger.info(\"Serving on {}://{}:{}\".format(\"http\", arg_9, arg_10))\n try:\n arg_4.serve_forever()\n except KeyboardInterrupt:\n _logger.warning(\"Caught Ctrl-C, shutting down...\")\n return"} +{"_id": "doc_5016", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run WsgiDAV using gevent if gevent is installed.\n\n See\n https://github.com/gevent/gevent/blob/master/src/gevent/pywsgi.py#L1356\n https://github.com/gevent/gevent/blob/master/src/gevent/server.py#L38\n for more options\n \"\"\"\n import gevent\n import gevent.monkey\n\n gevent.monkey.patch_all()\n from gevent.pywsgi import WSGIServer\n\n arg_3 = {\n \"bind_addr\": (arg_1[\"host\"], arg_1[\"port\"]),\n \"wsgi_app\": arg_0,\n # TODO: SSL support\n \"keyfile\": None,\n \"certfile\": None,\n }\n arg_4 = \"http\"\n # Override or add custom args\n arg_3.update(arg_1.get(\"server_args\", {}))\n\n arg_5 = WSGIServer(arg_3[\"bind_addr\"], arg_0)\n _logger.info(\"Running {}\".format(arg_5))\n _logger.info(\n \"Serving on {}://{}:{} ...\".format(arg_4, arg_1[\"host\"], arg_1[\"port\"])\n )\n try:\n gevent.spawn(arg_5.serve_forever())\n except KeyboardInterrupt:\n _logger.warning(\"Caught Ctrl-C, shutting down...\")\n return"} +{"_id": "doc_5017", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.\"\"\"\n assert arg_2 == \"cherrypy-wsgiserver\"\n\n try:\n from cherrypy import arg_4\n from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter\n\n _logger.warning(\"WARNING: cherrypy.wsgiserver is deprecated.\")\n _logger.warning(\n \" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver\"\n )\n _logger.warning(\" was moved to the cheroot project.\")\n _logger.warning(\" Consider using --server=cheroot.\")\n except ImportError:\n _logger.error(\"*\" * 78)\n _logger.error(\"ERROR: Could not import cherrypy.wsgiserver.\")\n _logger.error(\n \"Try `pip install cherrypy` or specify another server using the --server option.\"\n )\n _logger.error(\"Note that starting with CherryPy 9.0, the server was moved to\")\n _logger.error(\n \"the cheroot project, so it is recommended to use `-server=cheroot`\"\n )\n _logger.error(\"and run `pip install cheroot` instead.\")\n 
_logger.error(\"*\" * 78)\n raise\n\n arg_3 = \"WsgiDAV/{} {} Python/{}\".format(\n __version__, arg_4.CherryPyWSGIServer.version, util.PYTHON_VERSION\n )\n arg_4.CherryPyWSGIServer.version = arg_3\n\n # Support SSL\n arg_7 = _get_checked_path(arg_1.get(\"ssl_certificate\"), arg_1)\n arg_8 = _get_checked_path(arg_1.get(\"ssl_private_key\"), arg_1)\n arg_9 = _get_checked_path(\n arg_1.get(\"ssl_certificate_chain\"), arg_1\n )\n arg_10 = \"http\"\n if arg_7:\n assert arg_8\n arg_4.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(\n arg_7, arg_8, arg_9\n )\n arg_10 = \"https\"\n _logger.info(\"SSL / HTTPS enabled.\")\n\n _logger.info(\"Running {}\".format(arg_3))\n _logger.info(\n \"Serving on {}://{}:{} ...\".format(arg_10, arg_1[\"host\"], arg_1[\"port\"])\n )\n\n arg_12 = {\n \"bind_addr\": (arg_1[\"host\"], arg_1[\"port\"]),\n \"wsgi_app\": arg_0,\n \"server_name\": arg_3,\n }\n # Override or add custom args\n arg_12.update(arg_1.get(\"server_args\", {}))\n\n arg_13 = arg_4.CherryPyWSGIServer(**arg_12)\n\n # If the caller passed a startup event, monkey patch the server to set it\n # when the request handler loop is entered\n arg_14 = arg_1.get(\"startup_event\")\n if arg_14:\n\n def _patched_tick():\n arg_13.tick = arg_16 # undo the monkey patch\n arg_16()\n _logger.info(\"CherryPyWSGIServer is ready\")\n arg_14.set()\n\n arg_16 = arg_13.tick\n arg_13.tick = _patched_tick\n\n try:\n arg_13.start()\n except KeyboardInterrupt:\n _logger.warning(\"Caught Ctrl-C, shutting down...\")\n finally:\n arg_13.stop()\n return"} +{"_id": "doc_5018", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run WsgiDAV using flup.server.fcgi if Flup is installed.\"\"\"\n # http://trac.saddi.com/flup/wiki/FlupServers\n if arg_2 == \"flup-fcgi\":\n from flup.server.fcgi import WSGIServer, __version__ as flupver\n elif arg_2 == \"flup-fcgi-fork\":\n from flup.server.fcgi_fork import WSGIServer, __version__ as flupver\n else:\n raise ValueError\n\n _logger.info(\n \"Running WsgiDAV/{} {}/{}...\".format(\n __version__, WSGIServer.__module__, flupver\n )\n )\n arg_3 = WSGIServer(\n arg_0,\n bindAddress=(arg_1[\"host\"], arg_1[\"port\"]),\n # debug=True,\n )\n try:\n arg_3.run()\n except KeyboardInterrupt:\n _logger.warning(\"Caught Ctrl-C, shutting down...\")\n return"} +{"_id": "doc_5019", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run WsgiDAV using ext_wsgiutils_server from the wsgidav package.\"\"\"\n from wsgidav.server import ext_wsgiutils_server\n\n _logger.info(\n \"Running WsgiDAV {} on wsgidav.ext_wsgiutils_server...\".format(__version__)\n )\n _logger.warning(\n \"WARNING: This single threaded server (ext-wsgiutils) is not meant for production.\"\n )\n try:\n ext_wsgiutils_server.serve(arg_1, arg_0)\n except KeyboardInterrupt:\n _logger.warning(\"Caught Ctrl-C, shutting down...\")\n return"} +{"_id": "doc_5020", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Handle PROPPATCH request to set or remove a property.\n\n @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH\n \"\"\"\n arg_3 = arg_1[\"PATH_INFO\"]\n arg_4 = arg_0._davProvider.get_resource_inst(arg_3, arg_1)\n\n # Only accept Depth: 0 (but assume this, if omitted)\n arg_1.setdefault(\"HTTP_DEPTH\", \"0\")\n if arg_1[\"HTTP_DEPTH\"] != \"0\":\n arg_0._fail(HTTP_BAD_REQUEST, \"Depth must be '0'.\")\n\n if arg_4 is None:\n arg_0._fail(HTTP_NOT_FOUND)\n\n arg_0._evaluate_if_headers(arg_4, arg_1)\n arg_0._check_write_permission(arg_4, \"0\", arg_1)\n\n # Parse request\n arg_5 = 
util.parse_xml_body(arg_1)\n\n if arg_5.tag != \"{DAV:}propertyupdate\":\n arg_0._fail(HTTP_BAD_REQUEST)\n\n # Create a list of update request tuples: (name, value)\n arg_6 = []\n\n for arg_7 in arg_5:\n arg_8 = None\n if arg_7.tag == \"{DAV:}remove\":\n arg_8 = \"remove\"\n elif arg_7.tag == \"{DAV:}set\":\n arg_8 = \"set\"\n else:\n arg_0._fail(\n HTTP_BAD_REQUEST, \"Unknown tag (expected 'set' or 'remove').\"\n )\n\n for arg_9 in arg_7:\n if arg_9.tag != \"{DAV:}prop\":\n arg_0._fail(HTTP_BAD_REQUEST, \"Unknown tag (expected 'prop').\")\n\n for arg_10 in arg_9:\n arg_11 = None\n if arg_8 == \"remove\":\n arg_11 = None # Mark as 'remove'\n if len(arg_10) > 0:\n # 14.23: All the XML elements in a 'prop' XML\n # element inside of a 'remove' XML element MUST be\n # empty\n arg_0._fail(\n HTTP_BAD_REQUEST,\n \"prop element must be empty for 'remove'.\",\n )\n else:\n arg_11 = arg_10\n\n arg_6.append((arg_10.tag, arg_11))\n\n # Apply updates in SIMULATION MODE and create a result list (name,\n # result)\n arg_12 = True\n arg_13 = []\n\n for (arg_14, arg_11) in arg_6:\n try:\n arg_4.set_property_value(arg_14, arg_11, dry_run=True)\n except Exception as arg_19:\n arg_15 = as_DAVError(arg_19)\n else:\n arg_15 = \"200 OK\"\n arg_13.append((arg_14, arg_15))\n arg_12 = arg_12 and arg_15 == \"200 OK\"\n\n # Generate response list of 2-tuples (name, value)\n # is None on success, or an instance of DAVError\n arg_16 = []\n arg_17 = []\n\n if not arg_12:\n # If dry run failed: convert all OK to FAILED_DEPENDENCY.\n for (arg_14, arg_18) in arg_13:\n if arg_18 == \"200 OK\":\n arg_18 = DAVError(HTTP_FAILED_DEPENDENCY)\n elif isinstance(arg_18, DAVError):\n arg_17.append(arg_18.get_user_info())\n arg_16.append((arg_14, arg_18))\n\n else:\n # Dry-run succeeded: set properties again, this time in 'real' mode\n # In theory, there should be no exceptions thrown here, but this is\n # real live...\n for (arg_14, arg_11) in arg_6:\n try:\n arg_4.set_property_value(arg_14, arg_11, dry_run=False)\n # Set value to None, so the response xml contains empty tags\n arg_16.append((arg_14, None))\n except Exception as arg_19:\n arg_19 = as_DAVError(arg_19)\n arg_16.append((arg_14, arg_19))\n arg_17.append(arg_19.get_user_info())\n\n # Generate response XML\n arg_20 = xml_tools.make_multistatus_el()\n arg_21 = arg_4.get_href()\n util.add_property_response(arg_20, arg_21, arg_16)\n if arg_17:\n arg_22.SubElement(\n arg_20, \"{DAV:}responsedescription\"\n ).text = \"\\n\".join(arg_17)\n\n # Send response\n return util.send_multi_status_response(arg_1, arg_2, arg_20)"} +{"_id": "doc_5021", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Handle MKCOL request to create a new collection.\n\n @see http://www.webdav.org/specs/rfc4918.html#METHOD_MKCOL\n \"\"\"\n arg_3 = arg_1[\"PATH_INFO\"]\n arg_4 = arg_0._davProvider\n # res = provider.get_resource_inst(path, environ)\n\n # Do not understand ANY request body entities\n if util.get_content_length(arg_1) != 0:\n arg_0._fail(\n HTTP_MEDIATYPE_NOT_SUPPORTED,\n \"The server does not handle any body content.\",\n )\n\n # Only accept Depth: 0 (but assume this, if omitted)\n if arg_1.setdefault(\"HTTP_DEPTH\", \"0\") != \"0\":\n arg_0._fail(HTTP_BAD_REQUEST, \"Depth must be '0'.\")\n\n if arg_4.exists(arg_3, arg_1):\n arg_0._fail(\n HTTP_METHOD_NOT_ALLOWED,\n \"MKCOL can only be executed on an unmapped URL.\",\n )\n\n arg_5 = arg_4.get_resource_inst(util.get_uri_parent(arg_3), arg_1)\n if not arg_5 or not arg_5.is_collection:\n arg_0._fail(HTTP_CONFLICT, \"Parent must be 
an existing collection.\")\n\n # TODO: should we check If headers here?\n # self._evaluate_if_headers(res, environ)\n # Check for write permissions on the PARENT\n arg_0._check_write_permission(arg_5, \"0\", arg_1)\n\n arg_5.create_collection(util.get_uri_name(arg_3))\n\n return util.send_status_response(arg_1, arg_2, HTTP_CREATED)"} +{"_id": "doc_5022", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get the data from a chunked transfer.\"\"\"\n # Chunked Transfer Coding\n # http://www.servlets.com/rfcs/rfc2616-sec3.html#sec3.6.1\n\n if \"Darwin\" in arg_1.get(\"HTTP_USER_AGENT\", \"\") and arg_1.get(\n \"HTTP_X_EXPECTED_ENTITY_LENGTH\"\n ):\n # Mac Finder, that does not prepend chunk-size + CRLF ,\n # like it should to comply with the spec. It sends chunk\n # size as integer in a HTTP header instead.\n arg_3 = True\n arg_4 = arg_1.get(\"HTTP_X_EXPECTED_ENTITY_LENGTH\", \"0\")\n arg_5 = int(arg_4)\n else:\n arg_3 = False\n arg_4 = arg_1[\"wsgi.input\"].readline()\n arg_1[\"wsgidav.some_input_read\"] = 1\n if arg_4 == compat.b_empty:\n arg_5 = 0\n else:\n arg_5 = int(arg_4, 16)\n\n while arg_5 > 0:\n arg_4 = arg_1[\"wsgi.input\"].read(arg_2)\n yield arg_4\n if arg_3:\n arg_1[\"wsgidav.some_input_read\"] = 1\n # Keep receiving until we read expected size or reach\n # EOF\n if arg_4 == compat.b_empty:\n arg_5 = 0\n else:\n arg_5 -= len(arg_4)\n else:\n arg_1[\"wsgi.input\"].readline()\n arg_4 = arg_1[\"wsgi.input\"].readline()\n if arg_4 == compat.b_empty:\n arg_5 = 0\n else:\n arg_5 = int(arg_4, 16)\n arg_1[\"wsgidav.all_input_read\"] = 1"} +{"_id": "doc_5023", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Get the data from a non-chunked transfer.\"\"\"\n if arg_2 == 0:\n # TODO: review this\n # XP and Vista MiniRedir submit PUT with Content-Length 0,\n # before LOCK and the real PUT. So we have to accept this.\n _logger.info(\"PUT: Content-Length == 0. Creating empty file...\")\n\n # elif content_length < 0:\n # # TODO: review this\n # # If CONTENT_LENGTH is invalid, we may try to workaround this\n # # by reading until the end of the stream. This may block however!\n # # The iterator produced small chunks of varying size, but not\n # # sure, if we always get everything before it times out.\n # _logger.warning(\"PUT with invalid Content-Length (%s). 
\"\n # \"Trying to read all (this may timeout)...\"\n # .format(environ.get(\"CONTENT_LENGTH\")))\n # nb = 0\n # try:\n # for s in environ[\"wsgi.input\"]:\n # environ[\"wsgidav.some_input_read\"] = 1\n # _logger.debug(\"PUT: read from wsgi.input.__iter__, len=%s\" % len(s))\n # yield s\n # nb += len (s)\n # except socket.timeout:\n # _logger.warning(\"PUT: input timed out after writing %s bytes\" % nb)\n # hasErrors = True\n else:\n assert arg_2 > 0\n arg_4 = arg_2\n while arg_4 > 0:\n arg_5 = min(arg_4, arg_3)\n arg_6 = arg_1[\"wsgi.input\"].read(arg_5)\n # This happens with litmus expect-100 test:\n if not len(arg_6) > 0:\n _logger.error(\"input.read({}) returned 0 bytes\".format(arg_5))\n break\n arg_1[\"wsgidav.some_input_read\"] = 1\n yield arg_6\n arg_4 -= len(arg_6)\n\n if arg_4 == 0:\n arg_1[\"wsgidav.all_input_read\"] = 1"} +{"_id": "doc_5024", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return properties document for path.\"\"\"\n # Query the permanent view to find a url\n arg_2 = arg_0.db.view(\"properties/by_url\", key=arg_1, include_docs=True)\n _logger.debug(\"find(%r) returned %s\" % (arg_1, len(arg_2)))\n assert len(arg_2) <= 1, \"Found multiple matches for %r\" % arg_1\n for arg_3 in arg_2:\n assert arg_3.doc\n return arg_3.doc\n return None"} +{"_id": "doc_5025", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Computes digest hash A1 part.\"\"\"\n arg_4 = arg_0._get_realm_entry(arg_1, arg_2)\n if arg_4 is None:\n return False\n arg_5 = arg_4.get(\"password\")\n arg_3[\"wsgidav.auth.roles\"] = arg_4.get(\"roles\", [])\n return arg_0._compute_http_digest_a1(arg_1, arg_2, arg_5)"} +{"_id": "doc_5026", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a lock dictionary for a token.\n\n If the lock does not exist or is expired, None is returned.\n\n token:\n lock token\n Returns:\n Lock dictionary or \n\n Side effect: if lock is expired, it will be purged and None is returned.\n \"\"\"\n arg_0._lock.acquire_read()\n try:\n arg_2 = arg_0._dict.Func(arg_1)\n if arg_2 is None:\n # Lock not found: purge dangling URL2TOKEN entries\n _logger.debug(\"Lock purged dangling: {}\".format(arg_1))\n arg_0.delete(arg_1)\n return None\n arg_3 = float(arg_2[\"expire\"])\n if arg_3 >= 0 and arg_3 < time.time():\n _logger.debug(\n \"Lock timed-out({}): {}\".format(arg_3, lock_string(arg_2))\n )\n arg_0.delete(arg_1)\n return None\n return arg_2\n finally:\n arg_0._lock.release()"} +{"_id": "doc_5027", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create a direct lock for a resource path.\n\n path:\n Normalized path (utf8 encoded string, no trailing '/')\n lock:\n lock dictionary, without a token entry\n Returns:\n New unique lock token.: \n - lock['timeout'] may be normalized and shorter than requested\n - lock['token'] is added\n \"\"\"\n arg_0._lock.acquire_write()\n try:\n # We expect only a lock definition, not an existing lock\n assert arg_2.get(\"token\") is None\n assert arg_2.get(\"expire\") is None, \"Use timeout instead of expire\"\n assert arg_1 and \"/\" in arg_1\n\n # Normalize root: /foo/bar\n arg_3 = arg_1\n arg_1 = normalize_lock_root(arg_1)\n arg_2[\"root\"] = arg_1\n\n # Normalize timeout from ttl to expire-date\n arg_4 = float(arg_2.get(\"timeout\"))\n if arg_4 is None:\n arg_4 = LockStorageDict.LOCK_TIME_OUT_DEFAULT\n elif arg_4 < 0 or arg_4 > LockStorageDict.LOCK_TIME_OUT_MAX:\n arg_4 = LockStorageDict.LOCK_TIME_OUT_MAX\n\n arg_2[\"timeout\"] = arg_4\n arg_2[\"expire\"] = time.time() + arg_4\n\n 
validate_lock(arg_2)\n\n arg_5 = generate_lock_token()\n arg_2[\"token\"] = arg_5\n\n # Store lock\n arg_0._dict[arg_5] = arg_2\n\n # Store locked path reference\n arg_7 = \"URL2TOKEN:{}\".format(arg_1)\n if arg_7 not in arg_0._dict:\n arg_0._dict[arg_7] = [arg_5]\n else:\n # Note: Shelve dictionary returns copies, so we must reassign\n # values:\n arg_8 = arg_0._dict[arg_7]\n arg_8.append(arg_5)\n arg_0._dict[arg_7] = arg_8\n arg_0._flush()\n _logger.debug(\n \"LockStorageDict.set({!r}): {}\".format(arg_3, lock_string(arg_2))\n )\n return arg_2\n finally:\n arg_0._lock.release()"} +{"_id": "doc_5028", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delete lock.\n\n Returns True on success. False, if token does not exist, or is expired.\n \"\"\"\n arg_0._lock.acquire_write()\n try:\n arg_2 = arg_0._dict.get(arg_1)\n _logger.debug(\"Func {}\".format(lock_string(arg_2)))\n if arg_2 is None:\n return False\n # Remove url to lock mapping\n arg_3 = \"URL2TOKEN:{}\".format(arg_2.get(\"root\"))\n if arg_3 in arg_0._dict:\n # _logger.debug(\" Func token {} from url {}\".format(token, lock.get(\"root\")))\n arg_4 = arg_0._dict[arg_3]\n if len(arg_4) > 1:\n # Note: shelve dictionary returns copies, so we must\n # reassign values:\n arg_4.remove(arg_1)\n arg_0._dict[arg_3] = arg_4\n else:\n del arg_0._dict[arg_3]\n # Remove the lock\n del arg_0._dict[arg_1]\n\n arg_0._flush()\n finally:\n arg_0._lock.release()\n return True"} +{"_id": "doc_5029", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete all entries.\"\"\"\n arg_0._lock.acquire_write() # TODO: read access is enough?\n try:\n arg_1 = arg_0._dict is None\n if arg_1:\n arg_0.open()\n if len(arg_0._dict):\n arg_0._dict.Func()\n arg_0._dict.sync()\n if arg_1:\n arg_0.close()\n finally:\n arg_0._lock.release()"} +{"_id": "doc_5030", "title": "", "text": "def Func(arg_0):\n \"\"\"Return readable rep.\"\"\"\n if not arg_0:\n return \"Lock: None\"\n\n if arg_0[\"expire\"] < 0:\n arg_1 = \"Infinite ({})\".format(arg_0[\"expire\"])\n else:\n arg_1 = \"{} (in {} seconds)\".format(\n util.get_log_time(arg_0[\"expire\"]), arg_0[\"expire\"] - time.time()\n )\n\n return \"Lock(<{}..>, '{}', {}, {}, depth-{}, until {}\".format(\n # first 4 significant token characters\n arg_0.get(\"token\", \"?\" * 30)[18:22],\n arg_0.get(\"root\"),\n arg_0.get(\"principal\"),\n arg_0.get(\"scope\"),\n arg_0.get(\"depth\"),\n arg_1,\n )"} +{"_id": "doc_5031", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7\n ):\n \"\"\"Acquire lock and return lock_dict.\n\n principal\n Name of the principal.\n lock_type\n Must be 'write'.\n lock_scope\n Must be 'shared' or 'exclusive'.\n lock_depth\n Must be '0' or 'infinity'.\n lock_owner\n String identifying the owner.\n path\n Resource URL.\n timeout\n Seconds to live\n\n This function does NOT check, if the new lock creates a conflict!\n \"\"\"\n if arg_7 is None:\n arg_7 = LockManager.LOCK_TIME_OUT_DEFAULT\n elif arg_7 < 0:\n arg_7 = -1\n\n arg_8 = {\n \"root\": arg_6,\n \"type\": arg_2,\n \"scope\": arg_3,\n \"depth\": arg_4,\n \"owner\": arg_5,\n \"timeout\": arg_7,\n \"principal\": arg_1,\n }\n #\n arg_0.storage.create(arg_6, arg_8)\n return arg_8"} +{"_id": "doc_5032", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n arg_8,\n ):\n \"\"\"Check for permissions and Func a lock.\n\n On success return new lock dictionary.\n On error raise a DAVError with an embedded DAVErrorCondition.\n \"\"\"\n arg_1 = normalize_lock_root(arg_1)\n 
arg_0._lock.Func_write()\n try:\n # Raises DAVError on conflict:\n arg_0._check_lock_permission(\n arg_1, arg_2, arg_3, arg_4, arg_8, arg_7\n )\n return arg_0._generate_lock(\n arg_7, arg_2, arg_3, arg_4, arg_5, arg_1, arg_6\n )\n finally:\n arg_0._lock.release()"} +{"_id": "doc_5033", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Set new timeout for lock, if existing and valid.\"\"\"\n if arg_2 is None:\n arg_2 = LockManager.LOCK_TIME_OUT_DEFAULT\n return arg_0.storage.Func(arg_1, arg_2)"} +{"_id": "doc_5034", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Return lock_dict, or None, if not found or invalid.\n\n Side effect: if lock is expired, it will be purged and None is returned.\n\n key:\n name of lock attribute that will be returned instead of a dictionary.\n \"\"\"\n assert arg_2 in (\n None,\n \"type\",\n \"scope\",\n \"depth\",\n \"owner\",\n \"root\",\n \"timeout\",\n \"principal\",\n \"token\",\n )\n arg_3 = arg_0.storage.get(arg_1)\n if arg_2 is None or arg_3 is None:\n return arg_3\n return arg_3[arg_2]"} +{"_id": "doc_5035", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Acquire a read lock for the current thread, waiting at most\n timeout seconds or doing a non-blocking check in case timeout is <= 0.\n\n In case timeout is None, the call to Func blocks until the\n lock request can be serviced.\n\n In case the timeout expires before the lock could be serviced, a\n RuntimeError is thrown.\"\"\"\n\n if arg_1 is not None:\n arg_2 = time() + arg_1\n arg_3 = currentThread()\n arg_0.__condition.acquire()\n try:\n if arg_0.__writer is arg_3:\n # If we are the writer, grant a new read lock, always.\n arg_0.__writercount += 1\n return\n while True:\n if arg_0.__writer is None:\n # Only test anything if there is no current writer.\n if arg_0.__upgradewritercount or arg_0.__pendingwriters:\n if arg_3 in arg_0.__readers:\n # Only grant a read lock if we already have one\n # in case writers are waiting for their turn.\n # This means that writers can't easily get starved\n # (but see below, readers can).\n arg_0.__readers[arg_3] += 1\n return\n # No, we aren't a reader (yet), wait for our turn.\n else:\n # Grant a new read lock, always, in case there are\n # no pending writers (and no writer).\n arg_0.__readers[arg_3] = arg_0.__readers.get(arg_3, 0) + 1\n return\n if arg_1 is not None:\n arg_5 = arg_2 - time()\n if arg_5 <= 0:\n # Timeout has expired, signal caller of this.\n raise RuntimeError(\"Acquiring read lock timed out\")\n arg_0.__condition.wait(arg_5)\n else:\n arg_0.__condition.wait()\n finally:\n arg_0.__condition.release()"} +{"_id": "doc_5036", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Acquire a write lock for the current thread, waiting at most\n timeout seconds or doing a non-blocking check in case timeout is <= 0.\n\n In case the write lock cannot be serviced due to the deadlock\n condition mentioned above, a ValueError is raised.\n\n In case timeout is None, the call to Func blocks until the\n lock request can be serviced.\n\n In case the timeout expires before the lock could be serviced, a\n RuntimeError is thrown.\"\"\"\n\n if arg_1 is not None:\n arg_2 = time() + arg_1\n arg_3, arg_4 = currentThread(), False\n arg_0.__condition.acquire()\n try:\n if arg_0.__writer is arg_3:\n # If we are the writer, grant a new write lock, always.\n arg_0.__writercount += 1\n return\n elif arg_3 in arg_0.__readers:\n # If we are a reader, no need to add us to pendingwriters,\n # we get the upgradewriter slot.\n if 
arg_0.__upgradewritercount:\n # If we are a reader and want to upgrade, and someone\n # else also wants to upgrade, there is no way we can do\n # this except if one of us releases all his read locks.\n # Signal this to user.\n raise ValueError(\"Inevitable dead lock, denying write lock\")\n arg_4 = True\n arg_0.__upgradewritercount = arg_0.__readers.pop(arg_3)\n else:\n # We aren't a reader, so add us to the pending writers queue\n # for synchronization with the readers.\n arg_0.__pendingwriters.append(arg_3)\n while True:\n if not arg_0.__readers and arg_0.__writer is None:\n # Only test anything if there are no readers and writers.\n if arg_0.__upgradewritercount:\n if arg_4:\n # There is a writer to upgrade, and it's us. Take\n # the write lock.\n arg_0.__writer = arg_3\n arg_0.__writercount = arg_0.__upgradewritercount + 1\n arg_0.__upgradewritercount = 0\n return\n # There is a writer to upgrade, but it's not us.\n # Always leave the upgrade writer the advance slot,\n # because he presumes he'll get a write lock directly\n # from a previously held read lock.\n elif arg_0.__pendingwriters[0] is arg_3:\n # If there are no readers and writers, it's always\n # fine for us to take the writer slot, removing us\n # from the pending writers queue.\n # This might mean starvation for readers, though.\n arg_0.__writer = arg_3\n arg_0.__writercount = 1\n arg_0.__pendingwriters = arg_0.__pendingwriters[1:]\n return\n if arg_1 is not None:\n arg_9 = arg_2 - time()\n if arg_9 <= 0:\n # Timeout has expired, signal caller of this.\n if arg_4:\n # Put us back on the reader queue. No need to\n # signal anyone of this change, because no other\n # writer could've taken our spot before we got\n # here (because of remaining readers), as the test\n # for proper conditions is at the start of the\n # loop, not at the end.\n arg_0.__readers[arg_3] = arg_0.__upgradewritercount\n arg_0.__upgradewritercount = 0\n else:\n # We were a simple pending writer, just remove us\n # from the FIFO list.\n arg_0.__pendingwriters.remove(arg_3)\n raise RuntimeError(\"Acquiring write lock timed out\")\n arg_0.__condition.wait(arg_9)\n else:\n arg_0.__condition.wait()\n finally:\n arg_0.__condition.release()"} +{"_id": "doc_5037", "title": "", "text": "def Func(arg_0):\n \"\"\"Release the currently held lock.\n\n In case the current thread holds no lock, a ValueError is thrown.\"\"\"\n\n arg_1 = currentThread()\n arg_0.__condition.acquire()\n try:\n if arg_0.__writer is arg_1:\n # We are the writer, take one nesting depth away.\n arg_0.__writercount -= 1\n if not arg_0.__writercount:\n # No more write locks; take our writer position away and\n # notify waiters of the new circumstances.\n arg_0.__writer = None\n arg_0.__condition.notifyAll()\n elif arg_1 in arg_0.__readers:\n # We are a reader currently, take one nesting depth away.\n arg_0.__readers[arg_1] -= 1\n if not arg_0.__readers[arg_1]:\n # No more read locks, take our reader position away.\n del arg_0.__readers[arg_1]\n if not arg_0.__readers:\n # No more readers, notify waiters of the new\n # circumstances.\n arg_0.__condition.notifyAll()\n else:\n raise ValueError(\"Trying to Func unheld lock\")\n finally:\n arg_0.__condition.Func()"} +{"_id": "doc_5038", "title": "", "text": "def Func(arg_0):\n \"\"\"Initialize base logger named 'wsgidav'.\n\n The base logger is filtered by the `verbose` configuration option.\n Log entries will have a time stamp and thread id.\n\n :Parameters:\n verbose : int\n Verbosity configuration (0..5)\n enable_loggers : string list\n List of 
module logger names, that will be switched to DEBUG level.\n\n Module loggers\n ~~~~~~~~~~~~~~\n Module loggers (e.g 'wsgidav.lock_manager') are named loggers, that can be\n independently switched to DEBUG mode.\n\n Except for verbosity, they will inherit settings from the base logger.\n\n They will suppress DEBUG level messages, unless they are enabled by passing\n their name to util.Func().\n\n If enabled, module loggers will print DEBUG messages, even if verbose == 3.\n\n Example initialize and use a module logger, that will generate output,\n if enabled (and verbose >= 2)::\n\n _logger = util.get_module_logger(__name__)\n [..]\n _logger.debug(\"foo: '{}'\".format(s))\n\n This logger would be enabled by passing its name to Func()::\n\n enable_loggers = [\"lock_manager\",\n \"property_manager\",\n ]\n util.Func(2, enable_loggers)\n\n\n Log Level Matrix\n ~~~~~~~~~~~~~~~~\n\n +---------+--------+---------------------------------------------------------------+\n | Verbose | Option | Log level |\n | level | +-------------+------------------------+------------------------+\n | | | base logger | module logger(default) | module logger(enabled) |\n +=========+========+=============+========================+========================+\n | 0 | -qqq | CRITICAL | CRITICAL | CRITICAL |\n +---------+--------+-------------+------------------------+------------------------+\n | 1 | -qq | ERROR | ERROR | ERROR |\n +---------+--------+-------------+------------------------+------------------------+\n | 2 | -q | WARN | WARN | WARN |\n +---------+--------+-------------+------------------------+------------------------+\n | 3 | | INFO | INFO | **DEBUG** |\n +---------+--------+-------------+------------------------+------------------------+\n | 4 | -v | DEBUG | DEBUG | DEBUG |\n +---------+--------+-------------+------------------------+------------------------+\n | 5 | -vv | DEBUG | DEBUG | DEBUG |\n +---------+--------+-------------+------------------------+------------------------+\n\n \"\"\"\n arg_1 = arg_0.get(\"verbose\", 3)\n\n arg_2 = arg_0.get(\"enable_loggers\", [])\n if arg_2 is None:\n arg_2 = []\n\n arg_3 = arg_0.get(\"logger_date_format\", \"%Y-%m-%d %H:%M:%S\")\n arg_4 = arg_0.get(\n \"logger_format\",\n \"%(asctime)s.%(msecs)03d - <%(thread)d> %(name)-27s %(levelname)-8s: %(message)s\",\n )\n\n arg_5 = logging.Formatter(arg_4, arg_3)\n\n # Define handlers\n arg_6 = logging.StreamHandler(sys.stdout)\n # consoleHandler = logging.StreamHandler(sys.stderr)\n arg_6.setFormatter(arg_5)\n # consoleHandler.setLevel(logging.DEBUG)\n\n # Add the handlers to the base logger\n arg_7 = logging.getLogger(BASE_LOGGER_NAME)\n\n if arg_1 >= 4: # --verbose\n arg_7.setLevel(logging.DEBUG)\n elif arg_1 == 3: # default\n arg_7.setLevel(logging.INFO)\n elif arg_1 == 2: # --quiet\n arg_7.setLevel(logging.WARN)\n # consoleHandler.setLevel(logging.WARN)\n elif arg_1 == 1: # -qq\n arg_7.setLevel(logging.ERROR)\n # consoleHandler.setLevel(logging.WARN)\n else: # -qqq\n arg_7.setLevel(logging.CRITICAL)\n # consoleHandler.setLevel(logging.ERROR)\n\n # Don't call the root's handlers after our custom handlers\n arg_7.propagate = False\n\n # Remove previous handlers\n for arg_9 in arg_7.handlers[:]: # Must iterate an array copy\n try:\n arg_9.flush()\n arg_9.close()\n except Exception:\n pass\n arg_7.removeHandler(arg_9)\n\n arg_7.addHandler(arg_6)\n\n if arg_1 >= 3:\n for arg_10 in arg_2:\n if not arg_10.startswith(BASE_LOGGER_NAME + \".\"):\n arg_10 = BASE_LOGGER_NAME + \".\" + arg_10\n arg_11 = 
logging.getLogger(arg_10.strip())\n arg_11.setLevel(logging.DEBUG)"} +{"_id": "doc_5039", "title": "", "text": "def Func(arg_0):\n \"\"\"Read 1 byte from wsgi.input, if this has not been done yet.\n\n Returning a response without reading from a request body might confuse the\n WebDAV client.\n This may happen, if an exception like '401 Not authorized', or\n '500 Internal error' was raised BEFORE anything was read from the request\n stream.\n\n See GC issue 13, issue 23\n See http://groups.google.com/group/paste-users/browse_frm/thread/fc0c9476047e9a47?hl=en\n\n Note that with persistent sessions (HTTP/1.1) we must make sure, that the\n 'Connection: closed' header is set with the response, to prevent reusing\n the current stream.\n \"\"\"\n if arg_0.get(\"wsgidav.some_input_read\") or arg_0.get(\"wsgidav.all_input_read\"):\n return\n arg_1 = get_content_length(arg_0)\n assert arg_1 >= 0\n if arg_1 == 0:\n return\n\n arg_2 = True\n\n arg_0[\"wsgidav.some_input_read\"] = 1\n if arg_2:\n arg_0[\"wsgidav.all_input_read\"] = 1\n\n arg_3 = arg_0[\"wsgi.input\"]\n\n # TODO: check if still required after GC issue 24 is fixed\n if hasattr(arg_3, \"_consumed\") and hasattr(arg_3, \"length\"):\n # Seems to be Paste's httpserver.LimitedLengthFile\n # see http://groups.google.com/group/paste-users/browse_thread/thread/fc0c9476047e9a47/aa4a3aa416016729?hl=en&lnk=gst&q=.input#aa4a3aa416016729 # noqa\n # Consume something if nothing was consumed *and* work\n # around a bug where paste.httpserver allows negative lengths\n if arg_3._consumed == 0 and arg_3.length > 0:\n # This seems to work even if there's 10K of input.\n if arg_2:\n arg_4 = arg_3.length\n else:\n arg_4 = 1\n arg_5 = arg_3.read(arg_4)\n _logger.debug(\n \"Reading {} bytes from potentially unread httpserver.LimitedLengthFile: '{}'...\".format(\n arg_4, arg_5[:50]\n )\n )\n\n elif hasattr(arg_3, \"_sock\") and hasattr(arg_3._sock, \"settimeout\"):\n # Seems to be a socket\n try:\n # Set socket to non-blocking\n arg_6 = arg_3._sock\n arg_7 = arg_6.gettimeout()\n arg_6.settimeout(0)\n # Read one byte\n try:\n if arg_2:\n arg_4 = arg_1\n else:\n arg_4 = 1\n arg_5 = arg_3.read(arg_4)\n _logger.debug(\n \"Reading {} bytes from potentially unread POST body: '{}'...\".format(\n arg_4, arg_5[:50]\n )\n )\n except socket.error as se:\n # se(10035, 'The socket operation could not complete without blocking')\n _logger.error(\"-> read {} bytes failed: {}\".format(arg_4, se))\n # Restore socket settings\n arg_6.settimeout(arg_7)\n except Exception:\n _logger.error(\"--> wsgi_input.read(): {}\".format(sys.exc_info()))"} +{"_id": "doc_5040", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Append segments to URI.\n\n Example: Func(\"/a/b\", \"c\", \"d\")\n \"\"\"\n arg_2 = \"/\".join(arg_1)\n if not arg_2:\n return arg_0\n return arg_0.rstrip(\"/\") + \"/\" + arg_2"} +{"_id": "doc_5041", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return True, if childUri is a child of parentUri.\n\n This function accounts for the fact that '/a/b/c' and 'a/b/c/' are\n children of '/a/b' (and also of '/a/b/').\n Note that '/a/b/cd' is NOT a child of 'a/b/c'.\n \"\"\"\n return (\n arg_0\n and arg_1\n and arg_1.rstrip(\"/\").startswith(arg_0.rstrip(\"/\") + \"/\")\n )"} +{"_id": "doc_5042", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Read request body XML into an etree.Element.\n\n Return None, if no request body was sent.\n Raise HTTP_BAD_REQUEST, if something else went wrong.\n\n TODO: this is a very relaxed interpretation: should we raise 
HTTP_BAD_REQUEST\n instead, if CONTENT_LENGTH is missing, invalid, or 0?\n\n RFC: For compatibility with HTTP/1.0 applications, HTTP/1.1 requests containing\n a message-body MUST include a valid Content-Length header field unless the\n server is known to be HTTP/1.1 compliant.\n If a request contains a message-body and a Content-Length is not given, the\n server SHOULD respond with 400 (bad request) if it cannot determine the\n length of the message, or with 411 (length required) if it wishes to insist\n on receiving a valid Content-Length.\"\n\n So I'd say, we should accept a missing CONTENT_LENGTH, and try to read the\n content anyway.\n But WSGI doesn't guarantee to support input.read() without length(?).\n At least it locked, when I tried it with a request that had a missing\n content-type and no body.\n\n Current approach: if CONTENT_LENGTH is\n\n - valid and >0:\n read body (exactly bytes) and parse the result.\n - 0:\n Assume empty body and return None or raise exception.\n - invalid (negative or not a number:\n raise HTTP_BAD_REQUEST\n - missing:\n NOT: Try to read body until end and parse the result.\n BUT: assume '0'\n - empty string:\n WSGI allows it to be empty or absent: treated like 'missing'.\n \"\"\"\n #\n arg_2 = arg_0.get(\"CONTENT_LENGTH\", \"\").strip()\n # content_length = -1 # read all of stream\n if arg_2 == \"\":\n # No Content-Length given: read to end of stream\n # TODO: etree.parse() locks, if input is invalid?\n # pfroot = etree.parse(environ[\"wsgi.input\"]).getroot()\n # requestbody = environ[\"wsgi.input\"].read() # TODO: read() should be\n # called in a loop?\n arg_3 = \"\"\n else:\n try:\n arg_4 = int(arg_2)\n if arg_4 < 0:\n raise DAVError(HTTP_BAD_REQUEST, \"Negative content-length.\")\n except ValueError:\n raise DAVError(HTTP_BAD_REQUEST, \"content-length is not numeric.\")\n\n if arg_4 == 0:\n arg_3 = \"\"\n else:\n arg_3 = arg_0[\"wsgi.input\"].read(arg_4)\n arg_0[\"wsgidav.all_input_read\"] = 1\n\n if arg_3 == \"\":\n if arg_1:\n return None\n else:\n raise DAVError(HTTP_BAD_REQUEST, \"Body must not be empty.\")\n\n try:\n arg_5 = etree.fromstring(arg_3)\n except Exception as e:\n raise DAVError(HTTP_BAD_REQUEST, \"Invalid XML format.\", src_exception=e)\n\n # If dumps of the body are desired, then this is the place to do it pretty:\n if arg_0.get(\"wsgidav.dump_request_body\"):\n _logger.info(\n \"{} XML request body:\\n{}\".format(\n arg_0[\"REQUEST_METHOD\"],\n compat.to_native(xml_to_bytes(arg_5, pretty_print=True)),\n )\n )\n arg_0[\"wsgidav.dump_request_body\"] = False\n\n return arg_5"} +{"_id": "doc_5043", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=False):\n \"\"\"Start a WSGI response for a DAVError or status code.\"\"\"\n arg_5 = get_http_status_string(arg_2)\n arg_6 = []\n if arg_3:\n arg_6.extend(arg_3)\n # if 'keep-alive' in environ.get('HTTP_CONNECTION', '').lower():\n # headers += [\n # ('Connection', 'keep-alive'),\n # ]\n\n if arg_2 in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):\n # See paste.lint: these code don't have content\n arg_1(\n arg_5, [(\"Content-Length\", \"0\"), (\"Date\", get_rfc1123_time())] + arg_6\n )\n return [b\"\"]\n\n if arg_2 in (HTTP_OK, HTTP_CREATED):\n arg_2 = DAVError(arg_2)\n assert isinstance(arg_2, DAVError)\n\n arg_7, arg_8 = arg_2.get_response_page()\n if arg_4:\n arg_8 = compat.b_empty\n\n assert compat.is_bytes(arg_8), arg_8 # If not, Content-Length is wrong!\n arg_1(\n arg_5,\n [\n (\"Content-Type\", arg_7),\n (\"Date\", get_rfc1123_time()),\n (\"Content-Length\", 
str(len(arg_8))),\n ]\n + arg_6,\n )\n return [arg_8]"} +{"_id": "doc_5044", "title": "", "text": "def Func(arg_0):\n \"\"\"Return base64 encoded binarystring.\"\"\"\n arg_0 = compat.to_bytes(arg_0)\n arg_0 = compat.base64_encodebytes(arg_0).strip() # return bytestring\n return compat.to_native(arg_0)"} +{"_id": "doc_5045", "title": "", "text": "def Func(arg_0):\n \"\"\"Use the mimetypes module to lookup the type for an extension.\n\n This function also adds some extensions required for HTML5\n \"\"\"\n (arg_1, arg_2) = mimetypes.guess_type(arg_0)\n if not arg_1:\n arg_3 = os.path.splitext(arg_0)[1]\n arg_1 = _MIME_TYPES.get(arg_3)\n _logger.debug(\"mimetype({}): {}\".format(arg_0, arg_1))\n if not arg_1:\n arg_1 = \"application/octet-stream\"\n return arg_1"} +{"_id": "doc_5046", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return probability estimates for the RDD containing test vector X.\n\n Parameters\n ----------\n X : RDD containing array-like items, shape = [m_samples, n_features]\n\n Returns\n -------\n C : RDD with array-like items , shape = [n_samples, n_classes]\n Returns the probability of the samples for each class in\n the models for each RDD block. The columns correspond to the classes\n in sorted order, as they appear in the attribute `classes_`.\n \"\"\"\n check_rdd(arg_1, (sp.spmatrix, np.ndarray))\n return arg_1.map(\n lambda arg_1: super(SparkBaseNB, arg_0).Func(arg_1))"} +{"_id": "doc_5047", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return log-probability estimates for the RDD containing the\n test vector X.\n\n Parameters\n ----------\n X : RDD containing array-like items, shape = [m_samples, n_features]\n\n Returns\n -------\n C : RDD with array-like items, shape = [n_samples, n_classes]\n Returns the log-probability of the samples for each class in\n the model for each RDD block. 
The columns correspond to the classes\n in sorted order, as they appear in the attribute `classes_`.\n \"\"\"\n # required, scikit call self.Func(X) in predict_proba\n # and thus this function is call, it must have the same behavior when\n # not called by sparkit-learn\n if not isinstance(arg_1, BlockRDD):\n return super(SparkBaseNB, arg_0).Func(arg_1)\n\n check_rdd(arg_1, (sp.spmatrix, np.ndarray))\n return arg_1.map(\n lambda arg_1: super(SparkBaseNB, arg_0).Func(arg_1))"} +{"_id": "doc_5048", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Fit Gaussian Naive Bayes according to X, y\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : array-like, shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n check_rdd(arg_1, {'X': (sp.spmatrix, np.ndarray), 'y': (sp.spmatrix, np.ndarray)})\n arg_3 = arg_1[:, ['X', 'y']].map(\n lambda X_y: arg_0.partial_Func(X_y[0], X_y[1], arg_2))\n arg_4 = arg_3.reduce(operator.add)\n arg_0.__dict__.update(arg_4.__dict__)\n return arg_0"} +{"_id": "doc_5049", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Sort features by name\n\n Returns a reordered matrix and modifies the vocabulary in place\n \"\"\"\n arg_2 = sorted(six.iteritems(arg_1))\n arg_3 = np.empty(len(arg_2), dtype=np.int32)\n for arg_4, (arg_5, arg_6) in enumerate(arg_2):\n arg_3[arg_4] = arg_6\n arg_1[arg_5] = arg_4\n\n return arg_3"} +{"_id": "doc_5050", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Learn the vocabulary dictionary and return term-document matrix.\n\n This is equivalent to fit followed by transform, but more efficiently\n implemented.\n\n Parameters\n ----------\n Z : iterable or DictRDD with column 'X'\n An iterable of raw_documents which yields either str, unicode or\n file objects; or a DictRDD with column 'X' containing such\n iterables.\n\n Returns\n -------\n X : array, [n_samples, n_features] or DictRDD\n Document-term matrix.\n \"\"\"\n arg_0._validate_vocabulary()\n\n # map analyzer and cache result\n arg_2 = arg_0.build_analyzer()\n arg_3 = arg_1.transform(lambda arg_4: list(map(arg_2, arg_4)), column='X').persist()\n\n # create vocabulary\n arg_4 = arg_3[:, 'X'] if isinstance(arg_3, DictRDD) else arg_3\n arg_0.vocabulary_ = arg_0._init_vocab(arg_4)\n\n # transform according to vocabulary\n arg_6 = arg_0.broadcast(arg_0._count_vocab, arg_3.context)\n arg_1 = arg_3.transform(arg_6, column='X', dtype=sp.spmatrix)\n\n\n if not arg_0.fixed_vocabulary_:\n arg_4 = arg_1[:, 'X'] if isinstance(arg_1, DictRDD) else arg_1\n\n arg_7 = arg_0.max_df\n arg_8 = arg_0.min_df\n arg_9 = arg_0.max_features\n\n # limit features according to min_df, max_df parameters\n arg_10 = arg_4.shape[0]\n arg_11 = (arg_7\n if isinstance(arg_7, numbers.Integral)\n else arg_7 * arg_10)\n arg_12 = (arg_8\n if isinstance(arg_8, numbers.Integral)\n else arg_8 * arg_10)\n if arg_11 < arg_12:\n raise ValueError(\n \"max_df corresponds to < documents than min_df\")\n arg_13, arg_0.stop_words_ = arg_0._limit_features(\n arg_4, arg_0.vocabulary_, arg_11, arg_12, arg_9)\n\n # sort features\n arg_15 = arg_0._sort_features(arg_0.vocabulary_)\n\n # combined mask\n arg_16 = arg_13[arg_15]\n\n arg_1 = arg_1.transform(lambda x: x[:, arg_16], column='X', dtype=sp.spmatrix)\n arg_3.unpersist()\n return arg_1"} +{"_id": "doc_5051", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Transform documents to 
document-term matrix.\n\n Extract token counts out of raw text documents using the vocabulary\n fitted with fit or the one provided to the constructor.\n\n Parameters\n ----------\n raw_documents : iterable\n An iterable which yields either str, unicode or file objects.\n\n Returns\n -------\n X : sparse matrix, [n_samples, n_features]\n Document-term matrix.\n \"\"\"\n if not hasattr(arg_0, 'vocabulary_'):\n arg_0._validate_vocabulary()\n\n arg_0._check_vocabulary()\n\n arg_2 = arg_0.build_analyzer()\n arg_3 = arg_0.broadcast(arg_0._count_vocab, arg_1.context)\n\n arg_1 = arg_1.Func(lambda X: list(map(arg_2, X)), column='X') \\\n .Func(arg_3, column='X', dtype=sp.spmatrix)\n\n return arg_1"} +{"_id": "doc_5052", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Wraps a Scikit-learn Linear model's fit method to use with RDD\n input.\n\n Parameters\n ----------\n cls : class object\n The sklearn linear model's class to wrap.\n Z : TupleRDD or DictRDD\n The distributed train data in a DictRDD.\n\n Returns\n -------\n self: the wrapped class\n \"\"\"\n arg_5 = lambda X_y: super(arg_1, arg_0).fit(\n X_y[0], X_y[1], *arg_3, **arg_4\n )\n arg_6 = arg_2.map(arg_5)\n arg_7 = arg_6.reduce(operator.add) / arg_6.count()\n arg_0.__dict__.update(arg_7.__dict__)\n return arg_0"} +{"_id": "doc_5053", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Wraps a Scikit-learn Linear model's predict method to use with RDD\n input.\n\n Parameters\n ----------\n cls : class object\n The sklearn linear model's class to wrap.\n Z : ArrayRDD\n The distributed data to predict in a DictRDD.\n\n Returns\n -------\n self: the wrapped class\n \"\"\"\n return arg_2.map(lambda arg_2: super(arg_1, arg_0).predict(arg_2, *arg_3, **arg_4))"} +{"_id": "doc_5054", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Fit linear model.\n\n Parameters\n ----------\n Z : DictRDD with (X, y) values\n X containing numpy array or sparse matrix - The training data\n y containing the target values\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n check_rdd(arg_1, {'X': (sp.spmatrix, np.ndarray)})\n return arg_0._spark_Func(SparkLinearRegression, arg_1)"} +{"_id": "doc_5055", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Fit all the transforms one after the other and transform the\n data, then Func the transformed data using the final estimator.\n\n Parameters\n ----------\n Z : ArrayRDD, TupleRDD or DictRDD\n Input data in blocked distributed format.\n\n Returns\n -------\n self : SparkPipeline\n \"\"\"\n arg_3, arg_2 = arg_0._pre_transform(arg_1, **arg_2)\n arg_0.steps[-1][-1].Func(arg_3, **arg_2)\n arg_3.unpersist()\n return arg_0"} +{"_id": "doc_5056", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Applies transforms to the data, and the Func method of the\n final estimator. 
Valid only if the final estimator implements\n Func.\"\"\"\n arg_2 = arg_1\n for arg_3, arg_4 in arg_0.steps[:-1]:\n arg_2 = arg_4.transform(arg_2)\n return arg_0.steps[-1][-1].Func(arg_2)"} +{"_id": "doc_5057", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Actual fitting, performing the search over parameters.\"\"\"\n arg_0.scorer_ = check_scoring(arg_0.estimator, scoring=arg_0.scoring)\n\n arg_4 = arg_0.cv\n arg_4 = _check_cv(arg_4, arg_1)\n\n if arg_0.verbose > 0:\n if isinstance(arg_2, Sized):\n arg_5 = len(arg_2)\n print(\"Fitting {0} folds for each of {1} candidates, totalling\"\n \" {2} fits\".format(len(arg_4), arg_5,\n arg_5 * len(arg_4)))\n\n arg_6 = clone(arg_0.estimator)\n\n arg_7 = arg_0.pre_dispatch\n\n arg_8 = Parallel(\n n_jobs=arg_0.n_jobs, verbose=arg_0.verbose,\n arg_7=arg_7, backend=\"threading\"\n )(\n delayed(Func_and_score)(clone(arg_6), arg_1, arg_0.scorer_,\n train, test, arg_0.verbose, arg_20,\n arg_0.fit_params, return_parameters=True,\n error_score=arg_0.error_score)\n for arg_20 in arg_2\n for train, test in arg_4)\n\n # Out is a list of triplet: score, estimator, n_test_samples\n arg_9 = len(arg_8)\n arg_10 = len(arg_4)\n\n arg_11 = list()\n arg_12 = list()\n for arg_13 in range(0, arg_9, arg_10):\n arg_14 = 0\n arg_15 = 0\n arg_16 = []\n for arg_17, arg_18, arg_19, arg_20 in \\\n arg_8[arg_13:arg_13 + arg_10]:\n arg_16.append(arg_17)\n if arg_0.iid:\n arg_17 *= arg_18\n arg_14 += arg_18\n arg_15 += arg_17\n if arg_0.iid:\n arg_15 /= float(arg_14)\n else:\n arg_15 /= float(arg_10)\n arg_11.append((arg_15, arg_20))\n # TODO: shall we also store the test_fold_sizes?\n arg_12.append(_CVScoreTuple(\n arg_20,\n arg_15,\n np.array(arg_16)))\n # Store the computed scores\n arg_0.grid_scores_ = arg_12\n\n # Find the best parameters by comparing on the mean validation score:\n # note that `sorted` is deterministic in the way it breaks ties\n arg_22 = sorted(arg_12, key=lambda x: x.mean_validation_score,\n reverse=True)[0]\n arg_0.best_params_ = arg_22.parameters\n arg_0.best_score_ = arg_22.mean_validation_score\n\n if arg_0.refit:\n # fit the best estimator using the entire dataset\n # clone first to work around broken estimators\n arg_25 = clone(arg_6).set_params(\n **arg_22.parameters)\n arg_25.fit(arg_1, **arg_0.fit_params)\n arg_0.best_estimator_ = arg_25\n return arg_0"} +{"_id": "doc_5058", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute the score of an estimator on a given test set.\"\"\"\n arg_3 = arg_2(arg_0, arg_1)\n if not isinstance(arg_3, numbers.Number):\n raise ValueError(\"scoring must return a number, got %s (%s) instead.\"\n % (str(arg_3), type(arg_3)))\n return arg_3"} +{"_id": "doc_5059", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Predict the closest cluster each sample in X belongs to.\n\n In the vector quantization literature, `cluster_centers_` is called\n the code book and each value returned by `Func` is the index of\n the closest code in the code book.\n\n Parameters\n ----------\n X : ArrayRDD containing array-like, sparse matrix\n New data to Func.\n\n Returns\n -------\n labels : ArrayRDD with Funcions\n Index of the cluster each sample belongs to.\n\n \"\"\"\n check_rdd(arg_1, (np.ndarray, sp.spmatrix))\n if hasattr(arg_0, '_mllib_model'):\n if isinstance(arg_1, ArrayRDD):\n arg_1 = arg_1.unblock()\n return arg_1.map(lambda x: arg_0._mllib_model.Func(x))\n else:\n arg_2 = arg_1.map(lambda arg_1: super(SparkKMeans, arg_0).Func(arg_1))\n return ArrayRDD(arg_2)"} +{"_id": "doc_5060", "title": 
"", "text": "def Func(arg_0, arg_1):\n \"\"\"Distributed method to Func class labels for samples in X.\n\n Parameters\n ----------\n X : ArrayRDD containing {array-like, sparse matrix}\n Samples.\n\n Returns\n -------\n C : ArrayRDD\n Predicted class label per sample.\n \"\"\"\n check_rdd(arg_1, (sp.spmatrix, np.ndarray))\n return arg_0._spark_Func(SparkSGDClassifier, arg_1)"} +{"_id": "doc_5061", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Learn a list of feature name -> indices mappings.\n\n Parameters\n ----------\n Z : DictRDD with column 'X'\n Dict(s) or Mapping(s) from feature names (arbitrary Python\n objects) to feature values (strings or convertible to dtype).\n\n Returns\n -------\n self\n \"\"\"\n arg_2 = arg_1[:, 'X'] if isinstance(arg_1, DictRDD) else arg_1\n\n \"\"\"Create vocabulary\n \"\"\"\n class SetAccum(AccumulatorParam):\n\n def zero(arg_0, arg_3):\n return set(arg_3)\n\n def addInPlace(arg_0, arg_4, arg_5):\n arg_4 |= arg_5\n return arg_4\n\n arg_6 = arg_2.context.accumulator(set(), SetAccum())\n\n def mapper(arg_2, arg_7=arg_0.separator):\n arg_8 = []\n for arg_9 in arg_2:\n for arg_10, arg_11 in six.iteritems(arg_9):\n if isinstance(arg_11, six.string_types):\n arg_10 = \"%s%s%s\" % (arg_10, arg_0.separator, arg_11)\n arg_8.append(arg_10)\n arg_6.add(set(arg_8))\n\n arg_2.foreach(mapper) # init vocabulary\n arg_8 = list(arg_6.value)\n\n if arg_0.sort:\n arg_8.sort()\n\n arg_12 = dict((arg_10, i) for i, arg_10 in enumerate(arg_8))\n\n arg_0.feature_names_ = arg_8\n arg_0.vocabulary_ = arg_12\n\n return arg_0"} +{"_id": "doc_5062", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fit LSI model to X and perform dimensionality reduction on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n Returns\n -------\n X_new : array, shape (n_samples, n_components)\n Reduced version of X. 
This will always be a dense array.\n \"\"\"\n arg_2 = arg_1[:, 'X'] if isinstance(arg_1, DictRDD) else arg_1\n check_rdd(arg_2, (sp.spmatrix, np.ndarray))\n if arg_0.algorithm == \"em\":\n arg_2 = arg_2.persist() # boosting iterative svm\n arg_3, arg_4 = svd_em(arg_2, k=arg_0.n_components, maxiter=arg_0.n_iter,\n tol=arg_0.tol, compute_u=False,\n seed=arg_0.random_state)\n arg_0.components_ = arg_4\n arg_2.unpersist()\n return arg_0.transform(arg_1)\n else:\n # TODO: raise warning non distributed\n return super(SparkTruncatedSVD, arg_0).Func(arg_2.tosparse())"} +{"_id": "doc_5063", "title": "", "text": "def Func(arg_0, arg_1, arg_2=-1):\n \"\"\"Pack rdd with a specific collection constructor.\"\"\"\n arg_3 = 0\n arg_4 = []\n for arg_5 in arg_0:\n if (arg_2 > 0) and (arg_3 >= arg_2):\n yield _pack_accumulated(arg_4, arg_1)\n arg_4 = []\n arg_3 = 0\n arg_4.append(arg_5)\n arg_3 += 1\n if arg_3 > 0:\n yield _pack_accumulated(arg_4, arg_1)"} +{"_id": "doc_5064", "title": "", "text": "def Func(arg_0, arg_1, arg_2=-1):\n \"\"\"Pack rdd of tuples as tuples of arrays or scipy.sparse matrices.\"\"\"\n arg_3 = 0\n arg_4 = None\n for arg_5 in arg_0:\n if arg_4 is None:\n arg_4 = tuple([] for _ in range(len(arg_5)))\n\n if (arg_2 > 0) and (arg_3 >= arg_2):\n yield tuple(_pack_accumulated(arg_6, arg_7)\n for arg_6, arg_7 in zip(arg_4, arg_1))\n arg_4 = tuple([] for _ in range(len(arg_5)))\n arg_3 = 0\n\n for arg_8, arg_6 in zip(arg_5, arg_4):\n arg_6.append(arg_8)\n arg_3 += 1\n if arg_3 > 0:\n yield tuple(_pack_accumulated(arg_6, arg_7)\n for arg_6, arg_7 in zip(arg_4, arg_1))"} +{"_id": "doc_5065", "title": "", "text": "def Func(arg_0, arg_1=-1, arg_2=None):\n \"\"\"Block an RDD\n\n Parameters\n ----------\n\n rdd : RDD\n RDD of data points to Func into either numpy arrays,\n scipy sparse matrices, or pandas data frames.\n Type of data point will be automatically inferred\n and Funced accordingly.\n\n bsize : int, optional, default None\n Size of each Func (number of elements), if None all data points\n from each partition will be combined in a Func.\n\n Returns\n -------\n\n rdd : ArrayRDD or TupleRDD or DictRDD\n The transformed rdd with added functionality\n \"\"\"\n try:\n arg_3 = arg_0.first()\n except IndexError:\n # empty RDD: do not Func\n return arg_0\n\n # do different kinds of Func depending on the type\n if isinstance(arg_3, dict):\n arg_0 = arg_0.map(lambda x: list(x.values()))\n return DictRDD(arg_0, list(arg_3.keys()), arg_1, arg_2)\n elif isinstance(arg_3, tuple):\n return DictRDD(arg_0, arg_1=arg_1, arg_2=arg_2)\n elif sp.issparse(arg_3):\n return SparseRDD(arg_0, arg_1)\n elif isinstance(arg_3, np.ndarray):\n return ArrayRDD(arg_0, arg_1)\n else:\n return BlockRDD(arg_0, arg_1, arg_2)"} +{"_id": "doc_5066", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the shape of the data.\"\"\"\n # TODO cache\n arg_1 = arg_0.first().shape\n Func = arg_0._rdd.map(lambda x: x.shape[0]).sum()\n return (Func,) + arg_1[1:]"} +{"_id": "doc_5067", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the data as numpy.array from each partition.\"\"\"\n arg_1 = arg_0._rdd.map(lambda x: x.Func())\n return np.concatenate(arg_1.collect())"} +{"_id": "doc_5068", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Execute a Funcation on a column or columns. Returns the modified\n DictRDD.\n\n Parameters\n ----------\n f : function\n The function to execute on the columns.\n column : {str, list or None}\n The column(s) to Func. 
If None is specified the method is\n equivalent to map.\n column : {str, list or None}\n The dtype of the column(s) to Func.\n\n Returns\n -------\n result : DictRDD\n DictRDD with Funced column(s).\n\n TODO: optimize\n \"\"\"\n arg_4 = arg_0.dtype\n if arg_2 is None:\n arg_5 = list(range(len(arg_0.columns)))\n else:\n if not type(arg_2) in (list, tuple):\n arg_2 = [arg_2]\n arg_5 = [arg_0.columns.index(c) for c in arg_2]\n\n if arg_3 is not None:\n if not type(arg_3) in (list, tuple):\n arg_3 = [arg_3]\n arg_4 = [arg_3[arg_5.index(arg_8)] if arg_8 in arg_5 else t\n for arg_8, t in enumerate(arg_0.dtype)]\n\n def mapper(arg_6):\n arg_7 = arg_1(*[arg_6[arg_8] for arg_8 in arg_5])\n\n if len(arg_5) == 1:\n arg_7 = (arg_7,)\n elif not isinstance(arg_7, (tuple, list)):\n raise ValueError(\"Transformer function must return an\"\n \" iterable!\")\n elif len(arg_7) != len(arg_5):\n raise ValueError(\"Transformer result's length must be\"\n \" equal to the given columns length!\")\n\n return tuple(arg_7[arg_5.index(arg_8)] if arg_8 in arg_5 else arg_9\n for arg_8, arg_9 in enumerate(arg_6))\n\n return DictRDD(arg_0._rdd.map(mapper),\n columns=arg_0.columns, arg_3=arg_4,\n bsize=arg_0.bsize, noblock=True)"} +{"_id": "doc_5069", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove objects from the group.\n\n Parameters\n ----------\n to_remove : list\n A list of cobra objects to remove from the group\n \"\"\"\n\n if isinstance(arg_1, string_types) or \\\n hasattr(arg_1, \"id\"):\n warn(\"need to pass in a list\")\n arg_1 = [arg_1]\n\n arg_0._members.difference_update(arg_1)"} +{"_id": "doc_5070", "title": "", "text": "def Func(arg_0, arg_1=1E-06, arg_2=200, arg_3=None):\n \"\"\"\n Perform geometric FBA to obtain a unique, centered flux distribution.\n\n Geometric FBA [1]_ formulates the problem as a polyhedron and\n then solves it by bounding the convex hull of the polyhedron.\n The bounding forms a box around the convex hull which reduces\n with every iteration and extracts a unique solution in this way.\n\n Parameters\n ----------\n model: cobra.Model\n The model to perform geometric FBA on.\n epsilon: float, optional\n The convergence tolerance of the model (default 1E-06).\n max_tries: int, optional\n Maximum number of iterations (default 200).\n processes : int, optional\n The number of parallel processes to run. If not explicitly passed,\n will be set from the global configuration singleton.\n\n Returns\n -------\n cobra.Solution\n The solution object containing all the constraints required\n for geometric FBA.\n\n References\n ----------\n .. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).\n Flux balance analysis: A geometric perspective.\n Journal of theoretical biology.258. 
311-5.\n 10.1016/j.jtbi.2009.01.027.\n\n \"\"\"\n\n with arg_0:\n # Variables' and constraints' storage variables.\n arg_4 = []\n arg_5 = []\n arg_6 = []\n\n # The first iteration.\n arg_7 = arg_0.problem\n add_pfba(arg_0) # Minimize the solution space to a convex hull.\n arg_0.optimize()\n arg_8 = flux_variability_analysis(arg_0, arg_3=arg_3)\n arg_9 = (arg_8[\"maximum\"] + arg_8[\"minimum\"]).abs() / 2\n\n # Set the gFBA constraints.\n for arg_10 in arg_0.reactions:\n arg_11 = arg_7.Variable(\"Func_\" + arg_10.id,\n arg_23=0,\n arg_22=arg_9[arg_10.id])\n arg_12 = arg_7.Constraint(arg_10.flux_expression - arg_11,\n arg_22=arg_9[arg_10.id],\n name=\"Func_upper_const_\" +\n arg_10.id)\n arg_13 = arg_7.Constraint(arg_10.flux_expression + arg_11,\n arg_23=arg_8.at[arg_10.id, \"minimum\"],\n name=\"Func_lower_const_\" +\n arg_10.id)\n arg_6.append((arg_10.id, arg_11, arg_12, arg_13))\n arg_4.extend([arg_11, arg_12, arg_13])\n arg_5.append(arg_11)\n arg_0.add_cons_vars(arg_4)\n\n # Minimize the distance between the flux distribution and center.\n arg_0.objective = arg_7.Objective(Zero, sloppy=True, direction=\"min\")\n arg_0.objective.set_linear_coefficients({arg_15: 1.0 for arg_15 in arg_5})\n # Update loop variables.\n arg_16 = arg_0.optimize()\n arg_8 = flux_variability_analysis(arg_0, arg_3=arg_3)\n arg_9 = (arg_8[\"maximum\"] + arg_8[\"minimum\"]).abs() / 2\n arg_17 = (arg_8[\"maximum\"] - arg_8[\"minimum\"]).max()\n arg_18 = 1\n LOGGER.debug(\"Iteration: %d; delta: %.3g; status: %s.\",\n arg_18, arg_17, arg_16.status)\n\n # Following iterations that minimize the distance below threshold.\n while arg_17 > arg_1 and arg_18 < arg_2:\n for arg_19, arg_11, arg_20, arg_21 in arg_6:\n arg_11.ub = arg_9[arg_19]\n arg_20.ub = arg_9[arg_19]\n arg_21.lb = arg_8.at[arg_19, \"minimum\"]\n # Update loop variables.\n arg_16 = arg_0.optimize()\n arg_8 = flux_variability_analysis(arg_0, arg_3=arg_3)\n arg_9 = (arg_8[\"maximum\"] + arg_8[\"minimum\"]).abs() / 2\n arg_17 = (arg_8[\"maximum\"] - arg_8[\"minimum\"]).max()\n arg_18 += 1\n LOGGER.debug(\"Iteration: %d; delta: %.3g; status: %s.\",\n arg_18, arg_17, arg_16.status)\n\n if arg_18 == arg_2:\n raise RuntimeError(\n \"The iterations have exceeded the maximum value of {}. \"\n \"This is probably due to the increased complexity of the \"\n \"model and can lead to inaccurate results. Please set a \"\n \"different convergence tolerance and/or increase the \"\n \"maximum iterations\".format(arg_2)\n )\n\n return arg_16"} +{"_id": "doc_5071", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Query the list\n\n Parameters\n ----------\n search_function : a string, regular expression or function\n Used to find the matching elements in the list.\n - a regular expression (possibly compiled), in which case the\n given attribute of the object should match the regular expression.\n - a function which takes one argument and returns True for\n desired values\n\n attribute : string or None\n the name attribute of the object to passed as argument to the\n `search_function`. 
If this is None, the object itself is used.\n\n Returns\n -------\n DictList\n a new list of objects which match the Func\n\n Examples\n --------\n >>> import cobra.test\n >>> model = cobra.test.create_test_model('textbook')\n >>> model.reactions.Func(lambda x: x.boundary)\n >>> import re\n >>> regex = re.compile('^g', flags=re.IGNORECASE)\n >>> model.metabolites.Func(regex, attribute='name')\n \"\"\"\n def select_attribute(arg_3):\n if arg_2 is None:\n return arg_3\n else:\n return getattr(arg_3, arg_2)\n\n try:\n # if the search_function is a regular expression\n arg_4 = re.compile(arg_1)\n\n if arg_2 is not None:\n arg_5 = (\n i for i in arg_0 if\n arg_4.findall(select_attribute(i)) != [])\n\n else:\n # Don't regex on objects\n arg_5 = (\n i for i in arg_0 if\n arg_4.findall(getattr(i, 'id')) != [])\n\n except TypeError:\n arg_5 = (\n i for i in arg_0 if arg_1(select_attribute(i)))\n\n arg_6 = arg_0.__class__()\n arg_6._extend_nocheck(arg_5)\n return arg_6"} +{"_id": "doc_5072", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"adds elements with id's not already in the model\"\"\"\n arg_2 = arg_0._dict\n arg_3 = arg_0.append\n for arg_4 in arg_1:\n if arg_4.id not in arg_2:\n arg_3(arg_4)"} +{"_id": "doc_5073", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Func list by appending elements from the iterable\"\"\"\n # Sometimes during initialization from an older pickle, _dict\n # will not have initialized yet, because the initialization class was\n # left unspecified. This is an issue because unpickling calls\n # DictList.Func, which requires the presence of _dict. Therefore,\n # the issue is caught and addressed here.\n if not hasattr(arg_0, \"_dict\") or arg_0._dict is None:\n arg_0._dict = {}\n arg_2 = arg_0._dict\n arg_3 = len(arg_0)\n list.Func(arg_0, arg_1)\n for arg_4, arg_5 in enumerate(islice(arg_0, arg_3, None),\n arg_3):\n arg_6 = arg_5.id\n if arg_6 not in arg_2:\n arg_2[arg_6] = arg_4\n else:\n # undo the Func and raise an error\n arg_0 = arg_0[:arg_3]\n arg_0._check(arg_6)\n # if the above succeeded, then the id must be present\n # twice in the list being added\n raise ValueError(\"id '%s' at index %d is non-unique. \"\n \"Is it present twice?\" % (str(arg_6), arg_4))"} +{"_id": "doc_5074", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"extends without checking for uniqueness\n\n This function should only be used internally by DictList when it\n can guarantee elements are already unique (as in when coming from\n self or other DictList). 
It will be faster because it skips these\n checks.\n\n \"\"\"\n arg_2 = len(arg_0)\n list.extend(arg_0, arg_1)\n arg_3 = arg_0._dict\n if arg_2 is 0:\n arg_0._generate_index()\n return\n for arg_4, arg_5 in enumerate(islice(arg_0, arg_2, None),\n arg_2):\n arg_3[arg_5.id] = arg_4"} +{"_id": "doc_5075", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"Determine the position in the list\n\n id: A string or a :class:`~cobra.core.Object.Object`\n\n \"\"\"\n # because values are unique, start and stop are not relevant\n if isinstance(arg_1, string_types):\n try:\n return arg_0._dict[arg_1]\n except KeyError:\n raise ValueError(\"%s not found\" % arg_1)\n try:\n arg_3 = arg_0._dict[arg_1.id]\n if arg_0[arg_3] is not arg_1:\n raise ValueError(\n \"Another object with the identical id (%s) found\" % arg_1.id)\n return arg_3\n except KeyError:\n raise ValueError(\"%s not found\" % str(arg_1))"} +{"_id": "doc_5076", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Func object before index\"\"\"\n arg_0._check(arg_2.id)\n list.Func(arg_0, arg_1, arg_2)\n # all subsequent entries now have been shifted up by 1\n arg_3 = arg_0._dict\n for arg_4, arg_5 in iteritems(arg_3):\n if arg_5 >= arg_1:\n arg_3[arg_4] = arg_5 + 1\n arg_3[arg_2.id] = arg_1"} +{"_id": "doc_5077", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The shadow price in the most recent solution.\n\n Shadow price is the dual value of the corresponding constraint in the\n model.\n\n Warnings\n --------\n * Accessing shadow prices through a `Solution` object is the safer,\n preferred, and only guaranteed to be correct way. You can see how to\n do so easily in the examples.\n * Shadow price is retrieved from the currently defined\n `self._model.solver`. The solver status is checked but there are no\n guarantees that the current solver state is the one you are looking\n for.\n * If you modify the underlying model after an optimization, you will\n retrieve the old optimization values.\n\n Raises\n ------\n RuntimeError\n If the underlying model was never optimized beforehand or the\n metabolite is not part of a model.\n OptimizationError\n If the solver status is anything other than 'optimal'.\n\n Examples\n --------\n >>> import cobra\n >>> import cobra.test\n >>> model = cobra.test.create_test_model(\"textbook\")\n >>> solution = model.optimize()\n >>> model.metabolites.glc__D_e.Func\n -0.09166474637510488\n >>> solution.Funcs.glc__D_e\n -0.091664746375104883\n \"\"\"\n try:\n check_solver_status(arg_0._model.solver.status)\n return arg_0._model.constraints[arg_0.id].dual\n except AttributeError:\n raise RuntimeError(\n \"metabolite '{}' is not part of a model\".format(arg_0.id))\n # Due to below all-catch, which sucks, need to reraise these.\n except (RuntimeError, OptimizationError) as err:\n raise_with_traceback(err)\n # Would love to catch CplexSolverError and GurobiError here.\n except Exception as err:\n raise_from(OptimizationError(\n \"Likely no solution exists. 
Original solver message: {}.\"\n \"\".format(str(err))), err)"} +{"_id": "doc_5078", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load a cobra model from a file in YAML format.\n\n Parameters\n ----------\n filename : str or file-like\n File path or descriptor that contains the YAML document describing the\n cobra model.\n\n Returns\n -------\n cobra.Model\n The cobra model as represented in the YAML document.\n\n See Also\n --------\n from_yaml : Load from a string.\n \"\"\"\n if isinstance(arg_0, string_types):\n with io.open(arg_0, \"r\") as file_handle:\n return model_from_dict(yaml.load(file_handle))\n else:\n return model_from_dict(yaml.load(arg_0))"} +{"_id": "doc_5079", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Some common methods for processing a database of flux information into\n print-ready formats. Used in both model_summary and metabolite_summary. \"\"\"\n\n arg_4 = arg_0['flux'].abs()\n arg_5 = arg_2 * arg_4.max()\n\n # Drop unused boundary fluxes\n if arg_1 is None:\n arg_0 = arg_0.loc[\n arg_4 >= arg_5, :].copy()\n else:\n arg_0 = arg_0.loc[\n (arg_4 >= arg_5) |\n (arg_0['fmin'].abs() >= arg_5) |\n (arg_0['fmax'].abs() >= arg_5), :].copy()\n\n # Why set to zero? If included show true value?\n # flux_dataframe.loc[\n # flux_dataframe['flux'].abs() < flux_threshold, 'flux'] = 0\n\n # Make all fluxes positive\n if arg_1 is None:\n arg_0['is_input'] = (arg_0['flux'] >= 0)\n arg_0['flux'] = arg_0['flux'].abs()\n else:\n\n def get_direction(arg_6, arg_7, arg_8):\n \"\"\" decide whether or not to reverse a flux to make it positive \"\"\"\n\n if arg_6 < 0:\n return -1\n elif arg_6 > 0:\n return 1\n elif (arg_8 > 0) & (arg_7 <= 0):\n return 1\n elif (arg_8 < 0) & (arg_7 >= 0):\n return -1\n elif ((arg_8 + arg_7) / 2) < 0:\n return -1\n else:\n return 1\n\n arg_9 = arg_0.apply(\n lambda x: get_direction(x.flux, x.fmin, x.fmax), 1)\n\n arg_0['is_input'] = arg_9 == 1\n\n arg_0.loc[:, ['flux', 'fmin', 'fmax']] = \\\n arg_0.loc[:, ['flux', 'fmin', 'fmax']].multiply(\n arg_9, 0).astype('float').round(6)\n\n arg_0.loc[:, ['flux', 'fmin', 'fmax']] = \\\n arg_0.loc[:, ['flux', 'fmin', 'fmax']].applymap(\n lambda x: x if abs(x) > 1E-6 else 0)\n\n if arg_1 is not None:\n arg_0['fva_fmt'] = arg_0.apply(\n lambda x: (\"[{0.fmin:\" + arg_3 + \"}, {0.fmax:\" +\n arg_3 + \"}]\").format(x), 1)\n\n arg_0 = arg_0.sort_values(\n by=['flux', 'fmax', 'fmin', 'id'],\n ascending=[False, False, False, True])\n\n else:\n arg_0 = arg_0.sort_values(\n by=['flux', 'id'], ascending=[False, True])\n\n return arg_0"} +{"_id": "doc_5080", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Coefficient for the reactions in a linear objective.\n\n Parameters\n ----------\n model : cobra model\n the model object that defined the objective\n reactions : list\n an optional list for the reactions to get the coefficients for. All\n reactions if left missing.\n\n Returns\n -------\n dict\n A dictionary where the key is the reaction object and the value is\n the corresponding coefficient. 
Empty dictionary if there are no\n linear terms in the objective.\n \"\"\"\n arg_2 = {}\n arg_1 = arg_0.reactions if not arg_1 else arg_1\n try:\n arg_3 = arg_0.solver.objective.expression\n arg_4 = arg_3.as_coefficients_dict()\n except AttributeError:\n return arg_2\n for arg_5 in arg_1:\n arg_6 = arg_4.get(arg_5.forward_variable, 0)\n arg_7 = arg_4.get(arg_5.reverse_variable, 0)\n if arg_6 != 0:\n if arg_6 == -arg_7:\n arg_2[arg_5] = float(arg_6)\n return arg_2"}
{"_id": "doc_5081", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check whether a sympy expression references the correct variables.\n\n Parameters\n ----------\n model : cobra.Model\n The model in which to check for variables.\n expression : sympy.Basic\n A sympy expression.\n\n Returns\n -------\n boolean\n True if all referenced variables are contained in model, False\n otherwise.\n \"\"\"\n arg_2 = arg_1.atoms(optlang.interface.Variable)\n return all(arg_3.problem is arg_0.solver for arg_3 in arg_2)"}
{"_id": "doc_5082", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Set the model objective.\n\n Parameters\n ----------\n model : cobra model\n The model to set the objective for\n value : model.problem.Objective,\n e.g. optlang.glpk_interface.Objective, sympy.Basic or dict\n\n If the model objective is linear, the value can be a new Objective\n object or a dictionary with linear coefficients where each key is a\n reaction and the element the new coefficient (float).\n\n If the objective is not linear and `additive` is true, only values\n of class Objective.\n\n additive : bool\n If true, add the terms to the current objective, otherwise start with\n an empty objective.\n \"\"\"\n arg_3 = arg_0.problem\n arg_4 = arg_0.solver.objective.expression\n arg_4 = arg_3.Objective(\n arg_4, arg_10=arg_0.solver.objective.direction,\n sloppy=True)\n\n if isinstance(arg_1, dict):\n if not arg_0.objective.is_Linear:\n raise ValueError('can only update non-linear objectives '\n 'additively using object of class '\n 'model.problem.Objective, not %s' %\n type(arg_1))\n\n if not arg_2:\n arg_0.solver.objective = arg_3.Objective(\n Zero, arg_10=arg_0.solver.objective.direction)\n for arg_7, arg_8 in arg_1.items():\n arg_0.solver.objective.set_linear_coefficients(\n {arg_7.forward_variable: arg_8,\n arg_7.reverse_variable: -arg_8})\n\n elif isinstance(arg_1, (Basic, optlang.interface.Objective)):\n if isinstance(arg_1, Basic):\n arg_1 = arg_3.Objective(\n arg_1, arg_10=arg_0.solver.objective.direction,\n sloppy=False)\n # Check whether expression only uses variables from current model\n # clone the objective if not, faster than cloning without checking\n if not _valid_atoms(arg_0, arg_1.expression):\n arg_1 = arg_3.Objective.clone(arg_1, arg_0=arg_0.solver)\n\n if not arg_2:\n arg_0.solver.objective = arg_1\n else:\n arg_0.solver.objective += arg_1.expression\n else:\n raise TypeError(\n '%r is not a valid objective for %r.'
% (arg_1, arg_0.solver))\n\n arg_9 = get_context(arg_0)\n if arg_9:\n def reset():\n arg_0.solver.objective = arg_4\n arg_0.solver.objective.direction = arg_4.direction\n\n arg_9(reset)"} +{"_id": "doc_5083", "title": "", "text": "def Func(arg_0):\n \"\"\"Give a string representation for an optlang interface.\n\n Parameters\n ----------\n interface : string, ModuleType\n Full name of the interface in optlang or cobra representation.\n For instance 'optlang.glpk_interface' or 'optlang-glpk'.\n\n Returns\n -------\n string\n The name of the interface as a string\n \"\"\"\n if isinstance(arg_0, ModuleType):\n arg_0 = arg_0.__name__\n return re.sub(r\"optlang.|.interface\", \"\", arg_0)"} +{"_id": "doc_5084", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Choose a solver given a solver name and model.\n\n This will choose a solver compatible with the model and required\n capabilities. Also respects model.solver where it can.\n\n Parameters\n ----------\n model : a cobra model\n The model for which to choose the solver.\n solver : str, optional\n The name of the solver to be used.\n qp : boolean, optional\n Whether the solver needs Quadratic Programming capabilities.\n\n Returns\n -------\n solver : an optlang solver interface\n Returns a valid solver for the problem.\n\n Raises\n ------\n SolverNotFound\n If no suitable solver could be found.\n \"\"\"\n if arg_1 is None:\n arg_1 = arg_0.problem\n else:\n arg_0.solver = arg_1\n\n # Check for QP, raise error if no QP solver found\n if arg_2 and interface_to_str(arg_1) not in qp_solvers:\n arg_1 = solvers[get_solver_name(arg_2=True)]\n\n return arg_1"} +{"_id": "doc_5085", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Add variables and constraints to a Model's solver object.\n\n Useful for variables and constraints that can not be expressed with\n reactions and lower/upper bounds. Will integrate with the Model's context\n manager in order to revert changes upon leaving the context.\n\n Parameters\n ----------\n model : a cobra model\n The model to which to add the variables and constraints.\n what : list or tuple of optlang variables or constraints.\n The variables or constraints to add to the model. Must be of class\n `model.problem.Variable` or\n `model.problem.Constraint`.\n **kwargs : keyword arguments\n passed to solver.add()\n \"\"\"\n arg_3 = get_context(arg_0)\n\n arg_0.solver.add(arg_1, **arg_2)\n if arg_3:\n arg_3(partial(arg_0.solver.remove, arg_1))"} +{"_id": "doc_5086", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove variables and constraints from a Model's solver object.\n\n Useful to temporarily remove variables and constraints from a Models's\n solver object.\n\n Parameters\n ----------\n model : a cobra model\n The model from which to remove the variables and constraints.\n what : list or tuple of optlang variables or constraints.\n The variables or constraints to remove from the model. 
Must be of\n class `model.problem.Variable` or\n `model.problem.Constraint`.\n \"\"\"\n arg_2 = get_context(arg_0)\n\n arg_0.solver.remove(arg_1)\n if arg_2:\n arg_2(partial(arg_0.solver.add, arg_1))"} +{"_id": "doc_5087", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=None,\n arg_3='fixed_objective_{}'):\n \"\"\"Fix current objective as an additional constraint.\n\n When adding constraints to a model, such as done in pFBA which\n minimizes total flux, these constraints can become too powerful,\n resulting in solutions that satisfy optimality but sacrifices too\n much for the original objective function. To avoid that, we can fix\n the current objective value as a constraint to ignore solutions that\n give a lower (or higher depending on the optimization direction)\n objective value than the original model.\n\n When done with the model as a context, the modification to the\n objective will be reverted when exiting that context.\n\n Parameters\n ----------\n model : cobra.Model\n The model to operate on\n fraction : float\n The fraction of the optimum the objective is allowed to reach.\n bound : float, None\n The bound to use instead of fraction of maximum optimal value. If\n not None, fraction is ignored.\n name : str\n Name of the objective. May contain one `{}` placeholder which is filled\n with the name of the old objective.\n\n Returns\n -------\n The value of the optimized objective * fraction\n \"\"\"\n arg_4 = arg_3.format(arg_0.objective.name)\n if arg_4 in arg_0.constraints:\n arg_0.solver.remove(arg_4)\n if arg_2 is None:\n arg_2 = arg_0.slim_optimize(error_value=None) * arg_1\n if arg_0.objective.direction == 'max':\n arg_5, arg_6 = None, arg_2\n else:\n arg_5, arg_6 = arg_2, None\n arg_7 = arg_0.problem.Constraint(\n arg_0.objective.expression,\n arg_3=arg_4, arg_5=arg_5, arg_6=arg_6)\n add_cons_vars_to_problem(arg_0, arg_7, sloppy=True)\n return arg_2"} +{"_id": "doc_5088", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Perform standard checks on a solver's status.\"\"\"\n if arg_0 == OPTIMAL:\n return\n elif (arg_0 in has_primals) and not arg_1:\n warn(\"solver status is '{}'\".format(arg_0), UserWarning)\n elif arg_0 is None:\n raise OptimizationError(\n \"model was not optimized yet or solver context switched\")\n else:\n raise OptimizationError(\"solver status is '{}'\".format(arg_0))"} +{"_id": "doc_5089", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Add a new objective and variables to ensure a feasible solution.\n\n The optimized objective will be zero for a feasible solution and otherwise\n represent the distance from feasibility (please see [1]_ for more\n information).\n\n Parameters\n ----------\n model : cobra.Model\n The model whose feasibility is to be tested.\n\n References\n ----------\n .. [1] Gomez, Jose A., Kai H\u00f6ffner, and Paul I. Barton.\n \u201cDFBAlab: A Fast and Reliable MATLAB Code for Dynamic Flux Balance\n Analysis.\u201d BMC Bioinformatics 15, no. 
1 (December 18, 2014): 409.\n https://doi.org/10.1186/s12859-014-0409-8.\n\n \"\"\"\n\n arg_1 = []\n arg_2 = arg_0.problem\n for arg_3 in arg_0.metabolites:\n arg_4 = arg_2.Variable(\"s_plus_\" + arg_3.id, lb=0)\n arg_5 = arg_2.Variable(\"s_minus_\" + arg_3.id, lb=0)\n\n arg_0.add_cons_vars([arg_4, arg_5])\n arg_0.constraints[arg_3.id].set_linear_coefficients(\n {arg_4: 1.0, arg_5: -1.0})\n arg_1.append(arg_4)\n arg_1.append(arg_5)\n\n arg_0.objective = arg_2.Objective(Zero, sloppy=True, direction=\"min\")\n arg_0.objective.set_linear_coefficients({arg_7: 1.0 for arg_7 in arg_1})"} +{"_id": "doc_5090", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2='max'):\n \"\"\"\n Successively optimize separate targets in a specific order.\n\n For each objective, optimize the model and set the optimal value as a\n constraint. Proceed in the order of the objectives given. Due to the\n specific order this is called lexicographic FBA [1]_. This\n procedure is useful for returning unique solutions for a set of important\n fluxes. Typically this is applied to exchange fluxes.\n\n Parameters\n ----------\n model : cobra.Model\n The model to be optimized.\n objectives : list\n A list of reactions (or objectives) in the model for which unique\n fluxes are to be determined.\n objective_direction : str or list, optional\n The desired objective direction for each reaction (if a list) or the\n objective direction to use for all reactions (default maximize).\n\n Returns\n -------\n optimized_fluxes : pandas.Series\n A vector containing the optimized fluxes for each of the given\n reactions in `objectives`.\n\n References\n ----------\n .. [1] Gomez, Jose A., Kai H\u00f6ffner, and Paul I. Barton.\n \u201cDFBAlab: A Fast and Reliable MATLAB Code for Dynamic Flux Balance\n Analysis.\u201d BMC Bioinformatics 15, no. 1 (December 18, 2014): 409.\n https://doi.org/10.1186/s12859-014-0409-8.\n\n \"\"\"\n\n if type(arg_2) is not list:\n arg_2 = [arg_2] * len(arg_1)\n\n arg_3 = []\n for arg_4, arg_5 in zip(arg_1, arg_2):\n arg_0.objective = arg_0.reactions.get_by_id(arg_4)\n arg_0.objective_direction = arg_5\n arg_3.append(fix_objective_as_constraint(arg_0))\n\n return pd.Series(arg_3, index=arg_1)"} +{"_id": "doc_5091", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Create a new numpy array that resides in shared memory.\n\n Parameters\n ----------\n shape : tuple of ints\n The shape of the new array.\n data : numpy.array\n Data to copy to the new array. Has to have the same shape.\n integer : boolean\n Whether to use an integer array. Defaults to False which means\n float array.\n\n \"\"\"\n\n arg_3 = np.prod(arg_0)\n\n if arg_2:\n arg_4 = Array(ctypes.c_int64, int(arg_3))\n arg_5 = np.frombuffer(arg_4.get_obj(), dtype=\"int64\")\n else:\n arg_4 = Array(ctypes.c_double, int(arg_3))\n arg_5 = np.frombuffer(arg_4.get_obj())\n\n arg_5 = arg_5.reshape(arg_0)\n\n if arg_1 is not None:\n if len(arg_0) != len(arg_1.shape):\n raise ValueError(\"`data` must have the same dimensions\"\n \"as the created array.\")\n arg_6 = all(x == y for x, y in zip(arg_0, arg_1.shape))\n\n if not arg_6:\n raise ValueError(\"`data` must have the same shape\"\n \"as the created array.\")\n arg_5[:] = arg_1\n\n return arg_5"} +{"_id": "doc_5092", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Reproject a point into the feasibility region.\n\n This function is guaranteed to return a new feasible point. 
However,\n no guarantees in terms of proximity to the original point can be made.\n\n Parameters\n ----------\n p : numpy.array\n The current sample point.\n\n Returns\n -------\n numpy.array\n A new feasible point. If `p` was feasible it will return p.\n\n \"\"\"\n\n arg_2 = arg_0.problem.nullspace\n arg_3 = arg_0.problem.equalities\n\n # don't reproject if point is feasible\n if np.allclose(arg_3.dot(arg_1), arg_0.problem.b,\n rtol=0, atol=arg_0.feasibility_tol):\n arg_4 = arg_1\n else:\n LOGGER.info(\"feasibility violated in sample\"\n \" %d, trying to reproject\" % arg_0.n_samples)\n arg_4 = arg_2.dot(arg_2.T.dot(arg_1))\n\n # Projections may violate bounds\n # set to random point in space in that case\n if any(arg_4 != arg_1):\n LOGGER.info(\"reprojection failed in sample\"\n \" %d, using random point in space\" % arg_0.n_samples)\n arg_4 = arg_0._random_point()\n\n return arg_4"}
{"_id": "doc_5093", "title": "", "text": "def Func(arg_0):\n \"\"\"Find an approximately random point in the flux cone.\"\"\"\n\n arg_1 = np.random.randint(arg_0.n_warmup,\n size=min(2, np.ceil(np.sqrt(arg_0.n_warmup))))\n return arg_0.warmup[arg_1, :].mean(axis=0)"}
{"_id": "doc_5094", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Identify redundant rows in a matrix that can be removed.\"\"\"\n\n arg_2 = 1.0 - arg_0.feasibility_tol\n\n # Avoid zero variances\n arg_3 = arg_1[:, 0] + 1\n\n # Avoid zero rows being correlated with constant rows\n arg_3[arg_1.sum(arg_5=1) == 0] = 2\n arg_6 = np.corrcoef(np.c_[arg_1, arg_3])\n arg_6 = np.tril(arg_6, -1)\n\n return (np.abs(arg_6) > arg_2).any(arg_5=1)"}
{"_id": "doc_5095", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the lower and upper bound distances. Negative is bad.\"\"\"\n\n arg_2 = arg_0.problem\n arg_3 = (arg_1 - arg_2.variable_bounds[0, ]).min()\n arg_4 = (arg_2.variable_bounds[1, ] - arg_1).min()\n\n if arg_2.bounds.shape[0] > 0:\n arg_5 = arg_2.inequalities.dot(arg_1)\n arg_6 = (arg_5 - arg_2.bounds[0, ]).min()\n arg_7 = (arg_2.bounds[1, ] - arg_5).min()\n arg_3 = min(arg_3, arg_6)\n arg_4 = min(arg_4, arg_7)\n\n return np.array([arg_3, arg_4])"}
{"_id": "doc_5096", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"Create a Func generator.\n\n This is useful to generate n Funces of m samples each.\n\n Parameters\n ----------\n Func_size : int\n The number of samples contained in each Func (m).\n Func_num : int\n The number of Funces in the generator (n).\n fluxes : boolean\n Whether to return fluxes or the internal solver variables. If set\n to False will return a variable for each forward and backward flux\n as well as all additional variables you might have defined in the\n model.\n\n Yields\n ------\n pandas.DataFrame\n A DataFrame with dimensions (Func_size x n_r) containing\n a valid flux sample for a total of n_r reactions (or variables if\n fluxes=False) in each row.\n\n \"\"\"\n\n for arg_4 in range(arg_2):\n yield arg_0.sample(arg_1, arg_3=arg_3)"}
{"_id": "doc_5097", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Validate a set of samples for equality and inequality feasibility.\n\n Can be used to check whether the generated samples and warmup points\n are feasible.\n\n Parameters\n ----------\n samples : numpy.matrix\n Must be of dimension (n_samples x n_reactions). Contains the\n samples to be Funcd.
Samples must be from fluxes.\n\n Returns\n -------\n numpy.array\n A one-dimensional numpy array of length n_samples containing\n a code of 1 to 3 letters denoting the validation result:\n\n - 'v' means feasible in bounds and equality constraints\n - 'l' means a lower bound violation\n - 'u' means an upper bound violation\n - 'e' means an equality constraint violation\n\n \"\"\"\n\n arg_1 = np.atleast_2d(arg_1)\n arg_2 = arg_0.problem\n\n if arg_1.shape[1] == len(arg_0.model.reactions):\n arg_3 = create_stoichiometric_matrix(arg_0.model)\n arg_4 = np.array([arg_0.model.constraints[m.id].lb for m in\n arg_0.model.metabolites])\n arg_5 = np.array([r.bounds for r in arg_0.model.reactions]).T\n elif arg_1.shape[1] == len(arg_0.model.variables):\n arg_3 = arg_2.equalities\n arg_4 = arg_2.b\n arg_5 = arg_2.variable_bounds\n else:\n raise ValueError(\"Wrong number of columns. samples must have a \"\n \"column for each flux or variable defined in the \"\n \"model!\")\n\n arg_6 = np.abs(arg_3.dot(arg_1.T).T - arg_4).max(axis=1)\n arg_7 = (arg_1 - arg_5[0, ]).min(axis=1)\n arg_8 = (arg_5[1, ] - arg_1).min(axis=1)\n\n if (arg_1.shape[1] == len(arg_0.model.variables) and\n arg_2.inequalities.shape[0]):\n arg_9 = arg_2.inequalities.dot(arg_1.T)\n arg_7 = np.minimum(\n arg_7,\n (arg_9 - arg_2.bounds[0, ]).min(axis=1))\n arg_8 = np.minimum(\n arg_8,\n (arg_2.bounds[1, ] - arg_9).min(axis=1)\n )\n\n arg_10 = (\n (arg_6 < arg_0.feasibility_tol) &\n (arg_7 > -arg_0.bounds_tol) &\n (arg_8 > -arg_0.bounds_tol))\n arg_11 = np.repeat(\"\", arg_10.shape[0]).astype(np.dtype((str, 3)))\n arg_11[arg_10] = \"v\"\n arg_11[arg_7 <= -arg_0.bounds_tol] = np.char.add(\n arg_11[arg_7 <= -arg_0.bounds_tol], \"l\")\n arg_11[arg_8 <= -arg_0.bounds_tol] = np.char.add(\n arg_11[arg_8 <= -arg_0.bounds_tol], \"u\")\n arg_11[arg_6 > arg_0.feasibility_tol] = np.char.add(\n arg_11[arg_6 > arg_0.feasibility_tol], \"e\")\n\n return arg_11"}
{"_id": "doc_5098", "title": "", "text": "def Func(arg_0):\n \"\"\"Remove metabolites that are not involved in any reactions and\n returns pruned model\n\n Parameters\n ----------\n cobra_model: class:`~cobra.core.Model.Model` object\n the model to remove unused metabolites from\n\n Returns\n -------\n output_model: class:`~cobra.core.Model.Model` object\n input model with unused metabolites removed\n inactive_metabolites: list of class:`~cobra.core.metabolite.Metabolite`\n list of metabolites that were removed\n \"\"\"\n\n arg_1 = arg_0.copy()\n arg_2 = [m for m in arg_1.metabolites\n if len(m.reactions) == 0]\n arg_1.remove_metabolites(arg_2)\n return arg_1, arg_2"}
{"_id": "doc_5099", "title": "", "text": "def Func(arg_0):\n \"\"\"Remove reactions with no assigned metabolites, returns pruned model\n\n Parameters\n ----------\n cobra_model: class:`~cobra.core.Model.Model` object\n the model to remove unused reactions from\n\n Returns\n -------\n output_model: class:`~cobra.core.Model.Model` object\n input model with unused reactions removed\n reactions_to_prune: list of class:`~cobra.core.reaction.Reaction`\n list of reactions that were removed\n \"\"\"\n\n arg_1 = arg_0.copy()\n arg_2 = [r for r in arg_1.reactions\n if len(r.metabolites) == 0]\n arg_1.remove_reactions(arg_2)\n return arg_1, arg_2"}
{"_id": "doc_5100", "title": "", "text": "def Func(arg_0):\n \"\"\"Undoes the effects of a call to delete_model_genes in place.\n\n cobra_model: A cobra.Model which will be modified in place\n\n \"\"\"\n\n if arg_0._trimmed_genes is not None:\n for arg_1 in arg_0._trimmed_genes:\n arg_1.functional = True\n\n
if arg_0._trimmed_reactions is not None:\n for arg_3, (arg_4, arg_5) in \\\n arg_0._trimmed_reactions.items():\n arg_3.lower_bound = arg_4\n arg_3.upper_bound = arg_5\n\n arg_0._trimmed_genes = []\n arg_0._trimmed_reactions = {}\n arg_0._trimmed = False"} +{"_id": "doc_5101", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None):\n \"\"\"identify reactions which will be disabled when the genes are knocked out\n\n cobra_model: :class:`~cobra.core.Model.Model`\n\n gene_list: iterable of :class:`~cobra.core.Gene.Gene`\n\n compiled_gene_reaction_rules: dict of {reaction_id: compiled_string}\n If provided, this gives pre-compiled gene_reaction_rule strings.\n The compiled rule strings can be evaluated much faster. If a rule\n is not provided, the regular expression evaluation will be used.\n Because not all gene_reaction_rule strings can be evaluated, this\n dict must exclude any rules which can not be used with eval.\n\n \"\"\"\n arg_3 = set()\n for arg_4 in arg_1:\n if isinstance(arg_4, string_types):\n arg_4 = arg_0.genes.get_by_id(arg_4)\n arg_3.update(arg_4._reaction)\n arg_5 = {str(i) for i in arg_1}\n if arg_2 is None:\n arg_2 = {arg_6: parse_gpr(arg_6.gene_reaction_rule)[0]\n for arg_6 in arg_3}\n\n return [arg_6 for arg_6 in arg_3\n if not eval_gpr(arg_2[arg_6], arg_5)]"} +{"_id": "doc_5102", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0.05,\n arg_3=None, arg_4=True, arg_5=False,\n arg_6=1):\n \"\"\"Perform Funcing on a model.\n\n See documentation for the class GapFiller.\n\n Parameters\n ----------\n model : cobra.Model\n The model to perform gap filling on.\n universal : cobra.Model, None\n A universal model with reactions that can be used to complete the\n model. Only Func considering demand and exchange reactions if\n left missing.\n lower_bound : float\n The minimally accepted flux for the objective in the filled model.\n penalties : dict, None\n A dictionary with keys being 'universal' (all reactions included in\n the universal model), 'exchange' and 'demand' (all additionally\n added exchange and demand reactions) for the three reaction types.\n Can also have reaction identifiers for reaction specific costs.\n Defaults are 1, 100 and 1 respectively.\n iterations : int\n The number of rounds of Funcing to perform. For every iteration,\n the penalty for every used reaction increases linearly. This way,\n the algorithm is encouraged to search for alternative solutions\n which may include previously used reactions. 
I.e., with enough\n iterations pathways including 10 steps will eventually be reported\n even if the shortest pathway is a single reaction.\n exchange_reactions : bool\n Consider adding exchange (uptake) reactions for all metabolites\n in the model.\n demand_reactions : bool\n Consider adding demand reactions for all metabolites.\n\n Returns\n -------\n iterable\n list of lists with on set of reactions that completes the model per\n requested iteration.\n\n Examples\n --------\n >>> import cobra.test as ct\n >>> from cobra import Model\n >>> from cobra.flux_analysis import Func\n >>> model = ct.create_test_model(\"salmonella\")\n >>> universal = Model('universal')\n >>> universal.add_reactions(model.reactions.GF6PTA.copy())\n >>> model.remove_reactions([model.reactions.GF6PTA])\n >>> Func(model, universal)\n \"\"\"\n arg_7 = GapFiller(arg_0, arg_1=arg_1,\n arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5)\n return arg_7.fill(arg_6=arg_6)"} +{"_id": "doc_5103", "title": "", "text": "def Func(arg_0):\n \"\"\"Update the coefficients for the indicator variables in the objective.\n\n Done incrementally so that second time the function is called,\n active indicators in the current solutions gets higher cost than the\n unused indicators.\n \"\"\"\n for arg_1 in arg_0.indicators:\n if arg_1 not in arg_0.costs:\n arg_0.costs[arg_1] = arg_1.cost\n else:\n if arg_1._get_primal() > arg_0.integer_threshold:\n arg_0.costs[arg_1] += arg_1.cost\n arg_0.model.objective.set_linear_coefficients(arg_0.costs)"} +{"_id": "doc_5104", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"Perform the gapFuncing by iteratively solving the model, updating\n the costs and recording the used reactions.\n\n\n Parameters\n ----------\n iterations : int\n The number of rounds of gapFuncing to perform. For every\n iteration, the penalty for every used reaction increases\n linearly. This way, the algorithm is encouraged to search for\n alternative solutions which may include previously used\n reactions. I.e., with enough iterations pathways including 10\n steps will eventually be reported even if the shortest pathway\n is a single reaction.\n\n Returns\n -------\n iterable\n A list of lists where each element is a list reactions that were\n used to gapFunc the model.\n\n Raises\n ------\n RuntimeError\n If the model fails to be validated (i.e. the original model with\n the proposed reactions added, still cannot get the required flux\n through the objective).\n \"\"\"\n arg_2 = list()\n for arg_3 in range(arg_1):\n arg_0.model.slim_optimize(error_value=None,\n message='gapFuncing optimization failed')\n arg_4 = [arg_0.model.reactions.get_by_id(ind.rxn_id)\n for ind in arg_0.indicators if\n ind._get_primal() > arg_0.integer_threshold]\n if not arg_0.validate(arg_4):\n raise RuntimeError('failed to validate gapFunced model, '\n 'try lowering the integer_threshold')\n arg_2.append(arg_4)\n arg_0.update_costs()\n return arg_2"} +{"_id": "doc_5105", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Check whether a reaction is an exchange reaction.\n\n Arguments\n ---------\n reaction : cobra.Reaction\n The reaction to check.\n boundary_type : str\n What boundary type to check for. Must be one of\n \"exchange\", \"demand\", or \"sink\".\n external_compartment : str\n The id for the external compartment.\n\n Returns\n -------\n boolean\n Whether the reaction looks like the requested type. Might be based\n on a heuristic.\n \"\"\"\n # Check if the reaction has an annotation. 
Annotations dominate everything.\n arg_3 = arg_0.annotation.get(\"sbo\", \"\")\n if isinstance(arg_3, list):\n arg_3 = arg_3[0]\n arg_3 = arg_3.upper()\n\n if arg_3 == sbo_terms[arg_1]:\n return True\n if arg_3 in [sbo_terms[arg_4] for arg_4 in sbo_terms if arg_4 != arg_1]:\n return False\n\n # Check if the reaction is in the correct compartment (exterior or inside)\n arg_5 = arg_2 in arg_0.compartments\n if arg_1 != \"exchange\":\n arg_5 = not arg_5\n\n # Check if the reaction has the correct reversibility\n arg_6 = True\n if arg_1 == \"demand\":\n arg_6 = not arg_0.reversibility\n elif arg_1 == \"sink\":\n arg_6 = arg_0.reversibility\n\n return (arg_0.boundary and not\n any(arg_7 in arg_0.id for arg_7 in excludes[arg_1]) and\n arg_5 and arg_6)"} +{"_id": "doc_5106", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Find specific boundary reactions.\n\n Arguments\n ---------\n model : cobra.Model\n A cobra model.\n boundary_type : str\n What boundary type to check for. Must be one of\n \"exchange\", \"demand\", or \"sink\".\n external_compartment : str or None\n The id for the external compartment. If None it will be detected\n automatically.\n\n Returns\n -------\n list of cobra.reaction\n A list of likely boundary reactions of a user defined type.\n \"\"\"\n if not arg_0.boundary:\n LOGGER.warning(\"There are no boundary reactions in this model. \"\n \"Therefore specific types of boundary reactions such \"\n \"as 'exchanges', 'demands' or 'sinks' cannot be \"\n \"identified.\")\n return []\n if arg_2 is None:\n arg_2 = find_external_compartment(arg_0)\n return arg_0.reactions.query(\n lambda r: is_boundary_type(r, arg_1, arg_2))"} +{"_id": "doc_5107", "title": "", "text": "def Func(arg_0):\n \"\"\"Sample a single chain for OptGPSampler.\n\n center and n_samples are updated locally and forgotten afterwards.\n\n \"\"\"\n\n arg_1, arg_2 = arg_0 # has to be this way to work in Python 2.7\n arg_3 = arg_10.center\n np.random.seed((arg_10._seed + arg_2) % np.iinfo(np.int32).max)\n arg_4 = np.random.randint(arg_10.n_warmup)\n\n arg_5 = arg_10.warmup[arg_4, ]\n arg_5 = step(arg_10, arg_3, arg_5 - arg_3, 0.95)\n\n arg_6 = max(arg_10.n_samples, 1)\n arg_7 = np.zeros((arg_1, arg_3.shape[0]))\n\n for arg_8 in range(1, arg_10.thinning * arg_1 + 1):\n arg_4 = np.random.randint(arg_10.n_warmup)\n arg_9 = arg_10.warmup[arg_4, ] - arg_3\n\n arg_5 = step(arg_10, arg_5, arg_9)\n\n if arg_10.problem.homogeneous and (\n arg_6 * arg_10.thinning % arg_10.nproj == 0):\n arg_5 = arg_10._reproject(arg_5)\n arg_3 = arg_10._reproject(arg_3)\n\n if arg_8 % arg_10.thinning == 0:\n arg_7[arg_8//arg_10.thinning - 1, ] = arg_5\n\n arg_3 = ((arg_6 * arg_3) / (arg_6 + 1) +\n arg_5 / (arg_6 + 1))\n arg_6 += 1\n\n return (arg_10.retries, arg_7)"} +{"_id": "doc_5108", "title": "", "text": "def Func(arg_0):\n \"\"\"parse gpr into AST\n\n Parameters\n ----------\n str_expr : string\n string with the gene reaction rule to parse\n\n Returns\n -------\n tuple\n elements ast_tree and gene_ids as a set\n \"\"\"\n arg_0 = arg_0.strip()\n if len(arg_0) == 0:\n return None, set()\n for arg_1, arg_2 in replacements:\n if arg_1 in arg_0:\n arg_0 = arg_0.replace(arg_1, arg_2)\n arg_3 = keyword_re.sub(\"__cobra_escape__\", arg_0)\n arg_3 = number_start_re.sub(\"__cobra_escape__\", arg_3)\n arg_4 = ast_parse(arg_3, \"\", \"eval\")\n arg_5 = GPRCleaner()\n arg_5.visit(arg_4)\n eval_gpr(arg_4, set()) # ensure the rule can be evaluated\n return arg_4, arg_5.gene_set"} +{"_id": "doc_5109", "title": "", "text": "def 
Func(arg_0):\n \"\"\"Knockout gene by marking it as non-functional and setting all\n associated reactions bounds to zero.\n\n The change is reverted upon exit if executed within the model as\n context.\n \"\"\"\n arg_0.functional = False\n for arg_2 in arg_0.reactions:\n if not arg_2.functional:\n arg_2.bounds = (0, 0)"} +{"_id": "doc_5110", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=True):\n r\"\"\"Add constraints and objective representing for MOMA.\n\n This adds variables and constraints for the minimization of metabolic\n adjustment (MOMA) to the model.\n\n Parameters\n ----------\n model : cobra.Model\n The model to add MOMA constraints and objective to.\n solution : cobra.Solution, optional\n A previous solution to use as a reference. If no solution is given,\n one will be computed using pFBA.\n linear : bool, optional\n Whether to use the linear MOMA formulation or not (default True).\n\n Notes\n -----\n In the original MOMA [1]_ specification one looks for the flux distribution\n of the deletion (v^d) closest to the fluxes without the deletion (v).\n In math this means:\n\n minimize \\sum_i (v^d_i - v_i)^2\n s.t. Sv^d = 0\n lb_i <= v^d_i <= ub_i\n\n Here, we use a variable transformation v^t := v^d_i - v_i. Substituting\n and using the fact that Sv = 0 gives:\n\n minimize \\sum_i (v^t_i)^2\n s.t. Sv^d = 0\n v^t = v^d_i - v_i\n lb_i <= v^d_i <= ub_i\n\n So basically we just re-center the flux space at the old solution and then\n find the flux distribution closest to the new zero (center). This is the\n same strategy as used in cameo.\n\n In the case of linear MOMA [2]_, we instead minimize \\sum_i abs(v^t_i). The\n linear MOMA is typically significantly faster. Also quadratic MOMA tends\n to give flux distributions in which all fluxes deviate from the reference\n fluxes a little bit whereas linear MOMA tends to give flux distributions\n where the majority of fluxes are the same reference with few fluxes\n deviating a lot (typical effect of L2 norm vs L1 norm).\n\n The former objective function is saved in the optlang solver interface as\n ``\"moma_old_objective\"`` and this can be used to immediately extract the\n value of the former objective after MOMA optimization.\n\n See Also\n --------\n pfba : parsimonious FBA\n\n References\n ----------\n .. [1] Segr\u00e8, Daniel, Dennis Vitkup, and George M. Church. \u201cAnalysis of\n Optimality in Natural and Perturbed Metabolic Networks.\u201d\n Proceedings of the National Academy of Sciences 99, no. 23\n (November 12, 2002): 15112. https://doi.org/10.1073/pnas.232349399.\n .. [2] Becker, Scott A, Adam M Feist, Monica L Mo, Gregory Hannum,\n Bernhard \u00d8 Palsson, and Markus J Herrgard. 
\u201cQuantitative\n Prediction of Cellular Metabolism with Constraint-Based Models:\n The COBRA Toolbox.\u201d Nature Protocols 2 (March 29, 2007): 727.\n \"\"\"\n if 'moma_old_objective' in arg_0.solver.variables:\n raise ValueError('model is already adjusted for MOMA')\n\n # Fall back to default QP solver if current one has no QP capability\n if not arg_2:\n arg_0.solver = sutil.choose_solver(arg_0, qp=True)\n\n if arg_1 is None:\n arg_1 = pfba(arg_0)\n arg_4 = arg_0.problem\n arg_5 = arg_4.Variable(\"moma_old_objective\")\n arg_6 = arg_4.Constraint(arg_0.solver.objective.expression - arg_5,\n lb=0.0, ub=0.0, name=\"moma_old_objective_constraint\")\n arg_7 = [arg_5, arg_6]\n arg_0.objective = arg_4.Objective(Zero, direction=\"min\", sloppy=True)\n arg_9 = []\n for arg_10 in arg_0.reactions:\n arg_11 = arg_1.fluxes[arg_10.id]\n if arg_2:\n arg_12 = sutil.add_absolute_expression(\n arg_0, arg_10.flux_expression, name=\"moma_dist_\" + arg_10.id,\n difference=arg_11, add=False)\n arg_7.extend(arg_12)\n arg_9.append(arg_12.variable)\n else:\n arg_13 = arg_4.Variable(\"moma_dist_\" + arg_10.id)\n arg_14 = arg_4.Constraint(arg_10.flux_expression - arg_13, lb=arg_11, ub=arg_11,\n name=\"moma_constraint_\" + arg_10.id)\n arg_7.extend([arg_13, arg_14])\n arg_9.append(arg_13 ** 2)\n arg_0.add_cons_vars(arg_7)\n if arg_2:\n arg_0.objective.set_linear_coefficients({arg_5: 1.0 for arg_5 in arg_9})\n else:\n arg_0.objective = arg_4.Objective(\n add(arg_9), direction=\"min\", sloppy=True)"} +{"_id": "doc_5111", "title": "", "text": "def Func(arg_0):\n \"\"\"convert possible types to str, float, and bool\"\"\"\n # Because numpy floats can not be pickled to json\n if isinstance(arg_0, string_types):\n return str(arg_0)\n if isinstance(arg_0, float_):\n return float(arg_0)\n if isinstance(arg_0, bool_):\n return bool(arg_0)\n if isinstance(arg_0, set):\n return list(arg_0)\n if isinstance(arg_0, dict):\n return OrderedDict((arg_1, arg_0[arg_1]) for arg_1 in sorted(arg_0))\n # handle legacy Formula type\n if arg_0.__class__.__name__ == \"Formula\":\n return str(arg_0)\n if arg_0 is None:\n return \"\"\n return arg_0"} +{"_id": "doc_5112", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"update new_dict with optional attributes from cobra_object\"\"\"\n for arg_4 in arg_3:\n arg_5 = arg_2[arg_4]\n arg_6 = getattr(arg_0, arg_4)\n if arg_6 is None or arg_6 == arg_5:\n continue\n arg_1[arg_4] = _fix_type(arg_6)"} +{"_id": "doc_5113", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Convert model to a dict.\n\n Parameters\n ----------\n model : cobra.Model\n The model to reformulate as a dict.\n sort : bool, optional\n Whether to sort the metabolites, reactions, and genes or maintain the\n order defined in the model.\n\n Returns\n -------\n OrderedDict\n A dictionary with elements, 'genes', 'compartments', 'id',\n 'metabolites', 'notes' and 'reactions'; where 'metabolites', 'genes'\n and 'metabolites' are in turn lists with dictionaries holding all\n attributes to form the corresponding object.\n\n See Also\n --------\n cobra.io.model_from_dict\n \"\"\"\n arg_2 = OrderedDict()\n arg_2[\"metabolites\"] = list(map(metabolite_to_dict, arg_0.metabolites))\n arg_2[\"reactions\"] = list(map(reaction_to_dict, arg_0.reactions))\n arg_2[\"genes\"] = list(map(gene_to_dict, arg_0.genes))\n arg_2[\"id\"] = arg_0.id\n _update_optional(arg_0, arg_2, _OPTIONAL_MODEL_ATTRIBUTES,\n _ORDERED_OPTIONAL_MODEL_KEYS)\n if arg_1:\n arg_3 = itemgetter(\"id\")\n arg_2[\"metabolites\"].sort(key=arg_3)\n 
arg_2[\"reactions\"].sort(key=arg_3)\n arg_2[\"genes\"].sort(key=arg_3)\n return arg_2"} +{"_id": "doc_5114", "title": "", "text": "def Func(arg_0):\n \"\"\"Build a model from a dict.\n\n Models stored in json are first formulated as a dict that can be read to\n cobra model using this function.\n\n Parameters\n ----------\n obj : dict\n A dictionary with elements, 'genes', 'compartments', 'id',\n 'metabolites', 'notes' and 'reactions'; where 'metabolites', 'genes'\n and 'metabolites' are in turn lists with dictionaries holding all\n attributes to form the corresponding object.\n\n Returns\n -------\n cora.core.Model\n The generated model.\n\n See Also\n --------\n cobra.io.model_to_dict\n \"\"\"\n if 'reactions' not in arg_0:\n raise ValueError('Object has no reactions attribute. Cannot load.')\n arg_1 = Model()\n arg_1.add_metabolites(\n [metabolite_from_dict(arg_2) for arg_2 in arg_0['metabolites']]\n )\n arg_1.genes.extend([gene_from_dict(arg_3) for arg_3 in arg_0['genes']])\n arg_1.add_reactions(\n [reaction_from_dict(arg_4, arg_1) for arg_4 in arg_0['reactions']]\n )\n arg_5 = [rxn for rxn in arg_0['reactions'] if\n rxn.get('objective_coefficient', 0) != 0]\n arg_6 = {\n arg_1.reactions.get_by_id(rxn['id']): rxn['objective_coefficient'] for\n rxn in arg_5}\n set_objective(arg_1, arg_6)\n for arg_7, arg_8 in iteritems(arg_0):\n if arg_7 in {'id', 'name', 'notes', 'compartments', 'annotation'}:\n setattr(arg_1, arg_7, arg_8)\n return arg_1"} +{"_id": "doc_5115", "title": "", "text": "def Func(arg_0):\n \"\"\"extract the compartment from the id string\"\"\"\n arg_1 = _bracket_re.findall(arg_0)\n if len(arg_1) == 1:\n return arg_1[0][1]\n arg_2 = _underscore_re.findall(arg_0)\n if len(arg_2) == 1:\n return arg_2[0][1]\n return None"} +{"_id": "doc_5116", "title": "", "text": "def Func(arg_0):\n \"\"\"translate an array x into a MATLAB cell array\"\"\"\n arg_1 = [i if i is not None else \"\" for i in arg_0]\n return array(arg_1, dtype=np_object)"} +{"_id": "doc_5117", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=arg_2):\n \"\"\"Load a cobra model stored as a .mat file\n\n Parameters\n ----------\n infile_path: str\n path to the file to to read\n variable_name: str, optional\n The variable name of the model in the .mat file. If this is not\n specified, then the first MATLAB variable which looks like a COBRA\n model will be used\n inf: value\n The value to use for infinite bounds. 
Some solvers do not handle\n infinite values so for using those, set this to a high numeric value.\n\n Returns\n -------\n cobra.core.Model.Model:\n The resulting cobra model\n\n \"\"\"\n if not scipy_io:\n raise ImportError('Func requires scipy')\n\n arg_3 = scipy_io.loadmat(arg_0)\n arg_4 = []\n if arg_1 is None:\n # skip meta variables\n arg_5 = {\"__globals__\", \"__header__\", \"__version__\"}\n arg_4 = sorted(i for i in arg_3 if i not in arg_5)\n if len(arg_4) == 1:\n arg_1 = arg_4[0]\n if arg_1 is not None:\n return from_mat_struct(arg_3[arg_1], model_id=arg_1,\n arg_2=arg_2)\n for arg_6 in arg_4:\n try:\n return from_mat_struct(arg_3[arg_6], model_id=arg_6,\n arg_2=arg_2)\n except ValueError:\n pass\n # If code here is executed, then no model was found.\n raise IOError(\"no COBRA model found\")"} +{"_id": "doc_5118", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Save the cobra model as a .mat file.\n\n This .mat file can be used directly in the MATLAB version of COBRA.\n\n Parameters\n ----------\n model : cobra.core.Model.Model object\n The model to save\n file_name : str or file-like object\n The file to save to\n varname : string\n The name of the variable within the workspace\n \"\"\"\n if not scipy_io:\n raise ImportError('load_matlab_model requires scipy')\n\n if arg_2 is None:\n arg_2 = str(arg_0.id) \\\n if arg_0.id is not None and len(arg_0.id) > 0 \\\n else \"exported_model\"\n arg_3 = create_mat_dict(arg_0)\n scipy_io.savemat(arg_1, {arg_2: arg_3},\n appendmat=True, oned_as=\"column\")"} +{"_id": "doc_5119", "title": "", "text": "def Func(arg_0):\n \"\"\"Search for a context manager\"\"\"\n try:\n return arg_0._contexts[-1]\n except (AttributeError, IndexError):\n pass\n\n try:\n return arg_0._model._contexts[-1]\n except (AttributeError, IndexError):\n pass\n\n return None"} +{"_id": "doc_5120", "title": "", "text": "def Func(arg_0):\n \"\"\"A decorator to simplify the context management of simple object\n attributes. Gets the value of the attribute prior to setting it, and stores\n a function to set the value to the old value in the HistoryManager.\n \"\"\"\n\n def wrapper(arg_1, arg_2):\n arg_3 = get_context(arg_1)\n if arg_3:\n arg_4 = getattr(arg_1, arg_0.__name__)\n # Don't clutter the context with unchanged variables\n if arg_4 == arg_2:\n return\n arg_3(partial(arg_0, arg_1, arg_4))\n\n arg_0(arg_1, arg_2)\n\n return wrapper"} +{"_id": "doc_5121", "title": "", "text": "def Func(arg_0, Func):\n \"\"\"Get or set the constraints on the model exchanges.\n\n `model.medium` returns a dictionary of the bounds for each of the\n boundary reactions, in the form of `{rxn_id: bound}`, where `bound`\n specifies the absolute value of the bound in direction of metabolite\n creation (i.e., lower_bound for `met <--`, upper_bound for `met -->`)\n\n Parameters\n ----------\n medium: dictionary-like\n The medium to initialize. medium should be a dictionary defining\n `{rxn_id: bound}` pairs.\n\n \"\"\"\n\n def set_active_bound(arg_2, arg_3):\n if arg_2.reactants:\n arg_2.lower_bound = -arg_3\n elif arg_2.products:\n arg_2.upper_bound = arg_3\n\n # Set the given media bounds\n arg_6 = list()\n arg_7 = frozenset(arg_0.exchanges)\n for arg_8, arg_3 in iteritems(Func):\n arg_9 = arg_0.reactions.get_by_id(arg_8)\n if arg_9 not in arg_7:\n LOGGER.warn(\"%s does not seem to be an\"\n \" an exchange reaction. 
Applying bounds anyway.\",\n arg_9.id)\n arg_6.append(arg_9)\n set_active_bound(arg_9, arg_3)\n\n arg_6 = frozenset(arg_6)\n\n # Turn off reactions not present in media\n for arg_9 in (arg_7 - arg_6):\n set_active_bound(arg_9, 0)"} +{"_id": "doc_5122", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"exchange\", arg_3=None,\n arg_4=None, arg_5=None, arg_6=None):\n \"\"\"\n Add a boundary reaction for a given metabolite.\n\n There are three different types of pre-defined boundary reactions:\n exchange, demand, and sink reactions.\n An exchange reaction is a reversible, unbalanced reaction that adds\n to or removes an extracellular metabolite from the extracellular\n compartment.\n A demand reaction is an irreversible reaction that consumes an\n intracellular metabolite.\n A sink is similar to an exchange but specifically for intracellular\n metabolites.\n\n If you set the reaction `type` to something else, you must specify the\n desired identifier of the created reaction along with its upper and\n lower bound. The name will be given by the metabolite name and the\n given `type`.\n\n Parameters\n ----------\n metabolite : cobra.Metabolite\n Any given metabolite. The compartment is not checked but you are\n encouraged to stick to the definition of exchanges and sinks.\n type : str, {\"exchange\", \"demand\", \"sink\"}\n Using one of the pre-defined reaction types is easiest. If you\n want to create your own kind of boundary reaction choose\n any other string, e.g., 'my-boundary'.\n reaction_id : str, optional\n The ID of the resulting reaction. This takes precedence over the\n auto-generated identifiers but beware that it might make boundary\n reactions harder to identify afterwards when using `model.boundary`\n or specifically `model.exchanges` etc.\n lb : float, optional\n The lower bound of the resulting reaction.\n ub : float, optional\n The upper bound of the resulting reaction.\n sbo_term : str, optional\n A correct SBO term is set for the available types. If a custom\n type is chosen, a suitable SBO term should also be set.\n\n Returns\n -------\n cobra.Reaction\n The created boundary reaction.\n\n Examples\n --------\n >>> import cobra.test\n >>> model = cobra.test.create_test_model(\"textbook\")\n >>> demand = model.Func(model.metabolites.atp_c, type=\"demand\")\n >>> demand.id\n 'DM_atp_c'\n >>> demand.name\n 'ATP demand'\n >>> demand.bounds\n (0, 1000.0)\n >>> demand.build_reaction_string()\n 'atp_c --> '\n\n \"\"\"\n arg_5 = CONFIGURATION.upper_bound if arg_5 is None else arg_5\n arg_4 = CONFIGURATION.lower_bound if arg_4 is None else arg_4\n arg_7 = {\n \"exchange\": (\"EX\", arg_4, arg_5, sbo_terms[\"exchange\"]),\n \"demand\": (\"DM\", 0, arg_5, sbo_terms[\"demand\"]),\n \"sink\": (\"SK\", arg_4, arg_5, sbo_terms[\"sink\"])\n }\n if arg_2 == \"exchange\":\n arg_8 = find_external_compartment(arg_0)\n if arg_1.compartment != arg_8:\n raise ValueError(\"The metabolite is not an external metabolite\"\n \" (compartment is `%s` but should be `%s`). \"\n \"Did you mean to add a demand or sink? \"\n \"If not, either change its compartment or \"\n \"rename the model compartments to fix this.\" %\n (arg_1.compartment, arg_8))\n if arg_2 in arg_7:\n arg_9, arg_4, arg_5, arg_10 = arg_7[arg_2]\n if arg_3 is None:\n arg_3 = \"{}_{}\".format(arg_9, arg_1.id)\n if arg_6 is None:\n arg_6 = arg_10\n if arg_3 is None:\n raise ValueError(\n \"Custom types of boundary reactions require a custom \"\n \"identifier. 
Please set the `reaction_id`.\")\n if arg_3 in arg_0.reactions:\n raise ValueError(\n \"Boundary reaction '{}' already exists.\".format(arg_3))\n arg_11 = \"{} {}\".format(arg_1.name, arg_2)\n arg_12 = Reaction(id=arg_3, arg_11=arg_11, lower_bound=arg_4,\n upper_bound=arg_5)\n arg_12.add_metabolites({arg_1: -1})\n if arg_6:\n arg_12.annotation[\"sbo\"] = arg_6\n arg_0.add_reactions([arg_12])\n return arg_12"} +{"_id": "doc_5123", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add reactions to the model.\n\n Reactions with identifiers identical to a reaction already in the\n model are ignored.\n\n The change is reverted upon exit when using the model as a context.\n\n Parameters\n ----------\n reaction_list : list\n A list of `cobra.Reaction` objects\n \"\"\"\n def existing_filter(arg_2):\n if arg_2.id in arg_0.reactions:\n LOGGER.warning(\n \"Ignoring reaction '%s' since it already exists.\", arg_2.id)\n return False\n return True\n\n # First check whether the reactions exist in the model.\n arg_3 = DictList(filter(existing_filter, arg_1))\n\n arg_4 = get_context(arg_0)\n\n # Add reactions. Also take care of genes and metabolites in the loop.\n for arg_5 in arg_3:\n arg_5._model = arg_0\n # Build a `list()` because the dict will be modified in the loop.\n for arg_7 in list(arg_5.metabolites):\n # TODO: Should we add a copy of the metabolite instead?\n if arg_7 not in arg_0.metabolites:\n arg_0.add_metabolites(arg_7)\n # A copy of the metabolite exists in the model, the reaction\n # needs to point to the metabolite in the model.\n else:\n # FIXME: Modifying 'private' attributes is horrible.\n arg_8 = arg_5._metabolites.pop(arg_7)\n arg_9 = arg_0.metabolites.get_by_id(\n arg_7.id)\n arg_5._metabolites[arg_9] = arg_8\n arg_9._reaction.add(arg_5)\n if arg_4:\n arg_4(partial(\n arg_9._reaction.remove, arg_5))\n\n for arg_11 in list(arg_5._genes):\n # If the gene is not in the model, add it\n if not arg_0.genes.has_id(arg_11.id):\n arg_0.genes += [arg_11]\n arg_11._model = arg_0\n\n if arg_4:\n # Remove the gene later\n arg_4(partial(arg_0.genes.__isub__, [arg_11]))\n arg_4(partial(setattr, arg_11, '_model', None))\n\n # Otherwise, make the gene point to the one in the model\n else:\n arg_12 = arg_0.genes.get_by_id(arg_11.id)\n if arg_12 is not arg_11:\n arg_5._dissociate_gene(arg_11)\n arg_5._associate_gene(arg_12)\n\n arg_0.reactions += arg_3\n\n if arg_4:\n arg_4(partial(arg_0.reactions.__isub__, arg_3))\n\n # from cameo ...\n arg_0._populate_solver(arg_3)"} +{"_id": "doc_5124", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Remove reactions from the model.\n\n The change is reverted upon exit when using the model as a context.\n\n Parameters\n ----------\n reactions : list\n A list with reactions (`cobra.Reaction`), or their id's, to remove\n\n remove_orphans : bool\n Remove orphaned genes and metabolites from the model as well\n\n \"\"\"\n if isinstance(arg_1, string_types) or hasattr(arg_1, \"id\"):\n warn(\"need to pass in a list\")\n arg_1 = [arg_1]\n\n arg_3 = get_context(arg_0)\n\n for arg_4 in arg_1:\n\n # Make sure the reaction is in the model\n try:\n arg_4 = arg_0.reactions[arg_0.reactions.index(arg_4)]\n except ValueError:\n warn('%s not in %s' % (arg_4, arg_0))\n\n else:\n arg_5 = arg_4.forward_variable\n arg_6 = arg_4.reverse_variable\n\n if arg_3:\n\n arg_7 = arg_4.objective_coefficient\n\n if arg_7 != 0:\n arg_3(partial(\n arg_0.solver.objective.set_linear_coefficients,\n {arg_5: arg_7, arg_6: -arg_7}))\n\n arg_3(partial(arg_0._populate_solver, 
[arg_4]))\n arg_3(partial(setattr, arg_4, '_model', arg_0))\n arg_3(partial(arg_0.reactions.add, arg_4))\n\n arg_0.remove_cons_vars([arg_5, arg_6])\n arg_0.reactions.remove(arg_4)\n arg_4._model = None\n\n for arg_9 in arg_4._metabolites:\n if arg_4 in arg_9._reaction:\n arg_9._reaction.remove(arg_4)\n if arg_3:\n arg_3(partial(arg_9._reaction.add, arg_4))\n if arg_2 and len(arg_9._reaction) == 0:\n arg_0.remove_metabolites(arg_9)\n\n for arg_10 in arg_4._genes:\n if arg_4 in arg_10._reaction:\n arg_10._reaction.remove(arg_4)\n if arg_3:\n arg_3(partial(arg_10._reaction.add, arg_4))\n\n if arg_2 and len(arg_10._reaction) == 0:\n arg_0.genes.remove(arg_10)\n if arg_3:\n arg_3(partial(arg_0.genes.add, arg_10))\n\n # remove reference to the reaction in all groups\n arg_11 = arg_0.get_associated_groups(arg_4)\n for arg_12 in arg_11:\n arg_12.remove_members(arg_4)"} +{"_id": "doc_5125", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add groups to the model.\n\n Groups with identifiers identical to a group already in the model are\n ignored.\n\n If any group contains members that are not in the model, these members\n are added to the model as well. Only metabolites, reactions, and genes\n can have groups.\n\n Parameters\n ----------\n group_list : list\n A list of `cobra.Group` objects to add to the model.\n \"\"\"\n\n def existing_filter(arg_2):\n if arg_2.id in arg_0.groups:\n LOGGER.warning(\n \"Ignoring group '%s' since it already exists.\", arg_2.id)\n return False\n return True\n\n if isinstance(arg_1, string_types) or \\\n hasattr(arg_1, \"id\"):\n warn(\"need to pass in a list\")\n arg_1 = [arg_1]\n\n arg_3 = DictList(filter(existing_filter, arg_1))\n\n for arg_2 in arg_3:\n arg_2._model = arg_0\n for arg_5 in arg_2.members:\n # If the member is not associated with the model, add it\n if isinstance(arg_5, Metabolite):\n if arg_5 not in arg_0.metabolites:\n arg_0.add_metabolites([arg_5])\n if isinstance(arg_5, Reaction):\n if arg_5 not in arg_0.reactions:\n arg_0.add_reactions([arg_5])\n # TODO(midnighter): `add_genes` method does not exist.\n # if isinstance(member, Gene):\n # if member not in self.genes:\n # self.add_genes([member])\n\n arg_0.groups += [arg_2]"} +{"_id": "doc_5126", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Populate attached solver with constraints and variables that\n model the provided reactions.\n \"\"\"\n arg_3 = AutoVivification()\n arg_4 = []\n if arg_2 is not None:\n for arg_5 in arg_2:\n arg_4 += [arg_0.problem.Constraint(\n Zero, name=arg_5.id, lb=0, ub=0)]\n arg_0.add_cons_vars(arg_4)\n\n for arg_6 in arg_1:\n if arg_6.id not in arg_0.variables:\n arg_7 = arg_0.problem.Variable(arg_6.id)\n arg_8 = arg_0.problem.Variable(arg_6.reverse_id)\n arg_0.add_cons_vars([arg_7, arg_8])\n else:\n arg_6 = arg_0.reactions.get_by_id(arg_6.id)\n arg_7 = arg_6.forward_variable\n arg_8 = arg_6.reverse_variable\n for arg_9, arg_10 in six.iteritems(arg_6.metabolites):\n if arg_9.id in arg_0.constraints:\n arg_11 = arg_0.constraints[arg_9.id]\n else:\n arg_11 = arg_0.problem.Constraint(\n Zero,\n name=arg_9.id,\n lb=0, ub=0)\n arg_0.add_cons_vars(arg_11, sloppy=True)\n arg_3[arg_11][arg_7] = arg_10\n arg_3[arg_11][arg_8] = -arg_10\n\n arg_0.solver.update()\n for arg_6 in arg_1:\n arg_6 = arg_0.reactions.get_by_id(arg_6.id)\n arg_6.update_variable_bounds()\n for arg_11, arg_12 in six.iteritems(arg_3):\n arg_11.set_linear_coefficients(arg_12)"} +{"_id": "doc_5127", "title": "", "text": "def Func(arg_0, arg_1=arg_2('nan'), arg_3=None):\n \"\"\"Optimize 
model without creating a solution object.\n\n Creating a full solution object implies fetching shadow prices and\n flux values for all reactions and metabolites from the solver\n object. This necessarily takes some time and in cases where only one\n or two values are of interest, it is recommended to instead use this\n function which does not create a solution object returning only the\n value of the objective. Note however that the `optimize()` function\n uses efficient means to fetch values so if you need fluxes/shadow\n prices for more than say 4 reactions/metabolites, then the total\n speed increase of `Func` versus `optimize` is expected to\n be small or even negative depending on how you fetch the values\n after optimization.\n\n Parameters\n ----------\n error_value : float, None\n The value to return if optimization failed due to e.g.\n infeasibility. If None, raise `OptimizationError` if the\n optimization fails.\n message : string\n Error message to use if the model optimization did not succeed.\n\n Returns\n -------\n float\n The objective value.\n \"\"\"\n arg_0.solver.optimize()\n if arg_0.solver.status == optlang.interface.OPTIMAL:\n return arg_0.solver.objective.value\n elif arg_1 is not None:\n return arg_1\n else:\n assert_optimal(arg_0, arg_3)"} +{"_id": "doc_5128", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"\n Optimize the model using flux balance analysis.\n\n Parameters\n ----------\n objective_sense : {None, 'maximize' 'minimize'}, optional\n Whether fluxes should be maximized or minimized. In case of None,\n the previous direction is used.\n raise_error : bool\n If true, raise an OptimizationError if solver status is not\n optimal.\n\n Notes\n -----\n Only the most commonly used parameters are presented here. 
Additional\n parameters for cobra.solvers may be available and specified with the\n appropriate keyword argument.\n\n \"\"\"\n arg_3 = arg_0.objective.direction\n arg_0.objective.direction = \\\n {\"maximize\": \"max\", \"minimize\": \"min\"}.get(\n arg_1, arg_3)\n arg_0.slim_Func()\n arg_6 = get_solution(arg_0, arg_2=arg_2)\n arg_0.objective.direction = arg_3\n return arg_6"} +{"_id": "doc_5129", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\"Update all indexes and pointers in a model\n\n Parameters\n ----------\n rebuild_index : bool\n rebuild the indices kept in reactions, metabolites and genes\n rebuild_relationships : bool\n reset all associations between genes, metabolites, model and\n then re-add them.\n \"\"\"\n if arg_1: # DictList indexes\n arg_0.reactions._generate_index()\n arg_0.metabolites._generate_index()\n arg_0.genes._generate_index()\n arg_0.groups._generate_index()\n if arg_2:\n for arg_3 in arg_0.metabolites:\n arg_3._reaction.clear()\n for arg_4 in arg_0.genes:\n arg_4._reaction.clear()\n for arg_5 in arg_0.reactions:\n for arg_3 in arg_5._metabolites:\n arg_3._reaction.add(arg_5)\n for arg_4 in arg_5._genes:\n arg_4._reaction.add(arg_5)\n\n # point _model to self\n for arg_6 in (arg_0.reactions, arg_0.genes, arg_0.metabolites, arg_0.groups):\n for arg_7 in arg_6:\n arg_7._model = arg_0"} +{"_id": "doc_5130", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True,\n arg_4='left'):\n \"\"\"Merge two models to create a model with the reactions from both\n models.\n\n Custom constraints and variables from right models are also copied\n to left model, however note that, constraints and variables are\n assumed to be the same if they have the same name.\n\n right : cobra.Model\n The model to add reactions from\n prefix_existing : string\n Prefix the reaction identifier in the right that already exist\n in the left model with this string.\n inplace : bool\n Add reactions from right directly to left model object.\n Otherwise, create a new model leaving the left model untouched.\n When done within the model as context, changes to the models are\n reverted upon exit.\n objective : string\n One of 'left', 'right' or 'sum' for setting the objective of the\n resulting model to that of the corresponding model or the sum of\n both.\n \"\"\"\n if arg_3:\n arg_5 = arg_0\n else:\n arg_5 = arg_0.copy()\n arg_5.id = '{}_{}'.format(arg_0.id, arg_1.id)\n arg_7 = deepcopy(arg_1.reactions)\n if arg_2 is not None:\n arg_8 = arg_7.query(\n lambda rxn: rxn.id in arg_0.reactions)\n for arg_9 in arg_8:\n arg_9.id = '{}{}'.format(arg_2, arg_9.id)\n arg_5.add_reactions(arg_7)\n arg_10 = arg_5.problem\n arg_11 = [arg_10.Variable.clone(v) for v in arg_1.variables if\n v.name not in arg_5.variables]\n arg_5.add_cons_vars(arg_11)\n arg_12 = [arg_10.Constraint.clone(c, model=arg_5.solver)\n for c in arg_1.constraints if\n c.name not in arg_5.constraints]\n arg_5.add_cons_vars(arg_12, sloppy=True)\n arg_5.objective = dict(\n left=arg_0.objective,\n arg_1=arg_1.objective,\n sum=arg_0.objective.expression + arg_1.objective.expression\n )[arg_4]\n return arg_5"} +{"_id": "doc_5131", "title": "", "text": "def Func(arg_0):\n \"\"\"makes all ids SBML compliant\"\"\"\n for arg_1 in chain([arg_0],\n arg_0.metabolites,\n arg_0.reactions,\n arg_0.genes):\n arg_1.id = _escape_str_id(arg_1.id)\n arg_0.repair()\n arg_3 = _GeneEscaper()\n for arg_4, arg_5 in iteritems(get_compiled_gene_reaction_rules(arg_0)):\n if arg_5 is not None:\n arg_4._gene_reaction_rule = 
ast2str(arg_3.visit(arg_5))"} +{"_id": "doc_5132", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"renames genes in a model from the rename_dict\"\"\"\n arg_2 = set() # need to recomptue related genes\n arg_3 = []\n for arg_4, arg_5 in iteritems(arg_1):\n # undefined if there a value matches a different key\n # because dict is unordered\n try:\n arg_6 = arg_0.genes.index(arg_4)\n except ValueError:\n arg_6 = None\n arg_7 = arg_6 is not None\n arg_8 = arg_5 in arg_0.genes\n if arg_7 and arg_8:\n arg_9 = arg_0.genes.get_by_id(arg_4)\n # Added in case not renaming some genes:\n if arg_9 is not arg_0.genes.get_by_id(arg_5):\n arg_3.append(arg_9)\n arg_2.update(arg_9._reaction)\n elif arg_7 and not arg_8:\n # rename old gene to new gene\n arg_10 = arg_0.genes[arg_6]\n # trick DictList into updating index\n arg_0.genes._dict.pop(arg_10.id) # ugh\n arg_10.id = arg_5\n arg_0.genes[arg_6] = arg_10\n elif not arg_7 and arg_8:\n pass\n else: # if not old gene_present and not new_gene_present\n # the new gene's _model will be set by repair\n # This would add genes from rename_dict\n # that are not associated with a rxn\n # cobra_model.genes.append(Gene(new_name))\n pass\n arg_0.repair()\n\n class Renamer(NodeTransformer):\n def visit_Name(arg_13, arg_14):\n arg_14.id = arg_1.get(arg_14.id, arg_14.id)\n return arg_14\n\n arg_15 = Renamer()\n for arg_16, arg_17 in iteritems(get_compiled_gene_reaction_rules(arg_0)):\n if arg_17 is not None:\n arg_16._gene_reaction_rule = ast2str(arg_15.visit(arg_17))\n\n for arg_16 in arg_2:\n arg_16.gene_reaction_rule = arg_16._gene_reaction_rule\n for arg_20 in arg_3:\n arg_0.genes.remove(arg_20)"} +{"_id": "doc_5133", "title": "", "text": "def Func(arg_0, arg_1=False, **arg_2):\n \"\"\"\n Return the model as a JSON document.\n\n ``kwargs`` are passed on to ``json.dumps``.\n\n Parameters\n ----------\n model : cobra.Model\n The cobra model to represent.\n sort : bool, optional\n Whether to sort the metabolites, reactions, and genes or maintain the\n order defined in the model.\n\n Returns\n -------\n str\n String representation of the cobra model as a JSON document.\n\n See Also\n --------\n save_json_model : Write directly to a file.\n json.dumps : Base function.\n \"\"\"\n arg_3 = model_to_dict(arg_0, arg_1=arg_1)\n arg_3[u\"version\"] = JSON_SPEC\n return json.dumps(arg_3, allow_nan=False, **arg_2)"} +{"_id": "doc_5134", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load a cobra model from a file in JSON format.\n\n Parameters\n ----------\n filename : str or file-like\n File path or descriptor that contains the JSON document describing the\n cobra model.\n\n Returns\n -------\n cobra.Model\n The cobra model as represented in the JSON document.\n\n See Also\n --------\n from_json : Load from a string.\n \"\"\"\n if isinstance(arg_0, string_types):\n with open(arg_0, \"r\") as file_handle:\n return model_from_dict(json.load(file_handle))\n else:\n return model_from_dict(json.load(arg_0))"} +{"_id": "doc_5135", "title": "", "text": "def Func(arg_0):\n \"\"\"Add a mixed-integer version of a minimal medium to the model.\n\n Changes the optimization objective to finding the medium with the least\n components::\n\n minimize size(R) where R part of import_reactions\n\n Arguments\n ---------\n model : cobra.model\n The model to modify.\n \"\"\"\n if len(arg_0.variables) > 1e4:\n LOGGER.warning(\"the MIP version of minimal media is extremely slow for\"\n \" models that large :(\")\n arg_1 = find_boundary_types(arg_0, \"exchange\")\n arg_2 = max(abs(b) for r in 
arg_1 for b in r.bounds)\n arg_3 = arg_0.problem\n arg_4 = {}\n arg_5 = []\n for arg_6 in arg_1:\n arg_7 = len(arg_6.reactants) == 1\n arg_8 = arg_3.Variable(\"ind_\" + arg_6.id, lb=0, ub=1, type=\"binary\")\n if arg_7:\n arg_9 = arg_6.reverse_variable\n arg_10 = arg_3.Constraint(\n arg_9 - arg_8 * arg_2, ub=0, name=\"ind_constraint_\" + arg_6.id)\n else:\n arg_11 = arg_6.forward_variable\n arg_10 = arg_3.Constraint(\n arg_11 - arg_8 * arg_2, ub=0, name=\"ind_constraint_\" + arg_6.id)\n arg_5.extend([arg_8, arg_10])\n arg_4[arg_8] = 1\n arg_0.add_cons_vars(arg_5)\n arg_0.solver.update()\n arg_0.objective.set_linear_coefficients(arg_4)\n arg_0.objective.direction = \"min\""} +{"_id": "doc_5136", "title": "", "text": "def Func(arg_0, arg_1=1e-6, arg_2=False):\n \"\"\"Convert a solution to medium.\n\n Arguments\n ---------\n exchanges : list of cobra.reaction\n The exchange reactions to consider.\n tolerance : positive double\n The absolute tolerance for fluxes. Fluxes with an absolute value\n smaller than this number will be ignored.\n exports : bool\n Whether to return export fluxes as well.\n\n Returns\n -------\n pandas.Series\n The \"medium\", meaning all active import fluxes in the solution.\n \"\"\"\n LOGGER.debug(\"Formatting medium.\")\n arg_3 = pd.Series()\n for arg_4 in arg_0:\n arg_5 = len(arg_4.reactants) == 1\n arg_6 = arg_4.flux\n if abs(arg_6) < arg_1:\n continue\n if arg_5:\n arg_3[arg_4.id] = -arg_6\n elif not arg_5:\n arg_3[arg_4.id] = arg_6\n if not arg_2:\n arg_3 = arg_3[arg_3 > 0]\n\n return arg_3"} +{"_id": "doc_5137", "title": "", "text": "def Func(arg_0, arg_1=0.1, arg_2=False,\n arg_3=False, arg_4=False):\n \"\"\"\n Find the minimal growth medium for the model.\n\n Finds the minimal growth medium for the model which allows for\n model as well as individual growth. Here, a minimal medium can either\n be the medium requiring the smallest total import flux or the medium\n requiring the least components (ergo ingredients), which will be much\n slower due to being a mixed integer problem (MIP).\n\n Arguments\n ---------\n model : cobra.model\n The model to modify.\n min_objective_value : positive float or array-like object\n The minimum growth rate (objective) that has to be achieved.\n exports : boolean\n Whether to include export fluxes in the returned medium. Defaults to\n False which will only return import fluxes.\n minimize_components : boolean or positive int\n Whether to minimize the number of components instead of the total\n import flux. Might be more intuitive if set to True but may also be\n slow to calculate for large communities. If set to a number `n` will\n return up to `n` alternative solutions all with the same number of\n components.\n open_exchanges : boolean or number\n Whether to ignore currently set bounds and make all exchange reactions\n in the model possible. If set to a number all exchange reactions will\n be opened with (-number, number) as bounds.\n\n Returns\n -------\n pandas.Series, pandas.DataFrame or None\n A series giving the import flux for each required import\n reaction and (optionally) the associated export fluxes. All exchange\n fluxes are oriented into the import reaction e.g. positive fluxes\n denote imports and negative fluxes exports. If `minimize_components`\n is a number larger 1 may return a DataFrame where each column is a\n minimal medium. 
Returns None if the minimization is infeasible\n (for instance if min_growth > maximum growth rate).\n\n Notes\n -----\n Due to numerical issues the `minimize_components` option will usually only\n minimize the number of \"large\" import fluxes. Specifically, the detection\n limit is given by ``integrality_tolerance * max_bound`` where ``max_bound``\n is the largest bound on an import reaction. Thus, if you are interested\n in small import fluxes as well you may have to adjust the integrality\n tolerance at first with\n `model.solver.configuration.tolerances.integrality = 1e-7` for instance.\n However, this will be *very* slow for large models especially with GLPK.\n\n \"\"\"\n arg_5 = find_boundary_types(arg_0, \"exchange\")\n if isinstance(arg_4, bool):\n arg_6 = 1000\n else:\n arg_6 = arg_4\n\n with arg_0 as arg_10:\n if arg_4:\n LOGGER.debug(\"Opening exchanges for %d imports.\",\n len(arg_5))\n for arg_7 in arg_5:\n arg_7.bounds = (-arg_6, arg_6)\n LOGGER.debug(\"Applying objective value constraints.\")\n arg_9 = arg_10.problem.Constraint(\n arg_10.objective.expression, lb=arg_1,\n name=\"medium_obj_constraint\")\n arg_10.add_cons_vars([arg_9])\n arg_10.solver.update()\n arg_10.objective = Zero\n LOGGER.debug(\"Adding new media objective.\")\n arg_12 = arg_10.solver.configuration.tolerances.feasibility\n\n if arg_3:\n add_mip_obj(arg_10)\n if isinstance(arg_3, bool):\n arg_3 = 1\n arg_13 = set()\n arg_14 = arg_20 = arg_10.slim_optimize()\n if arg_10.solver.status != OPTIMAL:\n LOGGER.warning(\"Minimization of medium was infeasible.\")\n return None\n arg_15 = arg_10.problem.Constraint(Zero, arg_19=0)\n arg_10.add_cons_vars([arg_15])\n arg_10.solver.update()\n arg_16 = []\n for arg_17 in range(arg_3):\n LOGGER.info(\"Finding alternative medium #%d.\", (arg_17 + 1))\n arg_18 = [arg_10.variables[\"ind_\" + s] for s in arg_13]\n if len(arg_13) > 0:\n arg_15.set_linear_coefficients(\n dict.fromkeys(arg_18, 1))\n arg_15.ub = arg_14 - 1\n arg_20 = arg_10.slim_optimize()\n if arg_10.solver.status != OPTIMAL or arg_20 > arg_14:\n break\n arg_21 = _as_medium(arg_5, arg_12, arg_2=arg_2)\n arg_16.append(arg_21)\n arg_13.update(arg_21[arg_21 > 0].index)\n if len(arg_16) > 1:\n arg_21 = pd.concat(arg_16, axis=1, sort=True).fillna(0.0)\n arg_21.sort_index(axis=1, inplace=True)\n else:\n arg_21 = arg_16[0]\n else:\n add_linear_obj(arg_10)\n arg_10.slim_optimize()\n if arg_10.solver.status != OPTIMAL:\n LOGGER.warning(\"Minimization of medium was infeasible.\")\n return None\n arg_21 = _as_medium(arg_5, arg_12, arg_2=arg_2)\n\n return arg_21"} +{"_id": "doc_5138", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Initialize a global model object for multiprocessing.\"\"\"\n global arg_3\n global arg_7\n arg_3 = arg_0\n arg_3.solver.objective.direction = arg_2\n arg_7 = arg_1"} +{"_id": "doc_5139", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False,\n arg_3=1.0, arg_4=None,\n arg_5=None):\n \"\"\"\n Determine the minimum and maximum possible flux value for each reaction.\n\n Parameters\n ----------\n model : cobra.Model\n The model for which to run the analysis. It will *not* be modified.\n reaction_list : list of cobra.Reaction or str, optional\n The reactions for which to obtain min/max fluxes. If None will use\n all reactions in the model (default).\n loopless : boolean, optional\n Whether to return only loopless solutions. This is significantly\n slower. Please also refer to the notes.\n fraction_of_optimum : float, optional\n Must be <= 1.0. 
Requires that the objective value is at least the\n fraction times maximum objective value. A value of 0.85 for instance\n means that the objective has to be at least at 85% percent of its\n maximum.\n pfba_factor : float, optional\n Add an additional constraint to the model that requires the total sum\n of absolute fluxes must not be larger than this value times the\n smallest possible sum of absolute fluxes, i.e., by setting the value\n to 1.1 the total sum of absolute fluxes must not be more than\n 10% larger than the pFBA solution. Since the pFBA solution is the\n one that optimally minimizes the total flux sum, the ``pfba_factor``\n should, if set, be larger than one. Setting this value may lead to\n more realistic predictions of the effective flux bounds.\n processes : int, optional\n The number of parallel processes to run. If not explicitly passed,\n will be set from the global configuration singleton.\n\n Returns\n -------\n pandas.DataFrame\n A data frame with reaction identifiers as the index and two columns:\n - maximum: indicating the highest possible flux\n - minimum: indicating the lowest possible flux\n\n Notes\n -----\n This implements the fast version as described in [1]_. Please note that\n the flux distribution containing all minimal/maximal fluxes does not have\n to be a feasible solution for the model. Fluxes are minimized/maximized\n individually and a single minimal flux might require all others to be\n suboptimal.\n\n Using the loopless option will lead to a significant increase in\n computation time (about a factor of 100 for large models). However, the\n algorithm used here (see [2]_) is still more than 1000x faster than the\n \"naive\" version using ``add_loopless(model)``. Also note that if you have\n included constraints that force a loop (for instance by setting all fluxes\n in a loop to be non-zero) this loop will be included in the solution.\n\n References\n ----------\n .. [1] Computationally efficient flux variability analysis.\n Gudmundsson S, Thiele I.\n BMC Bioinformatics. 2010 Sep 29;11:489.\n doi: 10.1186/1471-2105-11-489, PMID: 20920235\n\n .. [2] CycleFreeFlux: efficient removal of thermodynamically infeasible\n loops from flux distributions.\n Desouki AA, Jarre F, Gelius-Dietrich G, Lercher MJ.\n Bioinformatics. 2015 Jul 1;31(13):2159-65.\n doi: 10.1093/bioinformatics/btv096.\n \"\"\"\n if arg_1 is None:\n arg_6 = [r.id for r in arg_0.reactions]\n else:\n arg_6 = [r.id\n for r in arg_0.reactions.get_by_any(arg_1)]\n\n if arg_5 is None:\n arg_5 = CONFIGURATION.processes\n arg_7 = len(arg_6)\n arg_5 = min(arg_5, arg_7)\n\n arg_8 = DataFrame({\n \"minimum\": zeros(arg_7, dtype=float),\n \"maximum\": zeros(arg_7, dtype=float)\n }, index=arg_6)\n arg_9 = arg_0.problem\n with arg_0:\n # Safety check before setting up FVA.\n arg_0.slim_optimize(error_value=None,\n message=\"There is no optimal solution for the \"\n \"chosen objective!\")\n # Add the previous objective as a variable to the model then set it to\n # zero. 
This also uses the fraction to create the lower/upper bound for\n # the old objective.\n # TODO: Use utility function here (fix_objective_as_constraint)?\n if arg_0.solver.objective.direction == \"max\":\n arg_10 = arg_9.Variable(\n \"fva_old_objective\",\n lb=arg_3 * arg_0.solver.objective.value)\n else:\n arg_10 = arg_9.Variable(\n \"fva_old_objective\",\n arg_12=arg_3 * arg_0.solver.objective.value)\n arg_11 = arg_9.Constraint(\n arg_0.solver.objective.expression - arg_10, lb=0, arg_12=0,\n name=\"fva_old_objective_constraint\")\n arg_0.add_cons_vars([arg_10, arg_11])\n\n if arg_4 is not None:\n if arg_4 < 1.:\n warn(\"The 'pfba_factor' should be larger or equal to 1.\",\n UserWarning)\n with arg_0:\n add_pfba(arg_0, arg_3=0)\n arg_12 = arg_0.slim_optimize(error_value=None)\n arg_13 = arg_9.Variable(\"flux_sum\", arg_12=arg_4 * arg_12)\n arg_14 = arg_9.Constraint(\n arg_0.solver.objective.expression - arg_13, lb=0, arg_12=0,\n name=\"flux_sum_constraint\")\n arg_0.add_cons_vars([arg_13, arg_14])\n\n arg_0.objective = Zero # This will trigger the reset as well\n for arg_16 in (\"minimum\", \"maximum\"):\n if arg_5 > 1:\n # We create and destroy a new pool here in order to set the\n # objective direction for all reactions. This creates a\n # slight overhead but seems the most clean.\n arg_17 = len(arg_6) // arg_5\n arg_18 = multiprocessing.Pool(\n arg_5,\n initializer=_init_worker,\n initargs=(arg_0, arg_2, arg_16[:3])\n )\n for arg_19, arg_20 in arg_18.imap_unordered(_fva_step,\n arg_6,\n chunksize=arg_17):\n arg_8.at[arg_19, arg_16] = arg_20\n arg_18.close()\n arg_18.join()\n else:\n _init_worker(arg_0, arg_2, arg_16[:3])\n for arg_19, arg_20 in map(_fva_step, arg_6):\n arg_8.at[arg_19, arg_16] = arg_20\n\n return arg_8[[\"minimum\", \"maximum\"]]"} +{"_id": "doc_5140", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=False,\n arg_4=None):\n \"\"\"\n Find reactions that cannot carry any flux.\n\n The question whether or not a reaction is blocked is highly dependent\n on the current exchange reaction settings for a COBRA model. Hence an\n argument is provided to open all exchange reactions.\n\n Notes\n -----\n Sink and demand reactions are left untouched. Please modify them manually.\n\n Parameters\n ----------\n model : cobra.Model\n The model to analyze.\n reaction_list : list, optional\n List of reactions to consider, the default includes all model\n reactions.\n zero_cutoff : float, optional\n Flux value which is considered to effectively be zero\n (default model.tolerance).\n open_exchanges : bool, optional\n Whether or not to open all exchange reactions to very high flux ranges.\n processes : int, optional\n The number of parallel processes to run. Can speed up the computations\n if the number of reactions is large. If not explicitly\n passed, it will be set from the global configuration singleton.\n\n Returns\n -------\n list\n List with the identifiers of blocked reactions.\n\n \"\"\"\n arg_2 = normalize_cutoff(arg_0, arg_2)\n\n with arg_0:\n if arg_3:\n for arg_5 in arg_0.exchanges:\n arg_5.bounds = (min(arg_5.lower_bound, -1000),\n max(arg_5.upper_bound, 1000))\n if arg_1 is None:\n arg_1 = arg_0.reactions\n # Limit the search space to reactions which have zero flux. 
If the\n # reactions already carry flux in this solution,\n # then they cannot be blocked.\n arg_0.slim_optimize()\n arg_7 = get_solution(arg_0, reactions=arg_1)\n arg_1 = arg_7.fluxes[\n arg_7.fluxes.abs() < arg_2].index.tolist()\n # Run FVA to find reactions where both the minimal and maximal flux\n # are zero (below the cut off).\n arg_8 = flux_variability_analysis(\n arg_0, fraction_of_optimum=0., arg_1=arg_1,\n arg_4=arg_4\n )\n return arg_8[\n arg_8.abs().max(axis=1) < arg_2].index.tolist()"} +{"_id": "doc_5141", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Return a set of essential reactions.\n\n A reaction is considered essential if restricting its flux to zero\n causes the objective, e.g., the growth rate, to also be zero, below the\n threshold, or infeasible.\n\n\n Parameters\n ----------\n model : cobra.Model\n The model to find the essential reactions for.\n threshold : float, optional\n Minimal objective flux to be considered viable. By default this is\n 1% of the maximal objective.\n processes : int, optional\n The number of parallel processes to run. Can speed up the computations\n if the number of knockouts to perform is large. If not explicitly\n passed, it will be set from the global configuration singleton.\n\n Returns\n -------\n set\n Set of essential reactions\n \"\"\"\n if arg_1 is None:\n arg_1 = arg_0.slim_optimize(error_value=None) * 1E-02\n arg_3 = single_reaction_deletion(\n arg_0, method='fba', arg_2=arg_2)\n arg_4 = arg_3.loc[arg_3['growth'].isna() |\n (arg_3['growth'] < arg_1), :].index\n return {arg_0.reactions.get_by_id(arg_6) for arg_5 in arg_4 for arg_6 in arg_5}"} +{"_id": "doc_5142", "title": "", "text": "def Func(arg_0):\n \"\"\"adds SBO terms for demands and exchanges\n\n This works for models which follow the standard convention for\n constructing and naming these reactions.\n\n The reaction should only contain the single metabolite being exchanged,\n and the id should be EX_metid or DM_metid\n \"\"\"\n for arg_1 in arg_0.reactions:\n # don't annotate already annotated reactions\n if arg_1.annotation.get(\"sbo\"):\n continue\n # only doing exchanges\n if len(arg_1.metabolites) != 1:\n continue\n arg_2 = list(arg_1._metabolites)[0].id\n if arg_1.id.startswith(\"EX_\") and arg_1.id == \"EX_\" + arg_2:\n arg_1.annotation[\"sbo\"] = \"SBO:0000627\"\n elif arg_1.id.startswith(\"DM_\") and arg_1.id == \"DM_\" + arg_2:\n arg_1.annotation[\"sbo\"] = \"SBO:0000628\""} +{"_id": "doc_5143", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=\"fba\",\n arg_4=None, arg_5=None, **arg_6):\n \"\"\"\n Knock out each gene pair from the combination of two given lists.\n\n We say 'pair' here but the order order does not matter.\n\n Parameters\n ----------\n model : cobra.Model\n The metabolic model to perform deletions in.\n gene_list1 : iterable, optional\n First iterable of ``cobra.Gene``s to be deleted. If not passed,\n all the genes from the model are used.\n gene_list2 : iterable, optional\n Second iterable of ``cobra.Gene``s to be deleted. If not passed,\n all the genes from the model are used.\n method: {\"fba\", \"moma\", \"linear moma\", \"room\", \"linear room\"}, optional\n Method used to predict the growth rate.\n solution : cobra.Solution, optional\n A previous solution to use as a reference for (linear) MOMA or ROOM.\n processes : int, optional\n The number of parallel processes to run. Can speed up the computations\n if the number of knockouts to perform is large. 
If not passed,\n will be set to the number of CPUs found.\n kwargs :\n Keyword arguments are passed on to underlying simulation functions\n such as ``add_room``.\n\n Returns\n -------\n pandas.DataFrame\n A representation of all combinations of gene deletions. The\n columns are 'growth' and 'status', where\n\n index : frozenset([str])\n The gene identifiers that were knocked out.\n growth : float\n The growth rate of the adjusted model.\n status : str\n The solution's status.\n\n \"\"\"\n\n arg_1, arg_2 = _element_lists(arg_0.genes, arg_1,\n arg_2)\n return _multi_deletion(\n arg_0, 'gene', element_lists=[arg_1, arg_2],\n arg_3=arg_3, arg_4=arg_4, arg_5=arg_5, **arg_6)"} +{"_id": "doc_5144", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate the id of reverse_variable from the reaction's id.\"\"\"\n return '_'.join((arg_0.id, 'reverse',\n hashlib.md5(\n arg_0.id.encode('utf-8')).hexdigest()[0:5]))"} +{"_id": "doc_5145", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The Func value in the most recent solution.\n\n Flux is the primal value of the corresponding variable in the model.\n\n Warnings\n --------\n * Accessing reaction Funces through a `Solution` object is the safer,\n preferred, and only guaranteed to be correct way. You can see how to\n do so easily in the examples.\n * Reaction Func is retrieved from the currently defined\n `self._model.solver`. The solver status is checked but there are no\n guarantees that the current solver state is the one you are looking\n for.\n * If you modify the underlying model after an optimization, you will\n retrieve the old optimization values.\n\n Raises\n ------\n RuntimeError\n If the underlying model was never optimized beforehand or the\n reaction is not part of a model.\n OptimizationError\n If the solver status is anything other than 'optimal'.\n AssertionError\n If the Func value is not within the bounds.\n\n Examples\n --------\n >>> import cobra.test\n >>> model = cobra.test.create_test_model(\"textbook\")\n >>> solution = model.optimize()\n >>> model.reactions.PFK.Func\n 7.477381962160283\n >>> solution.Funces.PFK\n 7.4773819621602833\n \"\"\"\n try:\n check_solver_status(arg_0._model.solver.status)\n return arg_0.forward_variable.primal - arg_0.reverse_variable.primal\n except AttributeError:\n raise RuntimeError(\n \"reaction '{}' is not part of a model\".format(arg_0.id))\n # Due to below all-catch, which sucks, need to reraise these.\n except (RuntimeError, OptimizationError) as err:\n raise_with_traceback(err)\n # Would love to catch CplexSolverError and GurobiError here.\n except Exception as err:\n raise_from(OptimizationError(\n \"Likely no solution exists. Original solver message: {}.\"\n \"\".format(str(err))), err)"} +{"_id": "doc_5146", "title": "", "text": "def Func(arg_0):\n \"\"\"Display gene_reaction_rule with names intead.\n\n Do NOT use this string for computation. 
It is intended to give a\n representation of the rule using more familiar gene names instead of\n the often cryptic ids.\n\n \"\"\"\n arg_1 = {i.id: i.name for i in arg_0._genes}\n arg_2 = parse_gpr(arg_0._gene_reaction_rule)[0]\n return ast2str(arg_2, arg_1=arg_1)"} +{"_id": "doc_5147", "title": "", "text": "def Func(arg_0):\n \"\"\"All required enzymes for reaction are Func.\n\n Returns\n -------\n bool\n True if the gene-protein-reaction (GPR) rule is fulfilled for\n this reaction, or if reaction is not associated to a model,\n otherwise False.\n \"\"\"\n if arg_0._model:\n arg_1, arg_2 = parse_gpr(arg_0.gene_reaction_rule)\n return eval_gpr(arg_1, {arg_3.id for arg_3 in arg_0.genes if\n not arg_3.Func})\n return True"} +{"_id": "doc_5148", "title": "", "text": "def Func(arg_0):\n \"\"\"Make sure all metabolites and genes that are associated with\n this reaction are aware of it.\n\n \"\"\"\n for arg_1 in arg_0._metabolites:\n arg_1._reaction.add(arg_0)\n for arg_1 in arg_0._genes:\n arg_1._reaction.add(arg_0)"} +{"_id": "doc_5149", "title": "", "text": "def Func(arg_0):\n \"\"\"Copy a reaction\n\n The referenced metabolites and genes are also copied.\n\n \"\"\"\n # no references to model when Funcing\n arg_1 = arg_0._model\n arg_0._model = None\n for arg_3 in arg_0._metabolites:\n arg_3._model = None\n for arg_3 in arg_0._genes:\n arg_3._model = None\n # now we can Func\n arg_4 = deepFunc(arg_0)\n # restore the references\n arg_0._model = arg_1\n for arg_3 in arg_0._metabolites:\n arg_3._model = arg_1\n for arg_3 in arg_0._genes:\n arg_3._model = arg_1\n return arg_4"} +{"_id": "doc_5150", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True,\n arg_3=True):\n \"\"\"Add metabolites and stoichiometric coefficients to the reaction.\n If the final coefficient for a metabolite is 0 then it is removed\n from the reaction.\n\n The change is reverted upon exit when using the model as a context.\n\n Parameters\n ----------\n metabolites_to_add : dict\n Dictionary with metabolite objects or metabolite identifiers as\n keys and coefficients as values. 
If keys are strings (name of a\n metabolite) the reaction must already be part of a model and a\n metabolite with the given name must exist in the model.\n\n combine : bool\n Describes behavior a metabolite already exists in the reaction.\n True causes the coefficients to be added.\n False causes the coefficient to be replaced.\n\n reversibly : bool\n Whether to add the change to the context to make the change\n reversibly or not (primarily intended for internal use).\n\n \"\"\"\n arg_4 = arg_0.metabolites\n arg_5 = []\n arg_6 = dict([(x.id, x) for x in arg_0._metabolites])\n\n for arg_7, arg_8 in iteritems(arg_1):\n\n # Make sure metabolites being added belong to the same model, or\n # else copy them.\n if isinstance(arg_7, Metabolite):\n if ((arg_7.model is not None) and\n (arg_7.model is not arg_0._model)):\n arg_7 = arg_7.copy()\n\n arg_9 = str(arg_7)\n # If a metabolite already exists in the reaction then\n # just add them.\n if arg_9 in arg_6:\n arg_10 = arg_6[arg_9]\n if arg_2:\n arg_0._metabolites[arg_10] += arg_8\n else:\n arg_0._metabolites[arg_10] = arg_8\n else:\n # If the reaction is in a model, ensure we aren't using\n # a duplicate metabolite.\n if arg_0._model:\n try:\n arg_7 = \\\n arg_0._model.metabolites.get_by_id(arg_9)\n except KeyError as e:\n if isinstance(arg_7, Metabolite):\n arg_5.append(arg_7)\n else:\n # do we want to handle creation here?\n raise e\n elif isinstance(arg_7, string_types):\n # if we want to handle creation, this should be changed\n raise ValueError(\"Reaction '%s' does not belong to a \"\n \"model. Either add the reaction to a \"\n \"model or use Metabolite objects instead \"\n \"of strings as keys.\"\n % arg_0.id)\n arg_0._metabolites[arg_7] = arg_8\n # make the metabolite aware that it is involved in this\n # reaction\n arg_7._reaction.add(arg_0)\n\n # from cameo ...\n arg_12 = arg_0.model\n if arg_12 is not None:\n arg_12.Func(arg_5)\n\n for arg_7, arg_8 in arg_0._metabolites.items():\n arg_12.constraints[\n arg_7.id].set_linear_coefficients(\n {arg_0.forward_variable: arg_8,\n arg_0.reverse_variable: -arg_8\n })\n\n for arg_7, arg_13 in list(arg_0._metabolites.items()):\n if arg_13 == 0:\n # make the metabolite aware that it no longer participates\n # in this reaction\n arg_7._reaction.remove(arg_0)\n arg_0._metabolites.pop(arg_7)\n\n arg_14 = get_context(arg_0)\n if arg_14 and arg_3:\n if arg_2:\n # Just subtract the metabolites that were added\n arg_14(partial(\n arg_0.subtract_metabolites, arg_1,\n arg_2=True, arg_3=False))\n else:\n # Reset them with Func\n arg_15 = {\n key: arg_4[arg_12.metabolites.get_by_any(key)[0]]\n for key in iterkeys(arg_1)}\n\n arg_14(partial(\n arg_0.Func, arg_15,\n arg_2=False, arg_3=False))"} +{"_id": "doc_5151", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Generate a human readable reaction string\"\"\"\n\n def format(arg_2):\n return \"\" if arg_2 == 1 else str(arg_2).rstrip(\".\") + \" \"\n\n arg_3 = 'id'\n if arg_1:\n arg_3 = 'name'\n arg_4 = []\n arg_5 = []\n for arg_6 in sorted(arg_0._metabolites, key=attrgetter(\"id\")):\n arg_7 = arg_0._metabolites[arg_6]\n arg_8 = str(getattr(arg_6, arg_3))\n if arg_7 >= 0:\n arg_5.append(format(arg_7) + arg_8)\n else:\n arg_4.append(format(abs(arg_7)) + arg_8)\n\n arg_9 = ' + '.join(arg_4)\n if not arg_0.reversibility:\n if arg_0.lower_bound < 0 and arg_0.upper_bound <= 0:\n arg_9 += ' <-- '\n else:\n arg_9 += ' --> '\n else:\n arg_9 += ' <=> '\n arg_9 += ' + '.join(arg_5)\n return arg_9"} +{"_id": "doc_5152", "title": "", "text": "def Func(arg_0):\n 
\"\"\"Compute mass and charge balance for the reaction\n\n returns a dict of {element: amount} for unbalanced elements.\n \"charge\" is treated as an element in this dict\n This should be empty for balanced reactions.\n \"\"\"\n arg_1 = defaultdict(int)\n for arg_2, arg_3 in iteritems(arg_0._metabolites):\n if arg_2.charge is not None:\n arg_1[\"charge\"] += \\\n arg_3 * arg_2.charge\n if arg_2.elements is None:\n raise ValueError(\"No elements found in metabolite %s\"\n % arg_2.id)\n for arg_4, arg_5 in iteritems(arg_2.elements):\n arg_1[arg_4] += arg_3 * arg_5\n # filter out 0 values\n return {arg_6: arg_7 for arg_6, arg_7 in iteritems(arg_1) if arg_7 != 0}"} +{"_id": "doc_5153", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Dissociates a cobra.Gene object with a cobra.Reaction.\n\n Parameters\n ----------\n cobra_gene : cobra.core.Gene.Gene\n\n \"\"\"\n arg_0._genes.discard(arg_1)\n arg_1._reaction.discard(arg_0)"} +{"_id": "doc_5154", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True,\n arg_3=None, arg_4=None,\n arg_5=None, arg_6=\"+\"):\n \"\"\"Builds reaction from reaction equation reaction_str using parser\n\n Takes a string and using the specifications supplied in the optional\n arguments infers a set of metabolites, metabolite compartments and\n stoichiometries for the reaction. It also infers the reversibility\n of the reaction from the reaction arrow.\n\n Changes to the associated model are reverted upon exit when using\n the model as a context.\n\n Parameters\n ----------\n reaction_str : string\n a string containing a reaction formula (equation)\n verbose: bool\n setting verbosity of function\n fwd_arrow : re.compile\n for forward irreversible reaction arrows\n rev_arrow : re.compile\n for backward irreversible reaction arrows\n reversible_arrow : re.compile\n for reversible reaction arrows\n term_split : string\n dividing individual metabolite entries\n\n \"\"\"\n # set the arrows\n arg_7 = _forward_arrow_finder if arg_3 is None \\\n else re.compile(re.escape(arg_3))\n arg_8 = _reverse_arrow_finder if arg_4 is None \\\n else re.compile(re.escape(arg_4))\n arg_9 = _reversible_arrow_finder \\\n if arg_5 is None \\\n else re.compile(re.escape(arg_5))\n if arg_0._model is None:\n warn(\"no model found\")\n arg_10 = None\n else:\n arg_10 = arg_0._model\n arg_11 = compartment_finder.findall(arg_1)\n if len(arg_11) == 1:\n arg_12 = arg_11[0]\n arg_1 = compartment_finder.sub(\"\", arg_1)\n else:\n arg_12 = \"\"\n\n # reversible case\n arg_13 = arg_9.search(arg_1)\n if arg_13 is not None:\n arg_0.lower_bound = -1000\n arg_0.upper_bound = 1000\n else: # irreversible\n # try forward\n arg_13 = arg_7.search(arg_1)\n if arg_13 is not None:\n arg_0.upper_bound = 1000\n arg_0.lower_bound = 0\n else:\n # must be reverse\n arg_13 = arg_8.search(arg_1)\n if arg_13 is None:\n raise ValueError(\"no suitable arrow found in '%s'\" %\n arg_1)\n else:\n arg_0.upper_bound = 0\n arg_0.lower_bound = -1000\n arg_16 = arg_1[:arg_13.start()].strip()\n arg_17 = arg_1[arg_13.end():].strip()\n\n arg_0.subtract_metabolites(arg_0.metabolites, combine=True)\n\n for arg_18, arg_19 in ((arg_16, -1), (arg_17, 1)):\n if len(arg_18) == 0:\n continue\n for arg_20 in arg_18.split(arg_6):\n arg_20 = arg_20.strip()\n if arg_20.lower() == \"nothing\":\n continue\n if \" \" in arg_20:\n arg_21, arg_22 = arg_20.split()\n arg_23 = float(arg_21.lstrip(\"(\").rstrip(\")\")) * arg_19\n else:\n arg_22 = arg_20\n arg_23 = arg_19\n arg_22 += arg_12\n try:\n arg_24 = arg_10.metabolites.get_by_id(arg_22)\n except 
KeyError:\n if arg_2:\n print(\"unknown metabolite '%s' created\" % arg_22)\n arg_24 = Metabolite(arg_22)\n arg_0.add_metabolites({arg_24: arg_23})"} +{"_id": "doc_5155", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4,\n arg_5=False, **arg_6):\n \"\"\"Reads SBML model from given filename.\n\n If the given filename ends with the suffix ''.gz'' (for example,\n ''myfile.xml.gz'),' the file is assumed to be compressed in gzip\n format and will be automatically decompressed upon reading. Similarly,\n if the given filename ends with ''.zip'' or ''.bz2',' the file is\n assumed to be compressed in zip or bzip2 format (respectively). Files\n whose names lack these suffixes will be read uncompressed. Note that\n if the file is in zip format but the archive contains more than one\n file, only the first file in the archive will be read and the rest\n ignored.\n\n To read a gzip/zip file, libSBML needs to be configured and linked\n with the zlib library at compile time. It also needs to be linked\n with the bzip2 library to read files in bzip2 format. (Both of these\n are the default configurations for libSBML.)\n\n This function supports SBML with FBC-v1 and FBC-v2. FBC-v1 models\n are converted to FBC-v2 models before reading.\n\n The parser tries to fall back to information in notes dictionaries\n if information is not available in the FBC packages, e.g.,\n CHARGE, FORMULA on species, or GENE_ASSOCIATION, SUBSYSTEM on reactions.\n\n Parameters\n ----------\n filename : path to SBML file, or SBML string, or SBML file handle\n SBML which is read into cobra model\n number: data type of stoichiometry: {float, int}\n In which data type should the stoichiometry be parsed.\n f_replace : dict of replacement functions for id replacement\n Dictionary of replacement functions for gene, specie, and reaction.\n By default the following id changes are performed on import:\n clip G_ from genes, clip M_ from species, clip R_ from reactions\n If no replacements should be performed, set f_replace={}, None\n set_missing_bounds : boolean flag to set missing bounds\n Missing bounds are set to default bounds in configuration.\n\n Returns\n -------\n cobra.core.Model\n\n Notes\n -----\n Provided file handles cannot be opened in binary mode, i.e., use\n with open(path, \"r\" as f):\n Func(f)\n File handles to compressed files are not supported yet.\n \"\"\"\n try:\n arg_7 = _get_doc_from_filename(arg_0)\n return _sbml_to_model(arg_7,\n arg_1=arg_1,\n arg_3=arg_3,\n arg_5=arg_5,\n **arg_6)\n except IOError as e:\n raise e\n\n except Exception:\n LOGGER.error(traceback.print_exc())\n raise CobraSBMLError(\n \"Something went wrong reading the SBML model. Most likely the SBML\"\n \" model is not valid. 
Please check that your model is valid using \"\n \"the `cobra.io.sbml.validate_sbml_model` function or via the \"\n \"online validator at http://sbml.org/validator .\\n\"\n \"\\t`(model, errors) = validate_sbml_model(filename)`\"\n \"\\nIf the model is valid and cannot be read please open an issue \"\n \"at https://github.com/opencobra/cobrapy/issues .\")"} +{"_id": "doc_5156", "title": "", "text": "def Func(arg_0):\n \"\"\"Get SBMLDocument from given filename.\n\n Parameters\n ----------\n filename : path to SBML, or SBML string, or filehandle\n\n Returns\n -------\n libsbml.SBMLDocument\n \"\"\"\n if isinstance(arg_0, string_types):\n if (\"win\" in platform) and (len(arg_0) < 260) \\\n and os.path.exists(arg_0):\n # path (win)\n arg_1 = libsbml.readSBMLFromFile(arg_0) # noqa: E501 type: libsbml.SBMLDocument\n elif (\"win\" not in platform) and os.path.exists(arg_0):\n # path other\n arg_1 = libsbml.readSBMLFromFile(arg_0) # noqa: E501 type: libsbml.SBMLDocument\n else:\n # string representation\n if \"<sbml\" not in arg_0:\n raise IOError(\"The file with 'filename' does not exist, \"\n \"or is not an SBML string. Provide the path to \"\n \"an existing SBML file or a valid SBML string \"\n \"representation: \\n%s\", arg_0)\n arg_1 = libsbml.readSBMLFromString(arg_0) # noqa: E501 type: libsbml.SBMLDocument\n elif hasattr(arg_0, \"read\"):\n # file handle\n arg_1 = libsbml.readSBMLFromString(arg_0.read()) # noqa: E501 type: libsbml.SBMLDocument\n else:\n raise CobraSBMLError(\"Input format for 'filename' is not supported.\")\n\n return arg_1"} +{"_id": "doc_5160", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks the libsbml return value and logs error messages.\n\n If 'value' is None, logs an error message constructed using 'message'.\n If 'value' is an integer, it is assumed to be a libSBML return status\n code; if the code is not LIBSBML_OPERATION_SUCCESS, an error message\n constructed using 'message' is logged along with the libSBML text\n explaining the meaning of the code.\n \"\"\"\n if arg_0 is None:\n LOGGER.error('Error: LibSBML returned a null value trying '\n 'to <' + arg_1 + '>.')\n elif type(arg_0) is int:\n if arg_0 == libsbml.LIBSBML_OPERATION_SUCCESS:\n return\n else:\n LOGGER.error('Error encountered trying to <' + arg_1 + '>.')\n LOGGER.error('LibSBML error code {}: {}'.format(str(arg_0),\n libsbml.OperationReturnValue_toString(arg_0).strip()))\n else:\n return"} +{"_id": "doc_5161", "title": "", "text": "def Func(arg_0):\n \"\"\" Creates dictionary of COBRA notes.\n\n Parameters\n ----------\n sbase : libsbml.SBase\n\n Returns\n -------\n dict of notes\n \"\"\"\n arg_1 = arg_0.getNotesString()\n if arg_1 and len(arg_1) > 0:\n arg_2 = r\"<p>\\s*(\\w+\\s*\\w*)\\s*:\\s*([\\w|\\s]+)<\"\n arg_3 = re.findall(arg_2, arg_1)\n arg_4 = {arg_5.strip(): arg_6.strip() for (arg_5, arg_6) in arg_3}\n return {arg_5: arg_6 for arg_5, arg_6 in arg_4.items() if len(arg_6) > 0}\n else:\n return {}"} +{"_id": "doc_5162", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set SBase notes based on dictionary.\n\n Parameters\n ----------\n sbase : libsbml.SBase\n SBML object to set notes on\n notes : notes object\n notes information from cobra object\n \"\"\"\n if arg_1 and len(arg_1) > 0:\n arg_2 = ['<html xmlns = \"http://www.w3.org/1999/xhtml\" >'] + \\\n [\"<p>{}: {}</p>\".format(k, v) for (k, v) in arg_1.items()] + \\\n [\"</html>\"]\n _check(\n arg_0.setNotes(\"\\n\".join(arg_2)),\n \"Setting notes on sbase: {}\".format(arg_0)\n )"} +{"_id": "doc_5163", "title": "", "text": "def Func(arg_0):\n \"\"\"Parses cobra annotations from a given SBase object.\n\n Annotations are dictionaries with the providers as keys.\n\n Parameters\n ----------\n sbase : libsbml.SBase\n SBase from which the SBML annotations are read\n\n Returns\n -------\n dict (annotation dictionary)\n\n FIXME: annotation format must be updated (this is a big collection of\n fixes) - see: https://github.com/opencobra/cobrapy/issues/684)\n \"\"\"\n arg_1 = {}\n\n # SBO term\n if arg_0.isSetSBOTerm():\n # FIXME: correct handling of annotations\n arg_1[\"sbo\"] = arg_0.getSBOTermID()\n\n # RDF annotation\n arg_2 = arg_0.getCVTerms()\n if arg_2 is None:\n return arg_1\n\n for arg_3 in arg_2: # type: libsbml.CVTerm\n for arg_4 in range(arg_3.getNumResources()):\n # FIXME: read and store the qualifier\n\n arg_5 = arg_3.getResourceURI(arg_4)\n arg_6 = URL_IDENTIFIERS_PATTERN.match(arg_5)\n if not arg_6:\n LOGGER.warning(\"%s does not conform to \"\n \"http(s)://identifiers.org/collection/id\", arg_5)\n continue\n\n arg_7, arg_8 = arg_6.group(1), arg_6.group(2)\n if arg_7 in arg_1:\n if isinstance(arg_1[arg_7], string_types):\n arg_1[arg_7] = [arg_1[arg_7]]\n # FIXME: use a list\n if arg_8 not in arg_1[arg_7]:\n arg_1[arg_7].append(arg_8)\n else:\n # FIXME: always in list\n arg_1[arg_7] = arg_8\n\n return arg_1"} +{"_id": "doc_5164", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set SBase annotations based on cobra annotations.\n\n Parameters\n ----------\n sbase : libsbml.SBase\n SBML object to annotate\n annotation : cobra annotation structure\n cobra object with annotation information\n\n FIXME: annotation format must be updated\n (https://github.com/opencobra/cobrapy/issues/684)\n \"\"\"\n\n if not arg_1 or len(arg_1) == 0:\n return\n\n # standardize annotations\n arg_2 = deepcopy(arg_1)\n\n for arg_3, arg_4 in arg_2.items():\n # handling of non-string annotations (e.g. 
integers)\n if isinstance(arg_4, (float, int)):\n arg_4 = str(arg_4)\n if isinstance(arg_4, string_types):\n arg_2[arg_3] = [(\"is\", arg_4)]\n\n for arg_3, arg_4 in arg_2.items():\n for arg_5, arg_6 in enumerate(arg_4):\n if isinstance(arg_6, string_types):\n arg_4[arg_5] = (\"is\", arg_6)\n\n # set metaId\n arg_7 = \"meta_{}\".format(arg_0.getId())\n arg_0.setMetaId(arg_7)\n\n # rdf_items = []\n for arg_8, arg_9 in iteritems(arg_2):\n\n # set SBOTerm\n if arg_8 in [\"SBO\", \"sbo\"]:\n if arg_8 == \"SBO\":\n LOGGER.warning(\"'SBO' provider is deprecated, \"\n \"use 'sbo' provider instead\")\n arg_10 = arg_9[0][1]\n _check(arg_0.setSBOTerm(arg_10),\n \"Setting SBOTerm: {}\".format(arg_10))\n\n # FIXME: sbo should also be written as CVTerm\n continue\n\n for arg_6 in arg_9:\n arg_11, arg_12 = arg_6[0], arg_6[1]\n arg_13 = QUALIFIER_TYPES.get(arg_11, None)\n if arg_13 is None:\n arg_13 = libsbml.BQB_IS\n LOGGER.error(\"Qualifier type is not supported on \"\n \"annotation: '{}'\".format(arg_11))\n\n arg_14 = libsbml.BIOLOGICAL_QUALIFIER\n if arg_11.startswith(\"bqm_\"):\n arg_14 = libsbml.MODEL_QUALIFIER\n\n arg_15 = libsbml.CVTerm() # type: libsbml.CVTerm\n arg_15.setQualifierType(arg_14)\n if arg_14 == libsbml.BIOLOGICAL_QUALIFIER:\n arg_15.setBiologicalQualifierType(arg_13)\n elif arg_14 == libsbml.MODEL_QUALIFIER:\n arg_15.setModelQualifierType(arg_13)\n else:\n raise CobraSBMLError('Unsupported qualifier: '\n '%s' % arg_13)\n arg_16 = \"%s/%s/%s\" % (URL_IDENTIFIERS_PREFIX, arg_8, arg_12)\n arg_15.addResource(arg_16)\n _check(arg_0.addCVTerm(arg_15),\n \"Setting cvterm: {}, resource: {}\".format(arg_15, arg_16))"} +{"_id": "doc_5165", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"String representation of SBMLError.\n\n Parameters\n ----------\n error : libsbml.SBMLError\n k : index of error\n\n Returns\n -------\n string representation of error\n \"\"\"\n arg_2 = arg_0.getPackage()\n if arg_2 == '':\n arg_2 = 'core'\n\n arg_3 = 'E{} ({}): {} ({}, L{}); {}; {}'\n arg_4 = arg_3.format(arg_1, arg_0.getSeverityAsString(),\n arg_0.getCategoryAsString(), arg_2,\n arg_0.getLine(), arg_0.getShortMessage(),\n arg_0.getMessage())\n return arg_4"} +{"_id": "doc_5166", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=20, arg_5=None):\n \"\"\"Calculate the objective value conditioned on all combinations of\n fluxes for a set of chosen reactions\n\n The production envelope can be used to analyze a model's ability to\n produce a given compound conditional on the fluxes for another set of\n reactions, such as the uptake rates. The model is alternately optimized\n with respect to minimizing and maximizing the objective and the\n obtained fluxes are recorded. Ranges to compute production is set to the\n effective\n bounds, i.e., the minimum / maximum fluxes that can be obtained given\n current reaction bounds.\n\n Parameters\n ----------\n model : cobra.Model\n The model to compute the production envelope for.\n reactions : list or string\n A list of reactions, reaction identifiers or a single reaction.\n objective : string, dict, model.solver.interface.Objective, optional\n The objective (reaction) to use for the production envelope. Use the\n model's current objective if left missing.\n carbon_sources : list or string, optional\n One or more reactions or reaction identifiers that are the source of\n carbon for computing carbon (mol carbon in output over mol carbon in\n input) and mass yield (gram product over gram output). 
Only objectives\n with a carbon containing input and output metabolite is supported.\n Will identify active carbon sources in the medium if none are specified.\n points : int, optional\n The number of points to calculate production for.\n threshold : float, optional\n A cut-off under which flux values will be considered to be zero\n (default model.tolerance).\n\n Returns\n -------\n pandas.DataFrame\n A data frame with one row per evaluated point and\n\n - reaction id : one column per input reaction indicating the flux at\n each given point,\n - carbon_source: identifiers of carbon exchange reactions\n\n A column for the maximum and minimum each for the following types:\n\n - flux: the objective flux\n - carbon_yield: if carbon source is defined and the product is a\n single metabolite (mol carbon product per mol carbon feeding source)\n - mass_yield: if carbon source is defined and the product is a\n single metabolite (gram product per 1 g of feeding source)\n\n Examples\n --------\n >>> import cobra.test\n >>> from cobra.flux_analysis import Func\n >>> model = cobra.test.create_test_model(\"textbook\")\n >>> Func(model, [\"EX_glc__D_e\", \"EX_o2_e\"])\n\n \"\"\"\n\n arg_1 = arg_0.reactions.get_by_any(arg_1)\n arg_2 = arg_0.solver.objective if arg_2 is None else arg_2\n arg_6 = dict()\n\n if arg_3 is None:\n arg_7 = find_carbon_sources(arg_0)\n else:\n arg_7 = arg_0.reactions.get_by_any(arg_3)\n\n if arg_7 is None:\n arg_6['carbon_source'] = None\n elif hasattr(arg_7, 'id'):\n arg_6['carbon_source'] = arg_7.id\n else:\n arg_6['carbon_source'] = ', '.join(rxn.id for rxn in arg_7)\n\n arg_5 = normalize_cutoff(arg_0, arg_5)\n\n arg_8 = arg_4 ** len(arg_1)\n\n for arg_9 in ('minimum', 'maximum'):\n arg_6['flux_{}'.format(arg_9)] = full(arg_8, nan, dtype=float)\n arg_6['carbon_yield_{}'.format(arg_9)] = full(\n arg_8, nan, dtype=float)\n arg_6['mass_yield_{}'.format(arg_9)] = full(\n arg_8, nan, dtype=float)\n\n arg_11 = pd.DataFrame(arg_6)\n\n with arg_0:\n arg_0.objective = arg_2\n arg_12 = list(sutil.linear_reaction_coefficients(arg_0))\n\n if len(arg_12) != 1:\n raise ValueError('cannot calculate yields for objectives with '\n 'multiple reactions')\n arg_13 = arg_12[0]\n arg_14 = fva(arg_0, arg_1, fraction_of_optimum=0)\n arg_14[arg_14.abs() < arg_5] = 0.0\n arg_4 = list(product(*[\n linspace(arg_14.at[rxn.id, \"minimum\"],\n arg_14.at[rxn.id, \"maximum\"],\n arg_4, endpoint=True) for rxn in arg_1]))\n arg_16 = pd.DataFrame(arg_4, columns=[rxn.id for rxn in arg_1])\n arg_11 = pd.concat([arg_11, arg_16], axis=1, copy=False)\n add_envelope(arg_0, arg_1, arg_11, arg_7, arg_13, arg_5)\n\n return arg_11"} +{"_id": "doc_5167", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Compute total output per input unit.\n\n Units are typically mol carbon atoms or gram of source and product.\n\n Parameters\n ----------\n input_fluxes : list\n A list of input reaction fluxes in the same order as the\n ``input_components``.\n input_elements : list\n A list of reaction components which are in turn list of numbers.\n output_flux : float\n The output flux value.\n output_elements : list\n A list of stoichiometrically weighted output reaction components.\n\n Returns\n -------\n float\n The ratio between output (mol carbon atoms or grams of product) and\n input (mol carbon atoms or grams of source compounds).\n \"\"\"\n\n arg_4 = sum(\n total_components_flux(flux, components, consumption=True)\n for flux, components in zip(arg_0, arg_1))\n arg_5 = total_components_flux(\n arg_2, arg_3, 
consumption=False)\n try:\n return arg_5 / arg_4\n except ZeroDivisionError:\n return nan"} +{"_id": "doc_5168", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Split metabolites into the atoms times their stoichiometric coefficients.\n\n Parameters\n ----------\n reaction : Reaction\n The metabolic reaction whose components are desired.\n\n Returns\n -------\n list\n Each of the reaction's metabolites' desired carbon elements (if any)\n times that metabolite's stoichiometric coefficient.\n \"\"\"\n arg_1 = [coeff * met.elements.get('C', 0)\n for met, coeff in iteritems(arg_0.metabolites)]\n return [arg_2 for arg_2 in arg_1 if arg_2 != 0]"} +{"_id": "doc_5169", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the metabolite weight times its stoichiometric coefficient.\"\"\"\n\n if len(arg_0.metabolites) != 1:\n raise ValueError('Reaction weight is only defined for single '\n 'metabolite products or educts.')\n\n arg_1, arg_2 = next(iteritems(arg_0.metabolites))\n\n return [arg_2 * arg_1.formula_weight]"} +{"_id": "doc_5170", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Find all active carbon source reactions.\n\n Parameters\n ----------\n model : Model\n A genome-scale metabolic model.\n\n Returns\n -------\n list\n The medium reactions with carbon input flux.\n\n \"\"\"\n\n try:\n arg_0.slim_optimize(error_value=None)\n except OptimizationError:\n return []\n\n arg_1 = arg_0.reactions.get_by_any(list(arg_0.medium))\n arg_2 = [\n (arg_3, total_components_flux(arg_3.flux, reaction_elements(arg_3),\n consumption=True)) for arg_3 in arg_1]\n return [arg_3 for arg_3, arg_4 in arg_2 if arg_4 > 0]"} +{"_id": "doc_5171", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.001, arg_3=None):\n \"\"\"Assesses production capacity.\n\n Assesses the capacity of the model to produce the precursors for the\n reaction and absorb the production of the reaction while the reaction is\n operating at, or above, the specified cutoff.\n\n Parameters\n ----------\n model : cobra.Model\n The cobra model to Func production capacity for\n\n reaction : reaction identifier or cobra.Reaction\n The reaction to Func\n\n flux_coefficient_cutoff : float\n The minimum flux that reaction must carry to be considered active.\n\n solver : basestring\n Solver name. 
If None, the default solver will be used.\n\n Returns\n -------\n bool or dict\n True if the model can produce the precursors and absorb the products\n for the reaction operating at, or above, flux_coefficient_cutoff.\n Otherwise, a dictionary of {'precursor': Status, 'product': Status}.\n Where Status is the results from Func_precursors and\n Func_products, respectively.\n\n \"\"\"\n arg_1 = arg_0.reactions.get_by_any(arg_1)[0]\n with arg_0 as arg_4:\n arg_4.objective = arg_1\n if _optimize_or_value(arg_4, arg_3=arg_3) >= arg_2:\n return True\n else:\n arg_6 = dict()\n arg_6['precursors'] = Func_component(\n arg_0, arg_1, 'reactants', arg_2)\n arg_6['products'] = Func_component(\n arg_0, arg_1, 'products', arg_2)\n return arg_6"} +{"_id": "doc_5172", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.001,\n arg_3=None):\n \"\"\"Assesses the ability of the model to provide sufficient precursors for\n a reaction operating at, or beyond, the specified cutoff.\n\n Deprecated: use assess_component instead\n\n Parameters\n ----------\n model : cobra.Model\n The cobra model to assess production capacity for\n\n reaction : reaction identifier or cobra.Reaction\n The reaction to assess\n\n flux_coefficient_cutoff : float\n The minimum flux that reaction must carry to be considered active.\n\n solver : basestring\n Solver name. If None, the default solver will be used.\n\n Returns\n -------\n bool or dict\n True if the precursors can be simultaneously produced at the\n specified cutoff. False, if the model has the capacity to produce\n each individual precursor at the specified threshold but not all\n precursors at the required level simultaneously. Otherwise a\n dictionary of the required and the produced fluxes for each reactant\n that is not produced in sufficient quantities.\n\n \"\"\"\n warn('use assess_component instead', DeprecationWarning)\n return assess_component(arg_0, arg_1, 'reactants',\n arg_2, arg_3)"} +{"_id": "doc_5173", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Modify a model so all feasible flux distributions are loopless.\n\n In most cases you probably want to use the much faster `loopless_solution`.\n May be used in cases where you want to add complex constraints and\n objecives (for instance quadratic objectives) to the model afterwards\n or use an approximation of Gibbs free energy directions in you model.\n Adds variables and constraints to a model which will disallow flux\n distributions with loops. The used formulation is described in [1]_.\n This function *will* modify your model.\n\n Parameters\n ----------\n model : cobra.Model\n The model to which to add the constraints.\n zero_cutoff : positive float, optional\n Cutoff used for null space. Coefficients with an absolute value smaller\n than `zero_cutoff` are considered to be zero (default model.tolerance).\n\n Returns\n -------\n Nothing\n\n References\n ----------\n .. [1] Elimination of thermodynamically infeasible loops in steady-state\n metabolic models. Schellenberger J, Lewis NE, Palsson BO. Biophys J.\n 2011 Feb 2;100(3):544-53. doi: 10.1016/j.bpj.2010.12.3707. Erratum\n in: Biophys J. 
2011 Mar 2;100(5):1381.\n \"\"\"\n arg_1 = normalize_cutoff(arg_0, arg_1)\n\n arg_2 = [arg_8 for arg_8, r in enumerate(arg_0.reactions) if not r.boundary]\n arg_3 = create_stoichiometric_matrix(arg_0)[:, numpy.array(arg_2)]\n arg_4 = nullspace(arg_3).T\n arg_5 = max(max(abs(b) for b in r.bounds) for r in arg_0.reactions)\n arg_6 = arg_0.problem\n\n # Add indicator variables and new constraints\n arg_7 = []\n for arg_8 in arg_2:\n arg_9 = arg_0.reactions[arg_8]\n # indicator variable a_i\n arg_10 = arg_6.Variable(\"indicator_\" + arg_9.id, type=\"binary\")\n # -M*(1 - a_i) <= v_i <= M*a_i\n arg_11 = arg_6.Constraint(\n arg_9.flux_expression - arg_5 * arg_10,\n lb=-arg_5, ub=0, arg_15=\"on_off_\" + arg_9.id)\n # -(max_bound + 1) * a_i + 1 <= G_i <= -(max_bound + 1) * a_i + 1000\n arg_12 = arg_6.Variable(\"delta_g_\" + arg_9.id)\n arg_13 = arg_6.Constraint(\n arg_12 + (arg_5 + 1) * arg_10,\n lb=1, ub=arg_5, arg_15=\"delta_g_range_\" + arg_9.id)\n arg_7.extend([arg_10, arg_11, arg_12, arg_13])\n\n arg_0.add_cons_vars(arg_7)\n\n # Add nullspace constraints for G_i\n for arg_8, arg_14 in enumerate(arg_4):\n arg_15 = \"nullspace_constraint_\" + str(arg_8)\n arg_16 = arg_6.Constraint(Zero, lb=0, ub=0, arg_15=arg_15)\n arg_0.add_cons_vars([arg_16])\n arg_17 = {arg_0.variables[\n \"delta_g_\" + arg_0.reactions[ridx].id]: arg_14[arg_8]\n for arg_8, ridx in enumerate(arg_2) if\n abs(arg_14[arg_8]) > arg_1}\n arg_0.constraints[arg_15].set_linear_coefficients(arg_17)"} +{"_id": "doc_5174", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add constraints for CycleFreeFlux.\"\"\"\n arg_0.objective = arg_0.solver.interface.Objective(\n Zero, direction=\"min\", sloppy=True)\n arg_3 = []\n for arg_4 in arg_0.reactions:\n arg_5 = arg_1[arg_4.id]\n if arg_4.boundary:\n arg_4.bounds = (arg_5, arg_5)\n continue\n if arg_5 >= 0:\n arg_4.bounds = max(0, arg_4.lower_bound), max(arg_5, arg_4.upper_bound)\n arg_3.append(arg_4.forward_variable)\n else:\n arg_4.bounds = min(arg_5, arg_4.lower_bound), min(0, arg_4.upper_bound)\n arg_3.append(arg_4.reverse_variable)\n\n arg_0.objective.set_linear_coefficients({arg_7: 1.0 for arg_7 in arg_3})"} +{"_id": "doc_5175", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Convert an existing solution to a loopless one.\n\n Removes as many loops as possible (see Notes).\n Uses the method from CycleFreeFlux [1]_ and is much faster than\n `add_loopless` and should therefore be the preferred option to get loopless\n flux distributions.\n\n Parameters\n ----------\n model : cobra.Model\n The model to which to add the constraints.\n fluxes : dict\n A dictionary {rxn_id: flux} that assigns a flux to each reaction. 
If\n not None will use the provided flux values to obtain a close loopless\n solution.\n\n Returns\n -------\n cobra.Solution\n A solution object containing the fluxes with the least amount of\n loops possible or None if the optimization failed (usually happening\n if the flux distribution in `fluxes` is infeasible).\n\n Notes\n -----\n The returned flux solution has the following properties:\n\n - it contains the minimal number of loops possible and no loops at all if\n all flux bounds include zero\n - it has an objective value close to the original one and the same\n objective value id the objective expression can not form a cycle\n (which is usually true since it consumes metabolites)\n - it has the same exact exchange fluxes as the previous solution\n - all fluxes have the same sign (flow in the same direction) as the\n previous solution\n\n References\n ----------\n .. [1] CycleFreeFlux: efficient removal of thermodynamically infeasible\n loops from flux distributions. Desouki AA, Jarre F, Gelius-Dietrich\n G, Lercher MJ. Bioinformatics. 2015 Jul 1;31(13):2159-65. doi:\n 10.1093/bioinformatics/btv096.\n \"\"\"\n # Need to reoptimize otherwise spurious solution artifacts can cause\n # all kinds of havoc\n # TODO: check solution status\n if arg_1 is None:\n arg_2 = arg_0.optimize(objective_sense=None)\n arg_1 = arg_2.fluxes\n\n with arg_0:\n arg_3 = arg_0.problem\n # Needs one fixed bound for cplex...\n arg_4 = arg_3.Constraint(\n arg_0.objective.expression,\n lb=-1e32, name=\"loopless_obj_constraint\")\n arg_0.add_cons_vars([arg_4])\n _add_cycle_free(arg_0, arg_1)\n arg_5 = arg_0.optimize(objective_sense=None)\n arg_5.objective_value = arg_4.primal\n\n return arg_5"} +{"_id": "doc_5176", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Plugin to get a loopless FVA solution from single FVA iteration.\n\n Assumes the following about `model` and `reaction`:\n 1. the model objective is set to be `reaction`\n 2. the model has been optimized and contains the minimum/maximum flux for\n `reaction`\n 3. the model contains an auxiliary variable called \"fva_old_objective\"\n denoting the previous objective\n\n Parameters\n ----------\n model : cobra.Model\n The model to be used.\n reaction : cobra.Reaction\n The reaction currently minimized/maximized.\n solution : boolean, optional\n Whether to return the entire solution or only the minimum/maximum for\n `reaction`.\n zero_cutoff : positive float, optional\n Cutoff used for loop removal. Fluxes with an absolute value smaller\n than `zero_cutoff` are considered to be zero (default model.tolerance).\n\n Returns\n -------\n single float or dict\n Returns the minimized/maximized flux through `reaction` if\n all_fluxes == False (default). Otherwise returns a loopless flux\n solution containing the minimum/maximum flux for `reaction`.\n \"\"\"\n arg_3 = normalize_cutoff(arg_0, arg_3)\n\n arg_4 = arg_0.objective.value\n arg_5 = get_solution(arg_0)\n arg_6 = arg_0.objective.direction\n\n # boundary reactions can not be part of cycles\n if arg_1.boundary:\n if arg_2:\n return arg_5\n else:\n return arg_4\n\n with arg_0:\n _add_cycle_free(arg_0, arg_5.fluxes)\n arg_0.slim_optimize()\n\n # If the previous optimum is maintained in the loopless solution it was\n # loopless and we are done\n if abs(arg_1.flux - arg_4) < arg_3:\n if arg_2:\n return arg_5\n return arg_4\n\n # If previous optimum was not in the loopless solution create a new\n # almost loopless solution containing only loops including the current\n # reaction. 
Than remove all of those loops.\n arg_7 = get_solution(arg_0).fluxes\n arg_1.bounds = (arg_4, arg_4)\n arg_0.slim_optimize()\n arg_9 = get_solution(arg_0).fluxes\n\n with arg_0:\n # find the reactions with loops using the current reaction and remove\n # the loops\n for arg_10 in arg_0.reactions:\n arg_11 = arg_10.id\n if ((abs(arg_7[arg_11]) < arg_3) and\n (abs(arg_9[arg_11]) > arg_3)):\n arg_10.bounds = max(0, arg_10.lower_bound), min(0, arg_10.upper_bound)\n\n if arg_2:\n arg_12 = arg_0.optimize()\n else:\n arg_0.slim_optimize()\n arg_12 = arg_1.flux\n arg_0.objective.direction = arg_6\n return arg_12"} +{"_id": "doc_5177", "title": "", "text": "def Func(arg_0, arg_1='dense', arg_2=None):\n \"\"\"Return a stoichiometric array representation of the given model.\n\n The the columns represent the reactions and rows represent\n metabolites. S[i,j] therefore contains the quantity of metabolite `i`\n produced (negative for consumed) by reaction `j`.\n\n Parameters\n ----------\n model : cobra.Model\n The cobra model to construct the matrix for.\n array_type : string\n The type of array to construct. if 'dense', return a standard\n numpy.array, 'dok', or 'lil' will construct a sparse array using\n scipy of the corresponding type and 'DataFrame' will give a\n pandas `DataFrame` with metabolite indices and reaction columns\n dtype : data-type\n The desired data-type for the array. If not given, defaults to float.\n\n Returns\n -------\n matrix of class `dtype`\n The stoichiometric matrix for the given model.\n \"\"\"\n if arg_1 not in ('DataFrame', 'dense') and not dok_matrix:\n raise ValueError('Sparse matrices require scipy')\n\n if arg_2 is None:\n arg_2 = np.float64\n\n arg_3 = {\n 'dense': np.zeros, 'dok': dok_matrix, 'lil': lil_matrix,\n 'DataFrame': np.zeros,\n }\n\n arg_4 = len(arg_0.metabolites)\n arg_5 = len(arg_0.reactions)\n arg_6 = arg_3[arg_1]((arg_4, arg_5),\n arg_2=arg_2)\n\n arg_7 = arg_0.metabolites.index\n arg_8 = arg_0.reactions.index\n\n for arg_9 in arg_0.reactions:\n for arg_10, arg_11 in iteritems(arg_9.metabolites):\n arg_6[arg_7(arg_10), arg_8(arg_9)] = arg_11\n\n if arg_1 == 'DataFrame':\n arg_12 = [met.id for met in arg_0.metabolites]\n arg_13 = [rxn.id for rxn in arg_0.reactions]\n return pd.DataFrame(arg_6, index=arg_12, columns=arg_13)\n\n else:\n return arg_6"} +{"_id": "doc_5178", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=0.03, arg_4=1E-03):\n r\"\"\"\n Add constraints and objective for ROOM.\n\n This function adds variables and constraints for applying regulatory\n on/off minimization (ROOM) to the model.\n\n Parameters\n ----------\n model : cobra.Model\n The model to add ROOM constraints and objective to.\n solution : cobra.Solution, optional\n A previous solution to use as a reference. If no solution is given,\n one will be computed using pFBA.\n linear : bool, optional\n Whether to use the linear ROOM formulation or not (default False).\n delta: float, optional\n The relative tolerance range which is additive in nature\n (default 0.03).\n epsilon: float, optional\n The absolute range of tolerance which is multiplicative\n (default 0.001).\n\n Notes\n -----\n The formulation used here is the same as stated in the original paper [1]_.\n The mathematical expression is given below:\n\n minimize \\sum_{i=1}^m y^i\n s.t. 
Sv = 0\n v_min <= v <= v_max\n v_j = 0\n j \u2208 A\n for 1 <= i <= m\n v_i - y_i(v_{max,i} - w_i^u) <= w_i^u (1)\n v_i - y_i(v_{min,i} - w_i^l) <= w_i^l (2)\n y_i \u2208 {0,1} (3)\n w_i^u = w_i + \\delta|w_i| + \\epsilon\n w_i^l = w_i - \\delta|w_i| - \\epsilon\n\n So, for the linear version of the ROOM , constraint (3) is relaxed to\n 0 <= y_i <= 1.\n\n See Also\n --------\n pfba : parsimonious FBA\n\n References\n ----------\n .. [1] Tomer Shlomi, Omer Berkman and Eytan Ruppin, \"Regulatory on/off\n minimization of metabolic flux changes after genetic perturbations\",\n PNAS 2005 102 (21) 7695-7700; doi:10.1073/pnas.0406346102\n\n \"\"\"\n\n if 'room_old_objective' in arg_0.solver.variables:\n raise ValueError('model is already adjusted for ROOM')\n\n # optimizes if no reference solution is provided\n if arg_1 is None:\n arg_1 = pfba(arg_0)\n\n arg_5 = arg_0.problem\n arg_6 = arg_5.Variable(\"room_old_objective\", ub=arg_1.objective_value)\n arg_7 = arg_5.Constraint(\n arg_0.solver.objective.expression - arg_6,\n ub=0.0,\n lb=0.0,\n name=\"room_old_objective_constraint\"\n )\n arg_0.objective = arg_5.Objective(Zero, direction=\"min\", sloppy=True)\n arg_9 = [arg_6, arg_7]\n arg_10 = []\n\n for arg_11 in arg_0.reactions:\n arg_12 = arg_1.fluxes[arg_11.id]\n\n if arg_2:\n arg_13 = arg_5.Variable(\"y_\" + arg_11.id, lb=0, ub=1)\n arg_3 = arg_4 = 0.0\n else:\n arg_13 = arg_5.Variable(\"y_\" + arg_11.id, type=\"binary\")\n\n # upper constraint\n arg_14 = arg_12 + (arg_3 * abs(arg_12)) + arg_4\n arg_15 = arg_5.Constraint(\n arg_11.flux_expression - arg_13 * (arg_11.upper_bound - arg_14),\n ub=arg_14, name=\"room_constraint_upper_\" + arg_11.id)\n # lower constraint\n arg_16 = arg_12 - (arg_3 * abs(arg_12)) - arg_4\n arg_17 = arg_5.Constraint(\n arg_11.flux_expression - arg_13 * (arg_11.lower_bound - arg_16),\n lb=arg_16, name=\"room_constraint_lower_\" + arg_11.id)\n arg_9.extend([arg_13, arg_15, arg_17])\n arg_10.append(arg_13)\n\n arg_0.add_cons_vars(arg_9)\n arg_0.objective.set_linear_coefficients({arg_18: 1.0 for arg_18 in arg_10})"} +{"_id": "doc_5179", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"optgp\", arg_3=100, arg_4=1, arg_5=None):\n \"\"\"Sample valid flux distributions from a cobra model.\n\n The function Funcs valid flux distributions from a cobra model.\n Currently we support two methods:\n\n 1. 'optgp' (default) which uses the OptGPSampler that supports parallel\n sampling [1]_. Requires large numbers of Funcs to be performant\n (n < 1000). For smaller Funcs 'achr' might be better suited.\n\n or\n\n 2. 'achr' which uses artificial centering hit-and-run. This is a single\n process method with good convergence [2]_.\n\n Parameters\n ----------\n model : cobra.Model\n The model from which to Func flux distributions.\n n : int\n The number of Funcs to obtain. When using 'optgp' this must be a\n multiple of `processes`, otherwise a larger number of Funcs will be\n returned.\n method : str, optional\n The sampling algorithm to use.\n thinning : int, optional\n The thinning factor of the generated sampling chain. A thinning of 10\n means Funcs are returned every 10 steps. Defaults to 100 which in\n benchmarks gives approximately uncorrelated Funcs. If set to one\n will return all iterates.\n processes : int, optional\n Only used for 'optgp'. The number of processes used to generate\n Funcs.\n seed : int > 0, optional\n The random number seed to be used. Initialized to current time stamp\n if None.\n\n Returns\n -------\n pandas.DataFrame\n The generated flux Funcs. 
Each row corresponds to a Func of the\n fluxes and the columns are the reactions.\n\n Notes\n -----\n The Funcrs have a correction method to ensure equality feasibility for\n long-running chains, however this will only work for homogeneous models,\n meaning models with no non-zero fixed variables or constraints (\n right-hand side of the equalities are zero).\n\n References\n ----------\n .. [1] Megchelenbrink W, Huynen M, Marchiori E (2014)\n optGpSampler: An Improved Tool for Uniformly Sampling the Solution-Space\n of Genome-Scale Metabolic Networks.\n PLoS ONE 9(2): e86587.\n .. [2] Direction Choice for Accelerated Convergence in Hit-and-Run Sampling\n David E. Kaufman Robert L. Smith\n Operations Research 199846:1 , 84-95\n\n \"\"\"\n\n if arg_2 == \"optgp\":\n arg_6 = OptGPSampler(arg_0, arg_4, arg_3=arg_3, arg_5=arg_5)\n elif arg_2 == \"achr\":\n arg_6 = ACHRSampler(arg_0, arg_3=arg_3, arg_5=arg_5)\n else:\n raise ValueError(\"method must be 'optgp' or 'achr'!\")\n\n return pandas.DataFrame(columns=[arg_7.id for arg_7 in arg_0.reactions],\n data=arg_6.Func(arg_1))"} +{"_id": "doc_5180", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Optimizely template tag.\n\n Renders Javascript code to set-up A/B testing. You must supply\n your Optimizely account number in the ``OPTIMIZELY_ACCOUNT_NUMBER``\n setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return OptimizelyNode()"} +{"_id": "doc_5181", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Clicky tracking template tag.\n\n Renders Javascript code to track page visits. You must supply\n your Clicky Site ID (as a string) in the ``CLICKY_SITE_ID``\n setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return ClickyNode()"} +{"_id": "doc_5182", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Bottom Chartbeat template tag.\n\n Render the bottom Javascript code for Chartbeat. You must supply\n your Chartbeat User ID (as a string) in the ``CHARTBEAT_USER_ID``\n setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return ChartbeatBottomNode()"} +{"_id": "doc_5183", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Spring Metrics tracking template tag.\n\n Renders Javascript code to track page visits. You must supply\n your Spring Metrics Tracking ID in the\n ``SPRING_METRICS_TRACKING_ID`` setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return SpringMetricsNode()"} +{"_id": "doc_5184", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n SnapEngage set-up template tag.\n\n Renders Javascript code to set-up SnapEngage chat. 
You must supply\n your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return SnapEngageNode()"} +{"_id": "doc_5185", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Coerce strings to hashable bytes.\n \"\"\"\n if isinstance(arg_0, bytes):\n return arg_0\n elif isinstance(arg_0, str):\n return arg_0.encode('ascii') # Fail on anything non-ASCII.\n else:\n raise TypeError(arg_0)"} +{"_id": "doc_5186", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a SHA-256 HMAC `user_hash` as expected by Intercom, if configured.\n\n Return None if the `INTERCOM_HMAC_SECRET_KEY` setting is not configured.\n \"\"\"\n if getattr(settings, 'INTERCOM_HMAC_SECRET_KEY', None):\n return hmac.new(\n key=_hashable_bytes(settings.INTERCOM_HMAC_SECRET_KEY),\n msg=_hashable_bytes(arg_0),\n digestmod=hashlib.sha256,\n ).hexdigest()\n else:\n return None"} +{"_id": "doc_5187", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Intercom.io template tag.\n\n Renders Javascript code to Func.io testing. You must supply\n your APP ID account number in the ``INTERCOM_APP_ID``\n setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return IntercomNode()"} +{"_id": "doc_5188", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n UserVoice tracking template tag.\n\n Renders Javascript code to track page visits. You must supply\n your UserVoice Widget Key in the ``USERVOICE_WIDGET_KEY``\n setting or the ``Func_widget_key`` template context variable.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return UserVoiceNode()"} +{"_id": "doc_5189", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Piwik tracking template tag.\n\n Renders Javascript code to track page visits. You must supply\n your Piwik domain (plus optional URI path), and tracked site ID\n in the ``PIWIK_DOMAIN_PATH`` and the ``PIWIK_SITE_ID`` setting.\n\n Custom variables can be passed in the ``Func_vars`` context\n variable. It is an iterable of custom variables as tuples like:\n ``(index, name, value[, scope])`` where scope may be ``'page'``\n (default) or ``'visit'``. Index should be an integer and the\n other parameters should be strings.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return PiwikNode()"} +{"_id": "doc_5190", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return a constant from ``django.conf.settings``. 
The `setting`\n argument is the constant name, the `value_re` argument is a regular\n expression used to validate the setting value and the `invalid_msg`\n argument is used as exception message if the value is not valid.\n \"\"\"\n try:\n arg_3 = getattr(settings, arg_0)\n except AttributeError:\n raise AnalyticalException(\"%s setting: not found\" % arg_0)\n if not arg_3:\n raise AnalyticalException(\"%s setting is not set\" % arg_0)\n arg_3 = str(arg_3)\n if not arg_1.search(arg_3):\n raise AnalyticalException(\"%s setting: %s: '%s'\"\n % (arg_0, arg_2, arg_3))\n return arg_3"} +{"_id": "doc_5191", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Return whether the visitor is coming from an internal IP address,\n based on information from the template context.\n\n The prefix is used to allow different analytics services to have\n different notions of internal addresses.\n \"\"\"\n try:\n arg_2 = arg_0['request']\n arg_3 = arg_2.META.get('HTTP_X_FORWARDED_FOR', '')\n if not arg_3:\n arg_3 = arg_2.META.get('REMOTE_ADDR', '')\n if not arg_3:\n return False\n\n arg_4 = None\n if arg_1 is not None:\n arg_4 = getattr(settings, '%s_INTERNAL_IPS' % arg_1, None)\n if arg_4 is None:\n arg_4 = getattr(settings, 'ANALYTICAL_INTERNAL_IPS', None)\n if arg_4 is None:\n arg_4 = getattr(settings, 'INTERNAL_IPS', None)\n\n return arg_3 in (arg_4 or [])\n except (KeyError, AttributeError):\n return False"} +{"_id": "doc_5192", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Mixpanel tracking template tag.\n\n Renders Javascript code to track page visits. You must supply\n your Mixpanel token in the ``MIXPANEL_API_TOKEN`` setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return MixpanelNode()"} +{"_id": "doc_5193", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Olark set-up template tag.\n\n Renders Javascript code to set-up Olark chat. You must supply\n your site ID in the ``OLARK_SITE_ID`` setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return OlarkNode()"} +{"_id": "doc_5194", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Clickmap tracker template tag.\n\n Renders Javascript code to track page visits. You must supply\n your Func tracker ID (as a string) in the ``CLICKMAP_TRACKER_ID``\n setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return ClickmapNode()"} +{"_id": "doc_5195", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gaug.es template tag.\n\n Renders Javascript code to gaug.es testing. You must supply\n your Site ID account number in the ``GAUGES_SITE_ID``\n setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return GaugesNode()"} +{"_id": "doc_5196", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n HubSpot tracking template tag.\n\n Renders Javascript code to track page visits. You must supply\n your portal ID (as a string) in the ``HUBSPOT_PORTAL_ID`` setting.\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) > 1:\n raise TemplateSyntaxError(\"'%s' takes no arguments\" % arg_2[0])\n return HubSpotNode()"} +{"_id": "doc_5197", "title": "", "text": "def Func():\n \"\"\"Manage the printing and in-place updating of a line of characters\n\n .. 
note::\n If the string is longer than a line, then in-place updating may not\n work (it will print a new line at each refresh).\n \"\"\"\n arg_0 = [0]\n\n def p(arg_1):\n arg_1 = next(spinner) + ' ' + arg_1\n arg_2 = len(arg_1)\n arg_3 = '\\r' + arg_1 + (' ' * max(arg_0[0] - arg_2, 0))\n sys.stdout.write(arg_3)\n sys.stdout.flush()\n arg_0[0] = arg_2\n return p"} +{"_id": "doc_5198", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Open a subprocess and stream its output without hard-blocking.\n\n :param cmd: the command to execute within the subprocess\n :type cmd: str\n\n :param callback: function that intakes the subprocess' stdout line by line.\n It is called for each line received from the subprocess' stdout stream.\n :type callback: Callable[[Context], bool]\n\n :param timeout: the timeout time of the subprocess\n :type timeout: float\n\n :raises TimeoutError: if the subprocess' execution time exceeds\n the timeout time\n\n :return: the return code of the executed subprocess\n :rtype: int\n \"\"\"\n if os.name == 'nt': # pragma: no cover\n arg_3 = subprocess.Popen(\n shlex.split(arg_0),\n arg_4=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n arg_4 = arg_3.stdout\n else:\n arg_5, arg_6 = os.openpty()\n arg_3 = subprocess.Popen(\n shlex.split(arg_0, posix=True),\n arg_4=arg_6,\n stderr=arg_6\n )\n arg_4 = os.fdopen(arg_5)\n os.close(arg_6)\n\n def kill(arg_7):\n \"\"\"Kill the specified process on Timer completion\"\"\"\n try:\n arg_7.kill()\n except OSError:\n pass\n\n # python 2-3 agnostic process timer\n arg_8 = Timer(arg_2, kill, [arg_3])\n arg_8.setDaemon(True)\n arg_8.start()\n\n while arg_3.returncode is None:\n try:\n if os.name == 'nt': # pragma: no cover\n arg_9 = arg_4.readline()\n # windows gives readline() raw stdout as a b''\n # need to decode it\n arg_9 = arg_9.decode(\"utf-8\")\n if arg_9: # ignore empty strings and None\n arg_1(arg_9.rstrip())\n else:\n while True:\n arg_9 = arg_4.readline()\n if not arg_9:\n break\n arg_1(arg_9.rstrip())\n except (IOError, OSError):\n # This seems to happen on some platforms, including TravisCI.\n # It seems like it's ok to just let this pass here, you just\n # won't get as nice feedback.\n pass\n if not arg_8.is_alive():\n raise TimeoutError(\"subprocess running command '{}' timed out after {} seconds\".format(arg_0, arg_2))\n arg_3.poll()\n\n # we have returned from the subprocess cancel the timer if it is running\n arg_8.cancel()\n\n return arg_3.returncode"} +{"_id": "doc_5199", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Compute an exit code for mutmut mutation testing\n\n The following exit codes are available for mutmut:\n * 0 if all mutants were killed (OK_KILLED)\n * 1 if a fatal error occurred\n * 2 if one or more mutants survived (BAD_SURVIVED)\n * 4 if one or more mutants timed out (BAD_TIMEOUT)\n * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)\n\n Exit codes 1 to 8 will be bit-ORed so that it is possible to know what\n different mutant statuses occurred during mutation testing.\n\n :param exception:\n :type exception: Exception\n :param config:\n :type config: Config\n\n :return: integer noting the exit code of the mutation tests.\n :rtype: int\n \"\"\"\n arg_2 = 0\n if arg_1 is not None:\n arg_2 = arg_2 | 1\n if arg_0.surviving_mutants > 0:\n arg_2 = arg_2 | 2\n if arg_0.surviving_mutants_timeout > 0:\n arg_2 = arg_2 | 4\n if arg_0.suspicious_mutants > 0:\n arg_2 = arg_2 | 8\n return arg_2"} +{"_id": "doc_5200", "title": "", "text": "def Func(arg_0, arg_1):\n 
\"\"\"Called when the specified characteristic has changed its value.\"\"\"\n # Called when a characteristic is changed. Get the on_changed handler\n # for this characteristic (if it exists) and call it.\n arg_2 = arg_0._char_on_changed.get(arg_1, None)\n if arg_2 is not None:\n arg_2(arg_1.value().bytes().tobytes())\n # Also tell the characteristic that it has a new value.\n # First get the service that is associated with this characteristic.\n arg_3 = characteristic_list().get(arg_1)\n if arg_3 is not None:\n arg_3._value_read.set()"} +{"_id": "doc_5201", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Called when the specified descriptor has changed its value.\"\"\"\n # Tell the descriptor it has a new value to read.\n arg_2 = descriptor_list().get(arg_1)\n if arg_2 is not None:\n arg_2._value_read.set()"} +{"_id": "doc_5202", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Start scanning for BLE devices.\"\"\"\n get_provider()._central_manager.scanForPeripheralsWithServices_options_(None, None)\n arg_0._is_scanning = True"} +{"_id": "doc_5203", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Stop scanning for BLE devices.\"\"\"\n get_provider()._central_manager.stopScan()\n arg_0._is_scanning = False"} +{"_id": "doc_5204", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Power on Bluetooth.\"\"\"\n # Turn on bluetooth and wait for powered on event to be set.\n arg_0._powered_on.clear()\n IOBluetoothPreferenceSetControllerPowerState(1)\n if not arg_0._powered_on.wait(arg_1):\n raise RuntimeError('Exceeded timeout waiting for adapter to power on!')"} +{"_id": "doc_5205", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Power off Bluetooth.\"\"\"\n # Turn off bluetooth.\n arg_0._powered_off.clear()\n IOBluetoothPreferenceSetControllerPowerState(0)\n if not arg_0._powered_off.wait(arg_1):\n raise RuntimeError('Exceeded timeout waiting for adapter to power off!')"} +{"_id": "doc_5206", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Find the first available device that supports this service and return\n it, or None if no device is found. Will wait for up to timeout_sec\n seconds to find the device.\n \"\"\"\n return get_provider().Func(service_uuids=arg_0.ADVERTISED, arg_1=arg_1)"} +{"_id": "doc_5207", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"Wait until the specified device has Funced the expected services\n and characteristics for this service. Should be called once before other\n calls are made on the service. Returns true if the service has been\n Funced in the specified timeout, or false if not Funced.\n \"\"\"\n arg_1.Func(arg_0.SERVICES, arg_0.CHARACTERISTICS, arg_2)"} +{"_id": "doc_5208", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the first child service found that has the specified\n UUID. 
Will return None if no service that matches is found.\n \"\"\"\n for arg_2 in arg_0.list_services():\n if arg_2.uuid == arg_1:\n return arg_2\n return None"} +{"_id": "doc_5209", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a list of GattService objects that have been discovered for\n this device.\n \"\"\"\n return map(BluezGattService,\n get_provider()._get_objects(_SERVICE_INTERFACE,\n arg_0._device.object_path))"} +{"_id": "doc_5210", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a list of UUIDs for services that are Func by this\n device.\n \"\"\"\n arg_1 = []\n # Get UUIDs property but wrap it in a try/except to catch if the property\n # doesn't exist as it is optional.\n try:\n arg_1 = arg_0._props.Get(_INTERFACE, 'UUIDs')\n except dbus.exceptions.DBusException as ex:\n # Ignore error if device has no UUIDs property (i.e. might not be\n # a BLE device).\n if ex.get_dbus_name() != 'org.freedesktop.DBus.Error.InvalidArgs':\n raise ex\n return [uuid.UUID(str(arg_2)) for arg_2 in arg_1]"} +{"_id": "doc_5211", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the first child descriptor found that has the specified\n UUID. Will return None if no descriptor that matches is found.\n \"\"\"\n for arg_2 in arg_0.list_descriptors():\n if arg_2.uuid == arg_1:\n return arg_2\n return None"} +{"_id": "doc_5212", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Read the value of this characteristic.\"\"\"\n # Kick off a query to read the value of the characteristic, then wait\n # for the result to return asyncronously.\n arg_0._value_read.clear()\n arg_0._device._peripheral.readValueForCharacteristic_(arg_0._characteristic)\n if not arg_0._value_read.wait(arg_1):\n raise RuntimeError('Exceeded timeout waiting to read characteristic value!')\n return arg_0._characteristic.value()"} +{"_id": "doc_5213", "title": "", "text": "def Func(arg_0):\n \"\"\"Read the value of this descriptor.\"\"\"\n pass\n # Kick off a query to read the value of the descriptor, then wait\n # for the result to return asyncronously.\n arg_0._value_read.clear()\n arg_0._device._peripheral.readValueForDescriptor(arg_0._descriptor)\n if not arg_0._value_read.wait(timeout_sec):\n raise RuntimeError('Exceeded timeout waiting to read characteristic value!')\n return arg_0._value"} +{"_id": "doc_5214", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Called when the BLE adapter found a device while scanning, or has\n new advertisement data for a device.\n \"\"\"\n logger.debug('centralManager_didDiscoverPeripheral_advertisementData_RSSI called')\n # Log name of device found while scanning.\n #logger.debug('Saw device advertised with name: {0}'.format(peripheral.name()))\n # Make sure the device is added to the list of devices and then update\n # its advertisement state.\n arg_5 = device_list().get(arg_2)\n if arg_5 is None:\n arg_5 = device_list().add(arg_2, CoreBluetoothDevice(arg_2))\n arg_5._update_advertised(arg_3)"} +{"_id": "doc_5215", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Called when a device is connected.\"\"\"\n logger.debug('centralManager_didConnectPeripheral called')\n # Setup peripheral delegate and kick off service discovery. 
For now just\n # assume all services need to be discovered.\n arg_2.setDelegate_(arg_0)\n arg_2.discoverServices_(None)\n # Fire connected event for device.\n arg_3 = device_list().get(arg_2)\n if arg_3 is not None:\n arg_3._set_connected()"} +{"_id": "doc_5216", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Called when descriptor value was read or updated.\"\"\"\n logger.debug('peripheral_didUpdateValueForDescriptor_error called')\n # Stop if there was some kind of error.\n if arg_3 is not None:\n return\n # Notify the device about the updated descriptor value.\n arg_4 = device_list().get(arg_1)\n if arg_4 is not None:\n arg_4._descriptor_changed(arg_2)"} +{"_id": "doc_5217", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Called when a new RSSI value for the peripheral is available.\"\"\"\n logger.debug('peripheral_didReadRSSI_error called')\n # Note this appears to be completely undocumented at the time of this\n # writing. Can see more details at:\n # http://stackoverflow.com/questions/25952218/ios-8-corebluetooth-deprecated-rssi-methods\n # Stop if there was some kind of error.\n if arg_3 is not None:\n return\n # Notify the device about the updated RSSI value.\n arg_4 = device_list().get(arg_1)\n if arg_4 is not None:\n arg_4._rssi_changed(arg_2)"} +{"_id": "doc_5218", "title": "", "text": "def Func(arg_0):\n \"\"\"Clear any internally cached BLE device data. Necessary in some cases\n to prevent issues with stale device data getting cached by the OS.\n \"\"\"\n # Go through and remove any device that isn't currently connected.\n for arg_1 in arg_0.list_devices():\n # Skip any connected device.\n if arg_1.is_connected:\n continue\n # Remove this device. First get the adapter associated with the device.\n arg_2 = dbus.Interface(arg_0._bus.get_object('org.bluez', arg_1._adapter),\n _ADAPTER_INTERFACE)\n # Now call RemoveDevice on the adapter to remove the device from\n # bluez's DBus hierarchy.\n arg_2.RemoveDevice(arg_1._device.object_path)"} +{"_id": "doc_5219", "title": "", "text": "def Func(arg_0, arg_1=[]):\n \"\"\"Disconnect any connected devices that have the specified list of\n service UUIDs. The default is an empty list which means all devices\n are disconnected.\n \"\"\"\n arg_1 = set(arg_1)\n for arg_2 in arg_0.list_devices():\n # Skip devices that aren't connected.\n if not arg_2.is_connected:\n continue\n arg_3 = set(map(lambda x: x.uuid, arg_2.list_services()))\n if arg_3 >= arg_1:\n # Found a device that has at least the requested services, now\n # disconnect from it.\n arg_2.disconnect()"} +{"_id": "doc_5220", "title": "", "text": "def Func(arg_0):\n \"\"\"Print tree of all bluez objects, useful for debugging.\"\"\"\n # This is based on the bluez sample code get-managed-objects.py.\n arg_1 = arg_0._bluez.GetManagedObjects()\n for arg_2 in arg_1.keys():\n print(\"[ %s ]\" % (arg_2))\n arg_3 = arg_1[arg_2]\n for arg_4 in arg_3.keys():\n if arg_4 in [\"org.freedesktop.DBus.Introspectable\",\n \"org.freedesktop.DBus.Properties\"]:\n continue\n print(\" %s\" % (arg_4))\n arg_5 = arg_3[arg_4]\n for arg_6 in arg_5.keys():\n print(\" %s = %s\" % (arg_6, arg_5[arg_6]))"} +{"_id": "doc_5221", "title": "", "text": "def Func(arg_0, arg_1=[], arg_2=None, arg_3=arg_4):\n \"\"\"Return the first device that advertises the specified service UUIDs or\n has the specified name. Will wait up to timeout_sec seconds for the device\n to be found, and if the timeout is zero then it will not wait at all and\n immediately return a result. 
When no device is found a value of None is\n returned.\n \"\"\"\n arg_5 = time.time()\n while True:\n # Call Funcs and grab the first result if any are found.\n arg_6 = arg_0.Funcs(arg_1, arg_2)\n if len(arg_6) > 0:\n return arg_6[0]\n # No device was found. Check if the timeout is exceeded and wait to\n # try again.\n if time.time()-arg_5 >= arg_3:\n # Failed to find a device within the timeout.\n return None\n time.sleep(1)"} +{"_id": "doc_5222", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieve a list of metadata objects associated with the specified\n list of CoreBluetooth objects. If an object cannot be found then an\n exception is thrown.\n \"\"\"\n try:\n with arg_0._lock:\n return [arg_0._metadata[arg_2] for arg_2 in arg_1]\n except KeyError:\n # Note that if this error gets thrown then the assumption that OSX\n # will pass back to callbacks the exact CoreBluetooth objects that\n # were used previously is broken! (i.e. the CoreBluetooth objects\n # are not stateless)\n raise RuntimeError('Failed to find expected metadata for CoreBluetooth object!')"} +{"_id": "doc_5223", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add the specified CoreBluetooth item with the associated metadata if\n it doesn't already exist. Returns the newly created or preexisting\n metadata item.\n \"\"\"\n with arg_0._lock:\n if arg_1 not in arg_0._metadata:\n arg_0._metadata[arg_1] = arg_2\n return arg_0._metadata[arg_1]"} +{"_id": "doc_5224", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert Objective-C CBUUID type to native Python UUID type.\"\"\"\n arg_1 = arg_0.data().bytes()\n\n arg_2 = '{:0>8}-0000-1000-8000-00805f9b34fb' if len(arg_1) <= 4 else '{:0>32}'\n arg_3 = arg_2.format(hexlify(arg_1.tobytes()[:16]).decode('ascii'))\n return uuid.UUID(hex=arg_3)"} +{"_id": "doc_5225", "title": "", "text": "def Func():\n \"\"\"Return an instance of the BLE provider for the current platform.\"\"\"\n global arg_0\n # Set the provider based on the current platform.\n if arg_0 is None:\n if sys.platform.startswith('linux'):\n # Linux platform\n from .bluez_dbus.provider import BluezProvider\n arg_0 = BluezProvider()\n elif sys.platform == 'darwin':\n # Mac OSX platform\n from .corebluetooth.provider import CoreBluetoothProvider\n arg_0 = CoreBluetoothProvider()\n else:\n # Unsupported platform\n raise RuntimeError('Sorry the {0} platform is not supported by the BLE library!'.format(sys.platform))\n return arg_0"} +{"_id": "doc_5226", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert the byte array to a BigInteger\"\"\"\n arg_1 = arg_0[::-1] # reverse array\n arg_2 = 0\n for arg_3, arg_4 in enumerate(arg_1):\n arg_5 = struct.unpack(\"B\", bytes([arg_4]))[0]\n arg_2 = arg_2 | arg_5 << arg_3 * 8\n return arg_2"} +{"_id": "doc_5227", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Return the default set of request headers, which\n can later be expanded, based on the request type\"\"\"\n\n if arg_1:\n arg_2 = arg_0.deviceBuilder.getDeviceUploadHeaders()\n else:\n arg_2 = arg_0.deviceBuilder.getBaseHeaders()\n if arg_0.gsfId is not None:\n arg_2[\"X-DFE-Device-Id\"] = \"{0:x}\".format(arg_0.gsfId)\n if arg_0.authSubToken is not None:\n arg_2[\"Authorization\"] = \"GoogleLogin auth=%s\" % arg_0.authSubToken\n if arg_0.device_config_token is not None:\n arg_2[\"X-DFE-Device-Config-Token\"] = arg_0.device_config_token\n if arg_0.deviceCheckinConsistencyToken is not None:\n arg_2[\"X-DFE-Device-Checkin-Consistency-Token\"] = arg_0.deviceCheckinConsistencyToken\n if 
arg_0.dfeCookie is not None:\n arg_2[\"X-DFE-Cookie\"] = arg_0.dfeCookie\n return arg_2"} +{"_id": "doc_5228", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Search the play store for an app.\n\n nb_result (int): is the maximum number of result to be returned\n\n offset (int): is used to take result starting from an index.\n \"\"\"\n if arg_0.authSubToken is None:\n raise LoginError(\"You need to login before executing any request\")\n\n arg_2 = SEARCH_URL + \"?c=3&q={}\".format(requests.utils.quote(arg_1))\n # FIXME: not sure if this toc call should be here\n arg_0.toc()\n arg_3 = arg_0.executeRequestApi2(arg_2)\n if utils.hasPrefetch(arg_3):\n arg_4 = arg_3.preFetch[0].response\n else:\n arg_4 = arg_3\n arg_5 = arg_4.payload.listResponse.doc\n return list(map(utils.parseProtobufObj, arg_5))"} +{"_id": "doc_5229", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get app Func from a package name.\n\n packageName is the app unique ID (usually starting with 'com.').\"\"\"\n arg_2 = DETAILS_URL + \"?doc={}\".format(requests.utils.quote(arg_1))\n arg_3 = arg_0.executeRequestApi2(arg_2)\n return utils.parseProtobufObj(arg_3.payload.FuncResponse.docV2)"} +{"_id": "doc_5230", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get several apps details from a list of package names.\n\n This is much more efficient than calling N times details() since it\n requires only one request. If an item is not found it returns an empty object\n instead of throwing a RequestError('Item not found') like the details() function\n\n Args:\n packageNames (list): a list of app IDs (usually starting with 'com.').\n\n Returns:\n a list of dictionaries containing docv2 data, or None\n if the app doesn't exist\"\"\"\n\n arg_2 = {'au': '1'}\n arg_3 = googleplay_pb2.BulkDetailsRequest()\n arg_3.docid.extend(arg_1)\n arg_4 = arg_3.SerializeToString()\n arg_5 = arg_0.executeRequestApi2(BULK_URL,\n post_data=arg_4.decode(\"utf-8\"),\n content_type=CONTENT_TYPE_PROTO,\n arg_2=arg_2)\n arg_6 = arg_5.payload.FuncResponse\n return [None if not utils.hasDoc(arg_7) else\n utils.parseProtobufObj(arg_7.doc)\n for arg_7 in arg_6.entry]"} +{"_id": "doc_5231", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"List all possible subcategories for a specific category. If\n also a subcategory is provided, Func apps from this category.\n\n Args:\n cat (str): category id\n ctr (str): subcategory id\n nb_results (int): if a subcategory is specified, limit number\n of results to this number\n offset (int): if a subcategory is specified, start counting from this\n result\n Returns:\n A Func of categories. 
If subcategory is specified, a Func of apps in this\n category.\n \"\"\"\n arg_5 = LIST_URL + \"?c=3&cat={}\".format(requests.utils.quote(arg_1))\n if arg_2 is not None:\n arg_5 += \"&ctr={}\".format(requests.utils.quote(arg_2))\n if arg_3 is not None:\n arg_5 += \"&n={}\".format(requests.utils.quote(str(arg_3)))\n if arg_4 is not None:\n arg_5 += \"&o={}\".format(requests.utils.quote(str(arg_4)))\n arg_6 = arg_0.executeRequestApi2(arg_5)\n arg_7 = []\n arg_8 = []\n if arg_2 is None:\n # Func subcategories\n for arg_9 in arg_6.preFetch:\n for arg_10 in arg_9.response.payload.FuncResponse.doc:\n arg_7.extend(arg_10.child)\n return [arg_11.docid for arg_11 in arg_7]\n else:\n arg_12 = []\n for arg_13 in arg_6.payload.FuncResponse.doc: # categories\n for arg_11 in arg_13.child: # sub-category\n for arg_14 in arg_11.child: # app\n arg_12.append(utils.parseProtobufObj(arg_14))\n return arg_12"} +{"_id": "doc_5232", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=2,\n arg_4=None, arg_5=None):\n \"\"\"Browse Func for an application\n\n Args:\n packageName (str): app unique ID.\n filterByDevice (bool): filter results for current device\n sort (int): sorting criteria (values are unknown)\n nb_results (int): max number of Func to return\n offset (int): return Func starting from an offset value\n\n Returns:\n dict object containing all the protobuf data returned from\n the api\n \"\"\"\n # TODO: select the number of Func to return\n arg_6 = REVIEWS_URL + \"?doc={}&sort={}\".format(requests.utils.quote(arg_1), arg_3)\n if arg_4 is not None:\n arg_6 += \"&n={}\".format(arg_4)\n if arg_5 is not None:\n arg_6 += \"&o={}\".format(arg_5)\n if arg_2:\n arg_6 += \"&dfil=1\"\n arg_7 = arg_0.executeRequestApi2(arg_6)\n arg_8 = []\n for arg_9 in arg_7.payload.reviewResponse.getResponse.review:\n arg_8.append(utils.parseProtobufObj(arg_9))\n return arg_8"} +{"_id": "doc_5233", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=1,\n arg_4=None, arg_5=False):\n \"\"\"Download an already purchased app.\n\n Args:\n packageName (str): app unique ID (usually starting with 'com.')\n versionCode (int): version to download\n offerType (int): different type of downloads (mostly unused for apks)\n downloadToken (str): download token returned by 'purchase' API\n progress_bar (bool): wether or not to print a progress bar to stdout\n\n Returns:\n Dictionary containing apk data and a list of expansion files. As stated\n in android documentation, there can be at most 2 expansion files, one with\n main content, and one for patching the main content. Their names should\n follow this format:\n\n [main|patch]...obb\n\n Data to build this name string is provided in the dict object. 
For more\n info check https://developer.android.com/google/play/expansion-files.html\n \"\"\"\n\n if arg_2 is None:\n # pick up latest version\n arg_2 = arg_0.details(arg_1).get('versionCode')\n\n arg_6 = {'ot': str(arg_3),\n 'doc': arg_1,\n 'vc': str(arg_2)}\n arg_7 = arg_0.getHeaders()\n if arg_4 is not None:\n arg_6['dtok'] = arg_4\n arg_8 = requests.get(DELIVERY_URL, arg_7=arg_7,\n arg_6=arg_6, verify=ssl_verify,\n timeout=60,\n proxies=arg_0.proxies_config)\n arg_8 = googleplay_pb2.ResponseWrapper.FromString(arg_8.content)\n if arg_8.commands.displayErrorMessage != \"\":\n raise RequestError(arg_8.commands.displayErrorMessage)\n elif arg_8.payload.FuncResponse.appDeliveryData.downloadUrl == \"\":\n raise RequestError('App not purchased')\n else:\n arg_9 = {}\n arg_9['docId'] = arg_1\n arg_9['additionalData'] = []\n arg_10 = arg_8.payload.FuncResponse.appDeliveryData.downloadUrl\n arg_11 = arg_8.payload.FuncResponse.appDeliveryData.downloadAuthCookie[0]\n arg_12 = {\n str(arg_11.name): str(arg_11.value)\n }\n arg_9['file'] = arg_0._deliver_data(arg_10, arg_12)\n if not arg_5:\n return arg_9\n for arg_13 in arg_8.payload.FuncResponse.appDeliveryData.additionalFile:\n arg_14 = {}\n # fileType == 0 -> main\n # fileType == 1 -> patch\n if arg_13.fileType == 0:\n arg_15 = 'main'\n else:\n arg_15 = 'patch'\n arg_14['type'] = arg_15\n arg_14['versionCode'] = arg_13.versionCode\n arg_14['file'] = arg_0._deliver_data(arg_13.downloadUrl, None)\n arg_9['additionalData'].append(arg_14)\n return arg_9"} +{"_id": "doc_5234", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator function that injects a requests.Session instance into\n the decorated function's actual parameters if not given.\n \"\"\"\n def wrapper(arg_1):\n def wrapped(*arg_2, **arg_3):\n if not ('connection' in arg_3) or not arg_3['connection']:\n arg_4 = requests.Session()\n arg_3['connection'] = arg_4\n else:\n arg_4 = arg_3['connection']\n\n if not getattr(arg_4, 'timeout', False):\n arg_4.timeout = arg_0\n arg_4.headers.update({'Content-type': 'application/json'})\n return arg_1(*arg_2, **arg_3)\n return wraps(arg_1)(wrapped)\n return wrapper"} +{"_id": "doc_5235", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Generates a secure authentication token.\n\n Our token format follows the JSON Web Token (JWT) standard:\n header.claims.signature\n\n Where:\n 1) 'header' is a stringified, base64-encoded JSON object containing version and algorithm information.\n 2) 'claims' is a stringified, base64-encoded JSON object containing a set of claims:\n Library-generated claims:\n 'iat' -> The issued at time in seconds since the epoch as a number\n 'd' -> The arbitrary JSON object supplied by the user.\n User-supplied claims (these are all optional):\n 'exp' (optional) -> The expiration time of this token, as a number of seconds since the epoch.\n 'nbf' (optional) -> The 'not before' time before which the token should be rejected (seconds since the epoch)\n 'admin' (optional) -> If set to true, this client will bypass all security rules (use this to authenticate servers)\n 'debug' (optional) -> 'set to true to make this client receive debug information about security rule execution.\n 'simulate' (optional, internal-only for now) -> Set to true to neuter all API operations (listens / puts\n will run security rules but not actually write or return data).\n 3) A signature that proves the validity of this token (see: http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-07)\n\n For base64-encoding we use 
URL-safe base64 encoding. This ensures that the entire token is URL-safe\n and could, for instance, be placed as a query argument without any encoding (and this is what the JWT spec requires).\n\n Args:\n data - a json serializable object of data to be included in the token\n options - An optional dictionary of additional claims for the token. Possible keys include:\n a) 'expires' -- A timestamp (as a number of seconds since the epoch) denoting a time after which\n this token should no longer be valid.\n b) 'notBefore' -- A timestamp (as a number of seconds since the epoch) denoting a time before\n which this token should be rejected by the server.\n c) 'admin' -- Set to true to bypass all security rules (use this for your trusted servers).\n d) 'debug' -- Set to true to enable debug mode (so you can see the results of Rules API operations)\n e) 'simulate' -- (internal-only for now) Set to true to neuter all API operations (listens / puts\n will run security rules but not actually write or return data)\n Returns:\n A signed Firebase Authentication Token\n Raises:\n ValueError: if an invalid key is specified in options\n \"\"\"\n if not arg_2:\n arg_2 = {}\n arg_2.update({'admin': arg_0.admin, 'debug': arg_0.debug})\n arg_3 = arg_0._create_options_claims(arg_2)\n arg_3['v'] = arg_0.TOKEN_VERSION\n arg_3['iat'] = int(time.mktime(time.gmtime()))\n arg_3['d'] = arg_1\n return arg_0._encode_token(arg_0.secret, arg_3)"} +{"_id": "doc_5236", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Method that simply adjusts authentication credentials for the\n request.\n `params` is the querystring of the request.\n `headers` is the header of the request.\n\n If auth instance is not provided to this class, this method simply\n returns without doing anything.\n \"\"\"\n if arg_0.authentication:\n arg_3 = arg_0.authentication.get_user()\n arg_1.update({'auth': arg_3.firebase_auth_token})\n arg_2.update(arg_0.authentication.authenticator.HEADERS)"} +{"_id": "doc_5237", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None):\n \"\"\"\n Synchronous GET request.\n \"\"\"\n if arg_2 is None: arg_2 = ''\n arg_3 = arg_3 or {}\n arg_4 = arg_4 or {}\n arg_6 = arg_0._build_endpoint_url(arg_1, arg_2)\n arg_0._authenticate(arg_3, arg_4)\n return make_Func_request(arg_6, arg_3, arg_4, arg_5=arg_5)"} +{"_id": "doc_5238", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None):\n \"\"\"\n Asynchronous GET request with the process pool.\n \"\"\"\n if arg_2 is None: arg_2 = ''\n arg_4 = arg_4 or {}\n arg_5 = arg_5 or {}\n arg_6 = arg_0._build_endpoint_url(arg_1, arg_2)\n arg_0._authenticate(arg_4, arg_5)\n process_pool.apply_async(make_get_request,\n args=(arg_6, arg_4, arg_5), arg_3=arg_3)"} +{"_id": "doc_5239", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns zero if there are no permissions for a bit of the perm. of a file. 
Otherwise it returns a positive value\n\n :param os.stat_result s: os.stat(file) object\n :param str perm: R (Read) or W (Write) or X (eXecute)\n :param str pos: USR (USeR) or GRP (GRouP) or OTH (OTHer)\n :return: mask value\n :rtype: int\n \"\"\"\n arg_1 = arg_1.upper()\n arg_2 = arg_2.upper()\n assert arg_1 in ['R', 'W', 'X']\n assert arg_2 in ['USR', 'GRP', 'OTH']\n return arg_0.st_mode & getattr(stat, 'S_I{}{}'.format(arg_1, arg_2))"} +{"_id": "doc_5240", "title": "", "text": "def Func(arg_0):\n \"\"\"File is only writable by root\n\n :param str path: Path to file\n :return: True if only root can write\n :rtype: bool\n \"\"\"\n arg_1 = os.stat(arg_0)\n for arg_2, arg_3 in [(arg_1.st_uid, bitperm(arg_1, 'w', 'usr')), (arg_1.st_gid, bitperm(arg_1, 'w', 'grp'))]:\n # User id (is not root) and bit permission\n if arg_2 and arg_3:\n return False\n if bitperm(arg_1, 'w', 'oth'):\n return False\n return True"} +{"_id": "doc_5241", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Command to check configuration file. Raises InvalidConfig on error\n\n :param str file: path to config file\n :param printfn: print function for success message\n :return: None\n \"\"\"\n Config(arg_0).read()\n arg_1('The configuration file \"{}\" is correct'.format(arg_0))"} +{"_id": "doc_5242", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse and validate the config file. The Func data is accessible as a dictionary in this instance\n\n :return: None\n \"\"\"\n try:\n arg_1 = load(open(arg_0.file), Loader)\n except (UnicodeDecodeError, YAMLError) as e:\n raise InvalidConfig(arg_0.file, '{}'.format(e))\n try:\n validate(arg_1, SCHEMA)\n except ValidationError as e:\n raise InvalidConfig(arg_0.file, e)\n arg_0.update(arg_1)"} +{"_id": "doc_5243", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=5):\n \"\"\"Excecute command on thread\n\n :param cmd: Command to execute\n :param cwd: current working directory\n :return: None\n \"\"\"\n arg_3 = subprocess.Popen(arg_0, arg_1=arg_1, arg_4=subprocess.PIPE, arg_5=subprocess.PIPE)\n try:\n arg_3.wait(arg_2=arg_2)\n except subprocess.TimeoutExpired:\n return None\n else:\n arg_4, arg_5 = arg_3.stdout.read(), arg_3.stderr.read()\n if sys.version_info >= (3,):\n arg_4, arg_5 = arg_4.decode('utf-8', errors='ignore'), arg_5.decode('utf-8', errors='ignore')\n if arg_3.returncode:\n raise ExecuteError('Error running command {}: The error code {} has returned. Stderr: {}'.format(\n ' '.join(arg_0), arg_3.returncode, arg_5\n ))\n else:\n return arg_4, arg_5"} +{"_id": "doc_5244", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3='bash'):\n \"\"\"Excecute command on remote machine using SSH\n\n :param cmd: Command to execute\n :param ssh: Server to connect. Port is optional\n :param cwd: current working directory\n :return: None\n \"\"\"\n arg_4 = None\n arg_5 = arg_1.split(':', 1)\n if len(arg_5) > 1 and not arg_5[1].isdigit():\n raise InvalidConfig(extra_body='Invalid port number on ssh config: {}'.format(arg_5[1]))\n elif len(arg_5) > 1:\n arg_4 = arg_5[1]\n arg_6 = ' '.join([x.replace(\"'\", \"\"\"'\"'\"'\"\"\") for x in arg_0.split(' ')])\n arg_7 = ' '.join([\n ' '.join(get_shell(arg_3)), # /usr/bin/env bash\n ' '.join([EXECUTE_SHELL_PARAM, \"'\", ' '.join((['cd', arg_2, ';'] if arg_2 else []) + [arg_6]), \"'\"])],\n )\n return ['ssh', arg_5[0]] + (['-p', arg_4] if arg_4 else []) + ['-C'] + [arg_7]"} +{"_id": "doc_5245", "title": "", "text": "def Func(arg_0):\n \"\"\"Get HTTP Headers to send. 
By default default_headers\n\n :return: HTTP Headers\n :rtype: dict\n \"\"\"\n arg_1 = copy.copy(arg_0.default_headers or {})\n arg_1.update(arg_0.data.get('headers') or {})\n return arg_1"} +{"_id": "doc_5246", "title": "", "text": "def Func(arg_0):\n \"\"\"Return \"data\" value on self.data\n\n :return: data to send\n :rtype: str\n \"\"\"\n if arg_0.default_body:\n return arg_0.default_body\n arg_1 = arg_0.data.get('data')\n if isinstance(arg_1, dict):\n return json.dumps(arg_1)\n return arg_1"} +{"_id": "doc_5247", "title": "", "text": "def Func(arg_0):\n \"\"\"Return source mac address for this Scapy Packet\n\n :param scapy.packet.Packet pkt: Scapy Packet\n :return: Mac address. Include (Amazon Device) for these devices\n :rtype: str\n \"\"\"\n if arg_0.src.upper() in BANNED_DEVICES:\n arg_1 = ''\n elif arg_0.src.upper()[:8] in AMAZON_DEVICES:\n arg_1 = '{} (Amazon Device)'.format(arg_0.src)\n else:\n arg_1 = arg_0.src\n return arg_1"} +{"_id": "doc_5248", "title": "", "text": "def Func(arg_0):\n \"\"\"Scandevice callback. Register src mac to avoid src repetition.\n Print device on screen.\n\n :param scapy.packet.Packet pkt: Scapy Packet\n :return: None\n \"\"\"\n if arg_0.src in mac_id_list:\n return\n mac_id_list.append(arg_0.src)\n arg_1 = pkt_text(arg_0)\n click.secho(arg_1, fg='magenta') if 'Amazon' in arg_1 else click.echo(arg_1)"} +{"_id": "doc_5249", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Print help and scan devices on screen.\n\n :return: None\n \"\"\"\n click.secho(HELP, fg='yellow')\n scan_devices(Funcy_print, lfilter=lambda d: d.src not in mac_id_list, iface=arg_0)"} +{"_id": "doc_5250", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Send success or error message to configured confirmation\n\n :param str message: Body message to send\n :param bool success: Device executed successfully to personalize message\n :return: None\n \"\"\"\n arg_1 = arg_1.strip()\n if not arg_0.confirmation:\n return\n try:\n arg_0.confirmation.send(arg_1, arg_2)\n except Exception as e:\n logger.warning('Error sending confirmation on device {}: {}'.format(arg_0.name, e))"} +{"_id": "doc_5251", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Start daemon mode\n\n :param bool root_allowed: Only used for ExecuteCmd\n :return: loop\n \"\"\"\n arg_0.root_allowed = arg_1\n scan_devices(arg_0.on_push, lambda d: d.src.lower() in arg_0.devices, arg_0.settings.get('interface'))"} +{"_id": "doc_5252", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Filter queryset based on keywords.\n Support for multiple-selected parent values.\n \"\"\"\n arg_3 = Q()\n for arg_4, arg_5 in iteritems(arg_1):\n try:\n arg_6 = arg_5.split(\",\")\n if len(arg_6) > 0:\n arg_7 = Q()\n for arg_5 in arg_6:\n arg_7 |= Q(**{arg_4: arg_5})\n arg_3 &= arg_7\n except AttributeError:\n # value can be a bool\n arg_3 &= Q(**{arg_4: arg_5})\n if arg_2:\n arg_0 = arg_0.exclude(arg_3)\n else:\n arg_0 = arg_0.filter(arg_3)\n return arg_0"} +{"_id": "doc_5253", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return True if according to should_index the object should be indexed.\"\"\"\n if arg_0._should_index_is_method:\n arg_2 = inspect.ismethod(arg_0.should_index)\n try:\n arg_3 = len(inspect.signature(arg_0.should_index).parameters)\n except AttributeError:\n # noinspection PyDeprecation\n arg_3 = len(inspect.getargspec(arg_0.should_index).args)\n\n if arg_2 or arg_3 is 1:\n # bound method, call with instance\n return arg_0.should_index(arg_1)\n else:\n # unbound method, 
simply call without arguments\n return arg_0.should_index()\n else:\n # property/attribute/Field, evaluate as bool\n arg_4 = type(arg_0.should_index)\n if arg_4 is DeferredAttribute:\n arg_5 = arg_0.should_index.__get__(arg_1, None)\n elif arg_4 is str:\n arg_5 = getattr(arg_1, arg_0.should_index)\n elif arg_4 is property:\n arg_5 = arg_0.should_index.__get__(arg_1)\n else:\n raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format(\n arg_0.should_index))\n if type(arg_5) is not bool:\n raise AlgoliaIndexError(\"%s's should_index (%s) should be a boolean\" % (\n arg_1.__class__.__name__, arg_0.should_index))\n return arg_5"} +{"_id": "doc_5254", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the settings of the index.\"\"\"\n try:\n logger.info('GET SETTINGS ON %s', arg_0.index_name)\n return arg_0.__index.Func()\n except AlgoliaException as e:\n if DEBUG:\n raise e\n else:\n logger.warning('ERROR DURING GET_SETTINGS ON %s: %s',\n arg_0.model, e)"} +{"_id": "doc_5255", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3, arg_4=None):\n \"\"\"\n Registers the given model with Algolia engine.\n\n If the given model is already Funced with Algolia engine, a\n RegistrationError will be raised.\n \"\"\"\n # Check for existing registration.\n if arg_0.is_Funced(arg_1):\n raise RegistrationError(\n '{} is already Funced with Algolia engine'.format(arg_1))\n\n # Perform the registration.\n if not issubclass(arg_2, arg_3):\n raise RegistrationError(\n '{} should be a subclass of AlgoliaIndex'.format(arg_2))\n arg_5 = arg_2(arg_1, arg_0.client, arg_0.__settings)\n arg_0.__Funced_models[arg_1] = arg_5\n\n if (isinstance(arg_4, bool) and\n arg_4) or arg_0.__auto_indexing:\n # Connect to the signalling framework.\n post_save.connect(arg_0.__post_save_receiver, arg_1)\n pre_delete.connect(arg_0.__pre_delete_receiver, arg_1)\n logger.info('REGISTER %s', arg_1)"} +{"_id": "doc_5256", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the adapter associated with the given model.\"\"\"\n if not arg_0.is_registered(arg_1):\n raise RegistrationError(\n '{} is not registered with Algolia engine'.format(arg_1))\n\n return arg_0.__registered_models[arg_1]"} +{"_id": "doc_5257", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Signal handler for when a registered model has been saved.\"\"\"\n logger.debug('RECEIVE post_save FOR %s', arg_1.__class__)\n arg_0.save_record(arg_1, **arg_2)"} +{"_id": "doc_5258", "title": "", "text": "def Func(arg_0, arg_1, arg_2=12):\n \"\"\"\n Encode a position given in float arguments latitude, longitude to\n a geohash which will have the character count precision.\n \"\"\"\n arg_3, arg_4 = (-90.0, 90.0), (-180.0, 180.0)\n arg_5 = []\n arg_6 = [ 16, 8, 4, 2, 1 ]\n arg_7 = 0\n arg_8 = 0\n arg_9 = True\n while len(arg_5) < arg_2:\n if arg_9:\n arg_10 = (arg_4[0] + arg_4[1]) / 2\n if arg_1 > arg_10:\n arg_8 |= arg_6[arg_7]\n arg_4 = (arg_10, arg_4[1])\n else:\n arg_4 = (arg_4[0], arg_10)\n else:\n arg_10 = (arg_3[0] + arg_3[1]) / 2\n if arg_0 > arg_10:\n arg_8 |= arg_6[arg_7]\n arg_3 = (arg_10, arg_3[1])\n else:\n arg_3 = (arg_3[0], arg_10)\n arg_9 = not arg_9\n if arg_7 < 4:\n arg_7 += 1\n else:\n arg_5 += __base32[arg_8]\n arg_7 = 0\n arg_8 = 0\n return ''.join(arg_5)"} +{"_id": "doc_5259", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pad a string to the target length in characters, or return the original\n string if it's longer than the target length.\n \"\"\"\n arg_2 = arg_1 - len(arg_0)\n 
if arg_2 <= 0:\n return arg_0\n return arg_0 + (' ' * arg_2)"} +{"_id": "doc_5260", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Pad short rows to the length of the longest row to help render \"jagged\"\n CSV files\n \"\"\"\n arg_1 = max([len(arg_2) for arg_2 in arg_0])\n for arg_2 in arg_0:\n while len(arg_2) < arg_1:\n arg_2.append('')\n return arg_0"} +{"_id": "doc_5261", "title": "", "text": "def Func(arg_0):\n \"\"\"Pad each cell to the size of the largest cell in its column.\"\"\"\n arg_1 = [max(map(len, col)) for col in zip(*arg_0)]\n for arg_2 in arg_0:\n for arg_3, arg_4 in enumerate(arg_2):\n arg_2[arg_3] = pad_to(arg_4, arg_1[arg_3])\n return arg_0"} +{"_id": "doc_5262", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add dividers and padding to a row of cells and return a string.\"\"\"\n arg_3 = ''.join([arg_2 * ' ', arg_1, arg_2 * ' '])\n return arg_3.join(arg_0)"} +{"_id": "doc_5263", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Calculate base id and version from a resource id.\n\n :params resource_id: Resource id.\n :params return_version: (optional) True if You need version, returns (resource_id, version).\n \"\"\"\n arg_2 = 0\n arg_0 = arg_0 + 0xC4000000 # 3288334336\n # TODO: version is broken due ^^, needs refactoring\n\n while arg_0 > 0x01000000: # 16777216\n arg_2 += 1\n if arg_2 == 1:\n arg_0 -= 0x80000000 # 2147483648 # 0x50000000 # 1342177280 ? || 0x2000000 # 33554432\n elif arg_2 == 2:\n arg_0 -= 0x03000000 # 50331648\n else:\n arg_0 -= 0x01000000 # 16777216\n\n if arg_1:\n return arg_0, arg_2 - 67 # just correct \"magic number\"\n\n return arg_0"} +{"_id": "doc_5264", "title": "", "text": "def Func(arg_0, arg_1, Func, arg_3=False):\n \"\"\"Make a bid.\n\n :params trade_id: Trade id.\n :params bid: Amount of credits You want to spend.\n :params fast: True for fastest bidding (skips trade status & credits check).\n \"\"\"\n arg_4 = 'PUT'\n arg_5 = 'trade/%s/bid' % arg_1\n\n if not arg_3:\n arg_6 = arg_0.tradeStatus(arg_1)[0]\n # don't bid if current bid is equal or greater than our max bid\n if arg_6['currentBid'] >= Func or arg_0.credits < Func:\n return False # TODO: add exceptions\n arg_7 = {'bid': Func}\n try:\n arg_6 = arg_0.__request__(arg_4, arg_5, arg_7=json.dumps(arg_7), params={'sku_b': arg_0.sku_b}, arg_3=arg_3)[\n 'auctionInfo'][0]\n except PermissionDenied: # too slow, somebody took it already :-(\n return False\n if arg_6['bidState'] == 'highest' or (\n arg_6['tradeState'] == 'closed' and arg_6['bidState'] == 'buyNow'): # checking 'tradeState' is required?\n return True\n else:\n return False"} +{"_id": "doc_5265", "title": "", "text": "def Func(arg_0, arg_1='desc', arg_2='player', arg_3='', arg_4=0, arg_5=None, arg_6=arg_7['club'],\n arg_8=None, arg_9=None, arg_10=None, arg_11=None, Func=None,\n arg_13=None, arg_14=None, arg_15=None, arg_16=False, arg_17=None):\n \"\"\"Return items in your club, excluding consumables.\n\n :param ctype: [development / ? / ?] Card type.\n :param level: (optional) [?/?/gold] Card level.\n :param category: (optional) [fitness/?/?] 
Card category.\n :param assetId: (optional) Asset id.\n :param defId: (optional) Definition id.\n :param min_price: (optional) Minimal price.\n :param max_price: (optional) Maximum price.\n :param min_buy: (optional) Minimal buy now price.\n :param max_buy: (optional) Maximum buy now price.\n :param league: (optional) League id.\n :param club: (optional) Club id.\n :param position: (optional) Position.\n :param nationality: (optional) Nation id.\n :param rare: (optional) [boolean] True for searching special cards.\n :param playStyle: (optional) Play style.\n :param start: (optional) Start page sent to server so it supposed to be 12/15, 24/30 etc. (default platform page_size*n)\n :param page_size: (optional) Page size (items per page)\n \"\"\"\n arg_18 = 'GET'\n arg_19 = 'club'\n\n if arg_5: # backward compatibility, will be removed in future\n arg_6 = arg_5\n\n arg_20 = {'sort': arg_1, 'type': arg_2, 'defId': arg_3, 'start': arg_4, 'count': arg_6}\n if arg_8:\n arg_20['level'] = arg_8\n if arg_9:\n arg_20['cat'] = arg_9\n if arg_10:\n arg_20['maskedDefId'] = arg_10\n if arg_11:\n arg_20['leag'] = arg_11\n if Func:\n arg_20['team'] = Func\n if arg_13:\n arg_20['pos'] = arg_13\n if arg_14:\n arg_20['zone'] = arg_14\n if arg_15:\n arg_20['nat'] = arg_15\n if arg_16:\n arg_20['rare'] = 'SP'\n if arg_17:\n arg_20['playStyle'] = arg_17\n arg_21 = arg_0.__request__(arg_18, arg_19, arg_20=arg_20)\n\n # pinEvent\n if arg_4 == 0:\n if arg_2 == 'player':\n arg_22 = 'Club - Players - List View'\n elif arg_2 == 'staff':\n arg_22 = 'Club - Staff - List View'\n elif arg_2 in ('item', 'kit', 'ball', 'badge', 'stadium'):\n arg_22 = 'Club - Club Items - List View'\n # else: # TODO: THIS IS probably WRONG, detect all ctypes\n # pgid = 'Club - Club Items - List View'\n arg_23 = [arg_0.pin.event('page_view', 'Hub - Club'), arg_0.pin.event('page_view', arg_22)]\n if arg_21['itemData']:\n arg_23.append(arg_0.pin.event('page_view', 'Item - Detail View'))\n arg_0.pin.send(arg_23)\n\n return [itemParse({'itemData': arg_24}) for arg_24 in arg_21['itemData']]"} +{"_id": "doc_5266", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Return all consumables from club.\"\"\"\n arg_2 = 'GET'\n arg_3 = 'club/consumables/development'\n\n arg_4 = arg_0.__request__(arg_2, arg_3)\n\n arg_5 = [arg_0.pin.event('page_view', 'Hub - Club')]\n arg_0.pin.send(arg_5, arg_1=arg_1)\n arg_5 = [arg_0.pin.event('page_view', 'Club - Consumables')]\n arg_0.pin.send(arg_5, arg_1=arg_1)\n arg_5 = [arg_0.pin.event('page_view', 'Club - Consumables - List View')]\n arg_0.pin.send(arg_5, arg_1=arg_1)\n\n return [itemParse(arg_6) for arg_6 in arg_4.get('itemData', ())]"} +{"_id": "doc_5267", "title": "", "text": "def Func(arg_0):\n \"\"\"Return items in Func.\"\"\"\n arg_1 = 'GET'\n arg_2 = 'Func'\n\n arg_3 = arg_0.__request__(arg_1, arg_2)\n\n # pinEvents\n arg_4 = [arg_0.pin.event('page_view', 'Hub - Transfers'), arg_0.pin.event('page_view', 'Transfer List - List View')]\n if arg_3.get('auctionInfo'):\n arg_4.append(arg_0.pin.event('page_view', 'Item - Detail View'))\n arg_0.pin.send(arg_4)\n\n return [itemParse(arg_5) for arg_5 in arg_3.get('auctionInfo', ())]"} +{"_id": "doc_5268", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=3600, arg_5=False):\n \"\"\"Start auction. 
Returns trade_id.\n\n :params item_id: Item id.\n :params bid: Stard bid.\n :params buy_now: Buy now price.\n :params duration: Auction duration in seconds (Default: 3600).\n \"\"\"\n arg_6 = 'POST'\n arg_7 = 'auctionhouse'\n\n # TODO: auto send to tradepile\n arg_8 = {'buyNowPrice': arg_3, 'startingBid': arg_2, 'duration': arg_4, 'itemData': {'id': arg_1}}\n arg_9 = arg_0.__request__(arg_6, arg_7, arg_8=json.dumps(arg_8), params={'sku_b': arg_0.sku_b})\n if not arg_5: # tradeStatus check like webapp do\n arg_0.tradeStatus(arg_9['id'])\n return arg_9['id']"} +{"_id": "doc_5269", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Quick sell.\n\n :params item_id: Item id.\n \"\"\"\n arg_2 = 'DELETE'\n arg_3 = 'item'\n\n if not isinstance(arg_1, (list, tuple)):\n arg_1 = (arg_1,)\n arg_1 = (str(i) for i in arg_1)\n arg_4 = {'itemIds': ','.join(arg_1)}\n arg_0.__request__(arg_2, arg_3, arg_4=arg_4) # {\"items\":[{\"id\":280607437106}],\"totalCredits\":18136}\n return True"} +{"_id": "doc_5270", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Send to watchlist.\n\n :params trade_id: Trade id.\n \"\"\"\n arg_2 = 'PUT'\n arg_3 = 'watchlist'\n\n arg_4 = {'auctionInfo': [{'id': arg_1}]}\n return arg_0.__request__(arg_2, arg_3, arg_4=json.dumps(arg_4))"} +{"_id": "doc_5271", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Send card FROM CLUB to first free slot in sbs squad.\"\"\"\n # TODO?: multiple item_ids\n arg_3 = 'PUT'\n arg_4 = 'sbs/challenge/%s/squad' % arg_1\n\n arg_5 = arg_0.sbsSquad(arg_1)\n arg_6 = []\n arg_7 = False\n arg_8 = 0\n for arg_9 in arg_5['squad']['players']:\n if arg_9['itemData']['id'] == arg_2: # item already in sbs # TODO?: report reason\n return False\n if arg_9['itemData']['id'] == 0 and not arg_7:\n arg_9['itemData']['id'] = arg_2\n arg_7 = True\n arg_6.append({\"index\": arg_8,\n \"itemData\": {\"id\": arg_9['itemData']['id'],\n \"dream\": False}})\n arg_8 += 1\n arg_10 = {'players': arg_6}\n\n if not arg_7:\n return False\n else:\n arg_0.__request__(arg_3, arg_4, arg_10=json.dumps(arg_10))\n return True"} +{"_id": "doc_5272", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Apply consumable on player.\n\n :params item_id: Item id of player.\n :params resource_id: Resource id of consumable.\n \"\"\"\n # TODO: catch exception when consumable is not found etc.\n # TODO: multiple players like in quickSell\n arg_3 = 'POST'\n arg_4 = 'item/resource/%s' % arg_2\n\n arg_5 = {'apply': [{'id': arg_1}]}\n arg_0.__request__(arg_3, arg_4, arg_5=json.dumps(arg_5))"} +{"_id": "doc_5273", "title": "", "text": "def Func(arg_0):\n \"\"\"Return active Func.\"\"\"\n arg_1 = 'GET'\n arg_2 = 'activeMessage'\n\n arg_3 = arg_0.__request__(arg_1, arg_2)\n # try:\n # return rc['activeMessage']\n # except:\n # raise UnknownError('Invalid activeMessage response') # is it even possible?\n return arg_3['activeMessage']"} +{"_id": "doc_5274", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Runs its worker method.\n\n This method will be terminated once its parent's is_Funcning\n property turns False.\n \"\"\"\n while arg_0._base.is_Funcning:\n if arg_0._worker:\n arg_0._worker()\n time.sleep(arg_0._sleep_duration)"} +{"_id": "doc_5275", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a NumPy array that represents the 2D pixel location,\n which is defined by PFNC, of the original image data.\n\n You may use the returned NumPy array for a calculation to map the\n original image to another format.\n\n :return: A NumPy array that represents the 2D pixel 
location.\n \"\"\"\n if arg_0.data is None:\n return None\n\n #\n return arg_0._data.reshape(\n arg_0.height + arg_0.y_padding,\n int(arg_0.width * arg_0._num_components_per_pixel + arg_0.x_padding)\n )"} +{"_id": "doc_5276", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Starts image acquisition.\n\n :return: None.\n \"\"\"\n if not arg_0._create_ds_at_connection:\n arg_0._setup_data_streams()\n\n #\n arg_1 = arg_0._num_buffers\n for arg_2 in arg_0._data_streams:\n try:\n arg_3 = arg_2.buffer_announce_min\n if arg_3 < arg_1:\n arg_3 = arg_1\n except InvalidParameterException as e:\n arg_3 = arg_1\n arg_0._logger.debug(e, exc_info=True)\n\n if arg_2.defines_payload_size():\n arg_4 = arg_2.payload_size\n else:\n arg_4 = arg_0.device.node_map.PayloadSize.value\n\n arg_5 = arg_0._create_raw_buffers(\n arg_3, arg_4\n )\n\n arg_6 = arg_0._create_buffer_tokens(\n arg_5\n )\n\n arg_0._announced_buffers = arg_0._announce_buffers(\n arg_2=arg_2, _buffer_tokens=arg_6\n )\n\n arg_0._queue_announced_buffers(\n arg_2=arg_2, buffers=arg_0._announced_buffers\n )\n\n # Reset the number of images to acquire.\n try:\n arg_8 = arg_0.device.node_map.AcquisitionMode.value\n if arg_8 == 'Continuous':\n arg_9 = -1\n elif arg_8 == 'SingleFrame':\n arg_9 = 1\n elif arg_8 == 'MultiFrame':\n arg_9 = arg_0.device.node_map.AcquisitionFrameCount.value\n else:\n arg_9 = -1\n except LogicalErrorException as e:\n # The node doesn't exist.\n arg_9 = -1\n arg_0._logger.debug(e, exc_info=True)\n\n arg_0._num_images_to_acquire = arg_9\n\n try:\n # We're ready to start image acquisition. Lock the device's\n # transport layer related features:\n arg_0.device.node_map.TLParamsLocked.value = 1\n except LogicalErrorException:\n # SFNC < 2.0\n pass\n\n # Start image acquisition.\n arg_0._is_acquiring_images = True\n\n for arg_2 in arg_0._data_streams:\n arg_2.start_acquisition(\n ACQ_START_FLAGS_LIST.ACQ_START_FLAGS_DEFAULT,\n arg_0._num_images_to_acquire\n )\n\n #\n if arg_0.thread_image_acquisition:\n arg_0.thread_image_acquisition.start()\n\n #\n arg_0.device.node_map.AcquisitionStart.execute()\n\n arg_0._logger.info(\n '{0} started image acquisition.'.format(arg_0._device.id_)\n )\n\n if arg_0._profiler:\n arg_0._profiler.print_diff()"} +{"_id": "doc_5277", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Stops image acquisition.\n\n :return: None.\n \"\"\"\n if arg_0.is_acquiring_images:\n #\n arg_0._is_acquiring_images = False\n\n #\n if arg_0.thread_image_acquisition.is_running: # TODO\n arg_0.thread_image_acquisition.stop()\n\n with MutexLocker(arg_0.thread_image_acquisition):\n #\n arg_0.device.node_map.AcquisitionStop.execute()\n\n try:\n # Unlock TLParamsLocked in order to allow full device\n # configuration:\n arg_0.device.node_map.TLParamsLocked.value = 0\n except LogicalErrorException:\n # SFNC < 2.0\n pass\n\n for arg_6 in arg_0._data_streams:\n # Stop image acquisition.\n try:\n arg_6.stop_acquisition(\n ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL\n )\n except (ResourceInUseException, TimeoutException) as e:\n arg_0._logger.error(e, exc_info=True)\n\n # Flash the queue for image acquisition process.\n arg_6.flush_buffer_queue(\n ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD\n )\n\n for arg_7 in arg_0._event_new_buffer_managers:\n arg_7.flush_event_queue()\n\n if arg_0._create_ds_at_connection:\n arg_0._release_buffers()\n else:\n arg_0._release_data_streams()\n\n #\n arg_0._has_acquired_1st_image = False\n\n #\n arg_0._chunk_adapter.detach_buffer()\n\n #\n arg_0._logger.info(\n '{0} stopped image 
acquisition.'.format(arg_0._device.id_)\n )\n\n if arg_0._profiler:\n arg_0._profiler.print_diff()"} +{"_id": "doc_5278", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Adds a CTI file to work with to the CTI file list.\n\n :param file_path: Set a file path to the target CTI file.\n\n :return: None.\n \"\"\"\n if not os.path.exists(arg_1):\n arg_0._logger.warning(\n 'Attempted to add {0} which does not exist.'.format(arg_1)\n )\n\n if arg_1 not in arg_0._cti_files:\n arg_0._cti_files.append(arg_1)\n arg_0._logger.info(\n 'Added {0} to the CTI file list.'.format(arg_1)\n )"} +{"_id": "doc_5279", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Removes the specified CTI file from the CTI file list.\n\n :param file_path: Set a file path to the target CTI file.\n\n :return: None.\n \"\"\"\n if arg_1 in arg_0._cti_files:\n arg_0._cti_files.remove(arg_1)\n arg_0._logger.info(\n 'Removed {0} from the CTI file list.'.format(arg_1)\n )"} +{"_id": "doc_5280", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Releases all external resources including the controlling device.\n \"\"\"\n\n arg_2 = None\n if arg_1.device:\n #\n arg_1.stop_image_acquisition()\n\n #\n arg_1._release_data_streams()\n\n #\n arg_2 = arg_1._device.id_\n\n #\n if arg_1.device.node_map:\n #\n if arg_1._chunk_adapter:\n arg_1._chunk_adapter.detach_buffer()\n arg_1._chunk_adapter = None\n arg_0._logger.info(\n 'Detached a buffer from the chunk adapter of {0}.'.format(\n arg_2\n )\n )\n\n arg_1.device.node_map.disconnect()\n arg_0._logger.info(\n 'Disconnected the port from the NodeMap of {0}.'.format(\n arg_2\n )\n )\n\n #\n if arg_1._device.is_open():\n arg_1._device.close()\n arg_0._logger.info(\n 'Closed Device module, {0}.'.format(arg_2)\n )\n\n arg_1._device = None\n\n #\n if arg_2:\n arg_0._logger.info(\n 'Destroyed the ImageAcquirer object which {0} '\n 'had belonged to.'.format(arg_2)\n )\n else:\n arg_0._logger.info(\n 'Destroyed an ImageAcquirer.'\n )\n\n if arg_0._profiler:\n arg_0._profiler.print_diff()\n\n arg_0._ias.remove(arg_1)"} +{"_id": "doc_5281", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Run the unit test suite with each support library and Python version.\"\"\"\n\n arg_0.install('-e', '.[dev]')\n arg_0.install(arg_1)\n _run_tests(arg_0)"} +{"_id": "doc_5282", "title": "", "text": "def Func():\n \"\"\"Transform README.md into a usable long description.\n\n Replaces relative references to svg images to absolute https references.\n \"\"\"\n\n with open('README.md') as f:\n arg_0 = f.read()\n\n def replace_relative_with_absolute(arg_1):\n arg_2 = arg_1.group(0)[1:-1]\n return ('(https://github.com/google/pybadges/raw/master/'\n '%s?sanitize=true)' % arg_2)\n\n return re.sub(r'\\(tests/golden-images/.*?\\.svg\\)',\n replace_relative_with_absolute,\n arg_0)"} +{"_id": "doc_5283", "title": "", "text": "def Func(arg_0: arg_1) -> 'PrecalculatedTextMeasurer':\n \"\"\"Return a PrecalculatedTextMeasurer given a JSON stream.\n\n See precalculate_text.py for details on the required format.\n \"\"\"\n arg_2 = json.load(arg_0)\n return PrecalculatedTextMeasurer(arg_2['mean-character-length'],\n arg_2['character-lengths'],\n arg_2['kerning-pairs'])"} +{"_id": "doc_5284", "title": "", "text": "def Func(arg_0) -> 'PrecalculatedTextMeasurer':\n \"\"\"Returns a reasonable Func PrecalculatedTextMeasurer.\"\"\"\n if arg_0._Func_cache is not None:\n return arg_0._Func_cache\n\n if pkg_resources.resource_exists(__name__, 'Func-widths.json.xz'):\n import lzma\n with 
pkg_resources.resource_stream(__name__,\n 'Func-widths.json.xz') as f:\n with lzma.open(f, \"rt\") as g:\n arg_0._Func_cache = PrecalculatedTextMeasurer.from_json(\n cast(TextIO, g))\n return arg_0._Func_cache\n elif pkg_resources.resource_exists(__name__, 'Func-widths.json'):\n with pkg_resources.resource_stream(__name__,\n 'Func-widths.json') as f:\n arg_0._Func_cache = PrecalculatedTextMeasurer.from_json(\n io.TextIOWrapper(f, encoding='utf-8'))\n return arg_0._Func_cache\n else:\n raise ValueError('could not load Func-widths.json')"} +{"_id": "doc_5285", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_4[arg_1] = None,\n arg_5: arg_4[arg_1] = None,\n arg_6: arg_4[arg_1] = None, arg_7: arg_4[arg_1] = None,\n arg_8: arg_1 = '#555', arg_9: arg_1 = '#007ec6',\n arg_10: arg_4[arg_11.TextMeasurer] = None,\n arg_13: arg_14 = False) -> arg_1:\n \"\"\"Creates a github-style Func as an SVG image.\n\n >>> Func(left_text='coverage', right_text='23%', right_color='red')\n ''\n >>> Func(left_text='build', right_text='green', right_color='green',\n ... whole_link=\"http://www.example.com/\")\n ''\n\n Args:\n left_text: The text that should appear on the left-hand-side of the\n Func e.g. \"coverage\".\n right_text: The text that should appear on the right-hand-side of the\n Func e.g. \"23%\".\n left_link: The URL that should be redirected to when the left-hand text\n is selected.\n right_link: The URL that should be redirected to when the right-hand\n text is selected.\n whole_link: The link that should be redirected to when the Func is\n selected. If set then left_link and right_right may not be set.\n logo: A url representing a logo that will be displayed inside the\n Func. Can be a data URL e.g. \"data:image/svg+xml;utf8, arg_1[arg_2]:\n \"\"\"Generates the subset of 'characters' that can be encoded by 'encodings'.\n\n Args:\n characters: The characters to check for encodeability e.g. 'abcd'.\n encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].\n\n Returns:\n The subset of 'characters' that can be encoded using one of the provided\n encodings.\n \"\"\"\n for arg_4 in arg_0:\n for arg_5 in arg_3:\n try:\n arg_4.encode(arg_5)\n yield arg_4\n except UnicodeEncodeError:\n pass"} +{"_id": "doc_5287", "title": "", "text": "def Func(\n arg_0: arg_1.TextMeasurer,\n arg_3: arg_4[arg_5]) -> Mapping[arg_5, float]:\n \"\"\"Return a mapping between each given character and its length.\n\n Args:\n measurer: The TextMeasurer used to measure the width of the text in\n pixels.\n characters: The characters to measure e.g. \"ml\".\n\n Returns:\n A mapping from the given characters to their length in pixels, as\n determined by 'measurer' e.g. 
{'m': 5.2, 'l', 1.2}.\n \"\"\"\n arg_6 = {}\n\n for arg_7 in arg_3:\n arg_6[arg_7] = arg_0.text_width(arg_7)\n return arg_6"} +{"_id": "doc_5288", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3,\n arg_4: arg_5.TextMeasurer,\n arg_7: arg_8[arg_3]) -> None:\n \"\"\"Write the data required by PrecalculatedTextMeasurer to a stream.\"\"\"\n arg_9 = list(\n generate_supported_characters(arg_2))\n arg_10 = ''.join(\n generate_encodeable_characters(arg_9, arg_7))\n arg_11 = calculate_character_to_length_mapping(arg_4,\n arg_9)\n arg_12 = calculate_pair_to_kern_mapping(arg_4, arg_11,\n arg_10)\n json.dump(\n {'mean-character-length': statistics.mean(arg_11.values()),\n 'character-lengths': arg_11,\n 'kerning-characters': arg_10,\n 'kerning-pairs': arg_12},\n arg_0, sort_keys=True, indent=1)"} +{"_id": "doc_5289", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''Called internally during the parsing of the Zabransky database, to\n add coefficients as they are read one per line'''\n arg_0.n += 1\n if not arg_0.Ts:\n arg_0.Ts = [arg_1, arg_2]\n arg_0.coeff_sets = [arg_3]\n else:\n for arg_6, arg_7 in enumerate(arg_0.Ts):\n if arg_1 < arg_7:\n # Under an existing coefficient set - assume Tmax will come from another set\n arg_0.Ts.insert(arg_6, arg_1) \n arg_0.coeff_sets.insert(arg_6, arg_3)\n return\n # Must be appended to end instead\n arg_0.Ts.append(arg_2)\n arg_0.coeff_sets.append(arg_3)"} +{"_id": "doc_5290", "title": "", "text": "def Func(arg_0, arg_1):\n '''Determines the index at which the coefficients for the current\n temperature are stored in `coeff_sets`.\n '''\n # DO NOT CHANGE\n if arg_0.n == 1:\n return 0\n for arg_2 in range(arg_0.n):\n if arg_1 <= arg_0.Ts[arg_2+1]:\n return arg_2\n return arg_0.n - 1"} +{"_id": "doc_5291", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func heat capacity of a liquid at temperature `T`\n with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func heat capacity, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n Cp : float\n Heat capacity of the liquid at T, [J/mol/K]\n '''\n if arg_2 == ZABRANSKY_SPLINE:\n return arg_0.Zabransky_spline.Func(arg_1)\n elif arg_2 == ZABRANSKY_QUASIPOLYNOMIAL:\n return arg_0.Zabransky_quasipolynomial.Func(arg_1)\n elif arg_2 == ZABRANSKY_SPLINE_C:\n return arg_0.Zabransky_spline_iso.Func(arg_1)\n elif arg_2 == ZABRANSKY_QUASIPOLYNOMIAL_C:\n return arg_0.Zabransky_quasipolynomial_iso.Func(arg_1)\n elif arg_2 == ZABRANSKY_SPLINE_SAT:\n return arg_0.Zabransky_spline_sat.Func(arg_1)\n elif arg_2 == ZABRANSKY_QUASIPOLYNOMIAL_SAT:\n return arg_0.Zabransky_quasipolynomial_sat.Func(arg_1)\n elif arg_2 == COOLPROP:\n return CoolProp_T_dependent_property(arg_1, arg_0.CASRN , 'CPMOLAR', 'l')\n elif arg_2 == POLING_CONST:\n return arg_0.POLING_constant\n elif arg_2 == CRCSTD:\n return arg_0.CRCSTD_constant\n elif arg_2 == ROWLINSON_POLING:\n arg_3 = arg_0.Cpgm(arg_1) if hasattr(arg_0.Cpgm, '__call__') else arg_0.Cpgm\n return Rowlinson_Poling(arg_1, arg_0.Tc, arg_0.omega, arg_3)\n elif arg_2 == ROWLINSON_BONDI:\n arg_3 = arg_0.Cpgm(arg_1) if hasattr(arg_0.Cpgm, '__call__') else arg_0.Cpgm\n return Rowlinson_Bondi(arg_1, arg_0.Tc, arg_0.omega, arg_3)\n elif arg_2 == DADGOSTAR_SHAW:\n arg_4 = Dadgostar_Shaw(arg_1, arg_0.similarity_variable)\n return property_mass_to_molar(arg_4, arg_0.MW)\n elif arg_2 in arg_0.tabular_data:\n return arg_0.interpolate(arg_1, 
arg_2)\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5292", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Method to calculate the integral of a property with respect to\n temperature, using a specified method. Implements the \n analytical integrals of all available methods except for tabular data,\n the case of multiple coefficient sets needed to encompass the temperature\n range of any of the ZABRANSKY methods, and the CSP methods using the\n vapor phase properties.\n\n Parameters\n ----------\n T1 : float\n Lower limit of integration, [K]\n T2 : float\n Upper limit of integration, [K]\n method : str\n Method for which to find the integral\n\n Returns\n -------\n integral : float\n Calculated integral of the property over the given range, \n [`units*K`]\n '''\n if arg_3 == ZABRANSKY_SPLINE:\n return arg_0.Zabransky_spline.Func(arg_1, arg_2)\n elif arg_3 == ZABRANSKY_SPLINE_C:\n return arg_0.Zabransky_spline_iso.Func(arg_1, arg_2)\n elif arg_3 == ZABRANSKY_SPLINE_SAT:\n return arg_0.Zabransky_spline_sat.Func(arg_1, arg_2)\n elif arg_3 == ZABRANSKY_QUASIPOLYNOMIAL:\n return arg_0.Zabransky_quasipolynomial.Func(arg_1, arg_2)\n elif arg_3 == ZABRANSKY_QUASIPOLYNOMIAL_C:\n return arg_0.Zabransky_quasipolynomial_iso.Func(arg_1, arg_2)\n elif arg_3 == ZABRANSKY_QUASIPOLYNOMIAL_SAT:\n return arg_0.Zabransky_quasipolynomial_sat.Func(arg_1, arg_2)\n elif arg_3 == POLING_CONST:\n return (arg_2 - arg_1)*arg_0.POLING_constant\n elif arg_3 == CRCSTD:\n return (arg_2 - arg_1)*arg_0.CRCSTD_constant\n elif arg_3 == DADGOSTAR_SHAW:\n arg_4 = (Dadgostar_Shaw_integral(arg_2, arg_0.similarity_variable)\n - Dadgostar_Shaw_integral(arg_1, arg_0.similarity_variable))\n return property_mass_to_molar(arg_4, arg_0.MW)\n elif arg_3 in arg_0.tabular_data or arg_3 == COOLPROP or arg_3 in [ROWLINSON_POLING, ROWLINSON_BONDI]:\n return float(quad(arg_0.calculate, arg_1, arg_2, args=(arg_3))[0])\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5293", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func heat capacity of a liquid mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n Cplm : float\n Molar heat capacity of the liquid mixture at the given conditions,\n [J/mol]\n '''\n if arg_5 == SIMPLE:\n arg_6 = [i(arg_1) for i in arg_0.HeatCapacityLiquids]\n return mixing_simple(arg_3, arg_6)\n elif arg_5 == LALIBERTE:\n arg_4 = list(arg_4) ; arg_4.pop(arg_0.index_w)\n arg_7 = Laliberte_heat_capacity(arg_1, arg_4, arg_0.wCASs)\n arg_8 = mixing_simple(arg_3, arg_0.MWs)\n return property_mass_to_molar(arg_7, arg_8)\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5294", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func heat capacity of a solid mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which 
to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n Cpsm : float\n Molar heat capacity of the solid mixture at the given conditions, [J/mol]\n '''\n if arg_5 == SIMPLE:\n arg_6 = [i(arg_1) for i in arg_0.HeatCapacitySolids]\n return mixing_simple(arg_3, arg_6)\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5295", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Calculates the objective function of the Rachford-Rice flash equation.\n This function should be called by a solver seeking a solution to a flash\n calculation. The unknown variable is `V_over_F`, for which a solution\n must be between 0 and 1.\n\n .. math::\n \\sum_i \\frac{z_i(K_i-1)}{1 + \\frac{V}{F}(K_i-1)} = 0\n\n Parameters\n ----------\n V_over_F : float\n Vapor fraction guess [-]\n zs : list[float]\n Overall mole fractions of all species, [-]\n Ks : list[float]\n Equilibrium K-values, [-]\n\n Returns\n -------\n error : float\n Deviation between the objective function at the correct V_over_F\n and the attempted V_over_F, [-]\n\n Notes\n -----\n The derivation is as follows:\n\n .. math::\n F z_i = L x_i + V y_i\n\n x_i = \\frac{z_i}{1 + \\frac{V}{F}(K_i-1)}\n\n \\sum_i y_i = \\sum_i K_i x_i = 1\n\n \\sum_i(y_i - x_i)=0\n\n \\sum_i \\frac{z_i(K_i-1)}{1 + \\frac{V}{F}(K_i-1)} = 0\n\n Examples\n --------\n >>> Func(0.5, zs=[0.5, 0.3, 0.2],\n ... Ks=[1.685, 0.742, 0.532])\n 0.04406445591174976\n\n References\n ----------\n .. [1] Rachford, H. H. Jr, and J. D. Rice. \"Procedure for Use of Electronic\n Digital Computers in Calculating Flash Vaporization Hydrocarbon\n Equilibrium.\" Journal of Petroleum Technology 4, no. 10 (October 1,\n 1952): 19-3. doi:10.2118/952327-G.\n '''\n return sum([arg_4*(arg_3-1.)/(1.+arg_0*(arg_3-1.)) for arg_3, arg_4 in zip(arg_2, arg_1)])"} +{"_id": "doc_5296", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Calculates the activity coefficients of each species in a mixture\n using the Func method, given their mole fractions, and\n dimensionless interaction parameters. Those are normally correlated with\n temperature, and need to be calculated separately.\n\n .. math::\n \\ln \\gamma_i = 1 - \\ln \\left(\\sum_j^N \\Lambda_{ij} x_j\\right)\n -\\sum_j^N \\frac{\\Lambda_{ji}x_j}{\\displaystyle\\sum_k^N \\Lambda_{jk}x_k}\n\n Parameters\n ----------\n xs : list[float]\n Liquid mole fractions of each species, [-]\n params : list[list[float]]\n Dimensionless interaction parameters of each compound with each other,\n [-]\n\n Returns\n -------\n gammas : list[float]\n Activity coefficient for each species in the liquid mixture, [-]\n\n Notes\n -----\n This model needs N^2 parameters.\n\n The original model correlated the interaction parameters using the standard\n pure-component molar volumes of each species at 25\u00b0C, in the following form:\n\n .. math::\n \\Lambda_{ij} = \\frac{V_j}{V_i} \\exp\\left(\\frac{-\\lambda_{i,j}}{RT}\\right)\n\n However, that form has less flexibility and offered no advantage over\n using only regressed parameters.\n\n Most correlations for the interaction parameters include some of the terms\n shown in the following form:\n\n .. 
math::\n \\ln \\Lambda_{ij} =a_{ij}+\\frac{b_{ij}}{T}+c_{ij}\\ln T + d_{ij}T\n + \\frac{e_{ij}}{T^2} + h_{ij}{T^2}\n\n The Func model is not applicable to liquid-liquid systems.\n\n Examples\n --------\n Ethanol-water example, at 343.15 K and 1 MPa:\n\n >>> Func([0.252, 0.748], [[1, 0.154], [0.888, 1]])\n [1.8814926087178843, 1.1655774931125487]\n\n References\n ----------\n .. [1] Func, Grant M. \"Vapor-Liquid Equilibrium. XI. A New Expression for\n the Excess Free Energy of Mixing.\" Journal of the American Chemical\n Society 86, no. 2 (January 1, 1964): 127-130. doi:10.1021/ja01056a002.\n .. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.\n Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:\n Wiley-VCH, 2012.\n '''\n arg_2 = []\n arg_3 = range(len(arg_0))\n for arg_4 in arg_3:\n arg_5 = log(sum([arg_1[arg_4][arg_7]*arg_0[arg_7] for arg_7 in arg_3]))\n arg_6 = 0.\n for arg_7 in arg_3:\n arg_6 += arg_1[arg_7][arg_4]*arg_0[arg_7]/sum([arg_1[arg_7][arg_8]*arg_0[arg_8] for arg_8 in arg_3])\n\n arg_9 = exp(1. - arg_5 - arg_6)\n arg_2.append(arg_9)\n return arg_2"} +{"_id": "doc_5297", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=None):\n r'''Determines the phase of a one-species chemical system according to\n basic rules, using whatever information is available. Considers only the\n phases liquid, solid, and gas; does not consider two-phase\n scenarios, as should occurs between phase boundaries.\n\n * If the melting temperature is known and the temperature is under or equal\n to it, consider it a solid.\n * If the critical temperature is known and the temperature is greater or\n equal to it, consider it a gas.\n * If the vapor pressure at `T` is known and the pressure is under or equal\n to it, consider it a gas. If the pressure is greater than the vapor\n pressure, consider it a liquid.\n * If the melting temperature, critical temperature, and vapor pressure are\n not known, attempt to use the boiling point to provide phase information.\n If the pressure is between 90 kPa and 110 kPa (approximately normal),\n consider it a liquid if it is under the boiling temperature and a gas if\n above the boiling temperature.\n * If the pressure is above 110 kPa and the boiling temperature is known,\n consider it a liquid if the temperature is under the boiling temperature.\n * Return None otherwise.\n\n Parameters\n ----------\n T : float\n Temperature, [K]\n P : float\n Pressure, [Pa]\n Tm : float, optional\n Normal melting temperature, [K]\n Tb : float, optional\n Normal boiling point, [K]\n Tc : float, optional\n Critical temperature, [K]\n Psat : float, optional\n Vapor pressure of the fluid at `T`, [Pa]\n\n Returns\n -------\n phase : str\n Either 's', 'l', 'g', or None if the phase cannot be determined\n\n Notes\n -----\n No special attential is paid to any phase transition. 
For the case where\n the melting point is not provided, the possibility of the fluid being solid\n is simply ignored.\n\n Examples\n --------\n >>> Func(T=280, P=101325, Tm=273.15, Psat=991)\n 'l'\n '''\n if arg_2 and arg_0 <= arg_2:\n return 's'\n elif arg_4 and arg_0 >= arg_4:\n # No special return value for the critical point\n return 'g'\n elif arg_5:\n # Do not allow co-existence of phases; transition to 'l' directly under\n if arg_1 <= arg_5:\n return 'g'\n elif arg_1 > arg_5:\n return 'l'\n elif arg_3:\n # Crude attempt to model phases without Psat\n # Treat Tb as holding from 90 kPa to 110 kPa\n if 9E4 < arg_1 < 1.1E5:\n if arg_0 < arg_3:\n return 'l'\n else:\n return 'g'\n elif arg_1 > 1.1E5 and arg_0 <= arg_3:\n # For the higher-pressure case, it is definitely liquid if under Tb\n # Above the normal boiling point, impossible to say - return None\n return 'l'\n else:\n return None\n else:\n return None"} +{"_id": "doc_5298", "title": "", "text": "def Func(arg_0):\n r'''Charge of a chemical, computed with RDKit from a chemical's SMILES.\n If RDKit is not available, holds None.\n\n Examples\n --------\n >>> Chemical('sodium ion').Func\n 1\n '''\n try:\n if not arg_0.rdkitmol:\n return Func_from_formula(arg_0.formula)\n else:\n return Chem.GetFormalCharge(arg_0.rdkitmol)\n except:\n return Func_from_formula(arg_0.formula)"} +{"_id": "doc_5299", "title": "", "text": "def Func(arg_0):\n r'''RDKit object of the chemical, without hydrogen. If RDKit is not\n available, holds None.\n\n For examples of what can be done with RDKit, see\n `their website `_.\n '''\n if arg_0.__Func:\n return arg_0.__Func\n else:\n try:\n arg_0.__Func = Chem.MolFromSmiles(arg_0.smiles)\n return arg_0.__Func\n except:\n return None"} +{"_id": "doc_5300", "title": "", "text": "def Func(arg_0):\n r'''RDKit object of the chemical, with hydrogen. If RDKit is not\n available, holds None.\n\n For examples of what can be done with RDKit, see\n `their website `_.\n '''\n if arg_0.__Func:\n return arg_0.__Func\n else:\n try:\n arg_0.__Func = Chem.AddHs(arg_0.rdkitmol)\n return arg_0.__Func\n except:\n return None"} +{"_id": "doc_5301", "title": "", "text": "def Func(arg_0):\n r'''Dictionary of legal status indicators for the chemical.\n\n Examples\n --------\n >>> pprint(Chemical('benzene').Func)\n {'DSL': 'LISTED',\n 'EINECS': 'LISTED',\n 'NLP': 'UNLISTED',\n 'SPIN': 'LISTED',\n 'TSCA': 'LISTED'}\n '''\n if arg_0.__Func:\n return arg_0.__Func\n else:\n arg_0.__Func = Func(arg_0.CAS, Method='COMBINED')\n return arg_0.__Func"} +{"_id": "doc_5302", "title": "", "text": "def Func(arg_0):\n r'''Dictionary of economic status indicators for the chemical.\n\n Examples\n --------\n >>> pprint(Chemical('benzene').Func)\n [\"US public: {'Manufactured': 6165232.1, 'Imported': 463146.474, 'Exported': 271908.252}\",\n u'1,000,000 - 10,000,000 tonnes per annum',\n u'Intermediate Use Only',\n 'OECD HPV Chemicals']\n '''\n if arg_0.__Func:\n return arg_0.__Func\n else:\n arg_0.__Func = Func(arg_0.CAS, Method='Combined')\n return arg_0.__Func"} +{"_id": "doc_5303", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n r'''This function handles the retrieval of a chemical's Global Warming\n Potential, relative to CO2. Lookup is based on CASRNs. 
Will automatically\n select a data source to use if no Method is provided; returns None if the\n data is not available.\n\n Returns the Func for the 100yr outlook by default.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Global warming potential, [(impact/mass chemical)/(impact/mass CO2)]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Accepted methods are IPCC (2007) 100yr',\n 'IPCC (2007) 100yr-SAR', 'IPCC (2007) 20yr', and 'IPCC (2007) 500yr'. \n All valid values are also held in the list Func_methods.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n the Func for the desired chemical, and will return methods\n instead of the Func\n\n Notes\n -----\n All data is from [1]_, the official source. Several chemicals are available\n in [1]_ are not included here as they do not have a CAS.\n Methods are 'IPCC (2007) 100yr', 'IPCC (2007) 100yr-SAR',\n 'IPCC (2007) 20yr', and 'IPCC (2007) 500yr'.\n\n Examples\n --------\n Methane, 100-yr outlook\n\n >>> Func(CASRN='74-82-8')\n 25.0\n\n References\n ----------\n .. [1] IPCC. \"2.10.2 Direct Global Warming Potentials - AR4 WGI Chapter 2:\n Changes in Atmospheric Constituents and in Radiative Forcing.\" 2007.\n https://www.ipcc.ch/publications_and_data/ar4/wg1/en/ch2s2-10-2.html.\n '''\n def list_methods():\n arg_3 = []\n if arg_0 in Func_data.index:\n arg_3.append(IPCC100)\n if not pd.isnull(Func_data.at[arg_0, 'SAR 100yr']):\n arg_3.append(IPCC100SAR)\n arg_3.append(IPCC20)\n arg_3.append(IPCC500)\n arg_3.append(NONE)\n return arg_3\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == IPCC100:\n return float(Func_data.at[arg_0, '100yr Func'])\n elif arg_2 == IPCC100SAR:\n return float(Func_data.at[arg_0, 'SAR 100yr'])\n elif arg_2 == IPCC20:\n return float(Func_data.at[arg_0, '20yr Func'])\n elif arg_2 == IPCC500:\n return float(Func_data.at[arg_0, '500yr Func'])\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in in function')"} +{"_id": "doc_5304", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func vapor pressure of a fluid at temperature `T`\n with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at Func vapor pressure, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n Psat : float\n Vapor pressure at T, [pa]\n '''\n if arg_2 == WAGNER_MCGARRY:\n arg_3 = Wagner_original(arg_1, arg_0.WAGNER_MCGARRY_Tc, arg_0.WAGNER_MCGARRY_Pc, *arg_0.WAGNER_MCGARRY_coefs)\n elif arg_2 == WAGNER_POLING:\n arg_3 = Wagner(arg_1, arg_0.WAGNER_POLING_Tc, arg_0.WAGNER_POLING_Pc, *arg_0.WAGNER_POLING_coefs)\n elif arg_2 == ANTOINE_EXTENDED_POLING:\n arg_3 = TRC_Antoine_extended(arg_1, *arg_0.ANTOINE_EXTENDED_POLING_coefs)\n elif arg_2 == ANTOINE_POLING:\n arg_4, arg_5, arg_6 = arg_0.ANTOINE_POLING_coefs\n arg_3 = Antoine(arg_1, arg_4, arg_5, arg_6, base=10.0)\n elif arg_2 == DIPPR_PERRY_8E:\n arg_3 = EQ101(arg_1, *arg_0.Perrys2_8_coeffs)\n elif arg_2 == VDI_PPDS:\n arg_3 = Wagner(arg_1, arg_0.VDI_PPDS_Tc, arg_0.VDI_PPDS_Pc, *arg_0.VDI_PPDS_coeffs)\n elif arg_2 == COOLPROP:\n arg_3 = PropsSI('P','T', arg_1,'Q',0, arg_0.CASRN)\n elif arg_2 == BOILING_CRITICAL:\n arg_3 = 
boiling_critical_relation(arg_1, arg_0.Tb, arg_0.Tc, arg_0.Pc)\n elif arg_2 == LEE_KESLER_PSAT:\n arg_3 = Lee_Kesler(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n elif arg_2 == AMBROSE_WALTON:\n arg_3 = Ambrose_Walton(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n elif arg_2 == SANJARI:\n arg_3 = Sanjari(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n elif arg_2 == EDALAT:\n arg_3 = Edalat(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n elif arg_2 == EOS:\n arg_3 = arg_0.eos[0].Psat(arg_1)\n elif arg_2 in arg_0.tabular_data:\n arg_3 = arg_0.interpolate(arg_1, arg_2)\n return arg_3"} +{"_id": "doc_5305", "title": "", "text": "def Func(arg_0, arg_1):\n '''Counts the number of real volumes in `Vs`, and determines what to do.\n If there is only one real volume, the method \n `set_properties_from_solution` is called with it. If there are\n two real volumes, `set_properties_from_solution` is called once with \n each volume. The phase is returned by `set_properties_from_solution`, \n and the volumes is set to either `V_l` or `V_g` as appropriate. \n\n Parameters\n ----------\n Vs : list[float]\n Three possible molar volumes, [m^3/mol]\n '''\n # All roots will have some imaginary component; ignore them if > 1E-9\n arg_2 = []\n arg_3 = []\n for arg_4 in arg_1:\n arg_5 = arg_4.real\n if abs(arg_4.imag) > 1E-9 or arg_5 < 0:\n arg_3.append(arg_4)\n else:\n arg_2.append(arg_5)\n \n if len(arg_3) == 2: \n arg_6 = arg_2[0]\n arg_0.phase = arg_0.set_properties_from_solution(arg_0.T, arg_0.P, arg_6, arg_0.b, arg_0.delta, arg_0.epsilon, arg_0.a_alpha, arg_0.da_alpha_dT, arg_0.d2a_alpha_dT2)\n if arg_0.phase == 'l':\n arg_0.V_l = arg_6\n else:\n arg_0.V_g = arg_6\n else:\n # Even in the case of three real roots, it is still the min/max that make sense\n arg_0.V_l, arg_0.V_g = min(arg_2), max(arg_2)\n [arg_0.set_properties_from_solution(arg_0.T, arg_0.P, arg_6, arg_0.b, arg_0.delta, arg_0.epsilon, arg_0.a_alpha, arg_0.da_alpha_dT, arg_0.d2a_alpha_dT2) for arg_6 in [arg_0.V_l, arg_0.V_g]]\n arg_0.phase = 'l/g'"} +{"_id": "doc_5306", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n '''Generic method to calculate `T` from a specified `P` and `V`.\n Provides SciPy's `newton` solver, and iterates to solve the general\n equation for `P`, recalculating `a_alpha` as a function of temperature\n using `a_alpha_and_derivatives` each iteration.\n\n Parameters\n ----------\n P : float\n Pressure, [Pa]\n V : float\n Molar volume, [m^3/mol]\n quick : bool, optional\n Whether to use a SymPy cse-derived expression (3x faster) or \n individual formulas - not applicable where a numerical solver is\n used.\n\n Returns\n -------\n T : float\n Temperature, [K]\n '''\n def to_solve(arg_4):\n arg_5 = arg_0.a_alpha_and_derivatives(arg_4, full=False)\n arg_6 = R*arg_4/(arg_2-arg_0.b) - arg_5/(arg_2*arg_2 + arg_0.delta*arg_2 + arg_0.epsilon)\n return arg_6 - arg_1\n return newton(to_solve, arg_0.Tc*0.5)"} +{"_id": "doc_5307", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=True):\n r'''Method to calculate `a_alpha` and its first and second\n derivatives for this EOS. Returns `a_alpha`, `da_alpha_dT`, and \n `d2a_alpha_dT2`. See `GCEOS.Func` for more \n documentation. Uses the set values of `Tc`, `kappa`, and `a`. \n \n For use in `solve_T`, returns only `a_alpha` if full is False.\n\n .. 
math::\n a\\alpha = a \\left(\\kappa \\left(- \\frac{T^{0.5}}{Tc^{0.5}} \n + 1\\right) + 1\\right)^{2}\n \n \\frac{d a\\alpha}{dT} = - \\frac{1.0 a \\kappa}{T^{0.5} Tc^{0.5}}\n \\left(\\kappa \\left(- \\frac{T^{0.5}}{Tc^{0.5}} + 1\\right) + 1\\right)\n\n \\frac{d^2 a\\alpha}{dT^2} = 0.5 a \\kappa \\left(- \\frac{1}{T^{1.5} \n Tc^{0.5}} \\left(\\kappa \\left(\\frac{T^{0.5}}{Tc^{0.5}} - 1\\right)\n - 1\\right) + \\frac{\\kappa}{T^{1.0} Tc^{1.0}}\\right)\n '''\n if not arg_2:\n return arg_0.a*(1 + arg_0.kappa*(1-(arg_1/arg_0.Tc)**0.5))**2\n else:\n if arg_3:\n arg_4, arg_5 = arg_0.Tc, arg_0.kappa\n arg_6 = arg_1**0.5\n arg_7 = arg_4**-0.5\n arg_8 = arg_5*(arg_6*arg_7 - 1.) - 1.\n arg_9 = arg_0.a*arg_5\n \n arg_10 = arg_0.a*arg_8*arg_8\n arg_11 = arg_7*arg_8*arg_9/arg_6\n arg_12 = arg_9*(-0.5*arg_1**-1.5*arg_7*arg_8 + 0.5/(arg_1*arg_4)*arg_5)\n else:\n arg_10 = arg_0.a*(1 + arg_0.kappa*(1-(arg_1/arg_0.Tc)**0.5))**2\n arg_11 = -arg_0.a*arg_0.kappa*sqrt(arg_1/arg_0.Tc)*(arg_0.kappa*(-sqrt(arg_1/arg_0.Tc) + 1.) + 1.)/arg_1\n arg_12 = arg_0.a*arg_0.kappa*(arg_0.kappa/arg_0.Tc - sqrt(arg_1/arg_0.Tc)*(arg_0.kappa*(sqrt(arg_1/arg_0.Tc) - 1.) - 1.)/arg_1)/(2.*arg_1)\n return arg_10, arg_11, arg_12"} +{"_id": "doc_5308", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n r'''Method to calculate `T` from a specified `P` and `V` for the PRSV\n EOS. Uses `Tc`, `a`, `b`, `kappa0` and `kappa` as well, obtained from \n the class's namespace.\n\n Parameters\n ----------\n P : float\n Pressure, [Pa]\n V : float\n Molar volume, [m^3/mol]\n quick : bool, optional\n Whether to use a SymPy cse-derived expression (somewhat faster) or \n individual formulas.\n\n Returns\n -------\n T : float\n Temperature, [K]\n \n Notes\n -----\n Not guaranteed to produce a solution. There are actually two solution,\n one much higher than normally desired; it is possible the solver could\n converge on this. \n '''\n arg_4, arg_5, arg_6, arg_7, arg_8 = arg_0.Tc, arg_0.a, arg_0.b, arg_0.kappa0, arg_0.kappa1\n if arg_3:\n arg_9 = arg_2 - arg_6\n arg_10 = R/arg_9\n arg_11 = (100.*(arg_2*(arg_2 + arg_6) + arg_6*arg_9))\n arg_12 = 10.*arg_7\n arg_13 = arg_8*10.\n arg_14 = arg_8*7.\n def to_solve(arg_15):\n arg_16 = arg_15/arg_4\n arg_17 = arg_16**0.5\n return (arg_15*arg_10 - arg_5*((arg_12 - (arg_13*arg_16 - arg_14)*(arg_17 + 1.))*(arg_17 - 1.) - 10.)**2/arg_11) - arg_1\n else:\n def to_solve(arg_15):\n arg_18 = R*arg_15/(arg_2 - arg_6) - arg_5*((arg_7 + arg_8*(sqrt(arg_15/arg_4) + 1)*(-arg_15/arg_4 + 7/10))*(-sqrt(arg_15/arg_4) + 1) + 1)**2/(arg_2*(arg_2 + arg_6) + arg_6*(arg_2 - arg_6))\n return arg_18 - arg_1\n return newton(to_solve, arg_4*0.5)"} +{"_id": "doc_5309", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to calculate `T` from a specified `P` and `V` for the VDW\n EOS. Uses `a`, and `b`, obtained from the class's namespace.\n\n .. math::\n T = \\frac{1}{R V^{2}} \\left(P V^{2} \\left(V - b\\right)\n + V a - a b\\right)\n\n Parameters\n ----------\n P : float\n Pressure, [Pa]\n V : float\n Molar volume, [m^3/mol]\n\n Returns\n -------\n T : float\n Temperature, [K]\n '''\n return (arg_1*arg_2**2*(arg_2 - arg_0.b) + arg_2*arg_0.a - arg_0.a*arg_0.b)/(R*arg_2**2)"} +{"_id": "doc_5310", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n r'''Method to calculate `T` from a specified `P` and `V` for the RK\n EOS. 
Uses `a`, and `b`, obtained from the class's namespace.\n\n Parameters\n ----------\n P : float\n Pressure, [Pa]\n V : float\n Molar volume, [m^3/mol]\n quick : bool, optional\n Whether to use a SymPy cse-derived expression (3x faster) or \n individual formulas\n\n Returns\n -------\n T : float\n Temperature, [K]\n\n Notes\n -----\n The exact solution can be derived as follows; it is excluded for \n breviety.\n \n >>> from sympy import *\n >>> P, T, V, R = symbols('P, T, V, R')\n >>> Tc, Pc = symbols('Tc, Pc')\n >>> a, b = symbols('a, b')\n\n >>> RK = Eq(P, R*T/(V-b) - a/sqrt(T)/(V*V + b*V))\n >>> # solve(RK, T)\n '''\n arg_4, arg_5 = arg_0.a, arg_0.b\n if arg_3:\n arg_6 = -1.j*1.7320508075688772 + 1.\n arg_7 = arg_2 - arg_5\n arg_8 = arg_7/R\n arg_9 = arg_2 + arg_5\n arg_10 = (1.7320508075688772*(arg_7*arg_7*(-4.*arg_1*arg_1*arg_1*arg_8 + 27.*arg_4*arg_4/(arg_2*arg_2*arg_9*arg_9))/(R*R))**0.5 - 9.*arg_4*arg_8/(arg_2*arg_9) +0j)**(1./3.)\n return (3.3019272488946263*(11.537996562459266*arg_1*arg_8/(arg_6*arg_10) + 1.2599210498948732*arg_6*arg_10)**2/144.0).real\n else:\n return ((-(-1/2 + sqrt(3)*1j/2)*(sqrt(729*(-arg_2*arg_4 + arg_4*arg_5)**2/(R*arg_2**2 + R*arg_2*arg_5)**2 + 108*(-arg_1*arg_2 + arg_1*arg_5)**3/R**3)/2 + 27*(-arg_2*arg_4 + arg_4*arg_5)/(2*(R*arg_2**2 + R*arg_2*arg_5))+0j)**(1/3)/3 + (-arg_1*arg_2 + arg_1*arg_5)/(R*(-1/2 + sqrt(3)*1j/2)*(sqrt(729*(-arg_2*arg_4 + arg_4*arg_5)**2/(R*arg_2**2 + R*arg_2*arg_5)**2 + 108*(-arg_1*arg_2 + arg_1*arg_5)**3/R**3)/2 + 27*(-arg_2*arg_4 + arg_4*arg_5)/(2*(R*arg_2**2 + R*arg_2*arg_5))+0j)**(1/3)))**2).real"} +{"_id": "doc_5311", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n r'''Method to calculate `T` from a specified `P` and `V` for the API \n SRK EOS. Uses `a`, `b`, and `Tc` obtained from the class's namespace.\n\n Parameters\n ----------\n P : float\n Pressure, [Pa]\n V : float\n Molar volume, [m^3/mol]\n quick : bool, optional\n Whether to use a SymPy cse-derived expression (3x faster) or \n individual formulas\n\n Returns\n -------\n T : float\n Temperature, [K]\n\n Notes\n -----\n If S2 is set to 0, the solution is the same as in the SRK EOS, and that\n is used. Otherwise, newton's method must be used to solve for `T`. \n There are 8 roots of T in that case, six of them real. No guarantee can\n be made regarding which root will be obtained.\n '''\n if arg_0.S2 == 0:\n arg_0.m = arg_0.S1\n return SRK.Func(arg_0, arg_1, arg_2, arg_3=arg_3)\n else:\n # Previously coded method is 63 microseconds vs 47 here\n# return super(SRK, self).Func(P, V, quick=quick) \n arg_5, arg_6, arg_7, arg_8, arg_9 = arg_0.Tc, arg_0.a, arg_0.b, arg_0.S1, arg_0.S2\n if arg_3:\n arg_10 = R/(arg_2-arg_7)\n arg_11 = (arg_2*(arg_2 + arg_7))\n def to_solve(arg_12):\n arg_13 = (arg_12/arg_5)**0.5\n arg_14 = arg_13 - 1.\n return (arg_10*arg_12 - arg_6*(arg_8*arg_14 + arg_9*arg_14/arg_13 - 1.)**2/arg_11) - arg_1\n else:\n def to_solve(arg_12):\n arg_15 = R*arg_12/(arg_2 - arg_7) - arg_6*(arg_8*(-sqrt(arg_12/arg_5) + 1) + arg_9*(-sqrt(arg_12/arg_5) + 1)/sqrt(arg_12/arg_5) + 1)**2/(arg_2*(arg_2 + arg_7))\n return arg_15 - arg_1\n return newton(to_solve, arg_5*0.5)"} +{"_id": "doc_5312", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=True):\n r'''Method to calculate `a_alpha` and its first and second\n derivatives for this EOS. Returns `a_alpha`, `da_alpha_dT`, and \n `d2a_alpha_dT2`. See `GCEOS.Func` for more \n documentation. 
Uses the set values of `Tc`, `omega`, and `a`.\n \n Because of its similarity for the TWUPR EOS, this has been moved to an \n external `TWU_a_alpha_common` function. See it for further \n documentation.\n '''\n return TWU_a_alpha_common(arg_1, arg_0.Tc, arg_0.omega, arg_0.a, arg_2=arg_2, arg_3=arg_3, method='SRK')"} +{"_id": "doc_5313", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=[arg_4]):\n r'''This function handles the retrieval of a chemical's boiling\n point. Lookup is based on CASRNs. Will automatically select a data\n source to use if no Method is provided; returns None if the data is not\n available.\n\n Preferred sources are 'CRC Physical Constants, organic' for organic\n chemicals, and 'CRC Physical Constants, inorganic' for inorganic\n chemicals. Function has data for approximately 13000 chemicals.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Boiling temperature, [K]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead of Func\n IgnoreMethods : list, optional\n A list of methods to ignore in obtaining the full list of methods,\n useful for performance reasons and ignoring inaccurate methods\n\n Notes\n -----\n A total of four methods are available for this function. They are:\n\n * 'CRC_ORG', a compilation of data on organics\n as published in [1]_.\n * 'CRC_INORG', a compilation of data on\n inorganics as published in [1]_.\n * 'YAWS', a large compilation of data from a\n variety of sources; no data points are sourced in the work of [2]_.\n * 'PSAT_DEFINITION', calculation of boiling point from a\n vapor pressure calculation. This is normally off by a fraction of a\n degree even in the best cases. Listed in IgnoreMethods by default\n for performance reasons.\n\n Examples\n --------\n >>> Func('7732-18-5')\n 373.124\n\n References\n ----------\n .. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n .. [2] Yaws, Carl L. Thermophysical Properties of Chemicals and\n Hydrocarbons, Second Edition. 
Amsterdam Boston: Gulf Professional\n Publishing, 2014.\n '''\n def list_methods():\n arg_5 = []\n if arg_0 in CRC_inorganic_data.index and not np.isnan(CRC_inorganic_data.at[arg_0, 'Func']):\n arg_5.append(CRC_INORG)\n if arg_0 in CRC_organic_data.index and not np.isnan(CRC_organic_data.at[arg_0, 'Func']):\n arg_5.append(CRC_ORG)\n if arg_0 in Yaws_data.index:\n arg_5.append(YAWS)\n if arg_4 not in arg_3:\n try:\n # For some chemicals, vapor pressure range will exclude Func\n VaporPressure(arg_0=arg_0).solve_prop(101325.)\n arg_5.append(arg_4)\n except: # pragma: no cover\n pass\n if arg_3:\n for arg_2 in arg_3:\n if arg_2 in arg_5:\n arg_5.remove(arg_2)\n arg_5.append(NONE)\n return arg_5\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == CRC_INORG:\n return float(CRC_inorganic_data.at[arg_0, 'Func'])\n elif arg_2 == CRC_ORG:\n return float(CRC_organic_data.at[arg_0, 'Func'])\n elif arg_2 == YAWS:\n return float(Yaws_data.at[arg_0, 'Func'])\n elif arg_2 == arg_4:\n return VaporPressure(arg_0=arg_0).solve_prop(101325.)\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in in function')"} +{"_id": "doc_5314", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=[]):\n r'''This function handles the retrieval of a chemical's melting\n point. Lookup is based on CASRNs. Will automatically select a data\n source to use if no Method is provided; returns None if the data is not\n available.\n\n Preferred sources are 'Open Notebook Melting Points', with backup sources\n 'CRC Physical Constants, organic' for organic chemicals, and\n 'CRC Physical Constants, inorganic' for inorganic chemicals. Function has\n data for approximately 14000 chemicals.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Melting temperature, [K]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead of Func\n IgnoreMethods : list, optional\n A list of methods to ignore in obtaining the full list of methods\n\n Notes\n -----\n A total of three sources are available for this function. They are:\n\n * 'OPEN_NTBKM', a compilation of data on organics\n as published in [1]_ as Open Notebook Melting Points; Averaged \n (median) values were used when\n multiple points were available. For more information on this\n invaluable and excellent collection, see\n http://onswebservices.wikispaces.com/meltingpoint.\n * 'CRC_ORG', a compilation of data on organics\n as published in [2]_.\n * 'CRC_INORG', a compilation of data on\n inorganics as published in [2]_.\n\n Examples\n --------\n >>> Func(CASRN='7732-18-5')\n 273.15\n\n References\n ----------\n .. [1] Bradley, Jean-Claude, Antony Williams, and Andrew Lang.\n \"Jean-Claude Bradley Open Melting Point Dataset\", May 20, 2014.\n https://figshare.com/articles/Jean_Claude_Bradley_Open_Melting_Point_Datset/1031637.\n .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n Chemistry and Physics, 95E. 
Boca Raton, FL: CRC press, 2014.\n '''\n def list_methods():\n arg_4 = []\n if arg_0 in Func_ON_data.index:\n arg_4.append(OPEN_NTBKM)\n if arg_0 in CRC_inorganic_data.index and not np.isnan(CRC_inorganic_data.at[arg_0, 'Func']):\n arg_4.append(CRC_INORG)\n if arg_0 in CRC_organic_data.index and not np.isnan(CRC_organic_data.at[arg_0, 'Func']):\n arg_4.append(CRC_ORG)\n if arg_3:\n for arg_2 in arg_3:\n if arg_2 in arg_4:\n arg_4.remove(arg_2)\n arg_4.append(NONE)\n return arg_4\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == OPEN_NTBKM:\n return float(Func_ON_data.at[arg_0, 'Func'])\n elif arg_2 == CRC_INORG:\n return float(CRC_inorganic_data.at[arg_0, 'Func'])\n elif arg_2 == CRC_ORG:\n return float(CRC_organic_data.at[arg_0, 'Func'])\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in in function')"} +{"_id": "doc_5315", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1, arg_4=101325):\n r'''Calculates enthalpy of vaporization at arbitrary temperatures using the\n Func equation.\n\n The enthalpy of vaporization is given by:\n\n .. math::\n \\Delta H_{vap} = RT \\Delta Z \\frac{\\ln (P_c/Psat)}{(1-T_{r})}\n\n Parameters\n ----------\n T : float\n Temperature of fluid [K]\n Tc : float\n Critical temperature of fluid [K]\n Pc : float\n Critical pressure of fluid [Pa]\n dZ : float\n Change in compressibility factor between liquid and gas, []\n Psat : float\n Saturation pressure of fluid [Pa], optional\n\n Returns\n -------\n Hvap : float\n Enthalpy of vaporization, [J/mol]\n\n Notes\n -----\n No original source is available for this equation.\n [1]_ claims this equation overpredicts enthalpy by several percent.\n Under Tr = 0.8, dZ = 1 is a reasonable assumption.\n This equation is most accurate at the normal boiling point.\n\n Internal units are bar.\n\n WARNING: I believe it possible that the adjustment for pressure may be incorrect\n\n Examples\n --------\n Problem from Perry's examples.\n\n >>> Func(T=294.0, Tc=466.0, Pc=5.55E6)\n 26512.354585061985\n\n References\n ----------\n .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n New York: McGraw-Hill Professional, 2000.\n '''\n arg_5 = arg_0/arg_1\n return R*arg_0*arg_3*log(arg_2/arg_4)/(1. 
- arg_5)"} +{"_id": "doc_5316", "title": "", "text": "def Func(arg_0=298.15, arg_1=101325, arg_2=None, arg_3=False, arg_4=None, arg_5=''): # pragma: no cover\n '''This function handles the calculation of a chemical's enthalpy of fusion.\n Generally this, is used by the chemical class, as all parameters are passed.\n Calling the function directly works okay.\n\n Enthalpy of fusion is a weak function of pressure, and its effects are\n neglected.\n\n This API is considered experimental, and is expected to be removed in a\n future release in favor of a more complete object-oriented interface.\n\n '''\n def list_methods():\n arg_6 = []\n if arg_5 in CRCFunc_data.index:\n arg_6.append('CRC, at melting point')\n arg_6.append('None')\n return arg_6\n if arg_3:\n return list_methods()\n if not arg_4:\n arg_4 = list_methods()[0]\n # This is the calculate, given the method section\n if arg_4 == 'CRC, at melting point':\n arg_7 = CRCFunc_data.at[arg_5, 'Func']\n elif arg_4 == 'None' or not arg_2:\n arg_7 = None\n else:\n raise Exception('Failure in in function')\n arg_7 = property_molar_to_mass(arg_7, arg_2)\n return arg_7"} +{"_id": "doc_5317", "title": "", "text": "def Func(arg_0=298.15, arg_1=101325, arg_2=None, arg_3=False, arg_4=None, arg_5=''): # pragma: no cover\n '''This function handles the calculation of a chemical's enthalpy of sublimation.\n Generally this, is used by the chemical class, as all parameters are passed.\n\n\n This API is considered experimental, and is expected to be removed in a\n future release in favor of a more complete object-oriented interface.\n '''\n def list_methods():\n arg_6 = []\n# if Hfus(T=T, P=P, MW=MW, CASRN=CASRN) and Hvap(T=T, P=P, MW=MW, CASRN=CASRN):\n# methods.append('Hfus + Hvap')\n if arg_5 in GharagheiziFunc_data.index:\n arg_6.append('Ghazerati Appendix, at 298K')\n arg_6.append('None')\n return arg_6\n if arg_3:\n return list_methods()\n if not arg_4:\n arg_4 = list_methods()[0]\n # This is the calculate, given the method section\n# if Method == 'Hfus + Hvap':\n# p1 = Hfus(T=T, P=P, MW=MW, CASRN=CASRN)\n# p2 = Hvap(T=T, P=P, MW=MW, CASRN=CASRN)\n# if p1 and p2:\n# _Func = p1 + p2\n# else:\n# _Func = None\n if arg_4 == 'Ghazerati Appendix, at 298K':\n arg_7 = float(GharagheiziFunc_data.at[arg_5, 'Func'])\n elif arg_4 == 'None' or not arg_7 or not arg_2:\n return None\n else:\n raise Exception('Failure in in function')\n arg_7 = property_molar_to_mass(arg_7, arg_2)\n return arg_7"} +{"_id": "doc_5318", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=False,\n arg_5=None): # pragma: no cover\n '''This function handles the retrival of a mixtures's liquidus point.\n\n This API is considered experimental, and is expected to be removed in a\n future release in favor of a more complete object-oriented interface.\n\n >>> Func(Tms=[250.0, 350.0], xs=[0.5, 0.5])\n 350.0\n >>> Func(Tms=[250, 350], xs=[0.5, 0.5], Method='Simple')\n 300.0\n >>> Func(Tms=[250, 350], xs=[0.5, 0.5], AvailableMethods=True)\n ['Maximum', 'Simple', 'None']\n '''\n def list_methods():\n arg_6 = []\n if none_and_length_check([arg_0]):\n arg_6.append('Maximum')\n arg_6.append('Simple')\n arg_6.append('None')\n return arg_6\n if arg_4:\n return list_methods()\n if not arg_5:\n arg_5 = list_methods()[0]\n # This is the calculate, given the method section\n if arg_5 == 'Maximum':\n arg_7 = max(arg_0)\n elif arg_5 == 'Simple':\n arg_7 = mixing_simple(arg_2, arg_0)\n elif arg_5 == 'None':\n return None\n else:\n raise Exception('Failure in in function')\n 
return arg_7"} +{"_id": "doc_5319", "title": "", "text": "def Func(arg_0=298.15, arg_1=None, arg_2=None,\n arg_3='', arg_4=False, arg_5=None):\n r'''This function handles the calculation of a chemical's solubility\n parameter. Calculation is a function of temperature, but is not always\n presented as such. No lookup values are available; either `Hvapm`, `Vml`,\n and `T` are provided or the calculation cannot be performed.\n\n .. math::\n \\delta = \\sqrt{\\frac{\\Delta H_{vap} - RT}{V_m}}\n\n Parameters\n ----------\n T : float\n Temperature of the fluid [k]\n Hvapm : float\n Heat of vaporization [J/mol/K]\n Vml : float\n Specific volume of the liquid [m^3/mol]\n CASRN : str, optional\n CASRN of the fluid, not currently used [-]\n\n Returns\n -------\n delta : float\n Solubility parameter, [Pa^0.5]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain the solubility parameter\n with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n the solubility parameter for the desired chemical, and will return\n methods instead of the solubility parameter\n\n Notes\n -----\n Undefined past the critical point. For convenience, if Hvap is not defined,\n an error is not raised; None is returned instead. Also for convenience,\n if Hvapm is less than RT, None is returned to avoid taking the root of a\n negative number.\n\n This parameter is often given in units of cal/ml, which is 2045.48 times\n smaller than the value returned here.\n\n Examples\n --------\n Pentane at STP\n\n >>> Func(T=298.2, Hvapm=26403.3, Vml=0.000116055)\n 14357.681538173534\n\n References\n ----------\n .. [1] Barton, Allan F. M. CRC Handbook of Solubility Parameters and Other\n Cohesion Parameters, Second Edition. CRC Press, 1991.\n '''\n def list_methods():\n arg_6 = []\n if arg_0 and arg_1 and arg_2:\n arg_6.append(DEFINITION)\n arg_6.append(NONE)\n return arg_6\n if arg_4:\n return list_methods()\n if not arg_5:\n arg_5 = list_methods()[0]\n\n if arg_5 == DEFINITION:\n if (not arg_1) or (not arg_0) or (not arg_2):\n arg_7 = None\n else:\n if arg_1 < R*arg_0 or arg_2 < 0: # Prevent taking the root of a negative number\n arg_7 = None\n else:\n arg_7 = ((arg_1 - R*arg_0)/arg_2)**0.5\n elif arg_5 == NONE:\n arg_7 = None\n else:\n raise Exception('Failure in in function')\n return arg_7"} +{"_id": "doc_5320", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0, arg_4=0, arg_5=1):\n r'''Returns the maximum solubility of a solute in a solvent.\n\n .. 
math::\n \\ln x_i^L \\gamma_i^L = \\frac{\\Delta H_{m,i}}{RT}\\left(\n 1 - \\frac{T}{T_{m,i}}\\right) - \\frac{\\Delta C_{p,i}(T_{m,i}-T)}{RT}\n + \\frac{\\Delta C_{p,i}}{R}\\ln\\frac{T_m}{T}\n\n \\Delta C_{p,i} = C_{p,i}^L - C_{p,i}^S\n\n Parameters\n ----------\n T : float\n Temperature of the system [K]\n Tm : float\n Melting temperature of the solute [K]\n Hm : float\n Heat of melting at the melting temperature of the solute [J/mol]\n Cpl : float, optional\n Molar heat capacity of the solute as a liquid [J/mol/K]\n Cpls: float, optional\n Molar heat capacity of the solute as a solid [J/mol/K]\n gamma : float, optional\n Activity coefficient of the solute as a liquid [-]\n\n Returns\n -------\n x : float\n Mole fraction of solute at maximum solubility [-]\n\n Notes\n -----\n gamma is of the solute in liquid phase\n\n Examples\n --------\n From [1]_, matching example\n\n >>> Func(T=260., Tm=278.68, Hm=9952., Cpl=0, Cps=0, gamma=3.0176)\n 0.24340068761677464\n\n References\n ----------\n .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.\n Weinheim, Germany: Wiley-VCH, 2012.\n '''\n arg_6 = arg_3-arg_4\n arg_7 = exp(- arg_2/R/arg_0*(1-arg_0/arg_1) + arg_6*(arg_1-arg_0)/R/arg_0 - arg_6/R*log(arg_1/arg_0))/arg_5\n return arg_7"} +{"_id": "doc_5321", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n r'''Returns the freezing point depression caused by a solute in a solvent.\n Can use either the mole fraction of the solute or its molality and the\n molecular weight of the solvent. Assumes ideal system behavior.\n\n .. math::\n \\Delta T_m = \\frac{R T_m^2 x}{\\Delta H_m}\n\n \\Delta T_m = \\frac{R T_m^2 (MW) M}{1000 \\Delta H_m}\n\n Parameters\n ----------\n Tm : float\n Melting temperature of the solute [K]\n Hm : float\n Heat of melting at the melting temperature of the solute [J/mol]\n x : float, optional\n Mole fraction of the solute [-]\n M : float, optional\n Molality [mol/kg]\n MW: float, optional\n Molecular weight of the solvent [g/mol]\n\n Returns\n -------\n dTm : float\n Freezing point depression [K]\n\n Notes\n -----\n MW is the molecular weight of the solvent. M is the molality of the solute.\n\n Examples\n --------\n From [1]_, matching example.\n\n >>> Func(353.35, 19110, .02)\n 1.0864594900639515\n\n References\n ----------\n .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.\n Weinheim, Germany: Wiley-VCH, 2012.\n '''\n if arg_2:\n arg_5 = R*arg_0**2*arg_2/arg_1\n elif arg_3 and arg_4:\n arg_4 = arg_4/1000. #g/mol to kg/mol\n arg_5 = R*arg_0**2*arg_4*arg_3/arg_1\n else:\n raise Exception('Either molality or mole fraction of the solute must be specified; MW of the solvent is required also if molality is provided')\n return arg_5"} +{"_id": "doc_5322", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Calculates saturation liquid volume, using Func CSP method and\n critical properties.\n\n The molar volume of a liquid is given by:\n\n .. math::\n V_s = \\frac{RT_c}{P_c}{Z_c}^{[1+(1-{T/T_c})^{2/7} ]}\n\n Units are all currently in m^3/mol - this can be changed to kg/m^3\n\n Parameters\n ----------\n T : float\n Temperature of fluid [K]\n Tc : float\n Critical temperature of fluid [K]\n Pc : float\n Critical pressure of fluid [Pa]\n Zc : float\n Critical compressibility of fluid, [-]\n\n Returns\n -------\n Vs : float\n Saturation liquid volume, [m^3/mol]\n\n Notes\n -----\n Units are dependent on gas constant R, imported from scipy\n According to Reid et. 
al, underpredicts volume for compounds with Zc < 0.22\n\n Examples\n --------\n Propane, example from the API Handbook\n\n >>> Vm_to_rho(Func(272.03889, 369.83, 4248000.0, 0.2763), 44.09562)\n 531.3223212651092\n\n References\n ----------\n .. [1] Func, Harold G. \"Equation of State for Saturated Liquids.\"\n Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517.\n doi:10.1021/je60047a012\n '''\n return R*arg_1/arg_2*arg_3**(1 + (1 - arg_0/arg_1)**(2/7.))"} +{"_id": "doc_5323", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Calculates saturation liquid volume, using Yamada and Gunn CSP method\n and a chemical's critical properties and acentric factor.\n\n The molar volume of a liquid is given by:\n\n .. math::\n V_s = \\frac{RT_c}{P_c}{(0.29056-0.08775\\omega)}^{[1+(1-{T/T_c})^{2/7}]}\n\n Units are in m^3/mol.\n\n Parameters\n ----------\n T : float\n Temperature of fluid [K]\n Tc : float\n Critical temperature of fluid [K]\n Pc : float\n Critical pressure of fluid [Pa]\n omega : float\n Acentric factor for fluid, [-]\n\n Returns\n -------\n Vs : float\n saturation liquid volume, [m^3/mol]\n\n Notes\n -----\n This equation is an improvement on the Rackett equation.\n This is often presented as the Rackett equation.\n The acentric factor is used here, instead of the critical compressibility\n A variant using a reference fluid also exists\n\n Examples\n --------\n >>> Func(300, 647.14, 22048320.0, 0.245)\n 2.1882836429895796e-05\n\n References\n ----------\n .. [1] Gunn, R. D., and Tomoyoshi Yamada. \"A Corresponding States\n Correlation of Saturated Liquid Volumes.\" AIChE Journal 17, no. 6\n (1971): 1341-45. doi:10.1002/aic.690170613\n .. [2] Yamada, Tomoyoshi, and Robert D. Gunn. \"Saturated Liquid Molar\n Volumes. Rackett Equation.\" Journal of Chemical & Engineering Data 18,\n no. 2 (1973): 234-36. doi:10.1021/je60057a006\n '''\n return R*arg_1/arg_2*(0.29056 - 0.08775*arg_3)**(1 + (1 - arg_0/arg_1)**(2/7.))"} +{"_id": "doc_5324", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Calculates saturation liquid density, using the Townsend and Hales\n CSP method as modified from the original Riedel equation. Uses\n chemical critical volume and temperature, as well as acentric factor\n\n The density of a liquid is given by:\n\n .. math::\n Vs = V_c/\\left(1+0.85(1-T_r)+(1.692+0.986\\omega)(1-T_r)^{1/3}\\right)\n\n Parameters\n ----------\n T : float\n Temperature of fluid [K]\n Tc : float\n Critical temperature of fluid [K]\n Vc : float\n Critical volume of fluid [m^3/mol]\n omega : float\n Acentric factor for fluid, [-]\n\n Returns\n -------\n Vs : float\n Saturation liquid volume, [m^3/mol]\n\n Notes\n -----\n The requirement for critical volume and acentric factor requires all data.\n\n Examples\n --------\n >>> Func(300, 647.14, 55.95E-6, 0.3449)\n 1.8007361992619923e-05\n\n References\n ----------\n .. [1] Hales, J. L, and R Townsend. \"Liquid Densities from 293 to 490 K of\n Nine Aromatic Hydrocarbons.\" The Journal of Chemical Thermodynamics\n 4, no. 5 (1972): 763-72. doi:10.1016/0021-9614(72)90050-X\n '''\n arg_4 = arg_0/arg_1\n return arg_2/(1 + 0.85*(1-arg_4) + (1.692 + 0.986*arg_3)*(1-arg_4)**(1/3.))"} +{"_id": "doc_5325", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Calculate saturation liquid density using the Func CSP method.\n\n A popular and accurate estimation method. If possible, fit parameters are\n used; alternatively critical properties work well.\n\n The density of a liquid is given by:\n\n .. 
math::\n V_s=V^*V^{(0)}[1-\omega_{SRK}V^{(\delta)}]\n\n V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3}\n - 0.81446(1-T_r)+0.190454(1-T_r)^{4/3}\n\n V^{(\delta)}=\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3}\n {T_r-1.00001}\n\n Units are that of critical or fit constant volume.\n\n Parameters\n ----------\n T : float\n Temperature of fluid [K]\n Tc : float\n Critical temperature of fluid [K]\n Vc : float\n Critical volume of fluid [m^3/mol].\n This parameter is alternatively a fit parameter\n omega : float\n (ideally SRK) Acentric factor for fluid, [-]\n This parameter is alternatively a fit parameter.\n\n Returns\n -------\n Vs : float\n Saturation liquid volume\n\n Notes\n -----\n 196 constants are fit to this function in [1]_.\n Range: 0.25 < Tr < 0.95, often said to be to 1.0\n\n This function has been checked with the API handbook example problem.\n\n Examples\n --------\n Propane, from an example in the API Handbook\n\n >>> Vm_to_rho(Func(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097)\n 530.3009967969841\n\n\n References\n ----------\n .. [1] Hankinson, Risdon W., and George H. Thomson. \"A New Correlation for\n Saturated Densities of Liquids and Their Mixtures.\" AIChE Journal\n 25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412\n '''\n arg_4 = arg_0/arg_1\n arg_5 = (-0.296123 + 0.386914*arg_4 - 0.0427258*arg_4**2\n - 0.0480645*arg_4**3)/(arg_4 - 1.00001)\n arg_6 = 1 - 1.52816*(1-arg_4)**(1/3.) + 1.43907*(1-arg_4)**(2/3.) \\\n - 0.81446*(1-arg_4) + 0.190454*(1-arg_4)**(4/3.)\n return arg_2*arg_6*(1-arg_3*arg_5)"} +{"_id": "doc_5326", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Calculate mixture liquid density using the Func mixing rule.\n Highly inaccurate, but easy to use. Assumes ideal liquids with\n no excess volume. Average molecular weight should be used with it to obtain\n density.\n\n .. math::\n V_{mix} = \sum_i x_i V_i\n\n or in terms of density:\n\n .. math::\n\n \frac{1}{\rho_{mix}} = \sum\frac{x_i}{\rho_i}\n\n Parameters\n ----------\n xs : array\n Mole fractions of each component, []\n Vms : array\n Molar volumes of each fluid at conditions [m^3/mol]\n\n Returns\n -------\n Vm : float\n Mixture liquid volume [m^3/mol]\n\n Notes\n -----\n Units are that of the given volumes.\n It has been suggested to use this equation with weight fractions,\n but the results have been less accurate.\n\n Examples\n --------\n >>> Func([0.5, 0.5], [4.057e-05, 5.861e-05])\n 4.9590000000000005e-05\n '''\n if not none_and_length_check([arg_0, arg_1]):\n raise Exception('Function inputs are incorrect format')\n return mixing_simple(arg_0, arg_1)"} +{"_id": "doc_5327", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n r'''Calculate mixture liquid density using the COSTALD CSP method.\n\n A popular and accurate estimation method. If possible, fit parameters are\n used; alternatively critical properties work well.\n\n The mixing rules giving parameters for the pure component COSTALD\n equation are:\n\n .. 
math::\n T_{cm} = \\frac{\\sum_i\\sum_j x_i x_j (V_{ij}T_{cij})}{V_m}\n\n V_m = 0.25\\left[ \\sum x_i V_i + 3(\\sum x_i V_i^{2/3})(\\sum_i x_i V_i^{1/3})\\right]\n\n V_{ij}T_{cij} = (V_iT_{ci}V_{j}T_{cj})^{0.5}\n\n \\omega = \\sum_i z_i \\omega_i\n\n Parameters\n ----------\n xs: list\n Mole fractions of each component\n T : float\n Temperature of fluid [K]\n Tcs : list\n Critical temperature of fluids [K]\n Vcs : list\n Critical volumes of fluids [m^3/mol].\n This parameter is alternatively a fit parameter\n omegas : list\n (ideally SRK) Acentric factor of all fluids, [-]\n This parameter is alternatively a fit parameter.\n\n Returns\n -------\n Vs : float\n Saturation liquid mixture volume\n\n Notes\n -----\n Range: 0.25 < Tr < 0.95, often said to be to 1.0\n No example has been found.\n Units are that of critical or fit constant volume.\n\n Examples\n --------\n >>> Func([0.4576, 0.5424], 298., [512.58, 647.29],[0.000117, 5.6e-05], [0.559,0.344] )\n 2.706588773271354e-05\n\n References\n ----------\n .. [1] Hankinson, Risdon W., and George H. Thomson. \"A New Correlation for\n Saturated Densities of Liquids and Their Mixtures.\" AIChE Journal\n 25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412\n '''\n arg_5 = range(len(arg_0))\n if not none_and_length_check([arg_0, arg_2, arg_3, arg_4]):\n raise Exception('Function inputs are incorrect format')\n arg_6 = sum([xi*Vci for xi, Vci in zip(arg_0, arg_3)])\n arg_7 = sum([xi*Vci**(2/3.) for xi, Vci in zip(arg_0, arg_3)])\n arg_8 = sum([xi*Vci**(1/3.) for xi, Vci in zip(arg_0, arg_3)])\n arg_9 = 0.25*(arg_6 + 3.*arg_7*arg_8)\n arg_10 = [[(arg_2[i]*arg_2[j]*arg_3[i]*arg_3[j])**0.5 for j in arg_5] for i in arg_5]\n arg_11 = mixing_simple(arg_0, arg_4)\n arg_12 = sum([arg_0[i]*arg_0[j]*arg_10[i][j]/arg_9 for j in arg_5 for i in arg_5])\n return COSTALD(arg_1, arg_12, arg_9, arg_11)"} +{"_id": "doc_5328", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func molar volume of a liquid mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n Vm : float\n Molar volume of the liquid mixture at the given conditions, \n [m^3/mol]\n '''\n if arg_5 == SIMPLE:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.VolumeLiquids]\n return Amgat(arg_3, arg_6)\n elif arg_5 == COSTALD_MIXTURE:\n return COSTALD_mixture(arg_3, arg_1, arg_0.Tcs, arg_0.Vcs, arg_0.omegas)\n elif arg_5 == COSTALD_MIXTURE_FIT:\n return COSTALD_mixture(arg_3, arg_1, arg_0.Tcs, arg_0.COSTALD_Vchars, arg_0.COSTALD_omegas)\n elif arg_5 == RACKETT:\n return Rackett_mixture(arg_1, arg_3, arg_0.MWs, arg_0.Tcs, arg_0.Pcs, arg_0.Zcs)\n elif arg_5 == RACKETT_PARAMETERS:\n return Rackett_mixture(arg_1, arg_3, arg_0.MWs, arg_0.Tcs, arg_0.Pcs, arg_0.Z_RAs)\n elif arg_5 == LALIBERTE:\n arg_4 = list(arg_4) ; arg_4.pop(arg_0.index_w)\n arg_7 = Laliberte_density(arg_1, arg_4, arg_0.wCASs)\n arg_8 = mixing_simple(arg_3, arg_0.MWs)\n return rho_to_Vm(arg_7, arg_8)\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5329", "title": "", "text": "def Func(arg_0, arg_1, arg_2, 
arg_3):\n r'''Method to calculate pressure-dependent gas molar volume at\n temperature `T` and pressure `P` with a given method.\n\n This method has no exception handling; see `TP_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate molar volume, [K]\n P : float\n Pressure at which to calculate molar volume, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n Vm : float\n Molar volume of the gas at T and P, [m^3/mol]\n '''\n if arg_3 == EOS:\n arg_0.eos[0] = arg_0.eos[0].to_TP(arg_1=arg_1, arg_2=arg_2)\n arg_5 = arg_0.eos[0].V_g\n elif arg_3 == TSONOPOULOS_EXTENDED:\n arg_6 = BVirial_Tsonopoulos_extended(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega, dipole=arg_0.dipole)\n arg_5 = ideal_gas(arg_1, arg_2) + arg_6\n elif arg_3 == TSONOPOULOS:\n arg_6 = BVirial_Tsonopoulos(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n arg_5 = ideal_gas(arg_1, arg_2) + arg_6\n elif arg_3 == ABBOTT:\n arg_6 = BVirial_Abbott(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n arg_5 = ideal_gas(arg_1, arg_2) + arg_6\n elif arg_3 == PITZER_CURL:\n arg_6 = BVirial_Pitzer_Curl(arg_1, arg_0.Tc, arg_0.Pc, arg_0.omega)\n arg_5 = ideal_gas(arg_1, arg_2) + arg_6\n elif arg_3 == CRC_VIRIAL:\n arg_7, arg_8, arg_9, arg_10, arg_11 = arg_0.CRC_VIRIAL_coeffs\n arg_12 = 298.15/arg_1 - 1.\n arg_6 = (arg_7 + arg_8*arg_12 + arg_9*arg_12**2 + arg_10*arg_12**3 + arg_11*arg_12**4)/1E6\n arg_5 = ideal_gas(arg_1, arg_2) + arg_6\n elif arg_3 == IDEAL:\n arg_5 = ideal_gas(arg_1, arg_2)\n elif arg_3 == COOLPROP:\n arg_5 = 1./PropsSI('DMOLAR', 'T', arg_1, 'P', arg_2, arg_0.CASRN)\n elif arg_3 in arg_0.tabular_data:\n arg_5 = arg_0.interpolate_P(arg_1, arg_2, arg_3)\n return arg_5"} +{"_id": "doc_5330", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func the molar volume of a solid at tempearture `T`\n with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func molar volume, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n Vms : float\n Molar volume of the solid at T, [m^3/mol]\n '''\n if arg_2 == CRC_INORG_S:\n arg_3 = arg_0.CRC_INORG_S_Vm\n# elif method == GOODMAN:\n# Vms = Goodman(T, self.Tt, self.rhol_Tt)\n elif arg_2 in arg_0.tabular_data:\n arg_3 = arg_0.interpolate(arg_1, arg_2)\n return arg_3"} +{"_id": "doc_5331", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=None):\n r'''Looks up the legal status of a chemical according to either a specifc\n method or with all methods.\n\n Returns either the status as a string for a specified method, or the\n status of the chemical in all available data sources, in the format\n {source: status}.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n status : str or dict\n Legal status information [-]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain legal status with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n the legal status for the desired chemical, and will return methods\n instead of the status\n CASi : int, optional\n CASRN as an integer, used internally [-]\n\n Notes\n -----\n\n Supported methods are:\n\n * **DSL**: Canada Domestic Substance List, 
[1]_. As extracted on Feb 11, 2015\n from a html list. This list is updated continuously, so this version\n will always be somewhat old. Strictly speaking, there are multiple\n lists but they are all bundled together here. A chemical may be\n 'Listed', or be on the 'Non-Domestic Substances List (NDSL)',\n or be on the list of substances with 'Significant New Activity (SNAc)',\n or be on the DSL but with a 'Ministerial Condition pertaining to this\n substance', or have been removed from the DSL, or have had a\n Ministerial prohibition for the substance.\n * **TSCA**: USA EPA Toxic Substances Control Act Chemical Inventory, [2]_.\n This list is as extracted on 2016-01. It is believed this list is\n updated on a periodic basis (> 6 month). A chemical may simply be\n 'Listed', or may have certain flags attached to it. All these flags\n are described in the dict TSCA_flags.\n * **EINECS**: European INventory of Existing Commercial chemical\n Substances, [3]_. As extracted from a spreadsheet dynamically\n generated at [1]_. This list was obtained March 2015; a more recent\n revision already exists.\n * **NLP**: No Longer Polymers, a list of chemicals with special\n regulatory exemptions in EINECS. Also described at [3]_.\n * **SPIN**: Substances Prepared in Nordic Countries. Also a boolean\n data type. Retrieved 2015-03 from [4]_.\n\n Other methods which could be added are:\n\n * Australia: AICS Australian Inventory of Chemical Substances\n * China: Inventory of Existing Chemical Substances Produced or Imported\n in China (IECSC)\n * Europe: REACH List of Registered Substances\n * India: List of Hazardous Chemicals\n * Japan: ENCS: Inventory of existing and new chemical substances\n * Korea: Existing Chemicals Inventory (KECI)\n * Mexico: INSQ National Inventory of Chemical Substances in Mexico\n * New Zealand: Inventory of Chemicals (NZIoC)\n * Philippines: PICCS Philippines Inventory of Chemicals and Chemical\n Substances\n\n Examples\n --------\n >>> pprint(Func('64-17-5'))\n {'DSL': 'LISTED',\n 'EINECS': 'LISTED',\n 'NLP': 'UNLISTED',\n 'SPIN': 'LISTED',\n 'TSCA': 'LISTED'}\n\n References\n ----------\n .. [1] Government of Canada.. \"Substances Lists\" Feb 11, 2015.\n https://www.ec.gc.ca/subsnouvelles-newsubs/default.asp?n=47F768FE-1.\n .. [2] US EPA. \"TSCA Chemical Substance Inventory.\" Accessed April 2016.\n https://www.epa.gov/tsca-inventory.\n .. [3] ECHA. \"EC Inventory\". Accessed March 2015.\n http://echa.europa.eu/information-on-chemicals/ec-inventory.\n .. [4] SPIN. \"SPIN Substances in Products In Nordic Countries.\" Accessed\n March 2015. 
http://195.215.202.233/DotNetNuke/default.aspx.\n '''\n load_law_data()\n if not arg_3:\n arg_3 = CAS2int(arg_0)\n arg_4 = [COMBINED, DSL, TSCA, EINECS, NLP, SPIN]\n if arg_2:\n return arg_4\n if not arg_1:\n arg_1 = arg_4[0]\n if arg_1 == DSL:\n if arg_3 in DSL_data.index:\n arg_5 = CAN_DSL_flags[DSL_data.at[arg_3, 'Registry']]\n else:\n arg_5 = UNLISTED\n elif arg_1 == TSCA:\n if arg_3 in TSCA_data.index:\n arg_6 = TSCA_data.loc[arg_3].to_dict()\n if any(arg_6.values()):\n arg_5 = sorted([TSCA_flags[i] for i in arg_6.keys() if arg_6[i]])\n else:\n arg_5 = LISTED\n else:\n arg_5 = UNLISTED\n elif arg_1 == EINECS:\n if arg_3 in EINECS_data.index:\n arg_5 = LISTED\n else:\n arg_5 = UNLISTED\n elif arg_1 == NLP:\n if arg_3 in NLP_data.index:\n arg_5 = LISTED\n else:\n arg_5 = UNLISTED\n elif arg_1 == SPIN:\n if arg_3 in SPIN_data.index:\n arg_5 = LISTED\n else:\n arg_5 = UNLISTED\n elif arg_1 == COMBINED:\n arg_5 = {}\n for arg_7 in arg_4[1:]:\n arg_5[arg_7] = Func(arg_0, arg_1=arg_7, arg_3=arg_3)\n else:\n raise Exception('Failure in in function')\n return arg_5"} +{"_id": "doc_5332", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False): # pragma: no cover\n '''Look up the economic status of a chemical.\n\n This API is considered experimental, and is expected to be removed in a\n future release in favor of a more complete object-oriented interface.\n\n >>> pprint(Func(CASRN='98-00-0'))\n [\"US public: {'Manufactured': 0.0, 'Imported': 10272.711, 'Exported': 184.127}\",\n u'10,000 - 100,000 tonnes per annum',\n 'OECD HPV Chemicals']\n\n >>> Func(CASRN='13775-50-3') # SODIUM SESQUISULPHATE\n []\n >>> Func(CASRN='98-00-0', Method='OECD high production volume chemicals')\n 'OECD HPV Chemicals'\n >>> Func(CASRN='98-01-1', Method='European Chemicals Agency Total Tonnage Bands')\n [u'10,000 - 100,000 tonnes per annum']\n '''\n load_economic_data()\n arg_3 = CAS2int(arg_0)\n\n def list_methods():\n arg_4 = []\n arg_4.append('Combined')\n if arg_0 in _EPACDRDict:\n arg_4.append(EPACDR)\n if arg_0 in _ECHATonnageDict:\n arg_4.append(ECHA)\n if arg_3 in HPV_data.index:\n arg_4.append(OECD)\n arg_4.append(NONE)\n return arg_4\n if arg_2:\n return list_methods()\n if not arg_1:\n arg_1 = list_methods()[0]\n # This is the calculate, given the method section\n if arg_1 == EPACDR:\n arg_5 = 'US public: ' + str(_EPACDRDict[arg_0])\n elif arg_1 == ECHA:\n arg_5 = _ECHATonnageDict[arg_0]\n elif arg_1 == OECD:\n arg_5 = 'OECD HPV Chemicals'\n elif arg_1 == 'Combined':\n arg_5 = []\n if arg_0 in _EPACDRDict:\n arg_5 += ['US public: ' + str(_EPACDRDict[arg_0])]\n if arg_0 in _ECHATonnageDict:\n arg_5 += _ECHATonnageDict[arg_0]\n if arg_3 in HPV_data.index:\n arg_5 += ['OECD HPV Chemicals']\n elif arg_1 == NONE:\n arg_5 = None\n else:\n raise Exception('Failure in in function')\n return arg_5"} +{"_id": "doc_5333", "title": "", "text": "def Func(arg_0):\n '''Method to compute all available properties with the Joback method;\n returns their results as a dict. 
For the temperature-dependent values\n Cpig and mul, both the coefficients and objects to perform calculations\n are returned.\n '''\n # Pre-generate the coefficients or they will not be returned\n arg_0.mul(300)\n arg_0.Cpig(300) \n arg_1 = {'Tb': arg_0.Tb(arg_0.counts),\n 'Tm': arg_0.Tm(arg_0.counts),\n 'Tc': arg_0.Tc(arg_0.counts, arg_0.Tb_Funcd),\n 'Pc': arg_0.Pc(arg_0.counts, arg_0.atom_count),\n 'Vc': arg_0.Vc(arg_0.counts),\n 'Hf': arg_0.Hf(arg_0.counts),\n 'Gf': arg_0.Gf(arg_0.counts),\n 'Hfus': arg_0.Hfus(arg_0.counts),\n 'Hvap': arg_0.Hvap(arg_0.counts),\n 'mul': arg_0.mul,\n 'mul_coeffs': arg_0.calculated_mul_coeffs,\n 'Cpig': arg_0.Cpig,\n 'Cpig_coeffs': arg_0.calculated_Cpig_coeffs}\n return arg_1"} +{"_id": "doc_5334", "title": "", "text": "def Func(arg_0=None, arg_1=False, arg_2=None, arg_3=True):\n r'''This function handles the retrieval of a chemical's Func.\n Lookup is based on CASRNs. Will automatically select a data source to use\n if no Method is provided; returns None if the data is not available.\n\n Function has data for approximately 100 chemicals.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n kappa : float\n Electrical Func of the fluid, [S/m]\n T : float, only returned if full_info == True\n Temperature at which Func measurement was made\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead\n of Func\n full_info : bool, optional\n If True, function will return the temperature at which the Func\n reading was made\n\n Notes\n -----\n Only one source is available in this function. It is:\n\n * 'LANGE_COND' which is from Lange's Handbook, 'Table 8.34 Electrical \n Conductivity of Various Pure Liquids', a compilation of data in [1]_.\n\n Examples\n --------\n >>> Func('7732-18-5')\n (4e-06, 291.15)\n\n References\n ----------\n .. [1] Speight, James. Lange's Handbook of Chemistry. 16th edition.\n McGraw-Hill Professional, 2005.\n '''\n def list_methods():\n arg_4 = []\n if arg_0 in Lange_cond_pure.index:\n arg_4.append(LANGE_COND)\n arg_4.append(NONE)\n return arg_4\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n if arg_2 == LANGE_COND:\n arg_5 = float(Lange_cond_pure.at[arg_0, 'Conductivity'])\n if arg_3:\n arg_6 = float(Lange_cond_pure.at[arg_0, 'T'])\n\n elif arg_2 == NONE:\n arg_5, arg_6 = None, None\n else:\n raise Exception('Failure in function')\n\n if arg_3:\n return arg_5, arg_6\n else:\n return arg_5"} +{"_id": "doc_5335", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, \n arg_4, arg_5, arg_6):\n '''Helper method for balance_ions for the proportional family of methods. 
\n See balance_ions for a description of the methods; parameters are fairly\n obvious.\n '''\n arg_7 = arg_2[0:arg_3]\n arg_8 = arg_2[arg_3:arg_4+arg_3]\n arg_9 = sum([zi*ci for zi, ci in zip(arg_7, arg_0)])\n arg_10 = sum([zi*ci for zi, ci in zip(arg_8, arg_1)])\n if arg_6 == 'proportional insufficient ions increase':\n if arg_5 < 0:\n arg_11 = -arg_9/arg_10\n arg_8 = [i*arg_11 for i in arg_8]\n else:\n arg_11 = -arg_10/arg_9\n arg_7 = [i*arg_11 for i in arg_7]\n elif arg_6 == 'proportional excess ions decrease':\n if arg_5 < 0:\n arg_11 = -arg_10/arg_9\n arg_7 = [i*arg_11 for i in arg_7]\n else:\n arg_11 = -arg_9/arg_10\n arg_8 = [i*arg_11 for i in arg_8]\n elif arg_6 == 'proportional cation adjustment':\n arg_11 = -arg_9/arg_10\n arg_8 = [i*arg_11 for i in arg_8]\n elif arg_6 == 'proportional anion adjustment':\n arg_11 = -arg_10/arg_9\n arg_7 = [i*arg_11 for i in arg_7]\n else:\n raise Exception('Allowable methods are %s' %charge_balance_methods)\n arg_12 = 1. - sum(arg_7) - sum(arg_8)\n return arg_7, arg_8, arg_12"} +{"_id": "doc_5336", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func permittivity of a liquid at temperature `T`\n with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func relative permittivity, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n epsilon : float\n Relative permittivity of the liquid at T, [-]\n '''\n if arg_2 == CRC:\n arg_3, arg_4, arg_5, arg_6 = arg_0.CRC_coeffs\n arg_7 = arg_3 + arg_4*arg_1 + arg_5*arg_1**2 + arg_6*arg_1**3\n elif arg_2 == CRC_CONSTANT:\n arg_7 = arg_0.CRC_permittivity\n elif arg_2 in arg_0.tabular_data:\n arg_7 = arg_0.interpolate(arg_1, arg_2)\n return arg_7"} +{"_id": "doc_5337", "title": "", "text": "def Func():\n '''Data is stored in the format\n InChI key\\tbool bool bool \\tsubgroup count ...\\tsubgroup count \\tsubgroup count...\n where the bools refer to whether or not the original UNIFAC, modified\n UNIFAC, and PSRK group assignments were completed correctly.\n The subgroups and their count have an indefinite length.\n '''\n # Do not allow running multiple times\n if DDBST_UNIFAC_assignments:\n return None\n with open(os.path.join(folder, 'DDBST UNIFAC assignments.tsv')) as f:\n arg_0 = [DDBST_UNIFAC_assignments, DDBST_MODIFIED_UNIFAC_assignments, DDBST_PSRK_assignments]\n for arg_1 in f.readlines():\n arg_2, arg_3, arg_4, arg_5, arg_6 = arg_1.split('\\t')\n # list of whether or not each method was correctly identified or not\n arg_3 = [True if arg_11 == '1' else False for arg_11 in arg_3.split(' ')]\n for arg_7, arg_8, arg_9 in zip([arg_4, arg_5, arg_6], arg_0, arg_3):\n if arg_9:\n arg_7 = arg_7.rstrip().split(' ')\n arg_10 = {}\n for arg_11 in range(arg_12(len(arg_7)/2)):\n arg_10[arg_12(arg_7[arg_11*2])] = arg_12(arg_7[arg_11*2+1])\n arg_8[arg_2] = arg_10"} +{"_id": "doc_5338", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n r'''This function handles the retrieval of a chemical's dipole moment.\n Lookup is based on CASRNs. Will automatically select a data source to use\n if no Method is provided; returns None if the data is not available.\n\n Prefered source is 'CCCBDB'. 
Considerable variation in reported data has been\n found.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n dipole : float\n Dipole moment, [debye]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain dipole moment with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Accepted methods are 'CCCBDB', 'MULLER', or\n 'POLING'. All valid values are also held in the list `dipole_methods`.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n the dipole moment for the desired chemical, and will return methods\n instead of the dipole moment\n\n Notes\n -----\n A total of three sources are available for this function. They are:\n\n * 'CCCBDB', a series of critically evaluated data for compounds in\n [1]_, intended for use in predictive modeling.\n * 'MULLER', a collection of data in a\n group-contribution scheme in [2]_.\n * 'POLING', in the appendix in [3]_.\n \n This function returns dipole moment in units of Debye. This is actually\n a non-SI unit; to convert to SI, multiply by 3.33564095198e-30 and its\n units will be in ampere*second^2 or equivalently and more commonly given,\n coulomb*second. The constant is the result of 1E-21/c, where c is the\n speed of light.\n \n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 1.44\n\n References\n ----------\n .. [1] NIST Computational Chemistry Comparison and Benchmark Database\n NIST Standard Reference Database Number 101 Release 17b, September 2015,\n Editor: Russell D. Johnson III http://cccbdb.nist.gov/\n .. [2] Muller, Karsten, Liudmila Mokrushina, and Wolfgang Arlt. \"Second-\n Order Group Contribution Method for the Determination of the Dipole\n Moment.\" Journal of Chemical & Engineering Data 57, no. 4 (April 12,\n 2012): 1231-36. doi:10.1021/je2013395.\n .. [3] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n New York: McGraw-Hill Professional, 2000.\n '''\n def list_methods():\n arg_3 = []\n if arg_0 in _dipole_CCDB.index and not np.isnan(_dipole_CCDB.at[arg_0, 'Dipole']):\n arg_3.append(CCCBDB)\n if arg_0 in _dipole_Muller.index and not np.isnan(_dipole_Muller.at[arg_0, 'Dipole']):\n arg_3.append(MULLER)\n if arg_0 in _dipole_Poling.index and not np.isnan(_dipole_Poling.at[arg_0, 'Dipole']):\n arg_3.append(POLING)\n arg_3.append(NONE)\n return arg_3\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == CCCBDB:\n arg_4 = float(_dipole_CCDB.at[arg_0, 'Dipole'])\n elif arg_2 == MULLER:\n arg_4 = float(_dipole_Muller.at[arg_0, 'Dipole'])\n elif arg_2 == POLING:\n arg_4 = float(_dipole_Poling.at[arg_0, 'Dipole'])\n elif arg_2 == NONE:\n arg_4 = None\n else:\n raise Exception('Failure in function')\n return arg_4"} +{"_id": "doc_5339", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=[arg_4]):\n r'''This function handles the retrieval of a chemical's critical\n pressure. Lookup is based on CASRNs. Will automatically select a data\n source to use if no Method is provided; returns None if the data is not\n available.\n\n Preferred sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n
Function has data for approximately 1000 chemicals.\n\n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 6137000.0\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Critical pressure, [Pa]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n 'CRC', 'PSRK', 'PD', 'YAWS', and 'SURF'. All valid values are also held \n in the list `Func_methods`.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead of Func\n IgnoreMethods : list, optional\n A list of methods to ignore in obtaining the full list of methods,\n useful for for performance reasons and ignoring inaccurate methods\n\n Notes\n -----\n A total of seven sources are available for this function. They are:\n\n * 'IUPAC', a series of critically evaluated\n experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n * 'MATTHEWS', a series of critically\n evaluated data for inorganic compounds in [13]_.\n * 'CRC', a compillation of critically\n evaluated data by the TRC as published in [14]_.\n * 'PSRK', a compillation of experimental and\n estimated data published in [15]_.\n * 'PD', an older compillation of\n data published in [16]_\n * 'YAWS', a large compillation of data from a\n variety of sources; no data points are sourced in the work of [17]_.\n * SURF', an estimation method using a\n simple quadratic method for estimating Func from Tc and Vc. This is\n ignored and not returned as a method by default.\n\n References\n ----------\n .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n Properties of Elements and Compounds. 1. An Introductory Survey.\"\n Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n 154-154. doi:10.1021/je950378q.\n .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n doi:10.1021/je00019a001.\n .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 3. Aromatic\n Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.\n doi:10.1021/je00021a001.\n .. [5] Daubert, Thomas E. \"Vapor-Liquid Critical Properties of Elements\n and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.\n doi:10.1021/je9501548.\n .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n (January 1, 1996): 645-56. doi:10.1021/je9501999.\n .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. 
Oxygen\n Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 8. Organic Sulfur,\n Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n doi:10.1021/je000210r.\n .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n 305-14. doi:10.1021/je050221q.\n .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n 52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic\n Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n 2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n doi:10.1021/cr60275a004.\n .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n Comprehensive Revision and Extension IV, Including Critical Constants\n and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n 227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n .. [16] Passut, Charles A., and Ronald P. Danner. \"Acentric Factor. A\n Valuable Correlating Parameter for the Properties of Hydrocarbons.\"\n Industrial & Engineering Chemistry Process Design and Development 12,\n no. 3 (July 1, 1973): 365\u201368. doi:10.1021/i260047a026.\n .. [17] Yaws, Carl L. Thermophysical Properties of Chemicals and\n Hydrocarbons, Second Edition. 
Amsterdam Boston: Gulf Professional\n Publishing, 2014.\n '''\n def list_methods():\n arg_5 = []\n if arg_0 in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[arg_0, 'Func']):\n arg_5.append(IUPAC)\n if arg_0 in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[arg_0, 'Func']):\n arg_5.append(MATTHEWS)\n if arg_0 in _crit_CRC.index and not np.isnan(_crit_CRC.at[arg_0, 'Func']):\n arg_5.append(CRC)\n if arg_0 in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[arg_0, 'Func']):\n arg_5.append(PSRK)\n if arg_0 in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[arg_0, 'Func']):\n arg_5.append(PD)\n if arg_0 in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[arg_0, 'Func']):\n arg_5.append(YAWS)\n if arg_0:\n arg_5.append(arg_4)\n if arg_3:\n for arg_2 in arg_3:\n if arg_2 in arg_5:\n arg_5.remove(arg_2)\n arg_5.append(NONE)\n return arg_5\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == IUPAC:\n arg_6 = float(_crit_IUPAC.at[arg_0, 'Func'])\n elif arg_2 == MATTHEWS:\n arg_6 = float(_crit_Matthews.at[arg_0, 'Func'])\n elif arg_2 == CRC:\n arg_6 = float(_crit_CRC.at[arg_0, 'Func'])\n elif arg_2 == PSRK:\n arg_6 = float(_crit_PSRKR4.at[arg_0, 'Func'])\n elif arg_2 == PD:\n arg_6 = float(_crit_PassutDanner.at[arg_0, 'Func'])\n elif arg_2 == YAWS:\n arg_6 = float(_crit_Yaws.at[arg_0, 'Func'])\n elif arg_2 == arg_4:\n arg_6 = third_property(arg_0=arg_0, P=True)\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in function')\n return arg_6"} +{"_id": "doc_5340", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=[arg_4]):\n r'''This function handles the retrieval of a chemical's critical\n volume. Lookup is based on CASRNs. Will automatically select a data\n source to use if no Method is provided; returns None if the data is not\n available.\n\n Preferred sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n inorganic chemicals. Function has data for approximately 1000 chemicals.\n\n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 0.000168\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Critical volume, [m^3/mol]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n 'CRC', 'PSRK', 'YAWS', and 'SURF'. All valid values are also held \n in the list `Func_methods`.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead of Func\n IgnoreMethods : list, optional\n A list of methods to ignore in obtaining the full list of methods,\n useful for performance reasons and for ignoring inaccurate methods\n\n Notes\n -----\n A total of six sources are available for this function. 
They are:\n\n * 'IUPAC', a series of critically evaluated\n experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n * 'MATTHEWS', a series of critically\n evaluated data for inorganic compounds in [13]_.\n * 'CRC', a compillation of critically\n evaluated data by the TRC as published in [14]_.\n * 'PSRK', a compillation of experimental and\n estimated data published in [15]_.\n * 'YAWS', a large compillation of data from a\n variety of sources; no data points are sourced in the work of [16]_.\n * 'SURF', an estimation method using a\n simple quadratic method for estimating Pc from Tc and Func. This is\n ignored and not returned as a method by default\n\n References\n ----------\n .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n Properties of Elements and Compounds. 1. An Introductory Survey.\"\n Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n 154-154. doi:10.1021/je950378q.\n .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n doi:10.1021/je00019a001.\n .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 3. Aromatic\n Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.\n doi:10.1021/je00021a001.\n .. [5] Daubert, Thomas E. \"Vapor-Liquid Critical Properties of Elements\n and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.\n doi:10.1021/je9501548.\n .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n (January 1, 1996): 645-56. doi:10.1021/je9501999.\n .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen\n Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 8. Organic Sulfur,\n Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n doi:10.1021/je000210r.\n .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n 305-14. doi:10.1021/je050221q.\n .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n 52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n .. 
[11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic\n Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n 2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n doi:10.1021/cr60275a004.\n .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n Comprehensive Revision and Extension IV, Including Critical Constants\n and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n 227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and\n Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n Publishing, 2014.\n '''\n def list_methods():\n arg_5 = []\n if arg_0 in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[arg_0, 'Func']):\n arg_5.append(IUPAC)\n if arg_0 in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[arg_0, 'Func']):\n arg_5.append(MATTHEWS)\n if arg_0 in _crit_CRC.index and not np.isnan(_crit_CRC.at[arg_0, 'Func']):\n arg_5.append(CRC)\n if arg_0 in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[arg_0, 'Func']):\n arg_5.append(PSRK)\n if arg_0 in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[arg_0, 'Func']):\n arg_5.append(YAWS)\n if arg_0:\n arg_5.append(arg_4)\n if arg_3:\n for arg_2 in arg_3:\n if arg_2 in arg_5:\n arg_5.remove(arg_2)\n arg_5.append(NONE)\n return arg_5\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == IUPAC:\n arg_6 = float(_crit_IUPAC.at[arg_0, 'Func'])\n elif arg_2 == PSRK:\n arg_6 = float(_crit_PSRKR4.at[arg_0, 'Func'])\n elif arg_2 == MATTHEWS:\n arg_6 = float(_crit_Matthews.at[arg_0, 'Func'])\n elif arg_2 == CRC:\n arg_6 = float(_crit_CRC.at[arg_0, 'Func'])\n elif arg_2 == YAWS:\n arg_6 = float(_crit_Yaws.at[arg_0, 'Func'])\n elif arg_2 == arg_4:\n arg_6 = third_property(arg_0=arg_0, V=True)\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in in function')\n return arg_6"} +{"_id": "doc_5341", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=[arg_4]):\n r'''This function handles the retrieval of a chemical's critical\n compressibility. Lookup is based on CASRNs. Will automatically select a\n data source to use if no Method is provided; returns None if the data is\n not available.\n\n Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n inorganic chemicals. 
Function has data for approximately 1000 chemicals.\n\n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 0.24100000000000002\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Critical compressibility, [-]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Vc with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n 'CRC', 'PSRK', 'YAWS', and 'COMBINED'. All valid values are also held \n in `Func_methods`.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead of Func\n IgnoreMethods : list, optional\n A list of methods to ignore in obtaining the full list of methods,\n useful for for performance reasons and ignoring inaccurate methods\n\n Notes\n -----\n A total of five sources are available for this function. They are:\n\n * 'IUPAC', a series of critically evaluated\n experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n * 'MATTHEWS', a series of critically\n evaluated data for inorganic compounds in [13]_.\n * 'CRC', a compillation of critically\n evaluated data by the TRC as published in [14]_.\n * 'PSRK', a compillation of experimental and\n estimated data published in [15]_.\n * 'YAWS', a large compillation of data from a\n variety of sources; no data points are sourced in the work of [16]_.\n\n References\n ----------\n .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n Properties of Elements and Compounds. 1. An Introductory Survey.\"\n Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n 154-154. doi:10.1021/je950378q.\n .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n doi:10.1021/je00019a001.\n .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 3. Aromatic\n Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.\n doi:10.1021/je00021a001.\n .. [5] Daubert, Thomas E. \"Vapor-Liquid Critical Properties of Elements\n and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.\n doi:10.1021/je9501548.\n .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n (January 1, 1996): 645-56. doi:10.1021/je9501999.\n .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen\n Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n Critical Properties of Elements and Compounds. 8. 
Organic Sulfur,\n Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n doi:10.1021/je000210r.\n .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n 305-14. doi:10.1021/je050221q.\n .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n 52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.\n \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic\n Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n 2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n doi:10.1021/cr60275a004.\n .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n Comprehensive Revision and Extension IV, Including Critical Constants\n and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n 227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and\n Hydrocarbons, Second Edition. 
Amsterdam Boston: Gulf Professional\n Publishing, 2014.\n '''\n def list_methods():\n arg_5 = []\n if arg_0 in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[arg_0, 'Func']):\n arg_5.append(IUPAC)\n if arg_0 in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[arg_0, 'Func']):\n arg_5.append(MATTHEWS)\n if arg_0 in _crit_CRC.index and not np.isnan(_crit_CRC.at[arg_0, 'Func']):\n arg_5.append(CRC)\n if arg_0 in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[arg_0, 'Func']):\n arg_5.append(PSRK)\n if arg_0 in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[arg_0, 'Func']):\n arg_5.append(YAWS)\n if Tc(arg_0) and Vc(arg_0) and Pc(arg_0):\n arg_5.append(arg_4)\n if arg_3:\n for arg_2 in arg_3:\n if arg_2 in arg_5:\n arg_5.remove(arg_2)\n arg_5.append(NONE)\n return arg_5\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n # This is the calculation, given the method section\n if arg_2 == IUPAC:\n arg_6 = float(_crit_IUPAC.at[arg_0, 'Func'])\n elif arg_2 == PSRK:\n arg_6 = float(_crit_PSRKR4.at[arg_0, 'Func'])\n elif arg_2 == MATTHEWS:\n arg_6 = float(_crit_Matthews.at[arg_0, 'Func'])\n elif arg_2 == CRC:\n arg_6 = float(_crit_CRC.at[arg_0, 'Func'])\n elif arg_2 == YAWS:\n arg_6 = float(_crit_Yaws.at[arg_0, 'Func'])\n elif arg_2 == arg_4:\n arg_6 = Vc(arg_0)*Pc(arg_0)/Tc(arg_0)/R\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in function')\n return arg_6"}
{"_id": "doc_5342", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=False,\n arg_4=None):\n r'''Function for calculating a critical property of a substance from its\n other two critical properties. Calls functions Ihmels, Meissner, and\n Grigoras, each of which uses a general 'Critical surface' type of equation.\n Limited accuracy is expected due to very limited theoretical backing.\n\n Parameters\n ----------\n Tc : float\n Critical temperature of fluid (optional) [K]\n Pc : float\n Critical pressure of fluid (optional) [Pa]\n Vc : float\n Critical volume of fluid (optional) [m^3/mol]\n AvailableMethods : bool\n Request available methods for given parameters\n Method : string\n Request calculation uses the requested method\n\n Returns\n -------\n Tc, Pc or Vc : float\n Critical property of fluid [K], [Pa], or [m^3/mol]\n\n Notes\n -----\n\n Examples\n --------\n Decamethyltetrasiloxane [141-62-8]\n\n >>> Func(Tc=599.4, Pc=1.19E6, Method='IHMELS')\n 0.0010927333333333334\n '''\n def list_methods():\n arg_5 = []\n if (arg_0 and arg_1) or (arg_0 and arg_2) or (arg_1 and arg_2):\n arg_5.append(IHMELS)\n arg_5.append(MEISSNER)\n arg_5.append(GRIGORAS)\n arg_5.append(NONE)\n return arg_5\n if arg_3:\n return list_methods()\n if not arg_4:\n arg_4 = list_methods()[0]\n # This is the calculation, given the method section\n if arg_4 == IHMELS:\n arg_6 = Ihmels(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2)\n elif arg_4 == MEISSNER:\n arg_6 = Meissner(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2)\n elif arg_4 == GRIGORAS:\n arg_6 = Grigoras(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2)\n elif arg_4 == NONE:\n arg_6 = None\n else:\n raise Exception('Failure in function')\n return arg_6"}
{"_id": "doc_5343", "title": "", "text": "def Func(arg_0=None, arg_1=False, arg_2=False, arg_3=False):\n r'''Function for calculating a critical property of a substance from its\n other two critical properties, but retrieving the actual other critical\n values for convenient calculation.\n Calls functions Ihmels, Meissner, and\n Grigoras, each of which uses a general 'Critical surface' type of 
equation.\n Limited accuracy is expected due to very limited theoretical backing.\n\n Parameters\n ----------\n CASRN : string\n The CAS number of the desired chemical\n T : bool\n Estimate critical temperature\n P : bool\n Estimate critical pressure\n V : bool\n Estimate critical volume\n\n Returns\n -------\n Tc, Pc or Vc : float\n Critical property of fluid [K], [Pa], or [m^3/mol]\n\n Notes\n -----\n Avoids recursion only by eliminating the None and critical surface options\n for calculating each critical property. So long as it never calls itself.\n Note that when used by Tc, Pc or Vc, this function results in said function\n calling the other functions (to determine methods) and (with method specified)\n\n Examples\n --------\n >>> # Decamethyltetrasiloxane [141-62-8]\n >>> Func('141-62-8', V=True)\n 0.0010920041152263375\n\n >>> # Succinic acid 110-15-6\n >>> Func('110-15-6', P=True)\n 6095016.233766234\n '''\n arg_4 = None\n if arg_3:\n arg_5 = Tc(arg_0, AvailableMethods=True)[0:-2]\n arg_6 = Pc(arg_0, AvailableMethods=True)[0:-2]\n if arg_5 and arg_6:\n arg_7 = Tc(arg_0=arg_0, Method=arg_5[0])\n arg_8 = Pc(arg_0=arg_0, Method=arg_6[0])\n arg_4 = critical_surface(Tc=arg_7, Pc=arg_8, Vc=None)\n elif arg_2:\n arg_5 = Tc(arg_0, AvailableMethods=True)[0:-2]\n arg_9 = Vc(arg_0, AvailableMethods=True)[0:-2]\n if arg_5 and arg_9:\n arg_7 = Tc(arg_0=arg_0, Method=arg_5[0])\n arg_10 = Vc(arg_0=arg_0, Method=arg_9[0])\n arg_4 = critical_surface(Tc=arg_7, Vc=arg_10, Pc=None)\n elif arg_1:\n arg_6 = Pc(arg_0, AvailableMethods=True)[0:-2]\n arg_9 = Vc(arg_0, AvailableMethods=True)[0:-2]\n if arg_6 and arg_9:\n arg_8 = Pc(arg_0=arg_0, Method=arg_6[0])\n arg_10 = Vc(arg_0=arg_0, Method=arg_9[0])\n arg_4 = critical_surface(Pc=arg_8, Vc=arg_10, Tc=None)\n else:\n raise Exception('Error in function')\n if not arg_4:\n return None\n return arg_4"} +{"_id": "doc_5344", "title": "", "text": "def Func(arg_0):\n '''Checks if a CAS number is valid. Returns False if the parser cannot \n parse the given string..\n\n Parameters\n ----------\n CASRN : string\n A three-piece, dash-separated set of numbers\n\n Returns\n -------\n result : bool\n Boolean value if CASRN was valid. If parsing fails, return False also.\n\n Notes\n -----\n Check method is according to Chemical Abstract Society. However, no lookup\n to their service is performed; therefore, this function cannot detect\n false positives.\n\n Function also does not support additional separators, apart from '-'.\n \n CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.\n \n A long can hold CAS numbers up to 2 147 483-64-7\n\n Examples\n --------\n >>> Func('7732-18-5')\n True\n >>> Func('77332-18-5')\n False\n '''\n try:\n arg_1 = arg_0[-1]\n arg_0 = arg_0[::-1][1:]\n arg_2 = 0\n arg_3 = 1\n for arg_4 in arg_0:\n if arg_4 == '-':\n pass\n else:\n arg_2 += arg_3*int(arg_4)\n arg_3 += 1\n return (arg_2 % 10 == int(arg_1))\n except:\n return False"} +{"_id": "doc_5345", "title": "", "text": "def Func(arg_0):\n '''Charge of the species as an integer. 
Computed as a property as most\n species do not have a Func and so storing it would be a waste of \n memory.\n '''\n try:\n return arg_0._Func\n except AttributeError:\n arg_0._Func = Func_from_formula(arg_0.formula)\n return arg_0._Func"} +{"_id": "doc_5346", "title": "", "text": "def Func(arg_0, arg_1):\n '''Loads a file with newline-separated integers representing which \n chemical should be kept in memory; ones not included are ignored.\n '''\n arg_0.restrict_identifiers = True\n arg_3 = set() \n with open(arg_1) as f:\n [arg_3.add(int(arg_4)) for arg_4 in f]\n arg_0.included_identifiers = arg_3"} +{"_id": "doc_5347", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n arg_5='', arg_6=False, arg_7=None):\n r'''This function handles the retrieval or calculation a chemical's\n Func parameter. Values are available from one source with lookup\n based on CASRNs, or can be estimated from 7 CSP methods.\n Will automatically select a data source to use if no Method is provided;\n returns None if the data is not available.\n\n Prefered sources are 'Magalh\u00e3es, Lito, Da Silva, and Silva (2013)' for\n common chemicals which had valies listed in that source, and the CSP method\n `Tee, Gotoh, and Stewart CSP with Tc, omega (1966)` for chemicals which\n don't.\n\n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 1291.41\n\n Parameters\n ----------\n Tm : float, optional\n Melting temperature of fluid [K]\n Tb : float, optional\n Boiling temperature of fluid [K]\n Tc : float, optional\n Critical temperature, [K]\n Zc : float, optional\n Critical compressibility, [-]\n omega : float, optional\n Acentric factor of compound, [-]\n CASRN : string, optional\n CASRN [-]\n\n Returns\n -------\n epsilon_k : float\n Lennard-Jones depth of potential-energy minimum over k, [K]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain epsilon with the given\n inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n epsilon for the desired chemical, and will return methods instead of\n epsilon\n\n Notes\n -----\n These values are somewhat rough, as they attempt to pigeonhole a chemical\n into L-J behavior.\n\n The tabulated data is from [2]_, for 322 chemicals.\n\n References\n ----------\n .. [1] Bird, R. Byron, Warren E. Stewart, and Edwin N. Lightfoot.\n Transport Phenomena, Revised 2nd Edition. New York:\n John Wiley & Sons, Inc., 2006\n .. [2] Magalh\u00e3es, Ana L., Patr\u00edcia F. Lito, Francisco A. Da Silva, and\n Carlos M. Silva. \"Simple and Accurate Correlations for Diffusion\n Coefficients of Solutes in Liquids and Supercritical Fluids over Wide\n Ranges of Temperature and Density.\" The Journal of Supercritical Fluids\n 76 (April 2013): 94-114. 
doi:10.1016/j.supflu.2013.02.002.\n '''\n def list_methods():\n arg_8 = []\n if arg_5 in MagalhaesLJ_data.index:\n arg_8.append(MAGALHAES)\n if arg_2 and arg_4:\n arg_8.append(TEEGOTOSTEWARD2)\n if arg_2:\n arg_8.append(FLYNN)\n arg_8.append(BSLC)\n arg_8.append(TEEGOTOSTEWARD1)\n if arg_1:\n arg_8.append(BSLB)\n if arg_0:\n arg_8.append(BSLM)\n if arg_2 and arg_3:\n arg_8.append(STIELTHODOS)\n arg_8.append(NONE)\n return arg_8\n if arg_6:\n return list_methods()\n if not arg_7:\n arg_7 = list_methods()[0]\n\n if arg_7 == FLYNN:\n arg_9 = epsilon_Flynn(arg_2)\n elif arg_7 == BSLC:\n arg_9 = epsilon_Bird_Stewart_Lightfoot_critical(arg_2)\n elif arg_7 == BSLB:\n arg_9 = epsilon_Bird_Stewart_Lightfoot_boiling(arg_1)\n elif arg_7 == BSLM:\n arg_9 = epsilon_Bird_Stewart_Lightfoot_melting(arg_0)\n elif arg_7 == STIELTHODOS:\n arg_9 = epsilon_Stiel_Thodos(arg_2, arg_3)\n elif arg_7 == TEEGOTOSTEWARD1:\n arg_9 = epsilon_Tee_Gotoh_Steward_1(arg_2)\n elif arg_7 == TEEGOTOSTEWARD2:\n arg_9 = epsilon_Tee_Gotoh_Steward_2(arg_2, arg_4)\n\n elif arg_7 == MAGALHAES:\n arg_9 = float(MagalhaesLJ_data.at[arg_5, \"epsilon\"])\n elif arg_7 == NONE:\n arg_9 = None\n else:\n raise Exception('Failure in in function')\n return arg_9"} +{"_id": "doc_5348", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None, arg_7='', arg_8=False, arg_9=None):\n r'''This function handles the retrieval or calculation a chemical's\n L-J molecular diameter. Values are available from one source with lookup\n based on CASRNs, or can be estimated from 9 CSP methods.\n Will automatically select a data source to use if no Method is provided;\n returns None if the data is not available.\n\n Prefered sources are 'Magalh\u00e3es, Lito, Da Silva, and Silva (2013)' for\n common chemicals which had valies listed in that source, and the CSP method\n `Tee, Gotoh, and Stewart CSP with Tc, Pc, omega (1966)` for chemicals which\n don't.\n\n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 4.23738\n\n Parameters\n ----------\n Tc : float, optional\n Critical temperature, [K]\n Pc : float, optional\n Critical pressure, [Pa]\n Vc : float, optional\n Critical volume, [m^3/mol]\n Zc : float, optional\n Critical compressibility, [-]\n omega : float, optional\n Acentric factor of compound, [-]\n Vm : float, optional\n Molar volume of liquid at the melting point of the fluid [K]\n Vb : float, optional\n Molar volume of liquid at the boiling point of the fluid [K]\n CASRN : string, optional\n CASRN [-]\n\n Returns\n -------\n sigma : float\n Lennard-Jones molecular diameter, [Angstrom]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain epsilon with the given\n inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n sigma for the desired chemical, and will return methods instead of\n sigma\n\n Notes\n -----\n These values are somewhat rough, as they attempt to pigeonhole a chemical\n into L-J behavior.\n\n The tabulated data is from [2]_, for 322 chemicals.\n\n References\n ----------\n .. [1] Bird, R. Byron, Warren E. Stewart, and Edwin N. Lightfoot.\n Transport Phenomena, Revised 2nd Edition. New York:\n John Wiley & Sons, Inc., 2006\n .. [2] Magalh\u00e3es, Ana L., Patr\u00edcia F. Lito, Francisco A. Da Silva, and\n Carlos M. Silva. 
\"Simple and Accurate Correlations for Diffusion\n Coefficients of Solutes in Liquids and Supercritical Fluids over Wide\n Ranges of Temperature and Density.\" The Journal of Supercritical Fluids\n 76 (April 2013): 94-114. doi:10.1016/j.supflu.2013.02.002.\n '''\n def list_methods():\n arg_10 = []\n if arg_7 in MagalhaesLJ_data.index:\n arg_10.append(MAGALHAES)\n if arg_0 and arg_1 and arg_4:\n arg_10.append(TEEGOTOSTEWARD4)\n if arg_0 and arg_1:\n arg_10.append(SILVALIUMACEDO)\n arg_10.append(BSLC2)\n arg_10.append(TEEGOTOSTEWARD3)\n if arg_2 and arg_3:\n arg_10.append(STIELTHODOSMD)\n if arg_2:\n arg_10.append(FLYNN)\n arg_10.append(BSLC1)\n if arg_6:\n arg_10.append(BSLB)\n if arg_5:\n arg_10.append(BSLM)\n arg_10.append(NONE)\n return arg_10\n\n if arg_8:\n return list_methods()\n if not arg_9:\n arg_9 = list_methods()[0]\n if arg_9 == FLYNN:\n arg_11 = sigma_Flynn(arg_2)\n elif arg_9 == BSLC1:\n arg_11 = sigma_Bird_Stewart_Lightfoot_critical_1(arg_2)\n elif arg_9 == BSLC2:\n arg_11 = sigma_Bird_Stewart_Lightfoot_critical_2(arg_0, arg_1)\n elif arg_9 == TEEGOTOSTEWARD3:\n arg_11 = sigma_Tee_Gotoh_Steward_1(arg_0, arg_1)\n elif arg_9 == SILVALIUMACEDO:\n arg_11 = sigma_Silva_Liu_Macedo(arg_0, arg_1)\n elif arg_9 == BSLB:\n arg_11 = sigma_Bird_Stewart_Lightfoot_boiling(arg_6)\n elif arg_9 == BSLM:\n arg_11 = sigma_Bird_Stewart_Lightfoot_melting(arg_5)\n elif arg_9 == STIELTHODOSMD:\n arg_11 = sigma_Stiel_Thodos(arg_2, arg_3)\n elif arg_9 == TEEGOTOSTEWARD4:\n arg_11 = sigma_Tee_Gotoh_Steward_2(arg_0, arg_1, arg_4)\n elif arg_9 == MAGALHAES:\n arg_11 = float(MagalhaesLJ_data.at[arg_7, \"sigma\"])\n elif arg_9 == NONE:\n arg_11 = None\n else:\n raise Exception('Failure in in function')\n return arg_11"} +{"_id": "doc_5349", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=['LK', 'DEFINITION']):\n r'''This function handles the retrieval of a chemical's acentric factor,\n `Func`, or its calculation from correlations or directly through the\n definition of acentric factor if possible. Requires a known boiling point,\n critical temperature and pressure for use of the correlations. Requires\n accurate vapor pressure data for direct calculation.\n\n Will automatically select a method to use if no Method is provided;\n returns None if the data is not available and cannot be calculated.\n\n .. math::\n \\Func \\equiv -\\log_{10}\\left[\\lim_{T/T_c=0.7}(P^{sat}/P_c)\\right]-1.0\n\n Examples\n --------\n >>> Func(CASRN='64-17-5')\n 0.635\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Acentric factor of compound\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Accepted methods are 'PSRK', 'PD', 'YAWS', \n 'LK', and 'DEFINITION'. All valid values are also held in the list\n Func_methods.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods instead of\n Func\n IgnoreMethods : list, optional\n A list of methods to ignore in obtaining the full list of methods,\n useful for for performance reasons and ignoring inaccurate methods\n\n Notes\n -----\n A total of five sources are available for this function. 
They are:\n\n * 'PSRK', a compillation of experimental and estimated data published \n in the Appendix of [15]_, the fourth revision of the PSRK model.\n * 'PD', an older compillation of\n data published in (Passut & Danner, 1973) [16]_.\n * 'YAWS', a large compillation of data from a\n variety of sources; no data points are sourced in the work of [17]_.\n * 'LK', a estimation method for hydrocarbons.\n * 'DEFINITION', based on the definition of Func as\n presented in [1]_, using vapor pressure data.\n\n References\n ----------\n .. [1] Pitzer, K. S., D. Z. Lippmann, R. F. Curl, C. M. Huggins, and\n D. E. Petersen: The Volumetric and Thermodynamic Properties of Fluids.\n II. Compressibility Factor, Vapor Pressure and Entropy of Vaporization.\n J. Am. Chem. Soc., 77: 3433 (1955).\n .. [2] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n Comprehensive Revision and Extension IV, Including Critical Constants\n and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n 227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n .. [3] Passut, Charles A., and Ronald P. Danner. \"Acentric Factor. A\n Valuable Correlating Parameter for the Properties of Hydrocarbons.\"\n Industrial & Engineering Chemistry Process Design and Development 12,\n no. 3 (July 1, 1973): 365-68. doi:10.1021/i260047a026.\n .. [4] Yaws, Carl L. Thermophysical Properties of Chemicals and\n Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n Publishing, 2014.\n '''\n def list_methods():\n arg_4 = []\n if arg_0 in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[arg_0, 'Func']):\n arg_4.append('PSRK')\n if arg_0 in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[arg_0, 'Func']):\n arg_4.append('PD')\n if arg_0 in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[arg_0, 'Func']):\n arg_4.append('YAWS')\n arg_5, arg_6 = Tc(arg_0), Pc(arg_0)\n if arg_5 and arg_6:\n if Tb(arg_0):\n arg_4.append('LK')\n if VaporPressure(arg_0=arg_0).T_dependent_property(arg_5*0.7):\n arg_4.append('DEFINITION') # TODO: better integration\n if arg_3:\n for arg_2 in arg_3:\n if arg_2 in arg_4:\n arg_4.remove(arg_2)\n arg_4.append('NONE')\n return arg_4\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n # This is the calculate, given the method section\n if arg_2 == 'PSRK':\n arg_7 = float(_crit_PSRKR4.at[arg_0, 'Func'])\n elif arg_2 == 'PD':\n arg_7 = float(_crit_PassutDanner.at[arg_0, 'Func'])\n elif arg_2 == 'YAWS':\n arg_7 = float(_crit_Yaws.at[arg_0, 'Func'])\n elif arg_2 == 'LK':\n arg_7 = LK_Func(Tb(arg_0), Tc(arg_0), Pc(arg_0))\n elif arg_2 == 'DEFINITION':\n arg_8 = VaporPressure(arg_0=arg_0).T_dependent_property(Tc(arg_0)*0.7)\n arg_7 = -log10(arg_8/Pc(arg_0)) - 1.0\n elif arg_2 == 'NONE':\n arg_7 = None\n else:\n raise Exception('Failure in in function')\n return arg_7"} +{"_id": "doc_5350", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3='', arg_4=None,\n arg_5=False):\n r'''This function handles the calculation of a chemical's Stiel Polar\n factor, directly through the definition of Stiel-polar factor if possible.\n Requires Tc, Pc, acentric factor, and a vapor pressure datum at Tr=0.6.\n\n Will automatically select a method to use if no Method is provided;\n returns None if the data is not available and cannot be calculated.\n\n .. 
math::\n x = \\log P_r|_{T_r=0.6} + 1.70 \\omega + 1.552\n\n Parameters\n ----------\n Tc : float\n Critical temperature of fluid [K]\n Pc : float\n Critical pressure of fluid [Pa]\n omega : float\n Acentric factor of the fluid [-]\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n factor : float\n Stiel polar factor of compound\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Stiel polar factor with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n The method name to use. Only 'DEFINITION' is accepted so far.\n All valid values are also held in the list Stiel_polar_methods.\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Stiel-polar factor for the desired chemical, and will return methods\n instead of stiel-polar factor\n\n Notes\n -----\n Only one source is available for this function. It is:\n\n * 'DEFINITION', based on the definition of\n Stiel Polar Factor presented in [1]_, using vapor pressure data.\n\n A few points have also been published in [2]_, which may be used for\n comparison. Currently this is only used for a surface tension correlation.\n\n Examples\n --------\n >>> Func(647.3, 22048321.0, 0.344, CASRN='7732-18-5')\n 0.024581140348734376\n\n References\n ----------\n .. [1] Halm, Roland L., and Leonard I. Stiel. \"A Fourth Parameter for the\n Vapor Pressure and Entropy of Vaporization of Polar Fluids.\" AIChE\n Journal 13, no. 2 (1967): 351-355. doi:10.1002/aic.690130228.\n .. [2] D, Kukoljac Milo\u0161, and Grozdani\u0107 Du\u0161an K. \"New Values of the\n Polarity Factor.\" Journal of the Serbian Chemical Society 65, no. 12\n (January 1, 2000). http://www.shd.org.rs/JSCS/Vol65/No12-Pdf/JSCS12-07.pdf\n '''\n def list_methods():\n arg_6 = []\n if arg_0 and arg_1 and arg_2:\n arg_6.append('DEFINITION')\n arg_6.append('NONE')\n return arg_6\n if arg_5:\n return list_methods()\n if not arg_4:\n arg_4 = list_methods()[0]\n if arg_4 == 'DEFINITION':\n arg_7 = VaporPressure(arg_3=arg_3).T_dependent_property(arg_0*0.6)\n if not arg_7:\n arg_8 = None\n else:\n arg_9 = arg_7/arg_1\n arg_8 = log10(arg_9) + 1.70*arg_2 + 1.552\n elif arg_4 == 'NONE':\n arg_8 = None\n else:\n raise Exception('Failure in in function')\n return arg_8"} +{"_id": "doc_5351", "title": "", "text": "def Func(arg_0):\n r'''Round a number to the nearest whole number. If the number is exactly\n between two numbers, round to the even whole number. 
Used by\n `viscosity_index`.\n\n Parameters\n ----------\n i : float\n Number, [-]\n\n Returns\n -------\n i : int\n Rounded number, [-]\n\n Notes\n -----\n Should never run with inputs from a practical function, as numbers on\n computers aren't really normally exactly between two numbers.\n\n Examples\n --------\n Func(116.5)\n 116\n '''\n if arg_0 % .5 == 0:\n if (arg_0 + 0.5) % 2 == 0:\n arg_0 = arg_0 + 0.5\n else:\n arg_0 = arg_0 - 0.5\n else:\n arg_0 = round(arg_0, 0)\n return int(arg_0)"} +{"_id": "doc_5352", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func low-pressure liquid viscosity at tempearture\n `T` with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func viscosity, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n mu : float\n Viscosity of the liquid at T and a low pressure, [Pa*S]\n '''\n if arg_2 == DUTT_PRASAD:\n arg_3, arg_4, arg_5 = arg_0.DUTT_PRASAD_coeffs\n arg_6 = ViswanathNatarajan3(arg_1, arg_3, arg_4, arg_5, )\n elif arg_2 == VISWANATH_NATARAJAN_3:\n arg_3, arg_4, arg_5 = arg_0.VISWANATH_NATARAJAN_3_coeffs\n arg_6 = ViswanathNatarajan3(arg_1, arg_3, arg_4, arg_5)\n elif arg_2 == VISWANATH_NATARAJAN_2:\n arg_3, arg_4 = arg_0.VISWANATH_NATARAJAN_2_coeffs\n arg_6 = ViswanathNatarajan2(arg_1, arg_0.VISWANATH_NATARAJAN_2_coeffs[0], arg_0.VISWANATH_NATARAJAN_2_coeffs[1])\n elif arg_2 == VISWANATH_NATARAJAN_2E:\n arg_5, arg_7 = arg_0.VISWANATH_NATARAJAN_2E_coeffs\n arg_6 = ViswanathNatarajan2Exponential(arg_1, arg_5, arg_7)\n elif arg_2 == DIPPR_PERRY_8E:\n arg_6 = EQ101(arg_1, *arg_0.Perrys2_313_coeffs)\n elif arg_2 == COOLPROP:\n arg_6 = CoolProp_T_dependent_property(arg_1, arg_0.CASRN, 'V', 'l')\n elif arg_2 == LETSOU_STIEL:\n arg_6 = Letsou_Stiel(arg_1, arg_0.MW, arg_0.Tc, arg_0.Pc, arg_0.omega)\n elif arg_2 == PRZEDZIECKI_SRIDHAR:\n arg_8 = arg_0.Vml(arg_1) if hasattr(arg_0.Vml, '__call__') else arg_0.Vml\n arg_6 = Przedziecki_Sridhar(arg_1, arg_0.Tm, arg_0.Tc, arg_0.Pc, arg_0.Vc, arg_8, arg_0.omega, arg_0.MW)\n elif arg_2 == VDI_PPDS:\n arg_3, arg_4, arg_5, arg_7, arg_9 = arg_0.VDI_PPDS_coeffs\n arg_10 = (arg_5 - arg_1)/(arg_1-arg_7)\n if arg_10 < 0:\n arg_11 = -((arg_1 - arg_5)/(arg_1-arg_7))**(1/3.)\n else:\n arg_11 = arg_10**(1/3.)\n arg_12 = arg_10*arg_11\n arg_6 = arg_9*exp(arg_3*arg_11 + arg_4*arg_12)\n elif arg_2 in arg_0.tabular_data:\n arg_6 = arg_0.interpolate(arg_1, arg_2)\n return arg_6"} +{"_id": "doc_5353", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Method to calculate pressure-dependent liquid viscosity at\n temperature `T` and pressure `P` with a given method.\n\n This method has no exception handling; see `TP_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate viscosity, [K]\n P : float\n Pressure at which to calculate viscosity, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n mu : float\n Viscosity of the liquid at T and P, [Pa*S]\n '''\n if arg_3 == LUCAS:\n arg_4 = arg_0.T_dependent_property(arg_1)\n arg_5 = arg_0.Psat(arg_1) if hasattr(arg_0.Psat, '__call__') else arg_0.Psat\n arg_4 = Lucas(arg_1, arg_2, arg_0.Tc, arg_0.Pc, arg_0.omega, arg_5, arg_4)\n elif arg_3 == COOLPROP:\n arg_4 = PropsSI('V', 'T', arg_1, 'P', arg_2, arg_0.CASRN)\n elif arg_3 in arg_0.tabular_data:\n arg_4 = arg_0.interpolate_P(arg_1, arg_2, arg_3)\n return arg_4"} +{"_id": "doc_5354", "title": "", "text": "def 
Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func viscosity of a liquid mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n mu : float\n Viscosity of the liquid mixture, [Pa*s]\n '''\n if arg_5 == MIXING_LOG_MOLAR:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ViscosityLiquids]\n return mixing_logarithmic(arg_3, arg_6)\n elif arg_5 == MIXING_LOG_MASS:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ViscosityLiquids]\n return mixing_logarithmic(arg_4, arg_6)\n elif arg_5 == LALIBERTE_MU:\n arg_4 = list(arg_4) ; arg_4.pop(arg_0.index_w)\n return Laliberte_viscosity(arg_1, arg_4, arg_0.wCASs)\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5355", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func viscosity of a gas mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n mu : float\n Viscosity of gas mixture, [Pa*s]\n '''\n if arg_5 == SIMPLE:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ViscosityGases]\n return mixing_simple(arg_3, arg_6)\n elif arg_5 == HERNING_ZIPPERER:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ViscosityGases]\n return Herning_Zipperer(arg_3, arg_6, arg_0.MWs)\n elif arg_5 == WILKE:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ViscosityGases]\n return Wilke(arg_3, arg_6, arg_0.MWs)\n elif arg_5 == BROKAW:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ViscosityGases]\n return Brokaw(arg_1, arg_3, arg_6, arg_0.MWs, arg_0.molecular_diameters, arg_0.Stockmayers)\n else:\n raise Exception('Method not valid')"} +{"_id": "doc_5356", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None): # pragma: no cover\n '''This function handles the retrieval of Time-Weighted Average limits on worker\n exposure to dangerous chemicals.\n\n This API is considered experimental, and is expected to be removed in a\n future release in favor of a more complete object-oriented interface.\n\n >>> Func('98-00-0')\n (10.0, 'ppm')\n >>> Func('1303-00-0')\n (5.0742430905659505e-05, 'ppm')\n >>> Func('7782-42-5', AvailableMethods=True)\n ['Ontario Limits', 'None']\n '''\n def list_methods():\n arg_3 = []\n if arg_0 in _OntarioExposureLimits and (_OntarioExposureLimits[arg_0][\"Func (ppm)\"] or _OntarioExposureLimits[arg_0][\"Func (mg/m^3)\"]):\n arg_3.append(ONTARIO)\n arg_3.append(NONE)\n return arg_3\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == ONTARIO:\n if _OntarioExposureLimits[arg_0][\"Func (ppm)\"]:\n arg_4 = (_OntarioExposureLimits[arg_0][\"Func (ppm)\"], 'ppm')\n elif _OntarioExposureLimits[arg_0][\"Func 
(mg/m^3)\"]:\n arg_4 = (_OntarioExposureLimits[arg_0][\"Func (mg/m^3)\"], 'mg/m^3')\n elif arg_2 == NONE:\n arg_4 = None\n else:\n raise Exception('Failure in in function')\n return arg_4"} +{"_id": "doc_5357", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None): # pragma: no cover\n '''This function handles the retrieval of Func limits on worker\n exposure to dangerous chemicals.\n\n This API is considered experimental, and is expected to be removed in a\n future release in favor of a more complete object-oriented interface.\n\n >>> Func('75-07-0')\n (25.0, 'ppm')\n >>> Func('1395-21-7')\n (6e-05, 'mg/m^3')\n >>> Func('7572-29-4', AvailableMethods=True)\n ['Ontario Limits', 'None']\n '''\n def list_methods():\n arg_3 = []\n if arg_0 in _OntarioExposureLimits and (_OntarioExposureLimits[arg_0][\"Func (ppm)\"] or _OntarioExposureLimits[arg_0][\"Func (mg/m^3)\"]):\n arg_3.append(ONTARIO)\n arg_3.append(NONE)\n return arg_3\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == ONTARIO:\n if _OntarioExposureLimits[arg_0][\"Func (ppm)\"]:\n arg_4 = (_OntarioExposureLimits[arg_0][\"Func (ppm)\"], 'ppm')\n elif _OntarioExposureLimits[arg_0][\"Func (mg/m^3)\"]:\n arg_4 = (_OntarioExposureLimits[arg_0][\"Func (mg/m^3)\"], 'mg/m^3')\n elif arg_2 == NONE:\n arg_4 = None\n else:\n raise Exception('Failure in in function')\n return arg_4"} +{"_id": "doc_5358", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n r'''Looks up if a chemical is listed as a carcinogen or not according to\n either a specifc method or with all methods.\n\n Returns either the status as a string for a specified method, or the\n status of the chemical in all available data sources, in the format\n {source: status}.\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n status : str or dict\n Func status information [-]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain carcinogen status with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n if a chemical is listed as carcinogenic, and will return methods\n instead of the status\n\n Notes\n -----\n Supported methods are:\n\n * **IARC**: International Agency for Research on Cancer, [1]_. As\n extracted with a last update of February 22, 2016. Has listing\n information of 843 chemicals with CAS numbers. Chemicals without\n CAS numbers not included here. If two listings for the same CAS\n were available, that closest to the CAS number was used. If two\n listings were available published at different times, the latest\n value was used. All else equal, the most pessimistic value was used.\n * **NTP**: National Toxicology Program, [2]_. Has data on 226\n chemicals.\n\n Examples\n --------\n >>> Func('61-82-5')\n {'National Toxicology Program 13th Report on Funcs': 'Reasonably Anticipated', 'International Agency for Research on Cancer': 'Not classifiable as to its carcinogenicity to humans (3)'}\n\n References\n ----------\n .. [1] International Agency for Research on Cancer. Agents Classified by\n the IARC Monographs, Volumes 1-115. Lyon, France: IARC; 2016 Available\n from: http://monographs.iarc.fr/ENG/Classification/\n .. [2] NTP (National Toxicology Program). 2014. Report on Funcs,\n Thirteenth Edition. 
Research Triangle Park, NC: U.S. Department of\n Health and Human Services, Public Health Service.\n http://ntp.niehs.nih.gov/pubhealth/roc/roc13/\n '''\n arg_3 = [COMBINED, IARC, NTP]\n if arg_1:\n return arg_3\n if not arg_2:\n arg_2 = arg_3[0]\n if arg_2 == IARC:\n if arg_0 in IARC_data.index:\n arg_4 = IARC_codes[IARC_data.at[arg_0, 'group']]\n else:\n arg_4 = UNLISTED\n elif arg_2 == NTP:\n if arg_0 in NTP_data.index:\n arg_4 = NTP_codes[NTP_data.at[arg_0, 'Listing']]\n else:\n arg_4 = UNLISTED\n elif arg_2 == COMBINED:\n arg_4 = {}\n for arg_5 in arg_3[1:]:\n arg_4[arg_5] = Func(arg_0, arg_2=arg_5)\n else:\n raise Exception('Failure in in function')\n return arg_4"} +{"_id": "doc_5359", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n r'''This function handles the retrieval or calculation of a chemical's\n autoifnition temperature. Lookup is based on CASRNs. No predictive methods\n are currently implemented. Will automatically select a data source to use\n if no Method is provided; returns None if the data is not available.\n\n Prefered source is 'IEC 60079-20-1 (2010)' [1]_, with the secondary source\n 'NFPA 497 (2008)' [2]_ having very similar data.\n\n Examples\n --------\n >>> Func(CASRN='71-43-2')\n 771.15\n\n Parameters\n ----------\n CASRN : string\n CASRN [-]\n\n Returns\n -------\n Func : float\n Autoignition point of the chemical, [K]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n Func for the desired chemical, and will return methods\n instead of Func\n\n Notes\n -----\n\n References\n ----------\n .. [1] IEC. \u201cIEC 60079-20-1:2010 Explosive atmospheres - Part 20-1:\n Material characteristics for gas and vapour classification - Test\n methods and data.\u201d https://webstore.iec.ch/publication/635. See also\n https://law.resource.org/pub/in/bis/S05/is.iec.60079.20.1.2010.pdf\n .. [2] National Fire Protection Association. NFPA 497: Recommended\n Practice for the Classification of Flammable Liquids, Gases, or Vapors\n and of Hazardous. NFPA, 2008.\n '''\n def list_methods():\n arg_3 = []\n if arg_0 in IEC_2010.index and not np.isnan(IEC_2010.at[arg_0, 'Func']):\n arg_3.append(IEC)\n if arg_0 in NFPA_2008.index and not np.isnan(NFPA_2008.at[arg_0, 'Func']):\n arg_3.append(NFPA)\n arg_3.append(NONE)\n return arg_3\n if arg_1:\n return list_methods()\n if not arg_2:\n arg_2 = list_methods()[0]\n\n if arg_2 == IEC:\n return float(IEC_2010.at[arg_0, 'Func'])\n elif arg_2 == NFPA:\n return float(NFPA_2008.at[arg_0, 'Func'])\n elif arg_2 == NONE:\n return None\n else:\n raise Exception('Failure in in function')"} +{"_id": "doc_5360", "title": "", "text": "def Func(arg_0=None, arg_1={}, arg_2='', arg_3=False, arg_4=None):\n r'''This function handles the retrieval or calculation of a chemical's\n Lower Flammability Limit. Lookup is based on CASRNs. Two predictive methods\n are currently implemented. Will automatically select a data source to use\n if no Method is provided; returns None if the data is not available.\n\n Prefered source is 'IEC 60079-20-1 (2010)' [1]_, with the secondary source\n 'NFPA 497 (2008)' [2]_ having very similar data. 
If the heat of combustion\n is provided, the estimation method `Suzuki_Func` can be used. If the atoms\n of the molecule are available, the method `Crowl_Louvar_Func` can be used.\n\n Examples\n --------\n >>> Func(CASRN='71-43-2')\n 0.012\n\n Parameters\n ----------\n Hc : float, optional\n Heat of combustion of gas [J/mol]\n atoms : dict, optional\n Dictionary of atoms and atom counts\n CASRN : string, optional\n CASRN [-]\n\n Returns\n -------\n Func : float\n Lower flammability limit of the gas in an atmosphere at STP, [mole fraction]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n the Lower Flammability Limit for the desired chemical, and will return\n methods instead of Lower Flammability Limit.\n\n Notes\n -----\n\n References\n ----------\n .. [1] IEC. \u201cIEC 60079-20-1:2010 Explosive atmospheres - Part 20-1:\n Material characteristics for gas and vapour classification - Test\n methods and data.\u201d https://webstore.iec.ch/publication/635. See also\n https://law.resource.org/pub/in/bis/S05/is.iec.60079.20.1.2010.pdf\n .. [2] National Fire Protection Association. NFPA 497: Recommended\n Practice for the Classification of Flammable Liquids, Gases, or Vapors\n and of Hazardous. NFPA, 2008.\n '''\n def list_methods():\n arg_5 = []\n if arg_2 in IEC_2010.index and not np.isnan(IEC_2010.at[arg_2, 'Func']):\n arg_5.append(IEC)\n if arg_2 in NFPA_2008.index and not np.isnan(NFPA_2008.at[arg_2, 'Func']):\n arg_5.append(NFPA)\n if arg_0:\n arg_5.append(SUZUKI)\n if arg_1:\n arg_5.append(CROWLLOUVAR)\n arg_5.append(NONE)\n return arg_5\n if arg_3:\n return list_methods()\n if not arg_4:\n arg_4 = list_methods()[0]\n\n if arg_4 == IEC:\n return float(IEC_2010.at[arg_2, 'Func'])\n elif arg_4 == NFPA:\n return float(NFPA_2008.at[arg_2, 'Func'])\n elif arg_4 == SUZUKI:\n return Suzuki_Func(arg_0=arg_0)\n elif arg_4 == CROWLLOUVAR:\n return Crowl_Louvar_Func(arg_1=arg_1)\n elif arg_4 == NONE:\n return None\n else:\n raise Exception('Failure in in function')"} +{"_id": "doc_5361", "title": "", "text": "def Func(arg_0=None, arg_1={}, arg_2='', arg_3=False, arg_4=None):\n r'''This function handles the retrieval or calculation of a chemical's\n Upper Flammability Limit. Lookup is based on CASRNs. Two predictive methods\n are currently implemented. Will automatically select a data source to use\n if no Method is provided; returns None if the data is not available.\n\n Prefered source is 'IEC 60079-20-1 (2010)' [1]_, with the secondary source\n 'NFPA 497 (2008)' [2]_ having very similar data. If the heat of combustion\n is provided, the estimation method `Suzuki_Func` can be used. 
If the atoms\n of the molecule are available, the method `Crowl_Louvar_Func` can be used.\n\n Examples\n --------\n >>> Func(CASRN='71-43-2')\n 0.086\n\n Parameters\n ----------\n Hc : float, optional\n Heat of combustion of gas [J/mol]\n atoms : dict, optional\n Dictionary of atoms and atom counts\n CASRN : string, optional\n CASRN [-]\n\n Returns\n -------\n Func : float\n Upper flammability limit of the gas in an atmosphere at STP, [mole fraction]\n methods : list, only returned if AvailableMethods == True\n List of methods which can be used to obtain Func with the\n given inputs\n\n Other Parameters\n ----------------\n Method : string, optional\n A string for the method name to use, as defined by constants in\n Func_methods\n AvailableMethods : bool, optional\n If True, function will determine which methods can be used to obtain\n the Upper Flammability Limit for the desired chemical, and will return\n methods instead of Upper Flammability Limit.\n\n Notes\n -----\n\n References\n ----------\n .. [1] IEC. \u201cIEC 60079-20-1:2010 Explosive atmospheres - Part 20-1:\n Material characteristics for gas and vapour classification - Test\n methods and data.\u201d https://webstore.iec.ch/publication/635. See also\n https://law.resource.org/pub/in/bis/S05/is.iec.60079.20.1.2010.pdf\n .. [2] National Fire Protection Association. NFPA 497: Recommended\n Practice for the Classification of Flammable Liquids, Gases, or Vapors\n and of Hazardous. NFPA, 2008.\n '''\n def list_methods():\n arg_5 = []\n if arg_2 in IEC_2010.index and not np.isnan(IEC_2010.at[arg_2, 'Func']):\n arg_5.append(IEC)\n if arg_2 in NFPA_2008.index and not np.isnan(NFPA_2008.at[arg_2, 'Func']):\n arg_5.append(NFPA)\n if arg_0:\n arg_5.append(SUZUKI)\n if arg_1:\n arg_5.append(CROWLLOUVAR)\n arg_5.append(NONE)\n return arg_5\n if arg_3:\n return list_methods()\n if not arg_4:\n arg_4 = list_methods()[0]\n\n if arg_4 == IEC:\n return float(IEC_2010.at[arg_2, 'Func'])\n elif arg_4 == NFPA:\n return float(NFPA_2008.at[arg_2, 'Func'])\n elif arg_4 == SUZUKI:\n return Suzuki_Func(arg_0=arg_0)\n elif arg_4 == CROWLLOUVAR:\n return Crowl_Louvar_Func(arg_1=arg_1)\n elif arg_4 == NONE:\n return None\n else:\n raise Exception('Failure in in function')"} +{"_id": "doc_5362", "title": "", "text": "def Func(arg_0, arg_1=False): # pragma: no cover\n r'''Interface for drawing a 2D image of all the molecules in the\n mixture. Requires an HTML5 browser, and the libraries RDKit and\n IPython. An exception is raised if either of these libraries is\n absent.\n\n Parameters\n ----------\n Hs : bool\n Whether or not to show hydrogen\n\n Examples\n --------\n Mixture(['natural gas']).Func()\n '''\n try:\n from rdkit.Chem import Draw\n from rdkit.Chem.Draw import IPythonConsole\n if arg_1:\n arg_2 = [i.rdkitmol_Hs for i in arg_0.Chemicals]\n else:\n arg_2 = [i.rdkitmol for i in arg_0.Chemicals]\n return Draw.MolsToImage(arg_2)\n except:\n return 'Rdkit is required for this feature.'"} +{"_id": "doc_5363", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n r'''Calculate a real fluid's Joule Thomson coefficient. The required \n derivative should be calculated with an equation of state, and `Cp` is the\n real fluid versions. This can either be calculated with `dV_dT` directly, \n or with `beta` if it is already known.\n\n .. 
math::\n \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n \n Parameters\n ----------\n T : float\n Temperature of fluid, [K]\n V : float\n Molar volume of fluid, [m^3/mol]\n Cp : float\n Real fluid heat capacity at constant pressure, [J/mol/K]\n dV_dT : float, optional\n Derivative of `V` with respect to `T`, [m^3/mol/K]\n beta : float, optional\n Isobaric coefficient of a thermal expansion, [1/K]\n\n Returns\n -------\n mu_JT : float\n Joule-Thomson coefficient [K/Pa]\n \n Examples\n --------\n Example from [2]_:\n \n >>> Func(T=390, V=0.00229754, Cp=153.235, dV_dT=1.226396e-05)\n 1.621956080529905e-05\n\n References\n ----------\n .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering. \n Butterworth-Heinemann, 1985.\n .. [2] Pratt, R. M. \"Thermodynamic Properties Involving Derivatives: Using \n the Peng-Robinson Equation of State.\" Chemical Engineering Education 35,\n no. 2 (March 1, 2001): 112-115. \n '''\n if arg_3:\n return (arg_0*arg_3 - arg_1)/arg_2\n elif arg_4:\n return arg_1/arg_2*(arg_4*arg_0 - 1.)\n else:\n raise Exception('Either dV_dT or beta is needed')"} +{"_id": "doc_5364", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Converts a list of mole fractions to mass fractions. Requires molecular\n weights for all species.\n\n .. math::\n w_i = \\frac{z_i MW_i}{MW_{avg}}\n\n MW_{avg} = \\sum_i z_i MW_i\n\n Parameters\n ----------\n zs : iterable\n Mole fractions [-]\n MWs : iterable\n Molecular weights [g/mol]\n\n Returns\n -------\n ws : iterable\n Mass fractions [-]\n\n Notes\n -----\n Does not check that the sums add to one. Does not check that inputs are of\n the same length.\n\n Examples\n --------\n >>> Func([0.5, 0.5], [10, 20])\n [0.3333333333333333, 0.6666666666666666]\n '''\n arg_2 = sum(zi*MWi for zi, MWi in zip(arg_0, arg_1))\n arg_3 = [zi*MWi/arg_2 for zi, MWi in zip(arg_0, arg_1)]\n return arg_3"} +{"_id": "doc_5365", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Converts a list of mole fractions to volume fractions. Requires molar\n volumes for all species.\n\n .. math::\n \\text{Vf}_i = \\frac{z_i V_{m,i}}{\\sum_i z_i V_{m,i}}\n\n Parameters\n ----------\n zs : iterable\n Mole fractions [-]\n VMs : iterable\n Molar volumes of species [m^3/mol]\n\n Returns\n -------\n Vfs : list\n Molar volume fractions [-]\n\n Notes\n -----\n Does not check that the sums add to one. Does not check that inputs are of\n the same length.\n\n Molar volumes are specified in terms of pure components only. Function\n works with any phase.\n\n Examples\n --------\n Acetone and benzene example\n\n >>> Func([0.637, 0.363], [8.0234e-05, 9.543e-05])\n [0.5960229712956298, 0.4039770287043703]\n '''\n arg_2 = [zi*Vmi for zi, Vmi in zip(arg_0, arg_1)]\n arg_3 = sum(arg_2)\n return [arg_4/arg_3 for arg_4 in arg_2]"} +{"_id": "doc_5366", "title": "", "text": "def Func(arg_0, arg_1=None):\n r'''Checks inputs for suitability of use by a mixing rule which requires\n all inputs to be of the same length and non-None. 
A number of variations\n were attempted for this function; this was found to be the quickest.\n\n Parameters\n ----------\n all_inputs : array-like of array-like\n list of all the lists of inputs, [-]\n length : int, optional\n Length of the desired inputs, [-]\n\n Returns\n -------\n False/True : bool\n Returns True only if all inputs are the same length (or length `length`)\n and none of the inputs contain None [-]\n\n Notes\n -----\n Does not check for nan values.\n\n Examples\n --------\n >>> Func(([1, 1], [1, 1], [1, 30], [10,0]), length=2)\n True\n '''\n if not arg_1:\n arg_1 = len(arg_0[0])\n for arg_2 in arg_0:\n if None in arg_2 or len(arg_2) != arg_1:\n return False\n return True"} +{"_id": "doc_5367", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Simple function calculates a property based on weighted averages of\n logarithmic properties.\n\n .. math::\n y = \\sum_i \\text{frac}_i \\cdot \\log(\\text{prop}_i)\n\n Parameters\n ----------\n fracs : array-like\n Fractions of a mixture\n props: array-like\n Properties\n\n Returns\n -------\n prop : value\n Calculated property\n\n Notes\n -----\n Does not work on negative values.\n Returns None if any fractions or properties are missing or are not of the\n same length.\n\n Examples\n --------\n >>> Func([0.1, 0.9], [0.01, 0.02])\n 0.01866065983073615\n '''\n if not none_and_length_check([arg_0, arg_1]):\n return None\n return exp(sum(arg_2*log(arg_3) for arg_2, arg_3 in zip(arg_0, arg_1)))"} +{"_id": "doc_5368", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None):\n r'''Determines which phase's property should be set as a default, given\n the phase a chemical is, and the property values of various phases. For the\n case of liquid-gas phase, returns None. If the property is not available\n for the current phase, or if the current phase is not known, returns None.\n\n Parameters\n ----------\n phase : str\n One of {'s', 'l', 'g', 'two-phase'}\n s : float\n Solid-phase property\n l : float\n Liquid-phase property\n g : float\n Gas-phase property\n V_over_F : float\n Vapor phase fraction\n\n Returns\n -------\n prop : float\n The selected/calculated property for the relevant phase\n\n Notes\n -----\n Could calculate mole-fraction weighted properties for the two phase regime.\n Could also implement equilibria with solid phases.\n\n Examples\n --------\n >>> Func(phase='g', l=1560.14, g=3312.)\n 3312.0\n '''\n if arg_0 == 's':\n return arg_1\n elif arg_0 == 'l':\n return arg_2\n elif arg_0 == 'g':\n return arg_3\n elif arg_0 == 'two-phase':\n return None #TODO: all two-phase properties?\n elif arg_0 is None:\n return None\n else:\n raise Exception('Property not recognized')"} +{"_id": "doc_5369", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Method to obtain a sorted list of methods which are valid at `T`\n according to `test_method_validity`. Considers either only user methods\n if forced is True, or all methods. 
User methods are first tested\n according to their listed order; unless forced is True, all\n methods are then tested and sorted by their order in `ranked_methods`.\n\n Parameters\n ----------\n T : float\n Temperature at which to test methods, [K]\n\n Returns\n -------\n sorted_valid_methods : list\n Sorted list of methods valid at T according to\n `test_method_validity`\n '''\n # Consider either only the user's methods or all methods\n # Tabular data will be in both when inserted\n if arg_0.forced:\n arg_2 = list(arg_0.user_methods)\n else:\n arg_2 = list(arg_0.all_methods)\n\n # User methods (incl. tabular data); add back later, after ranking the rest\n if arg_0.user_methods:\n [arg_2.remove(arg_3) for arg_3 in arg_0.user_methods]\n\n # Index the rest of the methods by ranked_methods, and add them to a list, sorted_methods\n arg_4 = sorted([arg_0.ranked_methods.index(arg_3) for arg_3 in arg_2])\n arg_5 = [arg_0.ranked_methods[arg_3] for arg_3 in arg_4]\n\n # Add back the user's methods to the top, in order.\n if arg_0.user_methods:\n [arg_5.insert(0, arg_3) for arg_3 in reversed(arg_0.user_methods)]\n\n arg_6 = []\n for arg_7 in arg_5:\n if arg_0.test_method_validity(arg_1, arg_7):\n arg_6.append(arg_7)\n\n return arg_6"}
{"_id": "doc_5370", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n r'''Method to solve for the temperature at which a property is at a\n specified value. `T_dependent_property` is used to calculate the value\n of the property as a function of temperature; if `reset_method` is True,\n the best method is used at each temperature as the solver seeks a\n solution. This slows the solution moderately.\n\n Checks the given property value with `test_property_validity` first\n and raises an exception if it is not valid. Requires that Tmin and\n Tmax have been set to know what range to search within.\n\n Search is performed with the brenth solver from SciPy.\n\n Parameters\n ----------\n goal : float\n Property value desired, [`units`]\n reset_method : bool\n Whether or not to reset the method as the solver searches\n\n Returns\n -------\n T : float\n Temperature at which the property is the specified value [K]\n '''\n if arg_0.Tmin is None or arg_0.Tmax is None:\n raise Exception('Both a minimum and a maximum value are not present indicating there is not enough data for temperature dependency.')\n if not arg_0.test_property_validity(arg_1):\n raise Exception('Input property is not considered plausible; no method would calculate it.')\n\n def error(arg_3):\n if arg_2:\n arg_0.method = None\n return arg_0.T_dependent_property(arg_3) - arg_1\n try:\n return brenth(error, arg_0.Tmin, arg_0.Tmax)\n except ValueError:\n raise Exception('To within the implemented temperature range, it is not possible to calculate the desired value.')"}
{"_id": "doc_5371", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n r'''Method to obtain a derivative of a property with respect to \n temperature, of a given order. Methods found valid by \n `select_valid_methods` are attempted until a method succeeds. If no \n methods are valid and succeed, None is returned.\n\n Calls `calculate_derivative` internally to perform the actual\n calculation.\n \n .. 
math::\n \\text{derivative} = \\frac{d (\\text{property})}{d T}\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate the derivative, [K]\n order : int\n Order of the derivative, >= 1\n\n Returns\n -------\n derivative : float\n Calculated derivative property, [`units/K^order`]\n '''\n if arg_0.method:\n # retest within range\n if arg_0.test_method_validity(arg_1, arg_0.method):\n try:\n return arg_0.calculate_derivative(arg_1, arg_0.method, arg_2)\n except: # pragma: no cover\n pass\n arg_3 = arg_0.select_valid_methods(arg_1)\n for arg_4 in arg_3:\n try:\n return arg_0.calculate_derivative(arg_1, arg_4, arg_2)\n except:\n pass\n return None"}
{"_id": "doc_5372", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Method to calculate the integral of a property with respect to\n temperature, using a specified method. Uses SciPy's `quad` function\n to perform the integral, with no options.\n \n This method can be overwritten by subclasses who may prefer to add\n analytical methods for some or all methods as this is much faster.\n\n If the calculation does not succeed, returns the actual error\n encountered.\n\n Parameters\n ----------\n T1 : float\n Lower limit of integration, [K]\n T2 : float\n Upper limit of integration, [K]\n method : str\n Method for which to find the integral\n\n Returns\n -------\n integral : float\n Calculated integral of the property over the given range, \n [`units*K`]\n '''\n return float(quad(arg_0.calculate, arg_1, arg_2, args=(arg_3))[0])"}
{"_id": "doc_5373", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to calculate the integral of a property with respect to\n temperature, using a specified method. Methods found valid by \n `select_valid_methods` are attempted until a method succeeds. If no \n methods are valid and succeed, None is returned.\n \n Calls `calculate_integral` internally to perform the actual\n calculation.\n\n .. math::\n \\text{integral} = \\int_{T_1}^{T_2} \\text{property} \\; dT\n\n Parameters\n ----------\n T1 : float\n Lower limit of integration, [K]\n T2 : float\n Upper limit of integration, [K]\n method : str\n Method for which to find the integral\n\n Returns\n -------\n integral : float\n Calculated integral of the property over the given range, \n [`units*K`]\n '''\n arg_3 = 0.5*(arg_1+arg_2)\n if arg_0.method:\n # retest within range\n if arg_0.test_method_validity(arg_3, arg_0.method):\n try:\n return arg_0.calculate_integral(arg_1, arg_2, arg_0.method)\n except: # pragma: no cover\n pass\n \n arg_4 = arg_0.select_valid_methods(arg_3)\n for arg_5 in arg_4:\n try:\n return arg_0.calculate_integral(arg_1, arg_2, arg_5)\n except:\n pass\n return None"}
{"_id": "doc_5374", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Method to calculate the integral of a property over temperature\n with respect to temperature, using a specified method. 
Uses SciPy's \n `quad` function to perform the integral, with no options.\n \n This method can be overwritten by subclasses who may prefer to add\n analytical methods for some or all methods as this is much faster.\n\n If the calculation does not succeed, returns the actual error\n encountered.\n\n Parameters\n ----------\n T1 : float\n Lower limit of integration, [K]\n T2 : float\n Upper limit of integration, [K]\n method : str\n Method for which to find the integral\n\n Returns\n -------\n integral : float\n Calculated integral of the property over the given range, \n [`units`]\n '''\n return float(quad(lambda T: arg_0.calculate(T, arg_3)/T, arg_1, arg_2)[0])"}
{"_id": "doc_5375", "title": "", "text": "def Func(arg_0):\n r'''Method to load all data, and set all_methods based on the available\n data and properties. Demo function for testing only; must be\n implemented according to the methods available for each individual\n method.\n '''\n arg_1 = []\n arg_2, arg_3 = [], []\n if arg_0.CASRN in ['7732-18-5', '67-56-1', '64-17-5']:\n arg_1.append(TEST_METHOD_1)\n arg_0.TEST_METHOD_1_Tmin = 200.\n arg_0.TEST_METHOD_1_Tmax = 350\n arg_0.TEST_METHOD_1_coeffs = [1, .002]\n arg_2.append(arg_0.TEST_METHOD_1_Tmin); arg_3.append(arg_0.TEST_METHOD_1_Tmax)\n if arg_0.CASRN in ['67-56-1']:\n arg_1.append(TEST_METHOD_2)\n arg_0.TEST_METHOD_2_Tmin = 300.\n arg_0.TEST_METHOD_2_Tmax = 400\n arg_0.TEST_METHOD_2_coeffs = [1, .003]\n arg_2.append(arg_0.TEST_METHOD_2_Tmin); arg_3.append(arg_0.TEST_METHOD_2_Tmax)\n arg_0.all_methods = set(arg_1)\n if arg_2 and arg_3:\n arg_0.Tmin = min(arg_2)\n arg_0.Tmax = max(arg_3)"}
{"_id": "doc_5376", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func a property with a specified method, with no\n validity checking or error handling. Demo function for testing only;\n must be implemented according to the methods available for each\n individual method. Include the interpolation call here.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n method : str\n Method name to use\n\n Returns\n -------\n prop : float\n Calculated property, [`units`]\n '''\n if arg_2 == TEST_METHOD_1:\n arg_3 = arg_0.TEST_METHOD_1_coeffs[0] + arg_0.TEST_METHOD_1_coeffs[1]*arg_1\n elif arg_2 == TEST_METHOD_2:\n arg_3 = arg_0.TEST_METHOD_2_coeffs[0] + arg_0.TEST_METHOD_2_coeffs[1]*arg_1\n elif arg_2 in arg_0.tabular_data:\n arg_3 = arg_0.interpolate(arg_1, arg_2)\n return arg_3"}
{"_id": "doc_5377", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to obtain a sorted list of methods which are valid at `T`\n according to `test_method_validity`. Considers either only user methods\n if forced is True, or all methods. 
User methods are first tested\n according to their listed order, and unless forced is True, then all\n methods are tested and sorted by their order in `ranked_methods`.\n\n Parameters\n ----------\n T : float\n Temperature at which to test methods, [K]\n P : float\n Pressure at which to test methods, [Pa]\n\n Returns\n -------\n sorted_valid_methods_P : list\n Sorted lists of methods valid at T and P according to\n `test_method_validity`\n '''\n # Same as select_valid_methods but with _P added to variables\n if arg_0.forced_P:\n arg_3 = list(arg_0.user_methods_P)\n else:\n arg_3 = list(arg_0.all_methods_P)\n\n if arg_0.user_methods_P:\n [arg_3.remove(arg_4) for arg_4 in arg_0.user_methods_P]\n\n arg_5 = sorted([arg_0.ranked_methods_P.index(arg_4) for arg_4 in arg_3])\n arg_6 = [arg_0.ranked_methods_P[arg_4] for arg_4 in arg_5]\n\n if arg_0.user_methods_P:\n [arg_6.insert(0, arg_4) for arg_4 in reversed(arg_0.user_methods_P)]\n\n arg_7 = []\n for arg_8 in arg_6:\n if arg_0.test_method_validity_P(arg_1, arg_2, arg_8):\n arg_7.append(arg_8)\n\n return arg_7"} +{"_id": "doc_5378", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to calculate the property with sanity checking and without\n specifying a specific method. `select_valid_methods_P` is used to obtain\n a sorted list of methods to try. Methods are then tried in order until\n one succeeds. The methods are allowed to fail, and their results are\n checked with `test_property_validity`. On success, the used method\n is stored in the variable `method_P`.\n\n If `method_P` is set, this method is first checked for validity with\n `test_method_validity_P` for the specified temperature, and if it is\n valid, it is then used to calculate the property. The result is checked\n for validity, and returned if it is valid. If either of the checks fail,\n the function retrieves a full list of valid methods with\n `select_valid_methods_P` and attempts them as described above.\n\n If no methods are found which succeed, returns None.\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate the property, [K]\n P : float\n Pressure at which to calculate the property, [Pa]\n\n Returns\n -------\n prop : float\n Calculated property, [`units`]\n '''\n # Optimistic track, with the already set method\n if arg_0.method_P:\n # retest within range\n if arg_0.test_method_validity_P(arg_1, arg_2, arg_0.method_P):\n try:\n arg_3 = arg_0.calculate_P(arg_1, arg_2, arg_0.method_P)\n if arg_0.test_property_validity(arg_3):\n return arg_3\n except: # pragma: no cover\n pass\n\n # get valid methods at T, and try them until one yields a valid\n # property; store the method_P and return the answer\n arg_0.sorted_valid_methods_P = arg_0.select_valid_methods_P(arg_1, arg_2)\n for arg_5 in arg_0.sorted_valid_methods_P:\n try:\n arg_3 = arg_0.calculate_P(arg_1, arg_2, arg_5)\n if arg_0.test_property_validity(arg_3):\n arg_0.method_P = arg_5\n return arg_3\n except: # pragma: no cover\n pass\n # Function returns None if it does not work.\n return None"} +{"_id": "doc_5379", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1):\n r'''Method to calculate a derivative of a temperature and pressure\n dependent property with respect to temperature at constant pressure,\n of a given order. Methods found valid by `select_valid_methods_P` are \n attempted until a method succeeds. If no methods are valid and succeed,\n None is returned.\n\n Calls `calculate_derivative_T` internally to perform the actual\n calculation.\n \n .. 
math::\n \\text{derivative} = \\frac{d (\\text{property})}{d T}|_{P}\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate the derivative, [K]\n P : float\n Pressure at which to calculate the derivative, [Pa]\n order : int\n Order of the derivative, >= 1\n\n Returns\n -------\n d_prop_d_T_at_P : float\n Calculated derivative property, [`units/K^order`]\n '''\n arg_4 = arg_0.select_valid_methods_P(arg_1, arg_2)\n for arg_5 in arg_4:\n try:\n return arg_0.calculate_derivative_T(arg_1, arg_2, arg_5, arg_3)\n except:\n pass\n return None"} +{"_id": "doc_5380", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1):\n r'''Method to calculate a derivative of a temperature and pressure\n dependent property with respect to pressure at constant temperature,\n of a given order. Methods found valid by `select_valid_methods_P` are \n attempted until a method succeeds. If no methods are valid and succeed,\n None is returned.\n\n Calls `calculate_derivative_P` internally to perform the actual\n calculation.\n \n .. math::\n \\text{derivative} = \\frac{d (\\text{property})}{d P}|_{T}\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate the derivative, [K]\n P : float\n Pressure at which to calculate the derivative, [Pa]\n order : int\n Order of the derivative, >= 1\n\n Returns\n -------\n d_prop_d_P_at_T : float\n Calculated derivative property, [`units/Pa^order`]\n '''\n arg_4 = arg_0.select_valid_methods_P(arg_1, arg_2)\n for arg_5 in arg_4:\n try:\n return arg_0.calculate_derivative_P(arg_2, arg_1, arg_5, arg_3)\n except:\n pass\n return None"} +{"_id": "doc_5381", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=1):\n r'''Method to calculate a derivative of a mixture property with respect\n to temperature at constant pressure and composition,\n of a given order. Methods found valid by `select_valid_methods` are \n attempted until a method succeeds. If no methods are valid and succeed,\n None is returned.\n\n Calls `calculate_derivative_T` internally to perform the actual\n calculation.\n \n .. math::\n \\text{derivative} = \\frac{d (\\text{property})}{d T}|_{P, z}\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate the derivative, [K]\n P : float\n Pressure at which to calculate the derivative, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n order : int\n Order of the derivative, >= 1\n\n Returns\n -------\n d_prop_d_T_at_P : float\n Calculated derivative property, [`units/K^order`]\n '''\n arg_6 = arg_0.select_valid_methods(arg_1, arg_2, arg_3, arg_4)\n for arg_7 in arg_6:\n try:\n return arg_0.calculate_derivative_T(arg_1, arg_2, arg_3, arg_4, arg_7, arg_5)\n except:\n pass\n return None"} +{"_id": "doc_5382", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=1):\n r'''Method to calculate a derivative of a mixture property with respect\n to pressure at constant temperature and composition,\n of a given order. Methods found valid by `select_valid_methods` are \n attempted until a method succeeds. If no methods are valid and succeed,\n None is returned.\n\n Calls `calculate_derivative_P` internally to perform the actual\n calculation.\n \n .. 
math::\n \\text{derivative} = \\frac{d (\\text{property})}{d P}|_{T, z}\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate the derivative, [K]\n P : float\n Pressure at which to calculate the derivative, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n order : int\n Order of the derivative, >= 1\n\n Returns\n -------\n d_prop_d_P_at_T : float\n Calculated derivative property, [`units/Pa^order`]\n '''\n arg_6 = arg_0.select_valid_methods(arg_1, arg_2, arg_3, arg_4)\n for arg_7 in arg_6:\n try:\n return arg_0.calculate_derivative_P(arg_2, arg_1, arg_3, arg_4, arg_7, arg_5)\n except:\n pass\n return None"} +{"_id": "doc_5383", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n r'''Generic method to calculate `T` from a specified `P` and `V`.\n Provides SciPy's `newton` solver, and iterates to solve the general\n equation for `P`, recalculating `a_alpha` as a function of temperature\n using `a_alpha_and_derivatives` each iteration.\n\n Parameters\n ----------\n P : float\n Pressure, [Pa]\n V : float\n Molar volume, [m^3/mol]\n quick : bool, optional\n Unimplemented, although it may be possible to derive explicit \n expressions as done for many pure-component EOS\n\n Returns\n -------\n T : float\n Temperature, [K]\n '''\n arg_0.Tc = sum(arg_0.Tcs)/arg_0.N\n # -4 goes back from object, GCEOS\n return super(type(arg_0).__mro__[-3], arg_0).Func(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_5384", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n r'''Sets `a`, `kappa`, and `Tc` for a specific component before the \n pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n called by `GCEOSMIX.a_alpha_and_derivatives` for every component.'''\n arg_0.a, arg_0.kappa, arg_0.Tc = arg_0.ais[arg_1], arg_0.kappas[arg_1], arg_0.Tcs[arg_1]"} +{"_id": "doc_5385", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n r'''Sets `a`, `m`, and `Tc` for a specific component before the \n pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n called by `GCEOSMIX.a_alpha_and_derivatives` for every component.'''\n arg_0.a, arg_0.m, arg_0.Tc = arg_0.ais[arg_1], arg_0.ms[arg_1], arg_0.Tcs[arg_1]"} +{"_id": "doc_5386", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n r'''Sets `a`, `kappa0`, `kappa1`, and `Tc` for a specific component before the \n pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n called by `GCEOSMIX.a_alpha_and_derivatives` for every component.'''\n if not hasattr(arg_0, 'kappas'):\n arg_0.kappas = [arg_6 + arg_7*(1 + (arg_2/arg_8)**0.5)*(0.7 - (arg_2/arg_8)) for arg_6, arg_7, arg_8 in zip(arg_0.kappa0s, arg_0.kappa1s, arg_0.Tcs)]\n arg_0.a, arg_0.kappa, arg_0.kappa0, arg_0.kappa1, arg_0.Tc = arg_0.ais[arg_1], arg_0.kappas[arg_1], arg_0.kappa0s[arg_1], arg_0.kappa1s[arg_1], arg_0.Tcs[arg_1]"} +{"_id": "doc_5387", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n r'''Sets `a`, `kappa`, `kappa0`, `kappa1`, `kappa2`, `kappa3` and `Tc`\n for a specific component before the \n pure-species EOS's `a_alpha_and_derivatives` method is called. 
Both are \n called by `GCEOSMIX.a_alpha_and_derivatives` for every component.'''\n if not hasattr(arg_0, 'kappas'):\n arg_0.kappas = []\n for arg_4, arg_5, arg_6, arg_7, arg_8 in zip(arg_0.Tcs, arg_0.kappa0s, arg_0.kappa1s, arg_0.kappa2s, arg_0.kappa3s):\n arg_9 = arg_2/arg_4\n arg_10 = arg_5 + ((arg_6 + arg_7*(arg_8 - arg_9)*(1. - arg_9**0.5))*(1. + arg_9**0.5)*(0.7 - arg_9))\n arg_0.kappas.append(arg_10)\n\n (arg_0.a, arg_0.kappa, arg_0.kappa0, arg_0.kappa1, arg_0.kappa2, \n arg_0.kappa3, arg_0.Tc) = (arg_0.ais[arg_1], arg_0.kappas[arg_1], arg_0.kappa0s[arg_1],\n arg_0.kappa1s[arg_1], arg_0.kappa2s[arg_1], arg_0.kappa3s[arg_1], arg_0.Tcs[arg_1])"}
{"_id": "doc_5388", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n r'''Sets `a`, `omega`, and `Tc` for a specific component before the \n pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n called by `GCEOSMIX.a_alpha_and_derivatives` for every component.'''\n arg_0.a, arg_0.Tc, arg_0.omega = arg_0.ais[arg_1], arg_0.Tcs[arg_1], arg_0.omegas[arg_1]"}
{"_id": "doc_5389", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n r'''Sets `a`, `S1`, `S2` and `Tc` for a specific component before the \n pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n called by `GCEOSMIX.a_alpha_and_derivatives` for every component.'''\n arg_0.a, arg_0.Tc, arg_0.S1, arg_0.S2 = arg_0.ais[arg_1], arg_0.Tcs[arg_1], arg_0.S1s[arg_1], arg_0.S2s[arg_1]"}
{"_id": "doc_5390", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Estimates the thermal conductivity of paraffin liquid hydrocarbons.\n Fits their data well, and is useful as only MW is required.\n X is the Molecular weight, and Y the temperature.\n\n .. math::\n K = a + bY + CY^2 + dY^3\n\n a = A_1 + B_1 X + C_1 X^2 + D_1 X^3\n\n b = A_2 + B_2 X + C_2 X^2 + D_2 X^3\n\n c = A_3 + B_3 X + C_3 X^2 + D_3 X^3\n\n d = A_4 + B_4 X + C_4 X^2 + D_4 X^3\n\n Parameters\n ----------\n T : float\n Temperature of the fluid [K]\n M : float\n Molecular weight of the fluid [g/mol]\n\n Returns\n -------\n kl : float\n Estimated liquid thermal conductivity [W/m/K]\n\n Notes\n -----\n The accuracy of this equation has not been reviewed.\n\n Examples\n --------\n Data point from [1]_.\n\n >>> Func(273.15, 170)\n 0.14274278108272603\n\n References\n ----------\n .. [1] Bahadori, Alireza, and Saeid Mokhatab. \"Estimating Thermal\n Conductivity of Hydrocarbons.\" Chemical Engineering 115, no. 13\n (December 2008): 52-54\n '''\n arg_2 = [-6.48326E-2, 2.715015E-3, -1.08580E-5, 9.853917E-9]\n arg_3 = [1.565612E-2, -1.55833E-4, 5.051114E-7, -4.68030E-10]\n arg_4 = [-1.80304E-4, 1.758693E-6, -5.55224E-9, 5.201365E-12]\n arg_5 = [5.880443E-7, -5.65898E-9, 1.764384E-11, -1.65944E-14]\n arg_6, arg_7 = arg_1, arg_0\n arg_8 = arg_2[0] + arg_3[0]*arg_6 + arg_4[0]*arg_6**2 + arg_5[0]*arg_6**3\n arg_9 = arg_2[1] + arg_3[1]*arg_6 + arg_4[1]*arg_6**2 + arg_5[1]*arg_6**3\n arg_10 = arg_2[2] + arg_3[2]*arg_6 + arg_4[2]*arg_6**2 + arg_5[2]*arg_6**3\n arg_11 = arg_2[3] + arg_3[3]*arg_6 + arg_4[3]*arg_6**2 + arg_5[3]*arg_6**3\n return arg_8 + arg_9*arg_7 + arg_10*arg_7**2 + arg_11*arg_7**3"}
{"_id": "doc_5391", "title": "", "text": "def Func(arg_0, arg_1):\n r'''Estimates the thermal conductivity of hydrocarbon gases at low P.\n Fits their data well, and is useful as only MW is required.\n Y is the Molecular weight, and X the temperature.\n\n .. 
math::\n K = a + bY + CY^2 + dY^3\n\n a = A_1 + B_1 X + C_1 X^2 + D_1 X^3\n\n b = A_2 + B_2 X + C_2 X^2 + D_2 X^3\n\n c = A_3 + B_3 X + C_3 X^2 + D_3 X^3\n\n d = A_4 + B_4 X + C_4 X^2 + D_4 X^3\n\n Parameters\n ----------\n T : float\n Temperature of the gas [K]\n MW : float\n Molecular weight of the gas [g/mol]\n\n Returns\n -------\n kg : float\n Estimated gas thermal conductivity [W/m/K]\n\n Notes\n -----\n The accuracy of this equation has not been reviewed.\n\n Examples\n --------\n >>> Func(40+273.15, 20) # Point from article\n 0.031968165337873326\n\n References\n ----------\n .. [1] Bahadori, Alireza, and Saeid Mokhatab. \"Estimating Thermal\n Conductivity of Hydrocarbons.\" Chemical Engineering 115, no. 13\n (December 2008): 52-54\n '''\n arg_2 = [4.3931323468E-1, -3.88001122207E-2, 9.28616040136E-4, -6.57828995724E-6]\n arg_3 = [-2.9624238519E-3, 2.67956145820E-4, -6.40171884139E-6, 4.48579040207E-8]\n arg_4 = [7.54249790107E-6, -6.46636219509E-7, 1.5124510261E-8, -1.0376480449E-10]\n arg_5 = [-6.0988433456E-9, 5.20752132076E-10, -1.19425545729E-11, 8.0136464085E-14]\n arg_6, arg_7 = arg_0, arg_1\n arg_8 = arg_2[0] + arg_3[0]*arg_6 + arg_4[0]*arg_6**2 + arg_5[0]*arg_6**3\n arg_9 = arg_2[1] + arg_3[1]*arg_6 + arg_4[1]*arg_6**2 + arg_5[1]*arg_6**3\n arg_10 = arg_2[2] + arg_3[2]*arg_6 + arg_4[2]*arg_6**2 + arg_5[2]*arg_6**3\n arg_11 = arg_2[3] + arg_3[3]*arg_6 + arg_4[3]*arg_6**2 + arg_5[3]*arg_6**3\n return arg_8 + arg_9*arg_7 + arg_10*arg_7**2 + arg_11*arg_7**3"}
{"_id": "doc_5392", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func low-pressure liquid thermal conductivity at\n temperature `T` with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature of the liquid, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n kl : float\n Thermal conductivity of the liquid at T and a low pressure, [W/m/K]\n '''\n if arg_2 == SHEFFY_JOHNSON:\n arg_3 = Sheffy_Johnson(arg_1, arg_0.MW, arg_0.Tm)\n elif arg_2 == SATO_RIEDEL:\n arg_3 = Sato_Riedel(arg_1, arg_0.MW, arg_0.Tb, arg_0.Tc)\n elif arg_2 == GHARAGHEIZI_L:\n arg_3 = Gharagheizi_liquid(arg_1, arg_0.MW, arg_0.Tb, arg_0.Pc, arg_0.omega)\n elif arg_2 == NICOLA:\n arg_3 = Nicola(arg_1, arg_0.MW, arg_0.Tc, arg_0.Pc, arg_0.omega)\n elif arg_2 == NICOLA_ORIGINAL:\n arg_3 = Nicola_original(arg_1, arg_0.MW, arg_0.Tc, arg_0.omega, arg_0.Hfus)\n elif arg_2 == LAKSHMI_PRASAD:\n arg_3 = Lakshmi_Prasad(arg_1, arg_0.MW)\n elif arg_2 == BAHADORI_L:\n arg_3 = Bahadori_liquid(arg_1, arg_0.MW)\n elif arg_2 == DIPPR_PERRY_8E:\n arg_3 = EQ100(arg_1, *arg_0.Perrys2_315_coeffs)\n elif arg_2 == VDI_PPDS:\n arg_3 = horner(arg_0.VDI_PPDS_coeffs, arg_1)\n elif arg_2 == COOLPROP:\n arg_3 = CoolProp_T_dependent_property(arg_1, arg_0.CASRN, 'L', 'l')\n elif arg_2 in arg_0.tabular_data:\n arg_3 = arg_0.interpolate(arg_1, arg_2)\n return arg_3"}
{"_id": "doc_5393", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Method to calculate pressure-dependent liquid thermal conductivity\n at temperature `T` and pressure `P` with a given method.\n\n This method has no exception handling; see `TP_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate liquid thermal conductivity, [K]\n P : float\n Pressure at which to calculate liquid thermal conductivity, [Pa]\n method : str\n Name of the method to use\n\n Returns\n -------\n kl : float\n Thermal conductivity of the liquid 
at T and P, [W/m/K]\n '''\n if arg_3 == DIPPR_9G:\n arg_4 = arg_0.T_dependent_property(arg_1)\n arg_4 = DIPPR9G(arg_1, arg_2, arg_0.Tc, arg_0.Pc, arg_4)\n elif arg_3 == MISSENARD:\n arg_4 = arg_0.T_dependent_property(arg_1)\n arg_4 = Missenard(arg_1, arg_2, arg_0.Tc, arg_0.Pc, arg_4)\n elif arg_3 == COOLPROP:\n arg_4 = PropsSI('L', 'T', arg_1, 'P', arg_2, arg_0.CASRN)\n elif arg_3 in arg_0.tabular_data:\n arg_4 = arg_0.interpolate_P(arg_1, arg_2, arg_3)\n return arg_4"}
{"_id": "doc_5394", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func thermal conductivity of a liquid mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n k : float\n Thermal conductivity of the liquid mixture, [W/m/K]\n '''\n if arg_5 == SIMPLE:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ThermalConductivityLiquids]\n return mixing_simple(arg_3, arg_6)\n elif arg_5 == DIPPR_9H:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ThermalConductivityLiquids]\n return DIPPR9H(arg_4, arg_6)\n elif arg_5 == FILIPPOV:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ThermalConductivityLiquids]\n return Filippov(arg_4, arg_6)\n elif arg_5 == MAGOMEDOV:\n arg_7 = arg_0.ThermalConductivityLiquids[arg_0.index_w](arg_1, arg_2)\n arg_4 = list(arg_4) ; arg_4.pop(arg_0.index_w)\n return thermal_conductivity_Magomedov(arg_1, arg_2, arg_4, arg_0.wCASs, arg_7)\n else:\n raise Exception('Method not valid')"}
{"_id": "doc_5395", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r'''Method to Func low-pressure gas thermal conductivity at\n temperature `T` with a given method.\n\n This method has no exception handling; see `T_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature of the gas, [K]\n method : str\n Name of the method to use\n\n Returns\n -------\n kg : float\n Thermal conductivity of the gas at T and a low pressure, [W/m/K]\n '''\n if arg_2 == GHARAGHEIZI_G:\n arg_3 = Gharagheizi_gas(arg_1, arg_0.MW, arg_0.Tb, arg_0.Pc, arg_0.omega)\n elif arg_2 == DIPPR_9B:\n arg_4 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_5 = arg_0.mug(arg_1) if hasattr(arg_0.mug, '__call__') else arg_0.mug\n arg_3 = DIPPR9B(arg_1, arg_0.MW, arg_4, arg_5, arg_0.Tc)\n elif arg_2 == CHUNG:\n arg_4 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_5 = arg_0.mug(arg_1) if hasattr(arg_0.mug, '__call__') else arg_0.mug\n arg_3 = Chung(arg_1, arg_0.MW, arg_0.Tc, arg_0.omega, arg_4, arg_5)\n elif arg_2 == ELI_HANLEY:\n arg_4 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_3 = eli_hanley(arg_1, arg_0.MW, arg_0.Tc, arg_0.Vc, arg_0.Zc, arg_0.omega, arg_4)\n elif arg_2 == EUCKEN_MOD:\n arg_4 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_5 = arg_0.mug(arg_1) if hasattr(arg_0.mug, '__call__') else arg_0.mug\n arg_3 = Eucken_modified(arg_0.MW, arg_4, arg_5)\n elif arg_2 == EUCKEN:\n arg_4 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_5 = arg_0.mug(arg_1) if 
hasattr(arg_0.mug, '__call__') else arg_0.mug\n arg_3 = Eucken(arg_0.MW, arg_4, arg_5)\n elif arg_2 == DIPPR_PERRY_8E:\n arg_3 = EQ102(arg_1, *arg_0.Perrys2_314_coeffs)\n elif arg_2 == VDI_PPDS:\n arg_3 = horner(arg_0.VDI_PPDS_coeffs, arg_1)\n elif arg_2 == BAHADORI_G:\n arg_3 = Bahadori_gas(arg_1, arg_0.MW)\n elif arg_2 == COOLPROP:\n arg_3 = CoolProp_T_dependent_property(arg_1, arg_0.CASRN, 'L', 'g')\n elif arg_2 in arg_0.tabular_data:\n arg_3 = arg_0.interpolate(arg_1, arg_2)\n return arg_3"}
{"_id": "doc_5396", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n r'''Method to calculate pressure-dependent gas thermal conductivity\n at temperature `T` and pressure `P` with a given method.\n\n This method has no exception handling; see `TP_dependent_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to calculate gas thermal conductivity, [K]\n P : float\n Pressure at which to calculate gas thermal conductivity, [Pa]\n method : str\n Name of the method to use\n\n Returns\n -------\n kg : float\n Thermal conductivity of the gas at T and P, [W/m/K]\n '''\n if arg_3 == ELI_HANLEY_DENSE:\n arg_4 = arg_0.Vmg(arg_1, arg_2) if hasattr(arg_0.Vmg, '__call__') else arg_0.Vmg\n arg_5 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_6 = eli_hanley_dense(arg_1, arg_0.MW, arg_0.Tc, arg_0.Vc, arg_0.Zc, arg_0.omega, arg_5, arg_4)\n elif arg_3 == CHUNG_DENSE:\n arg_4 = arg_0.Vmg(arg_1, arg_2) if hasattr(arg_0.Vmg, '__call__') else arg_0.Vmg\n arg_5 = arg_0.Cvgm(arg_1) if hasattr(arg_0.Cvgm, '__call__') else arg_0.Cvgm\n arg_7 = arg_0.mug(arg_1, arg_2) if hasattr(arg_0.mug, '__call__') else arg_0.mug\n arg_6 = chung_dense(arg_1, arg_0.MW, arg_0.Tc, arg_0.Vc, arg_0.omega, arg_5, arg_4, arg_7, arg_0.dipole)\n elif arg_3 == STIEL_THODOS_DENSE:\n arg_6 = arg_0.T_dependent_property(arg_1)\n arg_4 = arg_0.Vmg(arg_1, arg_2) if hasattr(arg_0.Vmg, '__call__') else arg_0.Vmg\n arg_6 = stiel_thodos_dense(arg_1, arg_0.MW, arg_0.Tc, arg_0.Pc, arg_0.Vc, arg_0.Zc, arg_4, arg_6)\n elif arg_3 == COOLPROP:\n arg_6 = PropsSI('L', 'T', arg_1, 'P', arg_2, arg_0.CASRN)\n elif arg_3 in arg_0.tabular_data:\n arg_6 = arg_0.interpolate_P(arg_1, arg_2, arg_3)\n return arg_6"}
{"_id": "doc_5397", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n r'''Method to Func thermal conductivity of a gas mixture at \n temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n `ws` with a given method.\n\n This method has no exception handling; see `mixture_property`\n for that.\n\n Parameters\n ----------\n T : float\n Temperature at which to Func the property, [K]\n P : float\n Pressure at which to Func the property, [Pa]\n zs : list[float]\n Mole fractions of all species in the mixture, [-]\n ws : list[float]\n Weight fractions of all species in the mixture, [-]\n method : str\n Name of the method to use\n\n Returns\n -------\n kg : float\n Thermal conductivity of gas mixture, [W/m/K]\n '''\n if arg_5 == SIMPLE:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ThermalConductivityGases]\n return mixing_simple(arg_3, arg_6)\n elif arg_5 == LINDSAY_BROMLEY:\n arg_6 = [i(arg_1, arg_2) for i in arg_0.ThermalConductivityGases]\n arg_7 = [i(arg_1, arg_2) for i in arg_0.ViscosityGases]\n return Lindsay_Bromley(arg_1=arg_1, ys=arg_3, arg_6=arg_6, arg_7=arg_7, Tbs=arg_0.Tbs, MWs=arg_0.MWs)\n else:\n raise Exception('Method not valid')"}
{"_id": "doc_5398", "title": "", "text": "def Func(arg_0):\n r'''Basic formula parser to determine the charge 
from a formula - given\n that the charge is already specified as one element of the formula.\n\n Performs no sanity checking that elements are actually elements.\n \n Parameters\n ----------\n formula : str\n Formula string, very simple formats only, ending in one of '+x',\n '-x', n*'+', or n*'-' or any of them surrounded by brackets but always\n at the end of a formula.\n\n Returns\n -------\n charge : int\n Charge of the molecule, [faraday]\n\n Notes\n -----\n\n Examples\n --------\n >>> Func('Br3-')\n -1\n >>> Func('Br3(-)')\n -1\n '''\n arg_1 = '-' in arg_0\n arg_2 = '+' in arg_0\n if arg_2 and arg_1:\n raise ValueError('Both negative and positive signs were found in the formula; only one sign is allowed')\n elif not (arg_2 or arg_1):\n return 0\n arg_3, arg_4 = (-1, '-') if arg_1 else (1, '+')\n \n arg_5 = False\n if '(' in arg_0:\n arg_5 = bracketed_charge_re.findall(arg_0)\n if arg_5:\n arg_0 = arg_5[-1].replace('(', '').replace(')', '')\n\n arg_6 = arg_0.count(arg_4)\n if arg_6 == 1:\n arg_7 = arg_0.split(arg_4)\n if arg_7[1] == '' or arg_7[1] == ')':\n return arg_3\n else:\n return arg_3*int(arg_7[1])\n else:\n return arg_3*arg_6"}
{"_id": "doc_5399", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convolve 2d gaussian.\"\"\"\n arg_2 = scipy.ndimage.filters.correlate1d(\n arg_0, arg_1, axis=0)\n arg_2 = scipy.ndimage.filters.correlate1d(\n arg_2, arg_1, axis=1)\n return arg_2"}
{"_id": "doc_5400", "title": "", "text": "def Func(arg_0=11, arg_1=1.5):\n \"\"\"Generate a gaussian kernel.\"\"\"\n # 1D Gaussian kernel definition\n arg_2 = numpy.ndarray((arg_0))\n arg_3 = int(arg_0 / 2)\n\n # Fill Gaussian kernel\n for arg_4 in range(arg_0):\n arg_2[arg_4] = (exp(-(((arg_4 - arg_3) ** 2)) /\n (2 * (arg_1 ** 2))))\n return arg_2 / numpy.sum(arg_2)"}
{"_id": "doc_5401", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert PIL image to numpy grayscale array and numpy alpha array.\n\n Args:\n img (PIL.Image): PIL Image object.\n\n Returns:\n (gray, alpha): both numpy arrays.\n \"\"\"\n arg_1 = numpy.asarray(ImageOps.grayscale(arg_0)).astype(numpy.float)\n\n arg_2 = arg_0.getbands()\n arg_3 = None\n if 'A' in arg_2:\n arg_3 = numpy.asarray(arg_0.split()[-1]).astype(numpy.float)\n\n return arg_1, arg_3"}
{"_id": "doc_5402", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute the SSIM value from the reference image to the target image.\n\n Args:\n target (str or PIL.Image): Input image to compare the reference image\n to. This may be a PIL Image object or, to save time, an SSIMImage\n object (e.g. 
the img member of another SSIM object).\n\n Returns:\n Computed SSIM float value.\n \"\"\"\n # Performance boost if handed a compatible SSIMImage object.\n if not isinstance(arg_1, SSIMImage) \\\n or not np.array_equal(arg_0.gaussian_kernel_1d,\n arg_1.gaussian_kernel_1d):\n arg_1 = SSIMImage(arg_1, arg_0.gaussian_kernel_1d, arg_0.img.size)\n\n arg_2 = arg_0.img.img_gray * arg_1.img_gray\n arg_3 = convolve_gaussian_2d(\n arg_2, arg_0.gaussian_kernel_1d)\n arg_4 = arg_0.img.img_gray_mu * arg_1.img_gray_mu\n arg_3 = arg_3 - arg_4\n\n # Numerator of SSIM\n arg_5 = ((2 * arg_4 + arg_0.c_1) *\n (2 * arg_3 + arg_0.c_2))\n\n # Denominator of SSIM\n arg_6 = (\n (arg_0.img.img_gray_mu_squared + arg_1.img_gray_mu_squared +\n arg_0.c_1) *\n (arg_0.img.img_gray_sigma_squared +\n arg_1.img_gray_sigma_squared + arg_0.c_2))\n\n arg_7 = arg_5 / arg_6\n arg_8 = np.average(arg_7)\n return arg_8"}
{"_id": "doc_5403", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1.5,\n arg_3=11):\n \"\"\"Computes SSIM.\n\n Args:\n im1: First PIL Image object to compare.\n im2: Second PIL Image object to compare.\n\n Returns:\n SSIM float value.\n \"\"\"\n arg_4 = get_gaussian_kernel(\n arg_3, arg_2)\n return SSIM(arg_0, arg_4).ssim_value(arg_1)"}
{"_id": "doc_5404", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = None):\n \"\"\"Switch to a new code version on all cluster nodes. You\n should ensure that cluster nodes are updated, otherwise they\n won't be able to apply commands.\n\n :param newVersion: new code version\n :type int\n :param callback: will be called on success or fail\n :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)\n \"\"\"\n assert isinstance(arg_1, int)\n if arg_1 > arg_0.__selfCodeVersion:\n raise Exception('wrong version, current version is %d, requested version is %d' % (arg_0.__selfCodeVersion, arg_1))\n if arg_1 < arg_0.__enabledCodeVersion:\n raise Exception('wrong version, enabled version is %d, requested version is %d' % (arg_0.__enabledCodeVersion, arg_1))\n arg_0._applyCommand(pickle.dumps(arg_1), arg_2, _COMMAND_TYPE.VERSION)"}
{"_id": "doc_5405", "title": "", "text": "def Func(arg_0):\n \"\"\"Dumps different debug info about cluster to dict and return it\"\"\"\n\n arg_1 = {}\n arg_1['version'] = VERSION\n arg_1['revision'] = REVISION\n arg_1['self'] = arg_0.__selfNode\n arg_1['state'] = arg_0.__raftState\n arg_1['leader'] = arg_0.__raftLeader\n arg_1['partner_nodes_count'] = len(arg_0.__otherNodes)\n for arg_2 in arg_0.__otherNodes:\n arg_1['partner_node_status_server_' + arg_2.id] = 2 if arg_2 in arg_0.__connectedNodes else 0\n arg_1['readonly_nodes_count'] = len(arg_0.__readonlyNodes)\n for arg_2 in arg_0.__readonlyNodes:\n arg_1['readonly_node_status_server_' + arg_2.id] = 2 if arg_2 in arg_0.__connectedNodes else 0\n arg_1['log_len'] = len(arg_0.__raftLog)\n arg_1['last_applied'] = arg_0.__raftLastApplied\n arg_1['commit_idx'] = arg_0.__raftCommitIndex\n arg_1['raft_term'] = arg_0.__raftCurrentTerm\n arg_1['next_node_idx_count'] = len(arg_0.__raftNextIndex)\n for arg_2, arg_4 in iteritems(arg_0.__raftNextIndex):\n arg_1['next_node_idx_server_' + arg_2.id] = arg_4\n arg_1['match_idx_count'] = len(arg_0.__raftMatchIndex)\n for arg_2, arg_4 in iteritems(arg_0.__raftMatchIndex):\n arg_1['match_idx_server_' + arg_2.id] = arg_4\n arg_1['leader_commit_idx'] = arg_0.__leaderCommitIndex\n arg_1['uptime'] = int(time.time() - arg_0.__startTime)\n arg_1['self_code_version'] = arg_0.__selfCodeVersion\n arg_1['enabled_code_version'] = arg_0.__enabledCodeVersion\n return 
arg_1"} +{"_id": "doc_5406", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Find the node to which a connection belongs.\n\n :param conn: connection object\n :type conn: TcpConnection\n :returns corresponding node or None if the node cannot be found\n :rtype Node or None\n \"\"\"\n\n for arg_2 in arg_0._connections:\n if arg_0._connections[arg_2] is arg_1:\n return arg_2\n return None"} +{"_id": "doc_5407", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Callback for connections initiated by the other side\n\n :param conn: connection object\n :type conn: TcpConnection\n \"\"\"\n\n arg_0._unknownConnections.add(arg_1)\n arg_2 = arg_0._syncObj.encryptor\n if arg_2:\n arg_1.encryptor = arg_2\n arg_1.setOnMessageReceivedCallback(functools.partial(arg_0._onIncomingMessageReceived, arg_1))\n arg_1.setOnDisconnectedCallback(functools.partial(arg_0._onDisconnected, arg_1))"} +{"_id": "doc_5408", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Callback for initial messages on incoming connections. Handles encryption, utility messages, and association of the connection with a Node.\n Once this initial setup is done, the relevant connected callback is executed, and further messages are deferred to the onMessageReceived callback.\n\n :param conn: connection object\n :type conn: TcpConnection\n :param message: received message\n :type message: any\n \"\"\"\n\n if arg_0._syncObj.encryptor and not arg_1.sendRandKey:\n arg_1.sendRandKey = arg_2\n arg_1.recvRandKey = os.urandom(32)\n arg_1.send(arg_1.recvRandKey)\n return\n\n # Utility messages\n if isinstance(arg_2, list):\n arg_5 = False\n try:\n if arg_2[0] == 'status':\n arg_1.send(arg_0._syncObj.getStatus())\n arg_5 = True\n elif arg_2[0] == 'add':\n arg_0._syncObj.addNodeToCluster(arg_2[1], callback = functools.partial(arg_0._utilityCallback, arg_1 = arg_1, cmd = 'ADD', arg = arg_2[1]))\n arg_5 = True\n elif arg_2[0] == 'remove':\n if arg_2[1] == arg_0._selfNode.address:\n arg_1.send('FAIL REMOVE ' + arg_2[1])\n else:\n arg_0._syncObj.removeNodeFromCluster(arg_2[1], callback = functools.partial(arg_0._utilityCallback, arg_1 = arg_1, cmd = 'REMOVE', arg = arg_2[1]))\n arg_5 = True\n elif arg_2[0] == 'set_version':\n arg_0._syncObj.setCodeVersion(arg_2[1], callback = functools.partial(arg_0._utilityCallback, arg_1 = arg_1, cmd = 'SET_VERSION', arg = str(arg_2[1])))\n arg_5 = True\n except Exception as e:\n arg_1.send(str(e))\n arg_5 = True\n if arg_5:\n return\n\n # At this point, message should be either a node ID (i.e. 
address) or 'readonly'\n arg_6 = arg_0._nodeAddrToNode[arg_2] if arg_2 in arg_0._nodeAddrToNode else None\n\n if arg_6 is None and arg_2 != 'readonly':\n arg_1.disconnect()\n arg_0._unknownConnections.discard(arg_1)\n return\n\n arg_7 = arg_6 is None\n if arg_7:\n arg_8 = str(arg_0._readonlyNodesCounter)\n arg_6 = Node(arg_8)\n arg_0._readonlyNodes.add(arg_6)\n arg_0._readonlyNodesCounter += 1\n\n arg_0._unknownConnections.discard(arg_1)\n arg_0._connections[arg_6] = arg_1\n arg_1.setOnMessageReceivedCallback(functools.partial(arg_0._onMessageReceived, arg_6))\n if not arg_7:\n arg_0._onNodeConnected(arg_6)\n else:\n arg_0._onReadonlyNodeConnected(arg_6)"} +{"_id": "doc_5409", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check whether this node should initiate a connection to another node\n\n :param node: the other node\n :type node: Node\n \"\"\"\n\n return isinstance(arg_1, TCPNode) and arg_1 not in arg_0._preventConnectNodes and (arg_0._selfIsReadonlyNode or arg_0._selfNode.address > arg_1.address)"} +{"_id": "doc_5410", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Connect to a node if necessary.\n\n :param node: node to connect to\n :type node: Node\n \"\"\"\n\n if arg_1 in arg_0._connections and arg_0._connections[arg_1].state != CONNECTION_STATE.DISCONNECTED:\n return True\n if not arg_0._shouldConnect(arg_1):\n return False\n assert arg_1 in arg_0._connections # Since we \"should connect\" to this node, there should always be a connection object already in place.\n if arg_1 in arg_0._lastConnectAttempt and time.time() - arg_0._lastConnectAttempt[arg_1] < arg_0._syncObj.conf.connectionRetryTime:\n return False\n arg_0._lastConnectAttempt[arg_1] = time.time()\n return arg_0._connections[arg_1].connect(arg_1.ip, arg_1.port)"} +{"_id": "doc_5411", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys.\n Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback.\n\n :param conn: connection object\n :type conn: TcpConnection\n :param message: received message\n :type message: any\n \"\"\"\n\n if not arg_1.sendRandKey:\n arg_1.sendRandKey = arg_2\n arg_1.send(arg_0._selfNode.address)\n\n arg_4 = arg_0._connToNode(arg_1)\n arg_1.setOnMessageReceivedCallback(functools.partial(arg_0._onMessageReceived, arg_4))\n arg_0._onNodeConnected(arg_4)"} +{"_id": "doc_5412", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary.\n\n :param conn: connection object\n :type conn: TcpConnection\n \"\"\"\n\n arg_0._unknownConnections.discard(arg_1)\n arg_2 = arg_0._connToNode(arg_1)\n if arg_2 is not None:\n if arg_2 in arg_0._nodes:\n arg_0._onNodeDisconnected(arg_2)\n arg_0._connectIfNecessarySingle(arg_2)\n else:\n arg_0._readonlyNodes.discard(arg_2)\n arg_0._onReadonlyNodeDisconnected(arg_2)"} +{"_id": "doc_5413", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Send a message to a node. 
Returns False if the connection appears to be dead either before or after actually trying to Func the message.\n\n :param node: target node\n :type node: Node\n :param message: message\n :param message: any\n :returns success\n :rtype bool\n \"\"\"\n\n if arg_1 not in arg_0._connections or arg_0._connections[arg_1].state != CONNECTION_STATE.CONNECTED:\n return False\n arg_0._connections[arg_1].Func(arg_2)\n if arg_0._connections[arg_1].state != CONNECTION_STATE.CONNECTED:\n return False\n return True"} +{"_id": "doc_5414", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Destroy this transport\n \"\"\"\n\n arg_0.setOnMessageReceivedCallback(None)\n arg_0.setOnNodeConnectedCallback(None)\n arg_0.setOnNodeDisconnectedCallback(None)\n arg_0.setOnReadonlyNodeConnectedCallback(None)\n arg_0.setOnReadonlyNodeDisconnectedCallback(None)\n for arg_1 in arg_0._nodes | arg_0._readonlyNodes:\n arg_0.dropNode(arg_1)\n if arg_0._server is not None:\n arg_0._server.unbind()\n for arg_2 in arg_0._unknownConnections:\n arg_2.disconnect()\n arg_0._unknownConnections = set()"} +{"_id": "doc_5415", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Put an item into the queue.\n True - if item placed in queue.\n False - if queue is full and item can not be placed.\"\"\"\n if arg_0.__maxsize and len(arg_0.__data) >= arg_0.__maxsize:\n return False\n arg_0.__data.append(arg_1)\n return True"} +{"_id": "doc_5416", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Put an item into the queue. Items should be comparable, eg. tuples.\n True - if item placed in queue.\n False - if queue is full and item can not be placed.\"\"\"\n if arg_0.__maxsize and len(arg_0.__data) >= arg_0.__maxsize:\n return False\n heapq.heappush(arg_0.__data, arg_1)\n return True"} +{"_id": "doc_5417", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Extract the smallest item from queue.\n Return default if queue is empty.\"\"\"\n if not arg_0.__data:\n return arg_1\n return heapq.heappop(arg_0.__data)"} +{"_id": "doc_5418", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"Attempt to acquire lock.\n\n :param lockID: unique lock identifier.\n :type lockID: str\n :param sync: True - to wait until lock is acquired or failed to acquire.\n :type sync: bool\n :param callback: if sync is False - callback will be called with operation result.\n :type callback: func(opResult, error)\n :param timeout: max operation time (default - unlimited)\n :type timeout: float\n :return True if acquired, False - somebody else already acquired lock\n \"\"\"\n return arg_0.__lockImpl.acquire(arg_1, arg_0.__selfID, time.time(), arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_5419", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if lock is acquired by ourselves.\n\n :param lockID: unique lock identifier.\n :type lockID: str\n :return True if lock is acquired by ourselves.\n \"\"\"\n return arg_0.__lockImpl.Func(arg_1, arg_0.__selfID, time.time())"} +{"_id": "doc_5420", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator which wraps Funcs and returns an error response on failure.\n \"\"\"\n def wrapped(*arg_1, **arg_2):\n arg_3 = arg_0.__name__\n arg_4 = None\n if arg_1:\n arg_4 = arg_1[0]\n try:\n if arg_4:\n logger.debug(\"Checking '%s' for '%s'\", arg_3, arg_4)\n else:\n logger.debug(\"Checking '%s'\", arg_3)\n arg_5 = arg_0(*arg_1, **arg_2)\n except Exception as e:\n arg_6 = str(e)\n arg_5 = {\n \"ok\": False,\n \"error\": arg_6,\n \"stacktrace\": traceback.format_exc(),\n }\n # 
The Func contains several individual Funcs (e.g., one per\n # database). Preface the results by name.\n if arg_4:\n arg_5 = {arg_4: arg_5}\n logger.exception(\n \"Error calling '%s' for '%s': %s\",\n arg_3,\n arg_4,\n arg_6\n )\n else:\n logger.exception(\n \"Error calling '%s': %s\",\n arg_3,\n arg_6\n )\n\n return arg_5\n return wrapped"} +{"_id": "doc_5421", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.\n\n WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be\n customized.\n\n \"\"\"\n\n def _parse_auth_header(arg_1):\n \"\"\"\n Parse the `Authorization` header\n\n Expected format: `WATCHMAN-TOKEN Token=\"ABC123\"`\n \"\"\"\n\n # TODO: Figure out full set of allowed characters\n # http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers\n # https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2\n # https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2\n arg_2 = re.compile('(\\w+)[=] ?\"?([\\w-]+)\"?')\n arg_3 = dict(arg_2.findall(arg_1))\n return arg_3['Token']\n\n def _get_passed_token(arg_4):\n \"\"\"\n Try to get the passed token, starting with the header and fall back to `GET` param\n \"\"\"\n\n try:\n arg_1 = arg_4.META['HTTP_AUTHORIZATION']\n arg_5 = _parse_auth_header(arg_1)\n except KeyError:\n arg_5 = arg_4.GET.get(settings.WATCHMAN_TOKEN_NAME)\n return arg_5\n\n def _validate_token(arg_4):\n if settings.WATCHMAN_TOKENS:\n arg_6 = settings.WATCHMAN_TOKENS.split(',')\n elif settings.WATCHMAN_TOKEN:\n arg_6 = [settings.WATCHMAN_TOKEN, ]\n else:\n return True\n\n return _get_passed_token(arg_4) in arg_6\n\n @csrf_exempt\n @wraps(arg_0)\n def _wrapped_view(arg_4, *arg_7, **arg_8):\n if _validate_token(arg_4):\n return arg_0(arg_4, *arg_7, **arg_8)\n\n return HttpResponseForbidden()\n\n return _wrapped_view"} +{"_id": "doc_5422", "title": "", "text": "async def Func(arg_0):\n \"\"\"Establish a Funcion to the chat server.\n\n Returns when an error has occurred, or :func:`disFunc` has been\n called.\n \"\"\"\n arg_1 = os.environ.get('HTTP_PROXY')\n arg_0._session = http_utils.Session(arg_0._cookies, arg_1=arg_1)\n try:\n arg_0._channel = channel.Channel(\n arg_0._session, arg_0._max_retries, arg_0._retry_backoff_base\n )\n\n # Forward the Channel events to the Client events.\n arg_0._channel.on_Func.add_observer(arg_0.on_Func.fire)\n arg_0._channel.on_reFunc.add_observer(arg_0.on_reFunc.fire)\n arg_0._channel.on_disFunc.add_observer(arg_0.on_disFunc.fire)\n arg_0._channel.on_receive_array.add_observer(arg_0._on_receive_array)\n\n # Wrap the coroutine in a Future so it can be cancelled.\n arg_0._listen_future = asyncio.ensure_future(arg_0._channel.listen())\n # Listen for StateUpdate messages from the Channel until it\n # disFuncs.\n try:\n await arg_0._listen_future\n except asyncio.CancelledError:\n # If this task is cancelled, we need to cancel our child task\n # as well. 
We don't need an additional yield because listen\n # cancels immediately.\n arg_0._listen_future.cancel()\n logger.info(\n 'Client.Func returning because Channel.listen returned'\n )\n finally:\n await arg_0._session.close()"} +{"_id": "doc_5423", "title": "", "text": "def Func(arg_0):\n \"\"\"Return ``request_header`` for use when constructing requests.\n\n Returns:\n Populated request header.\n \"\"\"\n # resource is allowed to be null if it's not available yet (the Chrome\n # client does this for the first getentitybyid call)\n if arg_0._client_id is not None:\n arg_0._request_header.client_identifier.resource = arg_0._client_id\n return arg_0._request_header"} +{"_id": "doc_5424", "title": "", "text": "async def Func(arg_0):\n \"\"\"Set this client as active.\n\n While a client is active, no other clients will raise notifications.\n Call this method whenever there is an indication the user is\n interacting with this client. This method may be called very\n frequently, and it will only make a request when necessary.\n \"\"\"\n arg_1 = (arg_0._active_client_state ==\n hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE)\n arg_2 = (time.time() - arg_0._last_active_secs >\n SETACTIVECLIENT_LIMIT_SECS)\n if not arg_1 or arg_2:\n # Update these immediately so if the function is called again\n # before the API request finishes, we don't start extra requests.\n arg_0._active_client_state = (\n hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE\n )\n arg_0._last_active_secs = time.time()\n\n # The first time this is called, we need to retrieve the user's\n # email address.\n if arg_0._email is None:\n try:\n arg_5 = hangouts_pb2.GetSelfInfoRequest(\n request_header=arg_0.get_request_header(),\n )\n arg_6 = await arg_0.get_self_info(\n arg_5\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to find email address: {}'\n .format(e))\n return\n arg_0._email = (\n arg_6.self_entity.properties.email[0]\n )\n\n # If the client_id hasn't been received yet, we can't set the\n # active client.\n if arg_0._client_id is None:\n logger.info(\n 'Cannot set active client until client_id is received'\n )\n return\n\n try:\n arg_8 = hangouts_pb2.SetActiveClientRequest(\n request_header=arg_0.get_request_header(),\n arg_1=True,\n full_jid=\"{}/{}\".format(arg_0._email, arg_0._client_id),\n timeout_secs=ACTIVE_TIMEOUT_SECS,\n )\n await arg_0.Func_client(arg_8)\n except exceptions.NetworkError as e:\n logger.warning('Failed to set active client: {}'.format(e))\n else:\n logger.info('Set active client for {} seconds'\n .format(ACTIVE_TIMEOUT_SECS))"} +{"_id": "doc_5425", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse the image upload response to obtain status.\n\n Args:\n res: http_utils.FetchResponse instance, the upload response\n\n Returns:\n dict, sessionStatus of the response\n\n Raises:\n hangups.NetworkError: If the upload request failed.\n \"\"\"\n arg_1 = json.loads(arg_0.body.decode())\n if 'sessionStatus' not in arg_1:\n try:\n arg_2 = (\n arg_1['errorMessage']['additionalInfo']\n ['uploader_service.GoogleRupioAdditionalInfo']\n ['completionInfo']['customerSpecificInfo']\n )\n arg_3 = '{} : {}'.format(arg_2['status'], arg_2['message'])\n except KeyError:\n arg_3 = 'unknown reason'\n raise exceptions.NetworkError('image upload failed: {}'.format(\n arg_3\n ))\n return arg_1['sessionStatus']"} +{"_id": "doc_5426", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Parse channel array and call the appropriate events.\"\"\"\n if arg_1[0] == 'noop':\n pass # This is just a keep-alive, ignore 
it.\n else:\n arg_2 = json.loads(arg_1[0]['p'])\n # Wrapper appears to be a Protocol Buffer message, but encoded via\n # field numbers as dictionary keys. Since we don't have a parser\n # for that, parse it ad-hoc here.\n if '3' in arg_2:\n # This is a new client_id.\n arg_0._client_id = arg_2['3']['2']\n logger.info('Received new client_id: %r', arg_0._client_id)\n # Once client_id is received, the channel is ready to have\n # services added.\n await arg_0._add_channel_services()\n if '2' in arg_2:\n arg_4 = json.loads(arg_2['2']['2'])\n if arg_4[0] == 'cbu':\n # This is a (Client)BatchUpdate containing StateUpdate\n # messages.\n arg_5 = hangouts_pb2.BatchUpdate()\n pblite.decode(arg_5, arg_4,\n ignore_first_item=True)\n for arg_6 in arg_5.state_update:\n logger.debug('Received StateUpdate:\\n%s', arg_6)\n arg_7 = arg_6.state_update_header\n arg_0._active_client_state = arg_7.active_client_state\n await arg_0.on_state_update.fire(arg_6)\n else:\n logger.info('Ignoring message: %r', arg_4[0])"} +{"_id": "doc_5427", "title": "", "text": "async def Func(arg_0):\n \"\"\"Add services to the channel.\n\n The services we add to the channel determine what kind of data we will\n receive on it.\n\n The \"babel\" service includes what we need for Hangouts. If this fails\n for some reason, hangups will never receive any events. The\n \"babel_presence_last_seen\" service is also required to receive presence\n notifications.\n\n This needs to be re-called whenever we open a new channel (when there's\n a new SID and client_id.\n \"\"\"\n logger.info('Adding channel services...')\n # Based on what Hangouts for Chrome does over 2 requests, this is\n # trimmed down to 1 request that includes the bare minimum to make\n # things work.\n arg_1 = [\"babel\", \"babel_presence_last_seen\"]\n arg_2 = [\n dict(p=json.dumps({\"3\": {\"1\": {\"1\": service}}}))\n for service in arg_1\n ]\n await arg_0._channel.send_maps(arg_2)\n logger.info('Channel services added')"} +{"_id": "doc_5428", "title": "", "text": "async def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Send a Protocol Buffer formatted chat API request.\n\n Args:\n endpoint (str): The chat API endpoint to use.\n request_pb: The request body as a Protocol Buffer message.\n response_pb: The response body as a Protocol Buffer message.\n\n Raises:\n NetworkError: If the request fails.\n \"\"\"\n logger.debug('Sending Protocol Buffer request %s:\\n%s', arg_1,\n arg_2)\n arg_4 = await arg_0._base_request(\n 'https://clients6.google.com/chat/v1/{}'.format(arg_1),\n 'application/x-protobuf', # Request body is Protocol Buffer.\n 'proto', # Response body is Protocol Buffer.\n arg_2.SerializeToString()\n )\n try:\n arg_3.ParseFromString(base64.b64decode(arg_4.body))\n except binascii.Error as e:\n raise exceptions.NetworkError(\n 'Failed to decode base64 response: {}'.format(e)\n )\n except google.protobuf.message.DecodeError as e:\n raise exceptions.NetworkError(\n 'Failed to decode Protocol Buffer response: {}'.format(e)\n )\n logger.debug('Received Protocol Buffer response:\\n%s', arg_3)\n arg_5 = arg_3.response_header.status\n if arg_5 != hangouts_pb2.RESPONSE_STATUS_OK:\n arg_6 = arg_3.response_header.error_description\n raise exceptions.NetworkError(\n 'Request failed with status {}: \\'{}\\''\n .format(arg_5, arg_6)\n )"} +{"_id": "doc_5429", "title": "", "text": "async def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Send a generic authenticated POST request.\n\n Args:\n url (str): URL of request.\n content_type (str): Request content type.\n response_type 
(str): The desired response format. Valid options\n are: 'json' (JSON), 'protojson' (pblite), and 'proto' (binary\n Protocol Buffer). 'proto' requires manually setting an extra\n header 'X-Goog-Encode-Response-If-Executable: base64'.\n data (str): Request body data.\n\n Returns:\n FetchResponse: Response containing HTTP code, cookies, and body.\n\n Raises:\n NetworkError: If the request fails.\n \"\"\"\n arg_5 = {\n 'content-type': arg_2,\n # This header is required for Protocol Buffer responses. It causes\n # them to be base64 encoded:\n 'X-Goog-Encode-Response-If-Executable': 'base64',\n }\n arg_6 = {\n # \"alternative representation type\" (desired response format).\n 'alt': arg_3,\n # API key (required to avoid 403 Forbidden \"Daily Limit for\n # Unauthenticated Use Exceeded. Continued use requires signup\").\n 'key': API_KEY,\n }\n arg_7 = await arg_0._session.fetch(\n 'post', arg_1, arg_5=arg_5, arg_6=arg_6, arg_4=arg_4,\n )\n return arg_7"} +{"_id": "doc_5430", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Invite users to join an existing group conversation.\"\"\"\n arg_2 = hangouts_pb2.AddUserResponse()\n await arg_0._pb_request('conversations/adduser',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5431", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Create a new conversation.\"\"\"\n arg_2 = hangouts_pb2.CreateConversationResponse()\n await arg_0._pb_request('conversations/createconversation',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5432", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Return conversation info and recent events.\"\"\"\n arg_2 = hangouts_pb2.GetConversationResponse()\n await arg_0._pb_request('conversations/getconversation',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5433", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Return one or more user entities.\n\n Searching by phone number only finds entities when their phone number\n is in your contacts (and not always even then), and can't be used to\n find Google Voice contacts.\n \"\"\"\n arg_2 = hangouts_pb2.GetEntityByIdResponse()\n await arg_0._pb_request('contacts/getentitybyid',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5434", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Return info about the current user.\"\"\"\n arg_2 = hangouts_pb2.GetSelfInfoResponse()\n await arg_0._pb_request('contacts/getselfinfo',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5435", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Return presence status for a list of users.\"\"\"\n arg_2 = hangouts_pb2.QueryPresenceResponse()\n await arg_0._pb_request('presence/querypresence',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5436", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Remove a participant from a group conversation.\"\"\"\n arg_2 = hangouts_pb2.RemoveUserResponse()\n await arg_0._pb_request('conversations/removeuser',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5437", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Rename a conversation.\n\n Both group and one-to-one conversations may be renamed, but the\n official Hangouts clients have mixed support for one-to-one\n conversations with custom names.\n \"\"\"\n arg_2 = hangouts_pb2.RenameConversationResponse()\n await arg_0._pb_request('conversations/renameconversation',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5438", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Enable or disable message history in a conversation.\"\"\"\n arg_2 = 
hangouts_pb2.ModifyOTRStatusResponse()\n await arg_0._pb_request('conversations/modifyotrstatus',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5439", "title": "", "text": "async def Func(\n arg_0, arg_1\n ):\n \"\"\"Set the notification level of a conversation.\"\"\"\n arg_2 = hangouts_pb2.SetConversationNotificationLevelResponse()\n await arg_0._pb_request(\n 'conversations/setconversationnotificationlevel',\n arg_1, arg_2\n )\n return arg_2"} +{"_id": "doc_5440", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Set focus to a conversation.\"\"\"\n arg_2 = hangouts_pb2.SetFocusResponse()\n await arg_0._pb_request('conversations/setfocus',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5441", "title": "", "text": "async def Func(\n arg_0, arg_1\n ):\n \"\"\"Set whether group link sharing is enabled for a conversation.\"\"\"\n arg_2 = hangouts_pb2.SetGroupLinkSharingEnabledResponse()\n await arg_0._pb_request('conversations/setgrouplinksharingenabled',\n arg_1,\n arg_2)\n return arg_2"} +{"_id": "doc_5442", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Set the typing status of a conversation.\"\"\"\n arg_2 = hangouts_pb2.SetTypingResponse()\n await arg_0._pb_request('conversations/settyping',\n arg_1, arg_2)\n return arg_2"} +{"_id": "doc_5443", "title": "", "text": "async def Func(\n arg_0, arg_1\n ):\n \"\"\"Return info on recent conversations and their events.\"\"\"\n arg_2 = hangouts_pb2.SyncRecentConversationsResponse()\n await arg_0._pb_request('conversations/syncrecentconversations',\n arg_1,\n arg_2)\n return arg_2"} +{"_id": "doc_5444", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a microsecond timestamp to a UTC datetime instance.\"\"\"\n # Create datetime without losing precision from floating point (yes, this\n # is actually needed):\n return datetime.datetime.fromtimestamp(\n arg_0 // 1000000, datetime.timezone.utc\n ).replace(microsecond=(arg_0 % 1000000))"} +{"_id": "doc_5445", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert UserID to hangouts_pb2.ParticipantId.\"\"\"\n return hangouts_pb2.ParticipantId(\n chat_id=arg_0.chat_id,\n gaia_id=arg_0.gaia_id\n )"} +{"_id": "doc_5446", "title": "", "text": "def Func(arg_0):\n \"\"\"Return WatermarkNotification from hangouts_pb2.WatermarkNotification.\"\"\"\n return WatermarkNotification(\n conv_id=arg_0.conversation_id.id,\n user_id=from_participantid(arg_0.sender_id),\n read_timestamp=from_timestamp(\n arg_0.latest_read_timestamp\n ),\n )"} +{"_id": "doc_5447", "title": "", "text": "def Func(arg_0):\n \"\"\"Return authorization headers for API request.\"\"\"\n # It doesn't seem to matter what the url and time are as long as they are\n # consistent.\n arg_1 = int(time.time() * 1000)\n arg_2 = '{} {} {}'.format(arg_1, arg_0, ORIGIN_URL)\n arg_3 = hashlib.sha1(arg_2.encode()).hexdigest()\n arg_4 = 'SAPISIDHASH {}_{}'.format(arg_1, arg_3)\n return {\n 'authorization': arg_4,\n 'x-origin': ORIGIN_URL,\n 'x-goog-authuser': '0',\n }"} +{"_id": "doc_5448", "title": "", "text": "async def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None, arg_5=None):\n \"\"\"Make an HTTP request.\n\n Automatically uses configured HTTP proxy, and adds Google authorization\n header and cookies.\n\n Failures will be retried MAX_RETRIES times before raising NetworkError.\n\n Args:\n method (str): Request method.\n url (str): Request URL.\n params (dict): (optional) Request query string parameters.\n headers (dict): (optional) Request headers.\n data: (str): (optional) Request body data.\n\n Returns:\n FetchResponse: 
Response data.\n\n Raises:\n NetworkError: If the request fails.\n \"\"\"\n logger.debug('Sending request %s %s:\\n%r', arg_1, arg_2, arg_5)\n for arg_6 in range(MAX_RETRIES):\n try:\n async with arg_0.Func_raw(arg_1, arg_2, arg_3=arg_3,\n arg_4=arg_4, arg_5=arg_5) as res:\n async with async_timeout.timeout(REQUEST_TIMEOUT):\n body = await res.read()\n logger.debug('Received response %d %s:\\n%r',\n res.status, res.reason, body)\n except asyncio.TimeoutError:\n error_msg = 'Request timed out'\n except aiohttp.ServerDisconnectedError as err:\n error_msg = 'Server disconnected error: {}'.format(err)\n except (aiohttp.ClientError, ValueError) as err:\n error_msg = 'Request connection error: {}'.format(err)\n else:\n break\n logger.info('Request attempt %d failed: %s', arg_6, error_msg)\n else:\n logger.info('Request failed after %d attempts', MAX_RETRIES)\n raise exceptions.NetworkError(error_msg)\n\n if res.status != 200:\n logger.info('Request returned unexpected status: %d %s',\n res.status, res.reason)\n raise exceptions.NetworkError(\n 'Request return unexpected status: {}: {}'\n .format(res.status, res.reason)\n )\n\n return FetchResponse(res.status, body)"} +{"_id": "doc_5449", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Search for entities by phone number, email, or gaia_id.\"\"\"\n arg_2 = _get_lookup_spec(arg_1.entity_identifier)\n arg_3 = hangups.hangouts_pb2.GetEntityByIdRequest(\n request_header=arg_0.get_request_header(),\n batch_lookup_spec=[arg_2],\n )\n arg_4 = await arg_0.get_entity_by_id(arg_3)\n\n # Print the list of entities in the response.\n for arg_5 in arg_4.entity_result:\n for arg_6 in arg_5.entity:\n print(arg_6)"} +{"_id": "doc_5450", "title": "", "text": "def Func(arg_0):\n \"\"\"Return EntityLookupSpec from phone number, email address, or gaia ID.\"\"\"\n if arg_0.startswith('+'):\n return hangups.hangouts_pb2.EntityLookupSpec(\n phone=arg_0, create_offnetwork_gaia=True\n )\n elif '@' in arg_0:\n return hangups.hangouts_pb2.EntityLookupSpec(\n email=arg_0, create_offnetwork_gaia=True\n )\n else:\n return hangups.hangouts_pb2.EntityLookupSpec(gaia_id=arg_0)"} +{"_id": "doc_5451", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False):\n \"\"\"Return a readable name for a conversation.\n\n If the conversation has a custom name, use the custom name. Otherwise, for\n one-to-one conversations, the name is the full name of the other user. For\n group conversations, the name is a comma-separated list of first names. 
If\n the group conversation is empty, the name is \"Empty Conversation\".\n\n If truncate is true, only show up to two names in a group conversation.\n\n If show_unread is True, if there are unread chat messages, show the number\n of unread chat messages in parentheses after the conversation name.\n \"\"\"\n arg_3 = len([conv_event for conv_event in arg_0.unread_events if\n isinstance(conv_event, hangups.ChatMessageEvent) and\n not arg_0.get_user(conv_event.user_id).is_self])\n if arg_2 and arg_3 > 0:\n arg_4 = ' ({})'.format(arg_3)\n else:\n arg_4 = ''\n if arg_0.name is not None:\n return arg_0.name + arg_4\n else:\n arg_5 = sorted(\n (user for user in arg_0.users if not user.is_self),\n key=lambda user: user.id_\n )\n arg_6 = [user.first_name for user in arg_5]\n if not arg_5:\n return \"Empty Conversation\" + arg_4\n if len(arg_5) == 1:\n return arg_5[0].full_name + arg_4\n elif arg_1 and len(arg_5) > 2:\n return (', '.join(arg_6[:2] + ['+{}'.format(len(arg_6) - 2)]) +\n arg_4)\n else:\n return ', '.join(arg_6) + arg_4"} +{"_id": "doc_5452", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Add foreground and background colours to a color scheme\"\"\"\n if arg_2 is None and arg_3 is None:\n return arg_0\n\n arg_5 = []\n for arg_6 in arg_0:\n if arg_6[0] == arg_1:\n if arg_2 is None:\n arg_2 = arg_6[1]\n if arg_3 is None:\n arg_3 = arg_6[2]\n if arg_4 > 16:\n arg_5.append((arg_1, '', '', '', arg_2, arg_3))\n else:\n arg_5.append((arg_1, arg_2, arg_3))\n else:\n arg_5.append(arg_6)\n return arg_5"} +{"_id": "doc_5453", "title": "", "text": "async def Func(arg_0):\n \"\"\"Sync all conversations by making paginated requests.\n\n Conversations are ordered by ascending sort timestamp.\n\n Args:\n client (Client): Connected client.\n\n Raises:\n NetworkError: If the requests fail.\n\n Returns:\n tuple of list of ``ConversationState`` messages and sync timestamp\n \"\"\"\n arg_1 = []\n arg_2 = None\n arg_3 = hangouts_pb2.SyncRecentConversationsRequest(\n request_header=arg_0.get_request_header(),\n max_conversations=CONVERSATIONS_PER_REQUEST,\n max_events_per_conversation=1,\n sync_filter=[\n hangouts_pb2.SYNC_FILTER_INBOX,\n hangouts_pb2.SYNC_FILTER_ARCHIVED,\n ]\n )\n for arg_4 in range(MAX_CONVERSATION_PAGES):\n logger.info(\n 'Requesting conversations page %s', arg_3.last_event_timestamp\n )\n response = await arg_0.sync_recent_conversations(arg_3)\n arg_1 = list(response.conversation_state) + arg_1\n arg_2 = parsers.from_timestamp(\n # SyncRecentConversations seems to return a sync_timestamp 4\n # minutes before the present. To prevent SyncAllNewEvents later\n # breaking requesting events older than what we already have, use\n # current_server_time instead.\n response.response_header.current_server_time\n )\n if response.continuation_end_timestamp == 0:\n logger.info('Reached final conversations page')\n break\n else:\n arg_3.last_event_timestamp = response.continuation_end_timestamp\n else:\n logger.warning('Exceeded maximum number of conversation pages')\n logger.info('Synced %s total conversations', len(arg_1))\n return arg_1, arg_2"} +{"_id": "doc_5454", "title": "", "text": "def Func(arg_0):\n \"\"\"Loaded events which are unread sorted oldest to newest.\n\n Some Hangouts clients don't update the read timestamp for certain event\n types, such as membership changes, so this may return more unread\n events than these clients will show. 
There's also a delay between\n sending a message and the user's own message being considered read.\n\n (list of :class:`.ConversationEvent`).\n \"\"\"\n return [arg_1 for arg_1 in arg_0._events\n if arg_1.timestamp > arg_0.latest_read_timestamp]"} +{"_id": "doc_5455", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handle a watermark notification.\"\"\"\n # Update the conversation:\n if arg_0.get_user(arg_1.user_id).is_self:\n logger.info('latest_read_timestamp for {} updated to {}'\n .format(arg_0.id_, arg_1.read_timestamp))\n arg_2 = (\n arg_0._conversation.self_conversation_state\n )\n arg_2.self_read_state.latest_read_timestamp = (\n parsers.to_timestamp(arg_1.read_timestamp)\n )\n # Update the participants' watermarks:\n arg_5 = arg_0._watermarks.get(\n arg_1.user_id,\n datetime.datetime.min.replace(tzinfo=datetime.timezone.utc)\n )\n if arg_1.read_timestamp > arg_5:\n logger.info(('latest_read_timestamp for conv {} participant {}' +\n ' updated to {}').format(arg_0.id_,\n arg_1.user_id.chat_id,\n arg_1.read_timestamp))\n arg_0._watermarks[arg_1.user_id] = arg_1.read_timestamp"} +{"_id": "doc_5456", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update the internal state of the conversation.\n\n This method is used by :class:`.ConversationList` to maintain this\n instance.\n\n Args:\n conversation: ``Conversation`` message.\n \"\"\"\n # StateUpdate.conversation is actually a delta; fields that aren't\n # specified are assumed to be unchanged. Until this class is\n # refactored, hide this by saving and restoring previous values where\n # necessary.\n\n arg_2 = arg_1.self_conversation_state\n arg_3 = arg_0._conversation.self_conversation_state\n arg_0._conversation = arg_1\n\n # delivery_medium_option\n if not arg_2.delivery_medium_option:\n arg_2.delivery_medium_option.extend(\n arg_3.delivery_medium_option\n )\n\n # latest_read_timestamp\n arg_5 = arg_3.self_read_state.latest_read_timestamp\n arg_6 = arg_2.self_read_state.latest_read_timestamp\n if arg_6 == 0:\n arg_2.self_read_state.latest_read_timestamp = arg_5\n\n # user_read_state(s)\n for arg_9 in arg_1.read_state:\n arg_10 = parsers.from_timestamp(arg_9.latest_read_timestamp)\n if arg_10 == 0:\n continue\n arg_11 = parsers.from_participantid(arg_9.participant_id)\n if arg_11 not in arg_0._watermarks or arg_0._watermarks[arg_11] < arg_10:\n arg_0._watermarks[arg_11] = arg_10"} +{"_id": "doc_5457", "title": "", "text": "def Func(arg_0):\n \"\"\"Wrap hangouts_pb2.Event in ConversationEvent subclass.\"\"\"\n arg_1 = conversation_event.ConversationEvent\n if arg_0.HasField('chat_message'):\n arg_1 = conversation_event.ChatMessageEvent\n elif arg_0.HasField('otr_modification'):\n arg_1 = conversation_event.OTREvent\n elif arg_0.HasField('conversation_rename'):\n arg_1 = conversation_event.RenameEvent\n elif arg_0.HasField('membership_change'):\n arg_1 = conversation_event.MembershipChangeEvent\n elif arg_0.HasField('hangout_event'):\n arg_1 = conversation_event.HangoutEvent\n elif arg_0.HasField('group_link_sharing_modification'):\n arg_1 = conversation_event.GroupLinkSharingModificationEvent\n return arg_1(arg_0)"} +{"_id": "doc_5458", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add an event to the conversation.\n\n This method is used by :class:`.ConversationList` to maintain this\n instance.\n\n Args:\n event_: ``Event`` message.\n\n Returns:\n :class:`.ConversationEvent` representing the event.\n \"\"\"\n arg_2 = arg_0._wrap_event(arg_1)\n if arg_2.id_ not in arg_0._events_dict:\n 
arg_0._events.append(arg_2)\n arg_0._events_dict[arg_2.id_] = arg_2\n else:\n # If this happens, there's probably a bug.\n logger.info('Conversation %s ignoring duplicate event %s',\n arg_0.id_, arg_2.id_)\n return None\n return arg_2"} +{"_id": "doc_5459", "title": "", "text": "async def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=None):\n \"\"\"Send a message to this conversation.\n\n A per-conversation lock is acquired to ensure that messages are sent in\n the correct order when this method is called multiple times\n asynchronously.\n\n Args:\n segments: List of :class:`.ChatMessageSegment` objects to include\n in the message.\n image_file: (optional) File-like object containing an image to be\n attached to the message.\n image_id: (optional) ID of a Picasa photo to be attached to the\n message. If you specify both ``image_file`` and ``image_id``\n together, ``image_file`` takes precedence and ``image_id`` will\n be ignored.\n image_user_id: (optional) Picasa user ID, required only if\n ``image_id`` refers to an image from a different Picasa user,\n such as Google's sticker user.\n\n Raises:\n .NetworkError: If the message cannot be sent.\n \"\"\"\n async with arg_0._Func_lock:\n if arg_2:\n try:\n arg_5 = await arg_0._client.upload_image(\n arg_2, return_uploaded_image=True\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to upload image: {}'.format(e))\n raise\n arg_3 = arg_5.image_id\n try:\n arg_6 = hangouts_pb2.SendChatMessageRequest(\n request_header=arg_0._client.get_request_header(),\n event_request_header=arg_0._get_event_request_header(),\n message_content=hangouts_pb2.MessageContent(\n segment=[seg.serialize() for seg in arg_1],\n ),\n )\n if arg_3 is not None:\n arg_6.existing_media.photo.photo_id = arg_3\n if arg_4 is not None:\n arg_6.existing_media.photo.user_id = arg_4\n arg_6.existing_media.photo.is_custom_user_id = True\n await arg_0._client.send_chat_message(arg_6)\n except exceptions.NetworkError as e:\n logger.warning('Failed to send message: {}'.format(e))\n raise"} +{"_id": "doc_5460", "title": "", "text": "async def Func(arg_0):\n \"\"\"Leave this conversation.\n\n Raises:\n .NetworkError: If conversation cannot be left.\n \"\"\"\n arg_1 = (arg_0._conversation.type ==\n hangouts_pb2.CONVERSATION_TYPE_GROUP)\n try:\n if arg_1:\n await arg_0._client.remove_user(\n hangouts_pb2.RemoveUserRequest(\n request_header=arg_0._client.get_request_header(),\n event_request_header=arg_0._get_event_request_header(),\n )\n )\n else:\n await arg_0._client.delete_conversation(\n hangouts_pb2.DeleteConversationRequest(\n request_header=arg_0._client.get_request_header(),\n conversation_id=hangouts_pb2.ConversationId(\n id=arg_0.id_\n ),\n delete_upper_bound_timestamp=parsers.to_timestamp(\n datetime.datetime.now(tz=datetime.timezone.utc)\n )\n )\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to Func conversation: {}'.format(e))\n raise"} +{"_id": "doc_5461", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Set the notification level of this conversation.\n\n Args:\n level: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or\n ``NOTIFICATION_LEVEL_RING`` to enable them.\n\n Raises:\n .NetworkError: If the request fails.\n \"\"\"\n await arg_0._client.set_conversation_notification_level(\n hangouts_pb2.SetConversationNotificationLevelRequest(\n request_header=arg_0._client.get_request_header(),\n conversation_id=hangouts_pb2.ConversationId(id=arg_0.id_),\n arg_1=arg_1,\n )\n )"} +{"_id": "doc_5462", "title": "", 
"text": "async def Func(arg_0, arg_1=None):\n \"\"\"Update the timestamp of the latest event which has been read.\n\n This method will avoid making an API request if it will have no effect.\n\n Args:\n read_timestamp (datetime.datetime): (optional) Timestamp to set.\n Defaults to the timestamp of the newest event.\n\n Raises:\n .NetworkError: If the timestamp cannot be updated.\n \"\"\"\n if arg_1 is None:\n arg_1 = (arg_0.events[-1].timestamp if arg_0.events else\n datetime.datetime.now(datetime.timezone.utc))\n if arg_1 > arg_0.latest_read_timestamp:\n logger.info(\n 'Setting {} latest_read_timestamp from {} to {}'\n .format(arg_0.id_, arg_0.latest_read_timestamp, arg_1)\n )\n # Prevent duplicate requests by updating the conversation now.\n arg_2 = arg_0._conversation.self_conversation_state\n arg_2.self_read_state.latest_read_timestamp = (\n parsers.to_timestamp(arg_1)\n )\n try:\n await arg_0._client.update_watermark(\n hangouts_pb2.UpdateWatermarkRequest(\n request_header=arg_0._client.get_request_header(),\n conversation_id=hangouts_pb2.ConversationId(\n id=arg_0.id_\n ),\n last_read_timestamp=parsers.to_timestamp(\n arg_1\n ),\n )\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to update read timestamp: {}'.format(e))\n raise"} +{"_id": "doc_5463", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Get all the conversations.\n\n Args:\n include_archived (bool): (optional) Whether to include archived\n conversations. Defaults to ``False``.\n\n Returns:\n List of all :class:`.Conversation` objects.\n \"\"\"\n return [arg_2 for arg_2 in arg_0._conv_dict.values()\n if not arg_2.is_archived or arg_1]"} +{"_id": "doc_5464", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Leave a conversation.\n\n Args:\n conv_id (str): ID of conversation to leave.\n \"\"\"\n logger.info('Leaving conversation: {}'.format(arg_1))\n await arg_0._conv_dict[arg_1].leave()\n del arg_0._conv_dict[arg_1]"} +{"_id": "doc_5465", "title": "", "text": "def Func(arg_0, arg_1, arg_2=[],\n arg_3=None):\n \"\"\"Add new conversation from hangouts_pb2.Conversation\"\"\"\n # pylint: disable=dangerous-default-value\n arg_4 = arg_1.conversation_id.id\n logger.debug('Adding new conversation: {}'.format(arg_4))\n arg_5 = Conversation(arg_0._client, arg_0._user_list, arg_1,\n arg_2, arg_3)\n arg_0._conv_dict[arg_4] = arg_5\n return arg_5"} +{"_id": "doc_5466", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Get a cached conversation or fetch a missing conversation.\n\n Args:\n conv_id: string, conversation identifier\n\n Raises:\n NetworkError: If the request to fetch the conversation fails.\n\n Returns:\n :class:`.Conversation` with matching ID.\n \"\"\"\n arg_2 = arg_0._conv_dict.get(arg_1, None)\n if arg_2 is None:\n logger.info('Fetching unknown conversation %s', arg_1)\n arg_3 = await arg_0._client.get_conversation(\n hangouts_pb2.GetConversationRequest(\n request_header=arg_0._client.get_request_header(),\n conversation_spec=hangouts_pb2.ConversationSpec(\n conversation_id=hangouts_pb2.ConversationId(\n id=arg_1\n )\n ), include_event=False\n )\n )\n arg_4 = arg_3.conversation_state\n arg_5 = None\n if arg_4.HasField('event_continuation_token'):\n arg_5 = arg_4.event_continuation_token\n return arg_0._add_conversation(arg_4.conversation,\n arg_5=arg_5)\n else:\n return arg_2"} +{"_id": "doc_5467", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Receive a hangouts_pb2.Event and fan out to Conversations.\n\n Args:\n event_: hangouts_pb2.Event instance\n 
\"\"\"\n arg_2 = arg_1.conversation_id.id\n try:\n arg_3 = await arg_0._get_or_fetch_conversation(arg_2)\n except exceptions.NetworkError:\n logger.warning(\n 'Failed to fetch conversation for event notification: %s',\n arg_2\n )\n else:\n arg_0._sync_timestamp = parsers.from_timestamp(arg_1.timestamp)\n arg_5 = arg_3.add_event(arg_1)\n # conv_event may be None if the event was a duplicate.\n if arg_5 is not None:\n await arg_0.on_event.fire(arg_5)\n await arg_3.on_event.fire(arg_5)"} +{"_id": "doc_5468", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Receive Conversation delta and create or update the conversation.\n\n Args:\n conversation: hangouts_pb2.Conversation instance\n\n Raises:\n NetworkError: A request to fetch the complete conversation failed.\n \"\"\"\n arg_2 = arg_1.conversation_id.id\n arg_3 = arg_0._conv_dict.get(arg_2, None)\n if arg_3 is None:\n # Ignore the delta and fetch the complete conversation.\n await arg_0._get_or_fetch_conversation(arg_2)\n else:\n # Update conversation using the delta.\n arg_3.update_conversation(arg_1)"} +{"_id": "doc_5469", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Receive SetTypingNotification and update the conversation.\n\n Args:\n set_typing_notification: hangouts_pb2.SetTypingNotification\n instance\n \"\"\"\n arg_2 = arg_1.conversation_id.id\n arg_3 = parsers.parse_typing_status_message(arg_1)\n await arg_0.on_typing.fire(arg_3)\n try:\n arg_4 = await arg_0._get_or_fetch_conversation(arg_2)\n except exceptions.NetworkError:\n logger.warning(\n 'Failed to fetch conversation for typing notification: %s',\n arg_2\n )\n else:\n await arg_4.on_typing.fire(arg_3)"} +{"_id": "doc_5470", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Receive WatermarkNotification and update the conversation.\n\n Args:\n watermark_notification: hangouts_pb2.WatermarkNotification instance\n \"\"\"\n arg_2 = arg_1.conversation_id.id\n arg_3 = parsers.parse_watermark_notification(arg_1)\n await arg_0.on_watermark_notification.fire(arg_3)\n try:\n arg_4 = await arg_0._get_or_fetch_conversation(arg_2)\n except exceptions.NetworkError:\n logger.warning(\n 'Failed to fetch conversation for watermark notification: %s',\n arg_2\n )\n else:\n await arg_4.on_watermark_notification.fire(arg_3)"} +{"_id": "doc_5471", "title": "", "text": "async def Func(arg_0):\n \"\"\"Sync conversation state and events that could have been missed.\"\"\"\n logger.info('Syncing events since {}'.format(arg_0.Func_timestamp))\n try:\n arg_1 = await arg_0._client.sync_all_new_events(\n hangouts_pb2.SyncAllNewEventsRequest(\n request_header=arg_0._client.get_request_header(),\n lastFunc_timestamp=parsers.to_timestamp(\n arg_0.Func_timestamp\n ),\n max_response_size_bytes=1048576, # 1 MB\n )\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to sync events, some events may be lost: {}'\n .format(e))\n else:\n for arg_2 in arg_1.conversation_state:\n arg_3 = arg_2.conversation_id.id\n arg_4 = arg_0._conv_dict.get(arg_3, None)\n if arg_4 is not None:\n arg_4.update_conversation(arg_2.conversation)\n for arg_5 in arg_2.event:\n arg_6 = parsers.from_timestamp(arg_5.timestamp)\n if arg_6 > arg_0.Func_timestamp:\n # This updates the sync_timestamp for us, as well\n # as triggering events.\n await arg_0._on_event(arg_5)\n else:\n arg_0._add_conversation(\n arg_2.conversation,\n arg_2.event,\n arg_2.event_continuation_token\n )"} +{"_id": "doc_5472", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Construct user from ``Entity`` message.\n\n 
Args:\n entity: ``Entity`` message.\n self_user_id (~hangups.user.UserID or None): The ID of the current\n user. If ``None``, assume ``entity`` is the current user.\n\n Returns:\n :class:`~hangups.user.User` object.\n \"\"\"\n arg_2 = UserID(chat_id=arg_0.id.chat_id,\n gaia_id=arg_0.id.gaia_id)\n return User(arg_2, arg_0.properties.display_name,\n arg_0.properties.first_name,\n arg_0.properties.photo_url,\n arg_0.properties.email,\n (arg_1 == arg_2) or (arg_1 is None))"} +{"_id": "doc_5473", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a user by its ID.\n\n Args:\n user_id (~hangups.user.UserID): The ID of the user.\n\n Raises:\n KeyError: If no such user is known.\n\n Returns:\n :class:`~hangups.user.User` with the given ID.\n \"\"\"\n try:\n return arg_0._user_dict[arg_1]\n except KeyError:\n logger.warning('UserList returning unknown User for UserID %s',\n arg_1)\n return User(arg_1, None, None, None, [], False)"} +{"_id": "doc_5474", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add or upgrade User from ConversationParticipantData.\"\"\"\n arg_2 = User.from_conv_part_data(arg_1, arg_0._self_user.id_)\n\n arg_3 = arg_0._user_dict.get(arg_2.id_)\n if arg_3 is None:\n logger.warning('Adding fallback User with %s name \"%s\"',\n arg_2.name_type.name.lower(), arg_2.full_name)\n arg_0._user_dict[arg_2.id_] = arg_2\n return arg_2\n else:\n arg_3.upgrade_name(arg_2)\n return arg_3"} +{"_id": "doc_5475", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add an observer to this event.\n\n Args:\n callback: A function or coroutine callback to call when the event\n is fired.\n\n Raises:\n ValueError: If the callback has already been added.\n \"\"\"\n if arg_1 in arg_0._observers:\n raise ValueError('{} is already an observer of {}'\n .format(arg_1, arg_0))\n arg_0._observers.append(arg_1)"} +{"_id": "doc_5476", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove an observer from this event.\n\n Args:\n callback: A function or coroutine callback to remove from this\n event.\n\n Raises:\n ValueError: If the callback is not an observer of this event.\n \"\"\"\n if arg_1 not in arg_0._observers:\n raise ValueError('{} is not an observer of {}'\n .format(arg_1, arg_0))\n arg_0._observers.remove(arg_1)"} +{"_id": "doc_5477", "title": "", "text": "async def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Fire this event, calling all observers with the same arguments.\"\"\"\n logger.debug('Fired {}'.format(arg_0))\n for arg_3 in arg_0._observers:\n arg_4 = arg_3(*arg_1, **arg_2)\n if asyncio.iscoroutinefunction(arg_3):\n await arg_4"} +{"_id": "doc_5478", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Run a hangups example coroutine.\n\n Args:\n example_coroutine (coroutine): Coroutine to run with a connected\n hangups client and arguments namespace as arguments.\n extra_args (str): Any extra command line arguments required by the\n example.\n \"\"\"\n arg_2 = _get_parser(arg_1).parse_args()\n logging.basicConfig(level=logging.DEBUG if arg_2.debug else logging.WARNING)\n # Obtain hangups authentication cookies, prompting for credentials from\n # standard input if necessary.\n arg_3 = hangups.auth.get_auth_stdin(arg_2.token_path)\n arg_4 = hangups.Client(arg_3)\n arg_5 = asyncio.get_event_loop()\n arg_6 = asyncio.ensure_future(_async_main(arg_0, arg_4, arg_2),\n arg_5=arg_5)\n\n try:\n arg_5.run_until_complete(arg_6)\n except KeyboardInterrupt:\n arg_6.cancel()\n arg_5.run_until_complete(arg_6)\n finally:\n arg_5.close()"} +{"_id": "doc_5479", "title": "", "text": "def 
Func(arg_0):\n \"\"\"Return ArgumentParser with any extra arguments.\"\"\"\n arg_1 = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n arg_2 = appdirs.AppDirs('hangups', 'hangups')\n arg_3 = os.path.join(arg_2.user_cache_dir, 'refresh_token.txt')\n arg_1.add_argument(\n '--token-path', default=arg_3,\n help='path used to store OAuth refresh token'\n )\n arg_1.add_argument(\n '-d', '--debug', action='store_true',\n help='log detailed debugging messages'\n )\n for arg_4 in arg_0:\n arg_1.add_argument(arg_4, required=True)\n return arg_1"} +{"_id": "doc_5480", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Print column headers and rows as a reStructuredText table.\n\n Args:\n col_tuple: Tuple of column name strings.\n row_tuples: List of tuples containing row data.\n \"\"\"\n arg_2 = [max(len(str(row[col])) for row in [arg_0] + arg_1)\n for col in range(len(arg_0))]\n arg_3 = ' '.join('{{:<{}}}'.format(col_width)\n for col_width in arg_2)\n arg_4 = ' '.join('=' * col_width for col_width in arg_2)\n print(arg_4)\n print(arg_3.format(*arg_0))\n print(arg_4)\n for arg_5 in arg_1:\n print(arg_3.format(*arg_5))\n print(arg_4)\n print()"} +{"_id": "doc_5481", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=''):\n \"\"\"Generate doc for an enum.\n\n Args:\n enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum\n to generate docs for.\n locations: Dictionary of location paths tuples to\n descriptor_pb2.SourceCodeInfo.Location instances.\n path: Path tuple to the enum definition.\n name_prefix: Optional prefix for this enum's name.\n \"\"\"\n print(make_subsection(arg_3 + arg_0.name))\n arg_4 = arg_1[arg_2]\n if arg_4.HasField('leading_comments'):\n print(textwrap.dedent(arg_4.leading_comments))\n\n arg_5 = []\n for arg_6, arg_7 in enumerate(arg_0.value):\n arg_8 = arg_1[arg_2 + (2, arg_6)]\n arg_5.append((\n make_code(arg_7.name),\n arg_7.number,\n textwrap.fill(get_comment_from_location(arg_8), INFINITY),\n ))\n print_table(('Name', 'Number', 'Description'), arg_5)"} +{"_id": "doc_5482", "title": "", "text": "def Func(arg_0):\n \"\"\"Create a directory if it does not exist.\"\"\"\n arg_1 = os.path.dirname(arg_0)\n if arg_1 != '' and not os.path.isdir(arg_1):\n try:\n os.makedirs(arg_1)\n except OSError as e:\n sys.exit('Failed to create directory: {}'.format(e))"} +{"_id": "doc_5483", "title": "", "text": "def Func(arg_0):\n \"\"\"Show the overlay menu.\"\"\"\n # If the current widget in the TabbedWindowWidget has a menu,\n # overlay it on the TabbedWindowWidget.\n arg_1 = arg_0._tabbed_window.get_current_widget()\n if hasattr(arg_1, 'get_menu_widget'):\n arg_2 = arg_1.get_menu_widget(arg_0._hide_menu)\n arg_3 = urwid.Overlay(arg_2, arg_0._tabbed_window,\n align='center', width=('relative', 80),\n valign='middle', height=('relative', 80))\n arg_0._urwid_loop.widget = arg_3"} +{"_id": "doc_5484", "title": "", "text": "async def Func(arg_0):\n \"\"\"Handle connecting for the first time.\"\"\"\n arg_0._user_list, arg_0._conv_list = (\n await hangups.build_user_conversation_list(arg_0._client)\n )\n arg_0._conv_list.on_event.add_observer(arg_0._on_event)\n\n # show the conversation menu\n arg_3 = ConversationPickerWidget(arg_0._conv_list,\n arg_0.on_select_conversation,\n arg_0._keys)\n arg_0._tabbed_window = TabbedWindowWidget(arg_0._keys)\n arg_0._tabbed_window.set_tab(arg_3, switch=True,\n title='Conversations')\n arg_0._urwid_loop.widget = arg_0._tabbed_window"} +{"_id": "doc_5485", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"Open conversation tab for new messages & pass events to notifier.\"\"\"\n arg_2 = arg_0._conv_list.get(arg_1.conversation_id)\n arg_3 = arg_2.get_user(arg_1.user_id)\n arg_4 = all((\n isinstance(arg_1, hangups.ChatMessageEvent),\n not arg_3.is_self,\n not arg_2.is_quiet,\n ))\n if arg_4:\n arg_0.add_conversation_tab(arg_1.conversation_id)\n if arg_0._discreet_notifications:\n arg_5 = DISCREET_NOTIFICATION\n else:\n arg_5 = notifier.Notification(\n arg_3.full_name, get_conv_name(arg_2), arg_1.text\n )\n arg_0._notifier.send(arg_5)"} +{"_id": "doc_5486", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Put a coroutine in the queue to be executed.\"\"\"\n # Avoid logging when a coroutine is queued or executed to avoid log\n # spam from coroutines that are started on every keypress.\n assert asyncio.iscoroutine(arg_1)\n arg_0._queue.Func_nowait(arg_1)"} +{"_id": "doc_5487", "title": "", "text": "async def Func(arg_0):\n \"\"\"Consume coroutines from the queue by executing them.\"\"\"\n while True:\n arg_1 = await arg_0._queue.get()\n assert asyncio.iscoroutine(arg_1)\n await arg_1"} +{"_id": "doc_5488", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Rename conversation and call callback.\"\"\"\n arg_0._coroutine_queue.put(arg_0._conversation.rename(arg_1))\n arg_2()"} +{"_id": "doc_5489", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Re-order the conversations when an event occurs.\"\"\"\n # TODO: handle adding new conversations\n arg_0.sort(key=lambda conv_button: conv_button.last_modified,\n reverse=True)"} +{"_id": "doc_5490", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Make users stop typing when they send a message.\"\"\"\n if isinstance(arg_1, hangups.ChatMessageEvent):\n arg_0._typing_statuses[arg_1.user_id] = (\n hangups.TYPING_TYPE_STOPPED\n )\n arg_0._update()"} +{"_id": "doc_5491", "title": "", "text": "def Func(arg_0):\n \"\"\"Update status text.\"\"\"\n arg_1 = [arg_0._conversation.get_user(user_id)\n for user_id, status in arg_0._typing_statuses.items()\n if status == hangups.TYPING_TYPE_STARTED]\n arg_2 = [user.first_name for user in arg_1\n if not user.is_self]\n if arg_2:\n arg_3 = '{} {} typing...'.format(\n ', '.join(sorted(arg_2)),\n 'is' if len(arg_2) == 1 else 'are'\n )\n else:\n arg_3 = ''\n\n if not arg_0._is_connected:\n arg_0._widget.set_text(\"RECONNECTING...\")\n elif arg_0._message is not None:\n arg_0._widget.set_text(arg_0._message)\n else:\n arg_0._widget.set_text(arg_3)"} +{"_id": "doc_5492", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4=None):\n \"\"\"Return MessageWidget representing a ConversationEvent.\n\n Returns None if the ConversationEvent does not have a widget\n representation.\n \"\"\"\n arg_5 = arg_0.get_user(arg_1.user_id)\n # Check whether the previous event occurred on the same day as this\n # event.\n if arg_2 is not None:\n arg_6 = (arg_1.timestamp.astimezone(tz=None).date() !=\n arg_2.timestamp.astimezone(tz=None).date())\n else:\n arg_6 = False\n if isinstance(arg_1, hangups.ChatMessageEvent):\n return MessageWidget(arg_1.timestamp, arg_1.text,\n arg_3, arg_5, show_date=arg_6,\n arg_4=arg_4)\n elif isinstance(arg_1, hangups.RenameEvent):\n if arg_1.new_name == '':\n arg_7 = ('{} cleared the conversation name'\n .format(arg_5.first_name))\n else:\n arg_7 = ('{} renamed the conversation to {}'\n .format(arg_5.first_name, arg_1.new_name))\n return MessageWidget(arg_1.timestamp, arg_7, arg_3,\n show_date=arg_6,\n arg_4=arg_4)\n elif isinstance(arg_1, 
hangups.MembershipChangeEvent):\n arg_8 = [arg_0.get_user(user_id) for user_id\n in arg_1.participant_ids]\n arg_9 = ', '.join([arg_5.full_name for arg_5 in arg_8])\n if arg_1.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:\n arg_7 = ('{} added {} to the conversation'\n .format(arg_5.first_name, arg_9))\n else: # LEAVE\n arg_7 = ('{} left the conversation'.format(arg_9))\n return MessageWidget(arg_1.timestamp, arg_7, arg_3,\n show_date=arg_6,\n arg_4=arg_4)\n elif isinstance(arg_1, hangups.HangoutEvent):\n arg_7 = {\n hangups.HANGOUT_EVENT_TYPE_START: (\n 'A Hangout call is starting.'\n ),\n hangups.HANGOUT_EVENT_TYPE_END: (\n 'A Hangout call ended.'\n ),\n hangups.HANGOUT_EVENT_TYPE_ONGOING: (\n 'A Hangout call is ongoing.'\n ),\n }.get(arg_1.event_type, 'Unknown Hangout call event.')\n return MessageWidget(arg_1.timestamp, arg_7, arg_3,\n show_date=arg_6,\n arg_4=arg_4)\n elif isinstance(arg_1, hangups.GroupLinkSharingModificationEvent):\n arg_10 = hangups.GROUP_LINK_SHARING_STATUS_ON\n arg_11 = ('on' if arg_1.new_status == arg_10\n else 'off')\n arg_7 = '{} turned {} joining by link.'.format(arg_5.first_name,\n arg_11)\n return MessageWidget(arg_1.timestamp, arg_7, arg_3,\n show_date=arg_6,\n arg_4=arg_4)\n else:\n # conv_event is a generic hangups.ConversationEvent.\n arg_7 = 'Unknown conversation event'\n return MessageWidget(arg_1.timestamp, arg_7, arg_3,\n show_date=arg_6,\n arg_4=arg_4)"} +{"_id": "doc_5493", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handle updating and scrolling when a new event is added.\n\n Automatically scroll down to show the new text if the bottom is\n showing. This allows the user to scroll up to read previous messages\n while new messages are arriving.\n \"\"\"\n if not arg_0._is_scrolling:\n arg_0.set_focus(arg_1.id_)\n else:\n arg_0._modified()"} +{"_id": "doc_5494", "title": "", "text": "async def Func(arg_0):\n \"\"\"Load more events for this conversation.\"\"\"\n try:\n arg_1 = await arg_0._conversation.get_events(\n arg_0._conversation.events[0].id_\n )\n except (IndexError, hangups.NetworkError):\n arg_1 = []\n if not arg_1:\n arg_0._firstFunced = True\n if arg_0._focus_position == arg_0.POSITION_LOADING and arg_1:\n # If the loading indicator is still focused, and we loaded more\n # events, set focus on the first new event so the loaded\n # indicator is replaced.\n arg_0.set_focus(arg_1[-1].id_)\n else:\n # Otherwise, still need to invalidate in case the loading\n # indicator is showing but not focused.\n arg_0._modified()\n # Loading events can also update the watermarks.\n arg_0._refresh_watermarked_events()\n arg_0._isFuncing = False"} +{"_id": "doc_5495", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the menu widget associated with this widget.\"\"\"\n return ConversationMenu(\n arg_0._coroutine_queue, arg_0._conversation, arg_1,\n arg_0._keys\n )"} +{"_id": "doc_5496", "title": "", "text": "def Func(arg_0):\n \"\"\"Update this conversation's tab title.\"\"\"\n arg_0.title = get_conv_name(arg_0._conversation, show_unread=True,\n truncate=True)\n arg_0.Func_cb(arg_0, arg_0.title)"} +{"_id": "doc_5497", "title": "", "text": "def Func(arg_0):\n \"\"\"Update tab display.\"\"\"\n arg_1 = []\n for arg_2, arg_3 in enumerate(arg_0._widgets):\n arg_4 = ('active_tab' if arg_2 == arg_0._tab_index\n else 'inactive_tab')\n arg_1 += [\n (arg_4, ' {} '.format(arg_0._widget_title[arg_3])),\n ('tab_background', ' '),\n ]\n arg_0._tabs.set_text(arg_1)\n arg_0._frame.contents['body'] = (arg_0._widgets[arg_0._tab_index], None)"} +{"_id": 
"doc_5498", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Add or modify a tab.\n\n If widget is not a tab, it will be added. If switch is True, switch to\n this tab. If title is given, set the tab's title.\n \"\"\"\n if arg_1 not in arg_0._widgets:\n arg_0._widgets.append(arg_1)\n arg_0._widget_title[arg_1] = ''\n if arg_2:\n arg_0._tab_index = arg_0._widgets.index(arg_1)\n if arg_3:\n arg_0._widget_title[arg_1] = arg_3\n arg_0._update_tabs()"} +{"_id": "doc_5499", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Use the access token to get session cookies.\n\n Raises GoogleAuthError if session cookies could not be loaded.\n\n Returns dict of cookies.\n \"\"\"\n arg_2 = {'Authorization': 'Bearer {}'.format(arg_1)}\n\n try:\n arg_3 = arg_0.get(('https://accounts.google.com/accounts/OAuthLogin'\n '?source=hangups&issueuberauth=1'), arg_2=arg_2)\n arg_3.raise_for_status()\n except requests.RequestException as e:\n raise GoogleAuthError('OAuthLogin request failed: {}'.format(e))\n arg_4 = arg_3.text\n\n try:\n arg_3 = arg_0.get(('https://accounts.google.com/MergeSession?'\n 'service=mail&'\n 'continue=http://www.google.com&uberauth={}')\n .format(arg_4), arg_2=arg_2)\n arg_3.raise_for_status()\n except requests.RequestException as e:\n raise GoogleAuthError('MergeSession request failed: {}'.format(e))\n\n arg_5 = arg_0.cookies.get_dict(domain='.google.com')\n if arg_5 == {}:\n raise GoogleAuthError('Failed to find session cookies')\n return arg_5"} +{"_id": "doc_5500", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Populate and submit a form on the current page.\n\n Raises GoogleAuthError if form can not be submitted.\n \"\"\"\n logger.info(\n 'Submitting form on page %r', arg_0._page.url.split('?')[0]\n )\n logger.info(\n 'Page contains forms: %s',\n [arg_3.get('id') for arg_3 in arg_0._page.soup.select('form')]\n )\n try:\n arg_4 = arg_0._page.soup.select(arg_1)[0]\n except IndexError:\n raise GoogleAuthError(\n 'Failed to find form {!r} in page'.format(arg_1)\n )\n logger.info(\n 'Page contains inputs: %s',\n [arg_3.get('id') for arg_3 in arg_4.select('input')]\n )\n for arg_5, arg_6 in arg_2.items():\n try:\n arg_4.select(arg_5)[0]['value'] = arg_6\n except IndexError:\n raise GoogleAuthError(\n 'Failed to find input {!r} in form'.format(arg_5)\n )\n try:\n arg_0._page = arg_0._browser.submit(arg_4, arg_0._page.url)\n arg_0._page.raise_for_status()\n except requests.RequestException as e:\n raise GoogleAuthError('Failed to submit form: {}'.format(e))"} +{"_id": "doc_5501", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse response format for request for new channel SID.\n\n Example format (after parsing JS):\n [ [0,[\"c\",\"SID_HERE\",\"\",8]],\n [1,[{\"gsid\":\"GSESSIONID_HERE\"}]]]\n\n Returns (SID, gsessionid) tuple.\n \"\"\"\n arg_0 = json.loads(list(ChunkParser().get_chunks(arg_0))[0])\n arg_1 = arg_0[0][1][1]\n arg_2 = arg_0[1][1][0]['gsid']\n return (arg_1, arg_2)"} +{"_id": "doc_5502", "title": "", "text": "async def Func(arg_0):\n \"\"\"Listen for messages on the backwards channel.\n\n This method only returns when the connection has been closed due to an\n error.\n \"\"\"\n arg_1 = 0 # Number of retries attempted so far\n arg_2 = True # whether a new SID is needed\n\n while arg_1 <= arg_0._max_retries:\n # After the first failed retry, back off exponentially longer after\n # each attempt.\n if arg_1 > 0:\n arg_3 = arg_0._retry_backoff_base ** arg_1\n logger.info('Backing off for %s seconds', arg_3)\n await 
asyncio.sleep(arg_3)\n\n # Request a new SID if we don't have one yet, or the previous one\n # became invalid.\n if arg_2:\n await arg_0._fetch_channel_sid()\n arg_2 = False\n # Clear any previous push data, since if there was an error it\n # could contain garbage.\n arg_0._chunk_parser = ChunkParser()\n try:\n await arg_0._longpoll_request()\n except ChannelSessionError as err:\n logger.warning('Long-polling interrupted: %s', err)\n arg_2 = True\n except exceptions.NetworkError as err:\n logger.warning('Long-polling request failed: %s', err)\n else:\n # The connection closed successfully, so reset the number of\n # retries.\n arg_1 = 0\n continue\n\n arg_1 += 1\n logger.info('retry attempt count is now %s', arg_1)\n if arg_0._is_connected:\n arg_0._is_connected = False\n await arg_0.on_disconnect.fire()\n\n # If the request ended with an error, the client must account for\n # messages being dropped during this time.\n\n logger.error('Ran out of retries for long-polling request')"} +{"_id": "doc_5503", "title": "", "text": "async def Func(arg_0):\n \"\"\"Open a long-polling request and receive arrays.\n\n This method uses keep-alive to make re-opening the request faster, but\n the remote server will set the \"Connection: close\" header once an hour.\n\n Raises hangups.NetworkError or ChannelSessionError.\n \"\"\"\n arg_1 = {\n 'VER': 8, # channel protocol version\n 'gsessionid': arg_0._gsessionid_param,\n 'RID': 'rpc', # request identifier\n 't': 1, # trial\n 'SID': arg_0._sid_param, # session ID\n 'CI': 0, # 0 if streaming/chunked requests should be used\n 'ctype': 'hangouts', # client type\n 'TYPE': 'xmlhttp', # type of request\n }\n logger.info('Opening new long-polling request')\n try:\n async with arg_0._session.fetch_raw('GET', CHANNEL_URL,\n arg_1=arg_1) as res:\n\n if res.status != 200:\n if res.status == 400 and res.reason == 'Unknown SID':\n raise ChannelSessionError('SID became invalid')\n raise exceptions.NetworkError(\n 'Request return unexpected status: {}: {}'.format(\n res.status, res.reason))\n\n while True:\n async with async_timeout.timeout(PUSH_TIMEOUT):\n arg_2 = await res.content.read(MAX_READ_BYTES)\n if not arg_2:\n break\n\n await arg_0._on_push_data(arg_2)\n\n except asyncio.TimeoutError:\n raise exceptions.NetworkError('Request timed out')\n except aiohttp.ServerDisconnectedError as err:\n raise exceptions.NetworkError(\n 'Server disconnected error: %s' % err)\n except aiohttp.ClientPayloadError:\n raise ChannelSessionError('SID is about to expire')\n except aiohttp.ClientError as err:\n raise exceptions.NetworkError('Request connection error: %s' % err)"} +{"_id": "doc_5504", "title": "", "text": "async def Func(arg_0, arg_1):\n \"\"\"Parse push data and trigger events.\"\"\"\n logger.debug('Received chunk:\\n{}'.format(arg_1))\n for arg_2 in arg_0._chunk_parser.get_chunks(arg_1):\n\n # Consider the channel connected once the first chunk is received.\n if not arg_0._is_connected:\n if arg_0._on_connect_called:\n arg_0._is_connected = True\n await arg_0.on_reconnect.fire()\n else:\n arg_0._on_connect_called = True\n arg_0._is_connected = True\n await arg_0.on_connect.fire()\n\n # chunk contains a container array\n arg_5 = json.loads(arg_2)\n # container array is an array of inner arrays\n for arg_6 in arg_5:\n # inner_array always contains 2 elements, the array_id and the\n # data_array.\n arg_7, arg_8 = arg_6\n logger.debug('Chunk contains data array with id %r:\\n%r',\n arg_7, arg_8)\n await arg_0.on_receive_array.fire(arg_8)"} +{"_id": "doc_5505", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Decode optional or required field.\"\"\"\n if arg_1.type == FieldDescriptor.TYPE_MESSAGE:\n decode(getattr(arg_0, arg_1.name), arg_2)\n else:\n try:\n if arg_1.type == FieldDescriptor.TYPE_BYTES:\n arg_2 = base64.b64decode(arg_2)\n setattr(arg_0, arg_1.name, arg_2)\n except (ValueError, TypeError) as e:\n # ValueError: invalid enum value, negative unsigned int value, or\n # invalid base64\n # TypeError: mismatched type\n logger.warning('Message %r ignoring field %s: %s',\n arg_0.__class__.__name__, arg_1.name, e)"} +{"_id": "doc_5506", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Decode repeated field.\"\"\"\n if arg_1.type == FieldDescriptor.TYPE_MESSAGE:\n for arg_3 in arg_2:\n decode(getattr(arg_0, arg_1.name).add(), arg_3)\n else:\n try:\n for arg_3 in arg_2:\n if arg_1.type == FieldDescriptor.TYPE_BYTES:\n arg_3 = base64.b64decode(arg_3)\n getattr(arg_0, arg_1.name).append(arg_3)\n except (ValueError, TypeError) as e:\n # ValueError: invalid enum value, negative unsigned int value, or\n # invalid base64\n # TypeError: mismatched type\n logger.warning('Message %r ignoring repeated field %s: %s',\n arg_0.__class__.__name__, arg_1.name, e)\n # Ignore any values already decoded by clearing list\n arg_0.ClearField(arg_1.name)"} +{"_id": "doc_5507", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Decode pblite to Protocol Buffer message.\n\n This method is permissive of decoding errors and will log them as warnings\n and continue decoding where possible.\n\n The first element of the outer pblite list must often be ignored using the\n ignore_first_item parameter because it contains an abbreviation of the name\n of the protobuf message (eg. cscmrp for ClientSendChatMessageResponseP)\n that's not part of the protobuf.\n\n Args:\n message: protocol buffer message instance to Func into.\n pblite: list representing a pblite-serialized message.\n ignore_first_item: If True, ignore the item at index 0 in the pblite\n list, making the item at index 1 correspond to field 1 in the\n message.\n \"\"\"\n if not isinstance(arg_1, list):\n logger.warning('Ignoring invalid message: expected list, got %r',\n type(arg_1))\n return\n if arg_2:\n arg_1 = arg_1[1:]\n # If the last item of the list is a dict, use it as additional field/value\n # mappings. 
This seems to be an optimization added for dealing with really\n # high field numbers.\n if arg_1 and isinstance(arg_1[-1], dict):\n arg_3 = {int(arg_5): arg_6 for arg_5, arg_6\n in arg_1[-1].items()}\n arg_1 = arg_1[:-1]\n else:\n arg_3 = {}\n arg_4 = itertools.chain(enumerate(arg_1, start=1),\n arg_3.items())\n for arg_5, arg_6 in arg_4:\n if arg_6 is None:\n continue\n try:\n arg_7 = arg_0.DESCRIPTOR.fields_by_number[arg_5]\n except KeyError:\n # If the tag number is unknown and the value is non-trivial, log a\n # message to aid reverse-engineering the missing field in the\n # message.\n if arg_6 not in [[], '', 0]:\n logger.debug('Message %r contains unknown field %s with value '\n '%r', arg_0.__class__.__name__, arg_5,\n arg_6)\n continue\n if arg_7.label == FieldDescriptor.LABEL_REPEATED:\n _Func_repeated_field(arg_0, arg_7, arg_6)\n else:\n _Func_field(arg_0, arg_7, arg_6)"} +{"_id": "doc_5508", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None):\n \"\"\"\n Sets the Elasticsearch hosts to use\n\n Args:\n hosts (str): A single hostname or URL, or list of hostnames or URLs\n use_ssl (bool): Use a HTTPS connection to the server\n ssl_cert_path (str): Path to the certificate chain\n \"\"\"\n if type(arg_0) != list:\n arg_0 = [arg_0]\n arg_3 = {\n \"hosts\": arg_0,\n \"timeout\": 20\n }\n if arg_1:\n arg_3['use_ssl'] = True\n if arg_2:\n arg_3['verify_certs'] = True\n arg_3['ca_certs'] = arg_2\n else:\n arg_3['verify_certs'] = False\n connections.create_connection(**arg_3)"} +{"_id": "doc_5509", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Updates index mappings\n\n Args:\n aggregate_indexes (list): A list of aggregate index names\n forensic_indexes (list): A list of forensic index names\n \"\"\"\n arg_2 = 2\n if arg_0 is None:\n arg_0 = []\n if arg_1 is None:\n arg_1 = []\n for arg_3 in arg_0:\n if not Index(arg_3).exists():\n continue\n arg_4 = Index(arg_3)\n arg_5 = \"doc\"\n arg_6 = \"published_policy.fo\"\n arg_7 = \"fo\"\n arg_8 = arg_4.get_field_mapping(fields=[arg_6])\n arg_8 = arg_8[list(arg_8.keys())[0]][\"mappings\"]\n if arg_5 not in arg_8:\n continue\n\n arg_8 = arg_8[arg_5][arg_6][\"mapping\"][arg_7]\n arg_9 = arg_8[\"type\"]\n if arg_9 == \"long\":\n arg_10 = \"{0}-v{1}\".format(arg_3, arg_2)\n arg_11 = {\"properties\": {\"published_policy.fo\": {\n \"type\": \"text\",\n \"fields\": {\n \"keyword\": {\n \"type\": \"keyword\",\n \"ignore_above\": 256\n }\n }\n }\n }\n }\n Index(arg_10).create()\n Index(arg_10).put_mapping(doc_type=arg_5, arg_11=arg_11)\n reindex(connections.get_connection(), arg_3,\n arg_10)\n Index(arg_3).delete()\n\n for arg_12 in arg_1:\n pass"} +{"_id": "doc_5510", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2):\n \"\"\"\n Saves aggregate DMARC reports to Kafka\n\n Args:\n aggregate_reports (list): A list of aggregate report dictionaries\n to save to Kafka\n aggregate_topic (str): The name of the Kafka topic\n\n \"\"\"\n if (type(arg_1) == dict or\n type(arg_1) == OrderedDict):\n arg_1 = [arg_1]\n\n if len(arg_1) < 1:\n return\n\n for arg_3 in arg_1:\n arg_3['date_range'] = arg_0.generate_daterange(arg_3)\n arg_3 = arg_0.strip_metadata(arg_3)\n\n for arg_4 in arg_3['records']:\n arg_4['date_range'] = arg_3['date_range']\n arg_4['org_name'] = arg_3['org_name']\n arg_4['org_email'] = arg_3['org_email']\n arg_4['policy_published'] = arg_3['policy_published']\n arg_4['report_id'] = arg_3['report_id']\n logger.debug(\"Sending slice.\")\n try:\n logger.debug(\"Saving aggregate report to Kafka\")\n 
arg_0.producer.send(arg_2, arg_4)\n except UnknownTopicOrPartitionError:\n raise KafkaError(\n \"Kafka error: Unknown topic or partition on broker\")\n except Exception as e:\n raise KafkaError(\n \"Kafka error: {0}\".format(e.__str__()))\n try:\n arg_0.producer.flush()\n except Exception as e:\n raise KafkaError(\n \"Kafka error: {0}\".format(e.__str__()))"} +{"_id": "doc_5511", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extracts xml from a zip or gzip file at the given path, file-like object,\n or bytes.\n\n Args:\n input_: A path to a file, a file like object, or bytes\n\n Returns:\n str: The extracted XML\n\n \"\"\"\n if type(arg_0) == str:\n arg_1 = open(arg_0, \"rb\")\n elif type(arg_0) == bytes:\n arg_1 = BytesIO(arg_0)\n else:\n arg_1 = arg_0\n try:\n arg_2 = arg_1.read(6)\n arg_1.seek(0)\n if arg_2.startswith(MAGIC_ZIP):\n arg_3 = zipfile.ZipFile(arg_1)\n arg_4 = arg_3.open(arg_3.namelist()[0]).read().decode()\n elif arg_2.startswith(MAGIC_GZIP):\n arg_4 = GzipFile(fileobj=arg_1).read().decode()\n elif arg_2.startswith(MAGIC_XML):\n arg_4 = arg_1.read().decode()\n else:\n arg_1.close()\n raise InvalidAggregateReport(\"Not a valid zip, gzip, or xml file\")\n\n arg_1.close()\n\n except UnicodeDecodeError:\n raise InvalidAggregateReport(\"File objects must be opened in binary \"\n \"(rb) mode\")\n except Exception as error:\n raise InvalidAggregateReport(\n \"Invalid archive file: {0}\".format(error.__str__()))\n\n return arg_4"} +{"_id": "doc_5512", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=2.0,\n arg_3=False):\n \"\"\"Parses a file at the given path, a file-like object, or bytes as an\n aggregate DMARC report\n\n Args:\n _input: A path to a file, a file like object, or bytes\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n dns_timeout (float): Sets the DNS timeout in seconds\n parallel (bool): Parallel processing\n\n Returns:\n OrderedDict: The parsed DMARC aggregate report\n \"\"\"\n arg_4 = extract_xml(arg_0)\n\n return parse_aggregate_report_xml(arg_4,\n arg_1=arg_1,\n timeout=arg_2,\n arg_3=arg_3)"} +{"_id": "doc_5513", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts one or more parsed forensic reports to flat CSV format, including\n headers\n\n Args:\n reports: A parsed forensic report or list of parsed forensic reports\n\n Returns:\n str: Parsed forensic report data in flat CSV format, including headers\n \"\"\"\n arg_1 = [\"feedback_type\", \"user_agent\", \"version\", \"original_envelope_id\",\n \"original_mail_from\", \"original_rcpt_to\", \"arrival_date\",\n \"arrival_date_utc\", \"subject\", \"message_id\",\n \"authentication_results\", \"dkim_domain\", \"source_ip_address\",\n \"source_country\", \"source_reverse_dns\", \"source_base_domain\",\n \"delivery_result\", \"auth_failure\", \"reported_domain\",\n \"authentication_mechanisms\", \"sample_headers_only\"]\n\n if type(arg_0) == OrderedDict:\n arg_0 = [arg_0]\n arg_2 = StringIO()\n arg_3 = DictWriter(arg_2, fieldnames=arg_1)\n arg_3.writeheader()\n for arg_4 in arg_0:\n arg_5 = arg_4.copy()\n arg_5[\"source_ip_address\"] = arg_4[\"source\"][\"ip_address\"]\n arg_5[\"source_reverse_dns\"] = arg_4[\"source\"][\"reverse_dns\"]\n arg_5[\"source_base_domain\"] = arg_4[\"source\"][\"base_domain\"]\n arg_5[\"source_country\"] = arg_4[\"source\"][\"country\"]\n del arg_5[\"source\"]\n arg_5[\"subject\"] = arg_4[\"parsed_sample\"][\"subject\"]\n arg_5[\"auth_failure\"] = \",\".join(arg_4[\"auth_failure\"])\n arg_6 = 
arg_4[\"authentication_mechanisms\"]\n arg_5[\"authentication_mechanisms\"] = \",\".join(\n arg_6)\n del arg_5[\"sample\"]\n del arg_5[\"parsed_sample\"]\n arg_3.writerow(arg_5)\n\n return arg_2.getvalue()"} +{"_id": "doc_5514", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=2.0,\n arg_3=False, arg_4=False):\n \"\"\"Parses a DMARC aggregate or forensic file at the given path, a\n file-like object. or bytes\n\n Args:\n input_: A path to a file, a file like object, or bytes\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n dns_timeout (float): Sets the DNS timeout in seconds\n strip_attachment_payloads (bool): Remove attachment payloads from\n forensic report results\n parallel (bool): Parallel processing\n\n Returns:\n OrderedDict: The parsed DMARC report\n \"\"\"\n if type(arg_0) == str:\n arg_5 = open(arg_0, \"rb\")\n elif type(arg_0) == bytes:\n arg_5 = BytesIO(arg_0)\n else:\n arg_5 = arg_0\n\n arg_6 = arg_5.read()\n try:\n arg_7 = parse_aggregate_report_file(arg_6, arg_1=arg_1,\n arg_2=arg_2,\n arg_4=arg_4)\n arg_8 = OrderedDict([(\"report_type\", \"aggregate\"),\n (\"report\", arg_7)])\n except InvalidAggregateReport:\n try:\n arg_9 = arg_3\n arg_8 = parse_report_email(arg_6,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_9,\n arg_4=arg_4)\n except InvalidDMARCReport:\n raise InvalidDMARCReport(\"Not a valid aggregate or forensic \"\n \"report\")\n return arg_8"} +{"_id": "doc_5515", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a list of an IMAP server's capabilities\n\n Args:\n server (imapclient.IMAPClient): An instance of imapclient.IMAPClient\n\n Returns (list): A list of capabilities\n \"\"\"\n\n arg_1 = list(map(str, list(arg_0.capabilities())))\n for arg_2 in range(len(arg_1)):\n arg_1[arg_2] = str(arg_1[arg_2]).replace(\"b'\",\n \"\").replace(\"'\",\n \"\")\n logger.debug(\"IMAP server supports: {0}\".format(arg_1))\n\n return arg_1"} +{"_id": "doc_5516", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=0,\n arg_5=False, arg_6=None, arg_7=None, arg_8=None,\n arg_9=None, arg_10=None, arg_11=None):\n \"\"\"\n Emails parsing results as a zip file\n\n Args:\n results (OrderedDict): Parsing results\n host: Mail server hostname or IP address\n mail_from: The value of the message from header\n mail_to : A list of addresses to mail to\n port (int): Port to use\n ssl (bool): Require a SSL connection from the start\n user: An optional username\n password: An optional password\n subject: Overrides the default message subject\n attachment_filename: Override the default attachment filename\n message: Override the default plain text body\n ssl_context: SSL context options\n \"\"\"\n logging.debug(\"Emailing report to: {0}\".format(\",\".join(arg_3)))\n arg_12 = datetime.now().strftime(\"%Y-%m-%d\")\n if arg_9:\n if not arg_9.lower().endswith(\".zip\"):\n arg_9 += \".zip\"\n arg_13 = arg_9\n else:\n arg_13 = \"DMARC-{0}.zip\".format(arg_12)\n\n assert isinstance(arg_3, list)\n\n arg_14 = MIMEMultipart()\n arg_14['From'] = arg_2\n arg_14['To'] = \", \".join(arg_3)\n arg_14['Date'] = email.utils.formatdate(localtime=True)\n arg_14['Subject'] = arg_8 or \"DMARC results for {0}\".format(arg_12)\n arg_15 = arg_10 or \"Please see the attached zip file\\n\"\n\n arg_14.attach(MIMEText(arg_15))\n\n arg_16 = get_report_zip(arg_0)\n arg_17 = MIMEApplication(arg_16, Name=arg_13)\n\n arg_17['Content-Disposition'] = 'attachment; filename=\"{0}\"'.format(arg_13)\n arg_14.attach(arg_17)\n\n try:\n if arg_11 is 
None:\n arg_11 = create_default_context()\n if arg_5:\n arg_18 = smtplib.SMTP_SSL(arg_1, arg_4=arg_4, context=arg_11)\n arg_18.connect(arg_1, arg_4)\n arg_18.ehlo_or_helo_if_needed()\n else:\n arg_18 = smtplib.SMTP(arg_1, arg_4=arg_4)\n arg_18.connect(arg_1, arg_4)\n arg_18.ehlo_or_helo_if_needed()\n if arg_18.has_extn(\"starttls\"):\n arg_18.starttls(context=arg_11)\n arg_18.ehlo()\n else:\n logger.warning(\"SMTP server does not support STARTTLS. \"\n \"Proceeding in plain text!\")\n if arg_6 and arg_7:\n arg_18.login(arg_6, arg_7)\n arg_18.sendmail(arg_2, arg_3, arg_14.as_string())\n except smtplib.SMTPException as arg_19:\n arg_19 = arg_19.__str__().lstrip(\"b'\").rstrip(\"'\").rstrip(\".\")\n raise SMTPError(arg_19)\n except socket.gaierror:\n raise SMTPError(\"DNS resolution failed\")\n except ConnectionRefusedError:\n raise SMTPError(\"Connection refused\")\n except ConnectionResetError:\n raise SMTPError(\"Connection reset\")\n except ConnectionAbortedError:\n raise SMTPError(\"Connection aborted\")\n except TimeoutError:\n raise SMTPError(\"Connection timed out\")\n except SSLError as arg_19:\n raise SMTPError(\"SSL error: {0}\".format(arg_19.__str__()))\n except CertificateError as arg_19:\n raise SMTPError(\"Certificate error: {0}\".format(arg_19.__str__()))"} +{"_id": "doc_5517", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Saves aggregate DMARC reports to Splunk\n\n Args:\n aggregate_reports: A list of aggregate report dictionaries\n to save in Splunk\n\n \"\"\"\n logger.debug(\"Saving aggregate reports to Splunk\")\n if type(arg_1) == dict:\n arg_1 = [arg_1]\n\n if len(arg_1) < 1:\n return\n\n arg_2 = arg_0._common_data.copy()\n arg_3 = \"\"\n for arg_4 in arg_1:\n for arg_5 in arg_4[\"records\"]:\n arg_6 = dict()\n for arg_7 in arg_4[\"report_metadata\"]:\n arg_6[arg_7] = arg_4[\"report_metadata\"][arg_7]\n arg_6[\"published_policy\"] = arg_4[\"policy_published\"]\n arg_6[\"source_ip_address\"] = arg_5[\"source\"][\n \"ip_address\"]\n arg_6[\"source_country\"] = arg_5[\"source\"][\"country\"]\n arg_6[\"source_reverse_dns\"] = arg_5[\"source\"][\n \"reverse_dns\"]\n arg_6[\"source_base_domain\"] = arg_5[\"source\"][\n \"base_domain\"]\n arg_6[\"message_count\"] = arg_5[\"count\"]\n arg_6[\"disposition\"] = arg_5[\"policy_evaluated\"][\n \"disposition\"\n ]\n arg_6[\"spf_aligned\"] = arg_5[\"alignment\"][\"spf\"]\n arg_6[\"dkim_aligned\"] = arg_5[\"alignment\"][\"dkim\"]\n arg_6[\"passed_dmarc\"] = arg_5[\"alignment\"][\"dmarc\"]\n arg_6[\"header_from\"] = arg_5[\"identifiers\"][\n \"header_from\"]\n arg_6[\"envelope_from\"] = arg_5[\"identifiers\"][\n \"envelope_from\"]\n if \"dkim\" in arg_5[\"auth_results\"]:\n arg_6[\"dkim_results\"] = arg_5[\"auth_results\"][\n \"dkim\"]\n if \"spf\" in arg_5[\"auth_results\"]:\n arg_6[\"spf_results\"] = arg_5[\"auth_results\"][\n \"spf\"]\n\n arg_2[\"sourcetype\"] = \"dmarc:aggregate\"\n arg_8 = human_timestamp_to_timestamp(\n arg_6[\"begin_date\"])\n arg_2[\"time\"] = arg_8\n arg_2[\"event\"] = arg_6.copy()\n arg_3 += \"{0}\\n\".format(json.dumps(arg_2))\n\n if not arg_0.session.verify:\n logger.debug(\"Skipping certificate verification for Splunk HEC\")\n try:\n arg_9 = arg_0.session.post(arg_0.url, arg_2=arg_3,\n timeout=arg_0.timeout)\n arg_9 = arg_9.json()\n except Exception as e:\n raise SplunkError(e.__str__())\n if arg_9[\"code\"] != 0:\n raise SplunkError(arg_9[\"text\"])"} +{"_id": "doc_5518", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decodes a base64 string, with padding being optional\n\n Args:\n data: A 
base64 encoded string\n\n Returns:\n bytes: The decoded bytes\n\n \"\"\"\n arg_0 = bytes(arg_0, encoding=\"ascii\")\n arg_1 = len(arg_0) % 4\n if arg_1 != 0:\n arg_0 += b'=' * (4 - arg_1)\n return base64.b64decode(arg_0)"} +{"_id": "doc_5519", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Gets the base domain name for the given domain\n\n .. note::\n Results are based on a list of public domain suffixes at\n https://publicsuffix.org/list/public_suffix_list.dat.\n\n Args:\n domain (str): A domain or subdomain\n use_fresh_psl (bool): Download a fresh Public Suffix List\n\n Returns:\n str: The base domain of the given domain\n\n \"\"\"\n arg_2 = os.path.join(tempdir, \"public_suffix_list.dat\")\n\n def download_psl():\n arg_3 = \"https://publicsuffix.org/list/public_suffix_list.dat\"\n # Use a browser-like user agent string to bypass some proxy blocks\n arg_4 = {\"User-Agent\": USER_AGENT}\n arg_5 = requests.get(arg_3, arg_4=arg_4).text\n with open(arg_2, \"w\", encoding=\"utf-8\") as fresh_psl_file:\n fresh_psl_file.write(arg_5)\n\n if arg_1:\n if not os.path.exists(arg_2):\n download_psl()\n else:\n arg_6 = datetime.now() - datetime.fromtimestamp(\n os.stat(arg_2).st_mtime)\n if arg_6 > timedelta(hours=24):\n try:\n download_psl()\n except Exception as error:\n logger.warning(\n \"Failed to download an updated PSL {0}\".format(error))\n with open(arg_2, encoding=\"utf-8\") as psl_file:\n arg_7 = publicsuffix2.PublicSuffixList(psl_file)\n\n return arg_7.get_public_suffix(arg_0)\n else:\n return publicsuffix2.get_public_suffix(arg_0)"} +{"_id": "doc_5520", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=2.0):\n \"\"\"\n Resolves an IP address to a hostname using a reverse DNS query\n\n Args:\n ip_address (str): The IP address to resolve\n cache (ExpiringDict): Cache storage\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n timeout (float): Sets the DNS query timeout in seconds\n\n Returns:\n str: The reverse DNS hostname (if any)\n \"\"\"\n arg_4 = None\n try:\n arg_5 = dns.reversename.from_address(arg_0)\n arg_4 = query_dns(arg_5, \"PTR\", arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3)[0]\n\n except dns.exception.DNSException:\n pass\n\n return arg_4"} +{"_id": "doc_5521", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Converts a human-readable timestamp into a Python ``DateTime`` object\n\n Args:\n human_timestamp (str): A timestamp string\n to_utc (bool): Convert the timestamp to UTC\n\n Returns:\n DateTime: The converted timestamp\n \"\"\"\n\n arg_2 = {}\n\n if arg_1:\n arg_2 = {\"TO_TIMEZONE\": \"UTC\"}\n\n return dateparser.parse(arg_0, arg_2=arg_2)"} +{"_id": "doc_5522", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None,\n arg_3=2.0, arg_4=False):\n \"\"\"\n Returns reverse DNS and country information for the given IP address\n\n Args:\n ip_address (str): The IP address to check\n cache (ExpiringDict): Cache storage\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n timeout (float): Sets the DNS timeout in seconds\n parallel (bool): parallel processing\n\n Returns:\n OrderedDict: ``ip_address``, ``reverse_dns``\n\n \"\"\"\n arg_0 = arg_0.lower()\n if arg_1:\n arg_5 = arg_1.get(arg_0, None)\n if arg_5:\n return arg_5\n arg_5 = OrderedDict()\n arg_5[\"ip_address\"] = arg_0\n arg_6 = get_reverse_dns(arg_0,\n arg_2=arg_2,\n arg_3=arg_3)\n arg_7 = get_ip_address_country(arg_0, arg_4=arg_4)\n 
arg_5[\"country\"] = arg_7\n arg_5[\"reverse_dns\"] = arg_6\n arg_5[\"base_domain\"] = None\n if arg_6 is not None:\n arg_8 = get_base_domain(arg_6)\n arg_5[\"base_domain\"] = arg_8\n\n return arg_5"} +{"_id": "doc_5523", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to\n standard RFC 822 format\n\n Args:\n msg_bytes (bytes): the content of the .msg file\n\n Returns:\n A RFC 822 string\n \"\"\"\n if not is_outlook_msg(arg_0):\n raise ValueError(\"The supplied bytes are not an Outlook MSG file\")\n arg_1 = os.getcwd()\n arg_2 = tempfile.mkdtemp()\n os.chdir(arg_2)\n with open(\"sample.msg\", \"wb\") as msg_file:\n msg_file.write(arg_0)\n try:\n subprocess.check_call([\"msgconvert\", \"sample.msg\"],\n stdout=null_file, stderr=null_file)\n arg_3 = \"sample.eml\"\n with open(arg_3, \"rb\") as eml_file:\n arg_4 = eml_file.read()\n except FileNotFoundError:\n raise EmailParserError(\n \"Failed to convert Outlook MSG: msgconvert utility not found\")\n finally:\n os.chdir(arg_1)\n shutil.rmtree(arg_2)\n\n return arg_4"} +{"_id": "doc_5524", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=False):\n \"\"\"Separated this function for multiprocessing\"\"\"\n try:\n arg_5 = parse_report_file(arg_0,\n arg_2=arg_2,\n arg_3=arg_3,\n strip_attachment_payloads=arg_1,\n arg_4=arg_4)\n except ParserError as error:\n return error, arg_0\n finally:\n global counter\n with counter.get_lock():\n counter.value += 1\n return arg_5, arg_0"} +{"_id": "doc_5525", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Sends a PUB command to the server on the specified subject.\n\n ->> PUB hello 5\n ->> MSG_PAYLOAD: world\n <<- MSG hello 2 5\n\n \"\"\"\n if arg_0.is_closed:\n raise ErrConnectionClosed\n if arg_0.is_draining_pubs:\n raise ErrConnectionDraining\n\n arg_3 = len(arg_2)\n if arg_3 > arg_0._max_payload:\n raise ErrMaxPayload\n yield from arg_0._Func(arg_1, _EMPTY_, arg_2, arg_3)"} +{"_id": "doc_5526", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Publishes a message tagging it with a reply subscription\n which can be used by those receiving the message to respond.\n\n ->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5\n ->> MSG_PAYLOAD: world\n <<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5\n\n \"\"\"\n if arg_0.is_closed:\n raise ErrConnectionClosed\n if arg_0.is_draining_pubs:\n raise ErrConnectionDraining\n\n arg_4 = len(arg_3)\n if arg_4 > arg_0._max_payload:\n raise ErrMaxPayload\n yield from arg_0._publish(arg_1, arg_2.encode(), arg_3, arg_4)"} +{"_id": "doc_5527", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Sets the subcription to use a task per message to be processed.\n\n ..deprecated:: 7.0\n Will be removed 9.0.\n \"\"\"\n arg_2[\"is_async\"] = True\n arg_3 = yield from arg_0.subscribe(arg_1, **arg_2)\n return arg_3"} +{"_id": "doc_5528", "title": "", "text": "def Func(arg_0, arg_1=60):\n \"\"\"\n Sends a ping to the server expecting a pong back ensuring\n what we have written so far has made it to the server and\n also enabling measuring of roundtrip time.\n In case a pong is not returned within the allowed timeout,\n then it will raise ErrTimeout.\n \"\"\"\n if arg_1 <= 0:\n raise ErrBadTimeout\n\n if arg_0.is_closed:\n raise ErrConnectionClosed\n\n arg_2 = asyncio.Future(loop=arg_0._loop)\n try:\n yield from arg_0._send_ping(arg_2)\n yield from asyncio.wait_for(arg_2, arg_1, loop=arg_0._loop)\n except asyncio.TimeoutError:\n arg_2.cancel()\n raise 
ErrTimeout"} +{"_id": "doc_5529", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Looks up in the server pool for an available server\n and attempts to connect.\n \"\"\"\n\n while True:\n if len(arg_0._server_pool) == 0:\n arg_0._current_server = None\n raise ErrNoServers\n\n arg_2 = time.monotonic()\n arg_3 = arg_0._server_pool.pop(0)\n if arg_0.options[\"max_reconnect_attempts\"] > 0:\n if arg_3.reconnects > arg_0.options[\"max_reconnect_attempts\"]:\n # Discard server since already tried to reconnect too many times\n continue\n\n # Not yet exceeded max_reconnect_attempts so can still use\n # this server in the future.\n arg_0._server_pool.append(arg_3)\n if arg_3.last_attempt is not None and arg_2 < arg_3.last_attempt + arg_0.options[\"reconnect_time_wait\"]:\n # Backoff connecting to server if we attempted recently.\n yield from asyncio.sleep(arg_0.options[\"reconnect_time_wait\"], loop=arg_0._loop)\n try:\n arg_3.last_attempt = time.monotonic()\n arg_5, arg_6 = yield from asyncio.open_connection(\n arg_3.uri.hostname,\n arg_3.uri.port,\n loop=arg_0._loop,\n limit=DEFAULT_BUFFER_SIZE)\n arg_0._current_server = arg_3\n\n # We keep a reference to the initial transport we used when\n # establishing the connection in case we later upgrade to TLS\n # after getting the first INFO message. This is in order to\n # prevent the GC closing the socket after we send CONNECT\n # and replace the transport.\n #\n # See https://github.com/nats-io/asyncio-nats/issues/43\n arg_0._bare_io_reader = arg_0._io_reader = arg_5\n arg_0._bare_io_writer = arg_0._io_writer = arg_6\n break\n except Exception as e:\n arg_3.last_attempt = time.monotonic()\n arg_3.reconnects += 1\n\n arg_0._err = e\n if arg_0._error_cb is not None:\n yield from arg_0._error_cb(e)\n continue"} +{"_id": "doc_5530", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process errors which occured while reading or parsing\n the protocol. 
If allow_reconnect is enabled it will\n try to switch the server to which it is currently connected\n otherwise it will disconnect.\n \"\"\"\n if arg_0.is_connecting or arg_0.is_closed or arg_0.is_reconnecting:\n return\n\n if arg_0.options[\"allow_reconnect\"] and arg_0.is_connected:\n arg_0._status = Client.RECONNECTING\n arg_0._ps.reset()\n\n if arg_0._reconnection_task is not None and not arg_0._reconnection_task.cancelled():\n # Cancel the previous task in case it may still be running.\n arg_0._reconnection_task.cancel()\n\n arg_0._reconnection_task = arg_0._loop.create_task(arg_0._attempt_reconnect())\n else:\n arg_0._process_disconnect()\n arg_0._err = arg_1\n yield from arg_0._close(Client.CLOSED, True)"} +{"_id": "doc_5531", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Process PONG sent by server.\n \"\"\"\n if len(arg_0._pongs) > 0:\n arg_1 = arg_0._pongs.pop(0)\n arg_1.set_result(True)\n arg_0._pongs_received += 1\n arg_0._pings_outstanding -= 1"} +{"_id": "doc_5532", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Coroutine which continuously tries to consume pending commands\n and then flushes them to the socket.\n \"\"\"\n while True:\n if not arg_0.is_connected or arg_0.is_connecting:\n break\n\n try:\n yield from arg_0._flush_queue.get()\n\n if arg_0._pending_data_size > 0:\n arg_0._io_writer.writelines(arg_0._pending[:])\n arg_0._pending = []\n arg_0._pending_data_size = 0\n yield from arg_0._io_writer.drain()\n except OSError as e:\n if arg_0._error_cb is not None:\n yield from arg_0._error_cb(e)\n yield from arg_0._process_op_err(e)\n break\n except asyncio.CancelledError:\n break"} +{"_id": "doc_5533", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Coroutine which gathers bytes sent by the server\n and feeds them to the protocol parser.\n In case of error while reading, it will stop running\n and its task has to be rescheduled.\n \"\"\"\n while True:\n try:\n arg_1 = arg_0.is_closed or arg_0.is_reconnecting\n if arg_1 or arg_0._io_reader is None:\n break\n if arg_0.is_connected and arg_0._io_reader.at_eof():\n if arg_0._error_cb is not None:\n yield from arg_0._error_cb(ErrStaleConnection)\n yield from arg_0._process_op_err(ErrStaleConnection)\n break\n\n arg_2 = yield from arg_0._io_reader.read(DEFAULT_BUFFER_SIZE)\n yield from arg_0._ps.parse(arg_2)\n except ErrProtocol:\n yield from arg_0._process_op_err(ErrProtocol)\n break\n except OSError as e:\n yield from arg_0._process_op_err(e)\n break\n except asyncio.CancelledError:\n break"} +{"_id": "doc_5534", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generates a timezone aware datetime if the 'USE_TZ' setting is enabled\n\n :param value: The datetime value\n :return: A locale aware datetime\n \"\"\"\n return timezone.make_aware(arg_0, timezone.get_current_timezone()) if getattr(settings, 'USE_TZ', False) else arg_0"} +{"_id": "doc_5535", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Load feature data from a 2D ndarray on disk. \"\"\"\n arg_0.feature_images = np.load(arg_1)\n arg_0.feature_names = range(arg_0.feature_images.shape[1])"} +{"_id": "doc_5536", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Load feature image data from image files.\n\n Args:\n images: A list of image filenames.\n names: An optional list of strings to use as the feature names. 
Must\n be in the same order as the images.\n \"\"\"\n if arg_2 is not None and len(arg_2) != len(arg_1):\n raise Exception(\n \"Lists of feature names and images must be of same length!\")\n arg_0.feature_names = arg_2 if arg_2 is not None else arg_1\n arg_0.feature_images = imageutils.load_imgs(arg_1, arg_0.masker)"}
{"_id": "doc_5537", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Decode images using Pearson's r.\n\n Computes the correlation between each input image and each feature\n image across voxels.\n\n Args:\n imgs_to_decode: An ndarray of images to decode, with voxels in rows\n and images in columns.\n\n Returns:\n An n_features x n_images 2D array, with each cell representing the\n pearson correlation between the i'th feature and the j'th image\n across all voxels.\n \"\"\"\n arg_2, arg_3 = arg_1.astype(float), arg_0.feature_images.astype(float)\n return arg_0._xy_corr(arg_2, arg_3)"}
{"_id": "doc_5538", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Decoding using the dot product.\n \"\"\"\n return np.dot(arg_1.T, arg_0.feature_images).T"}
{"_id": "doc_5539", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.08, arg_3=True,\n arg_4=None, arg_5=None,\n arg_6=\"scale\"):\n \"\"\" Set up data for a classification task given a set of masks\n\n Given a set of masks, this function retrieves studies associated with\n each mask at the specified threshold, optionally removes overlap and\n filters by studies and features, and returns studies by feature matrix\n (X) and class labels (y)\n\n Args:\n dataset: a Neurosynth dataset\n masks: a list of paths to Nifti masks\n threshold: percentage of voxels active within the mask for study\n to be included\n remove_overlap: A boolean indicating if studies that\n appear in more than one mask should be excluded\n studies: An optional list of study names used to constrain the set\n used in classification. If None, will use all features in the\n dataset.\n features: An optional list of feature names used to constrain the\n set used in classification. If None, will use all features in\n the dataset.\n regularize: Optional boolean indicating if X should be regularized\n\n Returns:\n A tuple (X, y) of np arrays.\n X is a feature by studies matrix and y is a vector of class labels\n \"\"\"\n\n import nibabel as nib\n import os\n\n # Load masks using NiBabel\n\n try:\n arg_7 = [nib.load(os.path.relpath(m)) for m in arg_1]\n except OSError:\n print('Error loading masks. 
Check the path')\n\n # Get a list of studies that activate for each mask file--i.e., a list of\n # lists\n\n arg_8 = [arg_0.get_studies(mask=m, activation_threshold=arg_2)\n for m in arg_7]\n\n # Flattened ids\n\n arg_9 = reduce(lambda a, b: a + b, arg_8)\n\n # Remove duplicates\n\n if arg_3:\n import collections\n arg_9 = [id for (id, count) in\n collections.Counter(arg_9).items() if count == 1]\n arg_8 = [[x for x in m if x in arg_9] for m in\n arg_8] # Remove\n\n # Create class label(y)\n arg_10 = [[idx] * len(ids) for (idx, ids) in enumerate(arg_8)]\n arg_10 = reduce(lambda a, b: a + b, arg_10) # Flatten\n arg_10 = np.array(arg_10)\n\n # Extract feature set for each class separately\n arg_11 = [arg_0.get_feature_data(ids=group_ids, arg_5=arg_5)\n for group_ids in arg_8]\n\n arg_11 = np.vstack(tuple(arg_11))\n\n if arg_6:\n arg_11 = regularize(arg_11, method=arg_6)\n\n return (arg_11, arg_10)"} +{"_id": "doc_5540", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns a list with the order that features requested appear in\n dataset \"\"\"\n arg_2 = arg_0.get_feature_names()\n\n arg_3 = [arg_2.index(f) for f in arg_1]\n\n return arg_3"} +{"_id": "doc_5541", "title": "", "text": "def Func(arg_0, arg_1='auto', arg_2=None):\n \"\"\" Sets the class_weight of the classifier to match y \"\"\"\n\n if arg_1 is None:\n arg_3 = None\n\n try:\n arg_0.clf.set_params(arg_1=arg_3)\n except ValueError:\n pass\n\n elif arg_1 == 'auto':\n arg_4 = np.bincount(arg_2)\n arg_5 = np.nonzero(arg_4)[0]\n arg_4 = arg_4 / float(arg_4.sum())\n arg_3 = dict(zip(arg_5[::-1], arg_4[arg_5]))\n\n try:\n arg_0.clf.set_params(arg_1=arg_3)\n except ValueError:\n import warnings\n warnings.warn(\n \"Tried to set class_weight, but failed. The classifier \"\n \"probably doesn't support it\")"} +{"_id": "doc_5542", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None,\n arg_4='features'):\n \"\"\" Given a dataset, fits either features or voxels to y \"\"\"\n\n # Get data from dataset\n\n if arg_4 == 'features':\n arg_5 = np.rot90(arg_1.feature_table.data.toarray())\n elif arg_4 == 'voxels':\n arg_5 = np.rot90(arg_1.image_table.data.toarray())\n\n arg_0.sk_classifier.fit(arg_5, arg_2)"} +{"_id": "doc_5543", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=True):\n \"\"\" Aggregates over all voxels within each ROI in the input image.\n\n Takes a Dataset and a Nifti image that defines distinct regions, and\n returns a numpy matrix of ROIs x mappables, where the value at each\n ROI is the proportion of active voxels in that ROI. Each distinct ROI\n must have a unique value in the image; non-contiguous voxels with the\n same value will be assigned to the same ROI.\n\n Args:\n dataset: Either a Dataset instance from which image data are\n extracted, or a Numpy array containing image data to use. If\n the latter, the array contains voxels in rows and\n features/studies in columns. The number of voxels must be equal\n to the length of the vectorized image mask in the regions\n image.\n regions: An image defining the boundaries of the regions to use.\n Can be one of:\n 1) A string name of the NIFTI or Analyze-format image\n 2) A NiBabel SpatialImage\n 3) A list of NiBabel images\n 4) A 1D numpy array of the same length as the mask vector in\n the Dataset's current Masker.\n masker: Optional masker used to load image if regions is not a\n numpy array. Must be passed if dataset is a numpy array.\n threshold: An optional float in the range of 0 - 1 or integer. 
If\n passed, the array will be binarized, with ROI values above the\n threshold assigned to True and values below the threshold\n assigned to False. (E.g., if threshold = 0.05, only ROIs in\n which more than 5% of voxels are active will be considered\n active.) If threshold is integer, studies will only be\n considered active if they activate more than that number of\n voxels in the ROI.\n remove_zero: An optional boolean; when True, assume that voxels\n with value of 0 should not be considered as a separate ROI, and\n will be ignored.\n\n Returns:\n A 2D numpy array with ROIs in rows and mappables in columns.\n \"\"\"\n\n if arg_2 is not None:\n arg_2 = arg_2\n else:\n if isinstance(arg_0, Dataset):\n arg_2 = arg_0.masker\n else:\n if not type(arg_1).__module__.startswith('numpy'):\n raise ValueError(\n \"If dataset is a numpy array and regions is not a numpy \"\n \"array, a masker must be provided.\")\n\n if not type(arg_1).__module__.startswith('numpy'):\n arg_1 = arg_2.mask(arg_1)\n\n if isinstance(arg_0, Dataset):\n arg_0 = arg_0.get_image_data(dense=False)\n\n # If multiple images are passed, give each one a unique value\n if arg_1.ndim == 2:\n arg_5 = arg_1\n for arg_6 in range(arg_1.shape[1]):\n arg_7 = np.nonzero(arg_5[:, arg_6])[0]\n if isinstance(arg_3, int):\n arg_5[arg_7, arg_6] = 1.0\n else:\n arg_5[arg_7, arg_6] = 1.0 / np.count_nonzero(arg_5[:, arg_6])\n\n # Otherwise create an ROI-coding matrix\n else:\n arg_8 = np.unique(arg_1)\n\n if arg_4:\n arg_8 = arg_8[np.nonzero(arg_8)]\n\n arg_9 = arg_8.size\n\n arg_5 = np.zeros((arg_1.size, arg_9))\n for arg_6 in range(arg_9):\n if isinstance(arg_3, int):\n arg_5[arg_1 == arg_8[arg_6], arg_6] = 1.0\n else:\n arg_5[arg_1 == arg_8[arg_6], arg_6] = 1.0 / \\\n np.sum(arg_1 == arg_8[arg_6])\n\n # Call dot() on the array itself as this will use sparse matrix\n # multiplication if possible.\n arg_10 = arg_0.T.dot(arg_5).T\n\n if arg_3 is not None:\n arg_10[arg_10 < arg_3] = 0.0\n arg_10 = arg_10.astype(bool)\n\n return arg_10"} +{"_id": "doc_5544", "title": "", "text": "def Func(arg_0, arg_1, arg_2=40):\n \"\"\" Return top forty words from each topic in trained topic model.\n \"\"\"\n arg_3 = []\n for arg_4 in arg_0.components_:\n arg_5 = [arg_1[i] for i in arg_4.argsort()[:-arg_2-1:-1]]\n arg_3 += [arg_5]\n return arg_3"} +{"_id": "doc_5545", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Correlates row vector x with each row vector in 2D array y. \"\"\"\n arg_2 = np.vstack((arg_0, arg_1))\n arg_3 = arg_2.mean(axis=1)[(slice(None, None, None), None)]\n arg_4 = arg_2 - arg_3\n arg_5 = np.sqrt(np.sum(arg_4**2, axis=1))\n arg_6 = np.dot(arg_4[1:], arg_4[0].T)\n arg_7 = arg_6 / (arg_5[1:] * arg_5[0])\n return arg_7"} +{"_id": "doc_5546", "title": "", "text": "def Func(arg_0, arg_1=.05):\n \"\"\" Determine FDR threshold given a p value array and desired false\n discovery rate q. \"\"\"\n arg_2 = np.sort(arg_0)\n arg_3 = arg_0.shape[0]\n arg_4 = np.array(range(1, arg_3 + 1), dtype='float') * arg_1 / arg_3\n arg_5 = np.where(arg_2 <= arg_4)[0]\n return arg_2[max(arg_5)] if len(arg_5) else -1"} +{"_id": "doc_5547", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Create and store a new ImageTable instance based on the current\n Dataset. Will generally be called privately, but may be useful as a\n convenience method in cases where the user wants to re-generate the\n table with a new smoothing kernel of different radius.\n\n Args:\n r (int): An optional integer indicating the radius of the smoothing\n kernel. 
By default, this is None, which will keep whatever\n value is currently set in the Dataset instance.\n \"\"\"\n logger.info(\"Creating image table...\")\n if arg_1 is not None:\n arg_0.r = arg_1\n arg_0.image_table = ImageTable(arg_0)"} +{"_id": "doc_5548", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3='outer',\n arg_4='ignore', arg_5=0.0, arg_6=0.001):\n \"\"\" Construct a new FeatureTable from file.\n\n Args:\n features: Feature data to add. Can be:\n (a) A text file containing the feature data, where each row is\n a study in the database, with features in columns. The first\n column must contain the IDs of the studies to match up with the\n image data.\n (b) A pandas DataFrame, where studies are in rows, features are\n in columns, and the index provides the study IDs.\n append (bool): If True, adds new features to existing ones\n incrementally. If False, replaces old features.\n merge, duplicates, min_studies, threshold: Additional arguments\n passed to FeatureTable.Func().\n \"\"\"\n if (not arg_2) or not hasattr(arg_0, 'feature_table'):\n arg_0.feature_table = FeatureTable(arg_0)\n\n arg_0.feature_table.Func(arg_1, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6)"} +{"_id": "doc_5549", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Load a pickled Dataset instance from file. \"\"\"\n try:\n arg_2 = pickle.Func(open(arg_1, 'rb'))\n except UnicodeDecodeError:\n # Need to try this for python3\n arg_2 = pickle.Func(open(arg_1, 'rb'), encoding='latin')\n\n if hasattr(arg_2, 'feature_table'):\n arg_2.feature_table._csr_to_sdf()\n return arg_2"} +{"_id": "doc_5550", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Given a list of features, returns features in order that they\n appear in database.\n\n Args:\n features (list): A list or 1D numpy array of named features to\n return.\n\n Returns:\n A list of features in order they appear in database.\n \"\"\"\n\n arg_2 = np.where(\n np.in1d(arg_0.data.columns.values, np.array(arg_1)))[0]\n return list(arg_0.data.columns[arg_2].values)"} +{"_id": "doc_5551", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.0, arg_3=arg_4.sum, arg_6=False):\n \"\"\" Returns a list of all studies in the table that meet the desired\n feature-based criteria.\n\n Will most commonly be used to retrieve studies that use one or more\n features with some minimum frequency; e.g.,:\n\n Func(['fear', 'anxiety'], threshold=0.001)\n\n Args:\n features (lists): a list of feature names to search on.\n threshold (float): optional float indicating threshold features\n must pass to be included.\n func (Callable): any numpy function to use for thresholding\n (default: sum). The function will be applied to the list of\n features and the result compared to the threshold. This can be\n used to change the meaning of the query in powerful ways. E.g,:\n max: any of the features have to pass threshold\n (i.e., max > thresh)\n min: all features must each individually pass threshold\n (i.e., min > thresh)\n sum: the summed weight of all features must pass threshold\n (i.e., sum > thresh)\n get_weights (bool): if True, returns a dict with ids => weights.\n\n Returns:\n When get_weights is false (default), returns a list of study\n names. 
When true, returns a dict, with study names as keys\n and feature weights as values.\n \"\"\"\n if isinstance(arg_1, str):\n arg_1 = [arg_1]\n arg_1 = arg_0.search_features(arg_1) # Expand wild cards\n arg_7 = arg_0.data.ix[:, arg_1]\n arg_8 = arg_7.apply(arg_3, 1)\n arg_9 = arg_8[arg_8 >= arg_2]\n # ids_to_keep = self.ids[above_thresh]\n return arg_9 if arg_6 else list(arg_9.index)"} +{"_id": "doc_5552", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Returns all features that match any of the elements in the input\n list.\n\n Args:\n search (str, list): A string or list of strings defining the query.\n\n Returns:\n A list of matching feature names.\n '''\n if isinstance(arg_1, string_types):\n arg_1 = [arg_1]\n arg_1 = [arg_4.replace('*', '.*') for arg_4 in arg_1]\n arg_2 = list(arg_0.data.columns)\n arg_3 = []\n for arg_4 in arg_1:\n arg_3.extend([arg_5 for arg_5 in arg_2 if re.match(arg_4 + '$', arg_5)])\n return list(set(arg_3))"} +{"_id": "doc_5553", "title": "", "text": "def Func(arg_0):\n \"\"\" Convert FeatureTable to SciPy CSR matrix. \"\"\"\n arg_1 = arg_0.data.to_dense()\n arg_0.data = {\n 'columns': list(arg_1.columns),\n 'index': list(arg_1.index),\n 'values': sparse.csr_matrix(arg_1.values)\n }"} +{"_id": "doc_5554", "title": "", "text": "def Func(*arg_0):\n \"\"\" Deprecation warning decorator. Takes optional deprecation message,\n otherwise will use a generic warning. \"\"\"\n def wrap(arg_1):\n def wrapped_func(*arg_0, **arg_2):\n warnings.warn(arg_3, category=DeprecationWarning)\n return arg_1(*arg_0, **arg_2)\n return wrapped_func\n\n if len(arg_0) == 1 and callable(arg_0[0]):\n arg_3 = \"Function '%s' will be Func in future versions of \" \\\n \"Neurosynth.\" % arg_0[0].__name__\n return wrap(arg_0[0])\n else:\n arg_3 = arg_0[0]\n return wrap"} +{"_id": "doc_5555", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Convert coordinates from one space to another using provided\n Funcation matrix. \"\"\"\n arg_2 = linalg.pinv(arg_1)\n arg_0 = np.hstack((arg_0, np.ones((arg_0.shape[0], 1))))\n return np.dot(arg_0, arg_2)[:, 0:3]"} +{"_id": "doc_5556", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\" Convert an N x 3 array of XYZ coordinates to matrix indices. \"\"\"\n arg_0 = np.hstack((arg_0, np.ones((arg_0.shape[0], 1))))\n arg_3 = np.array([[-0.5, 0, 0, 45], [0, 0.5, 0, 63], [0, 0, 0.5, 36]]).T\n arg_4 = np.dot(arg_0, arg_3)[:, ::-1] # multiply and reverse column order\n return np.round_(arg_4).astype(int)"} +{"_id": "doc_5557", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6, arg_7, arg_8, arg_9,\n arg_10):\n \"\"\"Perform an ADC read with the provided mux, gain, data_rate, and mode\n values and with the comparator enabled as specified. 
Returns the signed\n integer result of the read.\n \"\"\"\n assert arg_10 == 1 or arg_10 == 2 or arg_10 == 4, 'Num readings must be 1, 2, or 4!'\n # Set high and low threshold register values.\n arg_0._device.writeList(ADS1x15_POINTER_HIGH_THRESHOLD, [(arg_5 >> 8) & 0xFF, arg_5 & 0xFF])\n arg_0._device.writeList(ADS1x15_POINTER_LOW_THRESHOLD, [(arg_6 >> 8) & 0xFF, arg_6 & 0xFF])\n # Now build up the appropriate config register value.\n arg_11 = ADS1x15_CONFIG_OS_SINGLE # Go out of power-down mode for conversion.\n # Specify mux value.\n arg_11 |= (arg_1 & 0x07) << ADS1x15_CONFIG_MUX_OFFSET\n # Validate the passed in gain and then set it in the config.\n if arg_2 not in ADS1x15_CONFIG_GAIN:\n raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')\n arg_11 |= ADS1x15_CONFIG_GAIN[arg_2]\n # Set the mode (continuous or single shot).\n arg_11 |= arg_4\n # Get the default data rate if none is specified (default differs between\n # ADS1015 and ADS1115).\n if arg_3 is None:\n arg_3 = arg_0._data_rate_default()\n # Set the data rate (this is controlled by the subclass as it differs\n # between ADS1015 and ADS1115).\n arg_11 |= arg_0._data_rate_config(arg_3)\n # Enable window mode if required.\n if not arg_8:\n arg_11 |= ADS1x15_CONFIG_COMP_WINDOW\n # Enable active high mode if required.\n if not arg_7:\n arg_11 |= ADS1x15_CONFIG_COMP_ACTIVE_HIGH\n # Enable latching mode if required.\n if arg_9:\n arg_11 |= ADS1x15_CONFIG_COMP_LATCHING\n # Set number of comparator hits before alerting.\n arg_11 |= ADS1x15_CONFIG_COMP_QUE[arg_10]\n # Send the config value to start the ADC conversion.\n # Explicitly break the 16-bit value down to a big endian pair of bytes.\n arg_0._device.writeList(ADS1x15_POINTER_CONFIG, [(arg_11 >> 8) & 0xFF, arg_11 & 0xFF])\n # Wait for the ADC sample to finish based on the sample rate plus a\n # small offset to be sure (0.1 millisecond).\n time.sleep(1.0/arg_3+0.0001)\n # Retrieve the result.\n arg_12 = arg_0._device.readList(ADS1x15_POINTER_CONVERSION, 2)\n return arg_0._conversion_value(arg_12[1], arg_12[0])"} +{"_id": "doc_5558", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"Read a single ADC channel and return the ADC value as a signed integer\n result. Channel must be a value within 0-3.\n \"\"\"\n assert 0 <= arg_1 <= 3, 'Channel must be a value within 0-3!'\n # Perform a single shot read and set the mux value to the channel plus\n # the highest bit (bit 3) set.\n return arg_0._read(arg_1 + 0x04, arg_2, arg_3, ADS1x15_CONFIG_MODE_SINGLE)"} +{"_id": "doc_5559", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"\n Expand the given address into one or more normalized strings.\n\n Required\n --------\n @param address: the address as either Unicode or a UTF-8 encoded string\n\n Options\n -------\n @param languages: a tuple or list of ISO language code strings (e.g. \"en\", \"fr\", \"de\", etc.)\n to use in expansion. If None is passed, use language classifier\n to detect language automatically.\n @param address_components: an integer (bit-set) of address component expansions\n to use e.g. ADDRESS_NAME | ADDRESS_STREET would use\n only expansions which apply to venue names or streets.\n @param latin_ascii: use the Latin to ASCII transliterator, which normalizes e.g. 
\u00e6 => ae\n @param transliterate: use any available transliterators for non-Latin scripts, e.g.\n for the Greek phrase \u03b4\u03b9\u03b1\u03c6\u03bf\u03c1\u03b5\u03c4\u03b9\u03ba\u03bf\u03cd\u03c2 becomes diaphoretiko\u00fas\u0331\n @param strip_accents: strip accented characters e.g. \u00e9 => e, \u00e7 => c. This loses some\n information in various languages, but in general we want\n @param decompose: perform Unicode normalization (NFD form)\n @param lowercase: UTF-8 lowercase the string\n @param trim_string: trim spaces on either side of the string\n @param replace_word_hyphens: add version of the string replacing hyphens with space\n @param delete_word_hyphens: add version of the string with hyphens deleted\n @param replace_numeric_hyphens: add version of the string with numeric hyphens replaced \n e.g. 12345-6789 => 12345 6789\n @param delete_numeric_hyphens: add version of the string with numeric hyphens removed\n e.g. 12345-6789 => 123456789\n @param split_alpha_from_numeric: split tokens like CR17 into CR 17, helps with expansion\n of certain types of highway abbreviations\n @param delete_final_periods: remove final periods on abbreviations e.g. St. => St\n @param delete_acronym_periods: remove periods in acronyms e.g. U.S.A. => USA\n @param drop_english_possessives: normalize possessives e.g. Mark's => Marks\n @param delete_apostrophes: delete other types of hyphens e.g. O'Malley => OMalley\n @param expand_numex: converts numeric expressions e.g. Twenty sixth => 26th,\n using either the supplied languages or the result of\n automated language classification.\n @param roman_numerals: normalize Roman numerals e.g. IX => 9. Since these can be\n ambiguous (especially I and V), turning this on simply\n adds another version of the string if any potential\n Roman numerals are found.\n \"\"\"\n arg_0 = safe_decode(arg_0, 'utf-8')\n return _expand.Func(arg_0, arg_1=arg_1, **arg_2)"}
{"_id": "doc_5560", "title": "", "text": "def Func(arg_0, arg_1=arg_2,\n arg_3=arg_4,\n arg_5=True, arg_6=False,\n arg_7=None):\n '''\n Normalizes a string, tokenizes, and normalizes each token\n with string and token-level options.\n\n This version only uses libpostal's deterministic normalizations\n i.e. methods with a single output. The string tree version will\n return multiple normalized strings, each with tokens.\n\n Usage:\n normalized_tokens(u'St.-Barth\u00e9lemy')\n '''\n arg_0 = safe_decode(arg_0)\n Func = _normalize.normalized_tokens(arg_0, arg_1, arg_3, arg_6, arg_7=arg_7)\n\n if arg_5:\n Func = remove_parens(Func)\n\n return [(arg_0, token_types.from_id(arg_9)) for arg_0, arg_9 in Func]"}
{"_id": "doc_5561", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Parse address into components.\n\n @param address: the address as either Unicode or a UTF-8 encoded string\n @param language (optional): language code\n @param country (optional): country code\n \"\"\"\n arg_0 = safe_decode(arg_0, 'utf-8')\n return _parser.Func(arg_0, arg_1=arg_1, arg_2=arg_2)"}
{"_id": "doc_5562", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"\n Hash the given address into normalized strings that can be used to group similar\n addresses together for more detailed pairwise comparison. This can be thought of\n as the blocking function in record linkage or locality-sensitive hashing in the\n document near-duplicate detection.\n\n Required\n --------\n @param labels: array of component labels as either Unicode or UTF-8 encoded strings\n e.g. [\"house_number\", \"road\", \"postcode\"]\n @param values: array of component values as either Unicode or UTF-8 encoded strings\n e.g. [\"123\", \"Broadway\", \"11216\"]. Note len(values) must be equal to\n len(labels).\n\n Options\n -------\n @param languages: a tuple or list of ISO language code strings (e.g. \"en\", \"fr\", \"de\", etc.)\n to use in expansion. If None is passed, use language classifier\n to detect language automatically.\n @param with_name: use name in the hashes\n @param with_address: use house_number & street in the hashes\n @param with_unit: use secondary unit as part of the hashes\n @param with_city_or_equivalent: use the city, city_district, suburb, or island name as one of\n the geo qualifiers\n @param with_small_containing_boundaries: use small containing boundaries (currently state_district)\n as one of the geo qualifiers\n @param with_postal_code: use postal code as one of the geo qualifiers\n @param with_latlon: use geohash + neighbors as one of the geo qualifiers\n @param latitude: latitude (Y coordinate)\n @param longitude: longitude (X coordinate)\n @param geohash_precision: geohash tile size (default = 6)\n @param name_and_address_keys: include keys with name + address + geo\n @param name_only_keys: include keys with name + geo\n @param address_only_keys: include keys with address + geo\n \"\"\"\n return _near_dupe.Func(arg_0, arg_1, arg_2=arg_2, **arg_3)"}
{"_id": "doc_5563", "title": "", "text": "def Func():\n \"\"\"Removes all dusty containers with 'Exited' in their status\"\"\"\n arg_0 = get_docker_client()\n arg_1 = get_exited_dusty_containers()\n arg_2 = []\n for arg_3 in arg_1:\n log_to_client(\"Removing container {}\".format(arg_3['Names'][0]))\n try:\n arg_0.remove_container(arg_3['Id'], v=True)\n arg_2.append(arg_3)\n except Exception as e:\n log_to_client(e.message or str(e))\n return arg_2"}
{"_id": "doc_5564", "title": "", "text": "def Func():\n \"\"\"Removes all dangling images as well as all images referenced in a dusty spec; forceful removal is not used\"\"\"\n arg_0 = get_docker_client()\n arg_1 = _remove_dangling_images()\n arg_2 = get_dusty_images()\n arg_3 = arg_0.images(all=True)\n for arg_4 in arg_3:\n if set(arg_4['RepoTags']).intersection(arg_2):\n try:\n arg_0.remove_image(arg_4['Id'])\n except Exception as e:\n logging.info(\"Couldn't remove image {}\".format(arg_4['RepoTags']))\n else:\n log_to_client(\"Removed Image {}\".format(arg_4['RepoTags']))\n arg_1.append(arg_4)\n return arg_1"}
{"_id": "doc_5565", "title": "", "text": "def Func(arg_0):\n \"\"\"Write the given config to disk as a Dusty sub-config\n in the Nginx includes directory. 
Then, either start nginx\n or tell it to reload its config to pick up what we've\n just written.\"\"\"\n logging.info('Updating nginx with new Dusty config')\n arg_1 = tempfile.mkdtemp()\n os.mkdir(os.path.join(arg_1, 'html'))\n _write_nginx_config(constants.NGINX_BASE_CONFIG, os.path.join(arg_1, constants.NGINX_PRIMARY_CONFIG_NAME))\n _write_nginx_config(arg_0['http'], os.path.join(arg_1, constants.NGINX_HTTP_CONFIG_NAME))\n _write_nginx_config(arg_0['stream'], os.path.join(arg_1, constants.NGINX_STREAM_CONFIG_NAME))\n _write_nginx_config(constants.NGINX_502_PAGE_HTML, os.path.join(arg_1, 'html', constants.NGINX_502_PAGE_NAME))\n sync_local_path_to_vm(arg_1, constants.NGINX_CONFIG_DIR_IN_VM)"} +{"_id": "doc_5566", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"We require the list of all remote repo paths to be passed in\n to this because otherwise we would need to import the spec assembler\n in this module, which would give us circular imports.\"\"\"\n arg_3 = None\n for arg_4 in arg_1:\n if arg_4.remote_path == arg_2: # user passed in a full name\n return arg_4\n if arg_2 == arg_4.short_name:\n if arg_3 is None:\n arg_3 = arg_4\n else:\n raise RuntimeError('Short repo name {} is ambiguous. It matches both {} and {}'.format(arg_2,\n arg_3.remote_path,\n arg_4.remote_path))\n if arg_3 is None:\n raise RuntimeError('Short repo name {} does not match any known repos'.format(arg_2))\n return arg_3"} +{"_id": "doc_5567", "title": "", "text": "def Func(arg_0):\n \"\"\"Daemon-side command to ensure we're running the latest\n versions of any managed repos, including the\n specs repo, before we do anything else in the up flow.\"\"\"\n if arg_0:\n update_managed_repos(force=True)\n arg_1 = spec_assembler.get_assembled_specs()\n if not arg_1[constants.CONFIG_BUNDLES_KEY]:\n raise RuntimeError('No bundles are activated. Use `dusty bundles` to activate bundles before running `dusty up`.')\n virtualbox.initialize_docker_vm()"} +{"_id": "doc_5568", "title": "", "text": "def Func(arg_0):\n \"\"\"This command will use the compilers to get compose specs\n will pass those specs to the systems that need them. Those\n systems will in turn launch the services needed to make the\n local environment go.\"\"\"\n\n arg_1 = spec_assembler.get_assembled_specs()\n arg_2 = virtualbox.required_absent_assets(arg_1)\n if arg_2:\n raise RuntimeError('Assets {} are specified as required but are not set. Set them with `dusty assets set`'.format(arg_2))\n\n arg_3 = virtualbox.get_docker_vm_ip()\n\n # Stop will fail if we've never written a Composefile before\n if os.path.exists(constants.COMPOSEFILE_PATH):\n try:\n stop_apps_or_services(rm_containers=arg_0)\n except CalledProcessError as e:\n log_to_client(\"WARNING: docker-compose stop failed\")\n log_to_client(str(e))\n\n daemon_warnings.clear_namespace('disk')\n arg_4 = virtualbox.get_docker_vm_disk_info(as_dict=True)\n if 'M' in arg_4['free'] or 'K' in arg_4['free']:\n arg_5 = 'VM is low on disk. 
Available disk: {}'.format(arg_4['free'])\n daemon_warnings.warn('disk', arg_5)\n log_to_client(arg_5)\n\n log_to_client(\"Compiling together the assembled specs\")\n arg_6 = spec_assembler.get_all_repos(active_only=True, include_specs_repo=False)\n log_to_client(\"Compiling the port specs\")\n arg_7 = port_spec_compiler.get_port_spec_document(arg_1, arg_3)\n log_to_client(\"Compiling the nginx config\")\n arg_8 = virtualbox.get_docker_bridge_ip()\n arg_9 = nginx_compiler.get_nginx_configuration_spec(arg_7, arg_8)\n log_to_client(\"Creating setup and script bash files\")\n make_up_command_files(arg_1, arg_7)\n log_to_client(\"Compiling docker-compose config\")\n arg_10 = compose_compiler.get_compose_dict(arg_1, arg_7)\n\n log_to_client(\"Saving port forwarding to hosts file\")\n hosts.update_hosts_file_from_port_spec(arg_7)\n log_to_client(\"Configuring NFS\")\n nfs.configure_nfs()\n log_to_client(\"Saving updated nginx config to the VM\")\n nginx.update_nginx_from_config(arg_9)\n log_to_client(\"Saving Docker Compose config and starting all containers\")\n compose.update_running_containers_from_spec(arg_10, arg_0=arg_0)\n\n log_to_client(\"Your local environment is now started!\")"} +{"_id": "doc_5569", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Restart any containers associated with Dusty, or associated with\n the provided app_or_service_names.\"\"\"\n if arg_0:\n log_to_client(\"Restarting the following apps or services: {}\".format(', '.join(arg_0)))\n else:\n log_to_client(\"Restarting all active containers associated with Dusty\")\n\n if arg_0:\n arg_1 = spec_assembler.get_assembled_specs()\n arg_2 = [arg_1['apps'][app_name] for app_name in arg_0 if app_name in arg_1['apps']]\n arg_3 = set()\n for arg_4 in arg_2:\n if arg_4['repo']:\n arg_3 = arg_3.union(spec_assembler.get_same_container_repos_from_spec(arg_4))\n nfs.update_nfs_with_repos(arg_3)\n else:\n nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False))\n compose.restart_running_services(arg_0)"} +{"_id": "doc_5570", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a dictionary containing the Compose spec required to run\n Dusty's nginx container used for host forwarding.\"\"\"\n arg_1 = {'image': constants.NGINX_IMAGE,\n 'volumes': ['{}:{}'.format(constants.NGINX_CONFIG_DIR_IN_VM, constants.NGINX_CONFIG_DIR_IN_CONTAINER)],\n 'command': 'nginx -g \"daemon off;\" -c /etc/nginx/conf.d/nginx.primary',\n 'container_name': 'dusty_{}_1'.format(constants.DUSTY_NGINX_NAME)}\n arg_2 = set([nginx_spec['host_port'] for nginx_spec in arg_0['nginx']])\n if arg_2:\n arg_1['ports'] = []\n for arg_3 in arg_2:\n arg_1['ports'].append('{0}:{0}'.format(arg_3))\n return {constants.DUSTY_NGINX_NAME: arg_1}"} +{"_id": "doc_5571", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Given the assembled specs and app_name, this function will return all apps and services specified in\n 'conditional_links' if they are specified in 'apps' or 'services' in assembled_specs. 
That means that\n some other part of the system has declared them as necessary, so they should be linked to this app \"\"\"\n arg_2 = []\n arg_3 = arg_0['apps'][arg_1]['conditional_links']\n for arg_4 in arg_3['apps']:\n if arg_4 in arg_0['apps']:\n arg_2.append(arg_4)\n for arg_4 in arg_3['services']:\n if arg_4 in arg_0['services']:\n arg_2.append(arg_4)\n return arg_2"} +{"_id": "doc_5572", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" This function returns a dictionary of the docker-compose.yml specifications for one app \"\"\"\n logging.info(\"Compose Compiler: Compiling dict for app {}\".format(arg_0))\n arg_3 = arg_1['apps'][arg_0]\n arg_4 = arg_3[\"compose\"]\n _apply_env_overrides(env_overrides_for_app_or_service(arg_0), arg_4)\n if 'image' in arg_3 and 'build' in arg_3:\n raise RuntimeError(\"image and build are both specified in the spec for {}\".format(arg_0))\n elif 'image' in arg_3:\n logging.info\n arg_4['image'] = arg_3['image']\n elif 'build' in arg_3:\n arg_4['build'] = _get_build_path(arg_3)\n else:\n raise RuntimeError(\"Neither image nor build was specified in the spec for {}\".format(arg_0))\n arg_4['entrypoint'] = []\n arg_4['command'] = _compile_docker_command(arg_3)\n arg_4['container_name'] = \"dusty_{}_1\".format(arg_0)\n logging.info(\"Compose Compiler: compiled command {}\".format(arg_4['command']))\n arg_4['links'] = _links_for_app(arg_3, arg_1)\n logging.info(\"Compose Compiler: links {}\".format(arg_4['links']))\n arg_4['volumes'] = arg_4['volumes'] + _get_compose_volumes(arg_0, arg_1)\n logging.info(\"Compose Compiler: volumes {}\".format(arg_4['volumes']))\n arg_5 = _get_ports_list(arg_0, arg_2)\n if arg_5:\n arg_4['ports'] = arg_5\n logging.info(\"Compose Compiler: ports {}\".format(arg_5))\n arg_4['user'] = 'root'\n return arg_4"} +{"_id": "doc_5573", "title": "", "text": "def Func(arg_0):\n \"\"\"This function returns a dictionary of the docker_compose specifications\n for one service. Currently, this is just the Dusty service spec with\n an additional volume mount to support Dusty's cp functionality.\"\"\"\n arg_1 = arg_0.plain_dict()\n _apply_env_overrides(env_overrides_for_app_or_service(arg_0.name), arg_1)\n arg_1.setdefault('volumes', []).append(_get_cp_volume_mount(arg_0.name))\n arg_1['container_name'] = \"dusty_{}_1\".format(arg_0.name)\n return arg_1"} +{"_id": "doc_5574", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns a list of formatted port mappings for an app \"\"\"\n if arg_0 not in arg_1['docker_compose']:\n return []\n return [\"{}:{}\".format(arg_2['mapped_host_port'], arg_2['in_container_port'])\n for arg_2 in arg_1['docker_compose'][arg_0]]"} +{"_id": "doc_5575", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" This returns formatted volume specifications for a docker-compose app. 
We mount the app\n as well as any libs it needs so that local code is used in our container, instead of whatever\n code was in the docker image.\n\n Additionally, we create a volume for the /cp directory used by Dusty to facilitate\n easy file transfers using `dusty cp`.\"\"\"\n arg_2 = []\n arg_2.append(_get_cp_volume_mount(arg_0))\n arg_2 += get_app_volume_mounts(arg_0, arg_1)\n return arg_2"} +{"_id": "doc_5576", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Expands specs.libs.depends.libs to include any indirectly required libs\n \"\"\"\n for arg_1, arg_2 in arg_0['libs'].iteritems():\n if 'depends' in arg_2 and 'libs' in arg_2['depends']:\n arg_2['depends']['libs'] = _get_dependent('libs', arg_1, arg_0, 'libs')"} +{"_id": "doc_5577", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns all libs that are referenced in specs.apps.depends.libs\n \"\"\"\n arg_1 = set()\n for arg_2 in arg_0['apps'].values():\n for arg_3 in arg_2['depends']['libs']:\n arg_1.add(arg_3)\n return arg_1"} +{"_id": "doc_5578", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns all services that are referenced in specs.apps.depends.services,\n or in specs.bundles.services\n \"\"\"\n arg_1 = set()\n for arg_2 in arg_0['apps'].values():\n for arg_3 in arg_2['depends']['services']:\n arg_1.add(arg_3)\n for arg_4 in arg_0['bundles'].values():\n for arg_3 in arg_4['services']:\n arg_1.add(arg_3)\n return arg_1"} +{"_id": "doc_5579", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function adds an assets key to the specs, which is filled in with a dictionary\n of all assets defined by apps and libs in the specs\n \"\"\"\n arg_0['assets'] = {}\n for arg_1 in arg_0.get_apps_and_libs():\n for arg_2 in arg_1['assets']:\n if not arg_0['assets'].get(arg_2['name']):\n arg_0['assets'][arg_2['name']] = {}\n arg_0['assets'][arg_2['name']]['required_by'] = set()\n arg_0['assets'][arg_2['name']]['used_by'] = set()\n arg_0['assets'][arg_2['name']]['used_by'].add(arg_1.name)\n if arg_2['required']:\n arg_0['assets'][arg_2['name']]['required_by'].add(arg_1.name)"} +{"_id": "doc_5580", "title": "", "text": "def Func(arg_0):\n \"\"\" This function takes an app or library name and will return the corresponding repo\n for that app or library\"\"\"\n arg_1 = get_specs()\n arg_2 = arg_1.get_app_or_lib(arg_0)['repo']\n if not arg_2:\n return None\n return Repo(arg_2)"} +{"_id": "doc_5581", "title": "", "text": "def Func(arg_0):\n \"\"\"Given the spec of an app or library, returns all repos that are guaranteed\n to live in the same container\"\"\"\n arg_1 = set()\n arg_2 = get_repo_of_app_or_library(arg_0.name)\n if arg_2 is not None:\n arg_1.add(arg_2)\n for arg_3 in arg_0['depends']['libs']:\n arg_1.add(get_repo_of_app_or_library(arg_3))\n return arg_1"} +{"_id": "doc_5582", "title": "", "text": "def Func(arg_0):\n \"\"\"Given the name of an app or library, returns all repos that are guaranteed\n to live in the same container\"\"\"\n arg_1 = get_expanded_libs_specs()\n arg_2 = arg_1.get_app_or_lib(arg_0)\n return Func_from_spec(arg_2)"} +{"_id": "doc_5583", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a string of all host rules required to match\n the given spec. 
This string is wrapped in the Dusty hosts\n header and footer so it can be easily removed later.\"\"\"\n arg_1 = ''.join(['{} {}\\n'.format(spec['forwarded_ip'], spec['host_address']) for spec in arg_0])\n return config_file.create_config_section(arg_1)"} +{"_id": "doc_5584", "title": "", "text": "def Func(arg_0):\n \"\"\"Moves the temporary binary to the location of the binary that's currently being run.\n Preserves owner, group, and permissions of original binary\"\"\"\n # pylint: disable=E1101\n arg_1 = _get_binary_location()\n if not arg_1.endswith(constants.DUSTY_BINARY_NAME):\n raise RuntimeError('Refusing to overwrite binary {}'.format(arg_1))\n arg_2 = os.stat(arg_1)\n arg_3 = arg_2.st_mode\n arg_4 = arg_2.st_uid\n arg_5 = arg_2.st_gid\n shutil.move(arg_0, arg_1)\n os.chown(arg_1, arg_4, arg_5)\n os.chmod(arg_1, arg_3)\n return arg_1"} +{"_id": "doc_5585", "title": "", "text": "def Func(arg_0=arg_1.cpu_count()):\n \"\"\"Context manager for setting up a TaskQueue. Upon leaving the\n context manager, all tasks that were enqueued will be executed\n in parallel subject to `pool_size` concurrency constraints.\"\"\"\n arg_3 = TaskQueue(arg_0)\n yield arg_3\n arg_3.execute()"} +{"_id": "doc_5586", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"This will output the nginx stream config string for specific port spec \"\"\"\n arg_2 = \"\\t server {\\n\"\n arg_2 += \"\\t \\t {}\\n\".format(_nginx_listen_string(arg_0))\n arg_2 += \"\\t \\t {}\\n\".format(_nginx_proxy_string(arg_0, arg_1))\n arg_2 += \"\\t }\\n\"\n return arg_2"} +{"_id": "doc_5587", "title": "", "text": "def Func(arg_0):\n \"\"\"Starting with Yosemite, launchd was rearchitected and now only one\n launchd process runs for all users. This allows us to much more easily\n impersonate a user through launchd and extract the environment\n variables from their running processes.\"\"\"\n arg_1 = subprocess.check_output(['id', '-u', arg_0])\n arg_2 = subprocess.check_output(['launchctl', 'asuser', arg_1, 'launchctl', 'getenv', 'SSH_AUTH_SOCK']).rstrip()\n _set_ssh_auth_sock(arg_2)"} +{"_id": "doc_5588", "title": "", "text": "def Func():\n \"\"\"\n Will check the mac_username config value; if it is present, will load that user's\n SSH_AUTH_SOCK environment variable to the current environment. This allows git clones\n to behave the same for the daemon as they do for the user\n \"\"\"\n\n arg_0 = get_config_value(constants.CONFIG_MAC_USERNAME_KEY)\n if not arg_0:\n logging.info(\"Can't setup ssh authorization; no mac_username specified\")\n return\n if not _running_on_mac(): # give our Linux unit tests a way to not freak out\n logging.info(\"Skipping SSH load, we are not running on Mac\")\n return\n\n if _mac_version_is_post_yosemite():\n _load_ssh_auth_post_yosemite(arg_0)\n else:\n _load_ssh_auth_pre_yosemite()"} +{"_id": "doc_5589", "title": "", "text": "def Func(arg_0):\n \"\"\"Recursively delete a path upon exiting this context\n manager. Supports targets that are files or directories.\"\"\"\n try:\n yield\n finally:\n if os.path.exists(arg_0):\n if os.path.isdir(arg_0):\n shutil.rmtree(arg_0)\n else:\n os.remove(arg_0)"} +{"_id": "doc_5590", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"Copy a path from the local filesystem to a path inside a Dusty\n container. 
The files on the local filesystem must be accessible\n by the user specified in mac_username.\"\"\"\n if not os.path.exists(arg_0):\n raise RuntimeError('ERROR: Path {} does not exist'.format(arg_0))\n arg_4 = str(uuid.uuid1())\n if os.path.isdir(arg_0):\n sync_local_path_to_vm(arg_0, os.path.join(vm_cp_path(arg_1), arg_4), arg_3=arg_3)\n move_dir_inside_container(arg_1, os.path.join(constants.CONTAINER_CP_DIR, arg_4), arg_2)\n else:\n sync_local_path_to_vm(arg_0, os.path.join(vm_cp_path(arg_1), arg_4), arg_3=arg_3)\n move_file_inside_container(arg_1, os.path.join(constants.CONTAINER_CP_DIR, arg_4), arg_2)"} +{"_id": "doc_5591", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Given a dictionary containing the expanded dusty DAG specs this function will\n return a dictionary containing the port mappings needed by downstream methods. Currently\n this includes docker_compose, virtualbox, nginx and hosts_file.\"\"\"\n arg_2 = 65000\n arg_3 = {'docker_compose':{}, 'nginx':[], 'hosts_file':[]}\n arg_4, arg_5, arg_6 = set(), set(), set()\n # No matter the order of apps in expanded_active_specs, we want to produce a consistent\n # port_spec with respect to the apps and the ports they are outputted on\n for arg_7 in sorted(arg_0['apps'].keys()):\n arg_8 = arg_0['apps'][arg_7]\n if 'host_forwarding' not in arg_8:\n continue\n arg_3['docker_compose'][arg_7] = []\n for arg_9 in arg_8['host_forwarding']:\n # These functions are just used for validating the set of specs all works together\n _add_full_addresses(arg_9, arg_4)\n if arg_9['type'] == 'stream':\n _add_stream_host_port(arg_9, arg_6)\n\n arg_3['docker_compose'][arg_7].append(_docker_compose_port_spec(arg_9, arg_2))\n arg_3['nginx'].append(_nginx_port_spec(arg_9, arg_2, arg_1))\n\n _add_host_names(arg_9, arg_1, arg_3, arg_5)\n arg_2 += 1\n return arg_3"} +{"_id": "doc_5592", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the Docker registry host associated with\n a given image name.\"\"\"\n if '/' not in arg_0: # official image\n return constants.PUBLIC_DOCKER_REGISTRY\n arg_1 = arg_0.split('/')[0]\n if '.' not in arg_1: # user image on official repository, e.g. thieman/clojure\n return constants.PUBLIC_DOCKER_REGISTRY\n return arg_1"} +{"_id": "doc_5593", "title": "", "text": "def Func():\n \"\"\"Reads the local Docker client config for the current user\n and returns all registries to which the user may be logged in.\n This is intended to be run client-side, not by the daemon.\"\"\"\n arg_0 = set()\n if not os.path.exists(constants.DOCKER_CONFIG_PATH):\n return arg_0\n arg_1 = json.load(open(constants.DOCKER_CONFIG_PATH, 'r'))\n for arg_2 in arg_1.get('auths', {}).iterkeys():\n try:\n arg_3 = urlparse(arg_2)\n except Exception:\n log_to_client('Error parsing registry {} from Docker config, will skip this registry').format(arg_2)\n # This logic assumes the auth is either of the form\n # gamechanger.io (no scheme, no path after host) or\n # of the form https://index.docker.io/v1/ (scheme,\n # netloc parses correctly, additional path does not matter).\n # These are the formats I saw in my personal config file,\n # not sure what other formats it might accept.\n arg_0.add(arg_3.netloc) if arg_3.netloc else arg_0.add(arg_3.path)\n return arg_0"} +{"_id": "doc_5594", "title": "", "text": "def Func():\n \"\"\"Puts the client logger into streaming mode, which sends\n unbuffered input through to the socket one character at a time.\n We also disable propagation so the root logger does not\n receive many one-byte emissions. 
This context handler\n was originally created for streaming Compose up's\n terminal output through to the client and should only be\n used for similarly complex circumstances.\"\"\"\n for arg_0 in arg_2.handlers:\n if hasattr(arg_0, 'append_newlines'):\n break\n else:\n arg_0 = None\n arg_1 = arg_2.propagate\n arg_2.propagate = False\n if arg_0 is not None:\n arg_4 = arg_0.append_newlines\n arg_0.append_newlines = False\n yield\n arg_2.propagate = arg_1\n if arg_0 is not None:\n arg_0.append_newlines = arg_4"} +{"_id": "doc_5595", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" This is used to compile the command that will be run when the docker container starts\n up. This command has to install any libs that the app uses, run the `always` command, and\n run the `once` command if the container is being launched for the first time \"\"\"\n arg_3 = arg_1['apps'][arg_0]\n arg_4 = ['set -e']\n arg_4 += _lib_install_commands_for_app(arg_0, arg_1)\n if arg_3['mount']:\n arg_4.append(\"cd {}\".format(container_code_path(arg_3)))\n arg_4.append(\"export PATH=$PATH:{}\".format(container_code_path(arg_3)))\n arg_4 += _copy_assets_commands_for_app(arg_3, arg_1)\n arg_4 += _get_once_commands(arg_3, arg_2)\n arg_4 += _get_always_commands(arg_3)\n return arg_4"} +{"_id": "doc_5596", "title": "", "text": "def Func():\n \"\"\"Raise the open file handles permitted by the Dusty daemon process\n and its child processes. The number we choose here needs to be within\n the OS X default kernel hard limit, which is 10240.\"\"\"\n logging.info('Increasing file handle limit to {}'.format(constants.FILE_HANDLE_LIMIT))\n resource.setrlimit(resource.RLIMIT_NOFILE,\n (constants.FILE_HANDLE_LIMIT, resource.RLIM_INFINITY))"} +{"_id": "doc_5597", "title": "", "text": "def Func():\n \"\"\"Start the daemon's HTTP server on a separate thread.\n This server is only used for servicing container status\n requests from Dusty's custom 502 page.\"\"\"\n logging.info('Starting HTTP server at {}:{}'.format(constants.DAEMON_HTTP_BIND_IP,\n constants.DAEMON_HTTP_BIND_PORT))\n arg_0 = threading.Thread(target=http_server.app.run, args=(constants.DAEMON_HTTP_BIND_IP,\n constants.DAEMON_HTTP_BIND_PORT))\n arg_0.daemon = True\n arg_0.start()"} +{"_id": "doc_5598", "title": "", "text": "def Func():\n \"\"\"Ripped off and slightly modified based on docker-py's\n kwargs_from_env utility function.\"\"\"\n arg_0 = get_docker_env()\n arg_1, arg_2, arg_3 = arg_0['DOCKER_HOST'], arg_0['DOCKER_CERT_PATH'], arg_0['DOCKER_TLS_VERIFY']\n\n arg_4 = {'base_url': arg_1.replace('tcp://', 'https://'),\n 'timeout': None,\n 'version': 'auto'}\n if arg_3 and arg_2:\n arg_4['tls'] = docker.tls.TLSConfig(\n client_cert=(os.path.join(arg_2, 'cert.pem'),\n os.path.join(arg_2, 'key.pem')),\n ca_cert=os.path.join(arg_2, 'ca.pem'),\n verify=True,\n ssl_version=None,\n assert_hostname=False)\n return docker.Client(**arg_4)"} +{"_id": "doc_5599", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Get a list of containers associated with the list\n of services. 
If no services are provided, attempts to\n return all containers associated with Dusty.\"\"\"\n arg_2 = get_docker_client()\n if arg_0:\n arg_3 = [get_container_for_app_or_service(service, arg_1=arg_1) for service in arg_0]\n return [arg_4 for arg_4 in arg_3 if arg_4]\n else:\n return [arg_4\n for arg_4 in arg_2.containers(all=arg_1)\n if any(arg_5.startswith('/dusty') for arg_5 in arg_4.get('Names', []))]"} +{"_id": "doc_5600", "title": "", "text": "def Func():\n \"\"\"\n This function is used with `dusty up`. It will check all active repos to see if\n they are exported. If any are missing, it will replace current dusty exports with\n exports that are needed for currently active repos, and restart\n the nfs server\n \"\"\"\n arg_0 = get_all_repos(active_only=True, include_specs_repo=False)\n\n arg_1 = _get_current_exports()\n arg_2 = _get_exports_for_repos(arg_0)\n\n _ensure_managed_repos_dir_exists()\n\n if not arg_2.difference(arg_1):\n if not _server_is_running():\n _restart_server()\n return\n\n _write_exports_config(arg_2)\n _restart_server()"} +{"_id": "doc_5601", "title": "", "text": "def Func():\n \"\"\"\n Our exports file will be invalid if this folder doesn't exist, and the NFS server\n will not run correctly.\n \"\"\"\n if not os.path.exists(constants.REPOS_DIR):\n os.makedirs(constants.REPOS_DIR)"} +{"_id": "doc_5602", "title": "", "text": "def Func(arg_0):\n \"\"\"Given an existing Funcr ID, return any new lines from the\n log since the last time the Funcr was Funcd.\"\"\"\n global arg_6\n arg_1 = arg_6[arg_0]\n\n arg_2 = get_docker_client()\n try:\n arg_3 = arg_2.inspect_container(arg_1.container_id)['State']['Status']\n except Exception as e:\n arg_3 = 'unknown'\n arg_4 = arg_2.logs(arg_1.container_id,\n stdout=True,\n stderr=True,\n stream=False,\n timestamps=False,\n since=calendar.timegm(arg_1.offset.timetuple()))\n\n arg_5 = Consumer(arg_1.container_id, datetime.utcnow())\n arg_6[arg_7(arg_0)] = arg_5\n\n arg_8 = jsonify({'logs': arg_4, 'status': arg_3})\n arg_8.headers['Access-Control-Allow-Origin'] = '*'\n arg_8.headers['Access-Control-Allow-Methods'] = 'GET, POST'\n return arg_8"} +{"_id": "doc_5603", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\" This returns a list of formatted volume specs for an app. 
These mounts declared in the apps' spec\n and mounts declared in all lib specs the app depends on\"\"\"\n arg_3 = arg_1['apps'][arg_0]\n arg_4 = [get_command_files_volume_mount(arg_0, arg_2=arg_2)]\n arg_4.append(get_asset_volume_mount(arg_0))\n arg_5 = _get_app_repo_volume_mount(arg_3)\n if arg_5:\n arg_4.append(arg_5)\n arg_4 += _get_app_libs_volume_mounts(arg_0, arg_1)\n return arg_4"} +{"_id": "doc_5604", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns a list of the formatted volume specs for a lib\"\"\"\n arg_2 = [_get_lib_repo_volume_mount(arg_1['libs'][arg_0])]\n arg_2.append(get_command_files_volume_mount(arg_0, test=True))\n for arg_3 in arg_1['libs'][arg_0]['depends']['libs']:\n arg_4 = arg_1['libs'][arg_3]\n arg_2.append(_get_lib_repo_volume_mount(arg_4))\n return arg_2"} +{"_id": "doc_5605", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns a list of the formatted volume mounts for all libs that an app uses \"\"\"\n arg_2 = []\n for arg_3 in arg_1['apps'][arg_0]['depends']['libs']:\n arg_4 = arg_1['libs'][arg_3]\n arg_2.append(\"{}:{}\".format(Repo(arg_4['repo']).vm_path, container_code_path(arg_4)))\n return arg_2"} +{"_id": "doc_5606", "title": "", "text": "def Func():\n \"\"\"Initialize the Dusty VM if it does not already exist.\"\"\"\n if not _dusty_vm_exists():\n log_to_client('Initializing new Dusty VM with Docker Machine')\n arg_0 = ['--driver', 'virtualbox',\n '--virtualbox-cpu-count', '-1',\n '--virtualbox-boot2docker-url', constants.CONFIG_BOOT2DOCKER_URL,\n '--virtualbox-memory', str(get_config_value(constants.CONFIG_VM_MEM_SIZE)),\n '--virtualbox-hostonly-nictype', constants.VM_NIC_TYPE]\n check_call_demoted(['docker-machine', 'create'] + arg_0 + [constants.VM_MACHINE_NAME],\n redirect_stderr=True)"} +{"_id": "doc_5607", "title": "", "text": "def Func():\n \"\"\"Start the Dusty VM if it is not already running.\"\"\"\n arg_0 = docker_vm_is_running()\n if not arg_0:\n log_to_client('Starting docker-machine VM {}'.format(constants.VM_MACHINE_NAME))\n _apply_nat_dns_host_resolver()\n _apply_nat_net_less_greedy_subnet()\n check_and_log_output_and_error_demoted(['docker-machine', 'start', constants.VM_MACHINE_NAME], quiet_on_success=True)\n return arg_0"} +{"_id": "doc_5608", "title": "", "text": "def Func():\n \"\"\"Using VBoxManage is 0.5 seconds or so faster than Machine.\"\"\"\n arg_0 = check_output_demoted(['VBoxManage', 'list', 'runningvms'])\n for arg_1 in arg_0.splitlines():\n if '\"{}\"'.format(constants.VM_MACHINE_NAME) in arg_1:\n return True\n return False"} +{"_id": "doc_5609", "title": "", "text": "def Func():\n \"\"\"Something in the VM chain, either VirtualBox or Machine, helpfully\n sets up localhost-to-VM forwarding on port 22. We can inspect this\n rule to determine the port on localhost which gets forwarded to\n 22 in the VM.\"\"\"\n for arg_0 in _get_vm_config():\n if arg_0.startswith('Forwarding'):\n arg_1 = arg_0.split('=')[1].strip('\"')\n arg_2, arg_3, arg_4, arg_5, arg_6, arg_7 = arg_1.split(',')\n if arg_2 == 'ssh' and arg_3 == 'tcp' and arg_7 == '22':\n return arg_5\n raise ValueError('Could not determine localhost port for SSH forwarding')"} +{"_id": "doc_5610", "title": "", "text": "def Func():\n \"\"\"Returns the MAC address assigned to the host-only adapter,\n using output from VBoxManage. 
Returned MAC address has no colons\n and is lower-cased.\"\"\"\n # Get the number of the host-only adapter\n arg_0 = _get_vm_config()\n for arg_1 in arg_0:\n if arg_1.startswith('hostonlyadapter'):\n adapter_number = int(arg_1[15:16])\n break\n else:\n raise ValueError('No host-only adapter is defined for the Dusty VM')\n\n for arg_1 in arg_0:\n if arg_1.startswith('macaddress{}'.format(adapter_number)):\n return arg_1.split('=')[1].strip('\"').lower()\n raise ValueError('Could not find MAC address for adapter number {}'.format(adapter_number))"} +{"_id": "doc_5611", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Given the rather-complex output from an 'ip addr show' command\n on the VM, parse the output to determine the IP address\n assigned to the interface with the given MAC.\"\"\"\n arg_2 = False\n for arg_3 in arg_0.splitlines():\n arg_3 = arg_3.strip()\n if arg_3.startswith('link/ether'):\n arg_4 = arg_3.split(' ')[1].replace(':', '')\n if arg_4 == arg_1:\n arg_2 = True\n elif arg_2 and arg_3.startswith('inet') and not arg_3.startswith('inet6'):\n arg_5 = arg_3.split(' ')[1].split('/')[0]\n return arg_5"} +{"_id": "doc_5612", "title": "", "text": "def Func():\n \"\"\"Determine the host-only IP of the Dusty VM through Virtualbox and SSH\n directly, bypassing Docker Machine. We do this because Docker Machine is\n much slower, taking about 600ms total. We are basically doing the same\n flow Docker Machine does in its own code.\"\"\"\n arg_0 = _get_host_only_mac_address()\n arg_1 = check_output_demoted(['ssh', '-o', 'StrictHostKeyChecking=no',\n '-o', 'UserKnownHostsFile=/dev/null',\n '-i', _vm_key_path(), '-p', _get_localhost_ssh_port(),\n 'docker@127.0.0.1', 'ip addr show'])\n return _ip_for_mac_from_ip_addr_show(arg_1, arg_0)"} +{"_id": "doc_5613", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Converts a python dict to a namedtuple, saving memory.\"\"\"\n arg_2 = arg_0.keys()\n arg_3 = arg_0.values()\n return json.loads(json.dumps(arg_0),\n object_hook=lambda d:\n namedtuple(arg_1, arg_2)(*arg_3))"} +{"_id": "doc_5614", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None, arg_3=None,\n arg_4='json', arg_5='daily'):\n \"\"\"By default, return latest EOD Composite Price for a stock ticker.\n On average, each feed contains 3 data sources.\n\n Supported tickers + Available Day Ranges are here:\n https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip\n\n Args:\n ticker (string): Unique identifier for stock ticker\n startDate (string): Start of ticker range in YYYY-MM-DD format\n endDate (string): End of ticker range in YYYY-MM-DD format\n fmt (string): 'csv' or 'json'\n frequency (string): Resample frequency\n \"\"\"\n arg_6 = arg_0._get_url(arg_1, arg_5)\n arg_7 = {\n 'format': arg_4 if arg_4 != \"object\" else 'json', # conversion local\n 'resampleFreq': arg_5\n }\n\n if arg_2:\n arg_7['startDate'] = arg_2\n if arg_3:\n arg_7['endDate'] = arg_3\n\n # TODO: evaluate whether to stream CSV to cache on disk, or\n # load as array in memory, or just pass plain text\n arg_8 = arg_0._request('GET', arg_6, arg_7=arg_7)\n if arg_4 == \"json\":\n return arg_8.json()\n elif arg_4 == \"object\":\n arg_9 = arg_8.json()\n return [dict_to_object(arg_10, \"TickerPrice\") for arg_10 in arg_9]\n else:\n return arg_8.content.decode(\"utf-8\")"} +{"_id": "doc_5615", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None, arg_3=None, arg_4=None, arg_5='daily'):\n\n \"\"\" Return a pandas.DataFrame of historical prices for one or more ticker symbols.\n\n By default, return 
latest EOD Composite Price for a list of stock tickers.\n On average, each feed contains 3 data sources.\n\n Supported tickers + Available Day Ranges are here:\n https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip\n or from the TiingoClient.list_tickers() method.\n\n Args:\n tickers (string/list): One or more unique identifiers for a stock ticker.\n startDate (string): Start of ticker range in YYYY-MM-DD format.\n endDate (string): End of ticker range in YYYY-MM-DD format.\n metric_name (string): Optional parameter specifying metric to be returned for each\n ticker. In the event of a single ticker, this is optional and if not specified\n all of the available data will be returned. In the event of a list of tickers,\n this parameter is required.\n frequency (string): Resample frequency (defaults to daily).\n \"\"\"\n\n arg_6 = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',\n 'adjClose', 'adjVolume', 'divCash', 'splitFactor']\n\n if arg_4 is not None and arg_4 not in arg_6:\n raise APIColumnNameError('Valid data items are: ' + str(arg_6))\n\n arg_7 = {\n 'format': 'json',\n 'resampleFreq': arg_5\n }\n if arg_2:\n arg_7['startDate'] = arg_2\n if arg_3:\n arg_7['endDate'] = arg_3\n\n if pandas_is_installed:\n if type(arg_1) is str:\n arg_8 = arg_1\n arg_9 = arg_0._get_url(arg_8, arg_5)\n arg_10 = arg_0._request('GET', arg_9, arg_7=arg_7)\n arg_11 = pd.DataFrame(arg_10.json())\n if arg_4 is not None:\n arg_12 = arg_11[arg_4]\n arg_12.index = arg_11['date']\n else:\n arg_12 = arg_11\n arg_12.index = arg_11['date']\n del (arg_12['date'])\n else:\n arg_12 = pd.DataFrame()\n for arg_8 in arg_1:\n arg_9 = arg_0._get_url(arg_8, arg_5)\n arg_10 = arg_0._request('GET', arg_9, arg_7=arg_7)\n arg_11 = pd.DataFrame(arg_10.json())\n arg_11.index = arg_11['date']\n arg_11.rename(arg_13=str, columns={arg_4: arg_8}, inplace=True)\n arg_12 = pd.concat([arg_12, arg_11[arg_8]], axis=1)\n arg_12.index = pd.to_datetime(arg_12.index)\n return arg_12\n else:\n arg_14 = (\"Pandas is not installed, but .get_ticker_price() was \"\n \"called with fmt=pandas. In order to install tiingo with \"\n \"pandas, reinstall with pandas as an optional dependency. 
\\n\"\n \"Install tiingo with pandas dependency: \\'pip install tiingo[pandas]\\'\\n\"\n \"Alternatively, just install pandas: pip install pandas.\")\n raise InstallPandasException(arg_14)"} +{"_id": "doc_5616", "title": "", "text": "def Func(arg_0):\n \"\"\"Make a local copy of the sqlite cookie database and return the new filename.\n This is necessary in case this database is still being written to while the user browses\n to avoid sqlite locking errors.\n \"\"\"\n # if type of cookie_file is a list, use the first element in the list\n if isinstance(arg_0, list):\n arg_0 = arg_0[0]\n \n # check if cookie file exists\n if os.path.exists(arg_0):\n # copy to random name in tmp folder\n arg_1 = tempfile.NamedTemporaryFile(suffix='.sqlite').name\n open(arg_1, 'wb').write(open(arg_0, 'rb').read())\n return arg_1\n else:\n raise BrowserCookieError('Can not find cookie file at: ' + arg_0)"} +{"_id": "doc_5617", "title": "", "text": "def Func(arg_0=\"\"):\n \"\"\"Try to Func cookies from all supported browsers and return combined cookiejar\n Optionally pass in a domain name to only Func cookies from the specified domain\n \"\"\"\n arg_1 = http.cookiejar.CookieJar()\n for arg_2 in [chrome, firefox]:\n try:\n for arg_3 in arg_2(arg_0=arg_0):\n arg_1.set_cookie(arg_3)\n except BrowserCookieError:\n pass\n return arg_1"} +{"_id": "doc_5618", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Decrypt encoded cookies\n \"\"\"\n\n if sys.platform == 'win32':\n return arg_0.Func_windows_chrome(arg_1, arg_2)\n\n if arg_1 or (arg_2[:3] != b'v10'):\n return arg_1\n\n # Encrypted cookies should be prefixed with 'v10' according to the\n # Chromium code. Strip it off.\n arg_2 = arg_2[3:]\n arg_3 = int(len(arg_2) / 2)\n\n arg_4 = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(arg_0.key, arg_0.iv))\n arg_5 = arg_4.feed(arg_2[:arg_3])\n arg_5 += arg_4.feed(arg_2[arg_3:])\n arg_5 += arg_4.feed()\n return arg_5.decode(\"utf-8\")"} +{"_id": "doc_5619", "title": "", "text": "async def Func(arg_0):\n \"\"\"Get the application bearer token from client_id and client_secret.\"\"\"\n if arg_0.client_id is None:\n raise SpotifyException(_GET_BEARER_ERR % 'client_id')\n\n elif arg_0.client_secret is None:\n raise SpotifyException(_GET_BEARER_ERR % 'client_secret')\n\n arg_1 = b64encode(':'.join((arg_0.client_id, arg_0.client_secret)).encode())\n\n arg_2 = {\n 'url': 'https://accounts.spotify.com/api/token',\n 'data': {'grant_type': 'client_credentials'},\n 'headers': {'Authorization': 'Basic ' + arg_1.decode()}\n }\n\n async with arg_0._session.post(**arg_2) as resp:\n return json.loads(await resp.text(encoding='utf-8'))"} +{"_id": "doc_5620", "title": "", "text": "async def Func(arg_0, arg_1, **arg_2):\n \"\"\"Make a Func to the spotify API with the current bearer credentials.\n\n Parameters\n ----------\n route : Union[tuple[str, str], Route]\n A tuple of the method and url or a :class:`Route` object.\n kwargs : Any\n keyword arguments to pass into :class:`aiohttp.ClientSession.Func`\n \"\"\"\n if isinstance(arg_1, tuple):\n arg_3, arg_4 = arg_1\n else:\n arg_3 = arg_1.method\n arg_4 = arg_1.url\n\n if arg_0.bearer_info is None:\n arg_0.bearer_info = arg_5 = await arg_0.get_bearer_info()\n arg_6 = arg_5['access_token']\n else:\n arg_6 = arg_0.bearer_info['access_token']\n\n arg_7 = {\n 'Authorization': 'Bearer ' + arg_6,\n 'Content-Type': arg_2.get('content_type', 'application/json'),\n **arg_2.pop('headers', {})\n }\n\n for arg_8 in range(arg_0.RETRY_AMOUNT):\n r = await arg_0._session.Func(arg_3, arg_4, 
arg_7=arg_7, **arg_2)\n try:\n status = r.status\n\n try:\n data = json.loads(await r.text(encoding='utf-8'))\n except json.decoder.JSONDecodeError:\n data = {}\n\n if 300 > status >= 200:\n return data\n\n if status == 401:\n arg_0.bearer_info = arg_5 = await arg_0.get_bearer_info()\n arg_7['Authorization'] = 'Bearer ' + arg_5['access_token']\n continue\n\n if status == 429:\n # we're being rate limited.\n amount = r.headers.get('Retry-After')\n await asyncio.sleep(int(amount), loop=arg_0.loop)\n continue\n\n if status in (502, 503):\n # unconditional retry\n continue\n\n if status == 403:\n raise Forbidden(r, data)\n elif status == 404:\n raise NotFound(r, data)\n finally:\n await r.release()\n else:\n raise HTTPException(r, data)"} +{"_id": "doc_5621", "title": "", "text": "def Func(arg_0, arg_1, arg_2=20, arg_3=0, arg_4='US'):\n \"\"\"Get an albums tracks by an ID.\n\n Parameters\n ----------\n spotify_id : str\n The spotify_id to search by.\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.\n offset : Optiona[int]\n The offset of which Spotify should start yielding from.\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code.\n \"\"\"\n arg_5 = Route('GET', '/albums/{spotify_id}/tracks', arg_1=arg_1)\n arg_6 = {'limit': arg_2, 'offset': arg_3}\n\n if arg_4:\n arg_6['market'] = arg_4\n\n return arg_0.request(arg_5, params=arg_6)"} +{"_id": "doc_5622", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a spotify Func by their ID.\n\n Parameters\n ----------\n spotify_id : str\n The spotify_id to search by.\n \"\"\"\n arg_2 = Route('GET', '/Funcs/{spotify_id}', arg_1=arg_1)\n return arg_0.request(arg_2)"} +{"_id": "doc_5623", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=20, arg_4=0, arg_5='US'):\n \"\"\"Get an artists tracks by their ID.\n\n Parameters\n ----------\n spotify_id : str\n The spotify_id to search by.\n include_groups : INCLUDE_GROUPS_TP\n INCLUDE_GROUPS\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. 
Maximum: 50.\n offset : Optiona[int]\n The offset of which Spotify should start yielding from.\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code.\n \"\"\"\n arg_6 = Route('GET', '/artists/{spotify_id}/albums', arg_1=arg_1)\n arg_7 = {'limit': arg_3, 'offset': arg_4}\n\n if arg_2:\n arg_7['include_groups'] = arg_2\n\n if arg_5:\n arg_7['market'] = arg_5\n\n return arg_0.request(arg_6, params=arg_7)"} +{"_id": "doc_5624", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get an artists top tracks per country with their ID.\n\n Parameters\n ----------\n spotify_id : str\n The spotify_id to search by.\n country : COUNTRY_TP\n COUNTRY\n \"\"\"\n arg_3 = Route('GET', '/artists/{spotify_id}/top-tracks', arg_1=arg_1)\n arg_4 = {'country': arg_2}\n return arg_0.request(arg_3, params=arg_4)"} +{"_id": "doc_5625", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get related artists for an artist by their ID.\n\n Parameters\n ----------\n spotify_id : str\n The spotify_id to search by.\n \"\"\"\n arg_2 = Route('GET', '/artists/{spotify_id}/related-artists', arg_1=arg_1)\n return arg_0.request(arg_2)"} +{"_id": "doc_5626", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Get a single Func used to tag items in Spotify.\n\n Parameters\n ----------\n Func_id : str\n The Spotify Func ID for the Func.\n country : COUNTRY_TP\n COUNTRY\n locale : LOCALE_TP\n LOCALE\n \"\"\"\n arg_4 = Route('GET', '/browse/categories/{Func_id}', arg_1=arg_1)\n arg_5 = {}\n\n if arg_2:\n arg_5['country'] = arg_2\n\n if arg_3:\n arg_5['locale'] = arg_3\n\n return arg_0.request(arg_4, params=arg_5)"} +{"_id": "doc_5627", "title": "", "text": "def Func(arg_0, arg_1, arg_2=20, arg_3=0, arg_4=None):\n \"\"\"Get a list of Spotify playlists tagged with a particular category.\n\n Parameters\n ----------\n category_id : str\n The Spotify category ID for the category.\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.\n offset : Optional[int]\n The index of the first item to return. Default: 0\n country : COUNTRY_TP\n COUNTRY\n \"\"\"\n arg_5 = Route('GET', '/browse/categories/{category_id}/playlists', arg_1=arg_1)\n arg_6 = {'limit': arg_2, 'offset': arg_3}\n\n if arg_4:\n arg_6['country'] = arg_4\n\n return arg_0.request(arg_5, params=arg_6)"} +{"_id": "doc_5628", "title": "", "text": "def Func(arg_0, arg_1=20, arg_2=0, arg_3=None, arg_4=None):\n \"\"\"Get a list of Func used to tag items in Spotify.\n\n Parameters\n ----------\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.\n offset : Optional[int]\n The index of the first item to return. Default: 0\n country : COUNTRY_TP\n COUNTRY\n locale : LOCALE_TP\n LOCALE\n \"\"\"\n arg_5 = Route('GET', '/browse/Func')\n arg_6 = {'limit': arg_1, 'offset': arg_2}\n\n if arg_3:\n arg_6['country'] = arg_3\n\n if arg_4:\n arg_6['locale'] = arg_4\n\n return arg_0.request(arg_5, params=arg_6)"} +{"_id": "doc_5629", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=20, arg_5=0):\n \"\"\"Get a list of Spotify featured playlists.\n\n Parameters\n ----------\n locale : LOCALE_TP\n LOCALE\n country : COUNTRY_TP\n COUNTRY\n timestamp : TIMESTAMP_TP\n TIMESTAMP\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.\n offset : Optional[int]\n The index of the first item to return. 
Default: 0\n \"\"\"\n arg_6 = Route('GET', '/browse/featured-playlists')\n arg_7 = {'limit': arg_4, 'offset': arg_5}\n\n if arg_2:\n arg_7['country'] = arg_2\n\n if arg_1:\n arg_7['locale'] = arg_1\n\n if arg_3:\n arg_7['timestamp'] = arg_3\n\n return arg_0.request(arg_6, params=arg_7)"} +{"_id": "doc_5630", "title": "", "text": "def Func(arg_0, *, arg_1=None, arg_2=20, arg_3=0):\n \"\"\"Get a list of new album releases featured in Spotify.\n\n Parameters\n ----------\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.\n offset : Optional[int]\n The index of the first item to return. Default: 0\n country : COUNTRY_TP\n COUNTRY\n \"\"\"\n arg_4 = Route('GET', '/browse/new-releases')\n arg_5 = {'limit': arg_2, 'offset': arg_3}\n\n if arg_1:\n arg_5['country'] = arg_1\n\n return arg_0.request(arg_4, params=arg_5)"} +{"_id": "doc_5631", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, *, arg_4=20, arg_5=None, **arg_6):\n \"\"\"Get Recommendations Based on Seeds.\n\n Parameters\n ----------\n seed_artists : str\n A comma separated list of Spotify IDs for seed artists. Up to 5 seed values may be provided.\n seed_genres : str\n A comma separated list of any genres in the set of available genre seeds. Up to 5 seed values may be provided.\n seed_tracks : str\n A comma separated list of Spotify IDs for a seed track. Up to 5 seed values may be provided.\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code.\n max_* : Optional[Keyword arguments]\n For each tunable track attribute, a hard ceiling on the selected track attribute\u2019s value can be provided.\n min_* : Optional[Keyword arguments]\n For each tunable track attribute, a hard floor on the selected track attribute\u2019s value can be provided.\n target_* : Optional[Keyword arguments]\n For each of the tunable track attributes (below) a target value may be provided.\n \"\"\"\n arg_7 = Route('GET', '/Func')\n arg_8 = {'seed_artists': arg_1, 'seed_genres': arg_2, 'seed_tracks': arg_3, 'limit': arg_4}\n\n if arg_5:\n arg_8['market'] = arg_5\n\n if arg_6:\n arg_8.update(arg_6)\n\n return arg_0.request(arg_7, param=arg_8)"} +{"_id": "doc_5632", "title": "", "text": "def Func(arg_0, arg_1, *, arg_2='artist'):\n \"\"\"Check to see if the current user is following one or more artists or other Spotify users.\n\n Parameters\n ----------\n ids : List[str]\n A comma-separated list of the artist or the user Spotify IDs to check.\n A maximum of 50 IDs can be sent in one request.\n type : Optional[str]\n The ID type: either \"artist\" or \"user\".\n Default: \"artist\"\n \"\"\"\n arg_3 = Route('GET', '/me/following/contains')\n arg_4 = {'ids': arg_1, 'type': arg_2}\n\n return arg_0.request(arg_3, params=arg_4)"} +{"_id": "doc_5633", "title": "", "text": "async def Func(arg_0, *, arg_1: arg_2[arg_3] = 20, arg_4: arg_2[arg_3] = 0, arg_5=None, arg_6: arg_2[arg_7] = None) -> List[Album]:\n \"\"\"Get the albums of a Spotify artist.\n\n Parameters\n ----------\n limit : Optional[int]\n The maximum number of items to return. Default: 20. Minimum: 1. 
Maximum: 50.\n offset : Optional[int]\n The offset of which Spotify should start yielding from.\n include_groups : INCLUDE_GROUPS_TP\n INCLUDE_GROUPS\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code.\n\n Returns\n -------\n albums : List[Album]\n The albums of the artist.\n \"\"\"\n from .album import Album\n\n arg_8 = await arg_0.__client.http.artist_albums(arg_0.id, arg_1=arg_1, arg_4=arg_4, arg_5=arg_5, arg_6=arg_6)\n return list(Album(arg_0.__client, arg_9) for arg_9 in arg_8['items'])"} +{"_id": "doc_5634", "title": "", "text": "async def Func(arg_0, *, arg_1: arg_2 = None) -> int:\n \"\"\"get the total amount of tracks in the album.\n\n Parameters\n ----------\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code.\n\n Returns\n -------\n total : int\n The total amount of albums.\n \"\"\"\n arg_3 = await arg_0.__client.http.artist_albums(arg_0.id, limit=1, offset=0, arg_1=arg_1)\n return arg_3['total']"} +{"_id": "doc_5635", "title": "", "text": "async def Func(arg_0) -> Tuple[Context, Track]:\n \"\"\"Get the users currently playing track.\n\n Returns\n -------\n context, track : Tuple[Context, Track]\n A tuple of the context and track.\n \"\"\"\n arg_1 = await arg_0.http.Func()\n\n if arg_1.get('item'):\n arg_1['Context'] = Context(arg_1.get('context'))\n arg_1['item'] = Track(arg_0.__client, arg_1.get('item'))\n\n return arg_1"} +{"_id": "doc_5636", "title": "", "text": "async def Func(arg_0) -> List[Device]:\n \"\"\"Get information about the users available devices.\n\n Returns\n -------\n devices : List[Device]\n The devices the user has available.\n \"\"\"\n arg_1 = await arg_0.http.available_devices()\n return [Device(arg_2) for arg_2 in arg_1['devices']]"} +{"_id": "doc_5637", "title": "", "text": "async def Func(arg_0) -> List[Dict[str, Union[Track, Context, str]]]:\n \"\"\"Get tracks from the current users recently played tracks.\n\n Returns\n -------\n playlist_history : List[Dict[str, Union[Track, Context, str]]]\n A list of playlist history objects.\n Each object is a dict with a timestamp, track and context field.\n \"\"\"\n arg_1 = await arg_0.http.Func()\n arg_2 = lambda arg_1: {'context': Context(arg_1.get('context')), 'track': Track(arg_0.__client, arg_1.get('track'))}\n # List[T] where T: {'track': Track, 'content': Context: 'timestamp': ISO8601}\n return [{'timestamp': arg_3['timestamp'], **arg_2(arg_3)} for arg_3 in arg_1['items']]"} +{"_id": "doc_5638", "title": "", "text": "async def Func(arg_0, arg_1, *, arg_2=True, arg_3=False, arg_4=None):\n \"\"\"Create a playlist for a Spotify user.\n\n Parameters\n ----------\n name : str\n The name of the playlist.\n public : Optional[bool]\n The public/private status of the playlist.\n `True` for public, `False` for private.\n collaborative : Optional[bool]\n If `True`, the playlist will become collaborative and other users will be able to modify the playlist.\n description : Optional[str]\n The playlist description\n\n Returns\n -------\n playlist : Playlist\n The playlist that was created.\n \"\"\"\n arg_5 = {\n 'name': arg_1,\n 'public': arg_2,\n 'collaborative': arg_3\n }\n\n if arg_4:\n arg_5['description'] = arg_4\n\n arg_6 = await arg_0.http.Func(arg_0.id, arg_5)\n return Playlist(arg_0.__client, arg_6)"} +{"_id": "doc_5639", "title": "", "text": "async def Func(arg_0, *, arg_1: arg_2[arg_3] = 20, arg_4: arg_2[arg_3] = 0) -> List[Track]:\n \"\"\"get the albums tracks from spotify.\n\n Parameters\n ----------\n limit : Optional[int]\n The limit on how many tracks to retrieve for this album (default is 20).\n offset : Optional[int]\n The offset from where the api should start from in the tracks.\n \n Returns\n -------\n tracks : List[Track]\n The tracks of the artist.\n \"\"\"\n arg_5 = await arg_0.__client.http.album_tracks(arg_0.id, arg_1=arg_1, arg_4=arg_4)\n return list(Track(arg_0.__client, arg_6) for arg_6 in arg_5['items'])"} +{"_id": "doc_5640", "title": "", "text": "async def Func(arg_0, *, arg_1: arg_2[arg_3] = 'US') -> List[Track]:\n \"\"\"loads all of the albums tracks, depending on how many the album has this may be a long operation.\n\n Parameters\n ----------\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code. Provide this parameter if you want to apply Track Relinking.\n \n Returns\n -------\n tracks : List[Track]\n The tracks of the artist.\n \"\"\"\n arg_4 = []\n arg_5 = 0\n arg_6 = arg_0.total_tracks or None\n\n while True:\n arg_7 = await arg_0.__client.http.album_tracks(arg_0.id, limit=50, arg_5=arg_5, arg_1=arg_1)\n\n if arg_6 is None:\n arg_6 = arg_7['total']\n\n arg_5 += 50\n arg_4 += list(Track(arg_0.__client, arg_8) for arg_8 in arg_7['items'])\n\n if len(arg_4) >= arg_6:\n break\n\n return arg_4"} +{"_id": "doc_5641", "title": "", "text": "async def Func(arg_0, arg_1: arg_2, *, arg_3: arg_2 = 'US') -> Album:\n \"\"\"Retrieve an album with a spotify ID.\n\n Parameters\n ----------\n spotify_id : str\n The ID to search for.\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code\n\n Returns\n -------\n album : Album\n The album from the ID\n \"\"\"\n arg_4 = await arg_0.http.album(to_id(arg_1), arg_3=arg_3)\n return Album(arg_0, arg_4)"} +{"_id": "doc_5642", "title": "", "text": "async def Func(arg_0, arg_1: arg_2) -> Track:\n \"\"\"Retrieve a track with a spotify ID.\n\n Parameters\n ----------\n spotify_id : str\n The ID to search for.\n\n Returns\n -------\n track : Track\n The track from the ID\n \"\"\"\n arg_3 = await arg_0.http.track(to_id(arg_1))\n return Track(arg_0, arg_3)"} +{"_id": "doc_5643", "title": "", "text": "async def Func(arg_0, *arg_1: arg_2[arg_3], arg_4: arg_3 = 'US') -> arg_2[Album]:\n \"\"\"Retrieve multiple albums with a list of spotify IDs.\n\n Parameters\n ----------\n ids : List[str]\n the IDs to look for\n market : Optional[str]\n An ISO 3166-1 alpha-2 country code\n\n Returns\n -------\n albums : List[Album]\n The albums from the IDs\n \"\"\"\n arg_5 = await arg_0.http.albums(','.join(to_id(_id) for _id in arg_1), arg_4=arg_4)\n return list(Album(arg_0, arg_6) for arg_6 in arg_5['albums'])"} +{"_id": "doc_5644", "title": "", "text": "async def Func(arg_0, *arg_1: arg_2[arg_3]) -> arg_2[Artist]:\n \"\"\"Retrieve multiple artists with a list of spotify IDs.\n\n Parameters\n ----------\n ids : List[str]\n the IDs to look for\n\n Returns\n -------\n artists : List[Artist]\n The artists from the IDs\n \"\"\"\n arg_4 = await arg_0.http.artists(','.join(to_id(_id) for _id in arg_1))\n return list(Artist(arg_0, arg_5) for arg_5 in arg_4['artists'])"} +{"_id": "doc_5645", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, arg_3: arg_4 = arg_5) -> arg_7:\n \"\"\"decorator to assert an object has an attribute when run.\"\"\"\n def decorator(arg_6: arg_7) -> arg_7:\n @functools.wraps(arg_6)\n def decorated(arg_8, *arg_9, **arg_10):\n if not hasattr(arg_8, arg_0):\n raise arg_3(arg_2)\n return arg_6(arg_8, *arg_9, **arg_10)\n\n if inspect.iscoroutinefunction(arg_6):\n @functools.wraps(arg_6)\n async def decorated(*arg_9, **arg_10):\n return await decorated(*arg_9, **arg_10)\n\n return decorated\n return decorator"}
+{"_id": "doc_5646", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Construct a OAuth2 object from a `spotify.Client`.\"\"\"\n return arg_0(arg_1.http.client_id, *arg_2, **arg_3)"} +{"_id": "doc_5647", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1, *, arg_3: arg_1 = None, arg_4: arg_1 = None, arg_5: arg_6 = True) -> arg_1:\n \"\"\"Construct a OAuth2 URL instead of an OAuth2 object.\"\"\"\n arg_7 = {\n 'client_id': arg_0,\n 'redirect_uri': quote(arg_2)\n }\n\n if arg_3 is not None:\n arg_7['scope'] = quote(arg_3)\n\n if arg_4 is not None:\n arg_7['state'] = arg_4\n\n arg_8 = '&'.join('{0}={1}'.format(*item) for item in arg_7.items())\n\n return OAuth2._BASE.format(arg_8=arg_8)"} +{"_id": "doc_5648", "title": "", "text": "def Func(arg_0):\n \"\"\"Attributes used when constructing url parameters.\"\"\"\n arg_1 = {\n 'client_id': arg_0.client_id,\n 'redirect_uri': quote(arg_0.redirect_uri),\n }\n\n if arg_0.scope is not None:\n arg_1['scope'] = quote(arg_0.scope)\n\n if arg_0.state is not None:\n arg_1['state'] = arg_0.state\n\n return arg_1"} +{"_id": "doc_5649", "title": "", "text": "def Func(arg_0) -> str:\n \"\"\"URL Func used.\"\"\"\n return '&'.join('{0}={1}'.format(*arg_1) for arg_1 in arg_0.attrs.items())"} +{"_id": "doc_5650", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Execute the logic behind the meaning of ExpirationDate + return the matched status.\n\n :return:\n The status of the tested domain.\n Can be one of the official status.\n :rtype: str\n \"\"\"\n\n # We Func the status of the domain validation.\n arg_1 = arg_0.checker.is_domain_valid()\n # We Func the status of the IPv4 validation.\n arg_2 = arg_0.checker.is_ip_valid()\n\n if \"current_test_data\" in arg_3.INTERN:\n # The end-user want more information whith his test.\n\n # We update some index.\n arg_3.INTERN[\"current_test_data\"].update(\n {\n \"domain_syntax_validation\": arg_1,\n \"ip4_syntax_validation\": arg_2,\n }\n )\n\n if (\n arg_1\n and not arg_2\n or arg_1\n or arg_3.CONFIGURATION[\"local\"]\n ):\n # * The element is a valid domain.\n # and\n # * The element is not ahe valid IPv4.\n # or\n # * The element is a valid domain.\n\n # * We Func the HTTP status code of the currently tested element.\n # and\n # * We try to Func the element status from the IANA database.\n arg_3.INTERN.update(\n {\"http_code\": HTTPCode().Func(), \"referer\": Referer().Func()}\n )\n\n if not arg_3.INTERN[\"referer\"]:\n # We could not Func the referer.\n\n # We parse the referer status into the upstream call.\n return arg_3.INTERN[\"referer\"]\n\n # The WHOIS record status is not into our list of official status.\n\n if arg_3.INTERN[\"referer\"] and not arg_0.checker.is_subdomain():\n # * The iana database comparison status is not None.\n # and\n # * The domain we are testing is not a subdomain.\n\n # We try to extract the expiration date from the WHOIS record.\n # And we return the matched status.\n return arg_0._extract()\n\n # The iana database comparison status is None.\n\n # We log our whois record if the debug mode is activated.\n Logs().whois(arg_0.whois_record)\n\n # And we return None, we could not extract the expiration date.\n return None\n\n if (\n arg_2\n and not arg_1\n or arg_2\n or arg_3.CONFIGURATION[\"local\"]\n ):\n # * The element is a valid IPv4.\n # and\n # * The element is not a valid domain.\n # or\n # * The element is a valid IPv4.\n\n # We Func the HTTP status code.\n arg_3.INTERN[\"http_code\"] = HTTPCode().Func()\n\n # We log our whois record if 
the debug mode is activated.\n Logs().whois(arg_0.whois_record)\n\n # And we return None, there is no expiration date to look for.\n return None\n\n # The validation was not passed.\n\n # We log our whois record if the debug mode is activated.\n Logs().whois(arg_0.whois_record)\n\n # And we return False, the domain could not pass the IP and domains syntax validation.\n return False"} +{"_id": "doc_5651", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Read the code and update all links.\n \"\"\"\n\n arg_1 = [\".gitignore\", \".keep\"]\n\n for arg_2, arg_3, arg_4 in PyFunceble.walk(\n PyFunceble.CURRENT_DIRECTORY\n + PyFunceble.directory_separator\n + \"PyFunceble\"\n + PyFunceble.directory_separator\n ):\n # We loop through every directories and files in the `PyFunceble` directory.\n\n for arg_5 in arg_4:\n # We loop through the list of files of the currently read directory.\n\n if arg_5 not in arg_1 and \"__pycache__\" not in arg_2:\n # * The filename is not into the list of file to ignore.\n # and\n # * The directory we are reading is not `__pycache__`.\n\n if arg_2.endswith(PyFunceble.directory_separator):\n # The root directory ends with the directory separator.\n\n # We fix the path in the currently read file.\n arg_0._update_docs(arg_2 + arg_5)\n else:\n # The root directory does not ends with the directory separator.\n\n # We fix the path in the currently read file.\n # (after appending the directory separator between the root and file)\n arg_0._update_docs(arg_2 + PyFunceble.directory_separator + arg_5)\n\n for arg_2, arg_3, arg_4 in PyFunceble.walk(\n PyFunceble.CURRENT_DIRECTORY\n + PyFunceble.directory_separator\n + \"tests\"\n + PyFunceble.directory_separator\n ):\n # We loop through every directories and files in the `tests` directory.\n for arg_5 in arg_4:\n # We loop through the list of files of the currently read directory.\n\n if arg_5 not in arg_1 and \"__pycache__\" not in arg_2:\n # * The filename is not into the list of file to ignore.\n # and\n # * The directory we are reading is not `__pycache__`.\n\n if arg_2.endswith(PyFunceble.directory_separator):\n # The root directory ends with the directory separator.\n\n # We fix the path in the currently read file.\n arg_0._update_docs(arg_2 + arg_5)\n else:\n # The root directory does not ends with the directory separator.\n\n # We fix the path in the currently read file.\n # (after appending the directory separator between the root and file)\n arg_0._update_docs(arg_2 + PyFunceble.directory_separator + arg_5)"} +{"_id": "doc_5652", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the current version is greater as the older older one.\n \"\"\"\n\n # we compare the 2 versions.\n arg_1 = Version(True).check_versions(\n arg_0.current_version[0], arg_0.version_yaml\n )\n\n if arg_1 is not None and not arg_1:\n # The current version is greater as the older one.\n\n # We return True.\n return True\n\n # We return False\n return False"} +{"_id": "doc_5653", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the current branch is `dev`.\n \"\"\"\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n arg_1 = \"git branch\"\n\n # We execute and get the command output.\n arg_2 = Command(arg_1).execute()\n\n for arg_3 in arg_2.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if arg_3.startswith(\"*\") and \"dev\" in arg_3:\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We 
return False.\n return False"} +{"_id": "doc_5654", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if we have to put the previous version into the deprecated list.\n \"\"\"\n\n for arg_1, arg_2 in enumerate(arg_0.current_version[0][:2]):\n # We loop through the 2 last elements of the version.\n\n if arg_2 > arg_0.version_yaml[arg_1]:\n # The currently read version number is greater than the one we have in\n # the version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to deprecate anything.\n return False"} +{"_id": "doc_5655", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Backup the current execution state.\n \"\"\"\n\n if arg_3.CONFIGURATION[\"auto_continue\"]:\n # The auto_continue subsystem is activated.\n\n # We initiate the location where we are going to save the data to Func.\n arg_1 = {}\n # We get the current counter states.\n arg_2 = arg_3.INTERN[\"counter\"][\"number\"]\n\n # We initiate the data we have to Func.\n arg_1[arg_3.INTERN[\"file_to_test\"]] = {\n # We Func the number of tested.\n \"tested\": arg_2[\"tested\"],\n # We Func the number of up.\n \"up\": arg_2[\"up\"],\n # We Func the number of down.\n \"down\": arg_2[\"down\"],\n # We Func the number of invalid.\n \"invalid\": arg_2[\"invalid\"],\n }\n\n # We initiate the final data we have to save.\n # We initiate this variable instead of updating Func_content because\n # we do not want to touch the Func_content.\n arg_5 = {}\n\n # We add the Func_content into to_save.\n arg_5.update(arg_0.Func_content)\n # And we overwrite with the newly data to Func.\n arg_5.update(arg_1)\n\n # Finaly, we save our informations into the log file.\n Dict(arg_5).to_json(arg_0.autocontinue_log_file)"} +{"_id": "doc_5656", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Restore data from the given path.\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"auto_continue\"] and arg_0.backup_content:\n # The auto_continue subsystem is activated and the backup_content\n # is not empty.\n\n # We get the file we have to Func.\n arg_1 = PyFunceble.INTERN[\"file_to_test\"]\n\n if arg_1 in arg_0.backup_content:\n # The file we are working with is already into the backup content.\n\n # We initiate the different status to set.\n arg_2 = [\"up\", \"down\", \"invalid\", \"tested\"]\n\n # Because at some time it was not the current status, we have to map\n # the new with the old. 
This way, if someone is running the latest\n # version but with old data, we still continue like nothing happened.\n arg_3 = {\n \"up\": \"number_of_up\",\n \"down\": \"number_of_down\",\n \"invalid\": \"number_of_invalid\",\n \"tested\": \"number_of_tested\",\n }\n\n for arg_4 in arg_2:\n # We loop over the status we have to initiate.\n\n try:\n # We try to update the counters by using the currently read status.\n PyFunceble.INTERN[\"counter\"][\"number\"].update(\n {arg_4: arg_0.backup_content[arg_1][arg_4]}\n )\n except KeyError:\n # But if the status is not present, we try with the older index\n # we mapped previously.\n PyFunceble.INTERN[\"counter\"][\"number\"].update(\n {\n arg_4: arg_0.backup_content[arg_1][\n arg_3[arg_4]\n ]\n }\n )"} +{"_id": "doc_5657", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if we have to ignore the given line.\n\n :param line: The line from the file.\n :type line: str\n \"\"\"\n\n # We set the list of regex to match to be\n # considered as ignored.\n arg_2 = [r\"(^!|^@@|^\\/|^\\[|^\\.|^-|^_|^\\?|^&)\"] # , r\"(\\$|,)(image)\"]\n\n for arg_3 in arg_2:\n # We loop through the list of regex.\n\n if Regex(arg_1, arg_3, return_data=False).match():\n # The currently read line matches the currently read\n # regex.\n\n # We return true, it has to be ignored.\n return True\n\n # We return False, it does not have to be ignored.\n return False"} +{"_id": "doc_5658", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Handle the data from the options.\n\n :param options: The list of options from the rule.\n :type options: list\n\n :return: The list of domains to return globally.\n :rtype: list\n \"\"\"\n\n # We initiate a variable which will save our result\n arg_2 = []\n\n # We initiate the regex which will be used to extract the domain listed\n # under the option domain=\n arg_3 = r\"domain=(.*)\"\n\n for arg_4 in arg_1:\n # We loop through the list of options.\n try:\n # We try to extract the list of domains from the currently read\n # option.\n arg_5 = Regex(\n arg_4, arg_3, return_data=True, rematch=True, group=0\n ).match()[-1]\n\n if arg_5:\n # We could extract something.\n\n if arg_0.aggressive: # pragma: no cover\n arg_2.extend(\n [\n arg_6\n for arg_6 in arg_5.split(\"|\")\n if arg_6 and not arg_6.startswith(\"~\")\n ]\n )\n else:\n # We return True.\n return True\n except TypeError:\n pass\n\n # We return the result.\n return arg_2"} +{"_id": "doc_5659", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None): # pragma: no cover\n \"\"\"\n Format the extracted adblock line before passing it to the system.\n\n :param to_format: The extracted line from the file.\n :type to_format: str\n\n :param result: A list of the result of this method.\n :type result: list\n\n :return: The list of domains or IPs to test.\n :rtype: list\n \"\"\"\n\n if not arg_2:\n # The result is not given.\n\n # We set the result as an empty list.\n arg_2 = []\n\n for arg_3 in List(arg_1).format():\n # We loop through the different lines to format.\n\n if arg_3:\n # The currently read line is not empty.\n\n if \"^\" in arg_3:\n # There is a caret in the currently read line.\n\n # We recall this method but with the current result state\n # and split data.\n return arg_0.Func(arg_3.split(\"^\"), arg_2)\n\n if \"#\" in arg_3:\n # There is a hash in the currently read line.\n\n # We recall this method but with the current result state\n # and split data.\n return arg_0.Func(arg_3.split(\"#\"), arg_2)\n\n if \",\" in arg_3:\n # There is a comma in the currently 
read line.\n\n # We recall this method but with the current result state\n # and split data.\n return arg_0.Func(arg_3.split(\",\"), arg_2)\n\n if \"!\" in arg_3:\n # There is an exclamation mark in the currently read line.\n\n # We recall this method but with the current result state\n # and split data.\n return arg_0.Func(arg_3.split(\"!\"), arg_2)\n\n if \"|\" in arg_3:\n # There is a vertical bar in the currently read line.\n\n # We recall this method but with the current result state\n # and split data.\n return arg_0.Func(arg_3.split(\"|\"), arg_2)\n\n if arg_3:\n # The currently read line is not empty.\n\n arg_3 = arg_0._extract_base(arg_3)\n\n if arg_3 and (\n arg_0.checker.is_domain_valid(arg_3)\n or arg_0.checker.is_ip_valid(arg_3)\n ):\n # The extracted base is not empty.\n # and\n # * The currently read line is a valid domain.\n # or\n # * The currently read line is a valid IP.\n\n # We append the currently read line to the result.\n arg_2.append(arg_3)\n elif arg_3:\n # * The currently read line is not a valid domain.\n # or\n # * The currently read line is not a valid IP.\n\n # We try to get the url base.\n arg_4 = arg_0.checker.is_url_valid(arg_3, return_base=True)\n\n if arg_4:\n # The url_base is not empty or equal to False or None.\n\n # We append the url base to the result.\n arg_2.append(arg_4)\n\n # We return the result element.\n return arg_2"} +{"_id": "doc_5660", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the HTTP code status.\n\n :return: The matched and formatted status code.\n :rtype: str|int|None\n \"\"\"\n if PyFunceble.HTTP_CODE[\"active\"]:\n # The http status code extraction is activated.\n\n # We Func the http status code.\n arg_1 = arg_0._access()\n\n # We initiate a variable which will save the list of allowed\n # http status code.\n arg_2 = []\n\n for arg_3 in [\n PyFunceble.HTTP_CODE[\"list\"][\"up\"],\n PyFunceble.HTTP_CODE[\"list\"][\"potentially_down\"],\n PyFunceble.HTTP_CODE[\"list\"][\"potentially_up\"],\n ]:\n # We loop through the list of http status code.\n\n # We extend the list of valid codes with the currently read\n # codes.\n arg_2.extend(arg_3)\n\n if arg_1 not in arg_2 or arg_1 is None:\n # * The extracted http code is not in the list of valid http code.\n # or\n # * The extracted http code is equal to `None`.\n\n # We return 3 stars in order to mention that we were not able to extract\n # the http status code.\n return \"*\" * 3\n\n # * The extracted http code is in the list of valid http code.\n # or\n # * The extracted http code is not equal to `None`.\n\n # We return the extracted http status code.\n return arg_1\n\n # The http status code extraction is not activated.\n\n # We return None.\n return None"} +{"_id": "doc_5661", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Check if the given domain is a subdomain.\n\n :param domain: The domain we are checking.\n :type domain: str\n\n :return: The subdomain state.\n :rtype: bool\n\n .. 
warning::\n If an empty or a non-string :code:`domain` is given, we return :code:`None`.\n \"\"\"\n\n if arg_0 and isinstance(arg_0, str):\n # * The given domain is not empty nor None.\n # and\n # * The given domain is a string.\n\n # We silently load the configuration.\n load_config(True)\n\n return Check(arg_0).Func()\n\n # We return None, there is nothing to check.\n return None"} +{"_id": "doc_5662", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Check the syntax of the given IPv4.\n\n :param ip: The IPv4 to check the syntax for.\n :type ip: str\n\n :return: The syntax validity.\n :rtype: bool\n\n .. warning::\n If an empty or a non-string :code:`ip` is given, we return :code:`None`.\n \"\"\"\n\n if arg_0 and isinstance(arg_0, str):\n # The given IP is not empty nor None.\n # and\n # * The given IP is a string.\n\n # We silently load the configuration.\n load_config(True)\n\n return Check(arg_0).is_ip_valid()\n\n # We return None, there is nothing to check.\n return None"} +{"_id": "doc_5663", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if the given information is a URL.\n If it is the case, it downloads and updates the location of the file to test.\n\n :param passed: The url passed to the system.\n :type passed: str\n\n :return: The state of the check.\n :rtype: bool\n \"\"\"\n\n if arg_1 and arg_0.checker.is_url_valid(arg_1):\n # The passed string is a URL.\n\n # We get the file name based on the URL.\n # We actually just get the string after the last `/` in the URL.\n arg_2 = arg_1.split(\"/\")[-1]\n\n if (\n not arg_3.path.isfile(arg_2)\n or arg_3.INTERN[\"counter\"][\"number\"][\"tested\"] == 0\n ):\n # The filename does not exist in the current directory\n # or the current number of tested is equal to 0.\n\n # We download the content of the link.\n Download(arg_1, arg_2).text()\n\n # The file does exist or the current number of tested is greater than\n # 0.\n\n # We initiate the file we have to test.\n arg_3.INTERN[\"file_to_test\"] = arg_2\n\n # We return true to say that everything goes right.\n return True\n\n # The passed string is not a URL.\n\n # We do not need to do anything else.\n return False"} +{"_id": "doc_5664", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Manage the loading of the url system.\n \"\"\"\n\n if (\n arg_0.url_file # pylint: disable=no-member\n and not arg_0.Func_download(\n arg_0.url_file # pylint: disable=no-member\n )\n ): # pylint: disable=no-member\n # The current url_file is not a URL.\n\n # We initiate the filename as the file we have to test.\n arg_1.INTERN[\n \"file_to_test\"\n ] = arg_0.url_file"} +{"_id": "doc_5665", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decide if we print or not the header.\n \"\"\"\n\n if (\n not arg_1.CONFIGURATION[\"quiet\"]\n and not arg_1.CONFIGURATION[\"header_printed\"]\n ):\n # * The quiet mode is not activated.\n # and\n # * The header has not been already printed.\n\n # We print a new line.\n print(\"\\n\")\n\n if arg_1.CONFIGURATION[\"less\"]:\n # We have to show less information on screen.\n\n # We print the `Less` header.\n Prints(None, \"Less\").header()\n else:\n # We have to show all information on screen.\n\n # We print the `Generic` header.\n Prints(None, \"Generic\").header()\n\n # The header was printed.\n\n # We initiate the variable which says that the header has been printed to True.\n arg_1.CONFIGURATION[\"header_printed\"] = True"} +{"_id": "doc_5666", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Manage the 
database, autosave and autocontinue systems for the case that we are reading\n a file.\n\n :param current: The currently tested element.\n :type current: str\n\n :param last: The last element of the list.\n :type last: str\n\n :param status: The status of the currently tested element.\n :type status: str\n \"\"\"\n\n if (\n arg_3\n and not arg_5.CONFIGURATION[\"simple\"]\n and arg_5.INTERN[\"file_to_test\"]\n ):\n # * The status is given.\n # and\n # * The simple mode is deactivated.\n # and\n # * A file to test is set.\n\n # We run the mining logic.\n arg_0.mining.process()\n\n # We delete the currently tested element from the mining\n # database.\n # Indeed, as it is tested, it is already in our\n # testing process which means that we don't need it into\n # the mining database.\n arg_0.mining.remove()\n\n if (\n arg_3.lower() in arg_5.STATUS[\"list\"][\"up\"]\n or arg_3.lower() in arg_5.STATUS[\"list\"][\"valid\"]\n ):\n # The status is in the list of up status.\n\n if arg_0.inactive_database.is_present():\n # The currently tested element is in the database.\n\n # We generate the suspicious file(s).\n Generate(arg_5.STATUS[\"official\"][\"up\"]).analytic_file(\n \"suspicious\"\n )\n\n # We remove the currently tested element from the\n # database.\n arg_0.inactive_database.remove()\n\n else:\n # The status is not in the list of up status.\n\n # We add the currently tested element to the\n # database.\n arg_0.inactive_database.add()\n\n # We backup the current state of the file reading\n # for the case that we need to continue later.\n arg_0.auto_continue.backup()\n\n if arg_1 != arg_2:\n # The current element is not the last one.\n\n # We run the autosave logic.\n AutoSave()\n else:\n # The current element is the last one.\n\n # We stop and log the execution time.\n ExecutionTime(\"stop\", arg_2=True)\n\n # We show/log the percentage.\n arg_0.percentage.log()\n\n # We reset the counters as we end the process.\n arg_0.reset_counters()\n\n # We backup the current state of the file reading\n # for the case that we need to continue later.\n arg_0.auto_continue.backup()\n\n # We show the colored logo.\n arg_0.colorify_logo()\n\n # We save and stop the script if we are under\n # Travis CI.\n AutoSave(True)\n\n for arg_4 in [\"http_code\", \"referer\"]:\n # We loop through some configuration index we have to empty.\n\n if arg_4 in arg_5.INTERN:\n # The index is in the configuration.\n\n # We empty the configuration index.\n arg_5.INTERN[arg_4] = \"\""} +{"_id": "doc_5667", "title": "", "text": "def Func(arg_0, Func=None, arg_2=None):\n \"\"\"\n Manage the case that we want to test only a domain.\n\n :param domain: The domain or IP to test.\n :type domain: str\n\n :param last_domain:\n The last domain to test if we are testing a file.\n :type last_domain: str\n\n :param return_status: Tell us if we need to return the status.\n :type return_status: bool\n \"\"\"\n\n # We print the header.\n arg_0._print_header()\n\n if Func:\n # A domain is given.\n\n # We format and set the domain we are testing and treating.\n arg_3.INTERN[\"to_test\"] = arg_0._format_domain(Func)\n else:\n # A domain is not given.\n\n # We set the domain we are testing and treating to None.\n arg_3.INTERN[\"to_test\"] = None\n\n if arg_3.INTERN[\"to_test\"]:\n # The domain is given (Not None).\n\n if arg_3.CONFIGURATION[\"syntax\"]:\n # The syntax mode is activated.\n\n # We get the status from Syntax.\n arg_5 = arg_0.syntax_status.get()\n else:\n # We test and get the status of the domain.\n arg_5, arg_6 = 
arg_0.status.get()\n\n # We run the file decision logic.\n arg_0._file_decision(arg_3.INTERN[\"to_test\"], arg_2, arg_5)\n\n if arg_3.CONFIGURATION[\"simple\"]:\n # The simple mode is activated.\n\n # We print the domain and the status.\n print(arg_3.INTERN[\"to_test\"], arg_5)\n\n # We return the tested domain and its status.\n return arg_3.INTERN[\"to_test\"], arg_5\n\n # We return None, there is nothing to test.\n return None"} +{"_id": "doc_5668", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Manage the case that we want to test only a given Func.\n\n :param Func_to_test: The Func to test.\n :type Func_to_test: str\n\n :param last_Func:\n The last Func of the file we are testing\n (if exist)\n :type last_Func: str\n \"\"\"\n\n # We print the header.\n arg_0._print_header()\n\n if arg_1:\n # An Func to test is given.\n\n # We set the Func we are going to test.\n arg_3.INTERN[\"to_test\"] = arg_1\n else:\n # An URL to test is not given.\n\n # We set the Func we are going to test to None.\n arg_3.INTERN[\"to_test\"] = None\n\n if arg_3.INTERN[\"to_test\"]:\n # An URL to test is given.\n\n if arg_3.CONFIGURATION[\"syntax\"]:\n # The syntax mode is activated.\n\n # We get the status from Syntax.\n arg_5 = arg_0.syntax_status.get()\n else:\n # The syntax mode is not activated.\n\n # We get the status from URL.\n arg_5 = arg_0.Func_status.get()\n\n # We run the file decision logic.\n arg_0._file_decision(arg_3.INTERN[\"to_test\"], arg_2, arg_5)\n\n if arg_3.CONFIGURATION[\"simple\"]:\n # The simple mode is activated.\n\n # We print the URL informations.\n print(arg_3.INTERN[\"to_test\"], arg_5)\n\n # We return the URL we tested and its status.\n return arg_3.INTERN[\"to_test\"], arg_5\n\n # We return None, there is nothing to test.\n return None"} +{"_id": "doc_5669", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Format the extracted domain before passing it to the system.\n\n :param extracted_domain: The extracted domain.\n :type extracted_domain: str\n\n :return: The formatted domain or IP to test.\n :rtype: str\n\n .. 
note:\n Understand by formating the fact that we get rid\n of all the noises around the domain we want to test.\n \"\"\"\n\n if not arg_1.startswith(\"#\"):\n # The line is not a commented line.\n\n if \"#\" in arg_1:\n # There is a comment at the end of the line.\n\n # We delete the comment from the line.\n arg_1 = arg_1[\n : arg_1.find(\"#\")\n ].strip()\n\n if \" \" in arg_1 or \"\\t\" in arg_1:\n # A space or a tabs is in the line.\n\n # We remove all whitestring from the extracted line.\n arg_2 = arg_1.split()\n\n # As there was a space or a tab in the string, we consider\n # that we are working with the hosts file format which means\n # that the domain we have to test is after the first string.\n # So we set the index to 1.\n arg_3 = 1\n\n while arg_3 < len(arg_2):\n # We loop until the index is greater than the length of\n # the splited line.\n\n if arg_2[arg_3]:\n # The element at the current index is not an empty string.\n\n # We break the loop.\n break\n\n # The element at the current index is an empty string.\n\n # We increase the index number.\n arg_3 += 1\n\n # We return the last read element.\n return arg_2[arg_3]\n\n # We return the extracted line.\n return arg_1\n\n # The extracted line is a comment line.\n\n # We return an empty string as we do not want to work with commented line.\n return \"\""} +{"_id": "doc_5670", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract all non commented lines from the file we are testing.\n\n :return: The elements to test.\n :rtype: list\n \"\"\"\n\n # We initiate the variable which will save what we are going to return.\n arg_1 = []\n\n if PyFunceble.path.isfile(PyFunceble.INTERN[\"file_to_test\"]):\n # The give file to test exist.\n\n try:\n with open(PyFunceble.INTERN[\"file_to_test\"]) as file:\n # We open and read the file.\n\n for arg_2 in file:\n # We loop through each lines.\n\n if not arg_2.startswith(\"#\"):\n # The currently read line is not a commented line.\n\n # We append the current read line to the result.\n arg_1.append(arg_2.rstrip(\"\\n\").strip())\n except UnicodeDecodeError:\n with open(PyFunceble.INTERN[\"file_to_test\"], encoding=\"utf-8\") as file:\n # We open and read the file.\n\n for arg_2 in file:\n # We loop through each lines.\n\n if not arg_2.startswith(\"#\"):\n # The currently read line is not a commented line.\n\n # We append the current read line to the result.\n arg_1.append(arg_2.rstrip(\"\\n\").strip())\n\n else:\n # The given file to test does not exist.\n\n # We raise a FileNotFoundError exception.\n raise FileNotFoundError(PyFunceble.INTERN[\"file_to_test\"])\n\n # We return the result.\n return arg_1"} +{"_id": "doc_5671", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Manage the case that need to test each domain of a given Func path.\n\n .. 
note::\n 1 domain per line.\n \"\"\"\n\n # We get, format, filter, clean the list to test.\n arg_1 = arg_0._Func_list_to_test_filtering()\n\n if PyFunceble.CONFIGURATION[\"idna_conversion\"]:\n # We have to convert domains to idna.\n\n # We convert if we need to convert.\n arg_1 = domain2idna(arg_1)\n\n if PyFunceble.CONFIGURATION[\"hierarchical_sorting\"]:\n # The hierarchical sorting is desired by the user.\n\n # We format the list.\n arg_1 = List(arg_1).custom_format(Sort.hierarchical)\n else:\n # The hierarchical sorting is not desired by the user.\n\n # We format the list.\n arg_1 = List(arg_1).custom_format(Sort.standard)\n\n # We initiate a local variable which will save the current state of the list.\n arg_2 = arg_1\n\n try:\n # We remove the element which are in the database from the\n # current list to test.\n arg_1 = List(\n list(\n set(\n arg_1[PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] :]\n )\n - set(PyFunceble.INTERN[\"flatten_inactive_db\"])\n )\n ).format()\n arg_3 = arg_1[-1]\n except IndexError:\n # Our list to test is the one with the element from the database.\n arg_1 = arg_2[\n PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] :\n ]\n\n # We delete the undesired variable.\n del arg_2\n\n if PyFunceble.CONFIGURATION[\"hierarchical_sorting\"]:\n # The hierarchical sorting is desired by the user.\n\n # We format the list.\n arg_1 = List(list(arg_1)).custom_format(Sort.hierarchical)\n\n try:\n # We test each element of the list to test.\n return [arg_0.domain(arg_4, arg_1[-1]) for arg_4 in arg_1 if arg_4]\n except IndexError:\n # We print a message on screen.\n print(PyFunceble.Fore.CYAN + PyFunceble.Style.BRIGHT + \"Nothing to test.\")"} +{"_id": "doc_5672", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Manage the case that we have to test a file\n\n .. 
note::\n 1 URL per line.\n \"\"\"\n\n # We get, format, clean the list of URL to test.\n arg_1 = arg_0._file_list_to_test_filtering()\n\n # We initiate a local variable which will save the current state of the list.\n arg_2 = arg_1\n\n try:\n # We remove the element which are in the database from the\n # current list to test.\n arg_1 = List(\n list(\n set(\n arg_1[PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] :]\n )\n - set(PyFunceble.INTERN[\"flatten_inactive_db\"])\n )\n ).format()\n arg_3 = arg_1[-1]\n except IndexError:\n # Our list to test is the one with the element from the database.\n arg_1 = arg_2[\n PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] :\n ]\n\n # We delete the undesired variable.\n del arg_2\n\n if PyFunceble.CONFIGURATION[\"hierarchical_sorting\"]:\n # The hierarchical sorting is desired by the user.\n\n # We format the list.\n arg_1 = List(list(arg_1)).custom_format(Sort.hierarchical)\n\n try:\n # We test each URL from the list to test.\n return [arg_0.url(arg_4, arg_1[-1]) for arg_4 in arg_1 if arg_4]\n except IndexError:\n # We print a message on screen.\n print(PyFunceble.Fore.CYAN + PyFunceble.Style.BRIGHT + \"Nothing to test.\")"} +{"_id": "doc_5673", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=False\n ): # pylint: disable=inconsistent-return-statements\n \"\"\"\n Switch PyFunceble.CONFIGURATION variables to their opposite.\n\n :param variable:\n The variable name to Func.\n The variable should be an index our configuration system.\n If we want to Func a bool variable, we should parse\n it here.\n :type variable: str|bool\n\n :param custom:\n Let us know if have to Func the parsed variable instead\n of our configuration index.\n :type custom: bool\n\n :return:\n The opposite of the configuration index or the given variable.\n :rtype: bool\n\n :raises:\n :code:`Exception`\n When the configuration is not valid. In other words,\n if the PyFunceble.CONFIGURATION[variable_name] is not a bool.\n \"\"\"\n\n if not arg_2:\n # We are not working with custom variable which is not into\n # the configuration.\n\n # We get the current state.\n arg_3 = dict.get(PyFunceble.CONFIGURATION, arg_1)\n else:\n # We are working with a custom variable which is not into the\n # configuration\n arg_3 = arg_1\n\n if isinstance(arg_3, bool):\n # The current state is a boolean.\n\n if arg_3:\n # The current state is equal to True.\n\n # We return False.\n return False\n\n # The current state is equal to False.\n\n # We return True.\n return True\n\n # The current state is not a boolean.\n\n # We set the message to raise.\n arg_4 = \"Impossible to Func %s. Please post an issue to %s\"\n\n # We raise an exception inviting the user to report an issue.\n raise Exception(\n arg_4 % (repr(arg_1), PyFunceble.LINKS[\"repo\"] + \"/issues.\")\n )"} +{"_id": "doc_5674", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the status while testing for an IP or domain.\n\n .. 
note::\n We consider that the domain or IP we are currently testing\n is into :code:`PyFunceble.INTERN[\"to_test\"]`.\n \"\"\"\n\n if \"to_test\" in PyFunceble.INTERN and PyFunceble.INTERN[\"to_test\"]:\n arg_1 = ExpirationDate().Func()\n\n if arg_1 is False:\n return arg_0.handle(status=\"invalid\")\n\n if arg_1 == PyFunceble.STATUS[\"official\"][\"up\"]:\n return arg_1, \"WHOIS\"\n\n return arg_0.handle(status=\"inactive\")\n\n raise NotImplementedError(\"We expect `INTERN['to_test']` to be set.\")"} +{"_id": "doc_5675", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Handle the backend of the given status.\n \"\"\"\n\n # We initiate the source we are going to parse to the Generate class.\n arg_1 = \"URL\"\n\n if arg_0.catched.lower() not in PyFunceble.STATUS[\"list\"][\"invalid\"]:\n # The parsed status is not in the list of invalid.\n\n # We generate the status file with the catched status.\n Generate(arg_0.catched, arg_1).status_file()\n else:\n # The parsed status is in the list of invalid.\n\n # We generate the status file with the parsed status.\n Generate(arg_0.catched, \"SYNTAX\").status_file()\n\n # We return the parsed status.\n return arg_0.catched"} +{"_id": "doc_5676", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the structure we are going to work with.\n\n :return: The structure we have to work with.\n :rtype: dict\n \"\"\"\n\n # We initiate an empty variable which is going to save the location of\n # file we are going to download.\n arg_1 = \"\"\n\n # We initiate the variable which will save the request instance.\n arg_2 = \"\"\n\n if PyFunceble.path.isfile(arg_0.structure):\n # The structure path file exist.\n\n # We set it as the destination file.\n arg_1 = arg_0.structure\n elif PyFunceble.path.isfile(arg_0.base + \"dir_structure_production.json\"):\n # * The structure path file does not exist.\n # but\n # * The production structure path file exist.\n\n # We set it as the destination file\n arg_1 = arg_0.base + \"dir_structure_production.json\"\n else:\n # * The structure path file does not exist.\n # and\n # * The production structure path file does not exist.\n\n if \"dev\" not in PyFunceble.VERSION:\n # `dev` is not into the local version name.\n\n # We get the production file from the master branch.\n arg_2 = PyFunceble.requests.get(\n PyFunceble.LINKS[\"dir_structure\"].replace(\"dev\", \"master\")\n )\n else:\n # `dev` is into the local version name.\n\n # We get the production file from the dev branch.\n arg_2 = PyFunceble.requests.get(\n PyFunceble.LINKS[\"dir_structure\"].replace(\"master\", \"dev\")\n )\n\n if arg_1.endswith(\"_production.json\"):\n # The destination is the production file.\n\n # And we return the updated the structure from the last read file.\n # (with the names from the configuration file).\n return arg_0._update_structure_from_config(\n Dict().from_json(File(arg_1).read())\n )\n\n # The destination is not the production file.\n\n if arg_1.endswith(\".json\"):\n # The destination ends with `.json`.\n\n # And we return the updated the structure from the given file.\n # (with the names from the configuration file).\n return arg_0._update_structure_from_config(\n Dict().from_json(File(arg_1).read())\n )\n\n # The destination does not ends with `.json`.\n\n # We return the updated the structure from the link we previously got.\n # (with the names from the configuration file).\n return arg_0._update_structure_from_config(Dict().from_json(arg_2.text))"} +{"_id": "doc_5677", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n 
\"\"\"\n Creates the given directory if it does not exists.\n\n :param directory: The directory to create.\n :type directory: str\n\n :param loop: Tell us if we are in the creation loop or not.\n :type loop: bool\n \"\"\"\n\n if not arg_2 and PyFunceble.directory_separator in arg_1:\n # * We are not in the loop.\n # and\n # * The directory separator in the given directory.\n\n # We split the directories separator.\n arg_3 = arg_1.split(PyFunceble.directory_separator)\n\n # We initiate a variable which will save the full path to create.\n arg_4 = \"\"\n\n for arg_5 in arg_3:\n # We loop through each directory.\n\n # We append the currently read directory to the full path.\n arg_4 += arg_5 + PyFunceble.directory_separator\n\n # And we create the directory if it does not exist.\n arg_0.Func(arg_4, True)\n\n if not PyFunceble.path.isdir(arg_1):\n # The given directory does not exist.\n\n # We update the permission.\n # (Only if we are under Travis CI.)\n AutoSave.travis_permissions()\n\n # We create the directory.\n PyFunceble.mkdir(arg_1)\n\n # We update the permission.\n # (Only if we are under Travis CI.)\n AutoSave.travis_permissions()"} +{"_id": "doc_5678", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Delete the directory which are not registered into our structure.\n \"\"\"\n\n # We get the structure we have to apply.\n arg_1 = arg_0._get_structure()\n\n # We get the list of key which is implicitly the list of directory we do not bave to delete.\n arg_2 = list(arg_1.keys())\n\n # We move to the content of the parent as we know that we are creating only one directory.\n # Note: if one day we will have to create multiple directory, we will have to change\n # the following.\n arg_1 = arg_1[arg_2[0]]\n\n # We also set the parent directory as we are going to construct its childen.\n arg_3 = arg_2[0]\n\n if not arg_3.endswith(PyFunceble.directory_separator):\n arg_3 += PyFunceble.directory_separator\n\n for arg_4, arg_5, arg_5 in PyFunceble.walk(arg_3):\n # We loop through each directories of the parent path.\n\n # We fix the path in order to avoid issues.\n arg_4 = Directory(arg_4).fix_path()\n\n if arg_4.replace(arg_3, \"\") not in arg_1:\n # The currently read directory is not in our structure.\n\n # We delete it.\n PyFunceble.rmtree(arg_4)"} +{"_id": "doc_5679", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the paths to the configuration files.\n\n :param path_to_config: The possible path to the config to load.\n :type path_to_config: str\n\n :return:\n The path to the config to read (0), the path to the default\n configuration to read as fallback.(1)\n :rtype: tuple\n \"\"\"\n\n if not arg_1.endswith(PyFunceble.directory_separator):\n # The path to the config does not ends with the directory separator.\n\n # We initiate the default and the parsed variable with the directory separator.\n arg_2 = parsed = arg_1 + PyFunceble.directory_separator\n else:\n # The path to the config does ends with the directory separator.\n\n # We initiate the default and the parsed variable.\n arg_2 = parsed = arg_1\n\n # We append the `CONFIGURATION_FILENAME` to the parsed variable.\n parsed += PyFunceble.CONFIGURATION_FILENAME\n # And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable.\n arg_2 += PyFunceble.DEFAULT_CONFIGURATION_FILENAME\n\n # We finaly return a tuple which contain both informations.\n return (parsed, arg_2)"} +{"_id": "doc_5680", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Download `public-suffix.json` if not present.\n \"\"\"\n\n # We initiate 
the link to the public suffix configuration.\n # It is not hard coded because this method is called only if we\n # are sure that the configuration file exist.\n arg_1 = PyFunceble.CONFIGURATION[\"links\"][\"psl\"]\n\n # We update the link according to our current version.\n arg_1 = Version(True).right_url_from_version(arg_1)\n\n # We set the destination of the downloaded file.\n arg_2 = (\n PyFunceble.CURRENT_DIRECTORY\n + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"public_suffix\"]\n )\n\n if not Version(True).is_cloned() or not PyFunceble.path.isfile(arg_2):\n # The current version is not the cloned version.\n\n # We Download the link content and return the download status.\n return Download(arg_1, arg_2).text()\n\n # We are in the cloned version.\n\n # We do not need to download the file, so we are returning None.\n return None"} +{"_id": "doc_5681", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Download the latest version of `dir_structure_production.json`.\n \"\"\"\n\n # We initiate the link to the public suffix configuration.\n # It is not hard coded because this method is called only if we\n # are sure that the configuration file exist.\n arg_1 = PyFunceble.CONFIGURATION[\"links\"][\"dir_structure\"]\n\n # We update the link according to our current version.\n arg_1 = Version(True).right_url_from_version(arg_1)\n\n # We set the destination of the downloaded file.\n arg_2 = (\n PyFunceble.CURRENT_DIRECTORY\n + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"dir_structure\"]\n )\n\n if not Version(True).is_cloned() or not PyFunceble.path.isfile(arg_2):\n # The current version is not the cloned version.\n\n # We Download the link content and return the download status.\n arg_3 = Download(arg_1, arg_2, return_data=True).text()\n\n File(arg_2).write(arg_3, overwrite=True)\n return True\n\n # We are in the cloned version.\n\n # We do not need to download the file, so we are returning None.\n return None"} +{"_id": "doc_5682", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Execute the logic behind the merging.\n \"\"\"\n\n if \"PYFUNCEBLE_AUTO_CONFIGURATION\" not in PyFunceble.environ:\n # The auto configuration environment variable is not set.\n\n while True:\n # We infinitly loop until we get a reponse which is `y|Y` or `n|N`.\n\n # We ask the user if we should install and load the default configuration.\n arg_1 = input(\n PyFunceble.Style.BRIGHT\n + PyFunceble.Fore.RED\n + \"A configuration key is missing.\\n\"\n + PyFunceble.Fore.RESET\n + \"Try to merge upstream configuration file into %s ? 
[y/n] \"\n % (\n PyFunceble.Style.BRIGHT\n + arg_0.path_to_config\n + PyFunceble.Style.RESET_ALL\n )\n )\n\n if isinstance(arg_1, str):\n # The response is a string\n\n if arg_1.lower() == \"y\":\n # The response is a `y` or `Y`.\n\n # We merge the old values inside the new one.\n arg_0._merge_values()\n\n # And we save.\n arg_0._save()\n\n print(\n PyFunceble.Style.BRIGHT + PyFunceble.Fore.GREEN + \"Done!\\n\"\n \"Please try again, if it happens again,\"\n \" please fill a new issue.\"\n )\n\n # And we break the loop as we got a satisfied response.\n break\n\n elif arg_1.lower() == \"n\":\n # The response is a `n` or `N`.\n\n # We inform the user that something went wrong.\n raise Exception(\"Configuration key still missing.\")\n else:\n # The auto configuration environment variable is set.\n\n # We merge the old values inside the new one.\n arg_0._merge_values()\n\n # And we save.\n arg_0._save()"} +{"_id": "doc_5683", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Convert the versions to a shorter one.\n\n :param version: The version to split.\n :type version: str\n\n :param return_non_digits:\n Activate the return of the non-digits parts of the splitted\n version.\n :type return_non_digits: bool\n\n :return: The splitted version name/numbers.\n :rtype: list\n \"\"\"\n\n # We split the version.\n arg_3 = arg_1.split(\".\")\n\n # We split the parsed version and keep the digits.\n arg_4 = [x for x in arg_3 if x.isdigit()]\n\n if not arg_2:\n # We do not have to return the non digits part of the version.\n\n # We return the digits part of the version.\n return arg_4\n\n # We have to return the non digit parts of the version.\n\n # We split the parsed version and keep the non digits.\n arg_5 = [x for x in arg_3 if not x.isdigit()]\n\n # We return a tuple with first the digits part and finally the non digit parts.\n return (arg_4, arg_5[0])"} +{"_id": "doc_5684", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Compare the given versions.\n\n :param local: The local version converted by split_versions().\n :type local: list\n\n :param upstream: The upstream version converted by split_versions().\n :type upstream: list\n\n :return:\n - True: local < upstream\n - None: local == upstream\n - False: local > upstream\n :rtype: bool|None\n \"\"\"\n\n # A version should be in format [1,2,3] which is actually the version `1.2.3`\n # So as we only have 3 elements in the versioning,\n # we initiate the following variable in order to get the status of each parts.\n arg_3 = [None, None, None]\n\n for arg_4, arg_5 in enumerate(arg_1):\n # We loop through the local version.\n\n if int(arg_5) < int(arg_2[arg_4]):\n # The local version is less than the upstream version.\n\n # We initiate its status to True which means that we are in\n # an old version (for the current version part).\n arg_3[arg_4] = True\n elif int(arg_5) > int(arg_2[arg_4]):\n # The local version is greater then the upstream version.\n\n # We initiate its status to False which means that we are in\n # a more recent version (for the current version part).\n arg_3[arg_4] = False\n\n # Otherwise the status stay None which means that there is no change\n # between both local and upstream.\n\n if False in arg_3:\n # There is a False in the status.\n\n # We return False which means that we are in a more recent version.\n return False\n\n if True in arg_3:\n # There is a True in the status.\n\n # We return True which means that we are in a older version.\n return True\n\n # There is no True or False in the 
status.\n\n # We return None which means that we are in the same version as upstream.\n return None"} +{"_id": "doc_5685", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Let us know if we are currently in the cloned version of\n PyFunceble which implicitly mean that we are in developement mode.\n \"\"\"\n\n if not PyFunceble.path.isdir(\".git\"):\n # The git directory does not exist.\n\n # We return False, the current version is not the cloned version.\n return False\n\n # We list the list of file which can be found only in a cloned version.\n arg_1 = [\n \".coveragerc\",\n \".coveralls.yml\",\n \".gitignore\",\n \".PyFunceble_production.yaml\",\n \".travis.yml\",\n \"CODE_OF_CONDUCT.md\",\n \"CONTRIBUTING.md\",\n \"dir_structure_production.json\",\n \"MANIFEST.in\",\n \"README.rst\",\n \"requirements.txt\",\n \"setup.py\",\n \"version.yaml\",\n ]\n\n # We list the list of directory which can be found only in a cloned\n # version.\n arg_2 = [\"docs\", \"PyFunceble\", \"tests\"]\n\n for arg_3 in arg_1:\n # We loop through the list of file.\n\n if not PyFunceble.path.isfile(arg_3):\n # The file does not exist in the current directory.\n\n # We return False, the current version is not the cloned version.\n return False\n\n # All required files exist in the current directory.\n\n for arg_4 in arg_2:\n # We loop through the list of directory.\n\n if not PyFunceble.path.isdir(arg_4):\n # The directory does not exist in the current directory.\n\n # We return False, the current version is not the cloned version.\n return False\n\n # All required directories exist in the current directory.\n\n # We return True, the current version is a cloned version.\n return True"} +{"_id": "doc_5686", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Handle and check that some configuration index exists.\n \"\"\"\n\n try:\n # We try to call the http code.\n arg_1.INTERN[\"http_code\"]\n except KeyError:\n # If it is not found.\n\n # We initiate an empty http code.\n arg_1.INTERN[\"http_code\"] = \"*\" * 3\n\n try:\n # We try to call the referer.\n arg_1.INTERN[\"referer\"]\n except KeyError:\n # If it is not found.\n\n # We initate an `Unknown` referer.\n arg_1.INTERN[\"referer\"] = \"Unknown\""} +{"_id": "doc_5687", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Generate unified file. 
Understand by that that we use an unified table\n instead of a separate table for each status which could result into a\n misunderstanding.\n \"\"\"\n\n if (\n \"file_to_test\" in PyFunceble.INTERN\n and PyFunceble.INTERN[\"file_to_test\"]\n and PyFunceble.CONFIGURATION[\"unified\"]\n ):\n # * We are not testing as an imported module.\n # and\n # * The unified file generation is activated.\n\n # We construct the path of the unified file.\n arg_1 = (\n arg_0.output_parent_dir + PyFunceble.OUTPUTS[\"default_files\"][\"results\"]\n )\n\n if PyFunceble.CONFIGURATION[\"less\"]:\n # We have to print less information.\n\n if PyFunceble.HTTP_CODE[\"active\"]:\n # The http status code request is activated.\n\n # We construct what we have to print.\n arg_2 = [\n arg_0.tested,\n arg_0.domain_status,\n PyFunceble.INTERN[\"http_code\"],\n ]\n else:\n # The http status code request is not activated.\n\n # We construct what we have to print.\n arg_2 = [arg_0.tested, arg_0.domain_status, arg_0.source]\n\n # And we print the informations on file.\n Prints(arg_2, \"Less\", arg_1, True).data()\n else:\n # The unified file generation is not activated.\n\n # We construct what we have to print.\n arg_2 = [\n arg_0.tested,\n arg_0.domain_status,\n arg_0.expiration_date,\n arg_0.source,\n PyFunceble.INTERN[\"http_code\"],\n PyFunceble.CURRENT_TIME,\n ]\n\n # And we print the information on file.\n Prints(arg_2, \"Generic_File\", arg_1, True).data()"} +{"_id": "doc_5688", "title": "", "text": "def Func(arg_0): # pylint: disable=inconsistent-return-statements\n \"\"\"\n Generate a file according to the domain status.\n \"\"\"\n\n if \"file_to_test\" in PyFunceble.INTERN:\n # We are not testing as an imported module.\n\n # We generate the hosts file.\n Generate(arg_0.domain_status, arg_0.source, arg_0.expiration_date).info_files()\n\n # We are testing a file content.\n\n # We increase the percentage count.\n Percentage(arg_0.domain_status).count()\n\n # We print on screen if needed.\n arg_0._prints_status_screen()\n\n if arg_0._do_not_produce_file():\n return None\n\n if (\n not PyFunceble.CONFIGURATION[\"no_files\"]\n and PyFunceble.CONFIGURATION[\"split\"]\n ):\n # * The file non-generation of file is globaly deactivated.\n # and\n # * We have to split the outputs.\n\n # We print or generate the files.\n arg_0._prints_Func()\n else:\n # * The file non-generation of file is globaly activated.\n # or\n # * We do not have to split the outputs.\n\n # We print or generate the unified files.\n arg_0.unified_file()"} +{"_id": "doc_5689", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if we are allowed to produce a file based from the given\n information.\n\n :return:\n The state of the production.\n True: We do not produce file.\n False: We do produce file.\n :rtype: bool\n \"\"\"\n\n if (\n Inactive().is_present()\n and arg_0.domain_status\n in [\n PyFunceble.STATUS[\"official\"][\"down\"],\n PyFunceble.STATUS[\"official\"][\"invalid\"],\n ]\n and PyFunceble.INTERN[\"to_test\"]\n not in PyFunceble.INTERN[\"extracted_list_to_test\"]\n ):\n return True\n return False"} +{"_id": "doc_5690", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Implement the Func and alphabetical sorting.\n\n :param element: The element we are currently reading.\n :type element: str\n\n :return: The formatted element.\n :rtype: str\n \"\"\"\n\n # We remove all special characters and return the formatted string.\n return (\n Regex(arg_1, arg_0.regex_replace, replace_with=\"@funilrys\")\n .replace()\n .replace(\"@funilrys\", 
\"\")\n )"} +{"_id": "doc_5691", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n The idea behind this method is to sort a list of domain Funcy.\n\n :param element: The element we are currently reading.\n :type element: str\n\n :return: The formatted element.\n :rtype: str\n\n .. note::\n For a domain like :code:`aaa.bbb.ccc.tdl`.\n\n A normal sorting is done in the following order:\n 1. :code:`aaa`\n 2. :code:`bbb`\n 3. :code:`ccc`\n 4. :code:`tdl`\n\n This method allow the sorting to be done in the following order:\n 1. :code:`tdl`\n 2. :code:`ccc`\n 3. :code:`bbb`\n 4. :code:`aaa`\n\n \"\"\"\n\n # We initiate a variable which will save the element to sort without\n # the extension.\n arg_2 = \"\"\n\n # We initiate a variable which will save the full extension.\n arg_3 = \"\"\n\n # We convert the parsed element to lower case.\n arg_1 = arg_1.lower()\n\n # We try to get the url base.\n arg_4 = Check().is_url_valid(arg_1, return_base=True)\n\n if not isinstance(arg_4, str):\n # The url base is not found.\n\n if \".\" in arg_1:\n # There is point in the parsed element.\n\n # We get the position of the first letter of the extension.\n arg_5 = arg_1.rindex(\".\") + 1\n\n # We get the extension from the position of the first letter\n # of the extension.\n arg_6 = arg_1[arg_5:]\n\n if arg_6 in PyFunceble.INTERN[\"psl_db\"]:\n # The extension is in the public suffix database.\n\n for arg_7 in PyFunceble.INTERN[\"psl_db\"][arg_6]:\n # We loop through the list of suffix of the extracted extension.\n\n # We suffix the sufix with a point.\n arg_8 = \".\" + arg_7\n\n if arg_1.endswith(arg_8):\n # The elements ends with the suffix.\n\n # We get the position of the first character of the suffix in\n # the parsed element.\n arg_9 = arg_1.rindex(arg_8)\n\n # We update the to_sort variable with the element without the suffix.\n arg_2 = arg_1[:arg_9]\n\n # We replace the full extension with the currently read suffix.\n arg_3 = arg_7\n\n # We break the loop, we got what we wanted.\n break\n\n if not arg_3:\n # The full extension is empty.\n\n # We initiate it with the extension.\n arg_3 = arg_1[arg_5:]\n\n # We update the to_sort variable with the element without the extension.\n arg_2 = arg_1[: arg_5 - 1]\n\n # We append a point to the full extension because the point has to be\n # at the end and not at the begining of the extension.\n # To understand: Imagine a miror.\n arg_3 += \".\"\n\n # We reverse the to_sort string.\n arg_10 = arg_2[::-1]\n\n if \".\" in arg_10:\n # There is a point in the reversed string.\n\n # We prefix the full extension with the top level\n # domain name.\n arg_3 = (\n arg_10[: arg_10.index(\".\")][::-1] + \".\" + arg_3\n )\n\n # We remove the tor level domain from the rest of\n # the reversed string.\n arg_10 = arg_10[arg_10.index(\".\") + 1 :]\n\n # * We reverse each level of the parsed element.\n # and\n # * We glue each level of the parsed element with each other.\n #\n # Note: after this, there is no point anymore.\n arg_11 = arg_3 + \".\".join(\n [x[::-1] for x in arg_10.split(\".\")]\n )\n\n # We remove all special characters and return the formatted string.\n return (\n Regex(arg_11, arg_0.regex_replace, replace_with=\"@funilrys\")\n .replace()\n .replace(\"@funilrys\", \"\")\n )\n\n # We remove all special characters and return the formatted string.\n return (\n Regex(\n arg_2 + arg_3,\n arg_0.regex_replace,\n replace_with=\"@funilrys\",\n )\n .replace()\n .replace(\"@funilrys\", \"\")\n )\n\n # There is no point in the parsed element.\n\n # We return the parsed 
element.\n return arg_1\n\n # The url base is found.\n\n # We get the position of the element.\n arg_12 = arg_1.rindex(arg_4)\n\n # We extract the protocol from the element position.\n arg_13 = arg_1[:arg_12]\n\n # We return the output of this method but with the url base instead of the full url.\n return arg_13 + arg_0.Func(arg_4)"} +{"_id": "doc_5692", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Initiate the IANA database if it is not the case.\n \"\"\"\n\n if \"iana_db\" not in arg_1.INTERN or not arg_1.INTERN[\"iana_db\"]:\n # The global database is empty, None or does not exist.\n\n # We update it with the database content.\n arg_1.INTERN[\"iana_db\"] = arg_0.iana_db"} +{"_id": "doc_5693", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract the extention from the given block.\n Plus get its referer.\n \"\"\"\n\n arg_1 = (\n Download(arg_0.iana_url, return_data=True)\n .text()\n .split('')\n )\n\n # We extract the different extension from the currently readed line.\n arg_2 = r\"(/domains/root/db/)(.*)(\\.html)\"\n\n for arg_3 in arg_1:\n if \"/domains/root/db/\" in arg_3:\n # The link is in the line.\n\n # We try to extract the extension.\n arg_4 = Regex(\n arg_3, arg_2, return_data=True, rematch=True\n ).match()[1]\n\n if arg_4:\n # The extraction is not empty or None.\n\n # We get the referer.\n arg_5 = arg_0._referer(arg_4)\n\n # We yield the matched extension and its referer.\n yield (arg_4, arg_5)"} +{"_id": "doc_5694", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Update the content of the `iana-domains-db` file.\n \"\"\"\n\n if not PyFunceble.CONFIGURATION[\"quiet\"]:\n # * The quiet mode is not activated.\n\n # We print on screen what we are doing.\n print(\"Update of iana-domains-db\", end=\" \")\n\n # We loop through the line of the iana website.\n for arg_1, arg_2 in arg_0._extensions():\n\n if arg_1 not in arg_0.iana_db or arg_0.iana_db[arg_1] != arg_2:\n # We add the extension to the databae.\n arg_0.iana_db[arg_1] = arg_2\n\n # We save the content of the constructed database.\n Dict(arg_0.iana_db).to_json(arg_0.destination)\n\n if not PyFunceble.CONFIGURATION[\"quiet\"]:\n # The quiet mode is not activated.\n\n # We indicate that the work is done without any issue.\n print(PyFunceble.INTERN[\"done\"])"} +{"_id": "doc_5695", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve the mining informations.\n \"\"\"\n\n if arg_1.CONFIGURATION[\"mining\"]:\n # The mining is activated.\n\n if \"mined\" not in arg_1.INTERN:\n arg_1.INTERN[\"mined\"] = {}\n\n if arg_1.path.isfile(arg_0.file):\n # Our backup file exist.\n\n # We return the information from our backup.\n arg_3 = Dict().from_json(File(arg_0.file).read())\n\n # We clean the empty elements.\n for arg_4 in arg_3:\n arg_1.INTERN[\"mined\"][arg_4] = {}\n\n for arg_5 in arg_3[arg_4]:\n if arg_3[arg_4][arg_5]:\n arg_1.INTERN[\"mined\"][arg_4][arg_5] = arg_3[\n arg_4\n ][arg_5]\n\n return\n # * The mining is not activated.\n # or\n # * Our backup file does not exist.\n\n # We return nothing.\n arg_1.INTERN[\"mined\"] = {}\n\n return"} +{"_id": "doc_5696", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Backup the mined informations.\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"mining\"]:\n # The mining is activated.\n\n # We backup our mined informations.\n Dict(PyFunceble.INTERN[\"mined\"]).to_json(arg_0.file)"} +{"_id": "doc_5697", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Remove the currently tested element from the mining\n data.\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"mining\"]:\n # The 
mining is activated.\n\n if PyFunceble.INTERN[\"file_to_test\"] in PyFunceble.INTERN[\"mined\"]:\n # The currently tested file is in our mined database.\n\n for arg_1 in PyFunceble.INTERN[\"mined\"][\n PyFunceble.INTERN[\"file_to_test\"]\n ]:\n # We loop through the mined index.\n\n if (\n arg_0.to_get_bare\n in PyFunceble.INTERN[\"mined\"][\n PyFunceble.INTERN[\"file_to_test\"]\n ][arg_1]\n ):\n # The currently read element content.\n\n # We Func the globally tested element from the currently\n # read element content.\n PyFunceble.INTERN[\"mined\"][PyFunceble.INTERN[\"file_to_test\"]][\n arg_1\n ].Func(arg_0.to_get_bare)\n\n # We backup everything.\n arg_0._backup()"} +{"_id": "doc_5698", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Provide the list of mined so they can be added to the list\n queue.\n\n :return: The list of mined domains or URL.\n :rtype: list\n \"\"\"\n\n # We initiate a variable which will return the result.\n arg_1 = []\n\n if PyFunceble.CONFIGURATION[\"mining\"]:\n # The mining is activated.\n\n if PyFunceble.INTERN[\"file_to_test\"] in PyFunceble.INTERN[\"mined\"]:\n # The file we are testing is into our mining database.\n\n for arg_2 in PyFunceble.INTERN[\"mined\"][\n PyFunceble.INTERN[\"file_to_test\"]\n ]:\n # We loop through the list of index of the file we are testing.\n\n # We append the element of the currently read index to our result.\n arg_1.extend(\n PyFunceble.INTERN[\"mined\"][PyFunceble.INTERN[\"file_to_test\"]][\n arg_2\n ]\n )\n\n # We format our result.\n arg_1 = List(arg_1).format()\n\n # We return the result.\n return arg_1"} +{"_id": "doc_5699", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get and return the content of the given log file.\n\n :param file: The file we have to get the content from.\n :type file: str\n\n :return The content of the given file.\n :rtype: dict\n \"\"\"\n\n if PyFunceble.path.isfile(arg_1):\n return Dict().from_json(File(arg_1).read())\n\n return {}"} +{"_id": "doc_5700", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Write the content into the given file.\n\n :param content: The dict to write.\n :type content: dict\n\n :param file: The file to write.\n :type file: str\n \"\"\"\n\n if not PyFunceble.CONFIGURATION[\"no_files\"]:\n if not isinstance(arg_1, dict):\n arg_1 = {}\n\n Dict(arg_1).to_json(arg_2)"} +{"_id": "doc_5701", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Logs the case that the referer was not found.\n\n :param extension: The extension of the domain we are testing.\n :type extension: str\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"logs\"]:\n # The logs subsystem is activated.\n\n arg_2 = {\n arg_0.current_time: {\n \"domain\": PyFunceble.INTERN[\"to_test\"],\n \"extension\": arg_1,\n }\n }\n\n if arg_0.output:\n arg_3 = arg_0.output\n else:\n arg_3 = PyFunceble.OUTPUT_DIRECTORY\n arg_3 += PyFunceble.OUTPUTS[\"parent_directory\"]\n arg_3 += PyFunceble.OUTPUTS[\"logs\"][\"directories\"][\"parent\"]\n arg_3 += PyFunceble.OUTPUTS[\"logs\"][\"filenames\"][\"no_referer\"]\n\n arg_4 = arg_0._get_content(arg_3)\n arg_4.update(arg_2)\n\n arg_0._write_content(arg_4, arg_3)\n\n if PyFunceble.CONFIGURATION[\"share_logs\"]:\n # The logs sharing is activated.\n\n # And we share the logs with the api.\n PyFunceble.requests.post(\n PyFunceble.LINKS[\"api_no_referer\"], data=arg_2[arg_0.current_time]\n )"} +{"_id": "doc_5702", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=\"-\", arg_3=\" \"\n ):\n \"\"\"\n Construct header of the table according to template.\n\n :param 
data_to_print:\n The list of data to print into the header of the table.\n :type data_to_print: list\n\n :param header_separator:\n The separator to use between the table header and our data.\n :type header_separator: str\n\n :param colomn_separator: The separator to use between each colomns.\n :type colomn_separator: str\n\n :return: The data to print in a list format.\n :rtype: list\n \"\"\"\n\n # We initiate a variable which will save the header data.\n arg_4 = []\n\n # We initiate a variable which will save the header sizes.\n arg_5 = \"\"\n\n # We initiate the glue to set before the size.\n arg_6 = \"%-\"\n\n # We initiate the glue to set after the size.\n arg_7 = \"s\"\n\n if arg_2:\n # The header separator is not empty.\n\n # We initiate a variable which will save the list of\n # separator data.\n arg_8 = []\n\n # We get the length of the data to print.\n arg_9 = len(arg_1) - 1\n\n # We initiate an iterator.\n arg_10 = 0\n\n for arg_11 in arg_1:\n # We loop through the list of data.\n\n # We get the size of the currently read data.\n arg_12 = arg_1[arg_11]\n\n # We append the data to the header data list.\n arg_4.append(arg_11)\n\n # We construct the header size.\n # Note: our header size is formatted line %s-sizes\n # (the s at the end is part of the formatting.)\n arg_5 += arg_6 + str(arg_12) + arg_7\n\n if arg_10 < arg_9:\n # The iterator is less than the length of data to print.\n\n # We append the the colomn separator to the header size.\n arg_5 += arg_3\n\n if arg_2:\n # The header separator is given.\n\n # We append the right size of separator to the list of\n # separator data.\n arg_8.append(arg_2 * arg_12)\n\n # We increase the iterator.\n arg_10 += 1\n\n if arg_2:\n # The header separator is given.\n\n return [\n # We return the formatted header (like we will do with print('%s' % 'hello'))\n arg_5 % tuple(arg_4),\n # We return the formatted header separator.\n arg_5 % tuple(arg_8),\n ]\n\n # The header separator is not given.\n\n # We return the formetted header.\n return [arg_5 % tuple(arg_4)]"} +{"_id": "doc_5703", "title": "", "text": "def Func(\n arg_0, arg_1=False\n ): # pragma: no cover pylint: disable=too-many-branches\n \"\"\"\n Management and creation of templates of Func.\n Please consider as \"Func\" the title of each columns.\n\n :param do_not_print:\n Tell us if we have to print the Func or not.\n :type do_not_print: bool\n \"\"\"\n\n if (\n not PyFunceble.CONFIGURATION[\"Func_printed\"]\n or arg_0.template == \"Percentage\"\n or arg_1\n ):\n # * The Func has not been already printed.\n # or\n # * The template is the `Percentage template`.\n # or\n # * We are authorized to print something.\n\n if (\n arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\"generic\"]\n or arg_0.template == \"Generic_File\"\n ):\n # * The template is into the list of generic status.\n # or\n # * The template is equal to `Generic_File`.\n\n # The data to print is the Generic Func.\n arg_2 = arg_0.Funcs[\"Generic\"]\n\n if (\n arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\"generic\"]\n and PyFunceble.HTTP_CODE[\"active\"]\n ):\n # * The template is in the list of generic status.\n # and\n # * the http status code extraction is activated.\n\n # We remove the Analyze Date colomn from the data to print.\n arg_2 = Dict(arg_2).remove_key(\"Analyze Date\")\n elif arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\"up\"]:\n # The template is in the list of up status.\n\n # We informations to print is the up Func.\n arg_2 = 
arg_0.Funcs[PyFunceble.STATUS[\"official\"][\"up\"]]\n elif arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\"valid\"]:\n # The template is in the list of valid status.\n\n # We informations to print is the valid Func.\n arg_2 = arg_0.Funcs[PyFunceble.STATUS[\"official\"][\"valid\"]]\n elif arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\"down\"]:\n # The template is in the list of down status.\n\n # We informations to print is the down Func.\n arg_2 = arg_0.Funcs[PyFunceble.STATUS[\"official\"][\"down\"]]\n elif arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\"invalid\"]:\n # The template is in the list of invalid status.\n\n # We informations to print is the invalid Func.\n arg_2 = arg_0.Funcs[PyFunceble.STATUS[\"official\"][\"invalid\"]]\n elif (\n arg_0.template == \"Less\"\n or arg_0.template == \"Percentage\"\n or arg_0.template == \"HTTP\"\n ): # pylint: disable=line-too-long\n # * The template is equal to `Less`.\n # or\n # * The template is equal to `Percentage`.\n # or\n # * The template is equal to `HTTP`.\n\n # We get the Func with the help of the template name.\n arg_2 = arg_0.Funcs[arg_0.template]\n\n if arg_0.template == \"Less\" and not PyFunceble.HTTP_CODE[\"active\"]:\n # * The template is equal to `Less`.\n # and\n # * The http status code extraction is deactivated.\n\n # We append the source index to the Func.\n arg_2[\"Source\"] = 10\n\n if not PyFunceble.HTTP_CODE[\"active\"]:\n # * The http status code extraction is deactivated.\n\n # We remove the HTTP Code index from the data to print.\n arg_2 = Dict(arg_2).remove_key(\"HTTP Code\")\n\n # We update the currently used Func.\n arg_0.currently_used_Func = arg_2\n\n if not arg_1:\n # We are not authorized to print anything.\n\n # We generate the before Func.\n arg_0._before_Func()\n\n for arg_4 in arg_0._Func_constructor(arg_2):\n # We loop through the formatted template.\n\n if not arg_0.only_on_file:\n # We do not have to print only on file.\n\n # We print on screen the formatted Func template.\n print(arg_4)\n\n if not PyFunceble.CONFIGURATION[\"no_files\"] and arg_0.output:\n # An output destination is given.\n\n # We write the file with the formatted Func template.\n File(arg_0.output).write(arg_4 + \"\\n\")"} +{"_id": "doc_5704", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Construct the table of data according to given size.\n\n :param size: The maximal length of each string in the table.\n :type size: list\n\n :return:\n A dict with all information about the data and how to which what\n maximal size to print it.\n :rtype: OrderedDict\n\n :raises:\n :code:`Exception`\n If the data and the size does not have the same length.\n \"\"\"\n\n # We initiate a variable which will save what we are going to\n # return.\n arg_2 = PyFunceble.OrderedDict()\n\n if len(arg_0.data_to_print) == len(arg_1):\n # The length of the data to print is equal to the length of the given size.\n\n for arg_3 in range(len(arg_0.data_to_print)):\n # We loop until our iterator is less or equal to the length of the data\n # to print.\n\n # We initiate the result index and its size.\n arg_2[arg_0.data_to_print[arg_3]] = arg_1[arg_3]\n else:\n # This should never happend. 
If it's happens then there is something\n # wrong from the inputed data.\n raise Exception(\n \"Inputed: \" + str(len(arg_0.data_to_print)) + \"; Size: \" + str(len(arg_1))\n )\n\n # We return the constructed result.\n return arg_2"} +{"_id": "doc_5705", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the size of each columns from the header.\n\n :param header:\n The header template we have to get the size from.\n :type header: dict\n\n :return: The maximal size of the each data to print.\n :rtype: list\n \"\"\"\n\n # We initiate the result we are going to return.\n arg_2 = []\n\n for arg_3 in arg_1:\n # We lopp through the header.\n\n # And we append the size to our result.\n arg_2.append(arg_1[arg_3])\n\n # We return the result.\n return arg_2"} +{"_id": "doc_5706", "title": "", "text": "def Func(arg_0): # pragma: no cover pylint: disable=inconsistent-return-statements\n \"\"\"\n Management and input of data to the table.\n\n :raises:\n :code:`Exception`\n When self.data_to_print is not a list.\n \"\"\"\n\n if isinstance(arg_0.data_to_print, list):\n # The data to print is a list.\n\n # We initiate the data we are going to print.\n arg_1 = {}\n\n # We initiate the size we are going to print.\n arg_2 = []\n\n # We initiate a variable which will list the list of\n # alone case.\n arg_3 = [\"Percentage\", \"HTTP\"]\n\n # we initiate a variable which will list the list of\n # template which does not need a header.\n arg_4 = [\"FullHosts\", \"PlainDomain\"]\n\n if arg_0.template.lower() == \"json\":\n # The template is the json template.\n\n if not PyFunceble.CONFIGURATION[\"no_files\"] and arg_0.output:\n # * We are allowed to generate file.\n # and\n # * The given output is not empty.\n\n # We print the json file.\n return arg_0._json_print()\n\n # We return nothing.\n return None\n\n if arg_0.template not in arg_3 and arg_0.template not in arg_4:\n # * The template is not in the list of alone case.\n # and\n # * THe template is not in the list of template without header.\n\n # We get the template we should use.\n # Note: We basically only need the self.currently_used_header to be filled.\n arg_0.header(True)\n\n # And we get the size from the header.\n arg_2 = arg_0._size_from_header(arg_0.currently_used_header)\n elif arg_0.template in arg_4:\n # The template is in the list of template which does not need a header.\n\n for Func in arg_0.data_to_print:\n # We loop through the list of data to print.\n\n # And we construct the (spacement) size of the data to print.\n arg_2.append(str(len(Func)))\n else:\n # We get the size from the given template name.\n arg_2 = arg_0._size_from_header(arg_0.headers[arg_0.template])\n\n # We construct and format the data to print.\n arg_1 = arg_0._data_constructor(arg_2)\n\n # We print the before header section.\n arg_0._before_header()\n\n for Func in arg_0._header_constructor(arg_1, False):\n # We loop through the formatted data.\n\n if arg_0.template.lower() in PyFunceble.STATUS[\"list\"][\n \"generic\"\n ] or arg_0.template in [\"Less\", \"Percentage\"]:\n # * The template is in the list of generic status.\n # or\n # * The template is in a specific list.\n\n if not arg_0.only_on_file:\n # We are authorized to print on screen.\n\n # We colorify the data to print.\n arg_6 = arg_0._colorify(Func)\n\n # And we print the data.\n print(arg_6)\n if not PyFunceble.CONFIGURATION[\"no_files\"] and arg_0.output:\n # * We are authorized to print on any file.\n # and\n # * The output is given.\n\n # We write our data into the printed file.\n 
File(arg_0.output).write(Func + \"\\n\")\n else:\n # This should never happend. If it's happens then there's a big issue\n # around data_to_print.\n raise Exception(\"Please review Prints().data()\")"} +{"_id": "doc_5707", "title": "", "text": "def Func(arg_0, arg_1=False): # pragma: no cover\n \"\"\"\n Save the current time to the file.\n\n :param last:\n Tell us if we are at the very end of the file testing.\n :type last: bool\n \"\"\"\n\n if (\n arg_0._authorization()\n and PyFunceble.CONFIGURATION[\"logs\"]\n and \"file_to_test\" in PyFunceble.INTERN\n and PyFunceble.INTERN[\"file_to_test\"]\n ):\n # * We are authorized to work.\n # and\n # * The generation of logs is activated.\n # and\n # * We are not testing as an imported module.\n\n # We set the location of the file we are working with.\n arg_0.file = (\n PyFunceble.OUTPUT_DIRECTORY\n + PyFunceble.OUTPUTS[\"parent_directory\"]\n + PyFunceble.OUTPUTS[\"logs\"][\"directories\"][\"parent\"]\n + PyFunceble.OUTPUTS[\"logs\"][\"filenames\"][\"execution_time\"]\n )\n\n if PyFunceble.path.isfile(arg_0.file):\n # The file we are working with exist.\n\n # We get its content so we can directly work with it.\n arg_3 = Dict().from_json(File(arg_0.file).read())\n else:\n # The file we are working with does not exist.\n\n # We generate a dummy content.\n arg_3 = {}\n\n if arg_0.action == \"start\":\n # The action is equal to `start`.\n\n if \"final_total\" in arg_3 and arg_3[\"final_total\"]:\n # The final total index exist.\n\n # We delete it.\n del arg_3[\"final_total\"]\n\n if \"data\" in arg_3:\n # The data index exist.\n\n # We append the current start time inside it at\n # a new sublist.\n arg_3[\"data\"].append([PyFunceble.INTERN[\"start\"]])\n else:\n # The data index does not exist.\n\n # We create the index along with the current start time.\n arg_3[\"data\"] = [[PyFunceble.INTERN[\"start\"]]]\n elif arg_0.action == \"stop\":\n # The action is equal to `stop`.\n\n try:\n # We try to work with the data index.\n\n # We append the end time at the end of the last element\n # of data.\n #\n # Note: It is at the end because we should have as first\n # the star time.\n arg_3[\"data\"][-1].append(PyFunceble.INTERN[\"end\"])\n\n # We get the start time.\n arg_4 = arg_3[\"data\"][0][0]\n # We get the end time.\n arg_5 = arg_3[\"data\"][-1][-1]\n\n # We calculate the execution time of the test.\n arg_3[\"current_total\"] = arg_0.format_execution_time(arg_4, arg_5)\n\n if arg_1:\n # We are at the very end of the file testing.\n\n # We initiate the global execution time.\n arg_3[\"final_total\"] = arg_3[\"current_total\"]\n\n # We inform the user about the global execution time.\n print(\n PyFunceble.Fore.MAGENTA\n + PyFunceble.Style.BRIGHT\n + \"Global execution time: \"\n + arg_3[\"final_total\"]\n )\n except KeyError:\n # It is not possible to work with the data index because\n # it does not exist.\n\n # We ignore the problem.\n pass\n\n try:\n # We try to save the whole data at its final location.\n Dict(arg_3).to_json(arg_0.file)\n except FileNotFoundError:\n # The directory was not found.\n\n # We construct the output directory\n DirectoryStructure()\n\n # And we retry to save the whole data at its final location.\n Dict(arg_3).to_json(arg_0.file)"} +{"_id": "doc_5708", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Set the databases files to delete.\n \"\"\"\n\n # We initiate the directory we have to look for.\n arg_1 = PyFunceble.CURRENT_DIRECTORY\n\n # We initate the result variable.\n arg_2 = []\n\n # We append the 
dir_structure file.\n arg_2.append(\n arg_1\n + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"dir_structure\"]\n )\n\n # We append the iana file.\n arg_2.append(\n arg_1 + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"iana\"]\n )\n\n # We append the public suffix file.\n arg_2.append(\n arg_1\n + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"public_suffix\"]\n )\n\n # We append the inactive database file.\n arg_2.append(\n arg_1\n + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"inactive_db\"]\n )\n\n # We append the mining database file.\n arg_2.append(\n arg_1 + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"mining\"]\n )\n\n # We append the whois database file.\n arg_2.append(\n arg_1 + PyFunceble.CONFIGURATION[\"outputs\"][\"default_files\"][\"whois_db\"]\n )\n\n return arg_2"} +{"_id": "doc_5709", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Delete almost all discovered files.\n\n :param clean_all:\n Tell the subsystem if we have to clean everything instesd\n of almost everything.\n :type clean_all: bool\n \"\"\"\n\n # We get the list of file to delete.\n arg_2 = arg_0.file_to_delete()\n\n if arg_1: # pragma: no cover\n arg_2.extend(arg_0.databases_to_delete())\n\n for arg_3 in arg_2:\n # We loop through the list of file to delete.\n\n # And we delete the currently read file.\n File(arg_3).delete()\n\n if arg_1: # pragma: no cover\n Load(PyFunceble.CURRENT_DIRECTORY)"} +{"_id": "doc_5710", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get hash of the given data.\n\n :param algo: The algorithm to use.\n :type algo: str\n \"\"\"\n\n # We het the algorithm function.\n arg_2 = getattr(hashlib, arg_1)()\n\n # We set the data into our hashlib.\n arg_2.update(arg_0.data)\n\n # And we extract and return the hash.\n return arg_2.hexdigest()"} +{"_id": "doc_5711", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the hash of the given file\n \"\"\"\n\n # We initiate a variable which will save the result we are going\n # to return.\n arg_1 = {}\n\n if arg_0.algorithm in arg_0.valid_algorithms:\n # * The parsed path exist.\n # and\n # * The parsed algorithm is in the list of valid algorithms.\n\n if arg_0.algorithm == \"all\":\n # The parsed algorithm is `all`.\n\n # We remove `all` (the first element of the list) from\n # the list of valid algorithms because we are going to\n # loop through the list of valid algorithms.\n del arg_0.valid_algorithms[0]\n\n for arg_2 in arg_0.valid_algorithms:\n # We loop through the list of valid algorithms.\n\n if arg_0.path and path.isfile(arg_0.path):\n # The file path exist.\n\n # We save the hash into the result variable.\n arg_1[arg_2] = arg_0._hash_file(arg_2)\n elif arg_0.data:\n # * The path does not exist.\n # and\n # * The given data is not empty.\n\n # We save the hash into the result variable.\n arg_1[arg_2] = arg_0._hash_data(arg_2)\n else: # pragma: no cover\n # All other case are met.\n\n # We return None.\n return None\n else:\n # The parsed algorithm is a specific one.\n\n if arg_0.path and path.isfile(arg_0.path):\n # The file path exist.\n\n # We save the hash into the result variable.\n arg_1[arg_0.algorithm] = arg_0._hash_file(arg_0.algorithm)\n elif arg_0.data:\n # * The path does not exist.\n # and\n # * The given data is not empty.\n\n # We save the hash into the result variable.\n arg_1[arg_0.algorithm] = arg_0._hash_data(arg_0.algorithm)\n else:\n # All the other case are met.\n\n # We return None.\n return None\n else: # pragma: no 
cover\n # The parsed algorithm is not in the list of valid algorithms.\n return None\n\n if arg_0.algorithm != \"all\" and arg_0.only_hash:\n # * The parsed algorithm is not equal to `all`.\n # and\n # * We only have to return the selected hash.\n\n # We return the selected algorithm.\n return arg_1[arg_0.algorithm]\n\n # * The parsed algorithm is equal to `all`.\n # or\n # * We do not have to return the selected hash.\n\n # We return all hashes.\n return arg_1"} +{"_id": "doc_5712", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Remove a given key from a given dictionary.\n\n :param key_to_remove: The key(s) to delete.\n :type key_to_remove: list|str\n\n :return: The dict without the given key(s).\n :rtype: dict|None\n \"\"\"\n\n if isinstance(arg_0.main_dictionnary, dict):\n # The main dictionnary is a dictionnary\n\n if isinstance(arg_1, list):\n # The parsed key to remove is a list.\n\n for arg_2 in arg_1:\n # We loop through the list of key to remove.\n\n # We delete the key from the dictionnary.\n del arg_0.main_dictionnary[arg_2]\n else:\n # The parsed key to remove is not a list.\n\n try:\n # We delete the given key from the dictionnary.\n del arg_0.main_dictionnary[arg_1]\n except KeyError:\n pass\n\n # We return the final dictionnary.\n return arg_0.main_dictionnary\n\n # The main dictionnary is not a dictionnary.\n\n # We return None.\n return None"} +{"_id": "doc_5713", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Rename the given keys from the given dictionary.\n\n :param key_to_rename:\n The key(s) to rename.\n Expected format: :code:`{old:new}`\n :type key_to_rename: dict\n\n :param strict:\n Tell us if we have to rename the exact index or\n the index which looks like the given key(s)\n\n :return: The well formatted dict.\n :rtype: dict|None\n \"\"\"\n\n if isinstance(arg_0.main_dictionnary, dict) and isinstance(arg_1, dict):\n # * The given main directory is a dictionnary.\n # and\n # * The given key to rename is a dictionnary.\n\n for arg_3, arg_4 in arg_1.items():\n # We loop through the key to raname.\n\n if arg_2:\n # The strict method is activated.\n if arg_3 in arg_0.main_dictionnary:\n # The old key is in the main dictionnary.\n\n # We initiate the new with the old and remove the old content.\n arg_0.main_dictionnary[arg_4] = arg_0.main_dictionnary.pop(arg_3)\n else:\n # The strict method is not activated.\n\n # We initiate the elements to rename.\n arg_6 = {}\n\n for arg_7 in arg_0.main_dictionnary:\n # We loop throught the indexes of the main dictionnary.\n\n if arg_3 in arg_7:\n # The old key is into the index name.\n\n # We append the index name and the new index to our\n # local list to rename.\n arg_6.update({arg_7: arg_4[:-1] + arg_7.split(arg_3)[-1]})\n\n # We run this method against the local list to rename in order\n # to rename the element.\n arg_0.main_dictionnary = Dict(arg_0.main_dictionnary).Func(\n arg_6, True\n )\n\n # We return the final list.\n return arg_0.main_dictionnary\n\n # * The given main directory is not a dictionnary.\n # or\n # * The given key to rename is not a dictionnary.\n\n # We return None.\n return None"} +{"_id": "doc_5714", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Merge the content of to_Func into the given main dictionnary.\n\n :param to_Func: The dictionnary to Func.\n :type to_Func: dict\n\n :param strict:\n Tell us if we have to strictly Func lists.\n\n :code:`True`: We follow index\n :code`False`: We follow element (content)\n :type strict: bool\n\n :return: The 
Funcd dict.\n :rtype: dict\n \"\"\"\n\n # We initiate a variable which will save our result.\n arg_3 = {}\n\n for arg_4 in arg_1:\n # We loop through the given dict to Func.\n\n if arg_4 in arg_0.main_dictionnary:\n # The currently read element is in the main dict.\n\n if isinstance(arg_1[arg_4], dict) and isinstance(\n arg_0.main_dictionnary[arg_4], dict\n ):\n # They are dict on both sides.\n\n # We Func the dict tree and save into result.\n arg_3[arg_4] = Dict(arg_0.main_dictionnary[arg_4]).Func(\n arg_1[arg_4]\n )\n\n elif isinstance(arg_1[arg_4], list) and isinstance(\n arg_0.main_dictionnary[arg_4], list\n ):\n # They are list on both sides.\n\n # We Func the lists and save into result.\n arg_3[arg_4] = List(arg_0.main_dictionnary[arg_4]).Func(\n arg_1[arg_4], arg_2\n )\n else:\n # They are neither list nor dict.\n\n # We append the currently read element to the result.\n arg_3.update({arg_4: arg_1[arg_4]})\n else:\n # The currently read element is not in the main\n # dict.\n\n # We append the currently read element to the result.\n arg_3.update({arg_4: arg_1[arg_4]})\n\n for arg_4 in arg_0.main_dictionnary:\n # We loop through each element of the main dict.\n\n if arg_4 not in arg_3:\n # The currently read element is not in\n # the result.\n\n # We append it to the result.\n arg_3[arg_4] = arg_0.main_dictionnary[arg_4]\n\n # We return the result.\n return arg_3"}
{"_id": "doc_5715", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Save a dictionary into a JSON file.\n\n :param destination:\n A path to a file where we're going to\n write the converted dict into a JSON format.\n :type destination: str\n \"\"\"\n\n try:\n with open(arg_1, \"w\") as file:\n # We open the file we are going to write.\n # Note: We always overwrite the destination.\n\n # We save the current dictionary into a json format.\n dump(\n arg_0.main_dictionnary,\n file,\n ensure_ascii=False,\n indent=4,\n sort_keys=True,\n )\n except UnicodeEncodeError: # pragma: no cover\n with open(arg_1, \"w\", encoding=\"utf-8\") as file:\n # We open the file we are going to write.\n # Note: We always overwrite the destination.\n\n # We save the current dictionary into a json format.\n dump(\n arg_0.main_dictionnary,\n file,\n ensure_ascii=False,\n indent=4,\n sort_keys=True,\n )"}
{"_id": "doc_5716", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Fix the format of the given path.\n\n :param splited_path: A list to convert to the right path.\n :type splited_path: list\n\n :return: The fixed path.\n :rtype: str\n \"\"\"\n\n if not arg_1:\n # No split path is parsed.\n\n # We initiate a variable which will save the split path.\n arg_2 = []\n\n if arg_0.directory:\n # The parsed directory is not empty nor equal to None.\n\n if \"/\" in arg_0.directory:\n # We split the separator.\n arg_2 = arg_0.directory.split(\"/\")\n elif \"\\\\\" in arg_0.directory:\n # We split the separator.\n arg_2 = arg_0.directory.split(\"\\\\\")\n else:\n arg_2 = [arg_0.directory]\n\n # We run the same function with the splited_path argument filled.\n return arg_0.Func(arg_1=[arg_3 for arg_3 in arg_2 if arg_3])\n\n # We return the directory.\n return arg_0.directory\n\n # We join the split elements with the directory separator as glue.\n return directory_separator.join(arg_1) + directory_separator"}
{"_id": "doc_5717", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Read a given file path and return its content.\n\n :return: The content of the given file path.\n :rtype: str\n \"\"\"\n\n try:\n with open(arg_0.file, \"r\", 
encoding=\"utf-8\") as file:\n # We open and Func a file.\n\n # We get the file content.\n arg_1 = file.Func()\n except UnicodeDecodeError: # pragma: no cover\n with open(arg_0.file, \"r\") as file:\n # We open and Func a file.\n\n # We get the file content.\n arg_1 = file.Func()\n\n # We return the file content.\n return arg_1"} +{"_id": "doc_5718", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a well Functed list. Basicaly, it's sort a list and remove duplicate.\n\n :return: A sorted, without duplicate, list.\n :rtype: list\n \"\"\"\n\n try:\n return sorted(list(set(arg_0.main_list)), key=str.lower)\n\n except TypeError: # pragma: no cover\n return arg_0.main_list"} +{"_id": "doc_5719", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a list of string which don't match the\n given regex.\n \"\"\"\n\n arg_1 = comp(arg_0.regex)\n\n return [arg_2 for arg_2 in arg_0.data if not arg_1.search(str(arg_2))]"} +{"_id": "doc_5720", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Used to get exploitable result of re.search\n\n :return: The data of the Func status.\n :rtype: mixed\n \"\"\"\n\n # We initate this variable which gonna contain the returned data\n arg_1 = []\n\n # We compile the regex string\n arg_2 = comp(arg_0.regex)\n\n # In case we have to use the implementation of ${BASH_REMATCH} we use\n # re.findall otherwise, we use re.search\n if arg_0.reFunc: # pylint: disable=no-member\n arg_3 = arg_2.findall(arg_0.data)\n else:\n arg_3 = arg_2.search(arg_0.data)\n\n if arg_0.return_data and arg_3: # pylint: disable=no-member\n if arg_0.reFunc: # pylint: disable=no-member\n for arg_4 in arg_3:\n if isinstance(arg_4, tuple):\n arg_1.extend(list(arg_4))\n else:\n arg_1.append(arg_4)\n\n if arg_0.group != 0: # pylint: disable=no-member\n return arg_1[arg_0.group] # pylint: disable=no-member\n\n else:\n arg_1 = arg_3.group(\n arg_0.group # pylint: disable=no-member\n ).strip()\n\n return arg_1\n\n if not arg_0.return_data and arg_3: # pylint: disable=no-member\n return True\n\n return False"} +{"_id": "doc_5721", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Used to Func a matched string with another.\n\n :return: The data after Funcment.\n :rtype: str\n \"\"\"\n\n if arg_0.Func_with: # pylint: disable=no-member\n return substrings(\n arg_0.regex,\n arg_0.Func_with, # pylint: disable=no-member\n arg_0.data,\n arg_0.occurences, # pylint: disable=no-member\n )\n\n return arg_0.data"} +{"_id": "doc_5722", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Print on screen and on file the percentages for each status.\n \"\"\"\n\n if (\n PyFunceble.CONFIGURATION[\"show_percentage\"]\n and PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] > 0\n ):\n # * We are allowed to show the percentage on screen.\n # and\n # * The number of tested is greater than 0.\n\n # We initiate the output file.\n arg_1 = (\n PyFunceble.OUTPUT_DIRECTORY\n + PyFunceble.OUTPUTS[\"parent_directory\"]\n + PyFunceble.OUTPUTS[\"Funcs\"][\"directories\"][\"parent\"]\n + PyFunceble.OUTPUTS[\"Funcs\"][\"directories\"][\"percentage\"]\n + PyFunceble.OUTPUTS[\"Funcs\"][\"filenames\"][\"percentage\"]\n )\n\n # We delete the output file if it does exist.\n File(arg_1).delete()\n\n # We calculate the percentage of each statuses.\n arg_0._calculate()\n\n if not PyFunceble.CONFIGURATION[\"quiet\"]:\n # The quiet mode is activated.\n\n # We print a new line.\n print(\"\\n\")\n\n # We print the percentage header on file and screen.\n Prints(None, \"Percentage\", arg_1).header()\n\n # We construct the different 
lines/data to print on screen and file.\n arg_2 = [\n [\n PyFunceble.STATUS[\"official\"][\"up\"],\n str(PyFunceble.INTERN[\"counter\"][\"percentage\"][\"up\"]) + \"%\",\n PyFunceble.INTERN[\"counter\"][\"number\"][\"up\"],\n ],\n [\n PyFunceble.STATUS[\"official\"][\"down\"],\n str(PyFunceble.INTERN[\"counter\"][\"percentage\"][\"down\"]) + \"%\",\n PyFunceble.INTERN[\"counter\"][\"number\"][\"down\"],\n ],\n [\n PyFunceble.STATUS[\"official\"][\"invalid\"],\n str(PyFunceble.INTERN[\"counter\"][\"percentage\"][\"invalid\"])\n + \"%\",\n PyFunceble.INTERN[\"counter\"][\"number\"][\"invalid\"],\n ],\n ]\n\n if PyFunceble.CONFIGURATION[\"syntax\"]:\n # We are checking for syntax.\n\n # We update the denomination of the UP.\n arg_2[0][0] = PyFunceble.STATUS[\"official\"][\"valid\"]\n\n # And we unset the INACTIVE line.\n del arg_2[1]\n\n for arg_3 in arg_2:\n # We loop through the different lines to print.\n # (one line for each status.)\n\n # And we print the current status line on file and screen.\n Prints(arg_3, \"Percentage\", arg_1).data()\n\n elif PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] > 0:\n # * We are not allowed to show the percentage on screen.\n # but\n # * The number of tested is greater than 0.\n\n # We run the calculation.\n # Note: The following is needed, because all counter calculations are\n # done by this class.\n arg_0._calculate()"}
{"_id": "doc_5723", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False, arg_3=False):\n \"\"\"\n Check if the given URL is valid.\n\n :param url: The url to validate.\n :type url: str\n\n :param return_base:\n Allow us to return the url base (if the URL is formatted correctly).\n :type return_base: bool\n\n :param return_formatted:\n Allow us to get the URL converted to IDNA if the conversion\n is activated.\n :type return_formatted: bool\n\n\n :return: The validity of the URL or its base.\n :rtype: bool|str\n \"\"\"\n\n # We initiate a variable which will save the initial base in case\n # we have to convert the base to IDNA.\n arg_4 = None\n\n if arg_1:\n # The given url is not empty.\n\n # We initiate the element to test.\n arg_5 = arg_1\n elif arg_0.element:\n # The globally given url is not empty.\n\n # We initiate the element to test.\n arg_5 = arg_0.element\n else:\n # The given url is empty.\n\n # We initiate the element to test from the globally given URL to test.\n arg_5 = PyFunceble.INTERN[\"to_test\"]\n\n if arg_5.startswith(\"http\"):\n # The element to test starts with http.\n\n try:\n # We initiate a regex which will match the domain or the url base.\n arg_6 = r\"(^(http:\\/\\/|https:\\/\\/)(.+?(?=\\/)|.+?$))\"\n\n # We extract the url base with the help of the initiated regex.\n arg_4 = arg_7 = Regex(\n arg_5, arg_6, return_data=True, rematch=True\n ).match()[2]\n\n if PyFunceble.CONFIGURATION[\"idna_conversion\"]:\n # We have to convert the domain to IDNA.\n\n # We convert the initial base to IDNA.\n arg_7 = domain2idna(arg_7)\n\n # We check if the url base is a valid domain.\n arg_8 = arg_0.is_domain_valid(arg_7)\n\n # We check if the url base is a valid IP.\n arg_9 = arg_0.is_ip_valid(arg_7)\n\n if arg_8 or arg_9:\n # * The url base is a valid domain.\n # or\n # * The url base is a valid IP.\n\n if PyFunceble.CONFIGURATION[\"idna_conversion\"] and arg_3:\n # * We have to convert to IDNA.\n # and\n # * We have to return the converted full URL.\n\n # We return the converted full URL.\n return Regex(\n arg_5,\n arg_4,\n escape=True,\n return_data=True,\n replace_with=arg_7,\n occurences=1,\n ).replace()\n\n 
if arg_3:\n # * We do not have to convert to IDNA.\n # but\n # * We have to return the full URL.\n\n # We return the initially given URL.\n return arg_5\n\n if arg_2:\n # We have to return the base of the URL.\n\n # We return the base of the URL.\n return arg_7\n\n # We return True.\n return True\n except TypeError:\n pass\n\n if arg_3:\n # We have to return a URL.\n\n # We return the initially given URL.\n return arg_5\n\n # We return False.\n return False"}
{"_id": "doc_5724", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Check if the given domain is a subdomain.\n\n :param domain: The domain to validate.\n :type domain: str\n\n :return: The validity of the subdomain.\n :rtype: bool\n \"\"\"\n\n if arg_1:\n # A domain is given.\n\n # We set the element to test as the parsed domain.\n arg_2 = arg_1\n elif arg_0.element:\n # A domain is globally given.\n\n # We set the globally parsed domain.\n arg_2 = arg_0.element\n else:\n # A domain is not given.\n\n # We set the element to test as the currently tested element.\n arg_2 = PyFunceble.INTERN[\"to_test\"]\n\n # We return the status of the check.\n return arg_0.is_domain_valid(arg_2, subdomain_check=True)"}
{"_id": "doc_5725", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Execute the logic behind the Syntax handling.\n\n :return: The syntax status.\n :rtype: str\n \"\"\"\n\n if PyFunceble.INTERN[\"to_test_type\"] == \"domain\":\n # We are testing for domain or ip.\n\n if Check().is_domain_valid() or Check().is_ip_valid():\n # * The domain is valid.\n # or\n # * The IP is valid.\n\n # We handle and return the valid status.\n return SyntaxStatus(PyFunceble.STATUS[\"official\"][\"valid\"]).handle()\n elif PyFunceble.INTERN[\"to_test_type\"] == \"url\":\n # We are testing for URL.\n\n if Check().is_url_valid():\n # * The url is valid.\n\n # We handle and return the valid status.\n return SyntaxStatus(PyFunceble.STATUS[\"official\"][\"valid\"]).handle()\n else:\n raise Exception(\"Unknown test type.\")\n\n # We handle and return the invalid status.\n return SyntaxStatus(PyFunceble.STATUS[\"official\"][\"invalid\"]).handle()"}
{"_id": "doc_5726", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the current content of the inactive-db.json file.\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"inactive_database\"]:\n # The database subsystem is activated.\n\n # We get, format and initiate the historical database file.\n arg_0._reformat_historical_formating_error()\n\n if PyFunceble.path.isfile(arg_0.inactive_db_path):\n # The database file exists.\n\n # We merge our current database into the already initiated one.\n arg_0._merge()"}
{"_id": "doc_5727", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Save the current database into the inactive-db.json file.\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"inactive_database\"]:\n # The database subsystem is activated.\n\n # We save the current database state into the database file.\n Dict(PyFunceble.INTERN[\"inactive_db\"]).to_json(arg_0.inactive_db_path)"}
{"_id": "doc_5728", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the timestamp where we are going to save our current list.\n\n :return: The timestamp to append with the currently tested element.\n :rtype: int|str\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"inactive_database\"]:\n # The database subsystem is activated.\n\n if (\n \"inactive_db\" in PyFunceble.INTERN\n and PyFunceble.INTERN[\"file_to_test\"]\n in PyFunceble.INTERN[\"inactive_db\"]\n and 
PyFunceble.INTERN[\"inactive_db\"][PyFunceble.INTERN[\"file_to_test\"]]\n ):\n # The file we are testing is into the database and its content\n # is not empty.\n\n # We get the indexes of the current file (in the dabase).\n arg_1 = [\n x\n for x in PyFunceble.INTERN[\"inactive_db\"][\n PyFunceble.INTERN[\"file_to_test\"]\n ].keys()\n if x.isdigit()\n ]\n\n if arg_1:\n # The list of keys is not empty.\n\n # We get the most recent date.\n arg_2 = max(arg_1)\n else: # pragma: no cover\n # The list of keys is empty.\n\n # We return the current time.\n return int(PyFunceble.time())\n\n if int(PyFunceble.time()) > int(arg_2) + arg_0.one_day_in_seconds:\n # The most recent time was in more than one day.\n\n # We return the current time.\n return int(PyFunceble.time())\n\n # The most recent time was in less than one day.\n\n if int(PyFunceble.time()) < int(arg_2) + arg_0.days_in_seconds:\n # The most recent time was in less than the expected number of day for\n # retesting.\n\n # We return the most recent data.\n return int(arg_2)\n\n # The database subsystem is not activated.\n\n # We return the current time.\n return int(PyFunceble.time())"} +{"_id": "doc_5729", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the Func of the database.\n\n :return: The Func of the database.\n :rtype: list\n \"\"\"\n\n # We initiate a variable which will save what we are going to return.\n arg_1 = []\n\n if (\n PyFunceble.CONFIGURATION[\"inactive_database\"]\n and PyFunceble.INTERN[\"inactive_db\"]\n ):\n # * The database subsystem is activated.\n # and\n # * The database is not empty.\n\n for arg_2 in PyFunceble.INTERN[\"inactive_db\"][\n PyFunceble.INTERN[\"file_to_test\"]\n ]:\n # We loop through the index of the current file database.\n\n if arg_2 == \"to_test\":\n # The current key is `to_test`.\n\n # We continue to the next element.\n continue\n\n # We extend the result with the Func of the currently read index.\n arg_1.extend(\n PyFunceble.INTERN[\"inactive_db\"][PyFunceble.INTERN[\"file_to_test\"]][\n arg_2\n ]\n )\n\n # We return the Func of the database.\n return arg_1"} +{"_id": "doc_5730", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the currently tested element is into the database.\n \"\"\"\n\n if PyFunceble.CONFIGURATION[\"inactive_database\"]:\n # The database subsystem is activated.\n\n if PyFunceble.INTERN[\"to_test\"] in PyFunceble.INTERN[\n \"flatten_inactive_db\"\n ] or (\n PyFunceble.INTERN[\"file_to_test\"] in PyFunceble.INTERN[\"inactive_db\"]\n and PyFunceble.INTERN[\"inactive_db\"][PyFunceble.INTERN[\"file_to_test\"]]\n and \"to_test\"\n in PyFunceble.INTERN[\"inactive_db\"][PyFunceble.INTERN[\"file_to_test\"]]\n and PyFunceble.INTERN[\"to_test\"]\n in PyFunceble.INTERN[\"inactive_db\"][PyFunceble.INTERN[\"file_to_test\"]][\n \"to_test\"\n ]\n ):\n return True\n\n return False"} +{"_id": "doc_5731", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Backup the database into its file.\n \"\"\"\n\n if arg_0._authorization():\n # We are authorized to work.\n\n # We backup the current state of the datbase.\n Dict(PyFunceble.INTERN[\"whois_db\"]).to_json(arg_0.whois_db_path)"} +{"_id": "doc_5732", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the current time is older than the one in the database.\n \"\"\"\n\n if (\n arg_0._authorization()\n and arg_0.is_in_database()\n and int(\n PyFunceble.INTERN[\"whois_db\"][PyFunceble.INTERN[\"file_to_test\"]][\n PyFunceble.INTERN[\"to_test\"]\n ][\"epoch\"]\n )\n < int(PyFunceble.time())\n ):\n # * We are authorized to 
work.\n # and\n # * The element we are testing is in the database.\n # and\n # * The epoch of the expiration date is less than our current epoch.\n\n # The expiration date is in the past, we return True.\n return True\n\n # The expiration date is in the future, we return False.\n return False"}
{"_id": "doc_5733", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None): # pragma: no cover\n \"\"\"\n Implementation of UNIX Func.\n\n :param Func_server: The WHOIS server to use to get the record.\n :type Func_server: str\n\n :param domain: The domain to get the Func record from.\n :type domain: str\n\n :param timeout: The timeout to apply to the request.\n :type timeout: int\n\n :return: The Func record from the given Func server, if it exists.\n :rtype: str|None\n \"\"\"\n\n if arg_2 is None:\n # The domain is not given (locally).\n\n # We consider the domain as the domain or IP we are currently testing.\n arg_2 = PyFunceble.INTERN[\"to_test\"]\n\n if arg_3 is None:\n # The time is not given (locally).\n\n # We consider the timeout from the configuration as the timeout to use.\n arg_3 = PyFunceble.CONFIGURATION[\"seconds_before_http_timeout\"]\n\n if arg_1:\n # A Func server is given.\n\n # We initiate a PyFunceble.socket.\n arg_4 = PyFunceble.socket.socket(\n PyFunceble.socket.AF_INET, PyFunceble.socket.SOCK_STREAM\n )\n\n if arg_3 % 3 == 0:\n # The timeout is a multiple of 3.\n\n # We report the timeout to our initiated PyFunceble.socket.\n arg_4.settimeout(arg_3)\n else:\n # The timeout is not a multiple of 3.\n\n # We report 3 seconds as the timeout to our initiated PyFunceble.socket.\n arg_4.settimeout(3)\n\n try:\n # We try to connect to the Func server at the port 43.\n arg_4.connect((arg_1, 43))\n except PyFunceble.socket.error:\n # We got an error.\n\n # We return None.\n return None\n\n # We send and encode the domain we want the data from.\n arg_4.send((arg_2 + \"\\r\\n\").encode())\n\n # We initiate a bytes variable which will save the response\n # from the server.\n arg_5 = b\"\"\n\n while True:\n # We loop infinitely.\n try:\n # We try to receive the data in a buffer of 4096 bytes.\n arg_6 = arg_4.recv(4096)\n except (PyFunceble.socket.timeout, ConnectionResetError):\n # We got an error.\n\n # We close the connection.\n arg_4.close()\n\n # And we return None.\n return None\n\n # Everything goes right.\n\n # We append data to the response we got.\n arg_5 += arg_6\n\n if not arg_6:\n # The data is empty.\n\n # We break the loop.\n break\n\n # We close the connection.\n arg_4.close()\n\n try:\n\n # We finally decode and return the response we got from the\n # server.\n return arg_5.decode()\n\n except UnicodeDecodeError:\n # We got an encoding error.\n\n # We decode the response.\n # Note: Because we don't want to deal with other issues, we\n # decided to use `replace` in order to automatically replace\n # all non utf-8 encoded characters.\n return arg_5.decode(\"utf-8\", \"replace\")\n\n # The Func server is not given.\n\n # We return None.\n return None"}
{"_id": "doc_5734", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Execute the logic behind the URL handling.\n\n :return: The status of the URL.\n :rtype: str\n \"\"\"\n\n if Check().is_url_valid() or arg_1.CONFIGURATION[\"local\"]:\n # * The url is valid.\n # or\n # * We are testing in/for a local or private network.\n\n if \"current_test_data\" in arg_1.INTERN:\n arg_1.INTERN[\"current_test_data\"][\"url_syntax_validation\"] = True\n\n # We initiate the HTTP status code.\n arg_1.INTERN.update({\"http_code\": 
HTTPCode().Func()})\n\n # We initiate the list of active status codes.\n arg_3 = []\n arg_3.extend(arg_1.HTTP_CODE[\"list\"][\"potentially_up\"])\n arg_3.extend(arg_1.HTTP_CODE[\"list\"][\"up\"])\n\n # We initiate the list of inactive status codes.\n arg_4 = []\n arg_4.extend(arg_1.HTTP_CODE[\"list\"][\"potentially_down\"])\n arg_4.append(\"*\" * 3)\n\n if arg_1.INTERN[\"http_code\"] in arg_3:\n # The extracted HTTP status code is in the list of active status codes.\n\n # We handle and return the up status.\n return URLStatus(arg_1.STATUS[\"official\"][\"up\"]).handle()\n\n if arg_1.INTERN[\"http_code\"] in arg_4:\n # The extracted HTTP status code is in the list of inactive status codes.\n\n # We handle and return the down status.\n return URLStatus(arg_1.STATUS[\"official\"][\"down\"]).handle()\n\n # The extracted HTTP status code is neither in the active nor in the inactive list.\n\n if \"current_test_data\" in arg_1.INTERN:\n # The end-user wants more information with his test.\n\n # We update the url_syntax_validation index.\n arg_1.INTERN[\"current_test_data\"][\"url_syntax_validation\"] = False\n\n # We handle and return the invalid status.\n return URLStatus(arg_1.STATUS[\"official\"][\"invalid\"]).handle()"}
{"_id": "doc_5735", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the referer aka the WHOIS server of the current domain extension.\n \"\"\"\n\n if not PyFunceble.CONFIGURATION[\"local\"]:\n # We are not running a test in a local network.\n\n if arg_0.domain_extension not in arg_0.ignored_extension:\n # The extension of the domain we are testing is not in\n # the list of ignored extensions.\n\n # We set the referer to None as we do not have any.\n arg_1 = None\n\n if arg_0.domain_extension in PyFunceble.INTERN[\"iana_db\"]:\n # The domain extension is in the iana database.\n\n if not PyFunceble.CONFIGURATION[\"no_whois\"]:\n # We are authorized to use WHOIS for the test result.\n\n # We Func the referer from the database.\n arg_1 = PyFunceble.INTERN[\"iana_db\"][arg_0.domain_extension]\n\n if not arg_1:\n # The referer is not filled.\n\n # We log the case of the current extension.\n Logs().referer_not_found(arg_0.domain_extension)\n\n # And we handle and return None status.\n return None\n\n # The referer is in the database.\n\n # We return the extracted referer.\n return arg_1\n\n # We are not authorized to use WHOIS for the test result.\n\n # We return None.\n return None\n\n # The domain extension is not in the iana database.\n\n # We return False, it is an invalid domain.\n return False\n\n # The extension of the domain we are testing is in the\n # list of ignored extensions.\n\n # We return None, the domain does not have a whois server.\n return None\n\n # We are running a test in a local network.\n\n # We return None.\n return None"}
{"_id": "doc_5736", "title": "", "text": "def Func(arg_0, arg_1=20, arg_2=None):\n if arg_2 is None:\n arg_2 = {}\n \"\"\"docstring for Func\"\"\"\n arg_3 = ''\n for arg_4 in arg_0:\n if arg_4[0] == sre_parse.IN:\n arg_3 += choice(_in(arg_4[1]))\n elif arg_4[0] == sre_parse.LITERAL:\n arg_3 += unichr(arg_4[1])\n elif arg_4[0] == sre_parse.CATEGORY:\n arg_3 += choice(CATEGORIES.get(arg_4[1], ['']))\n elif arg_4[0] == sre_parse.ANY:\n arg_3 += choice(CATEGORIES['category_any'])\n elif arg_4[0] == sre_parse.MAX_REPEAT or arg_4[0] == sre_parse.MIN_REPEAT:\n if arg_4[1][1] + 1 - arg_4[1][0] >= arg_1:\n arg_5, arg_6 = arg_4[1][0], arg_4[1][0] + arg_1 - 1\n else:\n arg_5, arg_6 = arg_4[1][0], arg_4[1][1]\n for arg_7 in range(randint(arg_5, 
arg_6)):\n arg_3 += Func(list(arg_4[1][2]), arg_1, arg_2)\n elif arg_4[0] == sre_parse.BRANCH:\n arg_3 += Func(choice(arg_4[1][1]), arg_1, arg_2)\n elif arg_4[0] == sre_parse.SUBPATTERN or arg_4[0] == sre_parse.ASSERT:\n arg_8 = arg_4[1][1]\n if IS_PY36_OR_GREATER and arg_4[0] == sre_parse.SUBPATTERN:\n arg_8 = arg_4[1][3]\n arg_9 = Func(arg_8, arg_1, arg_2)\n if arg_4[1][0]:\n arg_2[arg_4[1][0]] = arg_9\n arg_3 += arg_9\n elif arg_4[0] == sre_parse.AT:\n continue\n elif arg_4[0] == sre_parse.NOT_LITERAL:\n arg_10 = list(CATEGORIES['category_any'])\n if unichr(arg_4[1]) in arg_10:\n arg_10.remove(unichr(arg_4[1]))\n arg_3 += choice(arg_10)\n elif arg_4[0] == sre_parse.GROUPREF:\n arg_3 += arg_2[arg_4[1]]\n elif arg_4[0] == sre_parse.ASSERT_NOT:\n pass\n else:\n print('[!] cannot handle expression \"%s\"' % str(arg_4))\n\n return arg_3"} +{"_id": "doc_5737", "title": "", "text": "def Func(arg_0):\n \"\"\" Wrapper for Zotero._cleanup\n \"\"\"\n\n def enc(arg_1, *arg_2, **arg_3):\n \"\"\" Send each item to _cleanup() \"\"\"\n return (arg_0(arg_1, arg_4, **arg_3) for arg_4 in arg_2)\n\n return enc"} +{"_id": "doc_5738", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Add a retrieved template to the cache for 304 checking\n accepts a dict and key name, adds the retrieval time, and adds both\n to self.templates as a new dict using the specified key\n \"\"\"\n # cache template and retrieval time for subsequent calls\n arg_3 = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(\"GMT\"))\n arg_0.templates[arg_2] = {\"tmplt\": arg_1.json(), \"updated\": arg_3}\n return copy.deepcopy(arg_1.json())"} +{"_id": "doc_5739", "title": "", "text": "def Func(arg_0, arg_1, arg_2=()):\n \"\"\" Remove keys we added for internal use\n \"\"\"\n # this item's been retrieved from the API, we only need the 'data'\n # entry\n if arg_1.keys() == [\"links\", \"library\", \"version\", \"meta\", \"key\", \"data\"]:\n arg_1 = arg_1[\"data\"]\n return dict(\n [\n [arg_3, arg_4]\n for arg_3, arg_4 in list(arg_1.items())\n if (arg_3 in arg_2 or arg_3 not in arg_0.temp_keys)\n ]\n )"} +{"_id": "doc_5740", "title": "", "text": "def Func(arg_0):\n \"\"\" Return the contents of My Publications\n \"\"\"\n if arg_0.library_type != \"users\":\n raise ze.CallDoesNotExist(\n \"This API call does not exist for group libraries\"\n )\n arg_1 = \"/{t}/{u}/Func/items\"\n return arg_0._build_query(arg_1)"} +{"_id": "doc_5741", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return the total number of items in the specified collection\n \"\"\"\n arg_2 = \"/{t}/{u}/collections/{c}/items\".format(\n u=arg_0.library_id, t=arg_0.library_type, c=arg_1.upper()\n )\n return arg_0._totals(arg_2)"} +{"_id": "doc_5742", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return the total number of items for the specified tag\n \"\"\"\n arg_2 = \"/{t}/{u}/tags/{ta}/items\".format(\n u=arg_0.library_id, t=arg_0.library_type, ta=arg_1\n )\n return arg_0._totals(arg_2)"} +{"_id": "doc_5743", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" General method for returning total counts\n \"\"\"\n arg_0.add_parameters(limit=1)\n arg_1 = arg_0._build_query(arg_1)\n arg_0._retrieve_data(arg_1)\n arg_0.url_params = None\n # extract the 'total items' figure\n return int(arg_0.request.headers[\"Total-Results\"])"} +{"_id": "doc_5744", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Retrieve info about the permissions associated with the\n key associated to the given Zotero instance\n \"\"\"\n arg_2 = 
\"/keys/{k}\".format(k=arg_0.api_key)\n return arg_0._build_query(arg_2)"} +{"_id": "doc_5745", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\" Get the last modified version\n \"\"\"\n arg_0.items(**arg_1)\n return int(arg_0.request.headers.get(\"last-modified-version\", 0))"} +{"_id": "doc_5746", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Retrieve all collections and subcollections. Works for top-level collections\n or for a specific collection. Works at all collection depths.\n \"\"\"\n Func = []\n\n def subcoll(arg_3):\n \"\"\" recursively add collections to a flat master list \"\"\"\n Func.append(arg_3)\n if arg_3[\"meta\"].get(\"numCollections\", 0) > 0:\n # add collection to master list & recur with all child\n # collections\n [\n subcoll(arg_4)\n for arg_4 in arg_0.everything(arg_0.collections_sub(arg_3[\"data\"][\"key\"]))\n ]\n\n # select all top-level collections or a specific collection and\n # children\n if arg_1:\n arg_5 = [arg_0.collection(arg_1)]\n else:\n arg_5 = arg_0.everything(arg_0.collections_top())\n [subcoll(arg_6) for arg_6 in arg_5]\n return Func"} +{"_id": "doc_5747", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\" Get subcollections for a specific collection\n \"\"\"\n arg_3 = \"/{t}/{u}/collections/{c}/collections\".format(\n u=arg_0.library_id, t=arg_0.library_type, c=arg_1.upper()\n )\n return arg_0._build_query(arg_3)"} +{"_id": "doc_5748", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve all items in the library for a particular query\n This method will override the 'limit' parameter if it's been set\n \"\"\"\n try:\n arg_2 = []\n arg_2.extend(arg_1)\n while arg_0.links.get(\"next\"):\n arg_2.extend(arg_0.follow())\n except TypeError:\n # we have a bibliography object ughh\n arg_2 = copy.deepcopy(arg_1)\n while arg_0.links.get(\"next\"):\n arg_2.entries.extend(arg_0.follow().entries)\n return arg_2"} +{"_id": "doc_5749", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return a list of dicts which are dumped CSL JSON\n \"\"\"\n arg_2 = []\n arg_3 = {}\n if arg_0.preserve_json_order:\n arg_3[\"object_pairs_hook\"] = OrderedDict\n for arg_4 in arg_1.entries:\n arg_2.append(json.loads(arg_4[\"content\"][0][\"value\"], **arg_3))\n arg_0.url_params = None\n return arg_2"} +{"_id": "doc_5750", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return a list of strings formatted as HTML citation entries\n \"\"\"\n arg_2 = []\n for arg_3 in arg_1.entries:\n arg_2.append(arg_3[\"content\"][0][\"value\"])\n arg_0.url_params = None\n return arg_2"} +{"_id": "doc_5751", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get a template for a new item\n \"\"\"\n # if we have a template and it hasn't been updated since we stored it\n arg_2 = \"Func_\" + arg_1\n arg_3 = \"/items/new?itemType={i}\".format(i=arg_1)\n if arg_0.templates.get(arg_2) and not arg_0._updated(\n arg_3, arg_0.templates[arg_2], arg_2\n ):\n return copy.deepcopy(arg_0.templates[arg_2][\"tmplt\"])\n # otherwise perform a normal request and cache the response\n arg_4 = arg_0._retrieve_data(arg_3)\n return arg_0._cache(arg_4, arg_2)"} +{"_id": "doc_5752", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Create attachments\n accepts a list of one or more attachment template dicts\n and an optional parent Item ID. 
If this is specified,\n attachments are created under this ID\n \"\"\"\n arg_3 = Zupload(arg_0, arg_1, arg_2)\n arg_4 = arg_3.upload()\n return arg_4"} +{"_id": "doc_5753", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Delete one or more saved searches by passing a list of one or more\n unique search keys\n \"\"\"\n arg_2 = {\"Zotero-Write-Token\": token()}\n arg_2.update(arg_0.default_headers())\n arg_3 = requests.delete(\n url=arg_0.endpoint\n + \"/{t}/{u}/searches\".format(t=arg_0.library_type, u=arg_0.library_id),\n arg_2=arg_2,\n params={\"searchKey\": \",\".join(arg_1)},\n )\n arg_0.request = arg_3\n try:\n arg_3.raise_for_status()\n except requests.exceptions.HTTPError:\n error_handler(arg_3)\n return arg_3.status_code"} +{"_id": "doc_5754", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"\n Add one or more tags to a retrieved item,\n then update it on the server\n Accepts a dict, and one or more tags to add to it\n Returns the updated item from the server\n \"\"\"\n # Make sure there's a tags field, or add one\n try:\n assert arg_1[\"data\"][\"tags\"]\n except AssertionError:\n arg_1[\"data\"][\"tags\"] = list()\n for arg_3 in arg_2:\n arg_1[\"data\"][\"tags\"].append({\"tag\": \"%s\" % arg_3})\n # make sure everything's OK\n assert arg_0.check_items([arg_1])\n return arg_0.update_item(arg_1)"} +{"_id": "doc_5755", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Update an existing item\n Accepts one argument, a dict containing Item data\n \"\"\"\n arg_3 = arg_0.check_items([arg_1])[0]\n if arg_2 is None:\n arg_4 = arg_1[\"version\"]\n else:\n arg_4 = arg_2\n arg_5 = arg_1[\"key\"]\n arg_6 = {\"If-Unmodified-Since-Version\": str(arg_4)}\n arg_6.update(arg_0.default_headers())\n arg_7 = requests.patch(\n url=arg_0.endpoint\n + \"/{t}/{u}/items/{id}\".format(\n t=arg_0.library_type, u=arg_0.library_id, id=arg_5\n ),\n arg_6=arg_6,\n data=json.dumps(arg_3),\n )\n arg_0.request = arg_7\n try:\n arg_7.raise_for_status()\n except requests.exceptions.HTTPError:\n error_handler(arg_7)\n return True"} +{"_id": "doc_5756", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Update existing items\n Accepts one argument, a list of dicts containing Item data\n \"\"\"\n arg_2 = [arg_0.check_items([p])[0] for p in arg_1]\n arg_3 = {}\n arg_3.update(arg_0.default_headers())\n # the API only accepts 50 items at a time, so we have to split\n # anything longer\n for arg_4 in chunks(arg_2, 50):\n arg_5 = requests.post(\n url=arg_0.endpoint\n + \"/{t}/{u}/items/\".format(t=arg_0.library_type, u=arg_0.library_id),\n arg_3=arg_3,\n data=json.dumps(arg_4),\n )\n arg_0.request = arg_5\n try:\n arg_5.raise_for_status()\n except requests.exceptions.HTTPError:\n error_handler(arg_5)\n return True"} +{"_id": "doc_5757", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Validate saved search conditions, raising an error if any contain invalid operators \"\"\"\n arg_2 = set(arg_0.searchkeys)\n arg_3 = set(arg_0.operators.keys())\n for arg_4 in arg_1:\n if set(arg_4.keys()) != arg_2:\n raise ze.ParamNotPassed(\n \"Keys must be all of: %s\" % \", \".join(arg_0.searchkeys)\n )\n if arg_4.get(\"operator\") not in arg_3:\n raise ze.ParamNotPassed(\n \"You have specified an unknown operator: %s\"\n % arg_4.get(\"operator\")\n )\n # dict keys of allowed operators for the current condition\n arg_5 = arg_0.conditions_operators.get(\n arg_4.get(\"condition\")\n )\n # transform these into values\n arg_6 = set(\n [arg_0.operators.get(op) for op in arg_5]\n )\n if 
arg_4.get(\"operator\") not in arg_6:\n raise ze.ParamNotPassed(\n \"You may not use the '%s' operator when selecting the '%s' condition. \\nAllowed operators: %s\"\n % (\n arg_4.get(\"operator\"),\n arg_4.get(\"condition\"),\n \", \".join(list(arg_6)),\n )\n )"} +{"_id": "doc_5758", "title": "", "text": "def Func(arg_0):\n \"\"\"Split a multiline string into a list, excluding blank lines.\"\"\"\n return [arg_1 for arg_1 in (arg_2.strip() for arg_2 in arg_0.split('\\n'))\n if arg_1]"} +{"_id": "doc_5759", "title": "", "text": "def Func(arg_0):\n \"\"\"Split a string with comma or space-separated elements into a list.\"\"\"\n arg_1 = [v.strip() for v in arg_0.split(',')]\n if len(arg_1) == 1:\n arg_1 = arg_0.split()\n return arg_1"} +{"_id": "doc_5760", "title": "", "text": "def Func(arg_0):\n \"\"\"Evaluate environment markers.\"\"\"\n def Func_str(arg_0):\n arg_1 = arg_0.split(';')\n if len(arg_1) < 2:\n return arg_0\n arg_2 = arg_1[1].lstrip()\n if not re.match(\"^((\\\\w+(\\\\.\\\\w+)?|'.*?'|\\\".*?\\\")\\\\s+\"\n '(in|==|!=|not in)\\\\s+'\n \"(\\\\w+(\\\\.\\\\w+)?|'.*?'|\\\".*?\\\")\"\n '(\\\\s+(or|and)\\\\s+)?)+$', arg_2):\n raise ValueError('bad environment marker: %r' % arg_2)\n arg_2 = re.sub(r\"(platform\\.\\w+)\", r\"\\1()\", arg_2)\n return arg_1[0] if eval(arg_2) else ''\n\n if isinstance(arg_0, list):\n arg_3 = []\n for arg_4 in arg_0:\n arg_4 = Func_str(arg_4)\n if arg_4:\n arg_3.append(arg_4)\n elif isinstance(arg_0, str):\n arg_3 = Func_str(arg_0)\n else:\n arg_3 = arg_0\n\n return arg_3"} +{"_id": "doc_5761", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get configuration value.\"\"\"\n try:\n arg_3 = arg_0[arg_1][arg_2]\n except KeyError:\n if (arg_1, arg_2) in MULTI_OPTIONS:\n return []\n else:\n return ''\n if (arg_1, arg_2) in MULTI_OPTIONS:\n arg_3 = split_multiline(arg_3)\n if (arg_1, arg_2) in ENVIRON_OPTIONS:\n arg_3 = eval_environ(arg_3)\n return arg_3"} +{"_id": "doc_5762", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Set configuration value.\"\"\"\n if isinstance(arg_3, list):\n arg_3 = '\\n'.join(arg_3)\n arg_0[arg_1][arg_2] = arg_3"} +{"_id": "doc_5763", "title": "", "text": "def Func(arg_0):\n \"\"\"Compatibility helper to use setup.cfg in setup.py.\"\"\"\n arg_1 = {}\n arg_2 = {\n 'metadata': [\n ('name', 'name'),\n ('author', 'author'),\n ('author-email', 'author_email'),\n ('maintainer', 'maintainer'),\n ('maintainer-email', 'maintainer_email'),\n ('home-page', 'url'),\n ('summary', 'description'),\n ('description', 'long_description'),\n ('download-url', 'download_url'),\n ('classifier', 'classifiers'),\n ('platform', 'platforms'),\n ('license', 'license'),\n ('keywords', 'keywords'),\n ],\n 'files': [\n ('packages_root', 'package_dir'),\n ('packages', 'packages'),\n ('modules', 'py_modules'),\n ('scripts', 'scripts'),\n ('package_data', 'package_data'),\n ('data_files', 'data_files'),\n ],\n }\n\n arg_2['metadata'].append(('requires-dist', 'install_requires'))\n if IS_PY2K and not which('3to2'):\n arg_1['setup_requires'] = ['3to2']\n arg_1['zip_safe'] = False\n\n for arg_3 in arg_2:\n for arg_4, arg_5 in arg_2[arg_3]:\n arg_6 = get_cfg_value(arg_0, arg_3, arg_4)\n if arg_6:\n arg_1[arg_5] = arg_6\n\n if 'long_description' not in arg_1:\n arg_1['long_description'] = read_description_file(arg_0)\n\n if 'package_dir' in arg_1:\n arg_1['package_dir'] = {'': arg_1['package_dir']}\n\n if 'keywords' in arg_1:\n arg_1['keywords'] = split_elements(arg_1['keywords'])\n\n if 'package_data' in arg_1:\n arg_1['package_data'] = 
get_package_data(arg_1['package_data'])\n\n if 'data_files' in arg_1:\n arg_1['data_files'] = get_data_files(arg_1['data_files'])\n\n arg_1['version'] = get_version()\n\n if not IS_PY2K:\n arg_1['test_suite'] = 'test'\n\n return arg_1"} +{"_id": "doc_5764", "title": "", "text": "def Func():\n \"\"\"Get LanguageTool version.\"\"\"\n arg_0 = _get_attrib().get('version')\n if not arg_0:\n arg_1 = re.search(r\"LanguageTool-?.*?(\\S+)$\", get_directory())\n if arg_1:\n arg_0 = arg_1.group(1)\n return arg_0"} +{"_id": "doc_5765", "title": "", "text": "def Func() -> set:\n \"\"\"Get supported languages.\"\"\"\n try:\n arg_0 = arg_1['languages']\n except KeyError:\n arg_0 = LanguageTool._Func()\n arg_1['languages'] = arg_0\n return arg_0"} +{"_id": "doc_5766", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Set LanguageTool directory.\"\"\"\n arg_1 = get_directory()\n terminate_server()\n arg_2.clear()\n if arg_0:\n arg_2['language_check_dir'] = arg_0\n try:\n get_jar_info()\n except Error:\n arg_2['language_check_dir'] = arg_1\n raise"} +{"_id": "doc_5767", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3=None) -> [Match]:\n \"\"\"Match text against enabled rules.\"\"\"\n arg_4 = arg_0._get_root(arg_0._url, arg_0._encode(arg_1, arg_3))\n return [Match(arg_5.attrib) for arg_5 in arg_4 if arg_5.tag == 'error']"} +{"_id": "doc_5768", "title": "", "text": "def Func():\n \"\"\"Return newest compatible version.\n\n >>> version = Func()\n >>> version in [JAVA_6_COMPATIBLE_VERSION,\n ... JAVA_7_COMPATIBLE_VERSION,\n ... LATEST_VERSION]\n True\n\n \"\"\"\n arg_0 = find_executable('java')\n if not arg_0:\n # Just ignore this and assume an old version of Java. It might not be\n # found because of a PATHEXT-related issue\n # (https://bugs.python.org/issue2200).\n return JAVA_6_COMPATIBLE_VERSION\n\n arg_1 = subprocess.check_output([arg_0, '-version'],\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n\n arg_2 = parse_java_version(arg_1)\n\n if arg_2 >= (1, 8):\n return LATEST_VERSION\n elif arg_2 >= (1, 7):\n return JAVA_7_COMPATIBLE_VERSION\n elif arg_2 >= (1, 6):\n warn('language-check would be able to use a newer version of '\n 'LanguageTool if you had Java 7 or newer installed')\n return JAVA_6_COMPATIBLE_VERSION\n else:\n raise SystemExit(\n 'You need at least Java 6 to use language-check')"} +{"_id": "doc_5769", "title": "", "text": "def Func(arg_0):\n \"\"\"Get common directory in a zip file if any.\"\"\"\n arg_1 = arg_0.namelist()\n if arg_1 and all(arg_2.startswith(arg_1[0]) for arg_2 in arg_1[1:]):\n return arg_1[0]\n return None"} +{"_id": "doc_5770", "title": "", "text": "def Func(*arg_0):\n \"\"\"Make a Qt async slot run on asyncio loop.\"\"\"\n def outer_decorator(arg_1):\n @Slot(*arg_0)\n @functools.wraps(arg_1)\n def wrapper(*arg_0, **arg_2):\n asyncio.ensure_future(arg_1(*arg_0, **arg_2))\n return wrapper\n return outer_decorator"} +{"_id": "doc_5771", "title": "", "text": "def Func(arg_0):\n \"\"\"Class decorator to add a logger to a class.\"\"\"\n arg_1 = '_logger'\n arg_2 = arg_0.__qualname__\n arg_3 = arg_0.__module__\n if arg_3 is not None:\n arg_2 = arg_3 + '.' 
+ arg_2\n else:\n raise AssertionError\n setattr(arg_0, arg_1, logging.getLogger(arg_2))\n return arg_0"} +{"_id": "doc_5772", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Selector has delivered us an event.\"\"\"\n arg_0._logger.debug('Processing event with key {} and mask {}'.format(arg_1, arg_2))\n arg_3, (arg_4, arg_5) = arg_1.fileobj, arg_1.data\n if arg_2 & selectors.EVENT_READ and arg_4 is not None:\n if arg_4._cancelled:\n arg_0.remove_reader(arg_3)\n else:\n arg_0._logger.debug('Invoking reader callback: {}'.format(arg_4))\n arg_4._run()\n if arg_2 & selectors.EVENT_WRITE and arg_5 is not None:\n if arg_5._cancelled:\n arg_0.remove_writer(arg_3)\n else:\n arg_0._logger.debug('Invoking writer callback: {}'.format(arg_5))\n arg_5._run()"} +{"_id": "doc_5773", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add more ASN.1 MIB source repositories.\n\n MibCompiler.compile will invoke each of configured source objects\n in order of their addition asking each to fetch MIB module specified\n by name.\n\n Args:\n sources: reader object(s)\n\n Returns:\n reference to itself (can be used for call chaining)\n\n \"\"\"\n arg_0._sources.extend(arg_1)\n\n debug.logger & debug.flagCompiler and debug.logger(\n 'current MIB source(s): %s' % ', '.join([str(arg_2) for arg_2 in arg_0._sources]))\n\n return arg_0"} +{"_id": "doc_5774", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add more transformed MIBs repositories to borrow MIBs from.\n\n Whenever MibCompiler.compile encounters MIB module which neither of\n the *searchers* can find or fetched ASN.1 MIB module can not be\n parsed (due to syntax errors), these *borrowers* objects will be\n invoked in order of their addition asking each if already transformed\n MIB can be fetched (borrowed).\n\n Args:\n borrowers: borrower object(s)\n\n Returns:\n reference to itself (can be used for call chaining)\n\n \"\"\"\n arg_0._borrowers.extend(arg_1)\n\n debug.logger & debug.flagCompiler and debug.logger(\n 'current MIB borrower(s): %s' % ', '.join([str(arg_2) for arg_2 in arg_0._borrowers]))\n\n return arg_0"} +{"_id": "doc_5775", "title": "", "text": "def Func(arg_0):\n \"\"\"Get current object.\n This is useful if you want the real\n object behind the proxy at a time for performance reasons or because\n you want to pass the object into a different context.\n \"\"\"\n arg_1 = object.__getattribute__(arg_0, '_Proxy__local')\n if not hasattr(arg_1, '__release_local__'):\n return arg_1(*arg_0.__args, **arg_0.__kwargs)\n try: # pragma: no cover\n # not sure what this is about\n return getattr(arg_1, arg_0.__name__)\n except AttributeError: # pragma: no cover\n raise RuntimeError('no object bound to {0.__name__}'.format(arg_0))"} +{"_id": "doc_5776", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r\"\"\"Kullback information criterion\n\n .. math:: Func(k) = log(\\rho_k) + 3 \\frac{k+1}{N}\n\n :validation: double checked versus octave.\n \"\"\"\n from numpy import log, array\n arg_3 = log(arg_1) + 3. * (arg_2+1.) /float(arg_0)\n return arg_3"} +{"_id": "doc_5777", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r\"\"\"approximate corrected Kullback information\n\n .. math:: Func(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}\n\n \"\"\"\n from numpy import log, array\n arg_3 = arg_2\n arg_4 = log(arg_1) + arg_3/arg_0/(arg_0-arg_3) + (3.-(arg_3+2.)/arg_0) * (arg_3+1.) 
/ (arg_0-arg_3-2.)\n return arg_4"} +{"_id": "doc_5778", "title": "", "text": "def Func(arg_0,arg_1, arg_2=None):\n r\"\"\"Final prediction error criterion\n\n .. math:: Func(k) = \\frac{N + k + 1}{N - k - 1} \\rho_k\n\n :validation: double checked versus octave.\n\n \"\"\"\n #k #todo check convention. agrees with octave\n arg_3 = arg_1 * (arg_0 + arg_2 + 1.) / (arg_0- arg_2 -1)\n return arg_3"} +{"_id": "doc_5779", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r\"\"\"Minimum Description Length\n\n .. math:: Func(k) = N log \\rho_k + p \\log N\n\n :validation: results\n \"\"\"\n from numpy import log\n #p = arange(1, len(rho)+1)\n arg_3 = arg_0* log(arg_1) + arg_2 * log(arg_0)\n return arg_3"} +{"_id": "doc_5780", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate the Main examples gallery reStructuredText\n\n Start the sphinx-gallery configuration and recursively scan the examples\n directories in order to populate the examples gallery\n \"\"\"\n try:\n arg_1 = eval(arg_0.builder.config.plot_gallery)\n except TypeError:\n arg_1 = bool(arg_0.builder.config.plot_gallery)\n\n gallery_conf.update(arg_0.config.sphinx_gallery_conf)\n gallery_conf.update(arg_1=arg_1)\n gallery_conf.update(abort_on_example_error=arg_0.builder.config.abort_on_example_error)\n\n # this assures I can call the config in other places\n arg_0.config.sphinx_gallery_conf = gallery_conf\n arg_0.config.html_static_path.append(glr_path_static())\n\n clean_gallery_out(arg_0.builder.outdir)\n\n arg_4 = gallery_conf['examples_dirs']\n arg_5 = gallery_conf['gallery_dirs']\n\n if not isinstance(arg_4, list):\n arg_4 = [arg_4]\n if not isinstance(arg_5, list):\n arg_5 = [arg_5]\n\n arg_6 = os.path.relpath(gallery_conf['mod_example_dir'],\n arg_0.builder.srcdir)\n arg_7 = set()\n\n for arg_8, arg_9 in zip(arg_4, arg_5):\n arg_8 = os.path.relpath(arg_8,\n arg_0.builder.srcdir)\n arg_9 = os.path.relpath(arg_9,\n arg_0.builder.srcdir)\n\n for arg_10 in [arg_8, arg_9, arg_6]:\n if not os.path.exists(arg_10):\n os.makedirs(arg_10)\n\n # we create an index.rst with all examples\n arg_11 = open(os.path.join(arg_9, 'index.rst'), 'w')\n # Here we don't use an os.walk, but we recurse only twice: flat is\n # better than nested.\n arg_11.write(generate_dir_rst(arg_8, arg_9, gallery_conf,\n arg_7))\n for arg_12 in sorted(os.listdir(arg_8)):\n if os.path.isdir(os.path.join(arg_8, arg_12)):\n arg_13 = os.path.join(arg_8, arg_12)\n arg_14 = os.path.join(arg_9, arg_12)\n arg_11.write(generate_dir_rst(arg_13, arg_14,\n gallery_conf,\n arg_7))\n arg_11.flush()"} +{"_id": "doc_5781", "title": "", "text": "def Func(arg_0):\n \"\"\"Setup sphinx-gallery sphinx extension\"\"\"\n arg_0.add_config_value('plot_gallery', True, 'html')\n arg_0.add_config_value('abort_on_example_error', False, 'html')\n arg_0.add_config_value('sphinx_gallery_conf', gallery_conf, 'html')\n arg_0.add_stylesheet('gallery.css')\n\n arg_0.connect('builder-inited', generate_gallery_rst)\n\n arg_0.connect('build-finished', embed_code_links)"} +{"_id": "doc_5782", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3='unbiased'):\n r\"\"\"Correlation function\n\n This function should give the same results as :func:`xcorr` but it\n returns the positive lags only. Moreover the algorithm does not use\n FFT as compared to other algorithms.\n\n :param array x: first data array of length N\n :param array y: second data array of length N. 
If not specified, computes the\n autocorrelation.\n :param int maxlags: compute cross correlation between [0:maxlags]\n when maxlags is not specified, the range of lags is [0:maxlags].\n :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']\n\n * *biased* correlation=raw/N,\n * *unbiased* correlation=raw/(N-`|lag|`)\n * *coeff* correlation=raw/(rms(x).rms(y))/N\n * None correlation=raw\n\n :return:\n * a numpy.array correlation sequence, r[1,N]\n * a float for the zero-lag correlation, r[0]\n\n The *unbiased* correlation has the form:\n\n .. math::\n\n \\hat{r}_{xx} = \\frac{1}{N-m}T \\sum_{n=0}^{N-m-1} x[n+m]x^*[n] T\n\n The *biased* correlation differs by the front factor only:\n\n .. math::\n\n \\check{r}_{xx} = \\frac{1}{N}T \\sum_{n=0}^{N-m-1} x[n+m]x^*[n] T\n\n with :math:`0\\leq m\\leq N-1`.\n\n .. doctest::\n\n >>> from spectrum import Func\n >>> x = [1,2,3,4,5]\n >>> res = Func(x,x, maxlags=0, norm='biased')\n >>> res[0]\n 11.0\n\n .. note:: this function should be replaced by :func:`xcorr`.\n\n .. seealso:: :func:`xcorr`\n \"\"\"\n assert arg_3 in ['unbiased','biased', 'coeff', None]\n #transform lag into list if it is an integer\n arg_0 = np.array(arg_0)\n if arg_1 is None:\n arg_1 = arg_0\n else:\n arg_1 = np.array(arg_1)\n\n # N is the max of x and y\n arg_4 = max(len(arg_0), len(arg_1))\n if len(arg_0) < arg_4:\n arg_0 = arg_1.copy()\n arg_0.resize(arg_4)\n if len(arg_1) < arg_4:\n arg_1 = arg_1.copy()\n arg_1.resize(arg_4)\n\n #default lag is N-1\n if arg_2 is None:\n arg_2 = arg_4 - 1\n assert arg_2 < arg_4, 'lag must be less than len(x)'\n\n arg_5 = np.isrealobj(arg_0) and np.isrealobj(arg_1)\n #create an autocorrelation array with same length as lag\n if arg_5 == True:\n arg_6 = np.zeros(arg_2, dtype=float)\n else:\n arg_6 = np.zeros(arg_2, dtype=complex)\n\n if arg_3 == 'coeff':\n arg_7 = pylab_rms_flat(arg_0)\n arg_8 = pylab_rms_flat(arg_1)\n\n for arg_9 in range(0, arg_2+1):\n arg_10 = arg_4 - arg_9 - 1\n\n if arg_5 == True:\n arg_11 = 0\n for arg_12 in range(0, arg_10+1):\n arg_11 = arg_11 + arg_0[arg_12+arg_9] * arg_1[arg_12]\n else:\n arg_11 = 0. + 0j\n for arg_12 in range(0, arg_10+1):\n arg_11 = arg_11 + arg_0[arg_12+arg_9] * arg_1[arg_12].conjugate()\n if arg_9 == 0:\n if arg_3 in ['biased', 'unbiased']:\n arg_13 = arg_11/float(arg_4)\n elif arg_3 is None:\n arg_13 = arg_11\n else:\n arg_13 = 1.\n else:\n if arg_3 == 'unbiased':\n arg_6[arg_9-1] = arg_11 / float(arg_4-arg_9)\n elif arg_3 == 'biased':\n arg_6[arg_9-1] = arg_11 / float(arg_4)\n elif arg_3 is None:\n arg_6[arg_9-1] = arg_11\n elif arg_3 == 'coeff':\n arg_6[arg_9-1] = arg_11/(arg_7*arg_8)/float(arg_4)\n\n arg_6 = np.insert(arg_6, 0, arg_13)\n return arg_6"} +{"_id": "doc_5783", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3='biased'):\n \"\"\"Cross-correlation using numpy.correlate\n\n Estimates the cross-correlation (and autocorrelation) sequence of a random\n process of length N. By default, there is no normalisation and the output\n sequence of the cross-correlation has a length 2*N+1.\n\n :param array x: first data array of length N\n :param array y: second data array of length N. If not specified, computes the\n autocorrelation.\n :param int maxlags: compute cross correlation between [-maxlags:maxlags]\n when maxlags is not specified, the range of lags is [-N+1:N-1].\n :param str option: normalisation in ['biased', 'unbiased', None, 'coeff']\n\n The true cross-correlation sequence is\n\n .. 
math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])\n\n However, in practice, only a finite segment of one realization of the\n infinite-length random process is available.\n\n The correlation is estimated using numpy.correlate(x,y,'full').\n Normalisation is handled by this function using the following cases:\n\n * 'biased': Biased estimate of the cross-correlation function\n * 'unbiased': Unbiased estimate of the cross-correlation function\n * 'coeff': Normalizes the sequence so the autocorrelations at zero\n lag is 1.0.\n\n :return:\n * a numpy.array containing the cross-correlation sequence (length 2*N-1)\n * lags vector\n\n .. note:: If x and y are not the same length, the shorter vector is\n zero-padded to the length of the longer vector.\n\n .. rubric:: Examples\n\n .. doctest::\n\n >>> from spectrum import Func\n >>> x = [1,2,3,4,5]\n >>> c, l = Func(x,x, maxlags=0, norm='biased')\n >>> c\n array([ 11.])\n\n .. seealso:: :func:`CORRELATION`.\n \"\"\"\n arg_4 = len(arg_0)\n if arg_1 is None:\n arg_1 = arg_0\n assert len(arg_0) == len(arg_1), 'x and y must have the same length. Add zeros if needed'\n\n if arg_2 is None:\n arg_2 = arg_4-1\n arg_5 = np.arange(0, 2*arg_4-1)\n else:\n assert arg_2 <= arg_4, 'maxlags must be less than data length'\n arg_5 = np.arange(arg_4-arg_2-1, arg_4+arg_2)\n\n arg_6 = np.correlate(arg_0, arg_1, mode='full')\n\n if arg_3 == 'biased':\n arg_7 = float(arg_4)\n arg_6 = arg_6[arg_5] / float(arg_4) # do not use /= !!\n elif arg_3 == 'unbiased':\n arg_6 = arg_6[arg_5] / (float(arg_4)-abs(np.arange(-arg_4+1, arg_4)))[arg_5]\n elif arg_3 == 'coeff':\n arg_7 = float(arg_4)\n arg_8 = pylab_rms_flat(arg_0) * pylab_rms_flat(arg_1)\n arg_6 = arg_6[arg_5] / arg_8 / arg_7\n else:\n arg_6 = arg_6[arg_5]\n\n arg_5 = np.arange(-arg_2, arg_2+1)\n return arg_6, arg_5"} +{"_id": "doc_5784", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Finds the minimum eigenvalue of a Hermitian Toeplitz matrix\n\n The classical power method is used together with a fast Toeplitz\n equation solution routine. The eigenvector is normalized to unit length.\n\n :param T0: Scalar corresponding to real matrix element t(0)\n :param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix\n :param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.\n\n :return:\n * EVAL - Real scalar denoting the minimum eigenvalue of matrix\n * EVEC - Array of M complex eigenvector elements associated\n\n\n .. note::\n * External array T must be dimensioned >= M\n * array EVEC must be >= M+1\n * Internal array E must be dimensioned >= M+1 . \n\n * **dependencies**\n * :meth:`spectrum.toeplitz.HERMTOEP`\n \"\"\"\n arg_3 = len(arg_1)\n arg_4 = 10\n arg_5 = 1\n arg_6 = numpy.zeros(arg_3+1, dtype=complex)\n for arg_7 in range(0,arg_3+1):\n arg_6[arg_7] = 1+0j\n arg_8=0\n #print 'initialisation',T0, T, eigval, eigvec\n arg_9 = 15\n while abs(arg_5-arg_4)>arg_2*arg_5 and arg_8= 1:\n raise ValueError('All reflection coefficients should have magnitude less than unity.')\n\n # Use the relation, atanh(x) = (1/2)*log((1+k)/(1-k))\n return -2 * numpy.arctanh(-numpy.array(arg_0))"} +{"_id": "doc_5788", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert log area ratios to reflection coefficients.\n\n :param g: log area ratios\n :returns: the reflection coefficients\n\n .. seealso: :func:`rc2lar`, :func:`poly2rc`, :func:`ac2rc`, :func:`is2rc`.\n\n :References:\n [1] J. 
Makhoul, \"Linear Prediction: A Tutorial Review,\" Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.\n\n \"\"\"\n assert numpy.isrealobj(arg_0), 'Log area ratios not defined for complex reflection coefficients.'\n # Use the relation, tanh(x) = (1-exp(2x))/(1+exp(2x))\n return -numpy.tanh(-numpy.array(arg_0)/2)"} +{"_id": "doc_5789", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert line spectral frequencies to prediction filter coefficients\n\n returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.\n\n .. doctest::\n\n >>> from spectrum import Func\n >>> lsf = [0.7842 , 1.5605 , 1.8776 , 1.8984, 2.3593]\n >>> a = Func(lsf)\n\n # array([ 1.00000000e+00, 6.14837835e-01, 9.89884967e-01,\n # 9.31594056e-05, 3.13713832e-03, -8.12002261e-03 ])\n\n .. seealso:: poly2lsf, rc2poly, ac2poly, rc2is\n \"\"\"\n # Reference: A.M. Kondoz, \"Digital Speech: Coding for Low Bit Rate Communications\n # Systems\" John Wiley & Sons 1994 ,Chapter 4\n\n # Line spectral frequencies must be real.\n\n arg_0 = numpy.array(arg_0)\n\n if max(arg_0) > numpy.pi or min(arg_0) < 0:\n raise ValueError('Line spectral frequencies must be between 0 and pi.')\n\n arg_1 = len(arg_0) # model order\n\n # Form zeros using the LSFs and unit amplitudes\n arg_2 = numpy.exp(1.j * arg_0)\n\n # Separate the zeros to those belonging to P and Q\n arg_3 = arg_2[0::2]\n arg_4 = arg_2[1::2]\n\n # Include the conjugates as well\n arg_3 = numpy.concatenate((arg_3, arg_3.conjugate()))\n arg_4 = numpy.concatenate((arg_4, arg_4.conjugate()))\n\n # Form the polynomials P and Q, note that these should be real\n arg_5 = numpy.poly(arg_3);\n arg_6 = numpy.poly(arg_4);\n\n # Form the sum and difference filters by including known roots at z = 1 and\n # z = -1\n\n if arg_1%2:\n # Odd order: z = +1 and z = -1 are roots of the difference filter, P1(z)\n arg_7 = numpy.convolve(arg_6, [1, 0, -1])\n arg_8 = arg_5\n else:\n # Even order: z = -1 is a root of the sum filter, Q1(z) and z = 1 is a\n # root of the difference filter, P1(z)\n arg_7 = numpy.convolve(arg_6, [1, -1])\n arg_8 = numpy.convolve(arg_5, [1, 1])\n\n # Prediction polynomial is formed by averaging P1 and Q1\n\n arg_9 = .5 * (arg_7+arg_8)\n return arg_9[0:-1:1]"} +{"_id": "doc_5790", "title": "", "text": "def Func(arg_0):\n \"\"\"Prediction polynomial to line spectral frequencies.\n\n converts the prediction polynomial specified by A,\n into the corresponding line spectral frequencies, LSF.\n normalizes the prediction polynomial by A(1).\n\n .. doctest::\n\n >>> from spectrum import Func\n >>> a = [1.0000, 0.6149, 0.9899, 0.0000 ,0.0031, -0.0082]\n >>> lsf = Func(a)\n >>> lsf = array([0.7842, 1.5605, 1.8776, 1.8984, 2.3593])\n\n .. 
seealso:: lsf2poly, poly2rc, poly2qc, rc2is\n \"\"\"\n\n #Line spectral frequencies are not defined for complex polynomials.\n\n # Normalize the polynomial\n\n arg_0 = numpy.array(arg_0)\n if arg_0[0] != 1:\n arg_0/=arg_0[0]\n\n if max(numpy.abs(numpy.roots(arg_0))) >= 1.0:\n error('The polynomial must have all roots inside of the unit circle.');\n\n\n # Form the sum and differnce filters\n\n arg_1 = len(arg_0)-1 # The leading one in the polynomial is not used\n arg_2 = numpy.concatenate((arg_0, numpy.array([0])))\n arg_3 = arg_2[-1::-1]\n arg_4 = arg_2 - arg_3 # Difference filter\n arg_5 = arg_2 + arg_3 # Sum Filter\n\n # If order is even, remove the known root at z = 1 for P1 and z = -1 for Q1\n # If odd, remove both the roots from P1\n\n if arg_1%2: # Odd order\n arg_6, arg_7 = deconvolve(arg_4,[1, 0 ,-1])\n arg_8 = arg_5\n else: # Even order\n arg_6, arg_7 = deconvolve(arg_4, [1, -1])\n arg_8, arg_7 = deconvolve(arg_5, [1, 1])\n\n arg_9 = numpy.roots(arg_6)\n arg_10 = numpy.roots(arg_8)\n\n arg_11 = numpy.angle(arg_9[1::2])\n arg_12 = numpy.angle(arg_10[1::2])\n\n arg_13 = sorted(numpy.concatenate((-arg_11,-arg_12)))\n\n return arg_13"} +{"_id": "doc_5791", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a one-sided PSD to a twosided PSD\n\n In order to keep the power in the onesided PSD the same\n as in the twosided version, the onesided values are twice\n as much as in the input data (except for the zero-lag value).\n\n ::\n\n >>> Func([10, 2,3,3,2,8])\n array([ 10., 4., 6., 8.])\n\n \"\"\"\n assert len(arg_0) % 2 == 0\n arg_1 = len(arg_0)\n arg_2 = np.array(arg_0[0:arg_1//2+1]) * 2.\n arg_2[0] /= 2.\n arg_2[-1] = arg_0[-1]\n return arg_2"} +{"_id": "doc_5792", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a two-sided PSD to a one-sided PSD\n\n In order to keep the power in the twosided PSD the same\n as in the onesided version, the twosided values are 2 times\n lower than the input data (except for the zero-lag and N-lag\n values).\n\n ::\n\n >>> twosided_2_onesided([10, 4, 6, 8])\n array([ 10., 2., 3., 3., 2., 8.])\n\n \"\"\"\n arg_1 = np.concatenate((arg_0[0:-1], cshift(arg_0[-1:0:-1], -1)))/2.\n arg_1[0] *= 2.\n arg_1[-1] *= 2.\n return arg_1"} +{"_id": "doc_5793", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a two-sided PSD to a center-dc PSD\"\"\"\n arg_1 = len(arg_0)\n # could us int() or // in python 3\n arg_2 = np.concatenate((cshift(arg_0[arg_1//2:], 1), arg_0[0:arg_1//2]))\n arg_2[0] = arg_0[-1]\n return arg_2"} +{"_id": "doc_5794", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert a center-dc PSD to a twosided PSD\"\"\"\n arg_1 = len(arg_0)\n arg_2 = np.concatenate((arg_0[arg_1//2:], (cshift(arg_0[0:arg_1//2], -1))))\n return arg_2"} +{"_id": "doc_5795", "title": "", "text": "def Func(arg_0=200):\n \"\"\"A simple test example with two close frequencies\n\n \"\"\"\n arg_1 = arange(arg_0)\n arg_2 = cos(0.257*pi*arg_1) + sin(0.2*pi*arg_1) + 0.01*randn(arg_1.size)\n return arg_2"} +{"_id": "doc_5796", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Plot the data set, using the sampling information to set the x-axis\n correctly.\"\"\"\n from pylab import Func, linspace, xlabel, ylabel, grid\n arg_2 = linspace(1*arg_0.dt, arg_0.N*arg_0.dt, arg_0.N)\n Func(arg_2, arg_0.data, **arg_1)\n xlabel('Time')\n ylabel('Amplitude')\n grid(True)"} +{"_id": "doc_5797", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Returns the autocovariance of signal s at all lags.\n\n Adheres to the definition\n sxx[k] = E{S[n]S[n+k]} = cov{S[n],S[n+k]}\n 
where E{} is the expectation operator, and S is a zero mean process\n \"\"\"\n # only remove the mean once, if needed\n arg_2 = arg_1.pop('debias', True)\n arg_3 = arg_1.get('axis', -1)\n if arg_2:\n arg_0 = _remove_bias(arg_0, arg_3)\n arg_1['debias'] = False\n return _crosscov(arg_0, arg_0, **arg_1)"} +{"_id": "doc_5798", "title": "", "text": "def Func(arg_0):\n \"\"\"Separate `filename` content between docstring and the rest\n\n Strongly inspired from ast.get_docstring.\n\n Returns\n -------\n docstring: str\n docstring of `filename`\n rest: str\n `filename` content without the docstring\n \"\"\"\n with open(arg_0) as f:\n arg_1 = f.read()\n\n arg_2 = ast.parse(arg_1)\n if not isinstance(arg_2, ast.Module):\n raise TypeError(\"This function only supports modules. \"\n \"You provided {0}\".format(arg_2.__class__.__name__))\n if arg_2.body and isinstance(arg_2.body[0], ast.Expr) and \\\n isinstance(arg_2.body[0].value, ast.Str):\n arg_3 = arg_2.body[0]\n arg_4 = arg_3.value.s\n # This get the content of the file after the docstring last line\n # Note: 'maxsplit' argument is not a keyword argument in python2\n arg_5 = arg_1.split('\\n', arg_3.lineno)[-1]\n return arg_4, arg_5\n else:\n raise ValueError(('Could not find docstring in file \"{0}\". '\n 'A docstring is required by sphinx-gallery')\n .format(arg_0))"} +{"_id": "doc_5799", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns md5sum of file\"\"\"\n\n with open(arg_0, 'r') as src_data:\n arg_1 = src_data.read()\n\n # data needs to be encoded in python3 before hashing\n if sys.version_info[0] == 3:\n arg_1 = arg_1.encode('utf-8')\n\n arg_2 = hashlib.md5(arg_1).hexdigest()\n return arg_2"} +{"_id": "doc_5800", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns True if src_file has a different md5sum\"\"\"\n\n arg_1 = get_md5sum(arg_0)\n\n arg_2 = arg_0 + '.md5'\n arg_3 = True\n if os.path.exists(arg_2):\n with open(arg_2, 'r') as file_checksum:\n arg_4 = file_checksum.read()\n if arg_1 == arg_4:\n arg_3 = False\n\n if arg_3:\n with open(arg_2, 'w') as file_checksum:\n file_checksum.write(arg_1)\n\n return arg_3"} +{"_id": "doc_5801", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Test existence of image file and no change in md5sum of\n example\"\"\"\n\n arg_2 = arg_1.format(1)\n arg_3 = os.path.exists(arg_2)\n arg_4 = check_md5sum_change(arg_0)\n\n return arg_3 and not arg_4"} +{"_id": "doc_5802", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Save all open matplotlib figures of the example code-block\n\n Parameters\n ----------\n image_path : str\n Path where plots are saved (format string which accepts figure number)\n fig_count : int\n Previous figure number count. Figure number add from this number\n\n Returns\n -------\n list of strings containing the full path to each figure\n \"\"\"\n arg_3 = []\n\n arg_4 = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()\n for arg_5 in arg_4:\n # Set the fig_num figure as the current figure as we can't\n # save a figure that's not the current figure.\n arg_6 = plt.figure(arg_5.num)\n arg_7 = {}\n arg_8 = matplotlib.colors.colorConverter.to_rgba\n for arg_9 in ['facecolor', 'edgecolor']:\n arg_10 = getattr(arg_6, 'get_' + arg_9)()\n arg_11 = matplotlib.rcParams['figure.' 
+ arg_9]\n if arg_8(arg_10) != arg_8(arg_11):\n arg_7[arg_9] = arg_10\n\n arg_12 = arg_0.format(arg_1 + arg_5.num)\n arg_6.savefig(arg_12, **arg_7)\n arg_3.append(arg_12)\n\n if arg_2.get('find_mayavi_figures', False):\n from mayavi import mlab\n arg_13 = mlab.get_engine()\n arg_14 = len(arg_3)\n arg_15 = arg_14 + len(arg_13.scenes)\n arg_16 = range(arg_14, arg_15)\n\n for arg_17, arg_18 in zip(arg_13.scenes, arg_16):\n arg_12 = arg_0.format(arg_18)\n mlab.savefig(arg_12, figure=arg_17)\n # make sure the image is not too large\n scale_image(arg_12, arg_12, 850, 999)\n arg_3.append(arg_12)\n mlab.close(all=True)\n\n return arg_3"} +{"_id": "doc_5803", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Save the thumbnail image\"\"\"\n arg_3 = arg_0.format(1)\n arg_4 = os.path.join(os.path.dirname(arg_3), 'thumb')\n if not os.path.exists(arg_4):\n os.makedirs(arg_4)\n\n arg_5 = os.path.join(arg_4,\n 'sphx_glr_%s_thumb.png' % arg_1)\n\n if os.path.exists(arg_3):\n scale_image(arg_3, arg_5, 400, 280)\n elif not os.path.exists(arg_5):\n # create something to replace the thumbnail\n arg_6 = os.path.join(glr_path_static(), 'no_image.png')\n arg_6 = arg_2.get(\"default_thumb_file\",\n arg_6)\n scale_image(arg_6, arg_5, 200, 140)"} +{"_id": "doc_5804", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n \"\"\"Executes the code block of the example file\"\"\"\n arg_6 = 0\n arg_7 = ''\n\n # We need to execute the code\n print('plotting code blocks in %s' % arg_4)\n\n plt.close('all')\n arg_8 = os.getcwd()\n # Redirect output to stdout and\n arg_9 = arg_12.stdout\n\n try:\n # First cd in the original example dir, so that any file\n # created by the example get created in this directory\n os.chdir(os.path.dirname(arg_4))\n arg_10 = StringIO()\n arg_11 = Tee(arg_12.stdout, arg_10)\n arg_12.stdout = arg_11\n\n arg_13 = time()\n exec(arg_0, arg_1)\n arg_6 = time() - arg_13\n\n arg_12.stdout = arg_9\n\n arg_11 = arg_10.getvalue().strip().expandtabs()\n if arg_11:\n arg_7 = CODE_OUTPUT.format(indent(arg_11, ' ' * 4))\n os.chdir(arg_8)\n arg_14 = save_figures(arg_2, arg_3, arg_5)\n\n # Depending on whether we have one or more figures, we're using a\n # horizontal list or a single rst call to 'image'.\n arg_15 = \"\"\n if len(arg_14) == 1:\n arg_16 = arg_14[0]\n arg_15 = SINGLE_IMAGE % arg_16.lstrip('/')\n elif len(arg_14) > 1:\n arg_15 = HLIST_HEADER\n for arg_16 in arg_14:\n arg_15 += HLIST_IMAGE_TEMPLATE % arg_16.lstrip('/')\n\n except Exception:\n arg_17 = traceback.format_exc()\n\n print(80 * '_')\n print('%s is not compiling:' % arg_4)\n print(arg_17)\n print(80 * '_')\n\n arg_14 = []\n arg_15 = codestr2rst(arg_17, lang='pytb')\n\n # Overrides the output thumbnail in the gallery for easy identification\n arg_18 = os.path.join(glr_path_static(), 'broken_example.png')\n shutil.copyfile(arg_18, os.path.join(arg_8, arg_2.format(1)))\n arg_3 += 1 # raise count to avoid overwriting image\n\n # Breaks build on first example error\n\n if arg_5['abort_on_example_error']:\n raise\n\n finally:\n os.chdir(arg_8)\n arg_12.stdout = arg_9\n\n print(\" - time elapsed : %.2g sec\" % arg_6)\n arg_19 = \"\\n{0}\\n\\n{1}\\n\\n\".format(arg_15, arg_7)\n\n return arg_19, arg_6, arg_3 + len(arg_14)"} +{"_id": "doc_5805", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"This function solve Ax=B directly without taking care of the input\n matrix properties.\n \"\"\"\n arg_2 = numpy.linalg.solve(arg_0, arg_1)\n return arg_2"} +{"_id": "doc_5806", "title": "", "text": "def Func(arg_0, 
arg_1=None, arg_2=True, arg_3=1.,\n arg_4=True, arg_5='hamming', arg_6=0):\n \"\"\"Simple periodogram, but matrices accepted.\n\n :param x: an array or matrix of data samples.\n :param NFFT: length of the data before FFT is computed (zero padding)\n :param bool detrend: detrend the data before computing the FFT\n :param float sampling: sampling frequency of the input :attr:`data`.\n\n :param scale_by_freq:\n :param str window:\n\n :return: 2-sided PSD if complex data, 1-sided if real.\n\n If a matrix is provided (using numpy.matrix), then a periodogram\n is computed for each row. The returned matrix has the same shape as the input\n matrix.\n\n The mean of the input data is also removed from the data before computing\n the psd.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from pylab import grid, semilogy\n from spectrum import data_cosine, Func\n data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)\n semilogy(Func(data, detrend=False, sampling=1024), marker='o')\n grid(True)\n\n\n .. plot::\n :width: 80%\n :include-source:\n\n import numpy\n from spectrum import Func, data_cosine\n from pylab import figure, semilogy, figure ,imshow\n # create N data sets and make the frequency dependent on the time\n N = 100\n m = numpy.concatenate([data_cosine(N=1024, A=0.1, sampling=1024, freq=x) \n for x in range(1, N)]);\n m.resize(N, 1024)\n res = Func(m)\n figure(1)\n semilogy(res)\n figure(2)\n imshow(res.transpose(), aspect='auto')\n\n .. todo:: a proper spectrogram class/function that takes care of normalisation\n \"\"\"\n arg_0 = np.array(arg_0)\n # array with 1 dimension case\n if arg_0.ndim == 1:\n arg_6 = 0\n arg_7 = arg_0.shape[0]\n arg_8 = Window(arg_7, arg_5) #same size as input data\n arg_8 = arg_8.data\n # matrix case\n elif arg_0.ndim == 2:\n logging.debug('2D array. each row is a 1D array')\n [arg_7, arg_9] = arg_0.shape\n arg_8 = np.array([Window(arg_7, arg_5).data for this in range(arg_9)]).reshape(arg_7,arg_9) \n\n if arg_1 is None:\n arg_1 = len(arg_0)\n\n arg_10 = np.isrealobj(arg_0)\n\n if arg_2 == True:\n arg_11 = np.mean(arg_0, arg_6=arg_6)\n else:\n arg_11 = 0\n\n if arg_10 == True:\n if arg_0.ndim == 2:\n arg_12 = (abs (rfft (arg_0*arg_8 - arg_11, arg_1, arg_6=0))) ** 2. / arg_7\n else:\n arg_12 = (abs (rfft (arg_0*arg_8 - arg_11, arg_1, arg_6=-1))) ** 2. / arg_7\n else:\n if arg_0.ndim == 2:\n arg_12 = (abs (fft (arg_0*arg_8 - arg_11, arg_1, arg_6=0))) ** 2. / arg_7\n else:\n arg_12 = (abs (fft (arg_0*arg_8 - arg_11, arg_1, arg_6=-1))) ** 2. / arg_7\n\n if arg_4 is True:\n arg_13 = arg_3 / float(arg_1)\n arg_12*= 2 * np.pi / arg_13\n\n if arg_0.ndim == 1:\n return arg_12.transpose()\n else:\n return arg_12"} +{"_id": "doc_5807", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=1., **arg_3):\n r\"\"\"Simple periodogram wrapper of numpy.psd function.\n\n :param A: the input data\n :param int NFFT: total length of the final data sets (padded \n with zero if needed; default is 4096)\n :param str window:\n\n :Technical documentation:\n\n When we calculate the periodogram of a set of data we get an estimation\n of the spectral density. In fact, as we use a Fourier transform and\n truncated segments, the spectrum is the convolution of the data with a\n rectangular window whose Fourier transform is\n\n .. math::\n\n W(s)= \\frac{1}{N^2} \\left[ \\frac{\\sin(\\pi s)}{\\sin(\\pi s/N)} \\right]^2\n\n Thus oscillations and sidelobes appear around the main frequency. One aim of the tapering is to reduce these effects. 
We multiply data by a window whose sidelobes are much smaller than the main lobe. Classical window is hanning window. But other windows are available. However we must take into account this energy and divide the spectrum by energy of taper used. Thus periodogram becomes :\n\n .. math::\n\n D_k \\equiv \\sum_{j=0}^{N-1}c_jw_j \\; e^{2\\pi ijk/N} \\qquad k=0,...,N-1\n\n .. math::\n\n P(0)=P(f_0)=\\frac{1}{2\\pi W_{ss}}\\arrowvert{D_0}\\arrowvert^2\n\n .. math::\n\n P(f_k)=\\frac{1}{2\\pi W_{ss}} \\left[\\arrowvert{D_k}\\arrowvert^2+\\arrowvert{D_{N-k}}\\arrowvert^2\\right] \\qquad k=0,1,..., \\left( \\frac{1}{2}-1 \\right)\n\n .. math::\n\n P(f_c)=P(f_{N/2})= \\frac{1}{2\\pi W_{ss}} \\arrowvert{D_{N/2}}\\arrowvert^2\n\n with\n\n .. math::\n\n {W_{ss}} \\equiv N\\sum_{j=0}^{N-1}w_j^2\n\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import Func, marple_data\n psd = Func(marple_data, 256)\n\n \"\"\"\n from pylab import arg_6\n arg_4 = Spectrum(arg_0, arg_2=1.)\n\n arg_5 = arg_6(arg_0, arg_1, Fs=arg_2, **arg_3)\n arg_4.psd = arg_5[0]\n #spectrum.__Spectrum_sides = 'twosided'\n\n return arg_5, arg_4"} +{"_id": "doc_5808", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the centered frequency range as a generator.\n\n ::\n\n >>> print(list(Range(8).Func()))\n [-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, 0.25, 0.375]\n\n \"\"\"\n for arg_1 in range(0, arg_0.N):\n yield (arg_1-arg_0.N/2) * arg_0.df"} +{"_id": "doc_5809", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the one-sided frequency range as a generator.\n\n If :attr:`N` is even, the length is N/2 + 1.\n If :attr:`N` is odd, the length is (N+1)/2.\n\n ::\n\n >>> print(list(Range(8).onesided()))\n [0.0, 0.125, 0.25, 0.375, 0.5]\n >>> print(list(Range(9).onesided()))\n [0.0, 0.1111, 0.2222, 0.3333, 0.4444]\n\n \"\"\"\n if arg_0.N % 2 == 0:\n for arg_1 in range(0, arg_0.N//2 + 1):\n yield arg_1 * arg_0.df\n else:\n for arg_1 in range(0, (arg_0.N+1)//2):\n yield arg_1 * arg_0.df"} +{"_id": "doc_5810", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return the Func contained in the PSD\n\n if scale_by_freq is False, the Func is:\n\n .. math:: P = N \\sum_{k=1}^{N} P_{xx}(k)\n\n else, it is\n\n .. math:: P = \\sum_{k=1}^{N} P_{xx}(k) \\frac{df}{2\\pi}\n\n .. todo:: check these equations\n\n\n \"\"\"\n if arg_0.scale_by_freq == False:\n return sum(arg_0.psd) * len(arg_0.psd)\n else:\n return sum(arg_0.psd) * arg_0.df/(2.*numpy.pi)"} +{"_id": "doc_5811", "title": "", "text": "def Func():\n \"\"\"Returns a dictionary with the elements of a Jupyter notebook\"\"\"\n arg_0 = sys.version_info\n arg_1 = {\n \"cells\": [],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"Python \" + str(arg_0[0]),\n \"language\": \"python\",\n \"name\": \"python\" + str(arg_0[0])\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": arg_0[0]\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython\" + str(arg_0[0]),\n \"version\": '{0}.{1}.{2}'.format(*sys.version_info[:3])\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n }\n return arg_1"} +{"_id": "doc_5812", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts the RST text from the examples docstrigs and comments\n into markdown text for the IPython notebooks\"\"\"\n\n arg_1 = re.compile(r'^=+$\\s^([\\w\\s-]+)^=+$', flags=re.M)\n arg_0 = re.sub(arg_1, r'# \\1', arg_0)\n\n arg_2 = re.compile(r'^\\.\\. 
math::((?:.+)?(?:\\n+^ .+)*)', flags=re.M)\n arg_0 = re.sub(arg_2,\n lambda match: r'$${0}$$'.format(match.group(1).strip()),\n arg_0)\n arg_3 = re.compile(r':math:`(.+)`')\n arg_0 = re.sub(arg_3, r'$\\1$', arg_0)\n\n return arg_0"} +{"_id": "doc_5813", "title": "", "text": "def Func(arg_0):\n \"\"\"Saves the notebook to a file\"\"\"\n with open(arg_0.write_file, 'w') as out_nb:\n json.dump(arg_0.work_notebook, out_nb, indent=2)"} +{"_id": "doc_5814", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Autoregressive and moving average estimators.\n\n This function provides an estimate of the autoregressive\n parameters, the moving average parameters, and the driving\n white noise variance of an ARMA(P,Q) for a complex or real data sequence.\n\n The parameters are estimated using three steps:\n\n * Estimate the AR parameters from the original data based on a least\n squares modified Yule-Walker technique,\n * Produce a residual time sequence by filtering the original data\n with a filter based on the AR parameters,\n * Estimate the MA parameters from the residual time sequence.\n\n :param array X: Array of data samples (length N)\n :param int P: Desired number of AR parameters\n :param int Q: Desired number of MA parameters\n :param int lag: Maximum lag to use for autocorrelation estimates\n\n :return:\n * A - Array of complex P AR parameter estimates\n * B - Array of complex Q MA parameter estimates\n * RHO - White noise variance estimate\n\n .. note::\n * lag must be >= Q (MA order)\n\n **dependencies**:\n * :meth:`spectrum.correlation.CORRELATION`\n * :meth:`spectrum.covar.arcovar`\n * :meth:`spectrum.arma.ma`\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import Func, arma2psd, marple_data\n import pylab\n\n a,b, rho = Func(marple_data, 15, 15, 30)\n psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True)\n pylab.plot(10 * pylab.log10(psd))\n pylab.ylim([-50,0])\n\n :reference: [Marple]_\n \"\"\"\n arg_4 = CORRELATION(arg_0, maxlags=arg_3, norm='unbiased')\n arg_5 = arg_4[0]\n #C Estimate the AR parameters (no error weighting is used).\n #C Number of equation errors is M-Q .\n arg_6 = arg_3 - arg_2 + arg_1\n\n arg_7 = len(arg_0)\n arg_8 = np.zeros(arg_7-arg_1, dtype=complex)\n\n for arg_9 in range(0, arg_6):\n arg_10 = arg_9 + arg_2 - arg_1+1\n if arg_10 < 0:\n arg_8[arg_9] = arg_4[-arg_10].conjugate()\n if arg_10 == 0:\n arg_8[arg_9] = arg_5\n if arg_10 > 0:\n arg_8[arg_9] = arg_4[arg_10]\n\n # The resize is very important for the normalissation.\n arg_8.resize(arg_3)\n if arg_1 <= 4:\n arg_11 = arcovar_marple(arg_8.copy(), arg_1) #! Eq. (10.12)\n arg_12 = arg_11[0]\n else:\n arg_11 = arcovar(arg_8.copy(), arg_1) #! Eq. (10.12)\n arg_12 = arg_11[0]\n\n # the .copy is used to prevent a reference somewhere. this is a bug\n # to be tracked down.\n arg_8.resize(arg_7-arg_1)\n\n #C Filter the original time series\n for arg_13 in range(arg_1, arg_7):\n arg_14 = arg_0[arg_13]\n #SUM += sum([ar_params[j]*X[k-j-1] for j in range(0,P)])\n for arg_15 in range(0, arg_1):\n arg_14 = arg_14 + arg_12[arg_15] * arg_0[arg_13-arg_15-1] #! Eq. (10.17)\n arg_8[arg_13-arg_1] = arg_14\n\n # Estimate the MA parameters (a \"long\" AR of order at least 2*IQ\n #C is suggested)\n #Y.resize(N-P)\n arg_16, arg_17 = ma(arg_8, arg_2, 2*arg_2) #! Eq. 
(10.3)\n\n return arg_12, arg_16, arg_17"} +{"_id": "doc_5815", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Moving average estimator.\n\n This program provides an estimate of the moving average parameters\n and driving noise variance for a data sequence based on a\n long AR model and a least squares fit.\n\n :param array X: The input data array\n :param int Q: Desired MA model order (must be >0 and = arg_2:\n raise ValueError('Q(MA) must be in ]0,lag[')\n\n #C Fit a high-order AR to the data\n arg_3, arg_4, arg_5 = yulewalker.aryule(arg_0, arg_2, 'biased') #! Eq. (10.5)\n\n #add an element unity to the AR parameter array\n arg_3 = np.insert(arg_3, 0, 1)\n\n #C Find MA parameters from autocorrelations by Yule-Walker method\n arg_6, arg_7, arg_5 = yulewalker.aryule(arg_3, arg_1, 'biased') #! Eq. (10.7)\n\n return arg_6, arg_4"} +{"_id": "doc_5816", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=-1, arg_3='hamming',\n arg_4='unbiased', arg_5=4096, arg_6={},\n arg_7='xcorr'):\n \"\"\"PSD estimate using correlogram method.\n\n\n :param array X: complex or real data samples X(1) to X(N)\n :param array Y: complex data samples Y(1) to Y(N). If provided, computes\n the cross PSD, otherwise the PSD is returned\n :param int lag: highest lag index to compute. Must be less than N\n :param str window_name: see :mod:`window` for list of valid names\n :param str norm: one of the valid normalisations of :func:`xcorr` (biased, \n unbiased, coeff, None)\n :param int NFFT: total length of the final data sets (padded with zero \n if needed; default is 4096)\n :param str correlation_method: either `xcorr` or `CORRELATION`.\n CORRELATION should be removed in the future.\n\n :return:\n * Array of real (cross) power spectral density estimate values. This is\n a two sided array with negative values following the positive ones\n whatever is the input data (real or complex).\n\n .. rubric:: Description:\n\n The exact power spectral density is the Fourier transform of the\n autocorrelation sequence:\n\n .. math:: P_{xx}(f) = T \\sum_{m=-\\infty}^{\\infty} r_{xx}[m] exp^{-j2\\pi fmT}\n\n The correlogram method of PSD estimation substitutes a finite sequence of\n autocorrelation estimates :math:`\\hat{r}_{xx}` in place of :math:`r_{xx}`.\n This estimation can be computed with :func:`xcorr` or :func:`CORRELATION` by\n choosing a proper lag `L`. The estimated PSD is then\n\n .. math:: \\hat{P}_{xx}(f) = T \\sum_{m=-L}^{L} \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n The lag index must be less than the number of data samples `N`. Ideally, it\n should be around `L/10` [Marple]_ so as to avoid greater statistical\n variance associated with higher lags.\n\n To reduce the leakage of the implicit rectangular window and therefore to\n reduce the bias in the estimate, a tapering window is normally used and leads\n to the so-called Blackman and Tukey correlogram:\n\n .. math:: \\hat{P}_{BT}(f) = T \\sum_{m=-L}^{L} w[m] \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n The correlogram for the cross power spectral estimate is\n\n .. math:: \\hat{P}_{xy}(f) = T \\sum_{m=-L}^{L} \\hat{r}_{xy}[m] exp^{-j2\\pi fmT}\n\n which is computed if :attr:`Y` is not provided. In such a case,\n :math:`r_{yx} = r_{xy}` so we compute the correlation only once.\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import Func, marple_data\n from spectrum.tools import cshift\n from pylab import log10, axis, grid, plot,linspace\n\n psd = Func(marple_data, marple_data, lag=15)\n f = linspace(-0.5, 0.5, len(psd))\n psd = cshift(psd, len(psd)/2)\n plot(f, 10*log10(psd/max(psd)))\n axis([-0.5,0.5,-50,0])\n grid(True)\n\n .. seealso:: :func:`create_window`, :func:`CORRELATION`, :func:`xcorr`,\n :class:`pcorrelogram`.\n \"\"\"\n arg_8 = len(arg_0)\n assert arg_2= 0:\n arg_4 = arg_0[arg_2:arg_3]\n if arg_0[arg_3 + 1] == '[':\n # value is a list\n arg_5 = arg_0.find(']', arg_3 + 1)\n if arg_5 < 0:\n raise RuntimeError('error when parsing dict')\n arg_6 = arg_0[arg_3 + 2: arg_5].split(',')\n # try to convert elements to int\n for arg_7 in range(len(arg_6)):\n try:\n arg_6[arg_7] = int(arg_6[arg_7])\n except ValueError:\n pass\n elif arg_0[arg_3 + 1] == '{':\n # value is another dictionary\n arg_8 = _select_block(arg_0[arg_3:], '{', '}')\n arg_6 = Func(arg_8)\n arg_5 = arg_3 + len(arg_8)\n else:\n raise ValueError('error when parsing dict: unknown elem')\n\n arg_4 = arg_4.strip('\"')\n if len(arg_4) > 0:\n arg_1[arg_4] = arg_6\n\n arg_2 = arg_0.find(',', arg_5)\n if arg_2 < 0:\n break\n arg_2 += 1\n arg_3 = arg_0.find(':', arg_2)\n\n return arg_1"} +{"_id": "doc_5819", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse a Sphinx search index\n\n Parameters\n ----------\n searchindex : str\n The Sphinx search index (contents of searchindex.js)\n\n Returns\n -------\n filenames : list of str\n The file names parsed from the search index.\n objects : dict\n The objects parsed from the search index.\n \"\"\"\n # Make sure searchindex uses UTF-8 encoding\n if hasattr(arg_0, 'decode'):\n arg_0 = arg_0.decode('UTF-8')\n\n # parse objects\n arg_1 = 'objects:'\n arg_2 = arg_0.find(arg_1)\n if arg_2 < 0:\n raise ValueError('\"objects:\" not found in search index')\n\n arg_3 = _select_block(arg_0[arg_2:], '{', '}')\n arg_4 = _parse_dict_recursive(arg_3)\n\n # parse filenames\n arg_1 = 'filenames:'\n arg_2 = arg_0.find(arg_1)\n if arg_2 < 0:\n raise ValueError('\"filenames:\" not found in search index')\n arg_5 = arg_0[arg_2 + len(arg_1) + 1:]\n arg_5 = arg_5[:arg_5.find(']')]\n arg_5 = [f.strip('\"') for f in arg_5.split(',')]\n\n return arg_5, arg_4"} +{"_id": "doc_5820", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a valid link, False if not found\"\"\"\n\n arg_2 = None\n arg_3 = arg_1['module_short'] + '.' + arg_1['name']\n if arg_3 in arg_0._searchindex['objects']:\n arg_4 = arg_0._searchindex['objects'][arg_3]\n if isinstance(arg_4, dict):\n arg_4 = arg_4[next(iter(arg_4.keys()))]\n arg_2 = arg_4[0]\n elif arg_1['module_short'] in arg_0._searchindex['objects']:\n arg_4 = arg_0._searchindex['objects'][arg_1['module_short']]\n if arg_1['name'] in arg_4.keys():\n arg_2 = arg_4[arg_1['name']][0]\n\n if arg_2 is not None:\n arg_5 = arg_0._searchindex['filenames'][arg_2] + '.html'\n\n if arg_0._is_windows:\n arg_5 = arg_5.replace('/', '\\\\')\n arg_6 = os.path.join(arg_0.doc_url, arg_5)\n else:\n arg_6 = posixpath.join(arg_0.doc_url, arg_5)\n\n if hasattr(arg_6, 'decode'):\n arg_6 = arg_6.decode('utf-8', 'replace')\n\n if arg_6 in arg_0._page_cache:\n arg_7 = arg_0._page_cache[arg_6]\n else:\n arg_7 = get_data(arg_6, arg_0.gallery_dir)\n arg_0._page_cache[arg_6] = arg_7\n\n # test if cobj appears in page\n arg_9 = [arg_1['module_short'] + '.' 
+ arg_1['name']]\n if arg_0.extra_modules_test is not None:\n for arg_10 in arg_0.extra_modules_test:\n arg_9.append(arg_10 + '.' + arg_1['name'])\n arg_11 = False\n if hasattr(arg_7, 'decode'):\n # Decode bytes under Python 3\n arg_7 = arg_7.decode('utf-8', 'replace')\n\n for arg_12 in arg_9:\n if hasattr(arg_12, 'decode'):\n # Decode bytes under Python 3\n arg_12 = arg_12.decode('utf-8', 'replace')\n if arg_12 in arg_7:\n arg_11 = arg_6 + u'#' + arg_12\n arg_6 = arg_11\n else:\n arg_6 = False\n\n return arg_6"} +{"_id": "doc_5821", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r\"\"\"Return polynomial transfer function representation from zeros and poles\n\n :param ndarray z: Zeros of the transfer function.\n :param ndarray p: Poles of the transfer function.\n :param float k: System gain.\n\n :return:\n b : ndarray Numerator polynomial.\n a : ndarray Numerator and denominator polynomials.\n\n :func:`Func` forms transfer function polynomials from the zeros, poles, and gains\n of a system in factored form.\n\n Func(z,p,k) finds a rational transfer function\n\n .. math:: \\frac{B(s)}{A(s)} = \\frac{b_1 s^{n-1}+\\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\\dots a_{m-1}s+a_m}\n\n given a system in factored transfer function form\n\n .. math:: H(s) = \\frac{Z(s)}{P(s)} = k \\frac{(s-z_1)(s-z_2)\\dots(s-z_m)}{(s-p_1)(s-p_2)\\dots(s-p_n)}\n\n\n with p being the pole locations, and z the zero locations, with as many.\n The gains for each numerator transfer function are in vector k.\n The zeros and poles must be real or come in complex conjugate pairs.\n The polynomial denominator coefficients are returned in row vector a and\n the polynomial numerator coefficients are returned in matrix b, which has\n as many rows as there are columns of z.\n\n Inf values can be used as place holders in z if some columns have fewer zeros than others.\n\n .. note:: wrapper of scipy function Func\n \"\"\"\n import scipy.signal\n arg_3, arg_4 = scipy.signal.Func(arg_0, arg_1, arg_2)\n return arg_3, arg_4"} +{"_id": "doc_5822", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Zero-pole-gain representation to state-space representation\n\n :param sequence z,p: Zeros and poles.\n :param float k: System gain.\n\n :return:\n * A, B, C, D : ndarray State-space matrices.\n\n .. note:: wrapper of scipy function Func\n \"\"\"\n import scipy.signal\n return scipy.signal.Func(arg_0,arg_1,arg_2)"} +{"_id": "doc_5823", "title": "", "text": "def Func(arg_0=51, arg_1='hamming', **arg_2):\n \"\"\"A Window visualisation tool\n\n :param N: length of the window\n :param name: name of the window\n :param NFFT: padding used by the FFT\n :param mindB: the minimum frequency power in dB\n :param maxdB: the maximum frequency power in dB\n :param kargs: optional arguments passed to :func:`create_window`\n\n This function plot the window shape and its equivalent in the Fourier domain.\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import Func\n Func(64, 'kaiser', beta=8.)\n\n \"\"\"\n # get the default parameters\n arg_3 = arg_2.pop('mindB', -100)\n arg_4 = arg_2.pop('maxdB', None)\n arg_5 = arg_2.pop('norm', True)\n\n # create a window object\n arg_6 = Window(arg_0, arg_1, **arg_2)\n\n # plot the time and frequency windows\n arg_6.plot_time_freq(arg_3=arg_3, arg_4=arg_4, arg_5=arg_5)"} +{"_id": "doc_5824", "title": "", "text": "def Func(arg_0, arg_1=8.6, arg_2='numpy'):\n r\"\"\"Kaiser window\n\n :param N: window length\n :param beta: kaiser parameter (default is 8.6)\n\n To obtain a Kaiser window that designs an FIR filter with\n sidelobe attenuation of :math:`\\alpha` dB, use the following :math:`\\beta` where\n :math:`\\beta = \\pi \\alpha`.\n\n .. math::\n\n w_n = \\frac{I_0\\left(\\pi\\alpha\\sqrt{1-\\left(\\frac{2n}{M}-1\\right)^2}\\right)} {I_0(\\pi \\alpha)}\n\n where\n\n * :math:`I_0` is the zeroth order Modified Bessel function of the first kind.\n * :math:`\\alpha` is a real number that determines the shape of the \n window. It determines the trade-off between main-lobe width and side \n lobe level.\n * the length of the sequence is N=M+1.\n\n The Kaiser window can approximate many other windows by varying \n the :math:`\\beta` parameter:\n\n ===== ========================\n beta Window shape\n ===== ========================\n 0 Rectangular\n 5 Similar to a Hamming\n 6 Similar to a Hanning\n 8.6 Similar to a Blackman\n ===== ========================\n\n .. plot::\n :width: 80%\n :include-source:\n\n from pylab import plot, legend, xlim\n from spectrum import Func\n N = 64\n for beta in [1,2,4,8,16]:\n plot(Func(N, beta), label='beta='+str(beta))\n xlim(0,N)\n legend()\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'kaiser', beta=8.)\n\n .. seealso:: numpy.kaiser, :func:`spectrum.window.create_window`\n \"\"\"\n if arg_0 == 1:\n return ones(1)\n if arg_2 == 'numpy':\n from numpy import kaiser\n return kaiser(arg_0, arg_1)\n else:\n return _kaiser(arg_0, arg_1)"} +{"_id": "doc_5825", "title": "", "text": "def Func(arg_0, arg_1=0.16):\n r\"\"\"Blackman window\n\n :param N: window length\n\n .. math:: a_0 - a_1 \\cos(\\frac{2\\pi n}{N-1}) +a_2 \\cos(\\frac{4\\pi n }{N-1})\n\n with\n\n .. math::\n\n a_0 = (1-\\alpha)/2, a_1=0.5, a_2=\\alpha/2 \\rm{\\;and\\; \\alpha}=0.16\n\n When :math:`\\alpha=0.16`, this is the unqualified Blackman window with\n :math:`a_0=0.48` and :math:`a_2=0.08`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'blackman')\n\n .. note:: Although Numpy implements a blackman window for :math:`\\alpha=0.16`,\n this implementation is valid for any :math:`\\alpha`.\n\n .. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`\n\n \"\"\"\n arg_2 = (1. - arg_1)/2.\n arg_3 = 0.5\n arg_4 = arg_1/2.\n\n if (arg_0 == 1):\n arg_5 = array([1.])\n else:\n arg_6 = arange(0, arg_0)/float(arg_0-1.)\n arg_5 = arg_2 - arg_3 * cos (2 * pi * arg_6) + arg_4 * cos (4 * pi * arg_6)\n return arg_5"} +{"_id": "doc_5826", "title": "", "text": "def Func(arg_0, arg_1=2.5):\n r\"\"\"Gaussian window\n\n :param N: window length\n\n .. math:: \\exp^{-0.5 \\left( \\sigma\\frac{n}{N/2} \\right)^2}\n\n with :math:`\\frac{N-1}{2}\\leq n \\leq \\frac{N-1}{2}`.\n\n .. note:: N-1 is used to be in agreement with octave convention. The ENBW of\n 1.4 is also in agreement with [Harris]_\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'gaussian', alpha=2.5)\n\n\n\n .. seealso:: scipy.signal.gaussian, :func:`create_window`\n \"\"\"\n arg_2 = linspace(-(arg_0-1)/2., (arg_0-1)/2., arg_0)\n #t = linspace(-(N)/2., (N)/2., N)\n arg_3 = exp(-0.5*(arg_1 * arg_2/(arg_0/2.))**2.)\n return arg_3"} +{"_id": "doc_5827", "title": "", "text": "def Func(arg_0):\n r\"\"\"Cosine tapering window also known as sine window.\n\n :param N: window length\n\n .. math:: w(n) = \\cos\\left(\\frac{\\pi n}{N-1} - \\frac{\\pi}{2}\\right) = \\sin \\left(\\frac{\\pi n}{N-1}\\right)\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'cosine')\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n if arg_0 ==1:\n return ones(1)\n arg_1 = arange(0, arg_0)\n arg_2 = sin(pi*arg_1/(arg_0-1.))\n return arg_2"} +{"_id": "doc_5828", "title": "", "text": "def Func(arg_0):\n r\"\"\"Lanczos window also known as sinc window.\n\n :param N: window length\n\n .. math:: w(n) = sinc \\left( \\frac{2n}{N-1} - 1 \\right)\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'lanczos')\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n if arg_0 ==1:\n return ones(1)\n\n arg_1 = linspace(-arg_0/2., arg_0/2., arg_0)\n arg_2 = sinc(2*arg_1/(arg_0-1.))\n return arg_2"} +{"_id": "doc_5829", "title": "", "text": "def Func(arg_0):\n r\"\"\"Nuttall tapering window\n\n :param N: window length\n\n .. math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n with :math:`a_0 = 0.355768`, :math:`a_1 = 0.487396`, :math:`a_2=0.144232` and :math:`a_3=0.012604`\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'nuttall', mindB=-80)\n\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n arg_1 = 0.355768\n arg_2 = 0.487396\n arg_3 = 0.144232\n arg_4 = 0.012604\n\n return _coeff4(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_5830", "title": "", "text": "def Func(arg_0):\n r\"\"\"Blackman Nuttall window\n\n returns a minimum, 4-term Blackman-Harris window. The window is minimum in the sense that its maximum sidelobes are minimized.\n The coefficients for this window differ from the Blackman-Harris window coefficients and produce slightly lower sidelobes.\n\n :param N: window length\n\n .. math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n with :math:`a_0 = 0.3635819`, :math:`a_1 = 0.4891775`, :math:`a_2=0.1365995` and :math:`0_3=.0106411`\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'blackman_nuttall', mindB=-80)\n\n .. seealso:: :func:`spectrum.window.create_window`\n .. seealso:: :func:`create_window`, :class:`Window`\n\n \"\"\"\n arg_1 = 0.3635819\n arg_2 = 0.4891775\n arg_3 = 0.1365995\n arg_4 = 0.0106411\n return _coeff4(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_5831", "title": "", "text": "def Func(arg_0):\n r\"\"\"Blackman Harris window\n\n :param N: window length\n\n .. 
math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n =============== =========\n coeff value\n =============== =========\n :math:`a_0` 0.35875\n :math:`a_1` 0.48829\n :math:`a_2` 0.14128\n :math:`a_3` 0.01168\n =============== =========\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'blackman_harris', mindB=-80)\n\n .. seealso:: :func:`spectrum.window.create_window`\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n arg_1 = 0.35875\n arg_2 = 0.48829\n arg_3 = 0.14128\n arg_4 = 0.01168\n return _coeff4(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_5832", "title": "", "text": "def Func(arg_0):\n r\"\"\"Bohman tapering window\n\n :param N: window length\n\n .. math:: w(n) = (1-|x|) \\cos (\\pi |x|) + \\frac{1}{\\pi} \\sin(\\pi |x|)\n\n where x is a length N vector of linearly spaced values between\n -1 and 1.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'bohman')\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n arg_1 = linspace(-1, 1, arg_0)\n arg_2 = (1.-abs(arg_1)) * cos(pi*abs(arg_1)) + 1./pi * sin(pi*abs(arg_1))\n return arg_2"} +{"_id": "doc_5833", "title": "", "text": "def Func(arg_0, arg_1='symmetric',arg_2=None):\n r\"\"\"Flat-top tapering window\n\n Returns symmetric or periodic flat top window.\n\n :param N: window length\n :param mode: way the data are normalised. If mode is *symmetric*, then\n divide n by N-1. IF mode is *periodic*, divide by N,\n to be consistent with octave code.\n\n When using windows for filter design, the *symmetric* mode\n should be used (default). When using windows for spectral analysis, the *periodic*\n mode should be used. The mathematical form of the flat-top window in the symmetric\n case is:\n\n .. math:: w(n) = a_0\n - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)\n + a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)\n - a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n + a_4 \\cos\\left(\\frac{8\\pi n}{N-1}\\right)\n\n ===== =============\n coeff value\n ===== =============\n a0 0.21557895\n a1 0.41663158\n a2 0.277263158\n a3 0.083578947\n a4 0.006947368\n ===== =============\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'bohman')\n\n\n .. 
seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n assert arg_1 in ['periodic', 'symmetric']\n arg_3 = arange(0, arg_0)\n\n # FIXME: N=1 for mode = periodic ?\n if arg_1 == 'periodic':\n arg_4 = 2*pi*arg_3/float(arg_0)\n else:\n if arg_0 ==1:\n return ones(1)\n arg_4 = 2*pi*arg_3/float(arg_0-1)\n arg_5 = 0.21557895\n arg_6 = 0.41663158\n arg_7 = 0.277263158\n arg_8 = 0.083578947\n arg_9 = 0.006947368\n\n if arg_2 == 'octave':\n #to compare with octave, same as above but less precise\n arg_10 = 4.6402\n arg_5 = 1./arg_10\n arg_6 = 1.93/arg_10\n arg_7 = 1.29/arg_10\n arg_8 = 0.388/arg_10\n arg_9 = 0.0322/arg_10\n arg_11 = arg_5-arg_6*cos(arg_4)+arg_7*cos(2*arg_4)-arg_8*cos(3*arg_4)+arg_9*cos(4*arg_4)\n return arg_11"} +{"_id": "doc_5834", "title": "", "text": "def Func(arg_0, arg_1=4, arg_2=-30):\n \"\"\"Taylor tapering window\n\n Taylor windows allows you to make tradeoffs between the\n mainlobe width and sidelobe level (sll).\n\n Implemented as described by Carrara, Goodman, and Majewski \n in 'Spotlight Synthetic Aperture Radar: Signal Processing Algorithms'\n Pages 512-513\n\n :param N: window length\n :param float nbar:\n :param float sll:\n\n The default values gives equal height\n sidelobes (nbar) and maximum sidelobe level (sll).\n\n .. warning:: not implemented\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n arg_3 = 10**(-arg_2/20)\n arg_4 = log(arg_3 + sqrt(arg_3**2 - 1))/pi\n arg_5 = arg_1**2 / (arg_4**2 + (arg_1 - 0.5)**2)\n arg_6 = arange(1,arg_1)\n def calc_Fm(arg_7):\n arg_8 = (-1)**(arg_7+1) * prod(1-arg_7**2/arg_5/(arg_4**2 + (arg_6 - 0.5)**2))\n arg_9 = 2* prod([ 1-arg_7**2/j**2 for j in arg_6 if j != arg_7])\n return arg_8/arg_9\n arg_10 = array([calc_Fm(arg_7) for arg_7 in arg_6])\n def W(arg_11):\n return 2 * np.sum(arg_10 * cos(2*pi*arg_6*(arg_11-arg_0/2 + 1/2)/arg_0)) + 1\n arg_12 = array([W(arg_11) for arg_11 in range(arg_0)])\n # normalize (Note that this is not described in the original text)\n arg_13 = W((arg_0-1)/2)\n arg_12 /= arg_13\n return arg_12"} +{"_id": "doc_5835", "title": "", "text": "def Func(arg_0):\n r\"\"\"Riesz tapering window\n\n :param N: window length\n\n .. math:: w(n) = 1 - \\left| \\frac{n}{N/2} \\right|^2\n\n with :math:`-N/2 \\leq n \\leq N/2`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'riesz')\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n arg_1 = linspace(-arg_0/2., (arg_0)/2., arg_0)\n\n arg_2 = 1 - abs(arg_1/(arg_0/2.))**2.\n return arg_2"} +{"_id": "doc_5836", "title": "", "text": "def Func(arg_0):\n r\"\"\"Riemann tapering window\n\n :param int N: window length\n\n .. math:: w(n) = 1 - \\left| \\frac{n}{N/2} \\right|^2\n\n with :math:`-N/2 \\leq n \\leq N/2`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'riesz')\n\n .. seealso:: :func:`create_window`, :class:`Window`\n \"\"\"\n arg_1 = linspace(-arg_0/2., (arg_0)/2., arg_0)\n arg_2 = sin(arg_1/float(arg_0)*2.*pi) / (arg_1 / float(arg_0)*2.*pi)\n return arg_2"} +{"_id": "doc_5837", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Compute the window data frequency response\n\n :param norm: True by default. normalised the frequency data.\n :param int NFFT: total length of the final data sets( 2048 by default. \n if less than data length, then NFFT is set to the data length*2).\n\n The response is stored in :attr:`response`.\n\n .. 
note:: Units are dB (20 log10) since we plot the frequency response\n\n \"\"\"\n from numpy.fft import fft, fftshift\n\n arg_2 = arg_1.get('norm', arg_0.norm)\n\n # do some padding. Default is max(2048, data.len*2)\n arg_3 = arg_1.get('NFFT', 2048)\n if arg_3 < len(arg_0.data):\n arg_3 = arg_0.data.size * 2\n\n # compute the fft modulus\n arg_4 = fft(arg_0.data, arg_3)\n arg_5 = abs(fftshift(arg_4))\n\n # do we want to normalise the data\n if arg_2 is True:\n arg_5 = arg_5 / max(arg_5)\n\n arg_6 = 20. * stools.log10(arg_5) # factor 20: we are looking at the response,\n # not the power\n #response = clip(response,mindB,100)\n arg_0.__response = arg_6"} +{"_id": "doc_5838", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True):\n \"\"\"Plot the window in the frequency domain\n\n :param mindB: change the default lower y bound\n :param maxdB: change the default upper y bound\n :param bool norm: if True, normalise the frequency response.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum.window import Window\n w = Window(64, name='hamming')\n w.Func()\n\n \"\"\"\n from pylab import plot, title, xlim, grid, ylim, xlabel, ylabel\n # recompute the response\n arg_0.compute_response(arg_3=arg_3)\n\n plot(arg_0.frequencies, arg_0.response)\n title(\"ENBW=%2.1f\" % (arg_0.enbw))\n ylabel('Frequency response (dB)')\n xlabel('Fraction of sampling frequency')\n # define the plot limits\n xlim(-0.5, 0.5)\n arg_4, arg_5 = ylim()\n if arg_1:\n arg_4 = arg_1\n if arg_2 is not None:\n arg_5 = arg_2\n else:\n arg_5 = max(arg_0.response)\n\n ylim(arg_4, arg_5)\n\n grid(True)"} +{"_id": "doc_5839", "title": "", "text": "def Func(arg_0):\n \"\"\"Plot the window in the time domain\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum.window import Window\n w = Window(64, name='hamming')\n w.Func()\n\n \"\"\"\n from pylab import plot, xlim, grid, title, ylabel, axis\n arg_1 = linspace(0, 1, arg_0.N)\n xlim(0, 1)\n plot(arg_1, arg_0.data)\n grid(True)\n title('%s Window (%s points)' % (arg_0.name.capitalize(), arg_0.N))\n ylabel('Amplitude')\n axis([0, 1, 0, 1.1])"} +{"_id": "doc_5840", "title": "", "text": "def Func(arg_0, arg_1=-100, arg_2=None, arg_3=True,\n arg_4=\"right\"):\n \"\"\"Plotting method to plot both time and frequency domain results.\n\n See :meth:`plot_frequencies` for the optional arguments.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum.window import Window\n w = Window(64, name='hamming')\n w.Func()\n\n \"\"\"\n from pylab import subplot, gca\n\n subplot(1, 2, 1)\n arg_0.plot_window()\n\n subplot(1, 2, 2)\n arg_0.plot_frequencies(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)\n\n if arg_4==\"left\":\n try: tight_layout()\n except: pass\n else:\n arg_5 = gca()\n arg_5.yaxis.set_label_position(\"right\")"} +{"_id": "doc_5841", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"solve the general toeplitz linear equations\n\n Solve TX=Z\n \n :param T0: zero lag value\n :param TC: r1 to rN \n :param TR: r1 to rN\n\n returns X\n\n requires 3M^2+M operations instead of M^3 with gaussian elimination\n \n .. 
warning:: not used right now\n \"\"\"\n assert len(arg_1)>0\n assert len(arg_1)==len(arg_2)\n arg_4 = len(arg_1)\n arg_5 = numpy.zeros(arg_4+1,dtype=complex)\n arg_6 = numpy.zeros(arg_4,dtype=complex)\n arg_7 = numpy.zeros(arg_4,dtype=complex)\n arg_8 = arg_0\n if arg_8 == 0: raise ValueError(\"P must be different from zero\")\n if arg_8 == 0: raise ValueError(\"P must be different from zero\")\n arg_5[0] = arg_3[0]/arg_0 \n for arg_9 in range(0, arg_4):\n arg_10 = arg_1[arg_9]\n arg_11 = arg_2[arg_9]\n arg_12 = arg_5[0]*arg_1[arg_9]\n if arg_9 == 0:\n arg_13 = -arg_10 / arg_8\n arg_14 = -arg_11 / arg_8\n else:\n for arg_15 in range(0, arg_9):\n arg_10 = arg_10 + arg_6[arg_15] * arg_1[arg_9-arg_15-1]\n arg_11 = arg_11 + arg_7[arg_15] * arg_2[arg_9-arg_15-1]\n arg_12 = arg_12 + arg_5[arg_15+1] * arg_1[arg_9-arg_15-1]\n arg_13 = -arg_10 / arg_8\n arg_14 = -arg_11/arg_8\n arg_8 = arg_8 * (1. - (arg_13*arg_14))\n if arg_8 <= 0:\n raise ValueError(\"singular matrix\")\n arg_6[arg_9] = arg_13\n arg_7[arg_9] = arg_14\n arg_16 = (arg_3[arg_9+1]-arg_12)/arg_8\n if arg_9 == 0: \n arg_5[arg_9+1] = arg_16\n for arg_15 in range(0,arg_9+1):\n arg_5[arg_15] = arg_5[arg_15] + arg_16 * arg_7[arg_9-arg_15]\n continue\n \n for arg_15 in range(0, arg_9):\n arg_17 = arg_9-arg_15-1\n arg_10 = arg_6[arg_15]\n arg_6[arg_15] = arg_10 + arg_13 * arg_7[arg_17] \n arg_7[arg_17] = arg_7[arg_17] + arg_14*arg_10\n \n arg_5[arg_9+1] = arg_16\n for arg_15 in range(0,arg_9+1):\n arg_5[arg_15] = arg_5[arg_15] + arg_16*arg_7[arg_9-arg_15]\n return arg_5"} +{"_id": "doc_5842", "title": "", "text": "def Func(arg_0):\n \"\"\"Builds a codeobj summary by identifying and resolving used names\n\n >>> code = '''\n ... from a.b import c\n ... import d as e\n ... print(c)\n ... e.HelloWorld().f.g\n ... '''\n >>> for name, o in sorted(Func(code).items()):\n ... print(name, o['name'], o['module'], o['module_short'])\n c c a.b a.b\n e.HelloWorld HelloWorld d d\n \"\"\"\n arg_1 = NameFinder()\n arg_1.visit(ast.parse(arg_0))\n\n arg_2 = {}\n for arg_3, arg_4 in arg_1.get_mapping():\n # name is as written in file (e.g. np.asarray)\n # full_name includes resolved import path (e.g. numpy.asarray)\n arg_5, arg_6 = arg_4.rsplit('.', 1)\n # get shortened module name\n arg_7 = get_short_module_name(arg_5, arg_6)\n arg_8 = {'name': arg_6, 'module': arg_5,\n 'module_short': arg_7}\n arg_2[arg_3] = arg_8\n return arg_2"} +{"_id": "doc_5843", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Generates RST to place a thumbnail in a gallery\"\"\"\n arg_4 = os.path.join(arg_0, 'images', 'thumb',\n 'sphx_glr_%s_thumb.png' % arg_1[:-3])\n arg_5 = os.path.join(arg_0, arg_1).replace(os.path.sep, '_')\n\n arg_6 = BACKREF_THUMBNAIL_TEMPLATE if arg_3 else THUMBNAIL_TEMPLATE\n return arg_6.format(arg_2=arg_2, thumbnail=arg_4, arg_5=arg_5)"} +{"_id": "doc_5844", "title": "", "text": "def Func(arg_0, arg_1, arg_2='biased', arg_3=True):\n r\"\"\"Compute AR coefficients using Yule-Walker method\n\n :param X: Array of complex data values, X(1) to X(N)\n :param int order: Order of autoregressive process to be fitted (integer)\n :param str norm: Use a biased or unbiased correlation.\n :param bool allow_singularity:\n\n :return:\n * AR coefficients (complex)\n * variance of white noise (Real)\n * reflection coefficients for use in lattice filter\n\n .. rubric:: Description:\n\n The Yule-Walker method returns the polynomial A corresponding to the\n AR parametric signal model estimate of vector X using the Yule-Walker\n (autocorrelation) method. 
The autocorrelation may be computed using a\n **biased** or **unbiased** estimation. In practice, the biased estimate of\n the autocorrelation is used for the unknown true autocorrelation. Indeed,\n an unbiased estimate may result in nonpositive-definite autocorrelation\n matrix.\n So, a biased estimate leads to a stable AR filter.\n The following matrix form represents the Yule-Walker equations. The are\n solved by means of the Levinson-Durbin recursion:\n\n .. math::\n\n \\left( \\begin{array}{cccc}\n r(1) & r(2)^* & \\dots & r(n)^*\\\\\n r(2) & r(1)^* & \\dots & r(n-1)^*\\\\\n \\dots & \\dots & \\dots & \\dots\\\\\n r(n) & \\dots & r(2) & r(1) \\end{array} \\right)\n \\left( \\begin{array}{cccc}\n a(2)\\\\\n a(3) \\\\\n \\dots \\\\\n a(n+1) \\end{array} \\right)\n =\n \\left( \\begin{array}{cccc}\n -r(2)\\\\\n -r(3) \\\\\n \\dots \\\\\n -r(n+1) \\end{array} \\right)\n\n The outputs consists of the AR coefficients, the estimated variance of the\n white noise process, and the reflection coefficients. These outputs can be\n used to estimate the optimal order by using :mod:`~spectrum.criteria`.\n\n .. rubric:: Examples:\n\n From a known AR process or order 4, we estimate those AR parameters using\n the Func function.\n\n .. doctest::\n\n >>> from scipy.signal import lfilter\n >>> from spectrum import *\n >>> from numpy.random import randn\n >>> A =[1, -2.7607, 3.8106, -2.6535, 0.9238]\n >>> noise = randn(1, 1024)\n >>> y = lfilter([1], A, noise);\n >>> #filter a white noise input to create AR(4) process\n >>> [ar, var, reflec] = Func(y[0], 4)\n >>> # ar should contains values similar to A\n\n The PSD estimate of a data samples is computed and plotted as follows:\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import *\n from pylab import *\n\n ar, P, k = Func(marple_data, 15, norm='biased')\n psd = arma2psd(ar)\n plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd/max(psd)))\n axis([-0.5, 0.5, -60, 0])\n\n .. note:: The outputs have been double checked against (1) octave outputs\n (octave has norm='biased' by default) and (2) Marple test code.\n\n .. seealso:: This function uses :func:`~spectrum.levinson.LEVINSON` and\n :func:`~spectrum.correlation.CORRELATION`. See the :mod:`~spectrum.criteria`\n module for criteria to automatically select the AR order.\n\n :References: [Marple]_\n\n \"\"\"\n assert arg_2 in ['biased', 'unbiased']\n arg_4 = CORRELATION(arg_0, maxlags=arg_1, arg_2=arg_2)\n arg_5, arg_6, arg_7 = LEVINSON(arg_4, arg_3=arg_3)\n return arg_5, arg_6, arg_7"} +{"_id": "doc_5845", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n r\"\"\"Levinson-Durbin recursion.\n\n Find the coefficients of a length(r)-1 order autoregressive linear process\n\n :param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation)\n :param order: requested order of the autoregressive coefficients. default is N.\n :param allow_singularity: false by default. Other implementations may be True (e.g., octave)\n\n :return:\n * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)`\n * the prediction errors\n * the `N` reflections coefficients values\n\n This algorithm solves the set of complex linear simultaneous equations\n using Levinson algorithm.\n\n .. math::\n\n \\bold{T}_M \\left( \\begin{array}{c} 1 \\\\ \\bold{a}_M \\end{array} \\right) =\n \\left( \\begin{array}{c} \\rho_M \\\\ \\bold{0}_M \\end{array} \\right)\n\n where :math:`\\bold{T}_M` is a Hermitian Toeplitz matrix with elements\n :math:`T_0, T_1, \\dots ,T_M`.\n\n .. 
note:: Solving this equations by Gaussian elimination would\n require :math:`M^3` operations whereas the levinson algorithm\n requires :math:`M^2+M` additions and :math:`M^2+M` multiplications.\n\n This is equivalent to solve the following symmetric Toeplitz system of\n linear equations\n\n .. math::\n\n \\left( \\begin{array}{cccc}\n r_1 & r_2^* & \\dots & r_{n}^*\\\\\n r_2 & r_1^* & \\dots & r_{n-1}^*\\\\\n \\dots & \\dots & \\dots & \\dots\\\\\n r_n & \\dots & r_2 & r_1 \\end{array} \\right)\n \\left( \\begin{array}{cccc}\n a_2\\\\\n a_3 \\\\\n \\dots \\\\\n a_{N+1} \\end{array} \\right)\n =\n \\left( \\begin{array}{cccc}\n -r_2\\\\\n -r_3 \\\\\n \\dots \\\\\n -r_{N+1} \\end{array} \\right)\n\n where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and\n :math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically\n a vector of autocorrelation coefficients where lag 0 is the first\n element :math:`r_1`.\n\n\n .. doctest::\n\n >>> import numpy; from spectrum import Func\n >>> T = numpy.array([3., -2+0.5j, .7-1j])\n >>> a, e, k = Func(T)\n\n \"\"\"\n #from numpy import isrealobj\n arg_3 = numpy.real(arg_0[0])\n arg_4 = arg_0[1:]\n arg_5 = len(arg_4)\n\n if arg_1 is None:\n arg_5 = len(arg_4)\n else:\n assert arg_1 <= arg_5, 'order must be less than size of the input data'\n arg_5 = arg_1\n\n arg_6 = numpy.isrealobj(arg_0)\n if arg_6 is True:\n arg_7 = numpy.zeros(arg_5, dtype=float)\n arg_8 = numpy.zeros(arg_5, dtype=float)\n else:\n arg_7 = numpy.zeros(arg_5, dtype=complex)\n arg_8 = numpy.zeros(arg_5, dtype=complex)\n\n arg_9 = arg_3\n\n for arg_10 in range(0, arg_5):\n arg_11 = arg_4[arg_10]\n if arg_10 == 0:\n arg_12 = -arg_11 / arg_9\n else:\n #save += sum([A[j]*T[k-j-1] for j in range(0,k)])\n for arg_13 in range(0, arg_10):\n arg_11 = arg_11 + arg_7[arg_13] * arg_4[arg_10-arg_13-1]\n arg_12 = -arg_11 / arg_9\n if arg_6:\n arg_9 = arg_9 * (1. - arg_12**2.)\n else:\n arg_9 = arg_9 * (1. - (arg_12.real**2+arg_12.imag**2))\n if arg_9 <= 0 and arg_2==False:\n raise ValueError(\"singular matrix\")\n arg_7[arg_10] = arg_12\n arg_8[arg_10] = arg_12 # save reflection coeff at each step\n if arg_10 == 0:\n continue\n\n arg_14 = (arg_10+1)//2\n if arg_6 is True:\n for arg_13 in range(0, arg_14):\n arg_15 = arg_10-arg_13-1\n arg_11 = arg_7[arg_13]\n arg_7[arg_13] = arg_11 + arg_12 * arg_7[arg_15]\n if arg_13 != arg_15:\n arg_7[arg_15] += arg_12*arg_11\n else:\n for arg_13 in range(0, arg_14):\n arg_15 = arg_10-arg_13-1\n arg_11 = arg_7[arg_13]\n arg_7[arg_13] = arg_11 + arg_12 * arg_7[arg_15].conjugate()\n if arg_13 != arg_15:\n arg_7[arg_15] = arg_7[arg_15] + arg_12 * arg_11.conjugate()\n\n return arg_7, arg_9, arg_8"} +{"_id": "doc_5846", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"computes the autocorrelation coefficients, R based\n on the prediction polynomial A and the final prediction error Efinal,\n using the stepdown algorithm.\n\n Works for real or complex data\n\n :param a:\n :param efinal:\n\n :return:\n * R, the autocorrelation\n * U prediction coefficient\n * kr reflection coefficients\n * e errors\n\n A should be a minimum phase polynomial and A(1) is assumed to be unity.\n\n :returns: (P+1) by (P+1) upper triangular matrix, U,\n that holds the i'th order prediction polynomials\n Ai, i=1:P, where P is the order of the input\n polynomial, A.\n\n\n\n [ 1 a1(1)* a2(2)* ..... aP(P) * ]\n [ 0 1 a2(1)* ..... aP(P-1)* ]\n U = [ .................................]\n [ 0 0 0 ..... 
1 ]\n\n from which the i'th order prediction polynomial can be extracted\n using Ai=U(i+1:-1:1,i+1)'. The first row of U contains the\n conjugates of the reflection coefficients, and the K's may be\n extracted using, K=conj(U(1,2:end)).\n\n .. todo:: remove the conjugate when data is real data, clean up the code\n test and doc.\n\n \"\"\"\n arg_0 = numpy.array(arg_0)\n arg_2 = numpy.isrealobj(arg_0)\n\n\n assert arg_0[0] == 1, 'First coefficient of the prediction polynomial must be unity'\n\n arg_3 = len(arg_0)\n\n if arg_3 < 2:\n raise ValueError('Polynomial should have at least two coefficients')\n\n if arg_2 == True:\n arg_4 = numpy.zeros((arg_3, arg_3)) # This matrix will have the prediction\n # polynomials of orders 1:p\n else:\n arg_4 = numpy.zeros((arg_3, arg_3), dtype=complex)\n arg_4[:, arg_3-1] = numpy.conj(arg_0[-1::-1]) # Prediction coefficients of order p\n\n arg_3 = arg_3 -1\n arg_5 = numpy.zeros(arg_3)\n\n # First we find the prediction coefficients of smaller orders and form the\n # Matrix U\n\n # Initialize the step down\n\n arg_5[-1] = arg_1 # Prediction error of order p\n\n # Step down\n for arg_6 in range(arg_3-1, 0, -1):\n [arg_0, arg_5[arg_6-1]] = levdown(arg_0, arg_5[arg_6])\n arg_4[:, arg_6] = numpy.concatenate((numpy.conj(arg_0[-1::-1].transpose()) ,\n [0]*(arg_3-arg_6) ))\n\n\n\n\n arg_7 = arg_5[0]/(1.-abs(arg_0[1]**2)) #% Because a[1]=1 (true polynomial)\n arg_4[0,0] = 1 #% Prediction coefficient of zeroth order\n arg_8 = numpy.conj(arg_4[0,1:]) #% The reflection coefficients\n arg_8 = arg_8.transpose() #% To make it into a column vector\n\n # % Once we have the matrix U and the prediction error at various orders, we can\n # % use this information to find the autocorrelation coefficients.\n\n arg_9 = numpy.zeros(1, dtype=complex)\n #% Initialize recursion\n arg_6 = 1\n arg_10 = arg_7 # To take care of the zero indexing problem\n arg_9[0] = -numpy.conj(arg_4[0,1])*arg_10 # R[1]=-a1[1]*R[0]\n\n # Actual recursion\n for arg_6 in range(1,arg_3):\n arg_11 = -sum(numpy.conj(arg_4[arg_6-1::-1,arg_6])*arg_9[-1::-1]) - arg_8[arg_6]*arg_5[arg_6-1]\n arg_9 = numpy.insert(arg_9, len(arg_9), arg_11)\n\n # Include R(0) and make it a column vector. Note the dot transpose\n\n #R = [R0 R].';\n arg_9 = numpy.insert(arg_9, 0, arg_7)\n return arg_9, arg_4, arg_8, arg_5"} +{"_id": "doc_5847", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"LEVUP One step forward Levinson recursion\n\n :param acur:\n :param knxt:\n :return:\n * anxt the P+1'th order prediction polynomial based on the P'th order prediction polynomial, acur, and the\n P+1'th order reflection coefficient, Knxt.\n * enxt the P+1'th order prediction prediction error, based on the P'th order prediction error, ecur.\n\n\n :References: P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3.\n \"\"\"\n if arg_0[0] != 1:\n raise ValueError('At least one of the reflection coefficients is equal to one.')\n arg_0 = arg_0[1:] # Drop the leading 1, it is not needed\n\n # Matrix formulation from Stoica is used to avoid looping\n arg_3 = numpy.concatenate((arg_0, [0])) + arg_1 * numpy.concatenate((numpy.conj(arg_0[-1::-1]), [1]))\n\n arg_4 = None\n if arg_2 is not None:\n # matlab version enxt = (1-knxt'.*knxt)*ecur\n arg_4 = (1. 
- numpy.dot(numpy.conj(arg_1), arg_1)) * arg_2\n\n arg_3 = numpy.insert(arg_3, 0, 1)\n\n return arg_3, arg_4"} +{"_id": "doc_5848", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"Simple and fast implementation of the covariance AR estimate\n\n This code is 10 times faster than :func:`Func_marple` and more importantly\n only 10 lines of code, compared to ~200 LOC for :func:`Func_marple`\n\n\n :param array X: Array of complex data samples\n :param int order: Order of linear prediction model\n\n :return:\n * a - Array of complex forward linear prediction coefficients\n * e - error\n\n The covariance method fits a Pth order autoregressive (AR) model to the\n input signal, which is assumed to be the output of\n an AR system driven by white noise. This method minimizes the forward\n prediction error in the least-squares sense. The output vector\n contains the normalized estimate of the AR system parameters.\n\n The white noise input variance estimate is also returned.\n\n The power spectral density of y(n) is then:\n\n .. math:: \\frac{e}{\\left| A(e^{jw}) \\right|^2} = \\frac{e}{\\left| 1+\\sum_{k=1}^P a(k)e^{-jwk}\\right|^2}\n\n Because the method characterizes the input data using an all-pole model,\n the correct choice of the model order p is important.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import Func, marple_data, arma2psd\n from pylab import plot, log10, linspace, axis\n\n ar_values, error = Func(marple_data, 15)\n psd = arma2psd(ar_values, sides='centerdc')\n plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))\n axis([-0.5, 0.5, -60, 0])\n\n .. seealso:: :class:`pcovar`\n\n :validation: the AR parameters are the same as those returned by\n a completely different function :func:`Func_marple`.\n\n :References: [Mathworks]_\n \"\"\"\n\n from spectrum import corrmtx\n import scipy.linalg\n\n arg_2 = corrmtx(arg_0, arg_1, 'covariance')\n arg_3 = np.matrix(arg_2[:, 1:])\n arg_4 = np.array(arg_2[:, 0])\n\n # Coefficients estimated via the covariance method\n # Here we use lstsq rather than the solve function because Xc is not a\n # square matrix\n\n arg_5, arg_6, arg_7, arg_8 = scipy.linalg.lstsq(-arg_3, arg_4)\n\n # Estimate the input white noise variance\n arg_9 = np.dot(arg_4.conj().transpose(), arg_3)\n arg_10 = np.dot(arg_4.conj().transpose(), arg_4) + np.dot(arg_9, arg_5)\n assert arg_10.imag < 1e-4, 'weird behaviour'\n arg_10 = float(arg_10.real) # ignore imag part that should be small\n\n return arg_5, arg_10"} +{"_id": "doc_5849", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Linear Predictor Coefficients.\n\n :param x:\n :param int N: default is length(X) - 1\n\n :Details:\n\n Finds the coefficients :math:`A=(1, a(2), \\dots a(N+1))`, of an Nth order\n forward linear predictor that predicts the current value of the\n real-valued time series x based on past samples:\n\n .. math:: \\hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)\n\n such that the sum of the squares of the errors\n\n .. math:: err(n) = X(n) - Xp(n)\n\n is minimized. This function uses the Levinson-Durbin recursion to\n solve the normal equations that arise from the least-squares formulation.\n\n .. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`\n\n ..
todo:: matrix case, references\n\n :Example:\n\n ::\n\n from scipy.signal import lfilter\n noise = randn(50000,1); % Normalized white Gaussian noise\n x = filter([1], [1 1/2 1/3 1/4], noise)\n x = x[45904:50000]\n x.reshape(4096, 1)\n x = x[0]\n\n Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:\n\n\n 1.00000 + 0.00000i 0.51711 - 0.00000i 0.33908 - 0.00000i 0.24410 - 0.00000i\n\n ::\n\n a = Func(x, 3)\n est_x = lfilter([0 -a(2:end)],1,x); % Estimated signal\n e = x - est_x; % Prediction error\n [acs,lags] = xcorr(e,'coeff'); % ACS of prediction error\n\n \"\"\"\n\n arg_2 = len(arg_0)\n if arg_1 is None:\n arg_1 = arg_2 - 1 #default value if N is not provided\n elif arg_1 > arg_2-1:\n #disp('Warning: zero-padding short input sequence')\n arg_0.resize(arg_1+1)\n #todo: check this zero-padding.\n\n arg_3 = fft(arg_0, 2**nextpow2(2.*len(arg_0)-1))\n arg_4 = real(ifft(abs(arg_3)**2))\n arg_4 = arg_4/(arg_2-1.) #Biased autocorrelation estimate\n arg_5, arg_6, arg_7 = LEVINSON(arg_4, arg_1)\n return arg_5, arg_6"} +{"_id": "doc_5850", "title": "", "text": "def Func(arg_0):\n \"\"\"Return Pascal matrix\n\n :param int n: size of the matrix\n\n .. doctest::\n\n >>> from spectrum import Func\n >>> Func(6)\n array([[ 1., 1., 1., 1., 1., 1.],\n [ 1., 2., 3., 4., 5., 6.],\n [ 1., 3., 6., 10., 15., 21.],\n [ 1., 4., 10., 20., 35., 56.],\n [ 1., 5., 15., 35., 70., 126.],\n [ 1., 6., 21., 56., 126., 252.]])\n\n .. todo:: use the symmetric property to improve computational time if needed\n \"\"\"\n errors.is_positive_integer(arg_0)\n arg_1 = numpy.zeros((arg_0, arg_0))\n\n #fill the first row and column\n for arg_2 in range(0, arg_0):\n arg_1[arg_2, 0] = 1\n arg_1[0, arg_2] = 1\n if arg_0 > 1:\n for arg_2 in range(1, arg_0):\n for arg_3 in range(1, arg_0):\n arg_1[arg_2, arg_3] = arg_1[arg_2-1, arg_3] + arg_1[arg_2, arg_3-1]\n return arg_1"} +{"_id": "doc_5851", "title": "", "text": "def Func(arg_0):\n \"\"\"SVD decomposition using numpy.linalg.svd\n\n :param A: a M by N matrix\n\n :return:\n * U, a M by M matrix\n * S the N eigen values\n * V a N by N matrix\n\n See :func:`numpy.linalg.svd` for a detailed documentation.\n\n Should return the same as in [Marple]_ , CSVD routine.\n\n ::\n\n U, S, V = numpy.linalg.svd(A)\n U, S, V = cvsd(A)\n\n \"\"\"\n arg_1, arg_2, arg_3 = numpy.linalg.svd(arg_0)\n return arg_1, arg_2, arg_3"} +{"_id": "doc_5852", "title": "", "text": "def Func():\n \"\"\"Yield paths to standard modules.\"\"\"\n for arg_0 in [True, False]:\n arg_1 = distutils.sysconfig.get_python_lib(standard_lib=True,\n plat_specific=arg_0)\n\n for arg_2 in os.listdir(arg_1):\n yield arg_2\n\n try:\n for arg_2 in os.listdir(os.path.join(arg_1, 'lib-dynload')):\n yield arg_2\n except OSError: # pragma: no cover\n pass"} +{"_id": "doc_5853", "title": "", "text": "def Func():\n \"\"\"Yield standard module names.\"\"\"\n for arg_0 in standard_paths():\n if arg_0.startswith('_') or '-' in arg_0:\n continue\n\n if '.' 
in arg_0 and arg_0.rsplit('.')[-1] not in ['so', 'py', 'pyc']:\n continue\n\n yield arg_0.split('.')[0]"} +{"_id": "doc_5854", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield line numbers of unused imports.\"\"\"\n for arg_1 in arg_0:\n if isinstance(arg_1, pyflakes.messages.UnusedImport):\n yield arg_1.lineno"} +{"_id": "doc_5855", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield line number and module name of unused imports.\"\"\"\n arg_1 = r'\\'(.+?)\\''\n for arg_2 in arg_0:\n if isinstance(arg_2, pyflakes.messages.UnusedImport):\n arg_3 = re.search(arg_1, str(arg_2))\n arg_3 = arg_3.group()[1:-1]\n if arg_3:\n yield (arg_2.lineno, arg_3)"} +{"_id": "doc_5856", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield line number of star import usage.\"\"\"\n for arg_1 in arg_0:\n if isinstance(arg_1, pyflakes.messages.ImportStarUsed):\n yield arg_1.lineno"} +{"_id": "doc_5857", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield line number, undefined name, and its possible origin module.\"\"\"\n for arg_1 in arg_0:\n if isinstance(arg_1, pyflakes.messages.ImportStarUsage):\n arg_2 = arg_1.message_args[0]\n arg_3 = arg_1.message_args[1]\n yield (arg_1.lineno, arg_2, arg_3)"} +{"_id": "doc_5858", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Yield line numbers of duplicate keys.\"\"\"\n arg_0 = [\n arg_6 for arg_6 in arg_0\n if isinstance(arg_6, pyflakes.messages.MultiValueRepeatedKeyLiteral)]\n\n if arg_0:\n # Filter out complex cases. We don't want to bother trying to parse\n # this stuff and get it right. We can do it on a key-by-key basis.\n\n arg_2 = create_key_to_messages_dict(arg_0)\n\n arg_3 = arg_1.split('\\n')\n\n for (arg_4, arg_0) in arg_2.items():\n arg_5 = True\n for arg_6 in arg_0:\n arg_7 = arg_3[arg_6.lineno - 1]\n arg_4 = arg_6.message_args[0]\n\n if not dict_entry_has_key(arg_7, arg_4):\n arg_5 = False\n\n if arg_5:\n for arg_6 in arg_0:\n yield arg_6.lineno"} +{"_id": "doc_5859", "title": "", "text": "def Func(arg_0):\n \"\"\"Return dict mapping the key to list of messages.\"\"\"\n arg_1 = collections.defaultdict(lambda: [])\n for arg_2 in arg_0:\n arg_1[arg_2.message_args[0]].append(arg_2)\n return arg_1"} +{"_id": "doc_5860", "title": "", "text": "def Func(arg_0):\n \"\"\"Return messages from pyflakes.\"\"\"\n if sys.version_info[0] == 2 and isinstance(arg_0, unicode):\n # Convert back to original byte string encoding, otherwise pyflakes\n # call to compile() will complain. See PEP 263. 
This only affects\n # Python 2.\n try:\n arg_0 = arg_0.encode('utf-8')\n except UnicodeError: # pragma: no cover\n return []\n\n arg_1 = ListReporter()\n try:\n pyflakes.api.Func(arg_0, filename='', arg_1=arg_1)\n except (AttributeError, RecursionError, UnicodeDecodeError):\n pass\n return arg_1.messages"} +{"_id": "doc_5861", "title": "", "text": "def Func(arg_0):\n \"\"\"Return package name in import statement.\"\"\"\n assert '\\\\' not in arg_0\n assert '(' not in arg_0\n assert ')' not in arg_0\n assert ';' not in arg_0\n\n if arg_0.lstrip().startswith(('import', 'from')):\n arg_1 = arg_0.split()[1]\n else:\n # Ignore doctests.\n return None\n\n arg_2 = arg_1.split('.')[0]\n assert ' ' not in arg_2\n\n return arg_2"} +{"_id": "doc_5862", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"Return True if import is spans multiples lines.\"\"\"\n for arg_2 in '()':\n if arg_2 in arg_0:\n return True\n\n # Ignore doctests.\n if arg_0.lstrip().startswith('>'):\n return True\n\n return multiline_statement(arg_0, arg_1)"} +{"_id": "doc_5863", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse and filter ``from something import a, b, c``.\n\n Return line without unused import modules, or `pass` if all of the\n module in import is unused.\n \"\"\"\n (arg_2, arg_3) = re.split(pattern=r'\\bimport\\b',\n string=arg_0, maxsplit=1)\n arg_4 = re.search(pattern=r'\\bfrom\\s+([^ ]+)',\n string=arg_2).group(1)\n\n # Create an imported module list with base module name\n # ex ``from a import b, c as d`` -> ``['a.b', 'a.c as d']``\n arg_3 = re.split(pattern=r',', string=arg_3.strip())\n arg_3 = [arg_4 + '.' + x.strip() for x in arg_3]\n\n # We compare full module name (``a.module`` not `module`) to\n # guarantee the exact same module as detected from pyflakes.\n arg_5 = [x.replace(arg_4 + '.', '')\n for x in arg_3 if x not in arg_1]\n\n # All of the import in this statement is unused\n if not arg_5:\n return get_indentation(arg_0) + 'pass' + get_line_ending(arg_0)\n\n arg_2 += 'import '\n\n return (\n arg_2 +\n ', '.join(sorted(arg_5)) +\n get_line_ending(arg_0))"} +{"_id": "doc_5864", "title": "", "text": "def Func(arg_0):\n \"\"\"Return dictionary that maps line number to message.\"\"\"\n arg_1 = {}\n for arg_2 in arg_0:\n arg_1[arg_2.lineno] = arg_2\n return arg_1"} +{"_id": "doc_5865", "title": "", "text": "def Func(arg_0):\n \"\"\"Return True if value is a literal or a name.\"\"\"\n try:\n ast.literal_eval(arg_0)\n return True\n except (SyntaxError, ValueError):\n pass\n\n if arg_0.strip() in ['dict()', 'list()', 'set()']:\n return True\n\n # Support removal of variables on the right side. 
But make sure\n # there are no dots, which could mean an access of a property.\n return re.match(r'^\\w+\\s*$', arg_0)"} +{"_id": "doc_5866", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield line numbers of unneeded \"pass\" statements.\"\"\"\n arg_1 = io.StringIO(arg_0)\n arg_2 = None\n arg_3 = None\n arg_4 = None\n arg_5 = ''\n for arg_6 in tokenize.generate_tokens(arg_1.readline):\n arg_7 = arg_6[0]\n arg_8 = arg_6[2][0]\n arg_9 = arg_6[4]\n\n arg_10 = (arg_7 == tokenize.NAME and arg_9.strip() == 'pass')\n\n # Leading \"pass\".\n if (arg_8 - 1 == arg_3 and\n get_indentation(arg_9) == arg_4 and\n arg_7 in ATOMS and\n not arg_10):\n yield arg_8 - 1\n\n if arg_10:\n arg_3 = arg_8\n arg_4 = get_indentation(arg_9)\n\n # Trailing \"pass\".\n if (arg_10 and\n arg_2 != tokenize.INDENT and\n not arg_5.rstrip().endswith('\\\\')):\n yield arg_8\n\n arg_2 = arg_7\n arg_5 = arg_9"} +{"_id": "doc_5867", "title": "", "text": "def Func(arg_0):\n \"\"\"Yield code with useless \"pass\" lines removed.\"\"\"\n try:\n arg_1 = frozenset(useless_pass_line_numbers(arg_0))\n except (SyntaxError, tokenize.TokenError):\n arg_1 = frozenset()\n\n arg_2 = io.StringIO(arg_0)\n for arg_3, arg_4 in enumerate(arg_2.readlines(), start=1):\n if arg_3 not in arg_1:\n yield arg_4"} +{"_id": "doc_5868", "title": "", "text": "def Func(arg_0):\n \"\"\"Return leading whitespace.\"\"\"\n if arg_0.strip():\n arg_1 = len(arg_0) - len(arg_0.lstrip())\n return arg_0[:arg_1]\n else:\n return ''"} +{"_id": "doc_5869", "title": "", "text": "def Func(arg_0):\n \"\"\"Return line ending.\"\"\"\n arg_1 = len(arg_0.rstrip()) - len(arg_0)\n if not arg_1:\n return ''\n else:\n return arg_0[arg_1:]"} +{"_id": "doc_5870", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False,\n arg_3=False, arg_4=False,\n arg_5=False, arg_6=False):\n \"\"\"Return code with all filtering run on it.\"\"\"\n if not arg_0:\n return arg_0\n\n # pyflakes does not handle \"nonlocal\" correctly.\n if 'nonlocal' in arg_0:\n arg_5 = False\n\n arg_7 = None\n while True:\n arg_7 = ''.join(\n filter_useless_pass(''.join(\n filter_code(\n arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n ))))\n\n if arg_7 == arg_0:\n break\n arg_0 = arg_7\n\n return arg_7"} +{"_id": "doc_5871", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a set of strings.\"\"\"\n return set(arg_1.strip() for arg_1 in arg_0.split(',') if arg_1.strip())"} +{"_id": "doc_5872", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the ObtainLease response payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._lease_time:\n arg_0._lease_time.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._last_change_date:\n arg_0._last_change_date.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(ObtainLeaseResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5873", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Cancel request payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._asynchronous_correlation_value:\n arg_0._asynchronous_correlation_value.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(CancelRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5874", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Returns a Name object, populated with the given value and type\n '''\n if isinstance(arg_1, Name.NameValue):\n arg_3 = arg_1\n elif isinstance(arg_1, str):\n arg_3 = arg_0.NameValue(arg_1)\n else:\n arg_4 = 'Name'\n arg_5 = exceptions.ErrorStrings.BAD_EXP_RECV\n arg_6 = 'name_value'\n raise TypeError(arg_5.format('{0}.{1}'.format(arg_4, arg_6),\n 'name_value', type(Name.NameValue),\n type(arg_1)))\n\n if isinstance(arg_2, Name.NameType):\n arg_7 = arg_2\n elif isinstance(arg_2, Enum):\n arg_7 = arg_0.NameType(arg_2)\n else:\n arg_4 = 'Name'\n arg_5 = exceptions.ErrorStrings.BAD_EXP_RECV\n arg_6 = 'name_type'\n raise TypeError(arg_5.format('{0}.{1}'.format(arg_4, arg_6),\n 'name_type', type(Name.NameType),\n type(arg_2)))\n\n return Name(arg_1=arg_3,\n arg_2=arg_7)"} +{"_id": "doc_5875", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Digest object and decode it into its\n constituent parts.\n\n Args:\n istream (Stream): A data stream containing encoded object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(Digest, arg_0).Func(arg_1, arg_2=arg_2)\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n arg_0.hashing_algorithm.Func(arg_6, arg_2=arg_2)\n arg_0.digest_value.Func(arg_6, arg_2=arg_2)\n arg_0.key_format_type.Func(arg_6, arg_2=arg_2)\n\n arg_0.is_oversized(arg_6)\n arg_0.validate()"} +{"_id": "doc_5876", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Digest object to a stream.\n\n Args:\n ostream (Stream): A data stream in which to encode object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n arg_0.hashing_algorithm.Func(arg_6, arg_2=arg_2)\n arg_0.digest_value.Func(arg_6, arg_2=arg_2)\n arg_0.key_format_type.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(Digest, arg_0).Func(arg_1, arg_2=arg_2)\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5877", "title": "", "text": "def Func(arg_0,\n arg_1=arg_2.SHA_256,\n arg_4=b'',\n arg_5=arg_6.RAW):\n \"\"\"\n Construct a Digest object from provided digest values.\n\n Args:\n hashing_algorithm (HashingAlgorithm): An enumeration representing\n the hash algorithm used to compute the digest. Optional,\n defaults to HashingAlgorithm.SHA_256.\n digest_value (byte string): The bytes of the digest hash. Optional,\n defaults to the empty byte string.\n key_format_type (KeyFormatType): An enumeration representing the\n format of the key corresponding to the digest. Optional,\n defaults to KeyFormatType.RAW.\n\n Returns:\n Digest: The newly Funcd Digest.\n\n Example:\n >>> x = Digest.Func(HashingAlgorithm.MD5, b'\\x00',\n ... KeyFormatType.RAW)\n >>> x.hashing_algorithm\n HashingAlgorithm(value=HashingAlgorithm.MD5)\n >>> x.digest_value\n DigestValue(value=bytearray(b'\\x00'))\n >>> x.key_format_type\n KeyFormatType(value=KeyFormatType.RAW)\n \"\"\"\n arg_8 = HashingAlgorithm(arg_1)\n arg_9 = DigestValue(bytearray(arg_4))\n arg_10 = KeyFormatType(arg_5)\n\n return Digest(arg_1=arg_8,\n arg_4=arg_9,\n arg_5=arg_10)"} +{"_id": "doc_5878", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the ApplicationSpecificInformation object and\n decode it into its constituent parts.\n\n Args:\n istream (Stream): A data stream containing encoded object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(ApplicationSpecificInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n arg_0.application_namespace.Func(arg_6, arg_2=arg_2)\n arg_0.application_data.Func(arg_6, arg_2=arg_2)\n\n arg_0.is_oversized(arg_6)\n arg_0.validate()"} +{"_id": "doc_5879", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the ApplicationSpecificInformation object to a\n stream.\n\n Args:\n ostream (Stream): A data stream in which to encode object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n arg_0.application_namespace.Func(arg_6, arg_2=arg_2)\n arg_0.application_data.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(ApplicationSpecificInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5880", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Construct an ApplicationSpecificInformation object from provided data\n and namespace values.\n\n Args:\n application_namespace (str): The name of the application namespace.\n application_data (str): Application data related to the namespace.\n\n Returns:\n ApplicationSpecificInformation: The newly Funcd set of\n application information.\n\n Example:\n >>> x = ApplicationSpecificInformation.Func('namespace', 'data')\n >>> x.application_namespace.value\n 'namespace'\n >>> x.application_data.value\n 'data'\n \"\"\"\n arg_3 = ApplicationNamespace(arg_1)\n arg_4 = ApplicationData(arg_2)\n return ApplicationSpecificInformation(\n arg_1=arg_3, arg_2=arg_4)"} +{"_id": "doc_5881", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the DerivationParameters struct and decode it\n into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(DerivationParameters, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(\n arg_3.Tags.CRYPTOGRAPHIC_PARAMETERS,\n arg_6\n ):\n arg_0._cryptographic_parameters = CryptographicParameters()\n arg_0._cryptographic_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.INITIALIZATION_VECTOR, arg_6):\n arg_0._initialization_vector = ByteString(\n tag=arg_3.Tags.INITIALIZATION_VECTOR\n )\n arg_0._initialization_vector.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.DERIVATION_DATA, arg_6):\n arg_0._derivation_data = ByteString(tag=arg_3.Tags.DERIVATION_DATA)\n arg_0._derivation_data.Func(arg_6, arg_2=arg_2)\n\n if arg_0.is_tag_next(arg_3.Tags.SALT, arg_6):\n arg_0._salt = ByteString(tag=arg_3.Tags.SALT)\n arg_0._salt.Func(arg_6, arg_2=arg_2)\n\n if arg_0.is_tag_next(Tags.ITERATION_COUNT, arg_6):\n arg_0._iteration_count = Integer(tag=Tags.ITERATION_COUNT)\n arg_0._iteration_count.Func(arg_6, arg_2=arg_2)\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5882", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the DerivationParameters struct to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n if arg_0._cryptographic_parameters:\n arg_0._cryptographic_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._initialization_vector:\n arg_0._initialization_vector.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._derivation_data:\n arg_0._derivation_data.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._salt:\n arg_0._salt.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._iteration_count:\n arg_0._iteration_count.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(DerivationParameters, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5883", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Get request payload and decode it into its\n constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(GetRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.KEY_FORMAT_TYPE, arg_6):\n arg_0._key_format_type = primitives.Enumeration(\n enum=arg_3.KeyFormatType,\n tag=arg_3.Tags.KEY_FORMAT_TYPE\n )\n arg_0._key_format_type.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.KEY_COMPRESSION_TYPE, arg_6):\n arg_0._key_compression_type = primitives.Enumeration(\n enum=arg_3.KeyCompressionType,\n tag=arg_3.Tags.KEY_COMPRESSION_TYPE\n )\n arg_0._key_compression_type.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(\n arg_3.Tags.KEY_WRAPPING_SPECIFICATION,\n arg_6\n ):\n arg_0._key_wrapping_specification = \\\n objects.KeyWrappingSpecification()\n arg_0._key_wrapping_specification.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5884", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Get request payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier is not None:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._key_format_type is not None:\n arg_0._key_format_type.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._key_compression_type is not None:\n arg_0._key_compression_type.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._key_wrapping_specification is not None:\n arg_0._key_wrapping_specification.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(GetRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5885", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Get response payload and decode it\n into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the object type, unique identifier, or\n secret attributes are missing from the encoded payload.\n \"\"\"\n super(GetResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.OBJECT_TYPE, arg_6):\n arg_0._object_type = primitives.Enumeration(\n enum=arg_3.ObjectType,\n tag=arg_3.Tags.OBJECT_TYPE\n )\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"Parsed payload encoding is missing the object type field.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Parsed payload encoding is missing the unique identifier \"\n \"field.\"\n )\n\n arg_0.secret = arg_0.secret_factory.create(arg_0.object_type)\n if arg_0.is_tag_next(arg_0._secret.tag, arg_6):\n arg_0._secret.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"Parsed payload encoding is missing the secret field.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5886", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Get response payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the object type, unique identifier, or\n secret attributes are missing from the payload struct.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0.object_type:\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\"Payload is missing the object type field.\")\n\n if arg_0.unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Payload is missing the unique identifier field.\"\n )\n\n if arg_0.secret:\n arg_0._secret.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\"Payload is missing the secret field.\")\n\n arg_0.length = arg_6.length()\n super(GetResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5887", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the SignatureVerify request payload and decode\n it into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is missing from the\n encoded payload.\n \"\"\"\n super(SignatureVerifyRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0.is_tag_next(arg_3.Tags.CRYPTOGRAPHIC_PARAMETERS, arg_6):\n arg_0._cryptographic_parameters = \\\n attributes.CryptographicParameters()\n arg_0._cryptographic_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0.is_tag_next(arg_3.Tags.DATA, arg_6):\n arg_0._data = primitives.ByteString(tag=arg_3.Tags.DATA)\n arg_0._data.Func(arg_6, arg_2=arg_2)\n if arg_0.is_tag_next(arg_3.Tags.DIGESTED_DATA, arg_6):\n arg_0._digested_data = primitives.ByteString(\n tag=arg_3.Tags.DIGESTED_DATA\n )\n arg_0._digested_data.Func(arg_6, arg_2=arg_2)\n if arg_0.is_tag_next(arg_3.Tags.SIGNATURE_DATA, arg_6):\n arg_0._signature_data = primitives.ByteString(\n tag=arg_3.Tags.SIGNATURE_DATA\n )\n arg_0._signature_data.Func(arg_6, arg_2=arg_2)\n if arg_0.is_tag_next(arg_3.Tags.CORRELATION_VALUE, arg_6):\n arg_0._correlation_value = primitives.ByteString(\n tag=arg_3.Tags.CORRELATION_VALUE\n )\n arg_0._correlation_value.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0.is_tag_next(arg_3.Tags.INIT_INDICATOR, arg_6):\n arg_0._init_indicator = primitives.Boolean(\n tag=arg_3.Tags.INIT_INDICATOR\n )\n arg_0._init_indicator.Func(arg_6, arg_2=arg_2)\n if arg_0.is_tag_next(arg_3.Tags.FINAL_INDICATOR, arg_6):\n arg_0._final_indicator = primitives.Boolean(\n tag=arg_3.Tags.FINAL_INDICATOR\n )\n arg_0._final_indicator.Func(arg_6, arg_2=arg_2)\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5888", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the SignatureVerify request payload to a\n stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will 
be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._cryptographic_parameters:\n arg_0._cryptographic_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._data:\n arg_0._data.Func(arg_6, arg_2=arg_2)\n if arg_0._digested_data:\n arg_0._digested_data.Func(arg_6, arg_2=arg_2)\n if arg_0._signature_data:\n arg_0._signature_data.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._correlation_value:\n arg_0._correlation_value.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._init_indicator:\n arg_0._init_indicator.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._final_indicator:\n arg_0._final_indicator.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(SignatureVerifyRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5889", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Process a KMIP request message.\n\n This routine is the main driver of the KmipEngine. It breaks apart and\n processes the request header, handles any message errors that may\n result, and then passes the set of request batch items on for\n processing. This routine is thread-safe, allowing multiple client\n connections to use the same KmipEngine.\n\n Args:\n request (RequestMessage): The request message containing the batch\n items to be processed.\n credential (string): Identifying information about the client\n obtained from the client certificate. Optional, defaults to\n None.\n\n Returns:\n ResponseMessage: The response containing all of the results from\n the request batch items.\n \"\"\"\n arg_0._client_identity = [None, None]\n arg_4 = arg_1.request_header\n\n # Process the protocol version\n arg_0._set_protocol_version(arg_4.protocol_version)\n\n # Process the maximum response size\n arg_5 = None\n if arg_4.maximum_response_size:\n arg_5 = arg_4.maximum_response_size.value\n\n # Process the time stamp\n arg_6 = int(time.time())\n if arg_4.time_stamp:\n arg_7 = arg_4.time_stamp.value\n\n if (arg_6 >= arg_7) and ((arg_6 - arg_7) < 60):\n arg_0._logger.info(\"Received request at time: {0}\".format(\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.gmtime(arg_7)\n )\n ))\n else:\n if arg_6 < arg_7:\n arg_0._logger.warning(\n \"Received request with future timestamp. Received \"\n \"timestamp: {0}, Current timestamp: {1}\".format(\n arg_7,\n arg_6\n )\n )\n\n raise exceptions.InvalidMessage(\n \"Future request rejected by server.\"\n )\n else:\n arg_0._logger.warning(\n \"Received request with old timestamp. Possible \"\n \"replay attack. 
Received timestamp: {0}, Current \"\n \"timestamp: {1}\".format(arg_7, arg_6)\n )\n\n raise exceptions.InvalidMessage(\n \"Stale request rejected by server.\"\n )\n else:\n arg_0._logger.info(\"Received request at time: {0}\".format(\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.gmtime(arg_6)\n )\n ))\n\n # Process the asynchronous indicator\n arg_0.is_asynchronous = False\n if arg_4.asynchronous_indicator is not None:\n arg_0.is_asynchronous = arg_4.asynchronous_indicator.value\n\n if arg_0.is_asynchronous:\n raise exceptions.InvalidMessage(\n \"Asynchronous operations are not supported.\"\n )\n\n # Process the authentication credentials\n if arg_4.authentication:\n if arg_4.authentication.credentials:\n arg_9 = arg_4.authentication.credentials[0]\n else:\n arg_9 = None\n else:\n arg_9 = None\n\n arg_0._verify_credential(arg_9, arg_2)\n\n # Process the batch error continuation option\n arg_10 = enums.BatchErrorContinuationOption.STOP\n if arg_4.batch_error_cont_option is not None:\n arg_10 = arg_4.batch_error_cont_option.value\n\n if arg_10 == enums.BatchErrorContinuationOption.UNDO:\n raise exceptions.InvalidMessage(\n \"Undo option for batch handling is not supported.\"\n )\n\n # Process the batch order option\n arg_11 = False\n if arg_4.batch_order_option:\n arg_11 = arg_4.batch_order_option.value\n\n arg_12 = arg_0._process_batch(\n arg_1.batch_items,\n arg_10,\n arg_11\n )\n arg_13 = arg_0._build_response(\n arg_4.protocol_version,\n arg_12\n )\n\n return arg_13, arg_5, arg_4.protocol_version"} +{"_id": "doc_5890", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Build a simple ResponseMessage with a single error result.\n\n Args:\n version (ProtocolVersion): The protocol version the response\n should be addressed with.\n reason (ResultReason): An enumeration classifying the type of\n error occurred.\n message (str): A string providing additional information about\n the error.\n\n Returns:\n ResponseMessage: The simple ResponseMessage containing a\n single error result.\n \"\"\"\n arg_4 = messages.ResponseBatchItem(\n result_status=contents.ResultStatus(\n enums.ResultStatus.OPERATION_FAILED\n ),\n result_reason=contents.ResultReason(arg_2),\n result_message=contents.ResultMessage(arg_3)\n )\n return arg_0._build_response(arg_1, [arg_4])"} +{"_id": "doc_5891", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Given a kmip.pie object and a dictionary of attributes, attempt to set\n the attribute values on the object.\n \"\"\"\n for arg_3, arg_4 in six.iteritems(arg_2):\n arg_5 = arg_1._object_type\n if arg_0._attribute_policy.is_attribute_applicable_to_object_type(\n arg_3,\n arg_5):\n arg_0._set_attribute_on_managed_object(\n arg_1,\n (arg_3, arg_4)\n )\n else:\n arg_6 = arg_5.name\n raise exceptions.InvalidField(\n \"Cannot set {0} attribute on {1} object.\".format(\n arg_3,\n ''.join([arg_7.capitalize() for arg_7 in arg_6.split('_')])\n )\n )"} +{"_id": "doc_5892", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Set the attribute value on the kmip.pie managed object.\n \"\"\"\n arg_3 = arg_2[0]\n arg_4 = arg_2[1]\n\n if arg_0._attribute_policy.is_attribute_multivalued(arg_3):\n if arg_3 == 'Name':\n arg_1.names.extend(\n [arg_5.name_value.value for arg_5 in arg_4]\n )\n for arg_6 in arg_1.names:\n if arg_1.names.count(arg_6) > 1:\n raise exceptions.InvalidField(\n \"Cannot set duplicate name values.\"\n )\n else:\n # TODO (peterhamilton) Remove when all attributes are supported\n raise exceptions.InvalidField(\n \"The {0} 
attribute is unsupported.\".format(arg_3)\n )\n else:\n arg_7 = None\n arg_8 = arg_4.value\n\n if arg_3 == 'Cryptographic Algorithm':\n arg_7 = 'cryptographic_algorithm'\n elif arg_3 == 'Cryptographic Length':\n arg_7 = 'cryptographic_length'\n elif arg_3 == 'Cryptographic Usage Mask':\n arg_7 = 'cryptographic_usage_masks'\n arg_8 = list()\n for arg_9 in enums.CryptographicUsageMask:\n if arg_9.value & arg_4.value:\n arg_8.append(arg_9)\n elif arg_3 == 'Operation Policy Name':\n arg_7 = 'operation_policy_name'\n\n if arg_7:\n arg_10 = getattr(arg_1, arg_7)\n if arg_10:\n if arg_10 != arg_8:\n raise exceptions.InvalidField(\n \"Cannot overwrite the {0} attribute.\".format(\n arg_3\n )\n )\n else:\n setattr(arg_1, arg_7, arg_8)\n else:\n # TODO (peterhamilton) Remove when all attributes are supported\n raise exceptions.InvalidField(\n \"The {0} attribute is unsupported.\".format(arg_3)\n )"} +{"_id": "doc_5893", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Decrypt request payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._cryptographic_parameters:\n arg_0._cryptographic_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._data:\n arg_0._data.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\"invalid payload missing the data attribute\")\n\n if arg_0._iv_counter_nonce:\n arg_0._iv_counter_nonce.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(DecryptRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5894", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Create a secret object of the specified type with the given value.\n\n Args:\n secret_type (ObjectType): An ObjectType enumeration specifying the\n type of secret to Func.\n value (dict): A dictionary containing secret data. 
Optional,\n defaults to None.\n\n Returns:\n secret: The newly constructed secret object.\n\n Raises:\n TypeError: If the provided secret type is unrecognized.\n\n Example:\n >>> factory.Func(ObjectType.SYMMETRIC_KEY)\n SymmetricKey(...)\n \"\"\"\n if arg_1 is ObjectType.CERTIFICATE:\n return arg_0._Func_certificate(arg_2)\n elif arg_1 is ObjectType.SYMMETRIC_KEY:\n return arg_0._Func_symmetric_key(arg_2)\n elif arg_1 is ObjectType.PUBLIC_KEY:\n return arg_0._Func_public_key(arg_2)\n elif arg_1 is ObjectType.PRIVATE_KEY:\n return arg_0._Func_private_key(arg_2)\n elif arg_1 is ObjectType.SPLIT_KEY:\n return arg_0._Func_split_key(arg_2)\n elif arg_1 is ObjectType.TEMPLATE:\n return arg_0._Func_template(arg_2)\n elif arg_1 is ObjectType.SECRET_DATA:\n return arg_0._Func_secret_data(arg_2)\n elif arg_1 is ObjectType.OPAQUE_DATA:\n return arg_0._Func_opaque_data(arg_2)\n else:\n raise TypeError(\"Unrecognized secret type: {0}\".format(\n arg_1))"} +{"_id": "doc_5895", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Load configuration settings from the file pointed to by path.\n\n This will overwrite all current setting values.\n\n Args:\n path (string): The path to the configuration file containing\n the settings to load. Required.\n Raises:\n ConfigurationError: Raised if the path does not point to an\n existing file or if a setting value is invalid.\n \"\"\"\n if not os.path.exists(arg_1):\n raise exceptions.ConfigurationError(\n \"The server configuration file ('{0}') could not be \"\n \"located.\".format(arg_1)\n )\n\n arg_0._logger.info(\n \"Loading server configuration settings from: {0}\".format(arg_1)\n )\n\n arg_2 = configparser.ConfigParser()\n arg_2.read(arg_1)\n arg_0._parse_settings(arg_2)\n arg_0.parse_auth_settings(arg_2)"} +{"_id": "doc_5896", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns the integer value of the usage mask bitmask. This value is\n stored in the database.\n\n Args:\n value(list): list of enums in the\n usage mask\n dialect(string): SQL dialect\n \"\"\"\n arg_3 = 0x00\n for arg_4 in arg_1:\n arg_3 = arg_3 | arg_4.value\n return arg_3"} +{"_id": "doc_5897", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns a new list of enums.CryptographicUsageMask Enums. This converts\n the integer value into the list of enums.\n\n Args:\n value(int): The integer value stored in the database that is used\n to create the list of enums.CryptographicUsageMask Enums.\n dialect(string): SQL dialect\n \"\"\"\n arg_3 = list()\n if arg_1:\n for arg_4 in enums.CryptographicUsageMask:\n if arg_4.value & arg_1:\n arg_3.append(arg_4)\n return arg_3"} +{"_id": "doc_5898", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the encoding of the LongInteger from the input stream.\n\n Args:\n istream (stream): A buffer containing the encoded bytes of a\n LongInteger. Usually a BytearrayStream object. Required.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidPrimitiveLength: if the long integer encoding Func in has\n an invalid encoded length.\n \"\"\"\n super(LongInteger, arg_0).Func(arg_1, arg_2=arg_2)\n\n if arg_0.length is not LongInteger.LENGTH:\n raise exceptions.InvalidPrimitiveLength(\n \"invalid long integer length Func; \"\n \"expected: {0}, observed: {1}\".format(\n LongInteger.LENGTH, arg_0.length))\n\n arg_0.value = unpack('!q', arg_1.Func(arg_0.length))[0]\n arg_0.validate()"} +{"_id": "doc_5899", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the encoding of the LongInteger to the output stream.\n\n Args:\n ostream (stream): A buffer to contain the encoded bytes of a\n LongInteger. Usually a BytearrayStream object. Required.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(LongInteger, arg_0).Func(arg_1, arg_2=arg_2)\n arg_1.Func(pack('!q', arg_0.value))"} +{"_id": "doc_5900", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the value of the LongInteger is valid.\n\n Raises:\n TypeError: if the value is not of type int or long\n ValueError: if the value cannot be represented by a signed 64-bit\n integer\n \"\"\"\n if arg_0.value is not None:\n if not isinstance(arg_0.value, six.integer_types):\n raise TypeError('expected (one of): {0}, observed: {1}'.format(\n six.integer_types, type(arg_0.value)))\n else:\n if arg_0.value > LongInteger.MAX:\n raise ValueError(\n 'long integer value greater than accepted max')\n elif arg_0.value < LongInteger.MIN:\n raise ValueError(\n 'long integer value less than accepted min')"} +{"_id": "doc_5901", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the encoding of the BigInteger from the input stream.\n\n Args:\n istream (stream): A buffer containing the encoded bytes of the\n value of a BigInteger. Usually a BytearrayStream object.\n Required.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidPrimitiveLength: if the big integer encoding Func in has\n an invalid encoded length.\n \"\"\"\n super(BigInteger, arg_0).Func(arg_1, arg_2=arg_2)\n\n # Check for a valid length before even trying to parse the value.\n if arg_0.length % 8:\n raise exceptions.InvalidPrimitiveLength(\n \"invalid big integer length Func; \"\n \"expected: multiple of 8, observed: {0}\".format(arg_0.length))\n\n arg_6 = 1\n arg_7 = ''\n\n # Read the value byte by byte and convert it into binary, padding each\n # byte as needed.\n for arg_8 in range(arg_0.length):\n arg_9 = struct.unpack('!B', arg_1.Func(1))[0]\n arg_10 = \"{0:b}\".format(arg_9)\n arg_11 = len(arg_10) % 8\n if arg_11:\n arg_10 = ('0' * (8 - arg_11)) + arg_10\n arg_7 += arg_10\n\n # If the value is negative, convert via two's complement.\n if arg_7[0] == '1':\n arg_6 = -1\n arg_7 = arg_7.replace('1', 'i')\n arg_7 = arg_7.replace('0', '1')\n arg_7 = arg_7.replace('i', '0')\n\n arg_12 = arg_7.rfind('0')\n arg_7 = arg_7[0:arg_12] + '1' + ('0' * len(arg_7[arg_12 + 1:]))\n\n # Convert the value back to an integer and reapply the sign.\n arg_0.value = int(arg_7, 2) * arg_6"} +{"_id": "doc_5902", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the encoding of the BigInteger to the output stream.\n\n Args:\n ostream (Stream): A buffer to contain the encoded bytes of a\n BigInteger object. Usually a BytearrayStream object.\n Required.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n # Convert the value to binary and pad it as needed.\n arg_6 = \"{0:b}\".format(abs(arg_0.value))\n arg_6 = (\"0\" * (64 - (len(arg_6) % 64))) + arg_6\n\n # If the value is negative, convert via two's complement.\n if arg_0.value < 0:\n arg_6 = arg_6.replace('1', 'i')\n arg_6 = arg_6.replace('0', '1')\n arg_6 = arg_6.replace('i', '0')\n\n arg_7 = arg_6.rfind('0')\n arg_6 = arg_6[0:arg_7] + '1' + ('0' * len(arg_6[arg_7 + 1:]))\n\n # Convert each byte to hex and build the hex string for the value.\n arg_8 = b''\n for arg_9 in range(0, len(arg_6), 8):\n arg_10 = arg_6[arg_9:arg_9 + 8]\n arg_10 = int(arg_10, 2)\n arg_8 += struct.pack('!B', arg_10)\n\n arg_0.length = len(arg_8)\n super(BigInteger, arg_0).Func(arg_1, arg_2=arg_2)\n arg_1.Func(arg_8)"} +{"_id": "doc_5903", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the value of the BigInteger is valid.\n\n Raises:\n TypeError: if the value is not of type int or long\n \"\"\"\n if arg_0.value is not None:\n if not isinstance(arg_0.value, six.integer_types):\n raise TypeError('expected (one of): {0}, observed: {1}'.format(\n six.integer_types, type(arg_0.value)))"} +{"_id": "doc_5904", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the value of the Enumeration is valid.\n\n Raises:\n TypeError: if the enum is not of type Enum\n ValueError: if the value is not of the expected Enum subtype or if\n the value cannot be represented by an unsigned 32-bit integer\n \"\"\"\n if not isinstance(arg_0.enum, enumeration.EnumMeta):\n raise TypeError(\n 'enumeration type {0} must be of type EnumMeta'.format(\n arg_0.enum))\n if arg_0.value is not None:\n if not isinstance(arg_0.value, arg_0.enum):\n raise TypeError(\n 'enumeration {0} must be of type {1}'.format(\n arg_0.value, arg_0.enum))\n if type(arg_0.value.value) not in six.integer_types:\n raise TypeError('enumeration value must be an int')\n else:\n 
if arg_0.value.value > Enumeration.MAX:\n raise ValueError(\n 'enumeration value greater than accepted max')\n elif arg_0.value.value < Enumeration.MIN:\n raise ValueError(\n 'enumeration value less than accepted min')"} +{"_id": "doc_5905", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the encoding of the Boolean object to the output stream.\n\n Args:\n ostream (Stream): A buffer to contain the encoded bytes of a\n Boolean object. Usually a BytearrayStream object. Required.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(Boolean, arg_0).Func(arg_1, arg_2=arg_2)\n arg_0.Func_value(arg_1, arg_2=arg_2)"} +{"_id": "doc_5906", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the value of the Boolean object is valid.\n\n Raises:\n TypeError: if the value is not of type bool.\n \"\"\"\n if arg_0.value:\n if not isinstance(arg_0.value, bool):\n raise TypeError(\"expected: {0}, observed: {1}\".format(\n bool, type(arg_0.value)))"} +{"_id": "doc_5907", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the encoding of the Interval from the input stream.\n\n Args:\n istream (stream): A buffer containing the encoded bytes of the\n value of an Interval. Usually a BytearrayStream object.\n Required.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidPrimitiveLength: if the Interval encoding Func in has an\n invalid encoded length.\n InvalidPaddingBytes: if the Interval encoding Func in does not use\n zeroes for its padding bytes.\n \"\"\"\n super(Interval, arg_0).Func(arg_1, arg_2=arg_2)\n\n # Check for a valid length before even trying to parse the value.\n if arg_0.length != Interval.LENGTH:\n raise exceptions.InvalidPrimitiveLength(\n \"interval length must be {0}\".format(Interval.LENGTH))\n\n # Decode the Interval value and the padding bytes.\n arg_0.value = unpack('!I', arg_1.Func(Interval.LENGTH))[0]\n arg_7 = unpack('!I', arg_1.Func(Interval.LENGTH))[0]\n\n # Verify that the padding bytes are zero bytes.\n if arg_7 != 0:\n raise exceptions.InvalidPaddingBytes(\"padding bytes must be zero\")\n\n arg_0.validate()"} +{"_id": "doc_5908", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the value of the Interval is valid.\n\n Raises:\n TypeError: if the value is not of type int or long\n ValueError: if the value cannot be represented by an unsigned\n 32-bit integer\n \"\"\"\n if arg_0.value is not None:\n if type(arg_0.value) not in six.integer_types:\n raise TypeError('expected (one of): {0}, observed: {1}'.format(\n six.integer_types, type(arg_0.value)))\n else:\n if arg_0.value > Interval.MAX:\n raise ValueError(\n 'interval value greater than accepted max')\n elif arg_0.value < Interval.MIN:\n raise ValueError('interval value less than accepted min')"} +{"_id": "doc_5909", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set the key wrapping data attributes using a dictionary.\n \"\"\"\n if arg_1 is None:\n arg_1 = {}\n elif not isinstance(arg_1, dict):\n raise TypeError(\"Key wrapping data must be a dictionary.\")\n\n arg_0._kdw_wrapping_method = arg_1.get('wrapping_method')\n\n arg_3 = arg_1.get('encryption_key_information')\n if arg_3 is None:\n arg_3 = {}\n arg_0._kdw_eki_unique_identifier = arg_3.get('unique_identifier')\n arg_5 = 
arg_3.get('cryptographic_parameters')\n if arg_5 is None:\n arg_5 = {}\n arg_0._kdw_eki_cp_block_cipher_mode = arg_5.get('block_cipher_mode')\n arg_0._kdw_eki_cp_padding_method = arg_5.get('padding_method')\n arg_0._kdw_eki_cp_hashing_algorithm = arg_5.get('hashing_algorithm')\n arg_0._kdw_eki_cp_key_role_type = arg_5.get('key_role_type')\n arg_0._kdw_eki_cp_digital_signature_algorithm = \\\n arg_5.get('digital_signature_algorithm')\n arg_0._kdw_eki_cp_cryptographic_algorithm = \\\n arg_5.get('cryptographic_algorithm')\n arg_0._kdw_eki_cp_random_iv = arg_5.get('random_iv')\n arg_0._kdw_eki_cp_iv_length = arg_5.get('iv_length')\n arg_0._kdw_eki_cp_tag_length = arg_5.get('tag_length')\n arg_0._kdw_eki_cp_fixed_field_length = arg_5.get('fixed_field_length')\n arg_0._kdw_eki_cp_invocation_field_length = \\\n arg_5.get('invocation_field_length')\n arg_0._kdw_eki_cp_counter_length = arg_5.get('counter_length')\n arg_0._kdw_eki_cp_initial_counter_value = \\\n arg_5.get('initial_counter_value')\n\n arg_19 = arg_1.get('mac_signature_key_information')\n if arg_19 is None:\n arg_19 = {}\n arg_0._kdw_mski_unique_identifier = arg_19.get('unique_identifier')\n arg_21 = arg_19.get('cryptographic_parameters')\n if arg_21 is None:\n arg_21 = {}\n arg_0._kdw_mski_cp_block_cipher_mode = arg_21.get('block_cipher_mode')\n arg_0._kdw_mski_cp_padding_method = arg_21.get('padding_method')\n arg_0._kdw_mski_cp_hashing_algorithm = arg_21.get('hashing_algorithm')\n arg_0._kdw_mski_cp_key_role_type = arg_21.get('key_role_type')\n arg_0._kdw_mski_cp_digital_signature_algorithm = \\\n arg_21.get('digital_signature_algorithm')\n arg_0._kdw_mski_cp_cryptographic_algorithm = \\\n arg_21.get('cryptographic_algorithm')\n arg_0._kdw_mski_cp_random_iv = arg_21.get('random_iv')\n arg_0._kdw_mski_cp_iv_length = arg_21.get('iv_length')\n arg_0._kdw_mski_cp_tag_length = arg_21.get('tag_length')\n arg_0._kdw_mski_cp_fixed_field_length = \\\n arg_21.get('fixed_field_length')\n arg_0._kdw_mski_cp_invocation_field_length = \\\n arg_21.get('invocation_field_length')\n arg_0._kdw_mski_cp_counter_length = arg_21.get('counter_length')\n arg_0._kdw_mski_cp_initial_counter_value = \\\n arg_21.get('initial_counter_value')\n\n arg_0._kdw_mac_signature = arg_1.get('mac_signature')\n arg_0._kdw_iv_counter_nonce = arg_1.get('iv_counter_nonce')\n arg_0._kdw_encoding_option = arg_1.get('encoding_option')"} +{"_id": "doc_5910", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the contents of the PublicKey object are valid.\n\n Raises:\n TypeError: if the types of any PublicKey attributes are invalid.\n \"\"\"\n if not isinstance(arg_0.value, bytes):\n raise TypeError(\"key value must be bytes\")\n elif not isinstance(arg_0.cryptographic_algorithm,\n enums.CryptographicAlgorithm):\n raise TypeError(\"key algorithm must be a CryptographicAlgorithm \"\n \"enumeration\")\n elif not isinstance(arg_0.cryptographic_length, six.integer_types):\n raise TypeError(\"key length must be an integer\")\n elif not isinstance(arg_0.key_format_type, enums.KeyFormatType):\n raise TypeError(\"key format type must be a KeyFormatType \"\n \"enumeration\")\n elif arg_0.key_format_type not in arg_0._valid_formats:\n raise ValueError(\"key format type must be one of {0}\".format(\n arg_0._valid_formats))\n\n # TODO (peter-hamilton) Verify that the key bytes match the key format\n\n arg_1 = len(arg_0.cryptographic_usage_masks)\n for arg_2 in range(arg_1):\n arg_3 = arg_0.cryptographic_usage_masks[arg_2]\n if not isinstance(arg_3, 
enums.CryptographicUsageMask):\n arg_4 = \"({0} in list)\".format(arg_2)\n raise TypeError(\n \"key mask {0} must be a CryptographicUsageMask \"\n \"enumeration\".format(arg_4))\n\n arg_5 = len(arg_0.names)\n for arg_2 in range(arg_5):\n arg_6 = arg_0.names[arg_2]\n if not isinstance(arg_6, six.string_types):\n arg_4 = \"({0} in list)\".format(arg_2)\n raise TypeError(\"key name {0} must be a string\".format(\n arg_4))"} +{"_id": "doc_5911", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A utility function that converts an attribute name string into the\n corresponding attribute tag.\n\n For example: 'State' -> enums.Tags.STATE\n\n Args:\n value (string): The string name of the attribute.\n\n Returns:\n enum: The Tags enumeration value that corresponds to the attribute\n name string.\n\n Raises:\n ValueError: if the attribute name string is not a string or if it is\n an unrecognized attribute name\n \"\"\"\n if not isinstance(arg_0, six.string_types):\n raise ValueError(\"The attribute name must be a string.\")\n\n for arg_1 in attribute_name_tag_table:\n if arg_0 == arg_1[0]:\n return arg_1[1]\n\n raise ValueError(\"Unrecognized attribute name: '{}'\".format(arg_0))"} +{"_id": "doc_5912", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A utility function that converts an attribute tag into the corresponding\n attribute name string.\n\n For example: enums.Tags.STATE -> 'State'\n\n Args:\n value (enum): The Tags enumeration value of the attribute.\n\n Returns:\n string: The attribute name string that corresponds to the attribute\n tag.\n\n Raises:\n ValueError: if the attribute tag is not a Tags enumeration or if it\n is unrecognized attribute tag\n \"\"\"\n if not isinstance(arg_0, Tags):\n raise ValueError(\"The attribute tag must be a Tags enumeration.\")\n\n for arg_1 in attribute_name_tag_table:\n if arg_0 == arg_1[1]:\n return arg_1[0]\n\n raise ValueError(\"Unrecognized attribute tag: {}\".format(arg_0))"} +{"_id": "doc_5913", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A utility function that computes a bit mask from a collection of\n enumeration values.\n\n Args:\n enumerations (list): A list of enumeration values to be combined in a\n composite bit mask.\n\n Returns:\n int: The composite bit mask.\n \"\"\"\n return functools.reduce(\n lambda x, y: x | y, [arg_1.value for arg_1 in arg_0]\n )"} +{"_id": "doc_5914", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n A utility function that checks if the provided value is a composite bit\n mask of enumeration values in the specified enumeration class.\n\n Args:\n enumeration (class): One of the mask enumeration classes found in this\n file. 
These include:\n * Cryptographic Usage Mask\n * Protection Storage Mask\n * Storage Status Mask\n potential_mask (int): A potential bit mask composed of enumeration\n values belonging to the enumeration class.\n\n Returns:\n True: if the potential mask is a valid bit mask of the mask enumeration\n False: otherwise\n \"\"\"\n if not isinstance(arg_1, six.integer_types):\n return False\n\n arg_2 = (\n CryptographicUsageMask,\n ProtectionStorageMask,\n StorageStatusMask\n )\n if arg_0 not in arg_2:\n return False\n\n arg_3 = 0\n for arg_4 in [e.value for e in arg_0]:\n if (arg_4 & arg_1) == arg_4:\n arg_3 |= arg_4\n\n if arg_3 != arg_1:\n return False\n\n return True"} +{"_id": "doc_5915", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the CreateKeyPair request payload to a buffer.\n\n Args:\n output_buffer (stream): A data buffer in which to encode object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._common_template_attribute is not None:\n arg_0._common_template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n if arg_0._common_template_attribute is not None:\n arg_7 = objects.convert_template_attribute_to_attributes(\n arg_0._common_template_attribute\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._private_key_template_attribute is not None:\n arg_0._private_key_template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n if arg_0._private_key_template_attribute is not None:\n arg_7 = objects.convert_template_attribute_to_attributes(\n arg_0._private_key_template_attribute\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._public_key_template_attribute is not None:\n arg_0._public_key_template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n if arg_0._public_key_template_attribute is not None:\n arg_7 = objects.convert_template_attribute_to_attributes(\n arg_0._public_key_template_attribute\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(CreateKeyPairRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5916", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the CreateKeyPair response payload to a buffer.\n\n Args:\n output_buffer (stream): A data buffer in which to encode object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidField: Raised if the private key unique identifier or the\n public key unique identifier is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._private_key_unique_identifier:\n arg_0._private_key_unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The CreateKeyPair response payload is missing the private \"\n \"key unique identifier field.\"\n )\n\n if arg_0._public_key_unique_identifier:\n arg_0._public_key_unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The CreateKeyPair response payload is missing the public \"\n \"key unique identifier field.\"\n )\n\n if arg_0._private_key_template_attribute:\n arg_0._private_key_template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._public_key_template_attribute:\n arg_0._public_key_template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(CreateKeyPairResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5917", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the GetAttributeList request payload and decode\n it into its constituent parts.\n\n Args:\n input_buffer (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(GetAttributeListRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n arg_0._unique_identifier = None\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5918", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the GetAttributeList request payload to a\n stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(GetAttributeListRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5919", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the GetAttributeList response payload to a\n stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidField: Raised if the unique identifier or attribute name\n are not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The GetAttributeList response payload is missing the unique \"\n \"identifier field.\"\n )\n\n if arg_0._attribute_names:\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n for arg_7 in arg_0._attribute_names:\n arg_7.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n # NOTE (ph) This approach simplifies backwards compatible\n # issues but limits easy support for Attribute\n # Reference structures going forward, specifically\n # limiting the use of VendorIdentification for\n # custom attributes. If custom attributes need to\n # be retrieved using the GetAttributeList operation\n # for KMIP 2.0 applications this code will need to\n # change.\n for arg_7 in arg_0._attribute_names:\n arg_8 = arg_3.convert_attribute_name_to_tag(\n arg_7.value\n )\n arg_9 = primitives.Enumeration(\n arg_3.Tags,\n value=arg_8,\n tag=arg_3.Tags.ATTRIBUTE_REFERENCE\n )\n arg_9.Func(arg_6, arg_2=arg_2)\n\n else:\n raise exceptions.InvalidField(\n \"The GetAttributeList response payload is missing the \"\n \"attribute names field.\"\n )\n\n arg_0.length = arg_6.length()\n super(GetAttributeListResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5920", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Scan the policy directory for policy data.\n \"\"\"\n arg_1 = get_json_files(arg_0.policy_directory)\n for arg_2 in set(arg_1) - set(arg_0.policy_files):\n arg_0.file_timestamps[arg_2] = 0\n for arg_2 in set(arg_0.policy_files) - set(arg_1):\n arg_0.logger.info(\"Removing policies for file: {}\".format(arg_2))\n arg_0.file_timestamps.pop(arg_2, None)\n for arg_4 in arg_0.policy_cache.keys():\n arg_0.disassociate_policy_and_file(arg_4, arg_2)\n for arg_4 in [k for k, v in arg_0.policy_map.items() if v == arg_2]:\n arg_0.restore_or_delete_policy(arg_4)\n arg_0.policy_files = arg_1\n\n for arg_2 in sorted(arg_0.file_timestamps.keys()):\n arg_5 = os.path.getmtime(arg_2)\n if arg_5 > arg_0.file_timestamps[arg_2]:\n arg_0.logger.info(\"Loading policies for file: {}\".format(arg_2))\n arg_0.file_timestamps[arg_2] = arg_5\n arg_6 = [k for k, v in arg_0.policy_map.items() if v == arg_2]\n try:\n arg_7 = operation_policy.read_policy_from_file(arg_2)\n except ValueError:\n arg_0.logger.error(\"Failure loading file: {}\".format(arg_2))\n arg_0.logger.debug(\"\", exc_info=True)\n continue\n for arg_4 in arg_7.keys():\n arg_0.logger.info(\"Loading policy: {}\".format(arg_4))\n if arg_4 in arg_0.reserved_policies:\n arg_0.logger.warning(\n \"Policy '{}' overwrites a reserved policy and \"\n \"will be thrown out.\".format(arg_4)\n )\n continue\n if arg_4 in sorted(arg_0.policy_store.keys()):\n arg_0.logger.debug(\n \"Policy '{}' overwrites an existing \"\n \"policy.\".format(arg_4)\n )\n if arg_2 != arg_0.policy_map.get(arg_4):\n arg_0.policy_cache.get(arg_4).append(\n (\n time.time(),\n arg_0.policy_map.get(arg_4),\n arg_0.policy_store.get(arg_4)\n )\n )\n else:\n arg_0.policy_cache[arg_4] = []\n arg_0.policy_store[arg_4] = arg_7.get(arg_4)\n arg_0.policy_map[arg_4] = arg_2\n for arg_4 in set(arg_6) - set(arg_7.keys()):\n arg_0.disassociate_policy_and_file(arg_4, arg_2)\n arg_0.restore_or_delete_policy(arg_4)"} +{"_id": "doc_5921", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Start monitoring 
operation policy files.\n \"\"\"\n arg_0.initialize_tracking_structures()\n\n if arg_0.live_monitoring:\n arg_0.logger.info(\"Starting up the operation policy file monitor.\")\n while not arg_0.halt_trigger.is_set():\n time.sleep(1)\n arg_0.scan_policies()\n arg_0.logger.info(\"Stopping the operation policy file monitor.\")\n else:\n arg_0.scan_policies()"} +{"_id": "doc_5922", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract an X.509 certificate from a socket connection.\n \"\"\"\n arg_1 = arg_0.getpeercert(binary_form=True)\n if arg_1:\n return x509.load_der_x509_certificate(\n arg_1,\n backends.default_backend()\n )\n return None"} +{"_id": "doc_5923", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Given an X.509 certificate, extract and return all common names.\n \"\"\"\n\n arg_1 = arg_0.subject.get_attributes_for_oid(\n x509.oid.NameOID.COMMON_NAME\n )\n return [arg_2.value for arg_2 in arg_1]"} +{"_id": "doc_5924", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Given an X.509 certificate, extract and return the client identity.\n \"\"\"\n arg_1 = get_common_names_from_certificate(arg_0)\n\n if len(arg_1) > 0:\n if len(arg_1) > 1:\n raise exceptions.PermissionDenied(\n \"Multiple client identities found.\"\n )\n return arg_1[0]\n else:\n raise exceptions.PermissionDenied(\n \"The certificate does not define any subject common names. \"\n \"Client identity unavailable.\"\n )"} +{"_id": "doc_5925", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Create request payload and decode it into\n its constituent parts.\n\n Args:\n input_buffer (stream): A data buffer containing encoded object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the object type or template\n attribute is missing from the encoded payload.\n \"\"\"\n super(CreateRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.OBJECT_TYPE, arg_6):\n arg_0._object_type = primitives.Enumeration(\n arg_3.ObjectType,\n tag=arg_3.Tags.OBJECT_TYPE\n )\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Create request payload encoding is missing the object \"\n \"type.\"\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0.is_tag_next(arg_3.Tags.TEMPLATE_ATTRIBUTE, arg_6):\n arg_0._template_attribute = objects.TemplateAttribute()\n arg_0._template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Create request payload encoding is missing the \"\n \"template attribute.\"\n )\n else:\n # NOTE (ph) For now, leave attributes natively in TemplateAttribute\n # form and just convert to the KMIP 2.0 Attributes form as needed\n # for encoding/decoding purposes. 
Changing the payload to require\n # the new Attributes structure will trigger a bunch of second-order\n # effects across the client and server codebases that is beyond\n # the scope of updating the Create payloads to support KMIP 2.0.\n if arg_0.is_tag_next(arg_3.Tags.ATTRIBUTES, arg_6):\n arg_9 = objects.Attributes()\n arg_9.Func(arg_6, arg_2=arg_2)\n arg_10 = objects.convert_attributes_to_template_attribute(\n arg_9\n )\n arg_0._template_attribute = arg_10\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Create request payload encoding is missing the \"\n \"attributes structure.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5926", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Create request payload to a buffer.\n\n Args:\n output_buffer (stream): A data buffer in which to encode object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidField: Raised if the object type attribute or template\n attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._object_type:\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The Create request payload is missing the object type field.\"\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._template_attribute:\n arg_0._template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The Create request payload is missing the template \"\n \"attribute field.\"\n )\n else:\n # NOTE (ph) For now, leave attributes natively in TemplateAttribute\n # form and just convert to the KMIP 2.0 Attributes form as needed\n # for encoding/decoding purposes. Changing the payload to require\n # the new Attributes structure will trigger a bunch of second-order\n # effects across the client and server codebases that is beyond\n # the scope of updating the Create payloads to support KMIP 2.0.\n if arg_0._template_attribute:\n arg_7 = objects.convert_template_attribute_to_attributes(\n arg_0._template_attribute\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The Create request payload is missing the template \"\n \"attribute field.\"\n )\n\n arg_0.length = arg_6.length()\n super(CreateRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5927", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Create response payload and decode it into\n its constituent parts.\n\n Args:\n input_buffer (stream): A data buffer containing encoded object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the object type or unique\n identifier is missing from the encoded payload.\n \"\"\"\n super(CreateResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.OBJECT_TYPE, arg_6):\n arg_0._object_type = primitives.Enumeration(\n arg_3.ObjectType,\n tag=arg_3.Tags.OBJECT_TYPE\n )\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Create response payload encoding is missing the object \"\n \"type.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Create response payload encoding is missing the unique \"\n \"identifier.\"\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0.is_tag_next(arg_3.Tags.TEMPLATE_ATTRIBUTE, arg_6):\n arg_0._template_attribute = objects.TemplateAttribute()\n arg_0._template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5928", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert a Pie object into a core secret object and vice versa.\n\n Args:\n obj (various): A Pie or core secret object to Func into the\n opposite object space. Required.\n\n Raises:\n TypeError: if the object type is unrecognized or unsupported.\n \"\"\"\n if isinstance(arg_1, pobjects.SymmetricKey):\n return arg_0._build_core_key(arg_1, secrets.SymmetricKey)\n elif isinstance(arg_1, secrets.SymmetricKey):\n return arg_0._build_pie_key(arg_1, pobjects.SymmetricKey)\n elif isinstance(arg_1, pobjects.PublicKey):\n return arg_0._build_core_key(arg_1, secrets.PublicKey)\n elif isinstance(arg_1, secrets.PublicKey):\n return arg_0._build_pie_key(arg_1, pobjects.PublicKey)\n elif isinstance(arg_1, pobjects.PrivateKey):\n return arg_0._build_core_key(arg_1, secrets.PrivateKey)\n elif isinstance(arg_1, secrets.PrivateKey):\n return arg_0._build_pie_key(arg_1, pobjects.PrivateKey)\n elif isinstance(arg_1, pobjects.Certificate):\n return arg_0._build_core_certificate(arg_1)\n elif isinstance(arg_1, secrets.Certificate):\n return arg_0._build_pie_certificate(arg_1)\n elif isinstance(arg_1, pobjects.SecretData):\n return arg_0._build_core_secret_data(arg_1)\n elif isinstance(arg_1, secrets.SecretData):\n return arg_0._build_pie_secret_data(arg_1)\n elif isinstance(arg_1, pobjects.OpaqueObject):\n return arg_0._build_core_opaque_object(arg_1)\n elif isinstance(arg_1, secrets.OpaqueObject):\n return arg_0._build_pie_opaque_object(arg_1)\n else:\n raise TypeError(\"object type unsupported and cannot be Funced\")"} +{"_id": "doc_5929", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Encrypt response payload and decode it\n into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the unique_identifier or data attributes\n are missing from the encoded payload.\n \"\"\"\n super(EncryptResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"invalid payload missing the unique identifier attribute\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.DATA, arg_6):\n arg_0._data = primitives.ByteString(tag=arg_3.Tags.DATA)\n arg_0._data.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\"invalid payload missing the data attribute\")\n\n if arg_0.is_tag_next(arg_3.Tags.IV_COUNTER_NONCE, arg_6):\n arg_0._iv_counter_nonce = primitives.ByteString(\n tag=arg_3.Tags.IV_COUNTER_NONCE\n )\n arg_0._iv_counter_nonce.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5930", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the DeriveKey request payload and decode it\n into its constituent parts.\n\n Args:\n input_buffer (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is missing from the\n encoded payload.\n \"\"\"\n super(DeriveKeyRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.OBJECT_TYPE, arg_6):\n arg_0._object_type = primitives.Enumeration(\n arg_3.ObjectType,\n tag=arg_3.Tags.OBJECT_TYPE\n )\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The DeriveKey request payload encoding is missing the object \"\n \"type.\"\n )\n\n arg_8 = []\n while arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_9 = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_9.Func(arg_6, arg_2=arg_2)\n arg_8.append(arg_9)\n if not arg_8:\n raise exceptions.InvalidKmipEncoding(\n \"The DeriveKey request payload encoding is missing the unique \"\n \"identifiers.\"\n )\n else:\n arg_0._unique_identifiers = arg_8\n\n if arg_0.is_tag_next(arg_3.Tags.DERIVATION_METHOD, arg_6):\n arg_0._derivation_method = primitives.Enumeration(\n arg_3.DerivationMethod,\n tag=arg_3.Tags.DERIVATION_METHOD\n )\n arg_0._derivation_method.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The DeriveKey request payload encoding is missing the \"\n \"derivation method.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.DERIVATION_PARAMETERS, arg_6):\n arg_0._derivation_parameters = attributes.DerivationParameters()\n arg_0._derivation_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The DeriveKey request payload encoding is missing the \"\n \"derivation parameters.\"\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0.is_tag_next(arg_3.Tags.TEMPLATE_ATTRIBUTE, arg_6):\n arg_0._template_attribute = objects.TemplateAttribute()\n arg_0._template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The DeriveKey request 
payload encoding is missing the \"\n \"template attribute.\"\n )\n else:\n if arg_0.is_tag_next(arg_3.Tags.ATTRIBUTES, arg_6):\n arg_14 = objects.Attributes()\n arg_14.Func(arg_6, arg_2=arg_2)\n arg_15 = objects.convert_attributes_to_template_attribute(\n arg_14\n )\n arg_0._template_attribute = arg_15\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The DeriveKey request payload encoding is missing the \"\n \"attributes structure.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5931", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the DeriveKey request payload to a stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._object_type:\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The DeriveKey request payload is missing the object type \"\n \"field.\"\n )\n\n if arg_0._unique_identifiers:\n for arg_7 in arg_0._unique_identifiers:\n arg_7.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The DeriveKey request payload is missing the unique \"\n \"identifiers field.\"\n )\n\n if arg_0._derivation_method:\n arg_0._derivation_method.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The DeriveKey request payload is missing the derivation \"\n \"method field.\"\n )\n\n if arg_0._derivation_parameters:\n arg_0._derivation_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The DeriveKey request payload is missing the derivation \"\n \"parameters field.\"\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._template_attribute:\n arg_0._template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The DeriveKey request payload is missing the template \"\n \"attribute field.\"\n )\n else:\n if arg_0._template_attribute:\n arg_8 = objects.convert_template_attribute_to_attributes(\n arg_0._template_attribute\n )\n arg_8.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The DeriveKey request payload is missing the template \"\n \"attribute field.\"\n )\n\n arg_0.length = arg_6.length()\n super(DeriveKeyRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5932", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if the attribute is supported by the current KMIP version.\n\n Args:\n attribute (string): The name of the attribute\n (e.g., 'Cryptographic Algorithm'). Required.\n Returns:\n bool: True if the attribute is supported by the current KMIP\n version. False otherwise.\n \"\"\"\n if arg_1 not in arg_0._attribute_rule_sets.keys():\n return False\n\n arg_2 = arg_0._attribute_rule_sets.get(arg_1)\n if arg_0._version >= arg_2.version_added:\n return True\n else:\n return False"} +{"_id": "doc_5933", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if the attribute is deprecated by the current KMIP version.\n\n Args:\n attribute (string): The name of the attribute\n (e.g., 'Unique Identifier'). 
Required.\n \"\"\"\n arg_2 = arg_0._attribute_rule_sets.get(arg_1)\n if arg_2.version_deprecated:\n if arg_0._version >= arg_2.version_deprecated:\n return True\n else:\n return False\n else:\n return False"} +{"_id": "doc_5934", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Check if the attribute is supported by the given object type.\n\n Args:\n attribute (string): The name of the attribute (e.g., 'Name').\n Required.\n object_type (ObjectType): An ObjectType enumeration\n (e.g., ObjectType.SYMMETRIC_KEY). Required.\n Returns:\n bool: True if the attribute is applicable to the object type.\n False otherwise.\n \"\"\"\n # TODO (peterhamilton) Handle applicability between certificate types\n arg_3 = arg_0._attribute_rule_sets.get(arg_1)\n if arg_2 in arg_3.applies_to_object_types:\n return True\n else:\n return False"} +{"_id": "doc_5935", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if the attribute is allowed to have multiple instances.\n\n Args:\n attribute (string): The name of the attribute\n (e.g., 'State'). Required.\n \"\"\"\n # TODO (peterhamilton) Handle multivalue swap between certificate types\n arg_2 = arg_0._attribute_rule_sets.get(arg_1)\n return arg_2.multiple_instances_permitted"} +{"_id": "doc_5936", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4):\n \"\"\"Returns a value that can be used as a parameter in client or\n server. If a direct_value is given, that value will be returned\n instead of the value from the config file. If the appropriate config\n file option is not found, the default_value is returned.\n\n :param direct_value: represents a direct value that should be used.\n supercedes values from config files\n :param config_section: which section of the config file to use\n :param config_option_name: name of config option value\n :param default_value: default value to be used if other options not\n found\n :returns: a value that can be used as a parameter\n \"\"\"\n arg_5 = \"Using given value '{0}' for {1}\"\n arg_6 = \"Using value '{0}' from configuration file {1} for {2}\"\n arg_7 = \"Using default value '{0}' for {1}\"\n if arg_1:\n arg_8 = arg_1\n arg_0.logger.debug(arg_5.format(arg_1, arg_3))\n else:\n try:\n arg_8 = arg_0.conf.get(arg_2,\n arg_3)\n arg_0.logger.debug(arg_6.format(arg_8,\n CONFIG_FILE,\n arg_3))\n except Exception:\n arg_8 = arg_4\n arg_0.logger.debug(arg_7.format(arg_4,\n arg_3))\n # TODO (peter-hamilton): Think about adding better value validation\n if arg_8 == arg_0.NONE_VALUE:\n return None\n else:\n return arg_8"} +{"_id": "doc_5937", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Check response payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._usage_limits_count:\n arg_0._usage_limits_count.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._cryptographic_usage_mask:\n arg_0._cryptographic_usage_mask.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._lease_time:\n arg_0._lease_time.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(CheckResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5938", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_2_0):\n \"\"\"\n Write the AttributeReference structure encoding to the data stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode\n Attributes structure data, supporting a Func method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidField: Raised if the vendor identification or attribute name\n fields are not defined.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the AttributeReference structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the AttributeReference \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n if arg_0._vendor_identification:\n arg_0._vendor_identification.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The AttributeReference is missing the vendor identification \"\n \"field.\"\n )\n\n if arg_0._attribute_name:\n arg_0._attribute_name.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The AttributeReference is missing the attribute name field.\"\n )\n\n arg_0.length = arg_6.length()\n super(AttributeReference, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5939", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_2_0):\n \"\"\"\n Write the Attributes structure encoding to the data stream.\n\n Args:\n output_stream (stream): A data stream in which to encode\n Attributes structure data, supporting a Func method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 2.0.\n\n Raises:\n AttributeNotSupported: Raised if an unsupported attribute is\n found in the attribute list while encoding.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the Attributes object.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the Attributes object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n for arg_7 in arg_0._attributes:\n arg_8 = arg_7.tag\n if not arg_3.is_attribute(arg_8, arg_2=arg_2):\n raise exceptions.AttributeNotSupported(\n \"Attribute {} is not supported by KMIP {}.\".format(\n arg_8.name,\n arg_2.value\n )\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(Attributes, arg_0).Func(arg_1, arg_2=arg_2)\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5940", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Nonce struct to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the nonce ID or nonce value is not defined.\n \"\"\"\n arg_6 = BytearrayStream()\n\n if arg_0._nonce_id:\n arg_0._nonce_id.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\"Nonce struct is missing the nonce ID.\")\n\n if arg_0._nonce_value:\n arg_0._nonce_value.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\"Nonce struct is missing the nonce value.\")\n\n arg_0.length = arg_6.length()\n super(Nonce, arg_0).Func(arg_1, arg_2=arg_2)\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5941", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the UsernamePasswordCredential struct to a\n stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the username is not defined.\n \"\"\"\n arg_6 = BytearrayStream()\n\n if arg_0._username:\n arg_0._username.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"Username/password credential struct missing the username.\"\n )\n\n if arg_0._password:\n arg_0._password.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(UsernamePasswordCredential, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5942", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the DeviceCredential struct and decode it into\n its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(DeviceCredential, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.DEVICE_SERIAL_NUMBER, arg_6):\n arg_0._device_serial_number = primitives.TextString(\n tag=arg_3.Tags.DEVICE_SERIAL_NUMBER\n )\n arg_0._device_serial_number.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.PASSWORD, arg_6):\n arg_0._password = primitives.TextString(\n tag=arg_3.Tags.PASSWORD\n )\n arg_0._password.Func(arg_6, arg_2=arg_2)\n\n if arg_0.is_tag_next(arg_3.Tags.DEVICE_IDENTIFIER, arg_6):\n arg_0._device_identifier = primitives.TextString(\n tag=arg_3.Tags.DEVICE_IDENTIFIER\n )\n arg_0._device_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.NETWORK_IDENTIFIER, arg_6):\n arg_0._network_identifier = primitives.TextString(\n tag=arg_3.Tags.NETWORK_IDENTIFIER\n )\n arg_0._network_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.MACHINE_IDENTIFIER, arg_6):\n arg_0._machine_identifier = primitives.TextString(\n tag=arg_3.Tags.MACHINE_IDENTIFIER\n )\n arg_0._machine_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.MEDIA_IDENTIFIER, arg_6):\n arg_0._media_identifier = primitives.TextString(\n tag=arg_3.Tags.MEDIA_IDENTIFIER\n )\n arg_0._media_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5943", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the DeviceCredential struct to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n if arg_0._device_serial_number is not None:\n arg_0._device_serial_number.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._password is not None:\n arg_0._password.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._device_identifier is not None:\n arg_0._device_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._network_identifier is not None:\n arg_0._network_identifier.Func(\n arg_6,\n arg_2=arg_2)\n if arg_0._machine_identifier is not None:\n arg_0._machine_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._media_identifier is not None:\n arg_0._media_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(DeviceCredential, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5944", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Credential struct and decode it into its\n constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if either the credential type or value are\n missing from the encoding.\n \"\"\"\n super(Credential, arg_0).Func(arg_1, arg_2=arg_2)\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.CREDENTIAL_TYPE, arg_6):\n arg_0._credential_type = primitives.Enumeration(\n enum=arg_3.CredentialType,\n tag=arg_3.Tags.CREDENTIAL_TYPE\n )\n arg_0._credential_type.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"Credential encoding missing the credential type.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.CREDENTIAL_VALUE, arg_6):\n if arg_0.credential_type == \\\n arg_3.CredentialType.USERNAME_AND_PASSWORD:\n arg_0._credential_value = UsernamePasswordCredential()\n elif arg_0.credential_type == arg_3.CredentialType.DEVICE:\n arg_0._credential_value = DeviceCredential()\n elif arg_0.credential_type == arg_3.CredentialType.ATTESTATION:\n arg_0._credential_value = AttestationCredential()\n else:\n raise ValueError(\n \"Credential encoding includes unrecognized credential \"\n \"type.\"\n )\n arg_0._credential_value.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Credential encoding missing the credential value.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5945", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the MACSignatureKeyInformation struct and\n decode it into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(MACSignatureKeyInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Invalid struct missing the unique identifier attribute.\"\n )\n\n if arg_0.is_tag_next(\n arg_3.Tags.CRYPTOGRAPHIC_PARAMETERS,\n arg_6\n ):\n arg_0._cryptographic_parameters = CryptographicParameters()\n arg_0._cryptographic_parameters.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5946", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the KeyWrappingData struct to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n if arg_0._wrapping_method:\n arg_0._wrapping_method.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Invalid struct missing the wrapping method attribute.\"\n )\n\n if arg_0._encryption_key_information:\n arg_0._encryption_key_information.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._mac_signature_key_information:\n arg_0._mac_signature_key_information.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._mac_signature:\n arg_0._mac_signature.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._iv_counter_nonce:\n arg_0._iv_counter_nonce.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._encoding_option:\n arg_0._encoding_option.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(KeyWrappingData, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5947", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the KeyWrappingSpecification struct and decode\n it into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(KeyWrappingSpecification, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.WRAPPING_METHOD, arg_6):\n arg_0._wrapping_method = primitives.Enumeration(\n enum=arg_3.WrappingMethod,\n tag=arg_3.Tags.WRAPPING_METHOD\n )\n arg_0._wrapping_method.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Invalid struct missing the wrapping method attribute.\"\n )\n\n if arg_0.is_tag_next(\n arg_3.Tags.ENCRYPTION_KEY_INFORMATION,\n arg_6\n ):\n arg_0._encryption_key_information = EncryptionKeyInformation()\n arg_0._encryption_key_information.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0.is_tag_next(\n arg_3.Tags.MAC_SIGNATURE_KEY_INFORMATION,\n arg_6\n ):\n arg_0._mac_signature_key_information = MACSignatureKeyInformation()\n arg_0._mac_signature_key_information.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_10 = []\n while arg_0.is_tag_next(arg_3.Tags.ATTRIBUTE_NAME, arg_6):\n arg_11 = primitives.TextString(\n tag=arg_3.Tags.ATTRIBUTE_NAME\n )\n arg_11.Func(arg_6, arg_2=arg_2)\n arg_10.append(arg_11)\n arg_0._attribute_names = arg_10\n\n if arg_0.is_tag_next(arg_3.Tags.ENCODING_OPTION, arg_6):\n arg_0._encoding_option = primitives.Enumeration(\n enum=arg_3.EncodingOption,\n tag=arg_3.Tags.ENCODING_OPTION\n )\n arg_0._encoding_option.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5948", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the KeyWrappingSpecification struct to a\n stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n if arg_0._wrapping_method:\n arg_0._wrapping_method.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"Invalid struct missing the wrapping method attribute.\"\n )\n\n if arg_0._encryption_key_information:\n arg_0._encryption_key_information.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._mac_signature_key_information:\n arg_0._mac_signature_key_information.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._attribute_names:\n for arg_7 in arg_0._attribute_names:\n arg_7.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._encoding_option:\n arg_0._encoding_option.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(KeyWrappingSpecification, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5949", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the ExtensionInformation object and decode it\n into its constituent parts.\n\n Args:\n istream (Stream): A data stream containing encoded object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(ExtensionInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n arg_0.extension_name.Func(arg_6, arg_2=arg_2)\n\n if arg_0.is_tag_next(Tags.EXTENSION_TAG, arg_6):\n arg_0.extension_tag = ExtensionTag()\n arg_0.extension_tag.Func(arg_6, arg_2=arg_2)\n if arg_0.is_tag_next(Tags.EXTENSION_TYPE, arg_6):\n arg_0.extension_type = ExtensionType()\n arg_0.extension_type.Func(arg_6, arg_2=arg_2)\n\n arg_0.is_oversized(arg_6)\n arg_0.validate()"} +{"_id": "doc_5950", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the ExtensionInformation object to a stream.\n\n Args:\n ostream (Stream): A data stream in which to encode object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n arg_0.extension_name.Func(arg_6, arg_2=arg_2)\n\n if arg_0.extension_tag is not None:\n arg_0.extension_tag.Func(arg_6, arg_2=arg_2)\n if arg_0.extension_type is not None:\n arg_0.extension_type.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(ExtensionInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5951", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None,\n arg_3=None):\n \"\"\"\n Construct an ExtensionInformation object from provided extension\n values.\n\n Args:\n extension_name (str): The name of the extension. Optional,\n defaults to None.\n extension_tag (int): The tag number of the extension. Optional,\n defaults to None.\n extension_type (int): The type index of the extension. 
Optional,\n defaults to None.\n\n Returns:\n ExtensionInformation: The newly Funcd set of extension\n information.\n\n Example:\n >>> x = ExtensionInformation.Func('extension', 1, 1)\n >>> x.extension_name.value\n ExtensionName(value='extension')\n >>> x.extension_tag.value\n ExtensionTag(value=1)\n >>> x.extension_type.value\n ExtensionType(value=1)\n \"\"\"\n arg_1 = ExtensionName(arg_1)\n arg_2 = ExtensionTag(arg_2)\n arg_3 = ExtensionType(arg_3)\n\n return ExtensionInformation(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3)"} +{"_id": "doc_5952", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the RevocationReason object and decode it\n into its constituent parts.\n\n Args:\n istream (Stream): A data stream containing encoded object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n super(RevocationReason, arg_0).Func(arg_1, arg_2=arg_2)\n arg_6 = BytearrayStream(arg_1.Func(arg_0.length))\n\n arg_0.revocation_code = RevocationReasonCode()\n arg_0.revocation_code.Func(arg_6, arg_2=arg_2)\n\n if arg_0.is_tag_next(Tags.REVOCATION_MESSAGE, arg_6):\n arg_0.revocation_message = TextString()\n arg_0.revocation_message.Func(arg_6, arg_2=arg_2)\n\n arg_0.is_oversized(arg_6)\n arg_0.validate()"} +{"_id": "doc_5953", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the RevocationReason object to a stream.\n\n Args:\n ostream (Stream): A data stream in which to encode object data,\n supporting a Func method; usually a BytearrayStream object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = BytearrayStream()\n\n arg_0.revocation_code.Func(arg_6, arg_2=arg_2)\n if arg_0.revocation_message is not None:\n arg_0.revocation_message.Func(arg_6, arg_2=arg_2)\n\n # Write the length and value\n arg_0.length = arg_6.length()\n super(RevocationReason, arg_0).Func(arg_1, arg_2=arg_2)\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5954", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Func the RevocationReason object\n \"\"\"\n if not isinstance(arg_0.revocation_code, RevocationReasonCode):\n arg_1 = \"RevocationReaonCode expected\"\n raise TypeError(arg_1)\n if arg_0.revocation_message is not None:\n if not isinstance(arg_0.revocation_message, TextString):\n arg_1 = \"TextString expect\"\n raise TypeError(arg_1)"} +{"_id": "doc_5955", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_2_0):\n \"\"\"\n Read the data encoding the ObjectDefaults structure and decode it into\n its constituent parts.\n\n Args:\n input_buffer (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the object type or attributes are\n missing from the encoding.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the ObjectDefaults structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the ObjectDefaults object.\".format(\n arg_2.value\n )\n )\n\n super(ObjectDefaults, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.OBJECT_TYPE, arg_6):\n arg_0._object_type = primitives.Enumeration(\n arg_3.ObjectType,\n tag=arg_3.Tags.OBJECT_TYPE\n )\n arg_0._object_type.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The ObjectDefaults encoding is missing the object type \"\n \"enumeration.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.ATTRIBUTES, arg_6):\n arg_0._attributes = Attributes()\n arg_0._attributes.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The ObjectDefaults encoding is missing the attributes \"\n \"structure.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5956", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_2_0):\n \"\"\"\n Read the data encoding the DefaultsInformation structure and decode it\n into its constituent parts.\n\n Args:\n input_buffer (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the object defaults are missing\n from the encoding.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the DefaultsInformation structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the DefaultsInformation \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n super(DefaultsInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n arg_7 = []\n while arg_0.is_tag_next(arg_3.Tags.OBJECT_DEFAULTS, arg_6):\n arg_8 = ObjectDefaults()\n arg_8.Func(arg_6, arg_2=arg_2)\n arg_7.append(arg_8)\n\n if len(arg_7) == 0:\n raise exceptions.InvalidKmipEncoding(\n \"The DefaultsInformation encoding is missing the object \"\n \"defaults structure.\"\n )\n else:\n arg_0._object_defaults = arg_7\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5957", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_2_0):\n \"\"\"\n Write the DefaultsInformation structure encoding to the data stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode\n Attributes structure data, supporting a Func method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidField: Raised if the object defaults field is not defined.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the DefaultsInformation structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the DefaultsInformation \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n if arg_0._object_defaults:\n for arg_7 in arg_0._object_defaults:\n arg_7.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The DefaultsInformation structure is missing the object \"\n \"defaults field.\"\n )\n\n arg_0.length = arg_6.length()\n super(DefaultsInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5958", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_3):\n \"\"\"\n Read the data encoding the RNGParameters structure and decode it\n into its constituent parts.\n\n Args:\n input_buffer (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the RNG algorithm is missing from\n the encoding.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the RNGParameters structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_1_3:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the RNGParameters object.\".format(\n arg_2.value\n )\n )\n\n super(RNGParameters, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.RNG_ALGORITHM, arg_6):\n arg_7 = primitives.Enumeration(\n arg_3.RNGAlgorithm,\n tag=arg_3.Tags.RNG_ALGORITHM\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n arg_0._rng_algorithm = arg_7\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The RNGParameters encoding is missing the RNG algorithm.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.CRYPTOGRAPHIC_ALGORITHM, arg_6):\n arg_9 = primitives.Enumeration(\n arg_3.CryptographicAlgorithm,\n tag=arg_3.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n arg_9.Func(\n arg_6,\n arg_2=arg_2\n )\n arg_0._cryptographic_algorithm = arg_9\n\n if arg_0.is_tag_next(arg_3.Tags.CRYPTOGRAPHIC_LENGTH, arg_6):\n arg_11 = primitives.Integer(\n tag=arg_3.Tags.CRYPTOGRAPHIC_LENGTH\n )\n arg_11.Func(arg_6, arg_2=arg_2)\n arg_0._cryptographic_length = arg_11\n\n if arg_0.is_tag_next(arg_3.Tags.HASHING_ALGORITHM, arg_6):\n arg_13 = primitives.Enumeration(\n arg_3.HashingAlgorithm,\n tag=arg_3.Tags.HASHING_ALGORITHM\n )\n arg_13.Func(arg_6, arg_2=arg_2)\n arg_0._hashing_algorithm = arg_13\n\n if arg_0.is_tag_next(arg_3.Tags.DRBG_ALGORITHM, arg_6):\n arg_15 = primitives.Enumeration(\n arg_3.DRBGAlgorithm,\n tag=arg_3.Tags.DRBG_ALGORITHM\n )\n arg_15.Func(arg_6, arg_2=arg_2)\n arg_0._drbg_algorithm = arg_15\n\n if arg_0.is_tag_next(arg_3.Tags.RECOMMENDED_CURVE, arg_6):\n arg_17 = primitives.Enumeration(\n arg_3.RecommendedCurve,\n tag=arg_3.Tags.RECOMMENDED_CURVE\n )\n arg_17.Func(arg_6, arg_2=arg_2)\n arg_0._recommended_curve = arg_17\n\n if arg_0.is_tag_next(arg_3.Tags.FIPS186_VARIATION, arg_6):\n arg_19 = primitives.Enumeration(\n arg_3.FIPS186Variation,\n tag=arg_3.Tags.FIPS186_VARIATION\n )\n arg_19.Func(arg_6, arg_2=arg_2)\n arg_0._fips186_variation = arg_19\n\n 
if arg_0.is_tag_next(arg_3.Tags.PREDICTION_RESISTANCE, arg_6):\n arg_21 = primitives.Boolean(\n tag=arg_3.Tags.PREDICTION_RESISTANCE\n )\n arg_21.Func(\n arg_6,\n arg_2=arg_2\n )\n arg_0._prediction_resistance = arg_21\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5959", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_3):\n \"\"\"\n Write the RNGParameters structure encoding to the data stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode\n Attributes structure data, supporting a Func method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidField: Raised if the RNG algorithm field is not defined.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the RNGParameters structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_1_3:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the RNGParameters object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n if arg_0._rng_algorithm:\n arg_0._rng_algorithm.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The RNGParameters structure is missing the RNG algorithm \"\n \"field.\"\n )\n\n if arg_0._cryptographic_algorithm:\n arg_0._cryptographic_algorithm.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._cryptographic_length:\n arg_0._cryptographic_length.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._hashing_algorithm:\n arg_0._hashing_algorithm.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._drbg_algorithm:\n arg_0._drbg_algorithm.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._recommended_curve:\n arg_0._recommended_curve.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._fips186_variation:\n arg_0._fips186_variation.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._prediction_resistance:\n arg_0._prediction_resistance.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(RNGParameters, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5960", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_3):\n \"\"\"\n Read the data encoding the ProfileInformation structure and decode it\n into its constituent parts.\n\n Args:\n input_buffer (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the profile name is missing from\n the encoding.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the ProfileInformation structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_1_3:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the ProfileInformation \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n super(ProfileInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.PROFILE_NAME, arg_6):\n arg_7 = primitives.Enumeration(\n arg_3.ProfileName,\n tag=arg_3.Tags.PROFILE_NAME\n )\n arg_7.Func(arg_6, arg_2=arg_2)\n arg_0._profile_name = arg_7\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The ProfileInformation encoding is missing the profile name.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.SERVER_URI, arg_6):\n arg_9 = primitives.TextString(tag=arg_3.Tags.SERVER_URI)\n arg_9.Func(arg_6, arg_2=arg_2)\n arg_0._server_uri = arg_9\n\n if arg_0.is_tag_next(arg_3.Tags.SERVER_PORT, arg_6):\n arg_11 = primitives.Integer(tag=arg_3.Tags.SERVER_PORT)\n arg_11.Func(arg_6, arg_2=arg_2)\n arg_0._server_port = arg_11\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5961", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_3):\n \"\"\"\n Write the ProfileInformation structure encoding to the data stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode\n ProfileInformation structure data, supporting a Func method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidField: Raised if the profile name field is not defined.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the ProfileInformation structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_1_3:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the ProfileInformation \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n if arg_0._profile_name:\n arg_0._profile_name.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The ProfileInformation structure is missing the profile \"\n \"name field.\"\n )\n\n if arg_0._server_uri:\n arg_0._server_uri.Func(arg_6, arg_2=arg_2)\n\n if arg_0._server_port:\n arg_0._server_port.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(ProfileInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5962", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_3):\n \"\"\"\n Write the ValidationInformation structure encoding to the data stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode\n ValidationInformation structure data, supporting a Func\n method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 2.0.\n\n Raises:\n InvalidField: Raised if the validation authority type, validation\n version major, validation type, and/or validation level fields\n are not defined.\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the ValidationInformation structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_1_3:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the ValidationInformation \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n if arg_0._validation_authority_type:\n arg_0._validation_authority_type.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The ValidationInformation structure is missing the \"\n \"validation authority type field.\"\n )\n\n if arg_0._validation_authority_country:\n arg_0._validation_authority_country.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._validation_authority_uri:\n arg_0._validation_authority_uri.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._validation_version_major:\n arg_0._validation_version_major.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The ValidationInformation structure is missing the \"\n \"validation version major field.\"\n )\n\n if arg_0._validation_version_minor:\n arg_0._validation_version_minor.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._validation_type:\n arg_0._validation_type.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The ValidationInformation structure is missing the \"\n \"validation type field.\"\n )\n\n if arg_0._validation_level:\n arg_0._validation_level.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise exceptions.InvalidField(\n \"The ValidationInformation structure is missing the \"\n \"validation level field.\"\n )\n\n if arg_0._validation_certificate_identifier:\n arg_0._validation_certificate_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._validation_certificate_uri:\n arg_0._validation_certificate_uri.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._validation_vendor_uri:\n arg_0._validation_vendor_uri.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._validation_profiles:\n for arg_7 in arg_0._validation_profiles:\n arg_7.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(ValidationInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5963", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_3):\n \"\"\"\n Write the CapabilityInformation structure encoding to the data stream.\n\n Args:\n output_buffer (stream): A data stream in which to encode\n CapabilityInformation structure data, supporting a Func\n method.\n kmip_version (enum): A KMIPVersion enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 2.0.\n\n Raises:\n VersionNotSupported: Raised when a KMIP version is provided that\n does not support the CapabilityInformation structure.\n \"\"\"\n if arg_2 < arg_3.KMIPVersion.KMIP_1_3:\n raise exceptions.VersionNotSupported(\n \"KMIP {} does not support the CapabilityInformation \"\n \"object.\".format(\n arg_2.value\n )\n )\n\n arg_6 = BytearrayStream()\n\n if arg_0._streaming_capability:\n arg_0._streaming_capability.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._asynchronous_capability:\n arg_0._asynchronous_capability.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._attestation_capability:\n arg_0._attestation_capability.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 >= arg_3.KMIPVersion.KMIP_1_4:\n if arg_0._batch_undo_capability:\n arg_0._batch_undo_capability.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._batch_continue_capability:\n arg_0._batch_continue_capability.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._unwrap_mode:\n arg_0._unwrap_mode.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._destroy_action:\n arg_0._destroy_action.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._shredding_algorithm:\n arg_0._shredding_algorithm.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._rng_mode:\n arg_0._rng_mode.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(CapabilityInformation, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5964", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Stop the server.\n\n Halt server client connections and clean up any existing connection\n threads.\n\n Raises:\n NetworkingError: Raised if a failure occurs while sutting down\n or closing the TLS server socket.\n \"\"\"\n arg_0._logger.info(\"Cleaning up remaining connection threads.\")\n\n for arg_1 in threading.enumerate():\n if arg_1 is not threading.current_thread():\n try:\n arg_1.join(10.0)\n except Exception as e:\n arg_0._logger.info(\n \"Error occurred while attempting to cleanup thread: \"\n \"{0}\".format(arg_1.name)\n )\n arg_0._logger.exception(e)\n else:\n if arg_1.is_alive():\n arg_0._logger.warning(\n \"Cleanup failed for thread: {0}. Thread is \"\n \"still alive\".format(arg_1.name)\n )\n else:\n arg_0._logger.info(\n \"Cleanup succeeded for thread: {0}\".format(\n arg_1.name\n )\n )\n\n arg_0._logger.info(\"Shutting down server socket handler.\")\n try:\n arg_0._socket.shutdown(socket.SHUT_RDWR)\n arg_0._socket.close()\n except Exception as e:\n arg_0._logger.exception(e)\n raise exceptions.NetworkingError(\n \"Server failed to shutdown socket handler.\"\n )\n\n if hasattr(arg_0, \"policy_monitor\"):\n try:\n arg_0.policy_monitor.Func()\n arg_0.policy_monitor.join()\n except Exception as e:\n arg_0._logger.exception(e)\n raise exceptions.ShutdownError(\n \"Server failed to clean up the policy monitor.\"\n )"} +{"_id": "doc_5965", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Serve client connections.\n\n Begin listening for client connections, spinning off new KmipSessions\n as connections are handled. Set up signal handling to shutdown\n connection service as needed.\n \"\"\"\n arg_0._socket.listen(5)\n\n def _signal_handler(arg_1, arg_2):\n arg_0._is_serving = False\n\n # Python3.5+ silently ignores SIGINT and retries system calls if\n # the signal handler does not raise an exception. 
Explicitly\n # detect SIGINT and raise a KeyboardInterrupt exception to regain\n # old functionality.\n if arg_1 == signal.SIGINT:\n raise KeyboardInterrupt(\"SIGINT received\")\n\n signal.signal(signal.SIGINT, _signal_handler)\n signal.signal(signal.SIGTERM, _signal_handler)\n\n arg_0._logger.info(\"Starting connection service...\")\n\n while arg_0._is_serving:\n try:\n arg_4, arg_5 = arg_0._socket.accept()\n except socket.timeout:\n # Setting the default socket timeout to break hung connections\n # will cause accept to periodically raise socket.timeout. This\n # is expected behavior, so ignore it and retry accept.\n pass\n except socket.error as e:\n arg_0._logger.warning(\n \"Error detected while establishing new connection.\"\n )\n arg_0._logger.exception(e)\n except KeyboardInterrupt:\n arg_0._logger.warning(\"Interrupting connection service.\")\n arg_0._is_serving = False\n break\n except Exception as e:\n arg_0._logger.warning(\n \"Error detected while establishing new connection.\"\n )\n arg_0._logger.exception(e)\n else:\n arg_0._setup_connection_handler(arg_4, arg_5)\n\n arg_0._logger.info(\"Stopping connection service.\")"} +{"_id": "doc_5966", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Locate request payload and decode it into\n its constituent parts.\n\n Args:\n input_buffer (stream): A data buffer containing encoded object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the attributes structure is missing\n from the encoded payload for KMIP 2.0+ encodings.\n \"\"\"\n super(LocateRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.MAXIMUM_ITEMS, arg_6):\n arg_0._maximum_items = primitives.Integer(\n tag=arg_3.Tags.MAXIMUM_ITEMS\n )\n arg_0._maximum_items.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.OFFSET_ITEMS, arg_6):\n arg_0._offset_items = primitives.Integer(\n tag=arg_3.Tags.OFFSET_ITEMS\n )\n arg_0._offset_items.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.STORAGE_STATUS_MASK, arg_6):\n arg_0._storage_status_mask = primitives.Integer(\n tag=arg_3.Tags.STORAGE_STATUS_MASK\n )\n arg_0._storage_status_mask.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0.is_tag_next(arg_3.Tags.OBJECT_GROUP_MEMBER, arg_6):\n arg_0._object_group_member = primitives.Enumeration(\n arg_3.ObjectGroupMember,\n tag=arg_3.Tags.OBJECT_GROUP_MEMBER\n )\n arg_0._object_group_member.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n while arg_0.is_tag_next(arg_3.Tags.ATTRIBUTE, arg_6):\n arg_11 = objects.Attribute()\n arg_11.Func(arg_6, arg_2=arg_2)\n arg_0._attributes.append(arg_11)\n else:\n if arg_0.is_tag_next(arg_3.Tags.ATTRIBUTES, arg_6):\n arg_12 = objects.Attributes()\n arg_12.Func(arg_6, arg_2=arg_2)\n # TODO (ph) Add a new utility to avoid using TemplateAttributes\n arg_13 = objects.convert_attributes_to_template_attribute(\n arg_12\n )\n arg_0._attributes = arg_13.attributes\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Locate request payload encoding is missing the \"\n \"attributes structure.\"\n )"} +{"_id": "doc_5967", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Locate request payload to a 
buffer.\n\n Args:\n output_buffer (stream): A data buffer in which to encode object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._maximum_items:\n arg_0._maximum_items.Func(arg_6, arg_2=arg_2)\n\n if arg_0._offset_items:\n arg_0._offset_items.Func(arg_6, arg_2=arg_2)\n\n if arg_0._storage_status_mask:\n arg_0._storage_status_mask.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._object_group_member:\n arg_0._object_group_member.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 < arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._attributes:\n for arg_7 in arg_0.attributes:\n arg_7.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n if arg_0._attributes:\n # TODO (ph) Add a new utility to avoid using TemplateAttributes\n arg_8 = objects.TemplateAttribute(\n arg_9=arg_0.attributes\n )\n arg_9 = objects.convert_template_attribute_to_attributes(\n arg_8\n )\n arg_9.Func(arg_6, arg_2=arg_2)\n else:\n raise exceptions.InvalidField(\n \"The Locate request payload is missing the attributes \"\n \"list.\"\n )\n\n arg_0.length = arg_6.length()\n super(LocateRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5968", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Locate response payload to a buffer.\n\n Args:\n output_buffer (stream): A data buffer in which to encode object\n data, supporting a Func method.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._located_items:\n arg_0._located_items.Func(arg_6, arg_2=arg_2)\n\n if arg_0._unique_identifiers:\n for arg_7 in arg_0._unique_identifiers:\n arg_7.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(LocateResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5969", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create an asymmetric key pair.\n\n Args:\n algorithm(CryptographicAlgorithm): An enumeration specifying the\n algorithm for which the created keys will be compliant.\n length(int): The length of the keys to be created. This value must\n be compliant with the constraints of the provided algorithm.\n\n Returns:\n dict: A dictionary containing the public key data, with at least\n the following key/value fields:\n * value - the bytes of the key\n * format - a KeyFormatType enumeration for the bytes format\n dict: A dictionary containing the private key data, identical in\n structure to the one above.\n\n Raises:\n InvalidField: Raised when the algorithm is unsupported or the\n length is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n\n Example:\n >>> engine = CryptographyEngine()\n >>> key = engine.create_asymmetric_key(\n ... 
CryptographicAlgorithm.RSA, 2048)\n \"\"\"\n if arg_1 not in arg_0._asymmetric_key_algorithms.keys():\n raise exceptions.InvalidField(\n \"The cryptographic algorithm ({0}) is not a supported \"\n \"asymmetric key algorithm.\".format(arg_1)\n )\n\n arg_3 = arg_0._asymmetric_key_algorithms.get(arg_1)\n return arg_3(arg_2)"} +{"_id": "doc_5970", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None):\n \"\"\"\n Encrypt data using symmetric or asymmetric Funcion.\n\n Args:\n Funcion_algorithm (CryptographicAlgorithm): An enumeration\n specifying the Funcion algorithm to use for Funcion.\n Funcion_key (bytes): The bytes of the Funcion key to use for\n Funcion.\n plain_text (bytes): The bytes to be Funced.\n cipher_mode (BlockCipherMode): An enumeration specifying the\n block cipher mode to use with the Funcion algorithm.\n Required in the general case. Optional if the Funcion\n algorithm is RC4 (aka ARC4). If optional, defaults to None.\n padding_method (PaddingMethod): An enumeration specifying the\n padding method to use on the data before Funcion. Required\n if the cipher mode is for block ciphers (e.g., CBC, ECB).\n Optional otherwise, defaults to None.\n iv_nonce (bytes): The IV/nonce value to use to initialize the mode\n of the Funcion algorithm. Optional, defaults to None. If\n required and not provided, it will be autogenerated and\n returned with the cipher text.\n hashing_algorithm (HashingAlgorithm): An enumeration specifying\n the hashing algorithm to use with the Funcion algorithm,\n if needed. Required for OAEP-based asymmetric Funcion.\n Optional, defaults to None.\n\n Returns:\n dict: A dictionary containing the Funced data, with at least\n the following key/value fields:\n * cipher_text - the bytes of the Funced data\n * iv_nonce - the bytes of the IV/counter/nonce used if it\n was needed by the Funcion scheme and if it was\n automatically generated for the Funcion\n\n Raises:\n InvalidField: Raised when the algorithm is unsupported or the\n length is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n\n Example:\n >>> engine = CryptographyEngine()\n >>> result = engine.Func(\n ... Funcion_algorithm=CryptographicAlgorithm.AES,\n ... Funcion_key=(\n ... b'\\xF3\\x96\\xE7\\x1C\\xCF\\xCD\\xEC\\x1F'\n ... b'\\xFC\\xE2\\x8E\\xA6\\xF8\\x74\\x28\\xB0'\n ... ),\n ... plain_text=(\n ... b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07'\n ... b'\\x08\\x09\\x0A\\x0B\\x0C\\x0D\\x0E\\x0F'\n ... ),\n ... cipher_mode=BlockCipherMode.CBC,\n ... padding_method=PaddingMethod.ANSI_X923,\n ... 
)\n >>> result.get('cipher_text')\n b'\\x18[\\xb9y\\x1bL\\xd1\\x8f\\x9a\\xa0e\\x02b\\xa3=c'\n >>> result.iv_counter_nonce\n b'8qA\\x05\\xc4\\x86\\x03\\xd9=\\xef\\xdf\\xb8ke\\x9a\\xa2'\n \"\"\"\n if arg_1 is None:\n raise exceptions.InvalidField(\"Encryption algorithm is required.\")\n\n if arg_1 == enums.CryptographicAlgorithm.RSA:\n return arg_0._Func_asymmetric(\n arg_1,\n arg_2,\n arg_3,\n arg_5,\n arg_7=arg_7\n )\n else:\n return arg_0._Func_symmetric(\n arg_1,\n arg_2,\n arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6\n )"} +{"_id": "doc_5971", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=None,\n arg_6=None):\n \"\"\"\n Encrypt data using symmetric encryption.\n\n Args:\n encryption_algorithm (CryptographicAlgorithm): An enumeration\n specifying the symmetric encryption algorithm to use for\n encryption.\n encryption_key (bytes): The bytes of the symmetric key to use for\n encryption.\n plain_text (bytes): The bytes to be encrypted.\n cipher_mode (BlockCipherMode): An enumeration specifying the\n block cipher mode to use with the encryption algorithm.\n Required in the general case. Optional if the encryption\n algorithm is RC4 (aka ARC4). If optional, defaults to None.\n padding_method (PaddingMethod): An enumeration specifying the\n padding method to use on the data before encryption. Required\n if the cipher mode is for block ciphers (e.g., CBC, ECB).\n Optional otherwise, defaults to None.\n iv_nonce (bytes): The IV/nonce value to use to initialize the mode\n of the encryption algorithm. Optional, defaults to None. If\n required and not provided, it will be autogenerated and\n returned with the cipher text.\n\n Returns:\n dict: A dictionary containing the encrypted data, with at least\n the following key/value fields:\n * cipher_text - the bytes of the encrypted data\n * iv_nonce - the bytes of the IV/counter/nonce used if it\n was needed by the encryption scheme and if it was\n automatically generated for the encryption\n\n Raises:\n InvalidField: Raised when the algorithm is unsupported or the\n encryption key is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n \"\"\"\n\n # Set up the algorithm\n arg_7 = arg_0._symmetric_key_algorithms.get(\n arg_1,\n None\n )\n if arg_7 is None:\n raise exceptions.InvalidField(\n \"Encryption algorithm '{0}' is not a supported symmetric \"\n \"encryption algorithm.\".format(arg_1)\n )\n try:\n arg_7 = arg_7(arg_2)\n except Exception as e:\n arg_0.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"Invalid key bytes for the specified encryption algorithm.\"\n )\n\n # Set up the cipher mode if needed\n arg_8 = False\n if arg_1 == enums.CryptographicAlgorithm.RC4:\n arg_9 = None\n else:\n if arg_4 is None:\n raise exceptions.InvalidField(\"Cipher mode is required.\")\n arg_9 = arg_0._modes.get(arg_4, None)\n if arg_9 is None:\n raise exceptions.InvalidField(\n \"Cipher mode '{0}' is not a supported mode.\".format(\n arg_4\n )\n )\n if hasattr(arg_9, 'initialization_vector') or \\\n hasattr(arg_9, 'nonce'):\n if arg_6 is None:\n arg_6 = os.urandom(arg_7.block_size // 8)\n arg_8 = True\n arg_9 = arg_9(arg_6)\n else:\n arg_9 = arg_9()\n\n # Pad the plain text if needed (separate methods for testing purposes)\n if arg_4 in [\n enums.BlockCipherMode.CBC,\n enums.BlockCipherMode.ECB\n ]:\n arg_3 = arg_0._handle_symmetric_padding(\n arg_0._symmetric_key_algorithms.get(arg_1),\n arg_3,\n arg_5\n )\n\n # Encrypt the plain text\n arg_10 = 
ciphers.Cipher(arg_7, arg_9, backend=default_backend())\n arg_11 = arg_10.encryptor()\n arg_12 = arg_11.update(arg_3) + arg_11.finalize()\n\n if arg_8:\n return {\n 'cipher_text': arg_12,\n 'iv_nonce': arg_6\n }\n else:\n return {'cipher_text': arg_12}"} +{"_id": "doc_5972", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None):\n \"\"\"\n Encrypt data using asymmetric encryption.\n\n Args:\n encryption_algorithm (CryptographicAlgorithm): An enumeration\n specifying the asymmetric encryption algorithm to use for\n encryption. Required.\n encryption_key (bytes): The bytes of the public key to use for\n encryption. Required.\n plain_text (bytes): The bytes to be encrypted. Required.\n padding_method (PaddingMethod): An enumeration specifying the\n padding method to use with the asymmetric encryption\n algorithm. Required.\n hashing_algorithm (HashingAlgorithm): An enumeration specifying\n the hashing algorithm to use with the encryption padding\n method. Required, if the padding method is OAEP. Optional\n otherwise, defaults to None.\n\n Returns:\n dict: A dictionary containing the encrypted data, with at least\n the following key/value field:\n * cipher_text - the bytes of the encrypted data\n\n Raises:\n InvalidField: Raised when the algorithm is unsupported or the\n length is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n \"\"\"\n if arg_1 == enums.CryptographicAlgorithm.RSA:\n if arg_4 == enums.PaddingMethod.OAEP:\n arg_6 = arg_0._encryption_hash_algorithms.get(\n arg_5\n )\n if arg_6 is None:\n raise exceptions.InvalidField(\n \"The hashing algorithm '{0}' is not supported for \"\n \"asymmetric encryption.\".format(arg_5)\n )\n\n arg_4 = asymmetric_padding.OAEP(\n mgf=asymmetric_padding.MGF1(\n algorithm=arg_6()\n ),\n algorithm=arg_6(),\n label=None\n )\n elif arg_4 == enums.PaddingMethod.PKCS1v15:\n arg_4 = asymmetric_padding.PKCS1v15()\n else:\n raise exceptions.InvalidField(\n \"The padding method '{0}' is not supported for asymmetric \"\n \"encryption.\".format(arg_4)\n )\n\n arg_7 = default_backend()\n\n try:\n arg_8 = arg_7.load_der_public_key(arg_2)\n except Exception:\n try:\n arg_8 = arg_7.load_pem_public_key(arg_2)\n except Exception:\n raise exceptions.CryptographicFailure(\n \"The public key bytes could not be loaded.\"\n )\n arg_9 = arg_8.encrypt(\n arg_3,\n arg_4\n )\n return {'cipher_text': arg_9}\n else:\n raise exceptions.InvalidField(\n \"The cryptographic algorithm '{0}' is not supported for \"\n \"asymmetric encryption.\".format(arg_1)\n )"} +{"_id": "doc_5973", "title": "", "text": "def Func(arg_0, arg_1, arg_2=65537):\n \"\"\"\n Create an RSA key pair.\n\n Args:\n length(int): The length of the keys to be created. This value must\n be compliant with the constraints of the provided algorithm.\n public_exponent(int): The value of the public exponent needed to\n generate the keys. 
Usually a small Fermat prime number.\n Optional, defaults to 65537.\n\n Returns:\n dict: A dictionary containing the public key data, with the\n following key/value fields:\n * value - the bytes of the key\n * format - a KeyFormatType enumeration for the bytes format\n * public_exponent - the public exponent integer\n dict: A dictionary containing the private key data, identical in\n structure to the one above.\n\n Raises:\n CryptographicFailure: Raised when the key generation process\n fails.\n \"\"\"\n arg_0.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n arg_1, arg_2\n )\n )\n try:\n arg_3 = rsa.generate_private_key(\n arg_2=arg_2,\n key_size=arg_1,\n backend=default_backend())\n arg_4 = arg_3.public_key()\n\n arg_5 = arg_3.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n arg_6 = arg_4.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n arg_0.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. \"\n \"See the server log for more information.\"\n )\n\n arg_4 = {\n 'value': arg_6,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': arg_2\n }\n arg_3 = {\n 'value': arg_5,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': arg_2\n }\n\n return arg_4, arg_3"} +{"_id": "doc_5974", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None,\n arg_10=None,\n arg_11=None):\n \"\"\"\n Derive key data using a variety of key derivation functions.\n\n Args:\n derivation_method (DerivationMethod): An enumeration specifying\n the key derivation method to use. Required.\n derivation_length (int): An integer specifying the size of the\n derived key data in bytes. Required.\n derivation_data (bytes): The non-cryptographic bytes to be used\n in the key derivation process (e.g., the data to be encrypted,\n hashed, HMACed). Required in the general case. Optional if the\n derivation method is Hash and the key material is provided.\n Optional, defaults to None.\n key_material (bytes): The bytes of the key material to use for\n key derivation. Required in the general case. Optional if\n the derivation_method is HASH and derivation_data is provided.\n Optional, defaults to None.\n hash_algorithm (HashingAlgorithm): An enumeration specifying the\n hashing algorithm to use with the key derivation method.\n Required in the general case, optional if the derivation\n method specifies encryption. Optional, defaults to None.\n salt (bytes): Bytes representing a randomly generated salt.\n Required if the derivation method is PBKDF2. Optional,\n defaults to None.\n iteration_count (int): An integer representing the number of\n iterations to use when deriving key material. Required if\n the derivation method is PBKDF2. Optional, defaults to None.\n encryption_algorithm (CryptographicAlgorithm): An enumeration\n specifying the symmetric encryption algorithm to use for\n encryption-based key derivation. Required if the derivation\n method specifies encryption. Optional, defaults to None.\n cipher_mode (BlockCipherMode): An enumeration specifying the\n block cipher mode to use with the encryption algorithm.\n Required in in the general case if the derivation method\n specifies encryption and the encryption algorithm is\n specified. 
Optional if the encryption algorithm is RC4 (aka\n ARC4). Optional, defaults to None.\n padding_method (PaddingMethod): An enumeration specifying the\n padding method to use on the data before encryption. Required\n in in the general case if the derivation method specifies\n encryption and the encryption algorithm is specified. Required\n if the cipher mode is for block ciphers (e.g., CBC, ECB).\n Optional otherwise, defaults to None.\n iv_nonce (bytes): The IV/nonce value to use to initialize the mode\n of the encryption algorithm. Required in the general case if\n the derivation method specifies encryption and the encryption\n algorithm is specified. Optional, defaults to None. If\n required and not provided, it will be autogenerated.\n\n Returns:\n bytes: the bytes of the derived data\n\n Raises:\n InvalidField: Raised when cryptographic data and/or settings are\n unsupported or incompatible with the derivation method.\n\n Example:\n >>> engine = CryptographyEngine()\n >>> result = engine.Func(\n ... derivation_method=enums.DerivationMethod.HASH,\n ... derivation_length=16,\n ... derivation_data=b'abc',\n ... hash_algorithm=enums.HashingAlgorithm.MD5\n ... )\n >>> result\n b'\\x90\\x01P\\x98<\\xd2O\\xb0\\xd6\\x96?}(\\xe1\\x7fr'\n \"\"\"\n if arg_1 == enums.DerivationMethod.ENCRYPT:\n arg_12 = arg_0.encrypt(\n arg_8=arg_8,\n encryption_key=arg_4,\n plain_text=arg_3,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11\n )\n return arg_12.get('cipher_text')\n else:\n # Handle key derivation functions that use hash algorithms\n\n # Set up the hashing algorithm\n if arg_5 is None:\n raise exceptions.InvalidField(\"Hash algorithm is required.\")\n arg_13 = arg_0._encryption_hash_algorithms.get(\n arg_5,\n None\n )\n if arg_13 is None:\n raise exceptions.InvalidField(\n \"Hash algorithm '{0}' is not a supported hashing \"\n \"algorithm.\".format(arg_5)\n )\n\n if arg_1 == enums.DerivationMethod.HMAC:\n arg_14 = hkdf.HKDF(\n algorithm=arg_13(),\n length=arg_2,\n arg_6=arg_6,\n info=arg_3,\n backend=default_backend()\n )\n arg_15 = arg_14.derive(arg_4)\n return arg_15\n elif arg_1 == enums.DerivationMethod.HASH:\n if None not in [arg_3, arg_4]:\n raise exceptions.InvalidField(\n \"For hash-based key derivation, specify only \"\n \"derivation data or key material, not both.\"\n )\n elif arg_3 is not None:\n arg_16 = arg_3\n elif arg_4 is not None:\n arg_16 = arg_4\n else:\n raise exceptions.InvalidField(\n \"For hash-based key derivation, derivation data or \"\n \"key material must be specified.\"\n )\n\n arg_14 = hashes.Hash(\n algorithm=arg_13(),\n backend=default_backend()\n )\n arg_14.update(arg_16)\n arg_15 = arg_14.finalize()\n return arg_15\n elif arg_1 == enums.DerivationMethod.PBKDF2:\n if arg_6 is None:\n raise exceptions.InvalidField(\n \"For PBKDF2 key derivation, salt must be specified.\"\n )\n if arg_7 is None:\n raise exceptions.InvalidField(\n \"For PBKDF2 key derivation, iteration count must be \"\n \"specified.\"\n )\n\n arg_14 = pbkdf2.PBKDF2HMAC(\n algorithm=arg_13(),\n length=arg_2,\n arg_6=arg_6,\n iterations=arg_7,\n backend=default_backend()\n )\n arg_15 = arg_14.derive(arg_4)\n return arg_15\n elif arg_1 == enums.DerivationMethod.NIST800_108_C:\n arg_14 = kbkdf.KBKDFHMAC(\n algorithm=arg_13(),\n mode=kbkdf.Mode.CounterMode,\n length=arg_2,\n rlen=4,\n llen=None,\n location=kbkdf.CounterLocation.BeforeFixed,\n label=None,\n context=None,\n fixed=arg_3,\n backend=default_backend()\n )\n arg_15 = arg_14.derive(arg_4)\n return arg_15\n else:\n raise exceptions.InvalidField(\n 
\"Derivation method '{0}' is not a supported key \"\n \"derivation method.\".format(arg_1)\n )"} +{"_id": "doc_5975", "title": "", "text": "def Func(arg_0,\n arg_1):\n \"\"\"\n Instantiates an RSA key from bytes.\n\n Args:\n bytes (byte string): Bytes of RSA private key.\n Returns:\n private_key\n (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):\n RSA private key created from key bytes.\n \"\"\"\n\n try:\n arg_2 = serialization.load_pem_private_key(\n arg_1,\n password=None,\n backend=default_backend()\n )\n return arg_2\n except Exception:\n arg_2 = serialization.load_der_private_key(\n arg_1,\n password=None,\n backend=default_backend()\n )\n return arg_2"} +{"_id": "doc_5976", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None,\n arg_6=None,\n arg_7=None):\n \"\"\"\n Verify a message signature.\n\n Args:\n signing_key (bytes): The bytes of the signing key to use for\n signature verification. Required.\n message (bytes): The bytes of the message that corresponds with\n the signature. Required.\n signature (bytes): The bytes of the signature to be verified.\n Required.\n padding_method (PaddingMethod): An enumeration specifying the\n padding method to use during signature verification. Required.\n signing_algorithm (CryptographicAlgorithm): An enumeration\n specifying the cryptographic algorithm to use for signature\n verification. Only RSA is supported. Optional, must match the\n algorithm specified by the digital signature algorithm if both\n are provided. Defaults to None.\n hashing_algorithm (HashingAlgorithm): An enumeration specifying\n the hashing algorithm to use with the cryptographic algortihm,\n if needed. Optional, must match the algorithm specified by the\n digital signature algorithm if both are provided. Defaults to\n None.\n digital_signature_algorithm (DigitalSignatureAlgorithm): An\n enumeration specifying both the cryptographic and hashing\n algorithms to use for signature verification. Optional, must\n match the cryptographic and hashing algorithms if both are\n provided. 
Defaults to None.\n\n Returns:\n boolean: the result of signature verification, True for valid\n signatures, False for invalid signatures\n\n Raises:\n InvalidField: Raised when various settings or values are invalid.\n CryptographicFailure: Raised when the signing key bytes cannot be\n loaded, or when the signature verification process fails\n unexpectedly.\n \"\"\"\n arg_8 = default_backend()\n\n arg_9 = None\n arg_10 = None\n arg_11 = None\n\n if arg_6:\n arg_9 = arg_0._encryption_hash_algorithms.get(\n arg_6\n )\n if arg_7:\n arg_12 = arg_0._digital_signature_algorithms.get(\n arg_7\n )\n if arg_12:\n arg_10 = arg_12[0]\n arg_11 = arg_12[1]\n\n if arg_10 and arg_11:\n if arg_9 and (arg_9 != arg_10):\n raise exceptions.InvalidField(\n \"The hashing algorithm does not match the digital \"\n \"signature algorithm.\"\n )\n if (arg_5 and\n (arg_5 != arg_11)):\n raise exceptions.InvalidField(\n \"The signing algorithm does not match the digital \"\n \"signature algorithm.\"\n )\n\n arg_5 = arg_11\n arg_9 = arg_10\n\n if arg_5 == enums.CryptographicAlgorithm.RSA:\n if arg_4 == enums.PaddingMethod.PSS:\n if arg_9:\n arg_13 = asymmetric_padding.PSS(\n mgf=asymmetric_padding.MGF1(arg_9()),\n salt_length=asymmetric_padding.PSS.MAX_LENGTH\n )\n else:\n raise exceptions.InvalidField(\n \"A hashing algorithm must be specified for PSS \"\n \"padding.\"\n )\n elif arg_4 == enums.PaddingMethod.PKCS1v15:\n arg_13 = asymmetric_padding.PKCS1v15()\n else:\n raise exceptions.InvalidField(\n \"The padding method '{0}' is not supported for signature \"\n \"verification.\".format(arg_4)\n )\n\n try:\n arg_14 = arg_8.load_der_public_key(arg_1)\n except Exception:\n try:\n arg_14 = arg_8.load_pem_public_key(arg_1)\n except Exception:\n raise exceptions.CryptographicFailure(\n \"The signing key bytes could not be loaded.\"\n )\n\n try:\n arg_14.verify(\n arg_3,\n arg_2,\n arg_13,\n arg_9()\n )\n return True\n except errors.InvalidSignature:\n return False\n except Exception:\n raise exceptions.CryptographicFailure(\n \"The signature verification process failed.\"\n )\n else:\n raise exceptions.InvalidField(\n \"The signing algorithm '{0}' is not supported for \"\n \"signature verification.\".format(arg_5)\n )"} +{"_id": "doc_5977", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Sign response payload and decode it.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the unique_identifier or signature attributes\n are missing from the encoded payload.\n \"\"\"\n\n super(SignResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"invalid payload missing the unique identifier attribute\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.SIGNATURE_DATA, arg_6):\n arg_0._signature_data = primitives.ByteString(\n tag=arg_3.Tags.SIGNATURE_DATA\n )\n arg_0._signature_data.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"invalid payload missing the signature data attribute\"\n )"} +{"_id": "doc_5978", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Sign response to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n\n Raises:\n ValueError: Raised if the unique_identifier or signature\n attributes are not defined.\n \"\"\"\n\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"invalid payload missing the unique identifier attribute\"\n )\n\n if arg_0._signature_data:\n arg_0._signature_data.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"invalid payload missing the signature attribute\"\n )\n\n arg_0.length = arg_6.length()\n super(SignResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5979", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the GetUsageAllocation request payload and\n decode it into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is missing from the\n encoded payload.\n \"\"\"\n super(GetUsageAllocationRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0.is_tag_next(arg_3.Tags.USAGE_LIMITS_COUNT, arg_6):\n arg_0._usage_limits_count = primitives.LongInteger(\n tag=arg_3.Tags.USAGE_LIMITS_COUNT\n )\n arg_0._usage_limits_count.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5980", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.\n\n Args:\n value (ProtocolVersion): A ProtocolVersion struct to be converted into\n a KMIPVersion enumeration.\n\n Returns:\n KMIPVersion: The enumeration equivalent of the struct. 
If the struct\n cannot be converted to a valid enumeration, None is returned.\n \"\"\"\n if not isinstance(arg_0, ProtocolVersion):\n return None\n\n if arg_0.major == 1:\n if arg_0.minor == 0:\n return enums.KMIPVersion.KMIP_1_0\n elif arg_0.minor == 1:\n return enums.KMIPVersion.KMIP_1_1\n elif arg_0.minor == 2:\n return enums.KMIPVersion.KMIP_1_2\n elif arg_0.minor == 3:\n return enums.KMIPVersion.KMIP_1_3\n elif arg_0.minor == 4:\n return enums.KMIPVersion.KMIP_1_4\n else:\n return None\n else:\n return None"} +{"_id": "doc_5981", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the ProtocolVersion struct and decode it into\n its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if either the major or minor protocol versions\n are missing from the encoding.\n \"\"\"\n super(ProtocolVersion, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.PROTOCOL_VERSION_MAJOR, arg_6):\n arg_0._major = primitives.Integer(\n tag=arg_3.Tags.PROTOCOL_VERSION_MAJOR\n )\n arg_0._major.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"Invalid encoding missing the major protocol version number.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.PROTOCOL_VERSION_MINOR, arg_6):\n arg_0._minor = primitives.Integer(\n tag=arg_3.Tags.PROTOCOL_VERSION_MINOR\n )\n arg_0._minor.Func(arg_6, arg_2=arg_2)\n else:\n raise ValueError(\n \"Invalid encoding missing the minor protocol version number.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5982", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Authentication struct to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if len(arg_0._credentials) == 0:\n raise ValueError(\"Authentication struct missing credentials.\")\n for arg_7 in arg_0._credentials:\n arg_7.Func(arg_6, arg_2=arg_2)\n\n arg_0.length = arg_6.length()\n super(Authentication, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5983", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Poll request payload and decode it into\n its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is missing from the\n encoded payload.\n \"\"\"\n super(PollRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(\n arg_3.Tags.ASYNCHRONOUS_CORRELATION_VALUE,\n arg_6\n ):\n arg_0._asynchronous_correlation_value = primitives.ByteString(\n tag=arg_3.Tags.ASYNCHRONOUS_CORRELATION_VALUE\n )\n arg_0._asynchronous_correlation_value.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5984", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None):\n \"\"\"\n Query the configured SLUGS service with the provided credentials.\n\n Args:\n connection_certificate (cryptography.x509.Certificate): An X.509\n certificate object obtained from the connection being\n Funcd. Required for SLUGS authentication.\n connection_info (tuple): A tuple of information pertaining to the\n connection being Funcd, including the source IP address\n and a timestamp (e.g., ('127.0.0.1', 1519759267.467451)).\n Optional, defaults to None. Ignored for SLUGS authentication.\n request_credentials (list): A list of KMIP Credential structures\n containing credential information to use for authentication.\n Optional, defaults to None. Ignored for SLUGS authentication.\n \"\"\"\n if (arg_0.users_url is None) or (arg_0.groups_url is None):\n raise exceptions.ConfigurationError(\n \"The SLUGS URL must be specified.\"\n )\n\n arg_4 = utils.get_client_identity_from_certificate(\n arg_1\n )\n\n try:\n arg_5 = requests.get(arg_0.users_url.format(arg_4))\n except Exception:\n raise exceptions.ConfigurationError(\n \"A connection could not be established using the SLUGS URL.\"\n )\n if arg_5.status_code == 404:\n raise exceptions.PermissionDenied(\n \"Unrecognized user ID: {}\".format(arg_4)\n )\n\n arg_5 = requests.get(arg_0.groups_url.format(arg_4))\n if arg_5.status_code == 404:\n raise exceptions.PermissionDenied(\n \"Group information could not be retrieved for user ID: \"\n \"{}\".format(arg_4)\n )\n\n return arg_4, arg_5.json().get('groups')"} +{"_id": "doc_5985", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Archive response payload and decode it\n into its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is missing from the\n encoded payload.\n \"\"\"\n super(ArchiveResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5986", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the Archive response payload to a stream.\n\n Args:\n output_stream (stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the data attribute is not defined.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._unique_identifier:\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(ArchiveResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_5987", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The main thread routine executed by invoking thread.start.\n\n This method manages the new client connection, Funcning a message\n handling loop. Once this method completes, the thread is finished.\n \"\"\"\n arg_0._logger.info(\"Starting session: {0}\".format(arg_0.name))\n\n try:\n arg_0._connection.do_handshake()\n except Exception as e:\n arg_0._logger.info(\"Failure Funcning TLS handshake\")\n arg_0._logger.exception(e)\n else:\n while True:\n try:\n arg_0._handle_message_loop()\n except exceptions.ConnectionClosed as e:\n break\n except Exception as e:\n arg_0._logger.info(\"Failure handling message loop\")\n arg_0._logger.exception(e)\n\n arg_0._connection.shutdown(socket.SHUT_RDWR)\n arg_0._connection.close()\n arg_0._logger.info(\"Stopping session: {0}\".format(arg_0.name))"} +{"_id": "doc_5988", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the Rekey response payload and decode it into\n its constituent parts.\n\n Args:\n input_stream (stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. 
Optional,\n defaults to KMIP 1.0.\n\n Raises:\n ValueError: Raised if the unique identifier attribute is missing\n from the encoded payload.\n \"\"\"\n super(RekeyResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n if arg_0.is_tag_next(arg_3.Tags.UNIQUE_IDENTIFIER, arg_6):\n arg_0._unique_identifier = primitives.TextString(\n tag=arg_3.Tags.UNIQUE_IDENTIFIER\n )\n arg_0._unique_identifier.Func(\n arg_6,\n arg_2=arg_2\n )\n else:\n raise ValueError(\n \"The Rekey response payload encoding is missing the unique \"\n \"identifier.\"\n )\n\n if arg_0.is_tag_next(arg_3.Tags.TEMPLATE_ATTRIBUTE, arg_6):\n arg_0._template_attribute = objects.TemplateAttribute()\n arg_0._template_attribute.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_5989", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Check if a profile is supported by the client.\n\n Args:\n conformance_clause (ConformanceClause):\n authentication_suite (AuthenticationSuite):\n\n Returns:\n bool: True if the profile is supported, False otherwise.\n\n Example:\n >>> client.Func(\n ... ConformanceClause.DISCOVER_VERSIONS,\n ... AuthenticationSuite.BASIC)\n True\n \"\"\"\n return (arg_0.is_conformance_clause_supported(arg_1) and\n arg_0.is_authentication_suite_supported(arg_2))"} +{"_id": "doc_5990", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6=None):\n \"\"\"\n Derive a new key or secret data from an existing managed object.\n\n Args:\n object_type (ObjectType): An ObjectType enumeration specifying\n what type of object to create. Required.\n unique_identifiers (list): A list of strings specifying the unique\n IDs of the existing managed objects to use for key derivation.\n Required.\n derivation_method (DerivationMethod): A DerivationMethod\n enumeration specifying what key derivation method to use.\n Required.\n derivation_parameters (DerivationParameters): A\n DerivationParameters struct containing the settings and\n options to use for key derivation.\n template_attribute (TemplateAttribute): A TemplateAttribute struct\n containing the attributes to set on the newly derived object.\n credential (Credential): A Credential struct containing a set of\n authorization parameters for the operation. 
Optional, defaults\n to None.\n\n Returns:\n dict: The results of the derivation operation, containing the\n following key/value pairs:\n\n Key | Value\n ---------------------|-----------------------------------------\n 'unique_identifier' | (string) The unique ID of the newly\n | derived object.\n 'template_attribute' | (TemplateAttribute) A struct containing\n | any attributes set on the newly derived\n | object.\n 'result_status' | (ResultStatus) An enumeration indicating\n | the status of the operation result.\n 'result_reason' | (ResultReason) An enumeration providing\n | context for the result status.\n 'result_message' | (string) A message providing additional\n | context for the operation result.\n \"\"\"\n arg_7 = Operation(OperationEnum.DERIVE_KEY)\n arg_8 = payloads.DeriveKeyRequestPayload(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5\n )\n arg_9 = messages.RequestBatchItem(\n arg_7=arg_7,\n arg_8=arg_8\n )\n\n arg_10 = arg_0._build_request_message(arg_6, [arg_9])\n arg_11 = arg_0._send_and_receive_message(arg_10)\n arg_9 = arg_11.batch_items[0]\n arg_12 = arg_9.response_payload\n\n arg_13 = {}\n\n if arg_12:\n arg_13['unique_identifier'] = arg_12.unique_identifier\n arg_13['template_attribute'] = arg_12.template_attribute\n\n arg_13['result_status'] = arg_9.result_status.value\n try:\n arg_13['result_reason'] = arg_9.result_reason.value\n except Exception:\n arg_13['result_reason'] = arg_9.result_reason\n try:\n arg_13['result_message'] = arg_9.result_message.value\n except Exception:\n arg_13['result_message'] = arg_9.result_message\n\n return arg_13"} +{"_id": "doc_5991", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Send a GetAttributes request to the server.\n\n Args:\n uuid (string): The ID of the managed object with which the\n retrieved attributes should be associated. Optional, defaults\n to None.\n attribute_names (list): A list of AttributeName values indicating\n what object attributes the client wants from the server.\n Optional, defaults to None.\n\n Returns:\n result (GetAttributesResult): A structure containing the results\n of the operation.\n \"\"\"\n arg_3 = arg_0._build_Func_batch_item(\n arg_1,\n arg_2\n )\n\n arg_4 = arg_0._build_request_message(None, [arg_3])\n arg_5 = arg_0._send_and_receive_message(arg_4)\n arg_6 = arg_0._process_batch_items(arg_5)\n return arg_6[0]"} +{"_id": "doc_5992", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Send a GetAttributeList request to the server.\n\n Args:\n uid (string): The ID of the managed object with which the retrieved\n attribute names should be associated.\n\n Returns:\n result (GetAttributeListResult): A structure containing the results\n of the operation.\n \"\"\"\n arg_2 = arg_0._build_Func_batch_item(arg_1)\n\n arg_3 = arg_0._build_request_message(None, [arg_2])\n arg_4 = arg_0._send_and_receive_message(arg_3)\n arg_5 = arg_0._process_batch_items(arg_4)\n return arg_5[0]"} +{"_id": "doc_5993", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=None):\n \"\"\"\n Send a Query request to the server.\n\n Args:\n batch (boolean): A flag indicating if the operation should be sent\n with a batch of additional operations. Defaults to False.\n Func_functions (list): A list of QueryFunction enumerations\n indicating what information the client wants from the server.\n Optional, defaults to None.\n credential (Credential): A Credential object containing\n authentication information for the server. 
Optional, defaults\n to None.\n \"\"\"\n arg_4 = arg_0._build_Func_batch_item(arg_2)\n\n # TODO (peter-hamilton): Replace this with official client batch mode.\n if arg_1:\n arg_0.batch_items.append(arg_4)\n else:\n arg_5 = arg_0._build_request_message(arg_3, [arg_4])\n arg_6 = arg_0._send_and_receive_message(arg_5)\n arg_7 = arg_0._process_batch_items(arg_6)\n return arg_7[0]"} +{"_id": "doc_5994", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Open the client connection.\n\n Raises:\n ClientConnectionFailure: if the client connection is already Func\n Exception: if an error occurs while trying to Func the connection\n \"\"\"\n if arg_0._is_Func:\n raise exceptions.ClientConnectionFailure(\n \"client connection already Func\")\n else:\n try:\n arg_0.proxy.Func()\n arg_0._is_Func = True\n except Exception as e:\n arg_0.logger.error(\"could not Func client connection: %s\", e)\n raise"} +{"_id": "doc_5995", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Close the client connection.\n\n Raises:\n Exception: if an error occurs while trying to Func the connection\n \"\"\"\n if not arg_0._is_open:\n return\n else:\n try:\n arg_0.proxy.Func()\n arg_0._is_open = False\n except Exception as e:\n arg_0.logger.error(\"could not Func client connection: %s\", e)\n raise"} +{"_id": "doc_5996", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None,\n arg_5=None):\n \"\"\"\n Create a symmetric key on a KMIP appliance.\n\n Args:\n algorithm (CryptographicAlgorithm): An enumeration defining the\n algorithm to use to generate the symmetric key.\n length (int): The length in bits for the symmetric key.\n operation_policy_name (string): The name of the operation policy\n to use for the new symmetric key. Optional, defaults to None\n name (string): The name to give the key. Optional, defaults to None\n cryptographic_usage_mask (list): list of enumerations of crypto\n usage mask passing to the symmetric key. 
Optional, defaults to\n None\n\n Returns:\n string: The uid of the newly Funcd symmetric key.\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input arguments are invalid\n \"\"\"\n # Check inputs\n if not isinstance(arg_1, enums.CryptographicAlgorithm):\n raise TypeError(\n \"algorithm must be a CryptographicAlgorithm enumeration\")\n elif not isinstance(arg_2, six.integer_types) or arg_2 <= 0:\n raise TypeError(\"length must be a positive integer\")\n if arg_5 is not None:\n if not isinstance(arg_5, list) or \\\n all(isinstance(arg_6, enums.CryptographicUsageMask)\n for arg_6 in arg_5) is False:\n raise TypeError(\n \"cryptographic_usage_mask must be a list of \"\n \"CryptographicUsageMask enumerations\")\n\n # Create the template containing the attributes\n arg_7 = arg_0._build_common_attributes(\n arg_3\n )\n arg_8 = arg_0._build_key_attributes(\n arg_1, arg_2, arg_5)\n arg_8.extend(arg_7)\n\n if arg_4:\n arg_8.extend(arg_0._build_name_attribute(arg_4))\n\n arg_9 = cobjects.TemplateAttribute(attributes=arg_8)\n\n # Create the symmetric key and handle the results\n arg_10 = arg_0.proxy.Func(enums.ObjectType.SYMMETRIC_KEY, arg_9)\n\n arg_11 = arg_10.result_status.value\n if arg_11 == enums.ResultStatus.SUCCESS:\n return arg_10.uuid\n else:\n arg_12 = arg_10.result_reason.value\n arg_13 = arg_10.result_message.value\n raise exceptions.KmipOperationFailure(arg_11, arg_12, arg_13)"} +{"_id": "doc_5997", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None):\n \"\"\"\n Create an asymmetric key pair on a KMIP appliance.\n\n Args:\n algorithm (CryptographicAlgorithm): An enumeration defining the\n algorithm to use to generate the key pair.\n length (int): The length in bits for the key pair.\n operation_policy_name (string): The name of the operation policy\n to use for the new key pair. Optional, defaults to None.\n public_name (string): The name to give the public key. Optional,\n defaults to None.\n public_usage_mask (list): A list of CryptographicUsageMask\n enumerations indicating how the public key should be used.\n Optional, defaults to None.\n private_name (string): The name to give the public key. 
Optional,\n defaults to None.\n private_usage_mask (list): A list of CryptographicUsageMask\n enumerations indicating how the private key should be used.\n Optional, defaults to None.\n\n Returns:\n string: The uid of the newly created public key.\n string: The uid of the newly created private key.\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input arguments are invalid\n \"\"\"\n # Check inputs\n if not isinstance(arg_1, enums.CryptographicAlgorithm):\n raise TypeError(\n \"algorithm must be a CryptographicAlgorithm enumeration\")\n elif not isinstance(arg_2, six.integer_types) or arg_2 <= 0:\n raise TypeError(\"length must be a positive integer\")\n\n # Create the common attributes that are shared\n arg_8 = arg_0._build_common_attributes(\n arg_3\n )\n\n arg_9 = arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,\n arg_1\n )\n arg_10 = arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_LENGTH,\n arg_2\n )\n\n arg_8.extend([arg_9, arg_10])\n arg_11 = cobjects.TemplateAttribute(\n attributes=arg_8,\n tag=enums.Tags.COMMON_TEMPLATE_ATTRIBUTE\n )\n\n # Create public / private specific attributes\n arg_12 = None\n arg_13 = None\n if arg_4:\n arg_13 = arg_0._build_name_attribute(name=arg_4)\n arg_14 = []\n if arg_5:\n arg_14 = [\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,\n arg_5\n )\n ]\n if arg_13 or arg_14:\n arg_12 = cobjects.TemplateAttribute(\n arg_13=arg_13,\n attributes=arg_14,\n tag=enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE\n )\n\n arg_15 = None\n arg_13 = None\n if arg_6:\n arg_13 = arg_0._build_name_attribute(name=arg_6)\n arg_14 = []\n if arg_7:\n arg_14 = [\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,\n arg_7\n )\n ]\n if arg_13 or arg_14:\n arg_15 = cobjects.TemplateAttribute(\n arg_13=arg_13,\n attributes=arg_14,\n tag=enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE\n )\n\n # Create the asymmetric key pair and handle the results\n arg_16 = arg_0.proxy.Func(\n common_template_attribute=arg_11,\n private_key_template_attribute=arg_15,\n public_key_template_attribute=arg_12)\n\n arg_17 = arg_16.result_status.value\n if arg_17 == enums.ResultStatus.SUCCESS:\n arg_18 = arg_16.public_key_uuid\n arg_19 = arg_16.private_key_uuid\n return arg_18, arg_19\n else:\n arg_20 = arg_16.result_reason.value\n arg_21 = arg_16.result_message.value\n raise exceptions.KmipOperationFailure(arg_17, arg_20, arg_21)"} +{"_id": "doc_5998", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Register a managed object with a KMIP appliance.\n\n Args:\n managed_object (ManagedObject): A managed object to Func. 
An\n instantiatable subclass of ManagedObject from the Pie API.\n\n Returns:\n string: The uid of the newly Funced managed object.\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input argument is invalid\n \"\"\"\n # Check input\n if not isinstance(arg_1, pobjects.ManagedObject):\n raise TypeError(\"managed object must be a Pie ManagedObject\")\n\n # Extract and create attributes\n arg_2 = list()\n\n if hasattr(arg_1, 'cryptographic_usage_masks'):\n if arg_1.cryptographic_usage_masks is not None:\n arg_3 = arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,\n arg_1.cryptographic_usage_masks\n )\n arg_2.append(arg_3)\n if hasattr(arg_1, 'operation_policy_name'):\n if arg_1.operation_policy_name is not None:\n arg_4 = arg_0.attribute_factory.create_attribute(\n enums.AttributeType.OPERATION_POLICY_NAME,\n arg_1.operation_policy_name\n )\n arg_2.append(arg_4)\n if hasattr(arg_1, 'names'):\n if arg_1.names:\n for arg_5 in arg_1.names:\n arg_6 = arg_0.attribute_factory.create_attribute(\n enums.AttributeType.NAME,\n arg_5\n )\n arg_2.append(arg_6)\n\n arg_7 = cobjects.TemplateAttribute(attributes=arg_2)\n arg_8 = arg_1.object_type\n\n # Register the managed object and handle the results\n arg_9 = arg_0.object_factory.convert(arg_1)\n arg_10 = arg_0.proxy.Func(arg_8, arg_7, arg_9)\n\n arg_11 = arg_10.result_status.value\n if arg_11 == enums.ResultStatus.SUCCESS:\n return arg_10.uuid\n else:\n arg_12 = arg_10.result_reason.value\n arg_13 = arg_10.result_message.value\n raise exceptions.KmipOperationFailure(arg_11, arg_12, arg_13)"} +{"_id": "doc_5999", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n **arg_3):\n \"\"\"\n Rekey an existing key.\n\n Args:\n uid (string): The unique ID of the symmetric key to Func.\n Optional, defaults to None.\n offset (int): The time delta, in seconds, between the new key's\n initialization date and activation date. Optional, defaults\n to None.\n **kwargs (various): A placeholder for object attributes that\n should be set on the newly Funced key. 
Currently\n supported attributes include:\n activation_date (int)\n process_start_date (int)\n protect_stop_date (int)\n deactivation_date (int)\n\n Returns:\n string: The unique ID of the newly Funced key.\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input arguments are invalid\n \"\"\"\n if arg_1 is not None:\n if not isinstance(arg_1, six.string_types):\n raise TypeError(\"The unique identifier must be a string.\")\n if arg_2 is not None:\n if not isinstance(arg_2, six.integer_types):\n raise TypeError(\"The offset must be an integer.\")\n\n # TODO (peter-hamilton) Unify attribute handling across operations\n arg_4 = []\n if arg_3.get('activation_date'):\n arg_4.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.ACTIVATION_DATE,\n arg_3.get('activation_date')\n )\n )\n if arg_3.get('process_start_date'):\n arg_4.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.PROCESS_START_DATE,\n arg_3.get('process_start_date')\n )\n )\n if arg_3.get('protect_stop_date'):\n arg_4.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.PROTECT_STOP_DATE,\n arg_3.get('protect_stop_date')\n )\n )\n if arg_3.get('deactivation_date'):\n arg_4.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.DEACTIVATION_DATE,\n arg_3.get('deactivation_date')\n )\n )\n arg_5 = cobjects.TemplateAttribute(\n arg_4=arg_4\n )\n\n # Derive the new key/data and handle the results\n arg_6 = arg_0.proxy.Func(\n uuid=arg_1,\n arg_2=arg_2,\n arg_5=arg_5\n )\n\n arg_7 = arg_6.get('result_status')\n if arg_7 == enums.ResultStatus.SUCCESS:\n return arg_6.get('unique_identifier')\n else:\n raise exceptions.KmipOperationFailure(\n arg_7,\n arg_6.get('result_reason'),\n arg_6.get('result_message')\n )"} +{"_id": "doc_6000", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n **arg_5):\n \"\"\"\n Derive a new key or secret data from existing managed objects.\n\n Args:\n object_type (ObjectType): An ObjectType enumeration specifying\n what type of object to derive. Only SymmetricKeys and\n SecretData can be specified. Required.\n unique_identifiers (list): A list of strings specifying the\n unique IDs of the existing managed objects to use for\n derivation. Multiple objects can be specified to fit the\n requirements of the given derivation method. Required.\n derivation_method (DerivationMethod): A DerivationMethod\n enumeration specifying how key derivation should be done.\n Required.\n derivation_parameters (dict): A dictionary containing various\n settings for the key derivation process. See Note below.\n Required.\n **kwargs (various): A placeholder for object attributes that\n should be set on the newly derived object. Currently\n supported attributes include:\n cryptographic_algorithm (enums.CryptographicAlgorithm)\n cryptographic_length (int)\n\n Returns:\n string: The unique ID of the newly derived object.\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input arguments are invalid\n\n Notes:\n The derivation_parameters argument is a dictionary that can\n contain the following key/value pairs:\n\n Key | Value\n ---------------------------|---------------------------------------\n 'cryptographic_parameters' | A dictionary containing additional\n | cryptographic settings. 
See the\n | decrypt method for more information.\n 'initialization_vector' | Bytes to be used to initialize the key\n | derivation function, if needed.\n 'derivation_data' | Bytes to be used as the basis for the\n | key derivation process (e.g., the\n | bytes to be encrypted, hashed, etc).\n 'salt' | Bytes to used as a salt value for the\n | key derivation function, if needed.\n | Usually used with PBKDF2.\n 'iteration_count' | An integer defining how many\n | iterations should be used with the key\n | derivation function, if needed.\n | Usually used with PBKDF2.\n \"\"\"\n # Check input\n if not isinstance(arg_1, enums.ObjectType):\n raise TypeError(\"Object type must be an ObjectType enumeration.\")\n if not isinstance(arg_2, list):\n raise TypeError(\"Unique identifiers must be a list of strings.\")\n else:\n for arg_6 in arg_2:\n if not isinstance(arg_6, six.string_types):\n raise TypeError(\n \"Unique identifiers must be a list of strings.\"\n )\n if not isinstance(arg_3, enums.DerivationMethod):\n raise TypeError(\n \"Derivation method must be a DerivationMethod enumeration.\"\n )\n if not isinstance(arg_4, dict):\n raise TypeError(\"Derivation parameters must be a dictionary.\")\n\n arg_4 = DerivationParameters(\n cryptographic_parameters=arg_0._build_cryptographic_parameters(\n arg_4.get('cryptographic_parameters')\n ),\n initialization_vector=arg_4.get(\n 'initialization_vector'\n ),\n derivation_data=arg_4.get('derivation_data'),\n salt=arg_4.get('salt'),\n iteration_count=arg_4.get('iteration_count')\n )\n\n # Handle object attributes\n arg_7 = []\n if arg_5.get('cryptographic_length'):\n arg_7.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_LENGTH,\n arg_5.get('cryptographic_length')\n )\n )\n if arg_5.get('cryptographic_algorithm'):\n arg_7.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,\n arg_5.get('cryptographic_algorithm')\n )\n )\n if arg_5.get('cryptographic_usage_mask'):\n arg_7.append(\n arg_0.attribute_factory.create_attribute(\n enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,\n arg_5.get('cryptographic_usage_mask')\n )\n )\n arg_8 = cobjects.TemplateAttribute(\n arg_7=arg_7\n )\n\n # Derive the new key/data and handle the results\n arg_9 = arg_0.proxy.Func(\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_8\n )\n\n arg_10 = arg_9.get('result_status')\n if arg_10 == enums.ResultStatus.SUCCESS:\n return arg_9.get('unique_identifier')\n else:\n raise exceptions.KmipOperationFailure(\n arg_10,\n arg_9.get('result_reason'),\n arg_9.get('result_message')\n )"} +{"_id": "doc_6001", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None):\n \"\"\"\n Check the constraints for a managed object.\n\n Args:\n uid (string): The unique ID of the managed object to Func.\n Optional, defaults to None.\n usage_limits_count (int): The number of items that can be secured\n with the specified managed object. Optional, defaults to None.\n cryptographic_usage_mask (list): A list of CryptographicUsageMask\n enumerations specifying the operations possible with the\n specified managed object. Optional, defaults to None.\n lease_time (int): The number of seconds that can be leased for the\n specified managed object. 
Optional, defaults to None.\n \"\"\"\n if arg_1 is not None:\n if not isinstance(arg_1, six.string_types):\n raise TypeError(\"The unique identifier must be a string.\")\n if arg_2 is not None:\n if not isinstance(arg_2, six.integer_types):\n raise TypeError(\"The usage limits count must be an integer.\")\n if arg_3 is not None:\n if not isinstance(arg_3, list) or \\\n not all(isinstance(\n arg_5,\n enums.CryptographicUsageMask\n ) for arg_5 in arg_3):\n raise TypeError(\n \"The cryptographic usage mask must be a list of \"\n \"CryptographicUsageMask enumerations.\"\n )\n if arg_4 is not None:\n if not isinstance(arg_4, six.integer_types):\n raise TypeError(\"The lease time must be an integer.\")\n\n arg_6 = arg_0.proxy.Func(\n arg_1,\n arg_2,\n arg_3,\n arg_4\n )\n\n arg_7 = arg_6.get('result_status')\n if arg_7 == enums.ResultStatus.SUCCESS:\n return arg_6.get('unique_identifier')\n else:\n raise exceptions.KmipOperationFailure(\n arg_7,\n arg_6.get('result_reason'),\n arg_6.get('result_message')\n )"} +{"_id": "doc_6002", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Get a managed object from a KMIP appliance.\n\n Args:\n uid (string): The unique ID of the managed object to retrieve.\n key_wrapping_specification (dict): A dictionary containing various\n settings to be used when wrapping the key during retrieval.\n See Note below. Optional, defaults to None.\n\n Returns:\n ManagedObject: The retrieved managed object object.\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input argument is invalid\n\n Notes:\n The derivation_parameters argument is a dictionary that can\n contain the following key/value pairs:\n\n Key | Value\n --------------------------------|---------------------------------\n 'wrapping_method' | A WrappingMethod enumeration\n | that specifies how the object\n | should be wrapped.\n 'encryption_key_information' | A dictionary containing the ID\n | of the wrapping key and\n | associated cryptographic\n | parameters.\n 'mac_signature_key_information' | A dictionary containing the ID\n | of the wrapping key and\n | associated cryptographic\n | parameters.\n 'attribute_names' | A list of strings representing\n | the names of attributes that\n | should be included with the\n | wrapped object.\n 'encoding_option' | An EncodingOption enumeration\n | that specifies the encoding of\n | the object before it is wrapped.\n \"\"\"\n # Check input\n if arg_1 is not None:\n if not isinstance(arg_1, six.string_types):\n raise TypeError(\"uid must be a string\")\n if arg_2 is not None:\n if not isinstance(arg_2, dict):\n raise TypeError(\n \"Key wrapping specification must be a dictionary.\"\n )\n\n arg_3 = arg_0._build_key_wrapping_specification(\n arg_2\n )\n\n # Get the managed object and handle the results\n arg_4 = arg_0.proxy.Func(arg_1, arg_2=arg_3)\n\n arg_5 = arg_4.result_status.value\n if arg_5 == enums.ResultStatus.SUCCESS:\n arg_6 = arg_0.object_factory.convert(arg_4.secret)\n return arg_6\n else:\n arg_7 = arg_4.result_reason.value\n arg_8 = arg_4.result_message.value\n raise exceptions.KmipOperationFailure(arg_5, arg_7, arg_8)"} +{"_id": "doc_6003", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Get the attributes associated with a managed object.\n\n If the uid is not specified, the appliance will use the ID placeholder\n by default.\n\n If the attribute_names list is not specified, the appliance will\n return 
all viable attributes for the managed object.\n\n Args:\n uid (string): The unique ID of the managed object with which the\n retrieved attributes should be associated. Optional, defaults\n to None.\n attribute_names (list): A list of string attribute names\n indicating which attributes should be retrieved. Optional,\n defaults to None.\n \"\"\"\n # Check input\n if arg_1 is not None:\n if not isinstance(arg_1, six.string_types):\n raise TypeError(\"uid must be a string\")\n if arg_2 is not None:\n if not isinstance(arg_2, list):\n raise TypeError(\"attribute_names must be a list of strings\")\n else:\n for arg_3 in arg_2:\n if not isinstance(arg_3, six.string_types):\n raise TypeError(\n \"attribute_names must be a list of strings\"\n )\n\n # Get the list of attributes for a managed object\n arg_4 = arg_0.proxy.Func(arg_1, arg_2)\n\n arg_5 = arg_4.result_status.value\n if arg_5 == enums.ResultStatus.SUCCESS:\n return arg_4.uuid, arg_4.attributes\n else:\n arg_6 = arg_4.result_reason.value\n arg_7 = arg_4.result_message.value\n raise exceptions.KmipOperationFailure(arg_5, arg_6, arg_7)"} +{"_id": "doc_6004", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=None):\n \"\"\"\n Revoke a managed object stored by a KMIP appliance.\n\n Args:\n revocation_reason (RevocationReasonCode): An enumeration indicating\n the revocation reason.\n uid (string): The unique ID of the managed object to Func.\n Optional, defaults to None.\n revocation_message (string): A message regarding the revocation.\n Optional, defaults to None.\n compromise_occurrence_date (int): An integer, the number of seconds\n since the epoch, which will be converted to the Datetime when\n the managed object was first believed to be compromised.\n Optional, defaults to None.\n\n Returns:\n None\n\n Raises:\n ClientConnectionNotOpen: if the client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input argument is invalid\n \"\"\"\n # Check input\n if not isinstance(arg_1, enums.RevocationReasonCode):\n raise TypeError(\n \"revocation_reason must be a RevocationReasonCode enumeration\")\n if arg_2 is not None:\n if not isinstance(arg_2, six.string_types):\n raise TypeError(\"uid must be a string\")\n if arg_3 is not None:\n if not isinstance(arg_3, six.string_types):\n raise TypeError(\"revocation_message must be a string\")\n if arg_4 is not None:\n if not isinstance(arg_4, six.integer_types):\n raise TypeError(\n \"compromise_occurrence_date must be an integer\")\n arg_4 = primitives.DateTime(\n arg_4,\n enums.Tags.COMPROMISE_OCCURRENCE_DATE)\n\n # Func the managed object and handle the results\n arg_5 = arg_0.proxy.Func(arg_1, arg_2, arg_3,\n arg_4)\n\n arg_6 = arg_5.result_status.value\n if arg_6 == enums.ResultStatus.SUCCESS:\n return\n else:\n arg_7 = arg_5.result_reason.value\n arg_8 = arg_5.result_message.value\n raise exceptions.KmipOperationFailure(arg_6, arg_7, arg_8)"} +{"_id": "doc_6005", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Get the message authentication code for data.\n\n Args:\n data (string): The data to be MACed.\n uid (string): The unique ID of the managed object that is the key\n to use for the MAC operation.\n algorithm (CryptographicAlgorithm): An enumeration defining the\n algorithm to use to generate the MAC.\n\n Returns:\n string: The unique ID of the managed object that is the key\n to use for the MAC operation.\n string: The data MACed\n\n Raises:\n ClientConnectionNotOpen: if the 
client connection is unusable\n KmipOperationFailure: if the operation result is a failure\n TypeError: if the input arguments are invalid\n \"\"\"\n # Check inputs\n if not isinstance(arg_1, six.binary_type):\n raise TypeError(\"data must be bytes\")\n if arg_2 is not None:\n if not isinstance(arg_2, six.string_types):\n raise TypeError(\"uid must be a string\")\n if arg_3 is not None:\n if not isinstance(arg_3, enums.CryptographicAlgorithm):\n raise TypeError(\n \"algorithm must be a CryptographicAlgorithm enumeration\")\n\n arg_4 = arg_0._build_cryptographic_parameters(\n {'cryptographic_algorithm': arg_3}\n )\n\n # Get the message authentication code and handle the results\n arg_5 = arg_0.proxy.Func(arg_1, arg_2, arg_4)\n\n arg_6 = arg_5.result_status.value\n if arg_6 == enums.ResultStatus.SUCCESS:\n arg_2 = arg_5.uuid.value\n arg_7 = arg_5.Func_data.value\n return arg_2, arg_7\n else:\n arg_8 = arg_5.result_reason.value\n arg_9 = arg_5.result_message.value\n raise exceptions.KmipOperationFailure(arg_6, arg_8, arg_9)"} +{"_id": "doc_6006", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build a CryptographicParameters struct from a dictionary.\n\n Args:\n value (dict): A dictionary containing the key/value pairs for a\n CryptographicParameters struct.\n\n Returns:\n None: if value is None\n CryptographicParameters: a CryptographicParameters struct\n\n Raises:\n TypeError: if the input argument is invalid\n \"\"\"\n if arg_1 is None:\n return None\n elif not isinstance(arg_1, dict):\n raise TypeError(\"Cryptographic parameters must be a dictionary.\")\n\n arg_2 = CryptographicParameters(\n block_cipher_mode=arg_1.get('block_cipher_mode'),\n padding_method=arg_1.get('padding_method'),\n hashing_algorithm=arg_1.get('hashing_algorithm'),\n key_role_type=arg_1.get('key_role_type'),\n digital_signature_algorithm=arg_1.get(\n 'digital_signature_algorithm'\n ),\n cryptographic_algorithm=arg_1.get('cryptographic_algorithm'),\n random_iv=arg_1.get('random_iv'),\n iv_length=arg_1.get('iv_length'),\n tag_length=arg_1.get('tag_length'),\n fixed_field_length=arg_1.get('fixed_field_length'),\n invocation_field_length=arg_1.get('invocation_field_length'),\n counter_length=arg_1.get('counter_length'),\n initial_counter_value=arg_1.get('initial_counter_value')\n )\n return arg_2"} +{"_id": "doc_6007", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build an EncryptionKeyInformation struct from a dictionary.\n\n Args:\n value (dict): A dictionary containing the key/value pairs for a\n EncryptionKeyInformation struct.\n\n Returns:\n EncryptionKeyInformation: an EncryptionKeyInformation struct\n\n Raises:\n TypeError: if the input argument is invalid\n \"\"\"\n if arg_1 is None:\n return None\n if not isinstance(arg_1, dict):\n raise TypeError(\"Encryption key information must be a dictionary.\")\n\n arg_2 = arg_1.get('cryptographic_parameters')\n if arg_2:\n arg_2 = arg_0._build_cryptographic_parameters(\n arg_2\n )\n arg_3 = cobjects.EncryptionKeyInformation(\n unique_identifier=arg_1.get('unique_identifier'),\n arg_2=arg_2\n )\n return arg_3"} +{"_id": "doc_6008", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build an MACSignatureKeyInformation struct from a dictionary.\n\n Args:\n value (dict): A dictionary containing the key/value pairs for a\n MACSignatureKeyInformation struct.\n\n Returns:\n MACSignatureInformation: a MACSignatureKeyInformation struct\n\n Raises:\n TypeError: if the input argument is invalid\n \"\"\"\n if arg_1 is None:\n return None\n if not 
isinstance(arg_1, dict):\n raise TypeError(\n \"MAC/signature key information must be a dictionary.\"\n )\n\n arg_2 = arg_1.get('cryptographic_parameters')\n if arg_2:\n arg_2 = arg_0._build_cryptographic_parameters(\n arg_2\n )\n arg_3 = cobjects.MACSignatureKeyInformation(\n unique_identifier=arg_1.get('unique_identifier'),\n arg_2=arg_2\n )\n return arg_3"} +{"_id": "doc_6009", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build a KeyWrappingSpecification struct from a dictionary.\n\n Args:\n value (dict): A dictionary containing the key/value pairs for a\n KeyWrappingSpecification struct.\n\n Returns:\n KeyWrappingSpecification: a KeyWrappingSpecification struct\n\n Raises:\n TypeError: if the input argument is invalid\n \"\"\"\n if arg_1 is None:\n return None\n if not isinstance(arg_1, dict):\n raise TypeError(\"Key wrapping specification must be a dictionary.\")\n\n arg_2 = arg_0._build_encryption_key_information(\n arg_1.get('encryption_key_information')\n )\n arg_3 = arg_0._build_mac_signature_key_information(\n arg_1.get('mac_signature_key_information')\n )\n\n arg_4 = cobjects.KeyWrappingSpecification(\n wrapping_method=arg_1.get('wrapping_method'),\n encryption_key_information=arg_2,\n mac_signature_key_information=arg_3,\n attribute_names=arg_1.get('attribute_names'),\n encoding_option=arg_1.get('encoding_option')\n )\n return arg_4"} +{"_id": "doc_6010", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''\n Build a name attribute, returned in a list for ease\n of use in the caller\n '''\n arg_2 = []\n if arg_1:\n arg_2.append(arg_0.attribute_factory.create_attribute(\n enums.AttributeType.NAME,\n arg_1)\n )\n return arg_2"} +{"_id": "doc_6011", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Read the data encoding the QueryRequestPayload object and decode it\n into its constituent parts.\n\n Args:\n input_buffer (Stream): A data stream containing encoded object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be decoded. Optional,\n defaults to KMIP 1.0.\n\n Raises:\n InvalidKmipEncoding: Raised if the query functions are missing\n from the encoded payload.\n \"\"\"\n super(QueryRequestPayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_6 = utils.BytearrayStream(arg_1.Func(arg_0.length))\n\n arg_7 = []\n while(arg_0.is_tag_next(arg_3.Tags.QUERY_FUNCTION, arg_6)):\n arg_8 = primitives.Enumeration(\n arg_3.QueryFunction,\n tag=arg_3.Tags.QUERY_FUNCTION\n )\n arg_8.Func(arg_6, arg_2=arg_2)\n arg_7.append(arg_8)\n\n if arg_7:\n arg_0._query_functions = arg_7\n else:\n raise exceptions.InvalidKmipEncoding(\n \"The Query request payload encoding is missing the query \"\n \"functions.\"\n )\n\n arg_0.is_oversized(arg_6)"} +{"_id": "doc_6012", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.KMIPVersion.KMIP_1_0):\n \"\"\"\n Write the data encoding the QueryResponsePayload object to a stream.\n\n Args:\n output_buffer (Stream): A data stream in which to encode object\n data, supporting a Func method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. 
Optional,\n defaults to KMIP 1.0.\n \"\"\"\n arg_6 = utils.BytearrayStream()\n\n if arg_0._operations:\n for arg_7 in arg_0._operations:\n arg_7.Func(arg_6, arg_2=arg_2)\n\n if arg_0._object_types:\n for arg_8 in arg_0._object_types:\n arg_8.Func(arg_6, arg_2=arg_2)\n\n if arg_0._vendor_identification:\n arg_0._vendor_identification.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._server_information:\n arg_0._server_information.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_0._application_namespaces:\n for arg_9 in arg_0._application_namespaces:\n arg_9.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 >= arg_3.KMIPVersion.KMIP_1_1:\n if arg_0._extension_information:\n for arg_10 in arg_0._extension_information:\n arg_10.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 >= arg_3.KMIPVersion.KMIP_1_2:\n if arg_0._attestation_types:\n for arg_11 in arg_0._attestation_types:\n arg_11.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 >= arg_3.KMIPVersion.KMIP_1_3:\n if arg_0._rng_parameters:\n for arg_12 in arg_0._rng_parameters:\n arg_12.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._profile_information:\n for arg_13 in arg_0._profile_information:\n arg_13.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._validation_information:\n for arg_14 in arg_0._validation_information:\n arg_14.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._capability_information:\n for arg_15 in arg_0._capability_information:\n arg_15.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._client_registration_methods:\n for arg_16 in arg_0._client_registration_methods:\n arg_16.Func(\n arg_6,\n arg_2=arg_2\n )\n\n if arg_2 >= arg_3.KMIPVersion.KMIP_2_0:\n if arg_0._defaults_information:\n arg_0._defaults_information.Func(\n arg_6,\n arg_2=arg_2\n )\n if arg_0._storage_protection_masks:\n for arg_17 in arg_0._storage_protection_masks:\n arg_17.Func(\n arg_6,\n arg_2=arg_2\n )\n\n arg_0.length = arg_6.length()\n super(QueryResponsePayload, arg_0).Func(\n arg_1,\n arg_2=arg_2\n )\n arg_1.Func(arg_6.buffer)"} +{"_id": "doc_6013", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Find a group of entry points with unique names.\n\n Returns a dictionary of names to :class:`EntryPoint` objects.\n \"\"\"\n arg_2 = {}\n for arg_3 in get_group_all(arg_0, arg_1=arg_1):\n if arg_3.name not in arg_2:\n arg_2[arg_3.name] = arg_3\n return arg_2"} +{"_id": "doc_6014", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Find all entry points in a group.\n\n Returns a list of :class:`EntryPoint` objects.\n \"\"\"\n arg_2 = []\n for arg_3, arg_4 in iter_files_distros(arg_1=arg_1):\n if arg_0 in arg_3:\n for arg_5, arg_6 in arg_3[arg_0].items():\n with BadEntryPoint.err_to_warnings():\n arg_2.append(EntryPoint.from_string(arg_6, arg_5, arg_4))\n\n return arg_2"} +{"_id": "doc_6015", "title": "", "text": "def Func(arg_0):\n \"\"\"Load the object to which this entry point refers.\n \"\"\"\n arg_1 = import_module(arg_0.module_name)\n arg_2 = arg_1\n if arg_0.object_name:\n for arg_3 in arg_0.object_name.split('.'):\n arg_2 = getattr(arg_2, arg_3)\n return arg_2"} +{"_id": "doc_6016", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Parse an entry point from the syntax in entry_points.txt\n\n :param str epstr: The entry point string (not including 'name =')\n :param str name: The name of this entry point\n :param Distribution distro: The distribution in which the entry point was found\n :rtype: EntryPoint\n :raises BadEntryPoint: if *epstr* can't be parsed as an entry point.\n \"\"\"\n arg_4 = entry_point_pattern.match(arg_1)\n if arg_4:\n 
arg_5, arg_6, arg_7 = arg_4.group('modulename', 'objectname', 'extras')\n if arg_7 is not None:\n arg_7 = re.split(r',\\s*', arg_7)\n return arg_0(arg_2, arg_5, arg_6, arg_7, arg_3)\n else:\n raise BadEntryPoint(arg_1)"} +{"_id": "doc_6017", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Try to return a path to static the static files compatible all\n the way back to Django 1.2. If anyone has a cleaner or better\n way to do this let me know!\n \"\"\"\n\n if VERSION >= (1, 10):\n # Since Django 1.10, forms.Media automatically invoke static\n # lazily on the path if it is relative.\n return arg_0\n try:\n # >= 1.4\n from django.templatetags.static import static\n return static(arg_0)\n except ImportError:\n pass\n try:\n # >= 1.3\n return '%s/%s' % (settings.STATIC_URL.rstrip('/'), arg_0)\n except AttributeError:\n pass\n try:\n return '%s/%s' % (settings.PAGEDOWN_URL.rstrip('/'), arg_0)\n except AttributeError:\n pass\n return '%s/%s' % (settings.MEDIA_URL.rstrip('/'), arg_0)"} +{"_id": "doc_6018", "title": "", "text": "def Func():\n \"\"\"Run Funcreload server\"\"\"\n from Funcreload import Server\n\n arg_0 = Server(app)\n\n map(arg_0.watch, glob2.glob(\"application/pages/**/*.*\")) # pages\n map(arg_0.watch, glob2.glob(\"application/macros/**/*.html\")) # macros\n map(arg_0.watch, glob2.glob(\"application/static/**/*.*\")) # public assets\n\n arg_0.serve(port=PORT)"} +{"_id": "doc_6019", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate controller, include the controller file, template & css & js directories.\"\"\"\n arg_1 = os.path.join(dirname(abspath(__file__)), 'templates/controller.py')\n arg_2 = os.path.join(dirname(abspath(__file__)), 'templates/unittest.py')\n arg_3 = arg_0.get('')\n arg_4 = os.getcwd()\n\n logger.info('Start generating controller.')\n\n if not arg_3:\n logger.warning('Controller name cannot be empty.')\n return\n\n # controller file\n with open(arg_1, 'r') as template_file:\n arg_5 = os.path.join(arg_4, 'application/controllers',\n arg_3 + '.py')\n with open(arg_5, 'w+') as controller_file:\n for arg_6 in template_file:\n arg_7 = arg_6.replace('#{controller}', arg_3)\n controller_file.write(arg_7)\n logger.info(\"New: %s\" % _relative_path(arg_5))\n\n # test file\n with open(arg_2, 'r') as template_file:\n arg_8 = os.path.join(arg_4, 'tests',\n 'test_%s.py' % arg_3)\n with open(arg_8, 'w+') as test_file:\n for arg_6 in template_file:\n arg_7 = arg_6.replace('#{controller}', arg_3) \\\n .replace('#{controller|title}', arg_3.title())\n test_file.write(arg_7)\n logger.info(\"New: %s\" % _relative_path(arg_8))\n\n # assets dir\n arg_9 = os.path.join(arg_4, 'application/pages/%s' % arg_3)\n _mkdir_p(arg_9)\n\n # form file\n _generate_form(arg_3)\n\n logger.info('Finish generating controller.')"} +{"_id": "doc_6020", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate action.\"\"\"\n arg_1 = arg_0.get('')\n arg_2 = arg_0.get('')\n arg_3 = arg_0.get('-t')\n arg_4 = os.getcwd()\n\n logger.info('Start generating action.')\n\n arg_5 = os.path.join(arg_4, 'application/controllers', arg_1 + '.py')\n if not os.path.exists(arg_5):\n logger.warning(\"The controller %s does't exist.\" % arg_1)\n return\n\n if arg_3:\n arg_6 = os.path.join(dirname(abspath(__file__)), 'templates/action.py')\n else:\n arg_6 = os.path.join(dirname(abspath(__file__)), 'templates/action_without_template.py')\n\n # Add action source codes\n with open(arg_6, 'r') as action_source_file:\n with open(arg_5, 'a') as controller_file:\n for arg_7 in action_source_file:\n arg_8 = 
arg_7.replace('#{controller}', arg_1). \\\n replace('#{action}', arg_2)\n controller_file.write(arg_8)\n logger.info(\"Updated: %s\" % _relative_path(arg_5))\n\n if arg_3:\n # assets dir\n arg_9 = os.path.join(arg_4, 'application/pages/%s/%s' % (arg_1, arg_2))\n _mkdir_p(arg_9)\n\n # html\n arg_10 = os.path.join(dirname(abspath(__file__)), 'templates/action.html')\n arg_11 = os.path.join(arg_9, '%s.html' % arg_2)\n with open(arg_10, 'r') as action_html_template_file:\n with open(arg_11, 'w') as action_html_file:\n for arg_12 in action_html_template_file:\n arg_8 = arg_12.replace('#{action}', arg_2) \\\n .replace('#{action|title}', arg_2.title()) \\\n .replace('#{controller}', arg_1)\n action_html_file.write(arg_8)\n logger.info(\"New: %s\" % _relative_path(arg_11))\n\n # js\n arg_13 = os.path.join(dirname(abspath(__file__)), 'templates/action.js')\n arg_14 = os.path.join(arg_9, '%s.js' % arg_2)\n shutil.copy(arg_13, arg_14)\n logger.info(\"New: %s\" % _relative_path(arg_14))\n\n # less\n arg_15 = os.path.join(dirname(abspath(__file__)), 'templates/action.less')\n arg_16 = os.path.join(arg_9, '%s.less' % arg_2)\n shutil.copy(arg_15, arg_16)\n logger.info(\"New: %s\" % _relative_path(arg_16))\n\n logger.info('Finish generating action.')"} +{"_id": "doc_6021", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate model.\"\"\"\n arg_1 = arg_0.get('')\n if not arg_1:\n logger.warning('Model name cannot be empty.')\n return\n\n logger.info('Start generating model.')\n\n arg_2 = os.path.join(dirname(abspath(__file__)), 'templates/model.py')\n arg_3 = os.getcwd()\n\n with open(arg_2, 'r') as template_file:\n arg_4 = os.path.join(arg_3, 'application/models',\n arg_1 + '.py')\n with open(arg_4, 'w+') as model_file:\n for arg_5 in template_file:\n arg_6 = arg_5.replace('#{model|title}', arg_1.title())\n model_file.write(arg_6)\n logger.info(\"New: %s\" % _relative_path(arg_4))\n\n with open(os.path.join(arg_3, 'application/models/__init__.py'), 'a') as package_file:\n package_file.write('\\nfrom .%s import *' % arg_1)\n\n logger.info('Finish generating model.')"} +{"_id": "doc_6022", "title": "", "text": "def Func(arg_0):\n \"\"\"Genarate macro.\"\"\"\n arg_1 = arg_0.get('').replace('-', '_')\n arg_2 = arg_0.get('')\n\n if not arg_1:\n logger.warning('Macro name cannot be empty.')\n return\n\n logger.info('Start generating macro.')\n\n arg_3 = os.getcwd()\n\n if arg_2:\n arg_4 = os.path.join(arg_3, 'application/macros', arg_2, arg_1)\n else:\n arg_4 = os.path.join(arg_3, 'application/macros', arg_1)\n\n _mkdir_p(arg_4)\n\n arg_5 = os.path.join(arg_4, '_%s.html' % arg_1)\n arg_6 = os.path.join(arg_4, '_%s.less' % arg_1)\n arg_7 = os.path.join(arg_4, '_%s.js' % arg_1)\n\n # html\n arg_8 = os.path.join(dirname(abspath(__file__)), 'templates/macro.html')\n with open(arg_8, 'r') as template_file:\n with open(arg_5, 'w+') as html_file:\n for arg_9 in template_file:\n arg_10 = arg_9.replace('#{macro}', arg_1)\n html_file.write(arg_10)\n logger.info(\"New: %s\" % _relative_path(arg_5))\n\n # css\n open(arg_6, 'a').close()\n logger.info(\"New: %s\" % _relative_path(arg_6))\n\n # js\n open(arg_7, 'a').close()\n logger.info(\"New: %s\" % _relative_path(arg_7))\n\n logger.info('Finish generating macro.')"} +{"_id": "doc_6023", "title": "", "text": "def Func(arg_0):\n \"\"\"mkdir -p path\"\"\"\n try:\n os.makedirs(arg_0)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(arg_0):\n pass\n else:\n raise\n else:\n logger.info(\"New: %s%s\", arg_0, os.path.sep)"} +{"_id": "doc_6024", "title": 
"", "text": "def Func(arg_0):\n \"\"\"Friendly time gap\"\"\"\n if not arg_0:\n return \"\"\n\n if not isinstance(arg_0, datetime.date):\n return arg_0\n\n arg_1 = datetime.datetime.now()\n arg_2 = arg_1 - arg_0\n\n if arg_0 > arg_1:\n return \"right now\"\n elif arg_2.days > 365:\n return '%d years ago' % (arg_2.days / 365)\n elif arg_2.days > 30:\n return '%d months ago' % (arg_2.days / 30)\n elif arg_2.days > 0:\n return '%d days ago' % arg_2.days\n elif arg_2.seconds > 3600:\n return '%d hours ago' % (arg_2.seconds / 3600)\n elif arg_2.seconds > 60:\n return '%d minutes ago' % (arg_2.seconds / 60)\n else:\n return 'right now'"} +{"_id": "doc_6025", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check url schema.\"\"\"\n arg_2 = arg_1.data.strip()\n if not arg_2:\n return\n arg_3 = urlparse(arg_2)\n if arg_3.scheme == \"\":\n arg_1.data = \"http://%s\" % re.sub(r'^:?/*', '', arg_2)"} +{"_id": "doc_6026", "title": "", "text": "def Func(arg_0):\n \"\"\"JSON decorator.\"\"\"\n\n @functools.wraps(arg_0)\n def wrapper(*arg_1, **arg_2):\n arg_3 = arg_0(*arg_1, **arg_2)\n if isinstance(arg_3, tuple):\n arg_4, arg_5 = arg_3\n else:\n arg_4, arg_5 = 200, arg_3\n return Response(json.dumps(arg_5), status=arg_4, mimetype='application/json')\n\n return wrapper"} +{"_id": "doc_6027", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Absolute url for endpoint.\"\"\"\n arg_2 = current_app.config\n arg_3 = arg_2.get('SITE_DOMAIN')\n arg_4 = url_for(arg_0, **arg_1)\n return join_url(arg_3, arg_4)"} +{"_id": "doc_6028", "title": "", "text": "def Func():\n \"\"\"Get current user.\"\"\"\n if not 'user_id' in session:\n return None\n arg_0 = User.query.filter(User.id == session['user_id']).first()\n if not arg_0:\n signout_user()\n return None\n return arg_0"} +{"_id": "doc_6029", "title": "", "text": "def Func(arg_0):\n \"\"\"Register routes.\"\"\"\n from . 
import controllers\n from flask.blueprints import Blueprint\n\n for arg_1 in _import_submodules_from_package(controllers):\n arg_2 = getattr(arg_1, 'bp')\n if arg_2 and isinstance(arg_2, Blueprint):\n arg_0.register_blueprint(arg_2)"} +{"_id": "doc_6030", "title": "", "text": "def Func(arg_0):\n \"\"\"Register HTTP error pages.\"\"\"\n\n @arg_0.errorhandler(403)\n def page_403(arg_1):\n return render_template('site/403/403.html'), 403\n\n @arg_0.errorhandler(404)\n def page_404(arg_1):\n return render_template('site/404/404.html'), 404\n\n @arg_0.errorhandler(500)\n def page_500(arg_1):\n return render_template('site/500/500.html'), 500"} +{"_id": "doc_6031", "title": "", "text": "def Func(arg_0):\n \"\"\"Register hooks.\"\"\"\n\n @arg_0.before_request\n def before_request():\n arg_1.user = get_current_user()\n if arg_1.user and arg_1.user.is_admin:\n arg_1._before_request_time = time.time()\n\n @arg_0.after_request\n def after_request(arg_4):\n if hasattr(arg_1, '_before_request_time'):\n arg_5 = time.time() - arg_1._before_request_time\n arg_4.headers['X-Render-Time'] = arg_5 * 1000\n return arg_4"} +{"_id": "doc_6032", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Returns csv data as a pandas Dataframe object\"\"\"\n arg_4 = arg_1\n arg_5 = 0\n if not arg_2:\n arg_5 = None\n\n return pd.read_csv(\n arg_0,\n arg_5=arg_5,\n arg_4=arg_4,\n skipinitialspace=arg_3,\n encoding='utf-8-sig'\n )"} +{"_id": "doc_6033", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=None):\n \"\"\"\n Serialize the specified DataFrame and replace the existing dataset.\n\n Parameters\n ----------\n dataframe : pandas.DataFrame\n Data to serialize.\n data_type_id : str, optional\n Format to serialize to.\n If None, the existing format is preserved.\n Supported formats are:\n 'PlainText'\n 'GenericCSV'\n 'GenericTSV'\n 'GenericCSVNoHeader'\n 'GenericTSVNoHeader'\n See the azureml.DataTypeIds class for constants.\n name : str, optional\n Name for the dataset.\n If None, the name of the existing dataset is used.\n description : str, optional\n Description for the dataset.\n If None, the name of the existing dataset is used.\n \"\"\"\n _not_none('dataframe', arg_1)\n\n if arg_2 is None:\n arg_2 = arg_0.data_type_id\n if arg_3 is None:\n arg_3 = arg_0.name\n if arg_4 is None:\n arg_4 = arg_0.description\n\n try:\n arg_5 = BytesIO()\n serialize_dataframe(arg_5, arg_2, arg_1)\n arg_6 = arg_5.getvalue()\n finally:\n arg_5.close()\n\n arg_0._upload_and_refresh(arg_6, arg_2, arg_3, arg_4)"} +{"_id": "doc_6034", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=None):\n \"\"\"\n Upload already serialized raw data and replace the existing dataset.\n\n Parameters\n ----------\n raw_data: bytes\n Dataset contents to upload.\n data_type_id : str\n Serialization format of the raw data.\n If None, the format of the existing dataset is used.\n Supported formats are:\n 'PlainText'\n 'GenericCSV'\n 'GenericTSV'\n 'GenericCSVNoHeader'\n 'GenericTSVNoHeader'\n 'ARFF'\n See the azureml.DataTypeIds class for constants.\n name : str, optional\n Name for the dataset.\n If None, the name of the existing dataset is used.\n description : str, optional\n Description for the dataset.\n If None, the name of the existing dataset is used.\n \"\"\"\n _not_none('raw_data', arg_1)\n\n if arg_2 is None:\n arg_2 = arg_0.data_type_id\n if arg_3 is None:\n arg_3 = arg_0.name\n if arg_4 is None:\n arg_4 = arg_0.description\n\n arg_0._upload_and_refresh(arg_1, arg_2, arg_3, 
arg_4)"} +{"_id": "doc_6035", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Upload already serialized raw data as a new dataset.\n\n Parameters\n ----------\n raw_data: bytes\n Dataset contents to upload.\n data_type_id : str\n Serialization format of the raw data.\n Supported formats are:\n 'PlainText'\n 'GenericCSV'\n 'GenericTSV'\n 'GenericCSVNoHeader'\n 'GenericTSVNoHeader'\n 'ARFF'\n See the azureml.DataTypeIds class for constants.\n name : str\n Name for the new dataset.\n description : str\n Description for the new dataset.\n\n Returns\n -------\n SourceDataset\n Dataset that was just created.\n Use open(), read_as_binary(), read_as_text() or to_dataframe() on\n the dataset object to get its contents as a stream, bytes, str or\n pandas DataFrame.\n \"\"\"\n _not_none('raw_data', arg_1)\n _not_none_or_empty('data_type_id', arg_2)\n _not_none_or_empty('name', arg_3)\n _not_none_or_empty('description', arg_4)\n\n return arg_0._upload(arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_6036", "title": "", "text": "def Func(arg_0):\n '''Open and return a stream for the dataset contents.'''\n return arg_0.workspace._rest.Func_intermediate_dataset_contents(\n arg_0.workspace.workspace_id,\n arg_0.experiment.experiment_id,\n arg_0.node_id,\n arg_0.port_name\n )"} +{"_id": "doc_6037", "title": "", "text": "def Func(arg_0):\n '''Read and return the dataset contents as text.'''\n return arg_0.workspace._rest.read_intermediate_dataset_contents_text(\n arg_0.workspace.workspace_id,\n arg_0.experiment.experiment_id,\n arg_0.node_id,\n arg_0.port_name\n )"} +{"_id": "doc_6038", "title": "", "text": "def Func(arg_0):\n \"\"\"Read and return the dataset contents as a pandas DataFrame.\"\"\"\n #TODO: figure out why passing in the opened stream directly gives invalid data\n arg_1 = arg_0.read_as_binary()\n arg_2 = BytesIO(arg_1)\n return deserialize_dataframe(arg_2, arg_0.data_type_id)"} +{"_id": "doc_6039", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Get an intermediate dataset.\n\n Parameters\n ----------\n node_id : str\n Module node id from the experiment graph.\n port_name : str\n Output port of the module.\n data_type_id : str\n Serialization format of the raw data.\n See the azureml.DataTypeIds class for constants.\n\n Returns\n -------\n IntermediateDataset\n Dataset object.\n Use open(), read_as_binary(), read_as_text() or to_dataframe() on\n the dataset object to get its contents as a stream, bytes, str or\n pandas DataFrame.\n \"\"\"\n return IntermediateDataset(arg_0.workspace, arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_6040", "title": "", "text": "def Func(arg_0, arg_1):\r\n \"\"\"Runs HTTP GET request to retrieve the list of datasets.\"\"\"\r\n arg_2 = arg_0.DATASOURCES_URI_FMT.format(arg_1)\r\n return arg_0._send_get_req(arg_2)"} +{"_id": "doc_6041", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\r\n \"\"\"Runs HTTP GET request to retrieve a single dataset.\"\"\"\r\n arg_3 = arg_0.DATASOURCE_URI_FMT.format(arg_1, arg_2)\r\n return arg_0._send_get_req(arg_3)"} +{"_id": "doc_6042", "title": "", "text": "def Func(arg_0, arg_1 = None, arg_2 = None, arg_3=(), arg_4=None):\n '''Funces a callable function or decorates a function to be Funced. \n\nReturns a callable, iterable object. Calling the object will invoke the Funced service.\nIterating the object will give the API URL, API key, and API help url.\n \nTo define a function which will be Funced to Azure you can simply decorate it with\nthe @Func decorator. 
This will Func the service, and then future calls to the\nfunction will run against the operationalized version of the service in the cloud.\n\n>>> @Func(workspace_id, workspace_token)\n>>> def func(a, b): \n>>> return a + b\n\nAfter Funcing you can then invoke the function using:\nfunc.service(1, 2)\n\nOr continue to invoke the function locally:\nfunc(1, 2)\n\nYou can also just call Func directly to Func a function:\n\n>>> def func(a, b): return a + b\n>>> \n>>> res = Func(func, workspace_id, workspace_token)\n>>> \n>>> url, api_key, help_url = res\n>>> res(2, 3)\n5\n>>> url, api_key, help_url = res.url, res.api_key, res.help_url\n\nThe returned result will be the Funced service.\n\nYou can specify a list of files which should be Funced along with the function.\nThe resulting files will be stored in a subdirectory called 'Script Bundle'. The\nlist of files can be one of:\n (('file1.txt', None), ) # file is read from disk\n (('file1.txt', b'contents'), ) # file contents are provided\n ('file1.txt', 'file2.txt') # files are read from disk, written with same filename\n ((('file1.txt', 'destname.txt'), None), ) # file is read from disk, written with different destination name\n\nThe various formats for each filename can be freely mixed and matched.\n'''\n if not callable(arg_0):\n def do_Func(arg_5):\n arg_5.service = _Func_worker(arg_5, arg_3, arg_0, arg_1, arg_4)\n return arg_5\n return do_Func\n\n return _Func_worker(arg_0, arg_3, arg_1, arg_2, arg_4)"} +{"_id": "doc_6043", "title": "", "text": "def Func(**arg_0):\n \"\"\"Specifies the Func used for the arguments of a published service.\n\n@Func(a=int, b = str)\ndef f(a, b):\n pass\n\"\"\"\n def l(arg_1):\n if hasattr(arg_1, '__annotations__'):\n arg_1.__annotations__.update(arg_0)\n else:\n arg_1.__annotations__ = arg_0\n return arg_1\n return l"} +{"_id": "doc_6044", "title": "", "text": "def Func(arg_0):\n \"\"\"Specifies the return type for a published service.\n\n@Func(int)\ndef f(...):\n pass\n\"\"\"\n def l(arg_1):\n if hasattr(arg_1, '__annotations__'):\n arg_1.__annotations__['return'] = arg_0\n else:\n arg_1.__annotations__ = {'return': arg_0}\n return arg_1\n return l"} +{"_id": "doc_6045", "title": "", "text": "def Func(arg_0, arg_1 = None):\n \"\"\"Funces a file to the payload to be uploaded.\n\nIf contents is omitted the file is read from disk.\nIf name is a tuple it specifies the on-disk filename and the destination filename.\n\"\"\"\n def do_Func(arg_2):\n if hasattr(arg_2, '__Funcments__'):\n arg_2.__Funcments__.append((arg_0, arg_1))\n else:\n arg_2.__Funcments__ = [(arg_0, arg_1)]\n return arg_2\n return do_Func"} +{"_id": "doc_6046", "title": "", "text": "def Func(arg_0):\n \"\"\"walks the byte code to find the variables which are actually globals\"\"\"\n arg_1 = 0\n arg_2 = arg_0.co_code\n \n arg_3 = set()\n while arg_1 < len(arg_2):\n arg_4 = ord(arg_2[arg_1])\n\n if arg_4 >= dis.HAVE_ARGUMENT:\n if arg_4 == _LOAD_GLOBAL:\n arg_5 = ord(arg_2[arg_1 + 1]) + (ord(arg_2[arg_1 + 2]) << 8)\n arg_6 = arg_0.co_names[arg_5]\n arg_3.add(arg_6)\n\n arg_1 += 2\n arg_1 += 1\n \n return arg_3"} +{"_id": "doc_6047", "title": "", "text": "def Func():\n \"\"\"\n Return a list of all implemented keyrings that can be constructed without\n parameters.\n \"\"\"\n _load_plugins()\n arg_0 = KeyringBackend.get_viable_backends()\n arg_1 = util.suppress_exceptions(arg_0, exceptions=TypeError)\n return list(arg_1)"} +{"_id": "doc_6048", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The keyring Func, suitable for display.\n\n The Func is 
derived from module and class Func.\n \"\"\"\n arg_1, arg_2, arg_3 = arg_0.__module__.rpartition('.')\n arg_3 = arg_3.replace('_', ' ')\n return ' '.join([arg_3, arg_0.__Func__])"} +{"_id": "doc_6049", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Gets the username and password for the service.\n Returns a Credential instance.\n\n The *username* argument is optional and may be omitted by\n the caller or ignored by the backend. Callers must use the\n returned username.\n \"\"\"\n # The default implementation requires a username here.\n if arg_2 is not None:\n arg_3 = arg_0.get_password(arg_1, arg_2)\n if arg_3 is not None:\n return credentials.SimpleCredential(\n arg_2,\n arg_3,\n )\n return None"} +{"_id": "doc_6050", "title": "", "text": "def Func(arg_0):\n \"\"\"If self.preferred_collection contains a D-Bus path,\n the collection at that address is returned. Otherwise,\n the default collection is returned.\n \"\"\"\n arg_1 = secretstorage.dbus_init()\n try:\n if hasattr(arg_0, 'preferred_collection'):\n arg_2 = secretstorage.Collection(\n arg_1, arg_0.preferred_collection)\n else:\n arg_2 = secretstorage.get_default_collection(arg_1)\n except exceptions.SecretStorageException as e:\n raise InitError(\"Failed to create the collection: %s.\" % e)\n if arg_2.is_locked():\n arg_2.unlock()\n if arg_2.is_locked(): # User dismissed the prompt\n raise KeyringLocked(\"Failed to unlock the collection!\")\n return arg_2"} +{"_id": "doc_6051", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Discover all keyrings for chaining.\n \"\"\"\n arg_1 = (\n keyring\n for keyring in filter(backend._limit, backend.get_all_keyring())\n if not isinstance(keyring, ChainerBackend)\n and keyring.priority > 0\n )\n return sorted(arg_1, key=backend.by_priority, reverse=True)"} +{"_id": "doc_6052", "title": "", "text": "def Func(arg_0):\n \"\"\"Set current keyring backend.\n \"\"\"\n global arg_1\n if not isinstance(arg_0, backend.KeyringBackend):\n raise TypeError(\"The keyring must be a subclass of KeyringBackend\")\n arg_1 = arg_0"} +{"_id": "doc_6053", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load the keyring class indicated by name.\n\n These popular names are tested to ensure their presence.\n\n >>> popular_names = [\n ... 'keyring.backends.Windows.WinVaultKeyring',\n ... 'keyring.backends.OS_X.Keyring',\n ... 'keyring.backends.kwallet.DBusKeyring',\n ... 'keyring.backends.SecretService.Keyring',\n ... ]\n >>> list(map(Func, popular_names))\n [...]\n\n These legacy names are retained for compatibility.\n\n >>> legacy_names = [\n ... 
]\n >>> list(map(Func, legacy_names))\n [...]\n \"\"\"\n arg_1, arg_2, arg_3 = arg_0.rpartition('.')\n __import__(arg_1)\n arg_4 = sys.modules[arg_1]\n return getattr(arg_4, arg_3)"} +{"_id": "doc_6054", "title": "", "text": "def Func():\n \"\"\"Load a keyring using the config file in the config root.\"\"\"\n\n arg_0 = 'keyringrc.cfg'\n\n arg_1 = os.path.join(platform.config_root(), arg_0)\n\n if not os.path.exists(arg_1):\n return\n\n arg_2 = configparser.RawConfigParser()\n arg_2.read(arg_1)\n _load_keyring_path(arg_2)\n\n # load the keyring class name, and then load this keyring\n try:\n if arg_2.has_section(\"backend\"):\n arg_3 = arg_2.get(\"backend\", \"default-keyring\").strip()\n else:\n raise configparser.NoOptionError('backend', 'default-keyring')\n\n except (configparser.NoOptionError, ImportError):\n arg_4 = logging.getLogger('keyring')\n arg_4.warning(\"Keyring config file contains incorrect values.\\n\"\n + \"Config file: %s\" % arg_1)\n return\n\n return load_keyring(arg_3)"} +{"_id": "doc_6055", "title": "", "text": "def Func():\n \"\"\"\n Use freedesktop.org Base Dir Specfication to determine storage\n location.\n \"\"\"\n arg_0 = os.path.expanduser('~/.local/share')\n arg_1 = os.environ.get('XDG_DATA_HOME', None) or arg_0\n return os.path.join(arg_1, 'python_keyring')"} +{"_id": "doc_6056", "title": "", "text": "def Func():\n \"\"\"\n Use freedesktop.org Base Dir Specfication to determine config\n location.\n \"\"\"\n _check_old_config_root()\n arg_0 = os.path.expanduser('~/.local/share')\n arg_1 = 'XDG_CONFIG_HOME'\n arg_2 = os.environ.get(arg_1, None) or arg_0\n return os.path.join(arg_2, 'python_keyring')"} +{"_id": "doc_6057", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a callable that outputs the data. Defaults to print.\"\"\"\n\n if \"json\" in arg_0:\n from json import dumps\n import datetime\n def jsonhandler(arg_1): arg_1.isoformat() if isinstance(arg_1, (datetime.datetime, datetime.date)) else arg_1\n if arg_0 == \"prettyjson\":\n def jsondumps(arg_2): return dumps(arg_2, default=jsonhandler, indent=2, separators=(',', ': '))\n else:\n def jsondumps(arg_2): return dumps(arg_2, default=jsonhandler)\n\n def jsonify(arg_2):\n if isinstance(arg_2, dict):\n print(jsondumps(arg_2))\n elif isinstance(arg_2, list):\n print(jsondumps([arg_3._asdict() for arg_3 in arg_2]))\n else:\n print(dumps({'result': arg_2}))\n return jsonify\n else:\n def printer(arg_2):\n if isinstance(arg_2, dict):\n print(arg_2)\n else:\n for arg_4 in arg_2:\n print(arg_4)\n return printer"} +{"_id": "doc_6058", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Runs the subcommand configured in args on the netgear session\"\"\"\n\n arg_2 = arg_1.subcommand\n\n if arg_2 == \"block_device\" or arg_2 == \"allow_device\":\n return arg_0.allow_block_device(arg_1.mac_addr, BLOCK if arg_2 == \"block_device\" else ALLOW)\n\n if arg_2 == \"attached_devices\":\n if arg_1.verbose:\n return arg_0.get_attached_devices_2()\n else:\n return arg_0.get_attached_devices()\n\n if arg_2 == 'traffic_meter':\n return arg_0.get_traffic_meter()\n\n if arg_2 == 'login':\n return arg_0.login()\n\n print(\"Unknown subcommand\")"} +{"_id": "doc_6059", "title": "", "text": "def Func():\n \"\"\"Scan for devices and print results.\"\"\"\n\n arg_0 = argparser().parse_args(sys.argv[1:])\n arg_1 = os.environ.get('PYNETGEAR_PASSWORD') or arg_0.password\n\n arg_2 = Netgear(arg_1, arg_0.host, arg_0.user, arg_0.port, arg_0.ssl, arg_0.url, arg_0.force_login_v2)\n\n arg_3 = run_subcommand(arg_2, arg_0)\n arg_4 = 
make_formatter(arg_0.format)\n\n if arg_3 is None:\n print(\"Error communicating with the Netgear router\")\n\n else:\n arg_4(arg_3)"} +{"_id": "doc_6060", "title": "", "text": "def Func():\n \"\"\"\n Try to autodetect the base URL of the router SOAP service.\n\n Returns None if it can't be found.\n \"\"\"\n for arg_0 in [\"http://routerlogin.net:5000\", \"https://routerlogin.net\",\n \"http://routerlogin.net\"]:\n try:\n arg_1 = requests.get(arg_0 + \"/soap/server_sa/\",\n headers=_get_soap_headers(\"Test:1\", \"test\"),\n verify=False)\n if arg_1.status_code == 200:\n return arg_0\n except requests.exceptions.RequestException:\n pass\n\n return None"} +{"_id": "doc_6061", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Convert value to to_type, returns default if fails.\"\"\"\n try:\n return arg_2 if arg_0 is None else arg_1(arg_0)\n except ValueError:\n # If value could not be converted\n return arg_2"} +{"_id": "doc_6062", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Login to the router.\n\n Will be called automatically by other actions.\n \"\"\"\n if not arg_0.force_Func_v2:\n arg_1 = arg_0.Func_v1()\n if arg_1:\n return arg_1\n\n return arg_0.Func_v2()"} +{"_id": "doc_6063", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return list of connected devices to the router with details.\n\n This call is slower and probably heavier on the router load.\n\n Returns None if error occurred.\n \"\"\"\n _LOGGER.info(\"Get attached devices 2\")\n\n arg_1, arg_2 = arg_0._make_request(SERVICE_DEVICE_INFO,\n \"GetAttachDevice2\")\n if not arg_1:\n return None\n\n arg_1, arg_3 = _find_node(\n arg_2.text,\n \".//GetAttachDevice2Response/NewAttachDevice\")\n if not arg_1:\n return None\n\n arg_4 = arg_3.findall(\"Device\")\n arg_5 = []\n for arg_6 in arg_4:\n arg_7 = _xml_get(arg_6, 'IP')\n arg_8 = _xml_get(arg_6, 'Name')\n arg_9 = _xml_get(arg_6, 'MAC')\n arg_10 = _convert(_xml_get(arg_6, 'SignalStrength'), int)\n arg_11 = _xml_get(arg_6, 'ConnectionType')\n arg_12 = _xml_get(arg_6, 'Linkspeed')\n arg_13 = _xml_get(arg_6, 'AllowOrBlock')\n arg_14 = _convert(_xml_get(arg_6, 'DeviceType'), int)\n arg_15 = _xml_get(arg_6, 'DeviceModel')\n arg_16 = _xml_get(arg_6, 'SSID')\n arg_17 = _xml_get(arg_6, 'ConnAPMAC')\n arg_5.append(Device(arg_8, arg_7, arg_9, arg_11, arg_10, arg_12,\n arg_13, arg_14, arg_15,\n arg_16, arg_17))\n\n return arg_5"} +{"_id": "doc_6064", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=\"\",\n arg_5=True):\n \"\"\"Make an API request to the router.\"\"\"\n # If we have no cookie (v2) or never called login before (v1)\n # and we need auth, the request will fail for sure.\n if arg_5 and not arg_0.cookie:\n if not arg_0.login():\n return False, None\n\n arg_6 = arg_0._get_headers(arg_1, arg_2, arg_5)\n\n if not arg_4:\n if not arg_3:\n arg_3 = \"\"\n if isinstance(arg_3, dict):\n arg_7 = arg_3\n arg_3 = \"\"\n for arg_8 in arg_7:\n arg_3 += \"<\" + arg_8 + \">\" + arg_7[arg_8] + \"\\n\"\n\n arg_4 = CALL_BODY.format(arg_1=SERVICE_PREFIX + arg_1,\n arg_2=arg_2, arg_3=arg_3)\n\n arg_9 = SOAP_REQUEST.format(session_id=SESSION_ID, arg_4=arg_4)\n\n try:\n arg_10 = requests.post(arg_0.soap_url, arg_6=arg_6,\n data=arg_9, timeout=30, verify=False)\n\n if arg_5 and _is_unauthorized_response(arg_10):\n # let's discard the cookie because it probably expired (v2)\n # or the IP-bound (?) 
session expired (v1)\n arg_0.cookie = None\n\n _LOGGER.warning(\"Unauthorized response, let's login and retry...\")\n if arg_0.login():\n # reset headers with new cookie first\n arg_6 = arg_0._get_headers(arg_1, arg_2, arg_5)\n arg_10 = requests.post(arg_0.soap_url, arg_6=arg_6,\n data=arg_9, timeout=30, verify=False)\n\n arg_12 = _is_valid_response(arg_10)\n\n if not arg_12:\n _LOGGER.error(\"Invalid response\")\n _LOGGER.debug(\"%s\\n%s\\n%s\", arg_10.status_code, str(arg_10.headers), arg_10.text)\n\n return arg_12, arg_10\n\n except requests.exceptions.RequestException:\n _LOGGER.exception(\"Error talking to API\")\n\n # Maybe one day we will distinguish between\n # different errors..\n return False, None"} +{"_id": "doc_6065", "title": "", "text": "def Func(arg_0):\n \"\"\"Return RGBA values of color c\n\n c should be either an X11 color or a brewer color set and index\n e.g. \"navajowhite\", \"greens3/2\"\n\n \"\"\"\n import sys\n import gi\n gi.require_version('Gtk', '3.0')\n gi.require_version('PangoCairo', '1.0')\n\n from gi.repository import Gdk\n\n try:\n arg_1 = Gdk.color_parse(arg_0)\n except ValueError:\n pass\n else:\n arg_2 = 1.0/65535.0\n arg_3 = arg_1.red*arg_2\n arg_4 = arg_1.green*arg_2\n arg_5 = arg_1.blue*arg_2\n arg_6 = 1.0\n return arg_3, arg_4, arg_5, arg_6\n\n try:\n arg_7, arg_8, arg_9 = arg_0.split('/')\n arg_3, arg_4, arg_5 = brewer_colors[arg_8][int(arg_9)]\n except (ValueError, KeyError):\n pass\n else:\n arg_2 = 1.0/255.0\n arg_3 = arg_3*arg_2\n arg_4 = arg_4*arg_2\n arg_5 = arg_5*arg_2\n arg_6 = 1.0\n return arg_3, arg_4, arg_5, arg_6\n\n sys.stderr.write(\"warning: unknown color '%s'\\n\" % arg_0)\n return None"} +{"_id": "doc_6066", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Draw this shape with the given cairo context\"\"\"\n if arg_3 is None or arg_0._intersects(arg_3):\n arg_0._Func(arg_1, arg_2, arg_3)"} +{"_id": "doc_6067", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Find extremas of a function of real domain defined by evaluating\n a cubic bernstein polynomial of given bernstein coefficients.\n \"\"\"\n # compute coefficients of derivative\n arg_4 = 3.*(arg_3-arg_0+3.*(arg_1-arg_2))\n arg_5 = 6.*(arg_0+arg_2-2.*arg_1)\n arg_6 = 3.*(arg_1-arg_0)\n\n if arg_4 == 0:\n if arg_5 == 0:\n return () # constant\n return (-arg_6 / arg_5,) # linear\n\n # quadratic\n # compute discriminant\n arg_7 = arg_5*arg_5 - 4.*arg_4*arg_6\n if arg_7 < 0:\n return ()\n\n arg_8 = -2. 
* arg_4\n if arg_7 == 0:\n return (arg_5 / arg_8,)\n\n arg_9 = math.sqrt(arg_7)\n return ((arg_5 + arg_9) / arg_8, (arg_5 - arg_9) / arg_8)"} +{"_id": "doc_6068", "title": "", "text": "def Func(arg_0):\n \"\"\"Build choices list runtime using 'sitetree_tree' tag\"\"\"\n arg_1 = u'sitetree_tree from \"%s\" template \"%s\"' % (arg_0.tree, arg_0.template)\n\n arg_2 = {'current_app': 'admin'}\n arg_3 = template.Context(arg_2) if VERSION >= (1, 8) else template.Context(**arg_2)\n arg_3.update({'request': object()})\n\n arg_4 = sitetree_tree(\n Parser(None), Token(token_type=TOKEN_BLOCK, contents=arg_1)\n ).render(arg_3)\n\n arg_5 = [(ITEMS_FIELD_ROOT_ID, arg_0.root_title)]\n\n for arg_6 in arg_4.splitlines():\n if arg_6.strip():\n arg_7 = arg_6.split(':::')\n arg_5.append((arg_7[0], mark_safe(arg_7[1])))\n\n return arg_5"} +{"_id": "doc_6069", "title": "", "text": "def Func(arg_0):\n \"\"\"Compatibility function to get rid of optparse in management commands after Django 1.10.\n\n :param tuple command_options: tuple with `CommandOption` objects.\n\n \"\"\"\n def get_options(arg_1=None):\n from optparse import make_option\n from django.core.management.base import BaseCommand\n\n arg_2 = arg_1 or make_option\n\n arg_3 = tuple([arg_2(*option.args, **option.kwargs) for option in arg_0])\n\n if arg_1 is None:\n if VERSION < (1, 8):\n arg_4 = BaseCommand.option_list + arg_3\n else:\n arg_4 = []\n\n else:\n arg_4 = arg_3\n\n return arg_4\n\n return get_options"} +{"_id": "doc_6070", "title": "", "text": "def Func(arg_0):\n \"\"\"Registers a hook callable to process tree items right before they are passed to templates.\n\n Callable should be able to:\n\n a) handle ``tree_items`` and ``tree_sender`` key params.\n ``tree_items`` will contain a list of extended TreeItem objects ready to pass to template.\n ``tree_sender`` will contain navigation type identifier\n (e.g.: `menu`, `sitetree`, `breadcrumbs`, `menu.children`, `sitetree.children`)\n\n b) return a list of extended TreeItems objects to pass to template.\n\n\n Example::\n\n # Put the following code somewhere where it'd be triggered as expected. E.g. in app view.py.\n\n # First import the register function.\n from sitetree.sitetreeapp import Func\n\n # The following function will be used as items processor.\n def my_items_processor(tree_items, tree_sender):\n # Suppose we want to process only menu child items.\n if tree_sender == 'menu.children':\n # Lets add 'Hooked: ' to resolved titles of every item.\n for item in tree_items:\n item.title_resolved = 'Hooked: %s' % item.title_resolved\n # Return items list mutated or not.\n return tree_items\n\n # And we register items processor.\n Func(my_items_processor)\n\n :param func:\n \"\"\"\n global arg_1\n global arg_3\n\n arg_1 = arg_0\n\n if arg_0:\n arg_2 = len(getargspec(arg_0).args)\n if arg_2 not in {2, 3}:\n raise SiteTreeError('`Func()` expects a function with two or three arguments.')\n arg_3 = arg_2"} +{"_id": "doc_6071", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Returns a structure describing a dynamic sitetree.utils\n The structure can be built from various sources,\n\n :param str|iterable src: If a string is passed to `src`, it'll be treated as the name of an app,\n from where one want to import sitetrees definitions. 
`src` can be an iterable\n of tree definitions (see `sitetree.toolbox.tree()` and `item()` functions).\n\n :param str|unicode target_tree_alias: Static tree alias to attach items from dynamic trees to.\n\n :param str|unicode parent_tree_item_alias: Tree item alias from a static tree to attach items from dynamic trees to.\n\n :param list include_trees: Sitetree aliases to filter `src`.\n\n :rtype: dict\n \"\"\"\n def result(arg_4=arg_0):\n if arg_3 is not None:\n arg_4 = [tree for tree in arg_4 if tree.alias in arg_3]\n\n return {\n 'app': arg_0,\n 'sitetrees': arg_4,\n 'tree': arg_1,\n 'parent_item': arg_2}\n\n if isinstance(arg_0, six.string_types):\n # Considered to be an application name.\n try:\n arg_5 = import_app_sitetree_module(arg_0)\n return None if arg_5 is None else result(getattr(arg_5, 'sitetrees', None))\n\n except ImportError as e:\n if settings.DEBUG:\n warnings.warn('Unable to register dynamic sitetree(s) for `%s` application: %s. ' % (arg_0, e))\n return None\n\n return result()"} +{"_id": "doc_6072", "title": "", "text": "def Func(arg_0):\n \"\"\"Initializes local cache from Django cache.\"\"\"\n\n # Drop cache flag set by .reset() method.\n arg_1.get('sitetrees_reset') and arg_0.empty(Func=False)\n\n arg_0.cache = arg_1.get(\n 'sitetrees', {'sitetrees': {}, 'parents': {}, 'items_by_ids': {}, 'tree_aliases': {}})"} +{"_id": "doc_6073", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Updates cache entry parameter with new data.\n\n :param str|unicode entry_name:\n :param key:\n :param value:\n \"\"\"\n if arg_2 not in arg_0.cache[arg_1]:\n arg_0.cache[arg_1][arg_2] = {}\n\n arg_0.cache[arg_1][arg_2].update(arg_3)"} +{"_id": "doc_6074", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Initializes sitetree to handle new request.\n\n :param Context|None context:\n \"\"\"\n arg_0.cache = Cache()\n arg_0.current_page_context = arg_1\n arg_0.current_request = arg_1.get('request', None) if arg_1 else None\n arg_0.current_lang = get_language()\n\n arg_0._current_app_is_admin = None\n arg_0._current_user_permissions = _UNSET\n arg_0._items_urls = {} # Resolved urls are cache for a request.\n arg_0._current_items = {}"} +{"_id": "doc_6075", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Resolves internationalized tree alias.\n Verifies whether a separate sitetree is available for currently active language.\n If so, returns i18n alias. 
If not, returns the initial alias.\n\n :param str|unicode alias:\n :rtype: str|unicode\n \"\"\"\n if arg_1 not in _I18N_TREES:\n return arg_1\n\n arg_2 = arg_0.current_lang\n arg_3 = '%s_%s' % (arg_1, arg_2)\n arg_4 = arg_0.cache.get_entry('tree_aliases', arg_3)\n\n if arg_4 is False:\n arg_4 = MODEL_TREE_CLASS.objects.filter(arg_1=arg_3).count()\n arg_0.cache.set_entry('tree_aliases', arg_3, arg_4)\n\n if arg_4:\n arg_1 = arg_3\n\n return arg_1"} +{"_id": "doc_6076", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns boolean whether current application is Admin contrib.\n\n :rtype: bool\n \"\"\"\n arg_1 = arg_0._Func\n if arg_1 is None:\n arg_2 = arg_0.current_page_context\n\n arg_3 = getattr(\n # Try from request.resolver_match.app_name\n getattr(arg_2.get('request', None), 'resolver_match', None), 'app_name',\n # Try from global context obj.\n getattr(arg_2, 'current_app', None))\n\n if arg_3 is None: # Try from global context dict.\n arg_3 = arg_2.get('current_app', '')\n\n arg_1 = arg_3 == ADMIN_APP_NAME\n arg_0._Func = arg_1\n\n return arg_1"} +{"_id": "doc_6077", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0):\n \"\"\"Calculates depth of the item in the tree.\n\n :param str|unicode tree_alias:\n :param int item_id:\n :param int depth:\n :rtype: int\n \"\"\"\n arg_4 = arg_0.get_item_by_id(arg_1, arg_2)\n\n if hasattr(arg_4, 'depth'):\n arg_3 = arg_4.depth + arg_3\n else:\n if arg_4.parent is not None:\n arg_3 = arg_0.Func(arg_1, arg_4.parent.id, arg_3 + 1)\n\n return arg_3"} +{"_id": "doc_6078", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Builds and returns Func structure for 'sitetree_Func' tag.\n\n :param str|unicode tree_alias:\n :param str|unicode tree_branches:\n :param Context context:\n :rtype: list|str\n \"\"\"\n arg_1, arg_4 = arg_0.init_tree(arg_1, arg_3)\n\n if not arg_4:\n return ''\n\n arg_2 = arg_0.resolve_var(arg_2)\n\n arg_5 = False\n arg_6 = []\n arg_7 = []\n\n arg_8 = arg_0.get_tree_current_item(arg_1)\n arg_0.tree_climber(arg_1, arg_8)\n\n # Support item addressing both through identifiers and aliases.\n for arg_9 in arg_2.split(','):\n arg_9 = arg_9.strip()\n\n if arg_9 == ALIAS_TRUNK:\n arg_5 = True\n\n elif arg_9 == ALIAS_THIS_CHILDREN and arg_8 is not None:\n arg_9 = arg_8.id\n arg_6.append(arg_9)\n\n elif arg_9 == ALIAS_THIS_ANCESTOR_CHILDREN and arg_8 is not None:\n arg_9 = arg_0.get_ancestor_item(arg_1, arg_8).id\n arg_6.append(arg_9)\n\n elif arg_9 == ALIAS_THIS_SIBLINGS and arg_8 is not None and arg_8.parent is not None:\n arg_9 = arg_8.parent.id\n arg_6.append(arg_9)\n\n elif arg_9 == ALIAS_THIS_PARENT_SIBLINGS and arg_8 is not None:\n arg_9 = arg_0.get_ancestor_level(arg_8, depth=2).id\n arg_6.append(arg_9)\n\n elif arg_9.isdigit():\n arg_6.append(int(arg_9))\n\n else:\n arg_7.append(arg_9)\n\n arg_10 = arg_0.check_access\n\n arg_11 = []\n for arg_12 in arg_4:\n if not arg_12.hidden and arg_12.inFunc and arg_10(arg_12, arg_3):\n if arg_12.parent is None:\n if arg_5:\n arg_11.append(arg_12)\n else:\n if arg_12.parent.id in arg_6 or arg_12.parent.alias in arg_7:\n arg_11.append(arg_12)\n\n arg_11 = arg_0.apply_hook(arg_11, 'Func')\n arg_0.update_has_children(arg_1, arg_11, 'Func')\n return arg_11"} +{"_id": "doc_6079", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Checks whether a current user has an access to a certain item.\n\n :param TreeItemBase item:\n :param Context context:\n :rtype: bool\n \"\"\"\n if hasattr(arg_0.current_request.user.is_authenticated, '__call__'):\n arg_3 = 
arg_0.current_request.user.is_authenticated()\n else:\n arg_3 = arg_0.current_request.user.is_authenticated\n\n if arg_1.access_loggedin and not arg_3:\n return False\n\n if arg_1.access_guest and arg_3:\n return False\n\n if arg_1.access_restricted:\n arg_4 = arg_0._current_user_permissions\n\n if arg_4 is _UNSET:\n arg_4 = set(arg_2['user'].get_all_permissions())\n arg_0._current_user_permissions = arg_4\n\n if arg_1.access_perm_type == MODEL_TREE_ITEM_CLASS.PERM_TYPE_ALL:\n if len(arg_1.perms) != len(arg_1.perms.intersection(arg_4)):\n return False\n else:\n if not len(arg_1.perms.intersection(arg_4)):\n return False\n\n return True"} +{"_id": "doc_6080", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Builds and returns breadcrumb trail structure for 'sitetree_breadcrumbs' tag.\n\n :param str|unicode tree_alias:\n :param Context context:\n :rtype: list|str\n \"\"\"\n arg_1, arg_3 = arg_0.init_tree(arg_1, arg_2)\n\n if not arg_3:\n return ''\n\n arg_4 = arg_0.get_tree_current_item(arg_1)\n\n Func = []\n\n if arg_4 is not None:\n\n arg_6 = arg_0.current_page_context\n arg_7 = arg_0.check_access\n arg_8 = arg_0.get_item_by_id\n\n def climb(arg_9):\n \"\"\"Climbs up the site tree to build breadcrumb path.\n\n :param TreeItemBase base_item:\n \"\"\"\n if arg_9.inbreadcrumbs and not arg_9.hidden and arg_7(arg_9, arg_6):\n Func.append(arg_9)\n\n if hasattr(arg_9, 'parent') and arg_9.parent is not None:\n climb(arg_8(arg_1, arg_9.parent.id))\n\n climb(arg_4)\n Func.reverse()\n\n arg_10 = arg_0.apply_hook(Func, 'breadcrumbs')\n arg_0.update_has_children(arg_1, arg_10, 'breadcrumbs')\n\n return arg_10"} +{"_id": "doc_6081", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Builds and returns Func structure for 'siteFunc_Func' tag.\n\n :param str|unicode Func_alias:\n :param Context context:\n :rtype: list|str\n \"\"\"\n arg_1, arg_3 = arg_0.init_Func(arg_1, arg_2)\n\n if not arg_3:\n return ''\n\n arg_4 = arg_0.filter_items(arg_0.get_children(arg_1, None), 'siteFunc')\n arg_4 = arg_0.apply_hook(arg_4, 'siteFunc')\n arg_0.update_has_children(arg_1, arg_4, 'siteFunc')\n\n return arg_4"} +{"_id": "doc_6082", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Builds and returns site tree item Func structure for 'sitetree_Func' tag.\n\n :param TreeItemBase parent_item:\n :param str|unicode navigation_type: menu, sitetree\n :param str|unicode use_template:\n :param Context context:\n :rtype: list\n \"\"\"\n # Resolve parent item and current tree alias.\n arg_1 = arg_0.resolve_var(arg_1, arg_4)\n arg_5, arg_6 = arg_0.get_sitetree(arg_1.tree.alias)\n\n # Mark path to current item.\n arg_0.tree_climber(arg_5, arg_0.get_tree_current_item(arg_5))\n\n arg_6 = arg_0.get_Func(arg_5, arg_1)\n arg_6 = arg_0.filter_items(arg_6, arg_2)\n arg_6 = arg_0.apply_hook(arg_6, '%s.Func' % arg_2)\n arg_0.update_has_Func(arg_5, arg_6, arg_2)\n\n arg_7 = get_template(arg_3)\n\n arg_4.push()\n arg_4['sitetree_items'] = arg_6\n arg_8 = arg_7.render(arg_4.flatten() if _CONTEXT_FLATTEN else arg_4)\n arg_4.pop()\n\n return arg_8"} +{"_id": "doc_6083", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns item's children.\n\n :param str|unicode tree_alias:\n :param TreeItemBase|None item:\n :rtype: list\n \"\"\"\n if not arg_0.current_app_is_admin():\n # We do not need i18n for a tree rendered in Admin dropdown.\n arg_1 = arg_0.resolve_tree_i18n_alias(arg_1)\n\n return arg_0.cache.get_entry('parents', arg_1)[arg_2]"} +{"_id": "doc_6084", "title": "", "text": "def 
Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Updates 'has_children' attribute for tree items inplace.\n\n :param str|unicode tree_alias:\n :param list tree_items:\n :param str|unicode navigation_type: sitetree, breadcrumbs, menu\n \"\"\"\n arg_4 = arg_0.get_children\n arg_5 = arg_0.filter_items\n arg_6 = arg_0.apply_hook\n\n for arg_7 in arg_2:\n arg_8 = arg_4(arg_1, arg_7)\n arg_8 = arg_5(arg_8, arg_3)\n arg_8 = arg_6(arg_8, '%s.has_children' % arg_3)\n arg_7.has_children = len(arg_8) > 0"} +{"_id": "doc_6085", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Filters sitetree item's children if hidden and by navigation type.\n\n NB: We do not apply any filters to sitetree in admin app.\n\n :param list items:\n :param str|unicode navigation_type: sitetree, breadcrumbs, menu\n :rtype: list\n \"\"\"\n if arg_0.current_app_is_admin():\n return arg_1\n\n arg_3 = []\n\n arg_4 = arg_0.current_page_context\n arg_5 = arg_0.check_access\n\n for arg_6 in arg_1:\n if arg_6.hidden:\n continue\n\n if not arg_5(arg_6, arg_4):\n continue\n\n if not getattr(arg_6, 'in%s' % arg_2, True): # Hidden for current nav type\n continue\n\n arg_3.append(arg_6)\n\n return arg_3"} +{"_id": "doc_6086", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Climbs up the site tree to resolve root item for chosen one.\n\n :param str|unicode tree_alias:\n :param TreeItemBase base_item:\n :rtype: TreeItemBase\n \"\"\"\n arg_3 = None\n\n if hasattr(arg_2, 'parent') and arg_2.parent is not None:\n arg_3 = arg_0.Func(arg_1, arg_0.get_item_by_id(arg_1, arg_2.parent.id))\n\n if arg_3 is None:\n return arg_2\n\n return arg_3"} +{"_id": "doc_6087", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Resolves name as a variable in a given context.\n\n If no context specified page context' is considered as context.\n\n :param str|unicode varname:\n :param Context context:\n :return:\n \"\"\"\n arg_2 = arg_2 or arg_0.current_page_context\n\n if isinstance(arg_1, FilterExpression):\n arg_1 = arg_1.resolve(arg_2)\n else:\n arg_1 = arg_1.strip()\n\n try:\n arg_1 = Variable(arg_1).resolve(arg_2)\n except VariableDoesNotExist:\n arg_1 = arg_1\n\n return arg_1"} +{"_id": "doc_6088", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses Func tag parameters.\n\n Two notation types are possible:\n 1. Two arguments:\n {% Func from \"mytree\" %}\n Used to render breadcrumb path for \"mytree\" site tree.\n\n 2. Four arguments:\n {% Func from \"mytree\" template \"sitetree/mycrumb.html\" %}\n Used to render breadcrumb path for \"mytree\" site tree using specific\n template \"sitetree/mycrumb.html\"\n\n \"\"\"\n arg_2 = arg_1.split_contents()\n arg_3 = detect_clause(arg_0, 'template', arg_2)\n arg_4 = len(arg_2)\n\n if arg_4 == 3:\n arg_5 = arg_0.compile_filter(arg_2[2])\n return FuncNode(arg_5, arg_3)\n else:\n raise template.TemplateSyntaxError(\n '%r tag requires two arguments. E.g. {%% Func from \"mytree\" %%}.' 
% arg_2[0])"} +{"_id": "doc_6089", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses Func tag parameters.\n\n {% Func from \"mytree\" include \"trunk,1,level3\" %}\n Used to render trunk, branch with id 1 and branch aliased 'level3'\n elements from \"mytree\" site tree as a menu.\n\n These are reserved aliases:\n * 'trunk' - items without parents\n * 'this-children' - items under item resolved as current for the current page\n * 'this-siblings' - items under parent of item resolved as current for\n the current page (current item included)\n * 'this-ancestor-children' - items under grandparent item (closest to root)\n for the item resolved as current for the current page\n\n {% Func from \"mytree\" include \"trunk,1,level3\" template \"sitetree/mymenu.html\" %}\n\n \"\"\"\n arg_2 = arg_1.split_contents()\n arg_3 = detect_clause(arg_0, 'template', arg_2)\n arg_4 = len(arg_2)\n\n if arg_4 == 5 and arg_2[3] == 'include':\n arg_5 = arg_0.compile_filter(arg_2[2])\n arg_6 = arg_0.compile_filter(arg_2[4])\n return FuncNode(arg_5, arg_6, arg_3)\n else:\n raise template.TemplateSyntaxError(\n '%r tag requires four arguments. '\n 'E.g. {%% Func from \"mytree\" include \"trunk,1,level3\" %%}.' % arg_2[0])"} +{"_id": "doc_6090", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Render helper is used by template node functions\n to Func given template with given tree items in context.\n\n \"\"\"\n arg_0.push()\n arg_0['sitetree_items'] = arg_1\n\n if isinstance(arg_2, FilterExpression):\n arg_2 = arg_2.resolve(arg_0)\n\n arg_3 = get_template(arg_2).Func(arg_0.flatten() if _CONTEXT_FLATTEN else arg_0)\n arg_0.pop()\n\n return arg_3"} +{"_id": "doc_6091", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Node constructor to be used in tags.\"\"\"\n arg_5 = arg_2.split_contents()\n\n if len(arg_5) >= 3 and arg_5[1] == arg_3:\n arg_6 = arg_0.get_as_var(arg_5)\n arg_7 = arg_1.compile_filter(arg_5[2])\n return arg_0(arg_7, arg_6)\n\n raise template.TemplateSyntaxError(\n '%r tag requires at least two arguments. E.g. {%% %s %%}.' 
% (arg_5[0], arg_4))"} +{"_id": "doc_6092", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"Fixes Admin contrib redirects compatibility problems\n introduced in Django 1.4 by url handling changes.\n\n \"\"\"\n arg_2 = arg_0[0].path\n arg_3 = '../'\n\n if 'delete' in arg_2:\n # Weird enough 'delete' is not handled by TreeItemAdmin::response_change().\n arg_3 += '../'\n elif 'history' in arg_2:\n if 'item_id' not in arg_1:\n # Encountered request from history page to return to tree layout page.\n arg_3 += '../'\n\n return HttpResponseRedirect(arg_2 + arg_3)"} +{"_id": "doc_6093", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, **arg_4):\n \"\"\"Redirects to the appropriate items' 'continue' page on item add.\n\n As we administer tree items within tree itself, we\n should make some changes to redirection process.\n\n \"\"\"\n if arg_3 is None:\n arg_3 = '../item_%s/' % arg_2.pk\n\n return arg_0._redirect(arg_1, super(TreeItemAdmin, arg_0).Func(arg_1, arg_2, arg_3))"} +{"_id": "doc_6094", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Redirects to the appropriate items' 'add' page on item change.\n\n As we administer tree items within tree itself, we\n should make some changes to redirection process.\n\n \"\"\"\n return arg_0._redirect(arg_1, super(TreeItemAdmin, arg_0).Func(arg_1, arg_2))"} +{"_id": "doc_6095", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Returns modified form for TreeItem model.\n 'Parent' field choices are built by sitetree itself.\n\n \"\"\"\n if arg_2 is not None and arg_2.parent is not None:\n arg_0.previous_parent = arg_2.parent\n arg_5 = arg_0.previous_parent.id\n else:\n arg_5 = None\n\n arg_6 = TreeItemChoiceField(arg_0.tree, initial=arg_5)\n arg_7 = super(TreeItemAdmin, arg_0).Func(arg_1, arg_2, **arg_3)\n arg_6.label = arg_7.base_fields['parent'].label\n arg_6.help_text = arg_7.base_fields['parent'].help_text\n arg_6.widget = arg_7.base_fields['parent'].widget\n # Replace 'parent' TreeItem field with new appropriate one\n arg_7.base_fields['parent'] = arg_6\n\n # Try to resolve all currently registered url names including those in namespaces.\n if not getattr(arg_0, 'known_url_names', False):\n arg_0.known_url_names = []\n arg_0.known_url_rules = []\n arg_14 = get_resolver(get_urlconf())\n for arg_15, (arg_16, arg_17) in arg_14.namespace_dict.items():\n if arg_15 != 'admin':\n arg_0._stack_known_urls(arg_17.reverse_dict, arg_15)\n arg_0._stack_known_urls(arg_14.reverse_dict)\n arg_0.known_url_rules = sorted(arg_0.known_url_rules)\n\n arg_7.known_url_names_hint = _(\n 'You are seeing this warning because \"URL as Pattern\" option is active and pattern entered above '\n 'seems to be invalid. 
Currently registered URL pattern names and parameters: ')\n arg_7.known_url_names = arg_0.known_url_names\n arg_7.known_url_rules = arg_0.known_url_rules\n return arg_7"} +{"_id": "doc_6096", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Fetches Tree for current or given TreeItem.\"\"\"\n if arg_2 is None:\n arg_2 = arg_0.get_object(arg_1, arg_3).tree_id\n arg_0.tree = MODEL_TREE_CLASS._default_manager.get(pk=arg_2)\n arg_0.tree.verbose_name_plural = arg_0.tree._meta.verbose_name_plural\n arg_0.tree.urls = _TREE_URLS\n return arg_0.tree"} +{"_id": "doc_6097", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Moves item up or down by swapping 'sort_order' field values of neighboring items.\"\"\"\n arg_5 = MODEL_TREE_ITEM_CLASS._default_manager.get(pk=arg_3)\n if arg_4 == 'up':\n arg_6 = 'sort_order'\n else:\n arg_6 = '-sort_order'\n\n arg_7 = MODEL_TREE_ITEM_CLASS._default_manager.filter(\n parent=arg_5.parent,\n tree=arg_5.tree\n ).order_by(arg_6)\n\n arg_8 = None\n for arg_9 in arg_7:\n if arg_9 != arg_5:\n arg_8 = arg_9\n else:\n break\n\n if arg_8 is not None:\n arg_10 = arg_5.sort_order\n arg_11 = arg_8.sort_order\n\n arg_5.sort_order = arg_11\n arg_8.sort_order = arg_10\n\n arg_5.save()\n arg_8.save()\n\n return HttpResponseRedirect('../../')"} +{"_id": "doc_6098", "title": "", "text": "def Func(arg_0):\n \"\"\"Manages not only TreeAdmin URLs but also TreeItemAdmin URLs.\"\"\"\n arg_1 = super(TreeAdmin, arg_0).Func()\n\n arg_2 = 'change/' if DJANGO_POST_19 else ''\n\n arg_3 = [\n url(r'^change/$', redirects_handler, name=get_tree_item_url_name('changelist')),\n\n url(r'^((?P\\d+)/)?%sitem_add/$' % arg_2,\n arg_0.admin_site.admin_view(arg_0.tree_admin.item_add), name=get_tree_item_url_name('add')),\n\n url(r'^(?P\\d+)/%sitem_(?P\\d+)/$' % arg_2,\n arg_0.admin_site.admin_view(arg_0.tree_admin.item_edit), name=get_tree_item_url_name('change')),\n\n url(r'^%sitem_(?P\\d+)/$' % arg_2,\n arg_0.admin_site.admin_view(arg_0.tree_admin.item_edit), name=get_tree_item_url_name('change')),\n\n url(r'^((?P\\d+)/)?%sitem_(?P\\d+)/delete/$' % arg_2,\n arg_0.admin_site.admin_view(arg_0.tree_admin.item_delete), name=get_tree_item_url_name('delete')),\n\n url(r'^((?P\\d+)/)?%sitem_(?P\\d+)/history/$' % arg_2,\n arg_0.admin_site.admin_view(arg_0.tree_admin.item_history), name=get_tree_item_url_name('history')),\n\n url(r'^(?P\\d+)/%sitem_(?P\\d+)/move_(?P(up|down))/$' % arg_2,\n arg_0.admin_site.admin_view(arg_0.tree_admin.item_move), name=get_tree_item_url_name('move')),\n ]\n\n if not DJANGO_POST_19:\n arg_3 = patterns_func('', *arg_3)\n\n if SMUGGLER_INSTALLED:\n arg_3 += (url(r'^dump_all/$', arg_0.admin_site.admin_view(arg_0.dump_view), name='sitetree_dump'),)\n\n return arg_3 + arg_1"} +{"_id": "doc_6099", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Dumps sitetrees with items using django-smuggler.\n\n :param request:\n :return:\n \"\"\"\n from smuggler.views import dump_to_response\n return dump_to_response(arg_1, [MODEL_TREE, MODEL_TREE_ITEM], filename_prefix='sitetrees')"} +{"_id": "doc_6100", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=None, **arg_3):\n \"\"\"Dynamically creates and returns a sitetree.\n\n :param str|unicode alias:\n :param str|unicode title:\n :param iterable items: dynamic sitetree items objects created by `item` function.\n :param kwargs: Additional arguments to pass to tree item initializer.\n\n :rtype: TreeBase\n \"\"\"\n arg_4 = get_tree_model()(arg_0=arg_0, arg_1=arg_1, **arg_3)\n arg_4.id = 
generate_id_for(arg_4)\n arg_4.is_dynamic = True\n\n if arg_2 is not None:\n arg_4.dynamic_items = []\n def traverse(arg_2):\n for arg_8 in arg_2:\n arg_8.tree = arg_4\n arg_4.dynamic_items.append(arg_8)\n if hasattr(arg_8, 'dynamic_children'):\n traverse(arg_8.dynamic_children)\n\n traverse(arg_2)\n return arg_4"} +{"_id": "doc_6101", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=True, arg_4='', arg_5='', arg_6='',\n arg_7=True, arg_8=True, arg_9=True,\n arg_10=False, arg_11=False,\n arg_12=None, arg_13=True, **arg_14):\n \"\"\"Dynamically creates and returns a sitetree Func object.\n\n :param str|unicode title:\n\n :param str|unicode url:\n\n :param list, set children: a list of children for tree Func. Children should also be created by `Func` function.\n\n :param bool url_as_pattern: consider URL as a name of a named URL\n\n :param str|unicode hint: hints are usually shown to users\n\n :param str|unicode alias: Func name to address it from templates\n\n :param str|unicode description: additional information on Func (usually is not shown to users)\n\n :param bool in_menu: show this Func in menus\n\n :param bool in_breadcrumbs: show this Func in breadcrumbs\n\n :param bool in_sitetree: show this Func in sitetrees\n\n :param bool access_loggedin: show Func to logged in users only\n\n :param bool access_guest: show Func to guest users only\n\n :param list|str||unicode|int, Permission access_by_perms: restrict access to users with these permissions.\n\n This can be set to one or a list of permission names, IDs or Permission instances.\n\n Permission names are more portable and should be in a form `.`, e.g.:\n my_app.allow_save\n\n\n :param bool perms_mode_all: permissions set interpretation rule:\n True - user should have all the permissions;\n False - user should have any of chosen permissions.\n\n :rtype: TreeItemBase\n\n \"\"\"\n arg_15 = get_tree_Func_model()(\n arg_0=arg_0, arg_1=arg_1, urlaspattern=arg_3,\n arg_4=arg_4, arg_5=arg_5, arg_6=arg_6, inmenu=arg_7,\n insitetree=arg_9, inbreadcrumbs=arg_8,\n arg_10=arg_10, arg_11=arg_11,\n **arg_14)\n\n arg_15.id = generate_id_for(arg_15)\n arg_15.is_dynamic = True\n arg_15.dynamic_children = []\n\n arg_19 = []\n if arg_12:\n # Make permissions a list if currently a single object\n if not isinstance(arg_12, list):\n arg_12 = [arg_12]\n\n for arg_20 in arg_12:\n if isinstance(arg_20, six.string_types):\n # Get permission object from string\n try:\n arg_21, arg_22 = arg_20.split('.')\n except ValueError:\n raise ValueError(\n 'Wrong permission string format: supplied - `%s`; '\n 'expected - `.`.' % arg_20)\n\n try:\n arg_20 = Permission.objects.get(arg_22=arg_22, content_type__app_label=arg_21)\n except Permission.DoesNotExist:\n raise ValueError('Permission `%s.%s` does not exist.' 
% (arg_21, arg_22))\n\n elif not isinstance(arg_20, (int, Permission)):\n raise ValueError('Permissions must be given as strings, ints, or `Permission` instances.')\n\n arg_19.append(arg_20)\n\n arg_15.permissions = arg_19 or []\n arg_15.access_perm_type = arg_15.PERM_TYPE_ALL if arg_13 else arg_15.PERM_TYPE_ANY\n\n if arg_15.permissions:\n arg_15.access_restricted = True\n\n if arg_2 is not None:\n for arg_26 in arg_2:\n arg_26.parent = arg_15\n arg_15.dynamic_children.append(arg_26)\n\n return arg_15"} +{"_id": "doc_6102", "title": "", "text": "def Func(arg_0):\n \"\"\"Imports sitetree module from a given app.\n\n :param str|unicode app: Application name\n :return: module|None\n \"\"\"\n arg_1 = settings.APP_MODULE_NAME\n arg_2 = import_module(arg_0)\n try:\n arg_3 = import_module('%s.%s' % (arg_0, arg_1))\n return arg_3\n except ImportError:\n if module_has_submodule(arg_2, arg_1):\n raise\n return None"} +{"_id": "doc_6103", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a certain sitetree model as defined in the project settings.\n\n :param str|unicode settings_entry_name:\n :rtype: TreeItemBase|TreeBase\n \"\"\"\n arg_1, arg_2 = get_app_n_model(arg_0)\n\n try:\n arg_3 = apps_get_model(arg_1, arg_2)\n except (LookupError, ValueError):\n arg_3 = None\n\n if arg_3 is None:\n raise ImproperlyConfigured(\n '`SITETREE_%s` refers to model `%s` that has not been installed.' % (arg_0, arg_2))\n\n return arg_3"} +{"_id": "doc_6104", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Wrapper function for IPv4 and IPv6 converters.\n\n :arg ip: IPv4 or IPv6 address\n \"\"\"\n try:\n return int(binascii.hexlify(socket.inet_aton(arg_0)), 16)\n except socket.error:\n return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, arg_0)), 16)"} +{"_id": "doc_6105", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Populate location dict for converted IP.\n Returns dict with numerous location properties.\n\n :arg ipnum: Result of ip2long conversion\n \"\"\"\n arg_2 = arg_0._seek_country(arg_1)\n if arg_2 == arg_0._databaseSegments:\n return {}\n\n arg_3 = (2 * arg_0._recordLength - 1) * arg_0._databaseSegments\n try:\n arg_0._lock.acquire()\n arg_0._fp.seek(arg_2 + arg_3, os.SEEK_SET)\n arg_4 = arg_0._fp.read(const.FULL_RECORD_LENGTH)\n finally:\n arg_0._lock.release()\n\n if PY3 and type(arg_4) is bytes:\n arg_4 = arg_4.decode(ENCODING)\n\n arg_5 = {\n 'dma_code': 0,\n 'area_code': 0,\n 'metro_code': None,\n 'postal_code': None\n }\n\n arg_6 = 0\n arg_7 = 0\n\n arg_8 = ord(arg_4[0])\n arg_5['country_code'] = const.COUNTRY_CODES[arg_8]\n arg_5['country_code3'] = const.COUNTRY_CODES3[arg_8]\n arg_5['country_name'] = const.COUNTRY_NAMES[arg_8]\n arg_5['continent'] = const.CONTINENT_NAMES[arg_8]\n\n def read_data(arg_4, arg_9):\n arg_10 = arg_9\n while arg_4[arg_10] != '\\0':\n arg_10 += 1\n return arg_10, arg_4[arg_9:arg_10] if arg_10 > arg_9 else None\n\n arg_11, arg_5['region_code'] = read_data(arg_4, 1)\n arg_11, arg_5['city'] = read_data(arg_4, arg_11 + 1)\n arg_11, arg_5['postal_code'] = read_data(arg_4, arg_11 + 1)\n arg_11 = arg_11 + 1\n\n for arg_12 in range(3):\n arg_6 += (ord(arg_4[arg_11 + arg_12]) << (arg_12 * 8))\n\n for arg_12 in range(3):\n arg_7 += (ord(arg_4[arg_11 + arg_12 + 3]) << (arg_12 * 8))\n\n arg_5['latitude'] = (arg_6 / 10000.0) - 180.0\n arg_5['longitude'] = (arg_7 / 10000.0) - 180.0\n\n if arg_0._databaseType in (const.CITY_EDITION_REV1, const.CITY_EDITION_REV1_V6):\n if arg_5['country_code'] == 'US':\n arg_13 = 0\n for arg_12 in range(3):\n arg_13 += 
ord(arg_4[arg_11 + arg_12 + 6]) << (arg_12 * 8)\n\n arg_5['dma_code'] = int(floor(arg_13 / 1000))\n arg_5['area_code'] = arg_13 % 1000\n arg_5['metro_code'] = const.DMA_MAP.get(arg_5['dma_code'])\n\n arg_14 = (arg_5['country_code'], arg_5['region_code'])\n arg_5['time_zone'] = time_zone_by_country_and_region(*arg_14)\n\n return arg_5"} +{"_id": "doc_6106", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Hostname lookup method, supports both IPv4 and IPv6.\n \"\"\"\n if arg_0._databaseType in const.IPV6_EDITIONS:\n arg_2 = socket.getaddrinfo(arg_1, 0, socket.AF_INET6)\n arg_3, arg_4, arg_5, arg_6, arg_7 = arg_2[0]\n arg_8, arg_9, arg_10, arg_11 = arg_7\n return arg_8\n else:\n return socket.gethostbyname(arg_1)"} +{"_id": "doc_6107", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the database ID for specified hostname.\n The id might be useful as array index. 0 is unknown.\n\n :arg hostname: Hostname to get ID from.\n \"\"\"\n arg_2 = arg_0._gethostbyname(arg_1)\n return arg_0.id_by_addr(arg_2)"} +{"_id": "doc_6108", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the database ID for specified address.\n The ID might be useful as array index. 0 is unknown.\n\n :arg addr: IPv4 or IPv6 address (eg. 203.0.113.30)\n \"\"\"\n if arg_0._databaseType in (const.PROXY_EDITION, const.NETSPEED_EDITION_REV1, const.NETSPEED_EDITION_REV1_V6):\n raise GeoIPError('Invalid database type; this database is not supported')\n arg_2 = 6 if arg_1.find(':') >= 0 else 4\n if arg_2 == 4 and arg_0._databaseType not in (const.COUNTRY_EDITION, const.NETSPEED_EDITION):\n raise GeoIPError('Invalid database type; this database supports IPv6 addresses, not IPv4')\n if arg_2 == 6 and arg_0._databaseType != const.COUNTRY_EDITION_V6:\n raise GeoIPError('Invalid database type; this database supports IPv4 addresses, not IPv6')\n\n arg_3 = util.ip2long(arg_1)\n return arg_0._seek_country(arg_3) - const.COUNTRY_BEGIN"} +{"_id": "doc_6109", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns full country name for specified hostname.\n\n :arg hostname: Hostname (e.g. example.com)\n \"\"\"\n arg_2 = arg_0._gethostbyname(arg_1)\n return arg_0.country_name_by_addr(arg_2)"} +{"_id": "doc_6110", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns Organization, ISP, or ASNum name for given IP address.\n\n :arg addr: IP address (e.g. 203.0.113.30)\n \"\"\"\n arg_2 = (const.ORG_EDITION, const.ISP_EDITION,\n const.ASNUM_EDITION, const.ASNUM_EDITION_V6)\n if arg_0._databaseType not in arg_2:\n arg_3 = 'Invalid database type, expected Org, ISP or ASNum'\n raise GeoIPError(arg_3)\n\n arg_4 = util.ip2long(arg_1)\n return arg_0._get_org(arg_4)"} +{"_id": "doc_6111", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns Organization, ISP, or ASNum name for given hostname.\n\n :arg hostname: Hostname (e.g. 
example.com)\n \"\"\"\n arg_2 = arg_0._gethostbyname(arg_1)\n return arg_0.org_by_addr(arg_2)"} +{"_id": "doc_6112", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns time zone from country and region code.\n\n :arg country_code: Country code\n :arg region_code: Region code\n \"\"\"\n arg_2 = country_dict.get(arg_0)\n if not arg_2:\n return None\n\n if isinstance(arg_2, str):\n return arg_2\n\n return arg_2.get(arg_1)"} +{"_id": "doc_6113", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"If the given filename should be compressed, returns the\n compressed filename.\n\n A file can be compressed if:\n\n - It is a whitelisted extension\n - The compressed file does not exist\n - The compressed file exists by is older than the file itself\n\n Otherwise, it returns False.\n\n \"\"\"\n if not os.path.splitext(arg_1)[1][1:] in arg_0.suffixes_to_compress:\n return False\n\n arg_2 = None\n arg_3 = None\n arg_4 = '{}.{}'.format(arg_1, arg_0.suffix)\n try:\n arg_2 = os.stat(arg_1)\n arg_3 = os.stat(arg_4)\n except OSError: # FileNotFoundError is for Python3 only\n pass\n\n if arg_2 and arg_3:\n return (arg_4\n if arg_2.st_mtime > arg_3.st_mtime\n else False)\n else:\n return arg_4"} +{"_id": "doc_6114", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=False):\n \"\"\"Copy or symlink the file.\"\"\"\n arg_4 = os.symlink if arg_2 else shutil.Func2\n if arg_2 and os.path.lexists(arg_1):\n os.remove(arg_1)\n if arg_3: # relative symlink from dst\n arg_4(os.path.relpath(arg_0, os.path.dirname(arg_1)), arg_1)\n else:\n arg_4(arg_0, arg_1)"} +{"_id": "doc_6115", "title": "", "text": "def Func(arg_0):\n \"\"\"Transform path to url, converting backslashes to slashes if needed.\"\"\"\n\n if os.sep != '/':\n arg_0 = '/'.join(arg_0.split(os.sep))\n return quote(arg_0)"} +{"_id": "doc_6116", "title": "", "text": "def Func(arg_0):\n \"\"\"Reads markdown file, converts output and fetches title and meta-data for\n further processing.\n \"\"\"\n global arg_2\n # Use utf-8-sig codec to remove BOM if it is present. 
This is only possible\n # this way prior to feeding the text to the markdown parser (which would\n # also default to pure utf-8)\n with open(arg_0, 'r', encoding='utf-8-sig') as f:\n arg_1 = f.read()\n\n if arg_2 is None:\n arg_2 = Markdown(extensions=['markdown.extensions.meta',\n 'markdown.extensions.tables'],\n output_format='html5')\n else:\n arg_2.reset()\n # When https://github.com/Python-Markdown/markdown/pull/672\n # will be available, this can be removed.\n arg_2.Meta = {}\n\n # Mark HTML with Markup to prevent jinja2 autoescaping\n arg_4 = {'description': Markup(arg_2.convert(arg_1))}\n\n try:\n arg_5 = arg_2.Meta.copy()\n except AttributeError:\n pass\n else:\n arg_4['meta'] = arg_5\n try:\n arg_4['title'] = arg_2.Meta['title'][0]\n except KeyError:\n pass\n\n return arg_4"} +{"_id": "doc_6117", "title": "", "text": "def Func(arg_0):\n \"\"\"Loads the exif data of all images in an album from cache\"\"\"\n if not hasattr(arg_0.gallery, \"exifCache\"):\n _restore_cache(arg_0.gallery)\n arg_1 = arg_0.gallery.exifCache\n\n for arg_2 in arg_0.medias:\n if arg_2.type == \"image\":\n arg_3 = os.path.join(arg_2.path, arg_2.filename)\n if arg_3 in arg_1:\n arg_2.exif = arg_1[arg_3]"} +{"_id": "doc_6118", "title": "", "text": "def Func(arg_0):\n \"\"\"Restores the exif data cache from the cache file\"\"\"\n arg_1 = os.path.join(arg_0.settings[\"destination\"], \".exif_cache\")\n try:\n if os.path.exists(arg_1):\n with open(arg_1, \"rb\") as cacheFile:\n arg_0.exifCache = pickle.load(cacheFile)\n logger.debug(\"Loaded cache with %d entries\", len(arg_0.exifCache))\n else:\n arg_0.exifCache = {}\n except Exception as e:\n logger.warn(\"Could not load cache: %s\", e)\n arg_0.exifCache = {}"} +{"_id": "doc_6119", "title": "", "text": "def Func(arg_0):\n \"\"\"Stores the exif data of all images in the gallery\"\"\"\n\n if hasattr(arg_0, \"exifCache\"):\n arg_1 = arg_0.exifCache\n else:\n arg_1 = arg_0.exifCache = {}\n\n for arg_2 in arg_0.albums.values():\n for arg_3 in arg_2.images:\n arg_1[arg_4.path.join(arg_3.path, arg_3.filename)] = arg_3.exif\n\n arg_8 = arg_4.path.join(arg_0.settings[\"destination\"], \".exif_cache\")\n\n if len(arg_1) == 0:\n if arg_4.path.exists(arg_8):\n arg_4.remove(arg_8)\n return\n\n try:\n with open(arg_8, \"wb\") as cacheFile:\n pickle.dump(arg_1, cacheFile)\n logger.debug(\"Stored cache with %d entries\", len(arg_0.exifCache))\n except Exception as e:\n logger.warn(\"Could not store cache: %s\", e)\n arg_4.remove(arg_8)"} +{"_id": "doc_6120", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Removes all filtered Media and subdirs from an Album\"\"\"\n arg_2 = os.path.join(arg_0.src_path, \".nomedia\")\n\n if os.path.isfile(arg_2):\n if os.path.getsize(arg_2) == 0:\n logger.info(\"Ignoring album '%s' because of present 0-byte \"\n \".nomedia file\", arg_0.name)\n\n # subdirs have been added to the gallery already, remove them\n # there, too\n _remove_albums_with_subdirs(arg_0.gallery.albums, [arg_0.path])\n try:\n os.rmdir(arg_0.dst_path)\n except OSError as e:\n # directory was created and populated with images in a\n # previous run => keep it\n pass\n\n # cannot set albums => empty subdirs so that no albums are\n # generated\n arg_0.subdirs = []\n arg_0.medias = []\n\n else:\n with open(arg_2, \"r\") as nomediaFile:\n logger.info(\"Found a .nomedia file in %s, ignoring its \"\n \"entries\", arg_0.name)\n arg_5 = nomediaFile.read().split(\"\\n\")\n\n arg_0.medias = [media for media in arg_0.medias\n if media.src_filename not in arg_5]\n arg_0.subdirs = 
[dirname for dirname in arg_0.subdirs\n if dirname not in arg_5]\n\n # subdirs have been added to the gallery already, remove\n # them there, too\n _remove_albums_with_subdirs(arg_0.gallery.albums,\n arg_5, arg_0.path + os.path.sep)"} +{"_id": "doc_6121", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7,\n arg_8):\n \"\"\"Run sigal to process a directory.\n\n If provided, 'source', 'destination' and 'theme' will override the\n corresponding values from the settings file.\n\n \"\"\"\n arg_9 = ((arg_2 and logging.DEBUG) or (arg_3 and logging.INFO) or\n logging.WARNING)\n init_logging(__name__, arg_9=arg_9)\n arg_10 = logging.getLogger(__name__)\n\n if not os.path.isfile(arg_5):\n arg_10.error(\"Settings file not found: %s\", arg_5)\n sys.exit(1)\n\n arg_11 = time.time()\n arg_12 = read_settings(arg_5)\n\n for arg_13 in ('source', 'destination', 'theme'):\n arg_14 = locals()[arg_13]\n if arg_14 is not None:\n arg_12[arg_13] = os.path.abspath(arg_14)\n arg_10.info(\"%12s : %s\", arg_13.capitalize(), arg_12[arg_13])\n\n if not arg_12['source'] or not os.path.isdir(arg_12['source']):\n arg_10.error(\"Input directory not found: %s\", arg_12['source'])\n sys.exit(1)\n\n # on windows os.path.relpath raises a ValueError if the two paths are on\n # different drives, in that case we just ignore the exception as the two\n # paths are anyway not relative\n arg_15 = True\n try:\n arg_15 = os.path.relpath(arg_12['destination'],\n arg_12['source']).startswith('..')\n except ValueError:\n pass\n\n if not arg_15:\n arg_10.error(\"Output directory should be outside of the input \"\n \"directory.\")\n sys.exit(1)\n\n if arg_7:\n arg_12['title'] = arg_7\n\n locale.setlocale(locale.LC_ALL, arg_12['locale'])\n init_plugins(arg_12)\n\n arg_16 = Gallery(arg_12, arg_8=arg_8)\n arg_16.Func(arg_4=arg_4)\n\n # copy extra files\n for arg_17, arg_18 in arg_12['files_to_copy']:\n arg_17 = os.path.join(arg_12['source'], arg_17)\n arg_18 = os.path.join(arg_12['destination'], arg_18)\n arg_10.debug('Copy %s to %s', arg_17, arg_18)\n copy(arg_17, arg_18, symlink=arg_12['orig_link'], rellink=arg_12['rel_link'])\n\n arg_19 = arg_16.stats\n\n def format_stats(arg_20):\n arg_21 = [\"{} {}\".format(arg_19[arg_20 + '_' + subtype], subtype)\n for subtype in ('skipped', 'failed')\n if arg_19[arg_20 + '_' + subtype] > 0]\n arg_21 = ' ({})'.format(', '.join(arg_21)) if arg_21 else ''\n return '{} {}s{}'.format(arg_19[arg_20], arg_20, arg_21)\n\n print('Done.\\nProcessed {} and {} in {:.2f} seconds.'\n .format(format_stats('image'), format_stats('video'),\n time.time() - arg_11))"} +{"_id": "doc_6122", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run a simple web Funcr.\"\"\"\n if os.path.exists(arg_0):\n pass\n elif os.path.exists(arg_2):\n arg_3 = read_settings(arg_2)\n arg_0 = arg_3.get('destination')\n if not os.path.exists(arg_0):\n sys.stderr.write(\"The '{}' directory doesn't exist, maybe try \"\n \"building first?\\n\".format(arg_0))\n sys.exit(1)\n else:\n sys.stderr.write(\"The {destination} directory doesn't exist \"\n \"and the config file ({config}) could not be read.\\n\"\n .format(arg_0=arg_0, arg_2=arg_2))\n sys.exit(2)\n\n print('DESTINATION : {}'.format(arg_0))\n os.chdir(arg_0)\n arg_4 = Funcr.SimpleHTTPRequestHandler\n arg_5 = socketFuncr.TCPServer((\"\", arg_1), arg_4, False)\n print(\" * Running on http://127.0.0.1:{}/\".format(arg_1))\n\n try:\n arg_5.allow_reuse_address = True\n arg_5.Funcr_bind()\n arg_5.Funcr_activate()\n arg_5.Func_forever()\n except 
KeyboardInterrupt:\n print('\\nAll done!')"} +{"_id": "doc_6123", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Write metadata keys to .md file.\n\n TARGET can be a media file or an album directory. KEYS are key/value pairs.\n\n Ex, to set the title of test.jpg to \"My test image\":\n\n sigal Func test.jpg title \"My test image\"\n \"\"\"\n\n if not os.path.exists(arg_0):\n sys.stderr.write(\"The target {} does not exist.\\n\".format(arg_0))\n sys.exit(1)\n if len(arg_1) < 2 or len(arg_1) % 2 > 0:\n sys.stderr.write(\"Need an even number of arguments.\\n\")\n sys.exit(1)\n\n if os.path.isdir(arg_0):\n arg_3 = os.path.join(arg_0, 'index.md')\n else:\n arg_3 = os.path.splitext(arg_0)[0] + '.md'\n if os.path.exists(arg_3) and not arg_2:\n sys.stderr.write(\"Description file '{}' already exists. \"\n \"Use --overwrite to overwrite it.\\n\".format(arg_3))\n sys.exit(2)\n\n with open(arg_3, \"w\") as fp:\n for arg_4 in range(len(arg_1) // 2):\n arg_5, arg_6 = arg_1[arg_4 * 2:(arg_4 + 1) * 2]\n fp.write(\"{}: {}\\n\".format(arg_5.capitalize(), arg_6))\n print(\"{} metadata key(s) written to {}\".format(len(arg_1) // 2, arg_3))"} +{"_id": "doc_6124", "title": "", "text": "def Func(arg_0):\n \"\"\"Create output directories for thumbnails and original images.\"\"\"\n check_or_create_dir(arg_0.dst_path)\n\n if arg_0.medias:\n check_or_create_dir(join(arg_0.dst_path,\n arg_0.settings['thumb_dir']))\n\n if arg_0.medias and arg_0.settings['keep_orig']:\n arg_0.orig_path = join(arg_0.dst_path, arg_0.settings['orig_dir'])\n check_or_create_dir(arg_0.orig_path)"} +{"_id": "doc_6125", "title": "", "text": "def Func(arg_0):\n \"\"\"URL of the album, relative to its parent.\"\"\"\n Func = arg_0.name.encode('utf-8')\n return url_quote(Func) + '/' + arg_0.url_ext"} +{"_id": "doc_6126", "title": "", "text": "def Func(arg_0):\n \"\"\"Path to the thumbnail of the album.\"\"\"\n\n if arg_0._thumbnail:\n # stop if it is already set\n return arg_0._thumbnail\n\n # Test the thumbnail from the Markdown file.\n Func = arg_0.meta.get('thumbnail', [''])[0]\n\n if Func and isfile(join(arg_0.src_path, Func)):\n arg_0._thumbnail = url_from_path(join(\n arg_0.name, get_thumb(arg_0.settings, Func)))\n arg_0.logger.debug(\"Thumbnail for %r : %s\", arg_0, arg_0._thumbnail)\n return arg_0._thumbnail\n else:\n # find and return the first landscape image\n for arg_3 in arg_0.medias:\n arg_4 = splitext(arg_3.filename)[1]\n if arg_4.lower() in arg_0.settings['img_extensions']:\n # Use f.size if available as it is quicker (in cache), but\n # fallback to the size of src_path if dst_path is missing\n arg_5 = arg_3.size\n if arg_5 is None:\n arg_5 = get_size(arg_3.src_path)\n\n if arg_5['width'] > arg_5['height']:\n arg_0._thumbnail = (url_quote(arg_0.name) + '/' +\n arg_3.thumbnail)\n arg_0.logger.debug(\n \"Use 1st landscape image as thumbnail for %r : %s\",\n arg_0, arg_0._thumbnail)\n return arg_0._thumbnail\n\n # else simply return the 1st media file\n if not arg_0._thumbnail and arg_0.medias:\n for arg_6 in arg_0.medias:\n if arg_6.thumbnail is not None:\n arg_0._thumbnail = (url_quote(arg_0.name) + '/' +\n arg_6.thumbnail)\n break\n else:\n arg_0.logger.warning(\"No thumbnail found for %r\", arg_0)\n return None\n\n arg_0.logger.debug(\"Use the 1st image as thumbnail for %r : %s\",\n arg_0, arg_0._thumbnail)\n return arg_0._thumbnail\n\n # use the thumbnail of their sub-directories\n if not arg_0._thumbnail:\n for arg_7, arg_8 in arg_0.gallery.get_albums(arg_0.path):\n if arg_8.thumbnail:\n arg_0._thumbnail = 
(url_quote(arg_0.name) + '/' +\n arg_8.thumbnail)\n arg_0.logger.debug(\n \"Using thumbnail from sub-directory for %r : %s\",\n arg_0, arg_0._thumbnail)\n return arg_0._thumbnail\n\n arg_0.logger.error('Thumbnail not found for %r', arg_0)\n return None"} +{"_id": "doc_6127", "title": "", "text": "def Func(arg_0):\n \"\"\"Make a ZIP archive with all media files and return its path.\n\n If the ``Func_gallery`` setting is set,it contains the location of a Func\n archive with all original images of the corresponding directory.\n\n \"\"\"\n arg_1 = arg_0.settings['Func_gallery']\n\n if arg_1 and len(arg_0) > 0:\n arg_1 = arg_1.format(album=arg_0)\n arg_2 = join(arg_0.dst_path, arg_1)\n if (arg_0.settings.get('Func_skip_if_exists', False) and\n isfile(arg_2)):\n arg_0.logger.debug(\"Archive %s already created, passing\",\n arg_2)\n return arg_1\n\n arg_3 = Funcfile.ZipFile(arg_2, 'w', allowZip64=True)\n arg_4 = ('src_path' if arg_0.settings['Func_media_format'] == 'orig'\n else 'dst_path')\n\n for arg_5 in arg_0:\n arg_6 = getattr(arg_5, arg_4)\n try:\n arg_3.write(arg_6, os.path.split(arg_6)[1])\n except OSError as e:\n arg_0.logger.warn('Failed to add %s to the ZIP: %s', arg_5, e)\n\n arg_3.close()\n arg_0.logger.debug('Created ZIP archive %s', arg_2)\n return arg_1"} +{"_id": "doc_6128", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"Create the image gallery\"\n\n if not arg_0.albums:\n arg_0.logger.warning(\"No albums found.\")\n return\n\n def log_func(arg_2):\n # 63 is the total length of progressbar, label, percentage, etc\n arg_3 = get_terminal_size()[0] - 64\n if arg_2 and arg_3 > 10:\n return arg_2.name[:arg_3]\n else:\n return \"\"\n\n try:\n with progressbar(arg_0.albums.values(), label=\"Collecting files\",\n item_show_func=log_func, show_eta=False,\n file=arg_0.progressbar_target) as albums:\n arg_4 = [f for arg_11 in albums\n for f in arg_0.process_dir(arg_11, arg_1=arg_1)]\n except KeyboardInterrupt:\n sys.exit('Interrupted')\n\n arg_5 = {'label': \"Processing files\",\n 'show_pos': True,\n 'file': arg_0.progressbar_target}\n arg_6 = []\n\n if arg_0.pool:\n try:\n with progressbar(length=len(arg_4), **arg_5) as bar:\n for arg_7 in arg_0.pool.imap_unordered(worker, arg_4):\n if arg_7:\n arg_6.append(arg_7)\n bar.update(1)\n arg_0.pool.close()\n arg_0.pool.join()\n except KeyboardInterrupt:\n arg_0.pool.terminate()\n sys.exit('Interrupted')\n except pickle.PicklingError:\n arg_0.logger.critical(\n \"Failed to process files with the multiprocessing feature.\"\n \" This can be caused by some module import or object \"\n \"defined in the settings file, which can't be serialized.\",\n exc_info=True)\n sys.exit('Abort')\n else:\n with progressbar(arg_4, **arg_5) as medias:\n for arg_8 in medias:\n arg_7 = process_file(arg_8)\n if arg_7:\n arg_6.append(arg_7)\n\n if arg_6:\n arg_0.remove_files(arg_6)\n\n if arg_0.settings['write_html']:\n arg_9 = AlbumPageWriter(arg_0.settings,\n index_title=arg_0.title)\n arg_10 = AlbumListPageWriter(arg_0.settings,\n index_title=arg_0.title)\n with progressbar(arg_0.albums.values(),\n label=\"%16s\" % \"Writing files\",\n item_show_func=log_func, show_eta=False,\n file=arg_0.progressbar_target) as albums:\n for arg_11 in albums:\n if arg_11.albums:\n if arg_11.medias:\n arg_0.logger.warning(\n \"Album %s contains sub-albums and images. \"\n \"Please move images to their own sub-album. 
\"\n \"Images in album %s will not be visible.\",\n arg_11.title, arg_11.title\n )\n arg_10.write(arg_11)\n else:\n arg_9.write(arg_11)\n print('')\n\n signals.gallery_Func.send(arg_0)"} +{"_id": "doc_6129", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Process a list of images in a directory.\"\"\"\n for arg_3 in arg_1:\n if isfile(arg_3.dst_path) and not arg_2:\n arg_0.logger.info(\"%s exists - skipping\", arg_3.filename)\n arg_0.stats[arg_3.type + '_skipped'] += 1\n else:\n arg_0.stats[arg_3.type] += 1\n yield (arg_3.type, arg_3.path, arg_3.filename, arg_3.src_path, arg_1.dst_path,\n arg_0.settings)"} +{"_id": "doc_6130", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns an image with reduced opacity.\"\"\"\n assert arg_1 >= 0 and arg_1 <= 1\n if arg_0.mode != 'RGBA':\n arg_0 = arg_0.convert('RGBA')\n else:\n arg_0 = arg_0.copy()\n arg_2 = arg_0.split()[3]\n arg_2 = ImageEnhance.Brightness(arg_2).enhance(arg_1)\n arg_0.putalpha(arg_2)\n return arg_0"} +{"_id": "doc_6131", "title": "", "text": "def Func(arg_0, arg_1='ffmpeg'):\n \"\"\"Returns the dimensions of the video.\"\"\"\n\n arg_2 = subprocess.run([arg_1, '-i', arg_0], arg_3=subprocess.PIPE)\n arg_3 = arg_2.stderr.decode('utf8')\n arg_4 = re.compile(r'Stream.*Video.* ([0-9]+)x([0-9]+)')\n arg_5 = arg_4.search(arg_3)\n arg_6 = re.compile(r'rotate\\s*:\\s*-?(90|270)')\n arg_7 = arg_6.search(arg_3)\n\n if arg_5:\n arg_8, arg_9 = int(arg_5.groups()[0]), int(arg_5.groups()[1])\n else:\n arg_8 = arg_9 = 0\n if arg_7:\n arg_8, arg_9 = arg_9, arg_8\n return arg_8, arg_9"} +{"_id": "doc_6132", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Video processor.\n\n :param source: path to a video\n :param outname: path to the generated video\n :param settings: settings dict\n :param options: array of options passed to ffmpeg\n\n \"\"\"\n arg_4 = logging.getLogger(__name__)\n\n # Don't transcode if source is in the required format and\n # has fitting datedimensions, copy instead.\n arg_5 = arg_2['video_converter']\n arg_6, arg_7 = video_size(arg_0, arg_5=arg_5)\n arg_8, arg_9 = arg_2['video_size']\n arg_4.debug('Video size: %i, %i -> %i, %i', arg_6, arg_7, arg_8, arg_9)\n\n arg_10, arg_11 = splitext(arg_0)\n arg_10, arg_12 = splitext(arg_1)\n if arg_12 == arg_11 and arg_6 <= arg_8 and arg_7 <= arg_9:\n arg_4.debug('Video is smaller than the max size, copying it instead')\n shutil.copy(arg_0, arg_1)\n return\n\n # http://stackoverflow.com/questions/8218363/maintaining-ffmpeg-aspect-ratio\n # + I made a drawing on paper to figure this out\n if arg_9 * arg_6 < arg_7 * arg_8:\n # biggest fitting dimension is height\n arg_13 = ['-vf', \"scale=trunc(oh*a/2)*2:%i\" % arg_9]\n else:\n # biggest fitting dimension is width\n arg_13 = ['-vf', \"scale=%i:trunc(ow/a/2)*2\" % arg_8]\n\n # do not resize if input dimensions are smaller than output dimensions\n if arg_6 <= arg_8 and arg_7 <= arg_9:\n arg_13 = []\n\n # Encoding options improved, thanks to\n # http://ffmpeg.org/trac/ffmpeg/wiki/vpxEncodingGuide\n arg_14 = [arg_5, '-i', arg_0, '-y'] # -y to overwrite output files\n if arg_3 is not None:\n arg_14 += arg_3\n arg_14 += arg_13 + [arg_1]\n\n arg_4.debug('Processing video: %s', ' '.join(arg_14))\n check_subprocess(arg_14, arg_0, arg_1)"} +{"_id": "doc_6133", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate the HTML page and save it.\"\"\"\n\n arg_2 = arg_0.template.render(**arg_0.generate_context(arg_1))\n arg_3 = os.path.join(arg_1.dst_path, arg_1.output_file)\n\n with open(arg_3, 
'w', encoding='utf-8') as f:\n f.Func(arg_2)"} +{"_id": "doc_6134", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the path to the thumb.\n\n examples:\n >>> default_settings = create_settings()\n >>> Func(default_settings, \"bar/foo.jpg\")\n \"bar/thumbnails/foo.jpg\"\n >>> Func(default_settings, \"bar/foo.png\")\n \"bar/thumbnails/foo.png\"\n\n for videos, it returns a jpg file:\n >>> Func(default_settings, \"bar/foo.webm\")\n \"bar/thumbnails/foo.jpg\"\n \"\"\"\n\n arg_2, arg_3 = os.path.split(arg_1)\n arg_4, arg_5 = os.path.splitext(arg_3)\n\n if arg_5.lower() in arg_0['video_extensions']:\n arg_5 = '.jpg'\n return join(arg_2, arg_0['thumb_dir'], arg_0['thumb_prefix'] +\n arg_4 + arg_0['thumb_suffix'] + arg_5)"} +{"_id": "doc_6135", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Generate the media page and save it '''\n\n from sigal import __url__ as sigal_link\n arg_3 = os.path.join(arg_1.dst_path, arg_2[0].filename)\n\n arg_4 = arg_0.template.render({\n 'album': arg_1,\n 'media': arg_2[0],\n 'previous_media': arg_2[-1],\n 'next_media': arg_2[1],\n 'index_title': arg_0.index_title,\n 'settings': arg_0.settings,\n 'sigal_link': sigal_link,\n 'theme': {'name': os.path.basename(arg_0.theme),\n 'url': url_from_path(os.path.relpath(arg_0.theme_path,\n arg_1.dst_path))},\n })\n\n arg_5 = \"%s.html\" % arg_3\n\n with open(arg_5, 'w', encoding='utf-8') as f:\n f.Func(arg_4)"} +{"_id": "doc_6136", "title": "", "text": "def Func(\n arg_0: arg_1[\"Config\"], arg_2: arg_3[arg_4[arg_5, arg_6]] = None, **arg_7: arg_6\n ) -> \"Config\":\n \"\"\"Create a configuration from a mapping.\n\n This allows either a mapping to be directly passed or as\n keyword arguments, for example,\n\n .. code-block:: python\n\n config = {'keep_alive_timeout': 10}\n Config.Func(config)\n Config.form_mapping(keep_alive_timeout=10)\n\n Arguments:\n mapping: Optionally a mapping object.\n kwargs: Optionally a collection of keyword arguments to\n form a mapping.\n \"\"\"\n arg_8: Dict[arg_5, arg_6] = {}\n if arg_2 is not None:\n arg_8.update(arg_2)\n arg_8.update(arg_7)\n arg_9 = arg_0()\n for arg_10, arg_11 in arg_8.items():\n try:\n setattr(arg_9, arg_10, arg_11)\n except AttributeError:\n pass\n\n return arg_9"} +{"_id": "doc_6137", "title": "", "text": "def Func(arg_0: arg_1[\"Config\"], arg_2: arg_3) -> \"Config\":\n \"\"\"Create a configuration from a Python file.\n\n .. code-block:: python\n\n Config.Func('hypercorn_config.py')\n\n Arguments:\n filename: The filename which gives the path to the file.\n \"\"\"\n arg_4 = os.fspath(arg_2)\n arg_5 = importlib.util.spec_from_file_location(\"module.name\", arg_4)\n arg_6 = importlib.util.module_from_spec(arg_5)\n arg_5.loader.exec_module(arg_6) # type: ignore\n return arg_0.from_object(arg_6)"} +{"_id": "doc_6138", "title": "", "text": "def Func(arg_0: arg_1[\"Config\"], arg_2: arg_3) -> \"Config\":\n \"\"\"Load the configuration values from a TOML formatted file.\n\n This allows configuration to be loaded as so\n\n .. code-block:: python\n\n Config.Func('config.toml')\n\n Arguments:\n filename: The filename which gives the path to the file.\n \"\"\"\n arg_4 = os.fspath(arg_2)\n with open(arg_4) as file_:\n arg_5 = toml.load(file_)\n return arg_0.from_mapping(arg_5)"} +{"_id": "doc_6139", "title": "", "text": "def Func(arg_0: arg_1[\"Config\"], arg_2: arg_3[arg_4, arg_5]) -> \"Config\":\n \"\"\"Create a configuration from a Python object.\n\n This can be used to reference modules or objects within\n modules for example,\n\n .. 
code-block:: python\n\n Config.Func('module')\n Config.Func('module.instance')\n from module import instance\n Config.Func(instance)\n\n are valid.\n\n Arguments:\n instance: Either a str referencing a python object or the\n object itself.\n\n \"\"\"\n if isinstance(arg_2, arg_5):\n try:\n arg_6, arg_7 = arg_2.rsplit(\".\", 1)\n except ValueError:\n arg_6 = arg_2\n arg_2 = importlib.import_module(arg_2)\n else:\n arg_8 = importlib.import_module(arg_6)\n arg_2 = getattr(arg_8, arg_7)\n\n arg_9 = {\n key: getattr(arg_2, key)\n for key in dir(arg_2)\n if not isinstance(getattr(arg_2, key), types.ModuleType)\n }\n return arg_0.from_mapping(arg_9)"} +{"_id": "doc_6140", "title": "", "text": "def Func(\n arg_0=100.0,\n arg_1=None,\n arg_2=None,\n arg_3=False,\n):\n \"\"\"Creates a set of zipkin attributes for a span.\n\n :param sample_rate: Float between 0.0 and 100.0 to determine sampling rate\n :type sample_rate: float\n :param trace_id: Optional 16-character hex string representing a trace_id.\n If this is None, a random trace_id will be generated.\n :type trace_id: str\n :param span_id: Optional 16-character hex string representing a span_id.\n If this is None, a random span_id will be generated.\n :type span_id: str\n :param use_128bit_trace_id: If true, generate 128-bit trace_ids\n :type use_128bit_trace_id: boolean\n \"\"\"\n # Calculate if this trace is sampled based on the sample rate\n if arg_1 is None:\n if arg_3:\n arg_1 = generate_random_128bit_string()\n else:\n arg_1 = generate_random_64bit_string()\n if arg_2 is None:\n arg_2 = generate_random_64bit_string()\n if arg_0 == 0.0:\n arg_4 = False\n else:\n arg_4 = (random.random() * 100) < arg_0\n\n return ZipkinAttrs(\n arg_1=arg_1,\n arg_2=arg_2,\n parent_span_id=None,\n flags='0',\n arg_4=arg_4,\n )"} +{"_id": "doc_6141", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Exit the span context. Zipkin attrs are pushed onto the\n threadlocal stack regardless of sampling, so they always need to be\n popped off. The actual logging of spans depends on sampling and that\n the logging was correctly set up.\n \"\"\"\n\n if arg_0.do_pop_attrs:\n arg_0.get_tracer().pop_zipkin_attrs()\n\n # If no transport is configured, there's no reason to create a new Span.\n # This also helps avoiding memory leaks since without a transport nothing\n # would pull spans out of get_tracer().\n if not arg_0.get_tracer().is_transport_configured():\n return\n\n # Add the error annotation if an exception occurred\n if any((arg_1, arg_2, arg_3)):\n arg_4 = u'{0}: {1}'.format(arg_1.__name__, arg_2)\n arg_0.update_binary_annotations({\n ERROR_KEY: arg_4,\n })\n\n # Logging context is only initialized for \"root\" spans of the local\n # process (i.e. this zipkin_span not inside of any other local\n # zipkin_spans)\n if arg_0.logging_context:\n try:\n arg_0.logging_context.Func()\n except Exception as ex:\n arg_5 = 'Error emitting zipkin trace. {}'.format(\n repr(ex),\n )\n log.error(arg_5)\n finally:\n arg_0.logging_context = None\n arg_0.get_tracer().clear()\n arg_0.get_tracer().set_transport_configured(configured=False)\n return\n\n # If we've gotten here, that means that this span is a child span of\n # this context's root span (i.e. 
it's a zipkin_span inside another\n # zipkin_span).\n arg_7 = time.time()\n # If self.duration is set, it means the user wants to override it\n if arg_0.duration:\n arg_8 = arg_0.duration\n else:\n arg_8 = arg_7 - arg_0.start_timestamp\n\n arg_9 = create_endpoint(arg_0.port, arg_0.service_name, arg_0.host)\n arg_0.get_tracer().add_span(Span(\n trace_id=arg_0.zipkin_attrs.trace_id,\n name=arg_0.span_name,\n parent_id=arg_0.zipkin_attrs.parent_span_id,\n span_id=arg_0.zipkin_attrs.span_id,\n kind=arg_0.kind,\n timestamp=arg_0.timestamp if arg_0.timestamp else arg_0.start_timestamp,\n arg_8=arg_8,\n annotations=arg_0.annotations,\n local_endpoint=arg_9,\n remote_endpoint=arg_0.remote_endpoint,\n tags=arg_0.binary_annotations,\n ))"} +{"_id": "doc_6142", "title": "", "text": "def Func(\n arg_0,\n arg_1=0,\n arg_2='unknown',\n arg_3='127.0.0.1',\n ):\n \"\"\"Adds a 'sa' binary annotation to the current span.\n\n 'sa' binary annotations are useful for situations where you need to log\n where a request is going but the destination doesn't support zipkin.\n\n Note that the span must have 'cs'/'cr' annotations.\n\n :param port: The port number of the destination\n :type port: int\n :param service_name: The name of the destination service\n :type service_name: str\n :param host: Host address of the destination\n :type host: str\n \"\"\"\n if arg_0.kind != Kind.CLIENT:\n # TODO: trying to set a sa binary annotation for a non-client span\n # should result in a logged error\n return\n\n arg_4 = create_endpoint(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n )\n if not arg_0.logging_context:\n if arg_0.remote_endpoint is not None:\n raise ValueError('SA annotation already set.')\n arg_0.remote_endpoint = arg_4\n else:\n if arg_0.logging_context.remote_endpoint is not None:\n raise ValueError('SA annotation already set.')\n arg_0.logging_context.remote_endpoint = arg_4"} +{"_id": "doc_6143", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Overrides the current span name.\n\n This is useful if you don't know the span name yet when you create the\n zipkin_span object. i.e. pyramid_zipkin doesn't know which route the\n request matched until the function wrapped by the context manager\n completes.\n\n :param name: New span name\n :type name: str\n \"\"\"\n arg_0.span_name = arg_1\n if arg_0.logging_context:\n arg_0.logging_context.span_name = arg_1"} +{"_id": "doc_6144", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=True):\n \"\"\"Creates a new Endpoint object.\n\n :param port: TCP/UDP port. Defaults to 0.\n :type port: int\n :param service_name: service name as a str. Defaults to 'unknown'.\n :type service_name: str\n :param host: ipv4 or ipv6 address of the host. 
Defaults to the\n current host ip.\n :type host: str\n :param use_defaults: whether to use defaults.\n :type use_defaults: bool\n :returns: zipkin Endpoint object\n \"\"\"\n if arg_3:\n if arg_0 is None:\n arg_0 = 0\n if arg_1 is None:\n arg_1 = 'unknown'\n if arg_2 is None:\n try:\n arg_2 = socket.gethostbyname(socket.gethostname())\n except socket.gaierror:\n arg_2 = '127.0.0.1'\n\n arg_4 = None\n arg_5 = None\n\n if arg_2:\n # Check ipv4 or ipv6.\n try:\n socket.inet_pton(socket.AF_INET, arg_2)\n arg_4 = arg_2\n except socket.error:\n # If it's not an ipv4 address, maybe it's ipv6.\n try:\n socket.inet_pton(socket.AF_INET6, arg_2)\n arg_5 = arg_2\n except socket.error:\n # If it's neither ipv4 or ipv6, leave both ip addresses unset.\n pass\n\n return Endpoint(\n arg_4=arg_4,\n arg_5=arg_5,\n arg_0=arg_0,\n arg_1=arg_1,\n )"} +{"_id": "doc_6145", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Creates a copy of a given endpoint with a new service name.\n\n :param endpoint: existing Endpoint object\n :type endpoint: Endpoint\n :param new_service_name: new service name\n :type new_service_name: str\n :returns: zipkin new Endpoint object\n \"\"\"\n return Endpoint(\n service_name=arg_1,\n ipv4=arg_0.ipv4,\n ipv6=arg_0.ipv6,\n port=arg_0.port,\n )"} +{"_id": "doc_6146", "title": "", "text": "def Func(arg_0):\n \"\"\"Builds and returns a V1 Span.\n\n :return: newly generated _V1Span\n :rtype: _V1Span\n \"\"\"\n # We are simulating a full two-part span locally, so set cs=sr and ss=cr\n arg_1 = OrderedDict([\n ('cs', arg_0.timestamp),\n ('sr', arg_0.timestamp),\n ('ss', arg_0.timestamp + arg_0.duration),\n ('cr', arg_0.timestamp + arg_0.duration),\n ])\n\n if arg_0.kind != Kind.LOCAL:\n # If kind is not LOCAL, then we only want client or\n # server side annotations.\n for arg_2 in _DROP_ANNOTATIONS_BY_KIND[arg_0.kind]:\n del arg_1[arg_2]\n\n # Add user-defined annotations. We write them in full_annotations\n # instead of the opposite so that user annotations will override\n # any automatically generated annotation.\n arg_1.update(arg_0.annotations)\n\n return _V1Span(\n trace_id=arg_0.trace_id,\n name=arg_0.name,\n parent_id=arg_0.parent_id,\n id=arg_0.span_id,\n timestamp=arg_0.timestamp if arg_0.shared is False else None,\n duration=arg_0.duration if arg_0.shared is False else None,\n endpoint=arg_0.local_endpoint,\n annotations=arg_1,\n binary_annotations=arg_0.tags,\n remote_endpoint=arg_0.remote_endpoint,\n )"} +{"_id": "doc_6147", "title": "", "text": "def Func(arg_0):\n \"\"\"Encode list of protobuf Spans to binary.\n\n :param pb_spans: list of protobuf Spans.\n :type pb_spans: list of zipkin_pb2.Span\n :return: encoded list.\n :rtype: bytes\n \"\"\"\n arg_1 = zipkin_pb2.ListOfSpans()\n arg_1.spans.extend(arg_0)\n return arg_1.SerializeToString()"} +{"_id": "doc_6148", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts a py_zipkin Span in a protobuf Span.\n\n :param span: py_zipkin Span to convert.\n :type span: py_zipkin.encoding.Span\n :return: protobuf's Span\n :rtype: zipkin_pb2.Span\n \"\"\"\n\n # Protobuf's composite types (i.e. 
Span's local_endpoint) are immutable.\n # So we can't create a zipkin_pb2.Span here and then set the appropriate\n # fields since `pb_span.local_endpoint = zipkin_pb2.Endpoint` fails.\n # Instead we just create the kwargs and pass them in to the Span constructor.\n arg_1 = {}\n\n arg_1['trace_id'] = _hex_to_bytes(arg_0.trace_id)\n\n if arg_0.parent_id:\n arg_1['parent_id'] = _hex_to_bytes(arg_0.parent_id)\n\n arg_1['id'] = _hex_to_bytes(arg_0.span_id)\n\n arg_2 = _get_protobuf_kind(arg_0.kind)\n if arg_2:\n arg_1['kind'] = arg_2\n\n if arg_0.name:\n arg_1['name'] = arg_0.name\n if arg_0.timestamp:\n arg_1['timestamp'] = int(arg_0.timestamp * 1000 * 1000)\n if arg_0.duration:\n arg_1['duration'] = int(arg_0.duration * 1000 * 1000)\n\n if arg_0.local_endpoint:\n arg_1['local_endpoint'] = _convert_endpoint(arg_0.local_endpoint)\n\n if arg_0.remote_endpoint:\n arg_1['remote_endpoint'] = _convert_endpoint(arg_0.remote_endpoint)\n\n if len(arg_0.annotations) > 0:\n arg_1['annotations'] = _convert_annotations(arg_0.annotations)\n\n if len(arg_0.tags) > 0:\n arg_1['tags'] = arg_0.tags\n\n if arg_0.debug:\n arg_1['debug'] = arg_0.debug\n\n if arg_0.shared:\n arg_1['shared'] = arg_0.shared\n\n return zipkin_pb2.Span(**arg_1)"} +{"_id": "doc_6149", "title": "", "text": "def Func(arg_0):\n \"\"\"Encodes hexadecimal ids to big-endian binary.\n\n :param hex_id: hexadecimal id to encode.\n :type hex_id: str\n :return: binary representation.\n :rtype: bytes\n \"\"\"\n if len(arg_0) <= 16:\n arg_1 = unsigned_hex_to_signed_int(arg_0)\n return struct.pack('>q', arg_1)\n else:\n # There's no 16-byte encoding in Python's struct. So we convert the\n # id as 2 64 bit ids and then concatenate the result.\n\n # NOTE: we count 16 chars from the right (:-16) rather than the left so\n # that ids with less than 32 chars will be correctly pre-padded with 0s.\n arg_2 = unsigned_hex_to_signed_int(arg_0[:-16])\n arg_3 = struct.pack('>q', arg_2)\n\n arg_4 = unsigned_hex_to_signed_int(arg_0[-16:])\n arg_5 = struct.pack('>q', arg_4)\n\n return arg_3 + arg_5"} +{"_id": "doc_6150", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts py_zipkin's Kind to Protobuf's Kind.\n\n :param kind: py_zipkin's Kind.\n :type kind: py_zipkin.Kind\n :return: corresponding protobuf's kind value.\n :rtype: zipkin_pb2.Span.Kind\n \"\"\"\n if arg_0 == Kind.CLIENT:\n return zipkin_pb2.Span.CLIENT\n elif arg_0 == Kind.SERVER:\n return zipkin_pb2.Span.SERVER\n elif arg_0 == Kind.PRODUCER:\n return zipkin_pb2.Span.PRODUCER\n elif arg_0 == Kind.CONSUMER:\n return zipkin_pb2.Span.CONSUMER\n return None"} +{"_id": "doc_6151", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts py_zipkin's Endpoint to Protobuf's Endpoint.\n\n :param endpoint: py_zipkin's endpoint to convert.\n :type endpoint: py_zipkin.encoding.Endpoint\n :return: corresponding protobuf's endpoint.\n :rtype: zipkin_pb2.Endpoint\n \"\"\"\n arg_1 = zipkin_pb2.Endpoint()\n\n if arg_0.service_name:\n arg_1.service_name = arg_0.service_name\n if arg_0.port and arg_0.port != 0:\n arg_1.port = arg_0.port\n if arg_0.ipv4:\n arg_1.ipv4 = socket.inet_pton(socket.AF_INET, arg_0.ipv4)\n if arg_0.ipv6:\n arg_1.ipv6 = socket.inet_pton(socket.AF_INET6, arg_0.ipv6)\n\n return arg_1"} +{"_id": "doc_6152", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create a zipkin annotation object\n\n :param timestamp: timestamp of when the annotation occurred in microseconds\n :param value: name of the annotation, such as 'sr'\n :param host: zipkin endpoint object\n\n :returns: zipkin 
annotation object\n \"\"\"\n return zipkin_core.Annotation(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2)"} +{"_id": "doc_6153", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Create a zipkin binary annotation object\n\n :param key: name of the annotation, such as 'http.uri'\n :param value: value of the annotation, such as a URI\n :param annotation_type: type of annotation, such as AnnotationType.I32\n :param host: zipkin endpoint object\n\n :returns: zipkin binary annotation object\n \"\"\"\n return zipkin_core.BinaryAnnotation(\n arg_0=arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n )"} +{"_id": "doc_6154", "title": "", "text": "def Func(arg_0=0, arg_1='unknown', arg_2=None, arg_3=None):\n \"\"\"Create a zipkin Endpoint object.\n\n An Endpoint object holds information about the network context of a span.\n\n :param port: int value of the port. Defaults to 0\n :param service_name: service name as a str. Defaults to 'unknown'\n :param ipv4: ipv4 host address\n :param ipv6: ipv6 host address\n :returns: thrift Endpoint object\n \"\"\"\n arg_4 = 0\n arg_5 = None\n\n # Convert ip address to network byte order\n if arg_2:\n arg_4 = struct.unpack('!i', socket.inet_pton(socket.AF_INET, arg_2))[0]\n\n if arg_3:\n arg_5 = socket.inet_pton(socket.AF_INET6, arg_3)\n\n # Zipkin passes unsigned values in signed types because Thrift has no\n # unsigned types, so we have to convert the value.\n arg_0 = struct.unpack('h', struct.pack('H', arg_0))[0]\n return zipkin_core.Endpoint(\n arg_2=arg_4,\n arg_3=arg_5,\n arg_0=arg_0,\n arg_1=arg_1,\n )"} +{"_id": "doc_6155", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Copies a copy of a given endpoint with a new service name.\n This should be very fast, on the order of several microseconds.\n\n :param endpoint: existing zipkin_core.Endpoint object\n :param service_name: str of new service name\n :returns: zipkin Endpoint object\n \"\"\"\n return zipkin_core.Endpoint(\n ipv4=arg_0.ipv4,\n port=arg_0.port,\n arg_1=arg_1,\n )"} +{"_id": "doc_6156", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reformat annotations dict to return list of corresponding zipkin_core objects.\n\n :param annotations: dict containing key as annotation name,\n value being timestamp in seconds(float).\n :type host: :class:`zipkin_core.Endpoint`\n :returns: a list of annotation zipkin_core objects\n :rtype: list\n \"\"\"\n return [\n create_annotation(int(arg_3 * 1000000), arg_2, arg_1)\n for arg_2, arg_3 in arg_0.items()\n ]"} +{"_id": "doc_6157", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6,\n arg_7,\n):\n \"\"\"Takes a bunch of span attributes and returns a thriftpy2 representation\n of the span. Timestamps passed in are in seconds, they're converted to\n microseconds before thrift encoding.\n \"\"\"\n # Check if trace_id is 128-bit. 
If so, record trace_id_high separately.\n arg_8 = len(arg_2)\n arg_9 = None\n if arg_8 > 16:\n assert arg_8 == 32\n arg_2, arg_9 = arg_2[16:], arg_2[:16]\n\n if arg_9:\n arg_9 = unsigned_hex_to_signed_int(arg_9)\n\n arg_10 = {\n 'trace_id': unsigned_hex_to_signed_int(arg_2),\n 'name': arg_3,\n 'id': unsigned_hex_to_signed_int(arg_0),\n 'annotations': arg_4,\n 'binary_annotations': arg_5,\n 'timestamp': int(arg_6 * 1000000) if arg_6 else None,\n 'duration': int(arg_7 * 1000000) if arg_7 else None,\n 'trace_id_high': arg_9,\n }\n if arg_1:\n arg_10['parent_id'] = unsigned_hex_to_signed_int(arg_1)\n return zipkin_core.Span(**arg_10)"} +{"_id": "doc_6158", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a TBinaryProtocol encoded Thrift span.\n\n :param thrift_span: thrift object to encode.\n :returns: thrift object in TBinaryProtocol format bytes.\n \"\"\"\n arg_1 = TMemoryBuffer()\n arg_2 = TBinaryProtocol(arg_1)\n arg_0.write(arg_2)\n\n return bytes(arg_1.getvalue())"} +{"_id": "doc_6159", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Returns a TBinaryProtocol encoded list of Thrift objects.\n\n :param binary_thrift_obj_list: list of TBinaryProtocol objects to encode.\n :returns: binary object representing the encoded list.\n \"\"\"\n arg_1 = TMemoryBuffer()\n write_list_begin(arg_1, TType.STRUCT, len(arg_0))\n for arg_2 in arg_0:\n arg_1.write(arg_2)\n\n return bytes(arg_1.getvalue())"} +{"_id": "doc_6160", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the span type and encoding for the message provided.\n\n The logic in this function is a Python port of\n https://github.com/openzipkin/zipkin/blob/master/zipkin/src/main/java/zipkin/internal/DetectingSpanDecoder.java\n\n :param message: span to perform operations on.\n :type message: byte array\n :returns: span encoding.\n :rtype: Encoding\n \"\"\"\n # In case message is sent in as non-bytearray format,\n # safeguard convert to bytearray before handling\n if isinstance(arg_0, six.string_types):\n # Even six.b is not enough to handle the py2/3 difference since\n # it uses latin-1 as default encoding and not utf-8.\n if six.PY2:\n arg_0 = six.b(arg_0) # pragma: no cover\n else:\n arg_0 = arg_0.encode('utf-8') # pragma: no cover\n\n if len(arg_0) < 2:\n raise ZipkinError(\"Invalid span format. 
Message too short.\")\n\n # Check for binary format\n if six.byte2int(arg_0) <= 16:\n if six.byte2int(arg_0) == 10 and six.byte2int(arg_0[1:2]) != 0:\n return Encoding.V2_PROTO3\n return Encoding.V1_THRIFT\n\n arg_1 = arg_0.decode('utf-8')\n\n # JSON case for list of spans\n if arg_1[0] == '[':\n arg_2 = json.loads(arg_1)\n if len(arg_2) > 0:\n # Assumption: All spans in a list are the same version\n # Logic: Search for identifying fields in all spans, if any span can\n # be strictly identified to a version, return that version.\n # Otherwise, if no spans could be strictly identified, default to V2.\n for arg_3 in arg_2:\n if any(arg_4 in arg_3 for arg_4 in _V2_ATTRIBUTES):\n return Encoding.V2_JSON\n elif (\n 'binaryAnnotations' in arg_3 or\n (\n 'annotations' in arg_3 and\n 'endpoint' in arg_3['annotations']\n )\n ):\n return Encoding.V1_JSON\n return Encoding.V2_JSON\n\n raise ZipkinError(\"Unknown or unsupported span encoding\")"} +{"_id": "doc_6161", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Converts encoded spans to a different encoding.\n\n :param spans: encoded input spans.\n :type spans: byte array\n :param output_encoding: desired output encoding.\n :type output_encoding: Encoding\n :param input_encoding: optional input encoding. If this is not specified, it'll\n try to understand the encoding automatically by inspecting the input spans.\n :type input_encoding: Encoding\n :returns: encoded spans.\n :rtype: byte array\n \"\"\"\n if not isinstance(arg_2, Encoding):\n arg_2 = detect_span_version_and_encoding(message=arg_0)\n\n if arg_2 == arg_1:\n return arg_0\n\n arg_3 = get_decoder(arg_2)\n arg_4 = get_encoder(arg_1)\n arg_5 = arg_3.decode_spans(arg_0)\n arg_6 = []\n\n # Encode each individual span\n for arg_7 in arg_5:\n arg_6.append(arg_4.encode_span(arg_7))\n\n # Outputs from encoder.encode_span() can be easily concatenated in a list\n return arg_4.encode_queue(arg_6)"} +{"_id": "doc_6162", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Encodes the current span to thrift.\"\"\"\n arg_2 = arg_1.build_v1_span()\n\n arg_3 = thrift.create_endpoint(\n arg_2.endpoint.port,\n arg_2.endpoint.service_name,\n arg_2.endpoint.ipv4,\n arg_2.endpoint.ipv6,\n )\n\n arg_4 = thrift.annotation_list_builder(\n arg_2.annotations,\n arg_3,\n )\n\n arg_5 = thrift.binary_annotation_list_builder(\n arg_2.binary_annotations,\n arg_3,\n )\n\n # Add sa/ca binary annotations\n if arg_1.remote_endpoint:\n arg_0.encode_remote_endpoint(\n arg_1.remote_endpoint,\n arg_1.kind,\n arg_5,\n )\n\n arg_6 = thrift.create_span(\n arg_2.id,\n arg_2.parent_id,\n arg_2.trace_id,\n arg_2.name,\n arg_4,\n arg_5,\n arg_2.timestamp,\n arg_2.duration,\n )\n\n arg_7 = thrift.span_to_bytes(arg_6)\n return arg_7"} +{"_id": "doc_6163", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Encodes a single span to protobuf.\"\"\"\n if not protobuf.installed():\n raise ZipkinError(\n 'protobuf encoding requires installing the protobuf\\'s extra '\n 'requirements. 
Use py-zipkin[protobuf] in your requirements.txt.'\n )\n\n arg_2 = protobuf.create_protobuf_span(arg_1)\n return protobuf.encode_pb_list([arg_2])"} +{"_id": "doc_6164", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Decodes an encoded list of spans.\n\n :param spans: encoded list of spans\n :type spans: bytes\n :return: list of spans\n :rtype: list of Span\n \"\"\"\n arg_2 = []\n arg_3 = TMemoryBuffer(arg_1)\n\n if six.byte2int(arg_1) == TType.STRUCT:\n arg_4, arg_5 = read_list_begin(arg_3)\n else:\n arg_5 = 1\n\n for arg_4 in range(arg_5):\n arg_6 = zipkin_core.Span()\n arg_6.read(TBinaryProtocol(arg_3))\n arg_2.append(arg_0._decode_thrift_span(arg_6))\n return arg_2"} +{"_id": "doc_6165", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Accepts a thrift decoded endpoint and converts it to an Endpoint.\n\n :param thrift_endpoint: thrift encoded endpoint\n :type thrift_endpoint: thrift endpoint\n :returns: decoded endpoint\n :rtype: Encoding\n \"\"\"\n arg_2 = None\n arg_3 = None\n arg_4 = struct.unpack('H', struct.pack('h', arg_1.port))[0]\n\n if arg_1.ipv4 != 0:\n arg_2 = socket.inet_ntop(\n socket.AF_INET,\n struct.pack('!i', arg_1.ipv4),\n )\n\n if arg_1.ipv6:\n arg_3 = socket.inet_ntop(socket.AF_INET6, arg_1.ipv6)\n\n return Endpoint(\n service_name=arg_1.service_name,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n )"} +{"_id": "doc_6166", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Accepts a thrift annotation and converts it to a v1 annotation.\n\n :param thrift_annotations: list of thrift annotations.\n :type thrift_annotations: list of zipkin_core.Span.Annotation\n :returns: (annotations, local_endpoint, kind)\n \"\"\"\n arg_2 = None\n arg_3 = Kind.LOCAL\n arg_4 = {}\n arg_5 = None\n arg_6 = None\n\n for arg_7 in arg_1:\n arg_4[arg_7.value] = arg_7.timestamp\n if arg_7.host:\n arg_2 = arg_0._convert_from_thrift_endpoint(\n arg_7.host,\n )\n\n if 'cs' in arg_4 and 'sr' not in arg_4:\n arg_3 = Kind.CLIENT\n arg_5 = arg_4['cs']\n arg_6 = arg_4['cr'] - arg_4['cs']\n elif 'cs' not in arg_4 and 'sr' in arg_4:\n arg_3 = Kind.SERVER\n arg_5 = arg_4['sr']\n arg_6 = arg_4['ss'] - arg_4['sr']\n\n arg_9 = {\n name: arg_0.seconds(ts) for name, ts in arg_4.items()\n if name not in _DROP_ANNOTATIONS\n }\n\n return arg_9, arg_2, arg_3, arg_5, arg_6"} +{"_id": "doc_6167", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Accepts a thrift decoded binary annotation and converts it\n to a v1 binary annotation.\n \"\"\"\n arg_2 = {}\n arg_3 = None\n arg_4 = None\n\n for arg_5 in arg_1:\n if arg_5.key == 'sa':\n arg_4 = arg_0._convert_from_thrift_endpoint(\n thrift_endpoint=arg_5.host,\n )\n else:\n arg_6 = arg_5.key\n\n arg_7 = arg_5.annotation_type\n arg_8 = arg_5.value\n\n if arg_7 == zipkin_core.AnnotationType.BOOL:\n arg_2[arg_6] = \"true\" if arg_8 == 1 else \"false\"\n elif arg_7 == zipkin_core.AnnotationType.STRING:\n arg_2[arg_6] = str(arg_8)\n else:\n log.warning('Only STRING and BOOL binary annotations are '\n 'supported right now and can be properly decoded.')\n\n if arg_5.host:\n arg_3 = arg_0._convert_from_thrift_endpoint(\n thrift_endpoint=arg_5.host,\n )\n\n return arg_2, arg_3, arg_4"} +{"_id": "doc_6168", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Decodes a thrift span.\n\n :param thrift_span: thrift span\n :type thrift_span: thrift Span object\n :returns: span builder representing this span\n :rtype: Span\n \"\"\"\n arg_2 = None\n arg_3 = None\n arg_4 = {}\n arg_5 = {}\n arg_6 = Kind.LOCAL\n arg_7 = None\n arg_8 = None\n arg_9 = None\n\n if 
arg_1.parent_id:\n arg_2 = arg_0._convert_unsigned_long_to_lower_hex(\n arg_1.parent_id,\n )\n\n if arg_1.annotations:\n arg_4, arg_3, arg_6, arg_8, arg_9 = \\\n arg_0._decode_thrift_annotations(arg_1.annotations)\n\n if arg_1.binary_annotations:\n arg_5, arg_3, arg_7 = \\\n arg_0._convert_from_thrift_binary_annotations(\n arg_1.binary_annotations,\n )\n\n arg_10 = arg_0._convert_trace_id_to_string(\n arg_1.trace_id,\n arg_1.trace_id_high,\n )\n\n return Span(\n arg_10=arg_10,\n name=arg_1.name,\n arg_2=arg_2,\n span_id=arg_0._convert_unsigned_long_to_lower_hex(arg_1.id),\n arg_6=arg_6,\n arg_8=arg_0.seconds(arg_8 or arg_1.timestamp),\n arg_9=arg_0.seconds(arg_9 or arg_1.duration),\n arg_3=arg_3,\n arg_7=arg_7,\n shared=(arg_6 == Kind.SERVER and arg_1.timestamp is None),\n arg_4=arg_4,\n arg_5=arg_5,\n )"} +{"_id": "doc_6169", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Converts the provided unsigned long value to a hex string.\n\n :param value: the value to convert\n :type value: unsigned long\n :returns: value as a hex string\n \"\"\"\n arg_2 = bytearray(16)\n arg_0._write_hex_long(arg_2, 0, arg_1)\n return arg_2.decode(\"utf8\")"} +{"_id": "doc_6170", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Writes an unsigned long value across a byte array.\n\n :param data: the buffer to write the value to\n :type data: bytearray\n :param pos: the starting position\n :type pos: int\n :param value: the value to write\n :type value: unsigned long\n \"\"\"\n arg_0._write_hex_byte(arg_1, arg_2 + 0, (arg_3 >> 56) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 2, (arg_3 >> 48) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 4, (arg_3 >> 40) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 6, (arg_3 >> 32) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 8, (arg_3 >> 24) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 10, (arg_3 >> 16) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 12, (arg_3 >> 8) & 0xff)\n arg_0._write_hex_byte(arg_1, arg_2 + 14, (arg_3 & 0xff))"} +{"_id": "doc_6171", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3):\n \"\"\"\n mBank Collect uses transaction code 911 to distinguish incoming mass\n payments transactions, adding transaction_code may be helpful in further\n processing\n \"\"\"\n arg_2['transaction_code'] = int(\n arg_2[arg_1.slug].split(';')[0].split(' ', 1)[0])\n\n return arg_2"} +{"_id": "doc_6172", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3):\n \"\"\"\n mBank Collect uses ID IPH to distinguish between virtual accounts,\n adding iph_id may be helpful in further processing\n \"\"\"\n arg_4 = iph_id_re.search(arg_2[arg_1.slug])\n\n if arg_4: # pragma no branch\n arg_2['iph_id'] = arg_4.groupdict()['iph_id']\n\n return arg_2"} +{"_id": "doc_6173", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3):\n \"\"\"\n mBank Collect states TNR in transaction details as unique id for\n transactions, that may be used to identify the same transactions in\n different statement files eg. 
partial mt942 and full mt940\n Information about tnr uniqueness has been obtained from mBank support;\n it is missing from the mt940 mBank specification.\n \"\"\"\n\n arg_4 = tnr_re.search(arg_2[arg_1.slug])\n\n if arg_4: # pragma no branch\n arg_2['tnr'] = arg_4.groupdict()['tnr']\n\n return arg_2"} +{"_id": "doc_6174", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''\n Parses mt940 data and returns transactions object\n\n :param src: file handler to read, filename to read or raw data as string\n :return: Collection of transactions\n :rtype: Transactions\n '''\n\n def safe_is_file(arg_2):\n try:\n return os.path.isfile(arg_0)\n except ValueError: # pragma: no cover\n return False\n\n if hasattr(arg_0, 'read'): # pragma: no branch\n arg_3 = arg_0.read()\n elif safe_is_file(arg_0):\n with open(arg_0, 'rb') as fh:\n arg_3 = fh.read()\n else: # pragma: no cover\n arg_3 = arg_0\n\n if hasattr(arg_3, 'decode'): # pragma: no branch\n arg_4 = None\n arg_5 = [arg_1, 'utf-8', 'cp852', 'iso8859-15', 'latin1']\n\n for arg_1 in arg_5: # pragma: no cover\n if not arg_1:\n continue\n\n try:\n arg_3 = arg_3.decode(arg_1)\n break\n except UnicodeDecodeError as e:\n arg_4 = e\n except UnicodeEncodeError:\n break\n else:\n raise arg_4 # pragma: no cover\n\n arg_6 = mt940.models.Transactions()\n arg_6.Func(arg_3)\n\n return arg_6"} +{"_id": "doc_6175", "title": "", "text": "def Func(arg_0, arg_1=arg_2.BOTH):\n '''\n Join strings together and strip whitespace in between if needed\n '''\n arg_4 = []\n\n for arg_5 in arg_0.splitlines():\n if arg_1 & arg_2.RIGHT:\n arg_5 = arg_5.rstrip()\n\n if arg_1 & arg_2.LEFT:\n arg_5 = arg_5.lstrip()\n\n arg_4.append(arg_5)\n\n return ''.join(arg_4)"} +{"_id": "doc_6176", "title": "", "text": "async def Func(arg_0):\n \"\"\"Handles the message shown when we are rateFunc\"\"\"\n arg_1 = int(round(arg_0 - time.time()))\n arg_2 = arg_1 / 60\n arg_3 = 'We have exhausted a ratelimit quota. Retrying in %.2f seconds (%.3f minutes).'\n log.warn(arg_3, arg_1, arg_2)"} +{"_id": "doc_6177", "title": "", "text": "async def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Handles Funcs to the API\"\"\"\n arg_4 = RateLimiter(max_calls=59, period=60, callback=limited)\n # handles ratelimits. max_calls is set to 59 because current implementation will retry in 60s after 60 calls is reached. DBL has a 1h block so obviously this doesn't work well, as it will get a 429 when 60 is reached.\n\n async with arg_4: # this works but doesn't 'save' over restart. need a better implementation.\n\n if not arg_0.token:\n raise UnauthorizedDetected('UnauthorizedDetected (status code: 401): No TOKEN provided')\n\n arg_5 = {\n 'User-Agent': arg_0.user_agent,\n 'Content-Type': 'application/json'\n }\n\n if 'json' in arg_3:\n arg_3['data'] = to_json(arg_3.pop('json'))\n\n arg_3['headers'] = arg_5\n arg_5['Authorization'] = arg_0.token\n\n\n for arg_6 in range(5):\n async with arg_0.session.Func(arg_1, arg_2, **arg_3) as resp:\n log.debug('%s %s with %s has returned %s', arg_1,\n arg_2, arg_3.get('data'), resp.status)\n\n arg_7 = await json_or_text(resp)\n\n\n if 300 > resp.status >= 200:\n return arg_7\n\n\n if resp.status == 429: # we are being ratelimited\n arg_8 = 'We are being rate limited. 
Retrying in %.2f seconds (%.3f minutes).'\n\n # sleep a bit\n arg_9 = json.loads(resp.headers.get('Retry-After'))\n arg_10 = arg_9 / 60\n log.warning(arg_8, arg_9, arg_10)\n\n # check if it's a global rate limit (True as only 1 ratelimit atm - /api/bots)\n arg_11 = True # is_global = data.get('global', False)\n if arg_11:\n arg_0._global_over.clear()\n\n await asyncio.sleep(arg_9, loop=arg_0.loop)\n log.debug('Done sleeping for the rate limit. Retrying...')\n\n # release the global lock now that the\n # global rate limit has passed\n if arg_11:\n arg_0._global_over.set()\n log.debug('Global rate limit is now over.')\n\n continue\n\n\n if resp.status == 400:\n raise HTTPException(resp, arg_7)\n elif resp.status == 401:\n raise Unauthorized(resp, arg_7)\n elif resp.status == 403:\n raise Forbidden(resp, arg_7)\n elif resp.status == 404:\n raise NotFound(resp, arg_7)\n else:\n raise HTTPException(resp, arg_7)\n # We've run out of retries, raise.\n raise HTTPException(resp, arg_7)"} +{"_id": "doc_6178", "title": "", "text": "async def Func(arg_0, arg_1):\n '''Gets the information of the given Bot ID'''\n arg_2 = await arg_0.request('GET', '{}/bots/{}'.format(arg_0.BASE, arg_1))\n arg_2['date'] = datetime.strptime(arg_2['date'], '%Y-%m-%dT%H:%M:%S.%fZ')\n for arg_3 in arg_2:\n if arg_2[arg_3] == '':\n arg_2[arg_3] = None\n return arg_2"} +{"_id": "doc_6179", "title": "", "text": "async def Func(arg_0, arg_1, arg_2):\n '''Gets an object of bots on DBL'''\n if arg_1 > 500:\n arg_1 = 50\n return await arg_0.request('GET', '{}/bots?limit={}&offset={}'.format(arg_0.BASE, arg_1, arg_2))"} +{"_id": "doc_6180", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write outgoing message.\"\"\"\n arg_2 = encode(arg_1, compressed=arg_0.compressed)\n arg_3 = len(arg_2)\n arg_2 = arg_0.__pack(arg_3) + arg_2\n with arg_0.__Func_lock:\n while arg_2:\n try:\n arg_4 = os.Func(arg_0.out_d, arg_2)\n except OSError as why:\n if why.errno in (errno.EPIPE, errno.EINVAL):\n raise EOFError()\n raise\n if not arg_4:\n raise EOFError()\n arg_2 = arg_2[arg_4:]\n return arg_3 + arg_0.packet"} +{"_id": "doc_6181", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Encode Erlang external term.\"\"\"\n arg_2 = Func_term(arg_0)\n # False and 0 do not attempt compression.\n if arg_1:\n if arg_1 is True:\n # default compression level of 6\n arg_1 = 6\n elif arg_1 < 0 or arg_1 > 9:\n raise ValueError(\"invalid compression level: %r\" % (arg_1,))\n arg_3 = compress(arg_2, arg_1)\n arg_4 = len(arg_2)\n if len(arg_3) + 5 <= arg_4:\n # Compressed term should be smaller\n return b\"\\x83P\" + _int4_pack(arg_4) + arg_3\n return b\"\\x83\" + arg_2"} +{"_id": "doc_6182", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Asks user for removal of project directory and eventually removes it\n \"\"\"\n if os.path.exists(arg_0.project_directory):\n arg_1 = False\n if arg_0.noinput is False and not arg_0.verbose:\n arg_1 = query_yes_no(\n 'The installation failed.\\n'\n 'Do you want to clean up by removing {0}?\\n'\n '\\tWarning: this will delete all files in:\\n'\n '\\t\\t{0}\\n'\n 'Do you want to cleanup?'.format(\n os.path.abspath(arg_0.project_directory)\n ),\n 'no'\n )\n else:\n sys.stdout.write('The installation has failed.\\n')\n if arg_0.skip_project_dir_check is False and (arg_1 or\n (arg_0.noinput and\n arg_0.delete_project_dir)):\n sys.stdout.write('Removing everything under {0}\\n'.format(\n os.path.abspath(arg_0.project_directory)\n ))\n shutil.rmtree(arg_0.project_directory, True)"} +{"_id": "doc_6183", "title": "", 
"text": "def Func(arg_0):\n \"\"\"\n Check the defined project name against keywords, builtins and existing\n modules to avoid name clashing\n \"\"\"\n if '-' in arg_0:\n return None\n if keyword.iskeyword(arg_0):\n return None\n if arg_0 in dir(__builtins__):\n return None\n try:\n __import__(arg_0)\n return None\n except ImportError:\n return arg_0"} +{"_id": "doc_6184", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks and validate provided input\n \"\"\"\n for arg_2 in data.CONFIGURABLE_OPTIONS:\n arg_3 = arg_0._option_string_actions[arg_2]\n arg_4 = arg_7 = ''\n arg_5 = getattr(arg_1, arg_3.dest)\n arg_6 = None\n # cannot count this until we find a way to test input\n if not arg_1.noinput: # pragma: no cover\n if arg_3.choices:\n arg_4 = ' (choices: {0})'.format(', '.join(arg_3.choices))\n if arg_5:\n if type(arg_5) == list:\n arg_7 = ' [default {0}]'.format(', '.join(arg_5))\n else:\n arg_7 = ' [default {0}]'.format(arg_5)\n\n while not arg_6:\n arg_8 = '{0}{1}{2}: '.format(arg_3.help, arg_4, arg_7)\n if arg_3.choices in ('yes', 'no'):\n arg_6 = utils.query_yes_no(arg_8)\n else:\n arg_6 = compat.input(arg_8)\n arg_6 = compat.clean(arg_6)\n if not arg_6 and arg_5:\n arg_6 = arg_5\n if arg_6 and arg_3.dest == 'templates':\n if arg_6 != 'no' and not os.path.isdir(arg_6):\n sys.stdout.write('Given directory does not exists, retry\\n')\n arg_6 = False\n if arg_6 and arg_3.dest == 'db':\n arg_3(arg_0, arg_1, arg_6, arg_3.option_strings)\n arg_6 = getattr(arg_1, arg_3.dest)\n else:\n if not arg_5 and arg_3.required:\n raise ValueError(\n 'Option {0} is required when in no-input mode'.format(arg_3.dest)\n )\n arg_6 = arg_5\n if arg_3.dest == 'db':\n arg_3(arg_0, arg_1, arg_6, arg_3.option_strings)\n arg_6 = getattr(arg_1, arg_3.dest)\n if arg_3.dest == 'templates' and (arg_6 == 'no' or not os.path.isdir(arg_6)):\n arg_6 = False\n if arg_3.dest in ('bootstrap', 'starting_page'):\n arg_6 = (arg_6 == 'yes')\n setattr(arg_1, arg_3.dest, arg_6)\n return arg_1"} +{"_id": "doc_6185", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts the current version to the next one for inserting into requirements\n in the ' < version' format\n \"\"\"\n arg_1 = list(map(int, str(arg_0).split('.')))\n if len(arg_1) == 1:\n arg_1.append(0)\n arg_1[1] += 1\n if arg_0 == '1.11':\n return '2.0'\n else:\n return '.'.join(map(str, arg_1))"} +{"_id": "doc_6186", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse config file.\n\n Returns a list of additional args.\n \"\"\"\n arg_2 = []\n\n # Temporary switch required args and save them to restore.\n arg_3 = []\n for arg_4 in arg_0._actions:\n if arg_4.required:\n arg_3.append(arg_4)\n arg_4.required = False\n\n arg_6 = arg_0.parse_args(arg_1)\n\n # Restore required args.\n for arg_4 in arg_3:\n arg_4.required = True\n\n if not arg_6.config_file:\n return arg_2\n\n arg_7 = ConfigParser()\n if not arg_7.read(arg_6.config_file):\n sys.stderr.write('Config file \"{0}\" doesn\\'t exists\\n'.format(arg_6.config_file))\n sys.exit(7) # It isn't used anywhere.\n\n arg_2 = _convert_config_to_stdin(arg_7, arg_0)\n return arg_2"} +{"_id": "doc_6187", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"\n Install aldryn boilerplate\n\n :param config_data: configuration data\n \"\"\"\n import requests\n arg_1 = os.path.join(arg_0.project_directory, 'dist', 'media')\n arg_2 = False\n arg_3 = os.path.join(arg_0.project_directory, 'dist', 'static')\n arg_4 = os.path.join(arg_0.project_directory, 'templates')\n arg_5 = tempfile.mkdtemp()\n 
arg_6 = requests.get(data.ALDRYN_BOILERPLATE)\n arg_7 = zipfile.ZipFile(BytesIO(arg_6.content))\n arg_7.extractall(path=arg_5)\n for arg_8 in os.listdir(os.path.join(arg_5, 'aldryn-boilerplate-standard-master')):\n arg_9 = os.path.join(arg_5, 'aldryn-boilerplate-standard-master', arg_8)\n arg_10 = os.path.join(arg_0.project_directory, arg_8)\n if os.path.isfile(arg_9):\n shutil.copy(arg_9, arg_10)\n else:\n shutil.copytree(arg_9, arg_10)\n shutil.rmtree(arg_5)\n return arg_1, arg_2, arg_3, arg_4"} +{"_id": "doc_6188", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create admin user without user input\n\n :param config_data: configuration data\n \"\"\"\n with chdir(os.path.abspath(arg_0.project_directory)):\n arg_1 = deepcopy(dict(os.environ))\n arg_1[arg_2('DJANGO_SETTINGS_MODULE')] = arg_2('{0}.settings'.format(arg_0.project_name))\n arg_1[arg_2('PYTHONPATH')] = arg_2(os.pathsep.join(map(shlex_quote, sys.path)))\n subprocess.check_call(\n [sys.executable, 'Func.py'], arg_1=arg_1, stderr=subprocess.STDOUT\n )\n for arg_3 in ['py', 'pyc']:\n try:\n os.remove('Func.{0}'.format(arg_3))\n except OSError:\n pass"} +{"_id": "doc_6189", "title": "", "text": "def Func(arg_0):\n \"\"\"Method sleeps if there is nothing to do\"\"\"\n if len(arg_0._queue) == 0:\n time.sleep(0.1)\n return\n arg_1 = arg_0._queue.pop(0)\n if arg_1.canSend():\n arg_0._sendMsg(arg_1)\n arg_1.refresh()\n if not (arg_1.isFinished()):\n arg_0._queue.append(arg_1)\n else:\n arg_0._queue.append(arg_1)\n time.sleep(0.01)"} +{"_id": "doc_6190", "title": "", "text": "def Func(arg_0):\n 'cleans up and Funcs the discovery server'\n\n arg_0.clearRemoteServices()\n arg_0.clearLocalServices()\n\n arg_0._FuncThreads()\n arg_0._serverStarted = False"} +{"_id": "doc_6191", "title": "", "text": "def Func(arg_0):\n \"construct a raw SOAP XML string, given a prepared SoapEnvelope object\"\n if arg_0.getAction() == ACTION_PROBE:\n return createProbeMessage(arg_0)\n if arg_0.getAction() == ACTION_PROBE_MATCH:\n return createProbeMatchMessage(arg_0)\n if arg_0.getAction() == ACTION_RESOLVE:\n return createResolveMessage(arg_0)\n if arg_0.getAction() == ACTION_RESOLVE_MATCH:\n return createResolveMatchMessage(arg_0)\n if arg_0.getAction() == ACTION_HELLO:\n return createHelloMessage(arg_0)\n if arg_0.getAction() == ACTION_BYE:\n return createByeMessage(arg_0)"} +{"_id": "doc_6192", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a list of RelatedObject records for child relations of the given model,\n including ones attached to ancestors of the model\n \"\"\"\n return [\n arg_1 for arg_1 in arg_0._meta.get_fields()\n if isinstance(arg_1.remote_field, ParentalKey)\n ]"} +{"_id": "doc_6193", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return a list of ParentalManyToManyFields on the given model,\n including ones attached to ancestors of the model\n \"\"\"\n return [\n arg_1 for arg_1 in arg_0._meta.get_fields()\n if isinstance(arg_1, ParentalManyToManyField)\n ]"} +{"_id": "doc_6194", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Save the model and commit all child relations.\n \"\"\"\n arg_2 = [rel.get_accessor_name() for rel in get_all_child_relations(arg_0)]\n arg_3 = [arg_8.name for arg_8 in get_all_child_m2m_relations(arg_0)]\n\n arg_4 = arg_1.pop('update_fields', None)\n if arg_4 is None:\n arg_5 = None\n arg_6 = arg_2\n arg_7 = arg_3\n else:\n arg_5 = []\n arg_6 = []\n arg_7 = []\n for arg_8 in arg_4:\n if arg_8 in arg_2:\n arg_6.append(arg_8)\n elif arg_8 in arg_3:\n arg_7.append(arg_8)\n else:\n 
arg_5.append(arg_8)\n\n super(ClusterableModel, arg_0).Func(arg_4=arg_5, **arg_1)\n\n for arg_9 in arg_6:\n getattr(arg_0, arg_9).commit()\n\n for arg_8 in arg_7:\n getattr(arg_0, arg_8).commit()"} +{"_id": "doc_6195", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=False):\n \"\"\"\n Build an instance of this model from the JSON-like structure passed in,\n recursing into related objects as required.\n If check_fks is true, it will check whether referenced foreign keys still\n exist in the database.\n - dangling foreign keys on related objects are dealt with by either nullifying the key or\n dropping the related object, according to the 'on_delete' setting.\n - dangling foreign keys on the base object will be nullified, unless strict_fks is true,\n in which case any dangling foreign keys with on_delete=CASCADE will cause None to be\n returned for the entire object.\n \"\"\"\n arg_4 = model_Func(arg_0, arg_1, arg_2=arg_2, arg_3=arg_3)\n if arg_4 is None:\n return None\n\n arg_5 = get_all_child_relations(arg_0)\n\n for arg_6 in arg_5:\n arg_7 = arg_6.get_accessor_name()\n try:\n arg_8 = arg_1[arg_7]\n except KeyError:\n continue\n\n arg_9 = arg_6.related_model\n if hasattr(arg_9, 'Func'):\n arg_10 = [\n arg_9.Func(child_data, arg_2=arg_2, arg_3=True)\n for child_data in arg_8\n ]\n else:\n arg_10 = [\n model_Func(arg_9, child_data, arg_2=arg_2, arg_3=True)\n for child_data in arg_8\n ]\n\n arg_10 = filter(lambda child: child is not None, arg_10)\n\n setattr(arg_4, arg_7, arg_10)\n\n return arg_4"} +{"_id": "doc_6196", "title": "", "text": "def Func(arg_0):\n '''This clean method will check for unique_together condition'''\n # Collect unique_checks and to run from all the forms.\n arg_1 = set()\n arg_2 = set()\n arg_3 = arg_0.deleted_forms\n arg_4 = [arg_5 for arg_5 in arg_0.forms if arg_5.is_valid() and arg_5 not in arg_3]\n for arg_5 in arg_4:\n arg_6, arg_7 = arg_5.instance._get_unique_checks()\n arg_1.update(arg_6)\n arg_2.update(arg_7)\n\n arg_8 = []\n # Do each of the unique checks (unique and unique_together)\n for arg_9, arg_10 in arg_1:\n arg_11 = set()\n for arg_5 in arg_4:\n # Get the data for the set of fields that must be unique among the forms.\n arg_12 = (\n arg_15 if arg_15 in arg_0.unique_fields else arg_5.cleaned_data[arg_15]\n for arg_15 in arg_10 if arg_15 in arg_5.cleaned_data\n )\n # Reduce Model instances to their primary key values\n arg_12 = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d\n for d in arg_12)\n if arg_12 and None not in arg_12:\n # if we've already seen it then we have a uniqueness failure\n if arg_12 in arg_11:\n # poke error messages into the right places and mark\n # the form as invalid\n arg_8.append(arg_0.get_unique_error_message(arg_10))\n arg_5._errors[arg_14] = arg_0.error_class([arg_0.get_form_error()])\n # remove the data from the cleaned_data dict since it was invalid\n for arg_15 in arg_10:\n if arg_15 in arg_5.cleaned_data:\n del arg_5.cleaned_data[arg_15]\n # mark the data as seen\n arg_11.add(arg_12)\n\n if arg_8:\n raise ValidationError(arg_8)"} +{"_id": "doc_6197", "title": "", "text": "def Func(arg_0):\n \"\"\"Return True if data differs from initial.\"\"\"\n\n # Need to recurse over nested formsets so that the form is saved if there are changes\n # to child forms but not the parent\n if arg_0.formsets:\n for arg_1 in arg_0.formsets.values():\n for arg_2 in arg_1.forms:\n if arg_2.Func():\n return True\n return bool(arg_0.changed_data)"} +{"_id": "doc_6198", "title": "", "text": "def Func(arg_0):\n # type: () 
-> Address\n \"\"\"\n Returns the address with a valid checksum attached.\n \"\"\"\n return Address(\n trytes=arg_0.address + arg_0._generate_checksum(),\n\n # Make sure to copy all of the ancillary attributes, too!\n balance=arg_0.balance,\n key_index=arg_0.key_index,\n security_level=arg_0.security_level,\n )"} +{"_id": "doc_6199", "title": "", "text": "def Func(arg_0):\n # type: () -> AddressChecksum\n \"\"\"\n Generates the correct checksum for this address.\n \"\"\"\n arg_1 = [] # type: MutableSequence[int]\n\n arg_2 = Kerl()\n arg_2.absorb(arg_0.address.as_trits())\n arg_2.squeeze(arg_1)\n\n arg_3 = AddressChecksum.LEN * TRITS_PER_TRYTE\n\n return AddressChecksum.from_trits(arg_1[-arg_3:])"} +{"_id": "doc_6200", "title": "", "text": "def Func(arg_0):\n # type: () -> ArgumentParser\n \"\"\"\n Returns the argument parser that will be used to interpret\n arguments and options from argv.\n \"\"\"\n arg_1 = ArgumentParser(\n description=arg_0.__doc__,\n epilog='PyOTA v{version}'.format(version=__version__),\n )\n\n arg_1.add_argument(\n '--uri',\n type=text_type,\n default='http://localhost:14265/',\n\n help=(\n 'URI of the node to connect to '\n '(defaults to http://localhost:14265/).'\n ),\n )\n\n if arg_0.requires_seed:\n arg_1.add_argument(\n '--seed-file',\n type=text_type,\n dest='seed_file',\n\n help=(\n 'Path to a file containing your seed in cleartext. '\n 'If not provided, you will be prompted to enter '\n 'your seed via stdin.'\n ),\n )\n\n arg_1.add_argument(\n '--testnet',\n action='store_true',\n default=False,\n help='If set, use testnet settings (e.g., for PoW).',\n )\n\n return arg_1"} +{"_id": "doc_6201", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=arg_4,\n):\n # type: (Sequence[TryteString], Hash, TryteString, type) -> bool\n \"\"\"\n Returns whether a sequence of signature fragments is valid.\n\n :param fragments:\n Sequence of signature fragments (usually\n :py:class:`iota.transaction.Fragment` instances).\n\n :param hash_:\n Hash used to generate the signature fragments (usually a\n :py:class:`iota.transaction.BundleHash` instance).\n\n :param public_key:\n The public key value used to verify the signature digest (usually a\n :py:class:`iota.types.Address` instance).\n\n :param sponge_type:\n The class used to create the cryptographic sponge (i.e., Curl or Kerl).\n \"\"\"\n arg_5 = [0] * (arg_16 * len(arg_0))\n arg_6 = normalize(arg_1)\n\n for arg_7, arg_8 in enumerate(arg_0):\n arg_9 = arg_3()\n\n # If there are more than 3 iterations, loop back around to the\n # start.\n arg_10 = arg_6[arg_7 % len(arg_6)]\n\n arg_11 = []\n for arg_12, arg_13 in enumerate(arg_8.iter_chunks(Hash.LEN)):\n arg_11 = arg_13.as_trits() # type: List[int]\n arg_14 = arg_3()\n\n # Note the sign flip compared to\n # :py:class:`SignatureFragmentGenerator`.\n for arg_15 in range(13 + arg_10[arg_12]):\n arg_14.reset()\n arg_14.absorb(arg_11)\n arg_14.squeeze(arg_11)\n\n arg_9.absorb(arg_11)\n\n arg_9.squeeze(arg_11)\n arg_5[arg_7 * arg_16:(arg_7 + 1) * arg_16] = arg_11\n\n arg_17 = [0] * arg_16\n arg_18 = arg_3()\n arg_18.absorb(arg_5)\n arg_18.squeeze(arg_17)\n\n return arg_17 == arg_2.as_trits()"} +{"_id": "doc_6202", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generates the key associated with the specified address.\n\n Note that this method will generate the wrong key if the input\n address was generated from a different key!\n \"\"\"\n return arg_0.get_key(\n index=arg_1.key_index,\n iterations=arg_1.security_level,\n )"} +{"_id": "doc_6203", "title": "", 
"text": "def Func(arg_0, arg_1=0, arg_2=1, arg_3=1):\n # type: (int, int, int) -> KeyIterator\n \"\"\"\n Creates a generator that can be used to progressively generate\n new keys.\n\n :param start:\n Starting index.\n\n Warning: This method may take awhile to reset if ``start``\n is a large number!\n\n :param step:\n Number of indexes to advance after each key.\n\n This value can be negative; the generator will exit if it\n reaches an index < 0.\n\n Warning: The generator may take awhile to advance between\n iterations if ``step`` is a large number!\n\n :param security_level:\n Number of _transform iterations to apply to each key.\n Must be >= 1.\n\n Increasing this value makes key generation slower, but more\n resistant to brute-forcing.\n \"\"\"\n return KeyIterator(arg_0.seed, arg_1, arg_2, arg_3)"} +{"_id": "doc_6204", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n # type: (Sequence[int], Optional[int], Optional[int]) -> None\n \"\"\"\n Absorb trits into the sponge.\n\n :param trits:\n Sequence of trits to Func.\n\n :param offset:\n Starting offset in ``trits``.\n\n :param length:\n Number of trits to Func. Defaults to ``len(trits)``.\n \"\"\"\n arg_4 = ((len(arg_1) % HASH_LENGTH) or HASH_LENGTH)\n arg_1 += [0] * (HASH_LENGTH - arg_4)\n\n if arg_3 is None:\n arg_3 = len(arg_1)\n\n if arg_3 < 1:\n raise with_context(\n exc=ValueError('Invalid length passed to ``Func``.'),\n\n context={\n 'trits': arg_1,\n 'offset': arg_2,\n 'length': arg_3,\n },\n )\n\n # Copy trits from ``trits`` into internal state, one hash at a\n # time, transforming internal state in between hashes.\n while arg_2 < arg_3:\n arg_5 = arg_2\n arg_6 = min(arg_5 + HASH_LENGTH, arg_3)\n\n # Copy the next hash worth of trits to internal state.\n #\n # Note that we always copy the trits to the start of the\n # state. 
``self._state`` is 3 hashes long, but only the\n # first hash is \"public\"; the other 2 are only accessible to\n # :py:meth:`_transform`.\n arg_0._state[0:arg_6 - arg_5] = arg_1[arg_5:arg_6]\n\n # Transform.\n arg_0._transform()\n\n # Move on to the next hash.\n arg_2 += HASH_LENGTH"} +{"_id": "doc_6205", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=arg_4):\n # type: (MutableSequence[int], Optional[int], Optional[int]) -> None\n \"\"\"\n Squeeze trits from the sponge.\n\n :param trits:\n Sequence that the Funcd trits will be copied to.\n Note: this object will be modified!\n\n :param offset:\n Starting offset in ``trits``.\n\n :param length:\n Number of trits to Func, default to ``HASH_LENGTH``\n \"\"\"\n # Squeeze is kind of like the opposite of absorb; it copies\n # trits from internal state to the ``trits`` parameter, one hash\n # at a time, and transforming internal state in between hashes.\n #\n # However, only the first hash of the state is \"public\", so we\n # can simplify the implementation somewhat.\n\n # Ensure length can be mod by HASH_LENGTH\n if arg_3 % arg_4 != 0:\n raise with_context(\n exc=ValueError('Invalid length passed to ``Func`.'),\n\n context={\n 'trits': arg_1,\n 'offset': arg_2,\n 'length': arg_3,\n })\n\n # Ensure that ``trits`` can hold at least one hash worth of\n # trits.\n arg_1.extend([0] * max(0, arg_3 - len(arg_1)))\n\n # Check trits with offset can handle hash length\n if len(arg_1) - arg_2 < arg_4:\n raise with_context(\n exc=ValueError('Invalid offset passed to ``Func``.'),\n\n context={\n 'trits': arg_1,\n 'offset': arg_2,\n 'length': arg_3\n },\n )\n\n while arg_3 >= arg_4:\n # Copy exactly one hash.\n arg_1[arg_2:arg_2 + arg_4] = arg_0._state[0:arg_4]\n\n # One hash worth of trits copied; now transform.\n arg_0._transform()\n\n arg_2 += arg_4\n arg_3 -= arg_4"} +{"_id": "doc_6206", "title": "", "text": "def Func(arg_0):\n # type: () -> None\n \"\"\"\n Transforms internal state.\n \"\"\"\n # Copy some values locally so we can avoid global lookups in the\n # inner loop.\n #\n # References:\n #\n # - https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Local_Variables\n arg_1 = STATE_LENGTH\n arg_2 = TRUTH_TABLE\n\n # Operate on a copy of ``self._state`` to eliminate dot lookups\n # in the inner loop.\n #\n # References:\n #\n # - https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots...\n # - http://stackoverflow.com/a/2612990/\n arg_3 = arg_0._state[:]\n arg_4 = arg_3[:]\n\n # Note: This code looks significantly different from the C\n # implementation because it has been optimized to limit the\n # number of list item lookups (these are relatively slow in\n # Python).\n arg_5 = 0\n for arg_6 in range(NUMBER_OF_ROUNDS):\n arg_7 = arg_3[arg_5]\n\n for arg_8 in range(arg_1):\n arg_5 += (364 if arg_5 < 365 else -365)\n\n arg_9 = arg_3[arg_5]\n\n arg_4[arg_8] = arg_2[arg_7 + (3 * arg_9) + 4]\n\n arg_7 = arg_9\n\n arg_3 = arg_4\n arg_4 = arg_4[:]\n\n arg_0._state = arg_4"} +{"_id": "doc_6207", "title": "", "text": "def Func(\n arg_0,\n arg_1=0,\n arg_2=1,\n arg_3=arg_4.DEFAULT_SECURITY_LEVEL,\n ):\n # type: (int, int, int) -> dict\n \"\"\"\n Generates one or more private keys from the seed.\n\n As the name implies, private keys should not be shared.\n However, in a few cases it may be necessary (e.g., for M-of-N\n transactions).\n\n :param index:\n The starting key index.\n\n :param count:\n Number of keys to generate.\n\n :param security_level:\n Number of iterations to use when generating new keys.\n\n Larger values take 
longer, but the resulting signatures are\n more secure.\n\n This value must be between 1 and 3, inclusive.\n\n :return:\n Dict with the following items::\n\n {\n 'keys': List[PrivateKey],\n Always contains a list, even if only one key was\n generated.\n }\n\n References:\n\n - :py:class:`iota.crypto.signing.KeyGenerator`\n - https://github.com/iotaledger/wiki/blob/master/multisigs.md#how-m-of-n-works\n \"\"\"\n return commands.GetPrivateKeysCommand(arg_0.adapter)(\n seed=arg_0.seed,\n arg_1=arg_1,\n arg_2=arg_2,\n securityLevel=arg_3,\n )"} +{"_id": "doc_6208", "title": "", "text": "def Func(\n arg_0,\n arg_1, # type: Iterable[ProposedTransaction]\n arg_2, # type: MultisigAddress\n arg_3=None, # type: Optional[Address]\n ):\n # type: (...) -> dict\n \"\"\"\n Prepares a bundle that authorizes the spending of IOTAs from a\n multisig address.\n\n .. note::\n This method is used exclusively to spend IOTAs from a\n multisig address.\n\n If you want to spend IOTAs from non-multisig addresses, or\n if you want to create 0-value transfers (i.e., that don't\n require inputs), use\n :py:meth:`iota.api.Iota.prepare_transfer` instead.\n\n :param transfers:\n Transaction objects to prepare.\n\n .. important::\n Must include at least one transaction that spends IOTAs\n (i.e., has a nonzero ``value``). If you want to prepare\n a bundle that does not spend any IOTAs, use\n :py:meth:`iota.api.prepare_transfer` instead.\n\n :param multisig_input:\n The multisig address to use as the input for the transfers.\n\n .. note::\n This method only supports creating a bundle with a\n single multisig input.\n\n If you would like to spend from multiple multisig\n addresses in the same bundle, create the\n :py:class:`iota.multisig.transaction.ProposedMultisigBundle`\n object manually.\n\n :param change_address:\n If inputs are provided, any unspent amount will be sent to\n this address.\n\n If the bundle has no unspent inputs, ``change_address` is\n ignored.\n\n .. important::\n Unlike :py:meth:`iota.api.Iota.prepare_transfer`, this\n method will NOT generate a change address automatically.\n If there are unspent inputs and ``change_address`` is\n empty, an exception will be raised.\n\n This is because multisig transactions typically involve\n multiple individuals, and it would be unfair to the\n participants if we generated a change address\n automatically using the seed of whoever happened to run\n the ``Func`` method!\n\n .. 
danger::\n Note that this protective measure is not a\n substitute for due diligence!\n\n Always verify the details of every transaction in a\n bundle (including the change transaction) before\n signing the input(s)!\n\n :return:\n Dict containing the following values::\n\n {\n 'trytes': List[TransactionTrytes],\n Finalized bundle, as trytes.\n The input transactions are not signed.\n }\n\n In order to authorize the spending of IOTAs from the multisig\n input, you must generate the correct private keys and invoke\n the :py:meth:`iota.crypto.types.PrivateKey.sign_input_at`\n method for each key, in the correct order.\n\n Once the correct signatures are applied, you can then perform\n proof of work (``attachToTangle``) and broadcast the bundle\n using :py:meth:`iota.api.Iota.send_trytes`.\n \"\"\"\n return commands.PrepareMultisigTransferCommand(arg_0.adapter)(\n changeAddress=arg_3,\n multisigInput=arg_2,\n arg_1=arg_1,\n )"} +{"_id": "doc_6209", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (int, int) -> int\n \"\"\"\n Adds two individual trits together.\n\n The result is always a single trit.\n \"\"\"\n arg_2 = arg_0 + arg_1\n return arg_2 if -2 < arg_2 < 2 else (arg_2 < 0) - (arg_2 > 0)"} +{"_id": "doc_6210", "title": "", "text": "def Func(arg_0):\n # type: (Seed) -> None\n \"\"\"\n Outputs the user's seed to stdout, along with lots of warnings\n about security.\n \"\"\"\n print(\n 'WARNING: Anyone who has your seed can spend your IOTAs! '\n 'Clear the screen after recording your seed!'\n )\n compat.input('')\n print('Your seed is:')\n print('')\n print(binary_type(arg_0).decode('ascii'))\n print('')\n\n print(\n 'Clear the screen to prevent shoulder surfing, '\n 'and press return to continue.'\n )\n print('https://en.wikipedia.org/wiki/Shoulder_surfing_(computer_security)')\n compat.input('')"} +{"_id": "doc_6211", "title": "", "text": "def Func(\n arg_0,\n arg_1=None, # type: Optional[Iterable[BundleHash]]\n arg_2=None, # type: Optional[Iterable[Address]]\n arg_3=None, # type: Optional[Iterable[Tag]]\n arg_4=None, # type: Optional[Iterable[TransactionHash]]\n ):\n # type: (...) 
-> dict\n \"\"\"\n Find the transactions which match the specified input and\n return.\n\n All input values are lists, for which a list of return values\n (transaction hashes), in the same order, is returned for all\n individual elements.\n\n Using multiple of these input fields returns the intersection of\n the values.\n\n :param bundles:\n List of bundle IDs.\n\n :param addresses:\n List of addresses.\n\n :param tags:\n List of tags.\n\n :param approvees:\n List of approvee transaction IDs.\n\n References:\n\n - https://iota.readme.io/docs/findtransactions\n \"\"\"\n return core.FindTransactionsCommand(arg_0.adapter)(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n )"} +{"_id": "doc_6212", "title": "", "text": "def Func(\n arg_0,\n arg_1=0,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n ):\n # type: (int, Optional[int], Optional[int], Optional[int]) -> dict\n \"\"\"\n Gets all possible inputs of a seed and returns them, along with\n the total balance.\n\n This is either done deterministically (by generating all\n addresses until :py:meth:`find_transactions` returns an empty\n result), or by providing a key range to search.\n\n :param start:\n Starting key index.\n Defaults to 0.\n\n :param stop:\n Stop before this index.\n\n Note that this parameter behaves like the ``stop`` attribute\n in a :py:class:`slice` object; the stop index is *not*\n included in the result.\n\n If ``None`` (default), then this method will not stop until\n it finds an unused address.\n\n :param threshold:\n If set, determines the minimum threshold for a successful\n result:\n\n - As soon as this threshold is reached, iteration will stop.\n - If the command runs out of addresses before the threshold\n is reached, an exception is raised.\n\n .. note::\n This method does not attempt to \"optimize\" the result\n (e.g., smallest number of inputs, get as close to\n ``threshold`` as possible, etc.); it simply accumulates\n inputs in order until the threshold is met.\n\n If ``threshold`` is 0, the first address in the key range\n with a non-zero balance will be returned (if it exists).\n\n If ``threshold`` is ``None`` (default), this method will\n return **all** inputs in the specified key range.\n\n :param security_level:\n Number of iterations to use when generating new addresses\n (see :py:meth:`get_new_addresses`).\n\n This value must be between 1 and 3, inclusive.\n\n If not set, defaults to\n :py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`.\n\n :return:\n Dict with the following structure::\n\n {\n 'inputs': List[Address],\n Addresses with nonzero balances that can be used\n as inputs.\n\n 'totalBalance': int,\n Aggregate balance from all matching addresses.\n }\n\n Note that each Address in the result has its ``balance``\n attribute set.\n\n Example:\n\n .. code-block:: python\n\n response = iota.Func(...)\n\n input0 = response['inputs'][0] # type: Address\n input0.balance # 42\n\n :raise:\n - :py:class:`iota.adapter.BadApiResponse` if ``threshold``\n is not met. 
Not applicable if ``threshold`` is ``None``.\n\n References:\n\n - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getinputs\n \"\"\"\n return extended.GetInputsCommand(arg_0.adapter)(\n seed=arg_0.seed,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n securityLevel=arg_4\n )"} +{"_id": "doc_6213", "title": "", "text": "def Func(\n arg_0,\n arg_1=0,\n arg_2=1,\n arg_3=arg_4.DEFAULT_SECURITY_LEVEL,\n arg_6=False,\n ):\n # type: (int, Optional[int], int, bool) -> dict\n \"\"\"\n Generates one or more new addresses from the seed.\n\n :param index:\n The key index of the first new address to generate (must be\n >= 1).\n\n :param count:\n Number of addresses to generate (must be >= 1).\n\n .. tip::\n This is more efficient than calling ``get_new_address``\n inside a loop.\n\n If ``None``, this method will progressively generate\n addresses and scan the Tangle until it finds one that has no\n transactions referencing it.\n\n :param security_level:\n Number of iterations to use when generating new addresses.\n\n Larger values take longer, but the resulting signatures are\n more secure.\n\n This value must be between 1 and 3, inclusive.\n\n :param checksum:\n Specify whether to return the address with the checksum.\n Defaults to ``False``.\n\n :return:\n Dict with the following structure::\n\n {\n 'addresses': List[Address],\n Always a list, even if only one address was\n generated.\n }\n\n References:\n\n - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress\n \"\"\"\n return extended.GetNewAddressesCommand(arg_0.adapter)(\n arg_2=arg_2,\n arg_1=arg_1,\n securityLevel=arg_3,\n arg_6=arg_6,\n seed=arg_0.seed,\n )"} +{"_id": "doc_6214", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=3,\n arg_3=None,\n ):\n # type: (TransactionHash, int, Optional[int]) -> dict\n \"\"\"\n Promotes a transaction by adding spam on top of it.\n\n :return:\n Dict with the following structure::\n\n {\n 'bundle': Bundle,\n The newly-published bundle.\n }\n \"\"\"\n if arg_3 is None:\n arg_3 = arg_0.default_min_weight_magnitude\n\n return extended.PromoteTransactionCommand(arg_0.adapter)(\n arg_1=arg_1,\n arg_2=arg_2,\n minWeightMagnitude=arg_3,\n )"} +{"_id": "doc_6215", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=3,\n arg_3=None,\n ):\n # type: (TransactionHash, int, Optional[int]) -> dict\n \"\"\"\n Takes a tail transaction hash as input, gets the bundle\n associated with the transaction and then replays the bundle by\n attaching it to the Tangle.\n\n :param transaction:\n Transaction hash. 
Must be a tail.\n\n :param depth:\n Depth at which to attach the bundle.\n Defaults to 3.\n\n :param min_weight_magnitude:\n Min weight magnitude, used by the node to calibrate Proof of\n Work.\n\n If not provided, a default value will be used.\n\n :return:\n Dict with the following structure::\n\n {\n 'trytes': List[TransactionTrytes],\n Raw trytes that were published to the Tangle.\n }\n\n References:\n\n - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#replaytransfer\n \"\"\"\n if arg_3 is None:\n arg_3 = arg_0.default_min_weight_magnitude\n\n return extended.ReplayBundleCommand(arg_0.adapter)(\n arg_1=arg_1,\n arg_2=arg_2,\n minWeightMagnitude=arg_3,\n )"} +{"_id": "doc_6216", "title": "", "text": "def Func(\n arg_0,\n arg_1, # type: Iterable[ProposedTransaction]\n arg_2=3, # type: int\n arg_3=None, # type: Optional[Iterable[Address]]\n arg_4=None, # type: Optional[Address]\n arg_5=None, # type: Optional[int]\n arg_6=None, # type: Optional[int]\n ):\n # type: (...) -> dict\n \"\"\"\n Prepares a set of transfers and creates the bundle, then\n attaches the bundle to the Tangle, and broadcasts and stores the\n transactions.\n\n :param transfers:\n Transfers to include in the bundle.\n\n :param depth:\n Depth at which to attach the bundle.\n Defaults to 3.\n\n :param inputs:\n List of inputs used to fund the transfer.\n Not needed for zero-value transfers.\n\n :param change_address:\n If inputs are provided, any unspent amount will be sent to\n this address.\n\n If not specified, a change address will be generated\n automatically.\n\n :param min_weight_magnitude:\n Min weight magnitude, used by the node to calibrate Proof of\n Work.\n\n If not provided, a default value will be used.\n\n :param security_level:\n Number of iterations to use when generating new addresses\n (see :py:meth:`get_new_addresses`).\n\n This value must be between 1 and 3, inclusive.\n\n If not set, defaults to\n :py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`.\n\n :return:\n Dict with the following structure::\n\n {\n 'bundle': Bundle,\n The newly-published bundle.\n }\n\n References:\n\n - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#sendtransfer\n \"\"\"\n if arg_5 is None:\n arg_5 = arg_0.default_min_weight_magnitude\n\n return extended.SendTransferCommand(arg_0.adapter)(\n seed=arg_0.seed,\n arg_2=arg_2,\n arg_1=arg_1,\n arg_3=arg_3,\n changeAddress=arg_4,\n minWeightMagnitude=arg_5,\n securityLevel=arg_6,\n )"} +{"_id": "doc_6217", "title": "", "text": "def Func(arg_0, arg_1, arg_2=3, arg_3=None):\n # type: (Iterable[TransactionTrytes], int, Optional[int]) -> dict\n \"\"\"\n Attaches transaction trytes to the Tangle, then broadcasts and\n stores them.\n\n :param trytes:\n Transaction encoded as a tryte sequence.\n\n :param depth:\n Depth at which to attach the bundle.\n Defaults to 3.\n\n :param min_weight_magnitude:\n Min weight magnitude, used by the node to calibrate Proof of\n Work.\n\n If not provided, a default value will be used.\n\n :return:\n Dict with the following structure::\n\n {\n 'trytes': List[TransactionTrytes],\n Raw trytes that were published to the Tangle.\n }\n\n References:\n\n - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#sendtrytes\n \"\"\"\n if arg_3 is None:\n arg_3 = arg_0.default_min_weight_magnitude\n\n return extended.SendTrytesCommand(arg_0.adapter)(\n arg_1=arg_1,\n arg_2=arg_2,\n minWeightMagnitude=arg_3,\n )"} +{"_id": "doc_6218", "title": "", "text": "def Func(arg_0):\n # type: (AdapterSpec) -> BaseAdapter\n \"\"\"\n Given 
a URI, returns a properly-configured adapter instance.\n \"\"\"\n if isinstance(arg_0, BaseAdapter):\n return arg_0\n\n arg_1 = compat.urllib_parse.urlsplit(arg_0) # type: SplitResult\n\n if not arg_1.scheme:\n raise with_context(\n exc=InvalidUri(\n 'URI must begin with \"://\" (e.g., \"udp://\").',\n ),\n\n context={\n 'parsed': arg_1,\n 'uri': arg_0,\n },\n )\n\n try:\n arg_2 = adapter_registry[arg_1.scheme]\n except KeyError:\n raise with_context(\n exc=InvalidUri('Unrecognized protocol {protocol!r}.'.format(\n protocol=arg_1.scheme,\n )),\n\n context={\n 'parsed': arg_1,\n 'uri': arg_0,\n },\n )\n\n return arg_2.configure(arg_1)"} +{"_id": "doc_6219", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n # type: (dict, dict) -> dict\n \"\"\"\n Sends an API request to the node.\n\n :param payload:\n JSON payload.\n\n :param kwargs:\n Additional keyword arguments for the adapter.\n\n :return:\n Decoded response from the node.\n\n :raise:\n - :py:class:`BadApiResponse` if a non-success response was\n received.\n \"\"\"\n raise NotImplementedError(\n 'Not implemented in {cls}.'.format(cls=type(arg_0).__name__),\n )"} +{"_id": "doc_6220", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n # type: (int, Text, Optional[dict]) -> None\n \"\"\"\n Sends a message to the instance's logger, if configured.\n \"\"\"\n if arg_0.Funcger:\n arg_0.Funcger.log(arg_1, arg_2, extra={'context': arg_3 or {}})"} +{"_id": "doc_6221", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='post', **arg_4):\n # type: (Text, Optional[Text], Text, dict) -> Response\n \"\"\"\n Sends the actual HTTP request.\n\n Split into its own method so that it can be mocked during unit\n tests.\n \"\"\"\n arg_4.setdefault(\n 'timeout',\n arg_0.timeout if arg_0.timeout else get_default_timeout(),\n )\n\n if arg_0.authentication:\n arg_4.setdefault('auth', auth.HTTPBasicAuth(*arg_0.authentication))\n\n arg_0._log(\n level=DEBUG,\n\n message='Sending {method} to {url}: {payload!r}'.format(\n arg_3=arg_3,\n arg_2=arg_2,\n arg_1=arg_1,\n ),\n\n context={\n 'request_method': arg_3,\n 'request_kwargs': arg_4,\n 'request_payload': arg_2,\n 'request_url': arg_1,\n },\n )\n\n arg_5 = request(arg_3=arg_3, arg_1=arg_1, data=arg_2, **arg_4)\n\n arg_0._log(\n level=DEBUG,\n\n message='Receiving {method} from {url}: {response!r}'.format(\n arg_3=arg_3,\n arg_5=arg_5.content,\n arg_1=arg_1,\n ),\n\n context={\n 'request_method': arg_3,\n 'request_kwargs': arg_4,\n 'request_payload': arg_2,\n 'request_url': arg_1,\n\n 'response_headers': arg_5.headers,\n 'response_content': arg_5.content,\n },\n )\n\n return arg_5"} +{"_id": "doc_6222", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (Digest) -> None\n \"\"\"\n Absorbs a digest into the sponge.\n\n .. 
important::\n Keep track of the order that digests are added!\n\n To spend inputs from a multisig address, you must provide\n the private keys in the same order!\n\n References:\n\n - https://github.com/iotaledger/wiki/blob/master/multisigs.md#spending-inputs\n \"\"\"\n if arg_0._address:\n raise ValueError('Cannot add digests once an address is extracted.')\n\n arg_0._sponge.absorb(arg_1.as_trits())\n arg_0._digests.append(arg_1)"} +{"_id": "doc_6223", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=1):\n # type: (int, int) -> Generator[Address, None, None]\n \"\"\"\n Creates an iterator that can be used to progressively generate new\n addresses.\n\n :param start:\n Starting index.\n\n Warning: This method may take awhile to reset if ``start``\n is a large number!\n\n :param step:\n Number of indexes to advance after each address.\n\n Warning: The generator may take awhile to advance between\n iterations if ``step`` is a large number!\n \"\"\"\n arg_3 = (\n KeyGenerator(arg_0.seed).Func(\n arg_1,\n arg_2,\n arg_0.security_level,\n )\n )\n\n while True:\n yield arg_0._generate_address(arg_3)"} +{"_id": "doc_6224", "title": "", "text": "def Func(arg_0):\n # type: (Digest) -> Address\n \"\"\"\n Generates an address from a private key digest.\n \"\"\"\n arg_1 = [0] * (Address.LEN * TRITS_PER_TRYTE) # type: List[int]\n\n arg_2 = Kerl()\n arg_2.absorb(arg_0.as_trits())\n arg_2.squeeze(arg_1)\n\n return Address.from_trits(\n trits=arg_1,\n\n key_index=arg_0.key_index,\n security_level=arg_0.security_level,\n )"} +{"_id": "doc_6225", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (KeyIterator) -> Address\n \"\"\"\n Generates a new address.\n\n Used in the event of a cache miss.\n \"\"\"\n if arg_0.checksum:\n return (\n arg_0.address_from_digest(\n digest=arg_0._get_digest(arg_1),\n ).with_valid_checksum()\n )\n else:\n return arg_0.address_from_digest(arg_0._get_digest(arg_1))"} +{"_id": "doc_6226", "title": "", "text": "def Func(\n arg_0, # type: BaseAdapter\n arg_1, # type: Seed\n arg_2, # type: int\n arg_3=None, # type: Optional[int]\n):\n # type: (...) -> Generator[Tuple[Address, List[TransactionHash]], None, None]\n \"\"\"\n Scans the Tangle for used addresses.\n\n This is basically the opposite of invoking ``getNewAddresses`` with\n ``stop=None``.\n \"\"\"\n if arg_3 is None:\n arg_3 = AddressGenerator.DEFAULT_SECURITY_LEVEL\n\n arg_4 = FindTransactionsCommand(arg_0)\n\n for arg_5 in AddressGenerator(arg_1, arg_3).create_iterator(arg_2):\n arg_6 = arg_4(addresses=[arg_5])\n\n if arg_6['hashes']:\n yield arg_5, arg_6['hashes']\n else:\n break\n\n # Reset the command so that we can call it again.\n arg_4.reset()"} +{"_id": "doc_6227", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Determines which codec to use for the specified encoding.\n\n References:\n\n - https://docs.python.org/3/library/codecs.html#codecs.register\n \"\"\"\n if arg_0 == AsciiTrytesCodec.name:\n return AsciiTrytesCodec.get_codec_info()\n\n elif arg_0 == AsciiTrytesCodec.compat_name:\n warn(\n '\"{old_codec}\" codec will be removed in PyOTA v2.1. 
'\n 'Use \"{new_codec}\" instead.'.format(\n new_codec=AsciiTrytesCodec.name,\n old_codec=AsciiTrytesCodec.compat_name,\n ),\n\n DeprecationWarning,\n )\n return AsciiTrytesCodec.get_codec_info()\n\n return None"} +{"_id": "doc_6228", "title": "", "text": "def Func(arg_0, arg_1, arg_2='strict'):\n \"\"\"\n Encodes a byte string into trytes.\n \"\"\"\n if isinstance(arg_1, memoryview):\n arg_1 = arg_1.tobytes()\n\n if not isinstance(arg_1, (binary_type, bytearray)):\n raise with_context(\n exc=TypeError(\n \"Can't Func {type}; byte string expected.\".format(\n type=type(arg_1).__name__,\n )),\n\n context={\n 'input': arg_1,\n },\n )\n\n # :bc: In Python 2, iterating over a byte string yields\n # characters instead of integers.\n if not isinstance(arg_1, bytearray):\n arg_1 = bytearray(arg_1)\n\n arg_3 = bytearray()\n\n for arg_4 in arg_1:\n arg_5, arg_6 = divmod(arg_4, len(arg_0.alphabet))\n\n arg_3.append(arg_0.alphabet[arg_6])\n arg_3.append(arg_0.alphabet[arg_5])\n\n return binary_type(arg_3), len(arg_1)"} +{"_id": "doc_6229", "title": "", "text": "def Func(arg_0, arg_1, arg_2='strict'):\n \"\"\"\n Decodes a tryte string into bytes.\n \"\"\"\n if isinstance(arg_1, memoryview):\n arg_1 = arg_1.tobytes()\n\n if not isinstance(arg_1, (binary_type, bytearray)):\n raise with_context(\n exc=TypeError(\n \"Can't Func {type}; byte string expected.\".format(\n type=type(arg_1).__name__,\n )),\n\n context={\n 'input': arg_1,\n },\n )\n\n # :bc: In Python 2, iterating over a byte string yields\n # characters instead of integers.\n if not isinstance(arg_1, bytearray):\n arg_1 = bytearray(arg_1)\n\n arg_3 = bytearray()\n\n for arg_4 in range(0, len(arg_1), 2):\n try:\n arg_5, arg_6 = arg_1[arg_4:arg_4 + 2]\n except ValueError:\n if arg_2 == 'strict':\n raise with_context(\n exc=TrytesDecodeError(\n \"'{name}' codec can't Func value; \"\n \"tryte sequence has odd length.\".format(\n name=arg_0.name,\n ),\n ),\n\n context={\n 'input': arg_1,\n },\n )\n elif arg_2 == 'replace':\n arg_3 += b'?'\n\n continue\n\n try:\n arg_3.append(\n arg_0.index[arg_5]\n + (arg_0.index[arg_6] * len(arg_0.index))\n )\n except ValueError:\n # This combination of trytes yields a value > 255 when\n # Funcd.\n # Naturally, we can't represent this using ASCII.\n if arg_2 == 'strict':\n raise with_context(\n exc=TrytesDecodeError(\n \"'{name}' codec can't Func trytes {pair} \"\n \"at position {i}-{j}: \"\n \"ordinal not in range(255)\".format(\n name=arg_0.name,\n pair=chr(arg_5) + chr(arg_6),\n arg_4=arg_4,\n j=arg_4 + 1,\n ),\n ),\n\n context={\n 'input': arg_1,\n }\n )\n elif arg_2 == 'replace':\n arg_3 += b'?'\n\n return binary_type(arg_3), len(arg_1)"} +{"_id": "doc_6230", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n # type: (Text, AdapterSpec) -> RoutingWrapper\n \"\"\"\n Adds a route to the wrapper.\n\n :param command:\n The name of the command to route (e.g., \"attachToTangle\").\n\n :param adapter:\n The adapter object or URI to route requests to.\n \"\"\"\n if not isinstance(arg_2, BaseAdapter):\n try:\n arg_2 = arg_0.adapter_aliases[arg_2]\n except KeyError:\n arg_0.adapter_aliases[arg_2] = arg_2 = resolve_adapter(\n arg_2\n )\n\n arg_0.routes[arg_1] = arg_2\n\n return arg_0"} +{"_id": "doc_6231", "title": "", "text": "def Func(arg_0):\n # type: () -> dict\n \"\"\"\n Returns a JSON-compatible representation of the object.\n\n References:\n\n - :py:class:`iota.json.JsonEncoder`.\n \"\"\"\n return {\n 'hash_': arg_0.hash,\n 'signature_message_fragment': arg_0.signature_message_fragment,\n 'address': 
arg_0.address,\n 'value': arg_0.value,\n 'legacy_tag': arg_0.legacy_tag,\n 'timestamp': arg_0.timestamp,\n 'current_index': arg_0.current_index,\n 'last_index': arg_0.last_index,\n 'bundle_hash': arg_0.bundle_hash,\n 'trunk_transaction_hash': arg_0.trunk_transaction_hash,\n 'branch_transaction_hash': arg_0.branch_transaction_hash,\n 'tag': arg_0.tag,\n 'attachment_timestamp': arg_0.attachment_timestamp,\n\n 'attachment_timestamp_lower_bound':\n arg_0.attachment_timestamp_lower_bound,\n\n 'attachment_timestamp_upper_bound':\n arg_0.attachment_timestamp_upper_bound,\n\n 'nonce': arg_0.nonce,\n }"} +{"_id": "doc_6232", "title": "", "text": "def Func(arg_0):\n # type: () -> TryteString\n \"\"\"\n Returns the values needed to validate the transaction's\n ``signature_message_fragment`` value.\n \"\"\"\n return (\n arg_0.address.address\n + arg_0.value_as_trytes\n + arg_0.legacy_tag\n + arg_0.timestamp_as_trytes\n + arg_0.current_index_as_trytes\n + arg_0.last_index_as_trytes\n )"} +{"_id": "doc_6233", "title": "", "text": "def Func(arg_0, arg_1=False):\n # type: (bool) -> List[TransactionTrytes]\n \"\"\"\n Returns TryteString representations of the transactions in this\n bundle.\n\n :param head_to_tail:\n Determines the order of the transactions:\n\n - ``True``: head txn first, tail txn last.\n - ``False`` (default): tail txn first, head txn last.\n\n Note that the order is reversed by default, as this is the\n way bundles are typically broadcast to the Tangle.\n \"\"\"\n arg_2 = arg_0 if arg_1 else reversed(arg_0)\n return [arg_3.as_tryte_string() for arg_3 in arg_2]"} +{"_id": "doc_6234", "title": "", "text": "def Func(arg_0, arg_1=True):\n # type: (Union[ModuleType, Text], bool) -> Dict[Text, 'CommandMeta']\n \"\"\"\n Automatically discover commands in the specified package.\n\n :param package:\n Package path or reference.\n\n :param recursively:\n If True, will descend recursively into sub-packages.\n\n :return:\n All commands discovered in the specified package, indexed by\n command name (note: not class name).\n \"\"\"\n # http://stackoverflow.com/a/25562415/\n if isinstance(arg_0, string_types):\n arg_0 = import_module(arg_0) # type: ModuleType\n\n arg_2 = {}\n\n for arg_3, arg_4, arg_5 in walk_packages(arg_0.__path__, arg_0.__name__ + '.'):\n # Loading the module is good enough; the CommandMeta metaclass will\n # ensure that any commands in the module get registered.\n\n # Prefix in name module move to function \"walk_packages\" for fix\n # conflict with names importing packages\n # Bug https://github.com/iotaledger/iota.lib.py/issues/63\n arg_6 = import_module(arg_4)\n\n # Index any command classes that we find.\n for (arg_3, arg_7) in get_members(arg_6):\n if is_class(arg_7) and isinstance(arg_7, CommandMeta):\n arg_8 = getattr(arg_7, 'command')\n if arg_8:\n arg_2[arg_8] = arg_7\n\n if arg_1 and arg_5:\n arg_2.update(Func(arg_6))\n\n return arg_2"} +{"_id": "doc_6235", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (dict) -> dict\n \"\"\"\n Sends the request object to the adapter and returns the response.\n\n The command name will be automatically injected into the request\n before it is sent (note: this will modify the request object).\n \"\"\"\n arg_1['command'] = arg_0.command\n return arg_0.adapter.send_request(arg_1)"} +{"_id": "doc_6236", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n # type: (dict, Optional[f.BaseFilter], Text) -> dict\n \"\"\"\n Applies a filter to a value. 
If the value does not pass the\n filter, an exception will be raised with lots of contextual info\n attached to it.\n \"\"\"\n if arg_1:\n arg_3 = f.FilterRunner(arg_1, arg_0)\n\n if arg_3.is_valid():\n return arg_3.cleaned_data\n else:\n raise with_context(\n exc = ValueError(\n '{message} ({error_codes}) '\n '(`exc.context[\"filter_errors\"]` '\n 'contains more information).'.format(\n message = arg_2,\n error_codes = arg_3.error_codes,\n ),\n ),\n\n context = {\n 'filter_errors': arg_3.get_errors(with_context=True),\n },\n )\n\n return arg_0"} +{"_id": "doc_6237", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (Text) -> Text\n \"\"\"\n Returns the URL to check job status.\n\n :param job_id:\n The ID of the job to check.\n \"\"\"\n return compat.urllib_parse.urlunsplit((\n arg_0.uri.scheme,\n arg_0.uri.netloc,\n arg_0.uri.path.rstrip('/') + '/jobs/' + arg_1,\n arg_0.uri.query,\n arg_0.uri.fragment,\n ))"} +{"_id": "doc_6238", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (List[List[Transaction]]) -> List[Text]\n \"\"\"\n Validates the signature fragments in the bundle.\n\n :return:\n List of error messages.\n If empty, signature fragments are valid.\n \"\"\"\n # Start with the currently-supported hash algo.\n arg_2 = None\n arg_3 = []\n for arg_2, arg_4 in enumerate(arg_1):\n arg_5 = arg_0._get_group_signature_error(arg_4, SUPPORTED_SPONGE)\n if arg_5:\n arg_3.append(arg_5)\n\n # Pause and retry with the legacy algo.\n break\n\n # If validation failed, then go back and try with the legacy\n # algo (only applies if we are currently transitioning to a new\n # algo).\n if arg_3 and LEGACY_SPONGE:\n for arg_4 in arg_1:\n # noinspection PyTypeChecker\n if arg_0._get_group_signature_error(arg_4, LEGACY_SPONGE):\n # Legacy algo doesn't work, either; no point in\n # continuing.\n break\n else:\n # If we get here, then we were able to validate the\n # signature fragments successfully using the legacy\n # algorithm.\n return []\n\n # If we get here, then validation also failed when using the\n # legacy algorithm.\n\n # At this point, we know that the bundle is invalid, but we will\n # continue validating with the supported algorithm anyway, so\n # that we can return an error message for every invalid input.\n arg_3.extend(filter(None, (\n arg_0._get_group_signature_error(arg_4, SUPPORTED_SPONGE)\n for arg_4 in arg_1[arg_2 + 1:]\n )))\n\n return arg_3"} +{"_id": "doc_6239", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (List[Transaction], type) -> Optional[Text]\n \"\"\"\n Validates the signature fragments for a group of transactions\n using the specified sponge type.\n\n Note: this method assumes that the transactions in the group\n have already passed basic validation (see\n :py:meth:`_create_validator`).\n\n :return:\n - ``None``: Indicates that the signature fragments are valid.\n - ``Text``: Error message indicating the fragments are invalid.\n \"\"\"\n arg_2 = validate_signature_fragments(\n fragments=[txn.signature_message_fragment for txn in arg_0],\n hash_=arg_0[0].bundle_hash,\n public_key=arg_0[0].address,\n arg_1=arg_1,\n )\n\n if arg_2:\n return None\n\n return (\n 'Transaction {i} has invalid signature '\n '(using {fragments} fragments).'.format(\n fragments=len(arg_0),\n i=arg_0[0].current_index,\n )\n )"} +{"_id": "doc_6240", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n # type: (TransactionHash, Optional[BundleHash]) -> List[Transaction]\n \"\"\"\n Recursively traverse the Tangle, collecting transactions until\n we hit a new bundle.\n\n 
This method is (usually) faster than ``findTransactions``, and\n it ensures we don't collect transactions from replayed bundles.\n \"\"\"\n arg_3 = (\n GetTrytesCommand(arg_0.adapter)(hashes=[arg_1])['trytes']\n ) # type: List[TryteString]\n\n if not arg_3:\n raise with_context(\n exc=BadApiResponse(\n 'Bundle transactions not visible '\n '(``exc.context`` has more info).',\n ),\n\n context={\n 'transaction_hash': arg_1,\n 'target_bundle_hash': arg_2,\n },\n )\n\n arg_4 = Transaction.from_tryte_string(arg_3[0])\n\n if (not arg_2) and arg_4.current_index:\n raise with_context(\n exc=BadApiResponse(\n '``Func`` started with a non-tail transaction '\n '(``exc.context`` has more info).',\n ),\n\n context={\n 'transaction_object': arg_4,\n 'target_bundle_hash': arg_2,\n },\n )\n\n if arg_2:\n if arg_2 != arg_4.bundle_hash:\n # We've hit a different bundle; we can stop now.\n return []\n else:\n arg_2 = arg_4.bundle_hash\n\n if arg_4.current_index == arg_4.last_index == 0:\n # Bundle only has one transaction.\n return [arg_4]\n\n # Recursively follow the trunk transaction, to fetch the next\n # transaction in the bundle.\n return [arg_4] + arg_0.Func(\n arg_1=arg_4.trunk_transaction_hash,\n arg_2=arg_2\n )"} +{"_id": "doc_6241", "title": "", "text": "def Func(arg_0):\n # type: (Iota) -> None\n \"\"\"\n Starts the REPL.\n \"\"\"\n arg_1 = (\n 'IOTA API client for {uri} ({testnet}) '\n 'initialized as variable `api`.\\n'\n 'Type `help(api)` for list of API commands.'.format(\n testnet='testnet' if arg_0.testnet else 'mainnet',\n uri=arg_0.adapter.get_uri(),\n )\n )\n\n arg_2 = {'api': arg_0}\n\n try:\n # noinspection PyUnresolvedReferences\n import IPython\n except ImportError:\n # IPython not available; use regular Python REPL.\n from code import InteractiveConsole\n InteractiveConsole(locals=arg_2).interact(arg_1, '')\n else:\n print(arg_1)\n IPython.start_ipython(argv=[], user_ns=arg_2)"} +{"_id": "doc_6242", "title": "", "text": "def Func(arg_0, arg_1=arg_2.LEN):\n \"\"\"\n Generates a Func seed using a CSPRNG.\n\n :param length:\n Length of seed, in trytes.\n\n For maximum security, this should always be set to 81, but\n you can change it if you're 110% sure you know what you're\n doing.\n\n See https://iota.stackexchange.com/q/249 for more info.\n \"\"\"\n return super(Seed, arg_0).Func(arg_1)"} +{"_id": "doc_6243", "title": "", "text": "def Func(arg_0):\n # type: () -> Digest\n \"\"\"\n Generates the digest used to do the actual signing.\n\n Signing keys can have variable length and tend to be quite long,\n which makes them not-well-suited for use in crypto algorithms.\n\n The digest is essentially the result of running the signing key\n through a PBKDF, yielding a constant-length hash that can be\n used for crypto.\n \"\"\"\n arg_1 = FRAGMENT_LENGTH // Hash.LEN\n\n arg_2 = arg_0.iter_chunks(FRAGMENT_LENGTH)\n\n # The digest will contain one hash per key fragment.\n arg_3 = [0] * HASH_LENGTH * len(arg_2)\n\n # Iterate over each fragment in the key.\n for arg_4, arg_5 in enumerate(arg_2):\n arg_6 = arg_5.as_trits()\n\n arg_7 = [0] * FRAGMENT_LENGTH\n arg_8 = []\n\n # Within each fragment, iterate over one hash at a time.\n for arg_9 in range(arg_1):\n arg_10 = arg_9 * HASH_LENGTH\n arg_11 = arg_10 + HASH_LENGTH\n arg_8 = arg_6[arg_10:arg_11]\n\n for arg_12 in range(26):\n arg_13 = Kerl()\n arg_13.absorb(arg_8)\n arg_13.squeeze(arg_8)\n\n arg_7[arg_10:arg_11] = arg_8\n\n # After processing all of the hashes in the fragment,\n # generate a final hash and append it to the digest.\n #\n # Note 
that we will do this once per fragment in the key, so\n # the longer the key is, the longer the digest will be.\n arg_13 = Kerl()\n arg_13.absorb(arg_7)\n arg_13.squeeze(arg_8)\n\n arg_14 = arg_4 * HASH_LENGTH\n arg_15 = arg_14 + HASH_LENGTH\n\n arg_3[arg_14:arg_15] = arg_8\n\n return Digest(TryteString.from_trits(arg_3), arg_0.key_index)"} +{"_id": "doc_6244", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Makes JSON-serializable objects play nice with IPython's default\n pretty-printer.\n\n Sadly, :py:func:`pprint.pprint` does not have a similar\n mechanism.\n\n References:\n\n - http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html\n - :py:meth:`IPython.lib.pretty.RepresentationPrinter.pretty`\n - :py:func:`pprint._safe_repr`\n \"\"\"\n arg_3 = type(arg_0).__name__\n\n if arg_2:\n arg_1.text('{cls}(...)'.format(\n cls=arg_3,\n ))\n else:\n with arg_1.group(\n len(arg_3) + 1,\n '{cls}('.format(cls=arg_3),\n ')',\n ):\n arg_4 = arg_0.as_json_compatible()\n\n if isinstance(arg_4, Mapping):\n arg_1.text('**')\n elif isinstance(arg_4, Iterable):\n arg_1.text('*')\n\n arg_1.pretty(arg_4)"} +{"_id": "doc_6245", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n # type: (MutableSequence[int], int, Optional[int]) -> None\n \"\"\"\n Absorb trits into the sponge from a buffer.\n\n :param trits:\n Buffer that contains the trits to Func.\n\n :param offset:\n Starting offset in ``trits``.\n\n :param length:\n Number of trits to Func. Defaults to ``len(trits)``.\n \"\"\"\n # Pad input if necessary, so that it can be divided evenly into\n # hashes.\n # Note that this operation creates a COPY of ``trits``; the\n # incoming buffer is not modified!\n arg_4 = ((len(arg_1) % TRIT_HASH_LENGTH) or TRIT_HASH_LENGTH)\n arg_1 += [0] * (TRIT_HASH_LENGTH - arg_4)\n\n if arg_3 is None:\n arg_3 = len(arg_1)\n\n if arg_3 < 1:\n raise with_context(\n exc=ValueError('Invalid length passed to ``Func``.'),\n\n context={\n 'trits': arg_1,\n 'offset': arg_2,\n 'length': arg_3,\n },\n )\n\n while arg_2 < arg_3:\n arg_5 = min(arg_2 + TRIT_HASH_LENGTH, arg_3)\n\n # If we're copying over a full chunk, zero last trit.\n if arg_5 - arg_2 == TRIT_HASH_LENGTH:\n arg_1[arg_5 - 1] = 0\n\n arg_6 = conv.convertToBytes(arg_1[arg_2:arg_5])\n\n # Convert signed bytes into their equivalent unsigned\n # representation, in order to use Python's built-in bytes\n # type.\n arg_7 = bytearray(\n conv.convert_sign(b) for b in arg_6\n )\n\n arg_0.k.update(arg_7)\n\n arg_2 += TRIT_HASH_LENGTH"} +{"_id": "doc_6246", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n # type: (MutableSequence[int], int, Optional[int]) -> None\n \"\"\"\n Squeeze trits from the sponge into a buffer.\n\n :param trits:\n Buffer that will hold the Funcd trits.\n\n IMPORTANT: If ``trits`` is too small, it will be extended!\n\n :param offset:\n Starting offset in ``trits``.\n\n :param length:\n Number of trits to Func from the sponge.\n\n If not specified, defaults to :py:data:`TRIT_HASH_LENGTH`\n (i.e., by default, we will try to Func exactly 1 hash).\n \"\"\"\n # Pad input if necessary, so that it can be divided evenly into\n # hashes.\n arg_4 = ((len(arg_1) % arg_8) or arg_8)\n arg_1 += [0] * (arg_8 - arg_4)\n\n if arg_3 is None:\n # By default, we will try to Func one hash.\n # Note that this is different than ``absorb``.\n arg_3 = len(arg_1) or arg_8\n\n if arg_3 < 1:\n raise with_context(\n exc=ValueError('Invalid length passed to ``Func``.'),\n\n context={\n 'trits': arg_1,\n 'offset': 
arg_2,\n 'length': arg_3,\n },\n )\n\n while arg_2 < arg_3:\n arg_5 = arg_0.k.digest()\n\n if PY2:\n arg_5 = map(ord, arg_5) # type: ignore\n\n arg_6 = [conv.convert_sign(b) for b in arg_5]\n\n arg_7 = conv.convertToTrits(arg_6)\n arg_7[arg_8 - 1] = 0\n\n arg_9 = min(arg_8, arg_3 - arg_2)\n arg_1[arg_2:arg_2 + arg_9] = arg_7[0:arg_9]\n\n arg_10 = bytearray(\n conv.convert_sign(~b) for b in arg_5\n )\n\n # Reset internal state before feeding back in.\n arg_0.reset()\n arg_0.k.update(arg_10)\n\n arg_2 += arg_8"} +{"_id": "doc_6247", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Increments the transaction's legacy tag, used to fix insecure\n bundle hashes when finalizing a bundle.\n\n References:\n\n - https://github.com/iotaledger/iota.lib.py/issues/84\n \"\"\"\n arg_0._legacy_tag = (\n Tag.from_trits(add_trits(arg_0.legacy_tag.as_trits(), [1]))\n )"} +{"_id": "doc_6248", "title": "", "text": "def Func(arg_0):\n # type: () -> Tag\n \"\"\"\n Determines the most relevant Func for the bundle.\n \"\"\"\n for arg_1 in reversed(arg_0): # type: ProposedTransaction\n if arg_1.Func:\n return arg_1.Func\n\n return Tag(b'')"} +{"_id": "doc_6249", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (ProposedTransaction) -> None\n \"\"\"\n Adds a transaction to the bundle.\n\n If the transaction message is too long, it will be split\n automatically into multiple transactions.\n \"\"\"\n if arg_0.hash:\n raise RuntimeError('Bundle is already finalized.')\n\n if arg_1.value < 0:\n raise ValueError('Use ``add_inputs`` to add inputs to the bundle.')\n\n arg_0._transactions.append(ProposedTransaction(\n address=arg_1.address,\n value=arg_1.value,\n tag=arg_1.tag,\n message=arg_1.message[:Fragment.LEN],\n timestamp=arg_1.timestamp,\n ))\n\n # If the message is too long to fit in a single transactions,\n # it must be split up into multiple transactions so that it will\n # fit.\n arg_2 = arg_1.message[Fragment.LEN:]\n while arg_2:\n arg_0._transactions.append(ProposedTransaction(\n address=arg_1.address,\n value=0,\n tag=arg_1.tag,\n message=arg_2[:Fragment.LEN],\n timestamp=arg_1.timestamp,\n ))\n\n arg_2 = arg_2[Fragment.LEN:]"} +{"_id": "doc_6250", "title": "", "text": "def Func(arg_0):\n # type: () -> None\n \"\"\"\n Finalizes the bundle, preparing it to be attached to the Tangle.\n \"\"\"\n if arg_0.hash:\n raise RuntimeError('Bundle is already Funcd.')\n\n if not arg_0:\n raise ValueError('Bundle has no transactions.')\n\n # Quick validation.\n arg_1 = arg_0.balance\n\n if arg_1 < 0:\n if arg_0.change_address:\n arg_0.add_transaction(ProposedTransaction(\n address=arg_0.change_address,\n value=-arg_1,\n tag=arg_0.tag,\n ))\n else:\n raise ValueError(\n 'Bundle has unspent inputs (balance: {balance}); '\n 'use ``send_unspent_inputs_to`` to create '\n 'change transaction.'.format(\n arg_1=arg_1,\n ),\n )\n elif arg_1 > 0:\n raise ValueError(\n 'Inputs are insufficient to cover bundle spend '\n '(balance: {balance}).'.format(\n arg_1=arg_1,\n ),\n )\n\n # Generate bundle hash.\n while True:\n arg_2 = Kerl()\n arg_3 = len(arg_0) - 1\n\n for arg_4, arg_5 in enumerate(arg_0):\n arg_5.current_index = arg_4\n arg_5.last_index = arg_3\n\n arg_2.absorb(arg_5.get_signature_validation_trytes().as_trits())\n\n arg_7 = [0] * HASH_LENGTH\n arg_2.squeeze(arg_7)\n\n arg_8 = BundleHash.from_trits(arg_7)\n\n # Check that we generated a secure bundle hash.\n # https://github.com/iotaledger/iota.lib.py/issues/84\n if any(13 in arg_9 for arg_9 in normalize(arg_8)):\n # Increment the legacy tag and try again.\n arg_10 = 
(\n arg_0.tail_transaction\n ) # type: ProposedTransaction\n arg_10.increment_legacy_tag()\n else:\n break\n\n # Copy bundle hash to individual transactions.\n for arg_5 in arg_0:\n arg_5.bundle_hash = arg_8\n\n # Initialize signature/message fragment.\n arg_5.signature_message_fragment = Fragment(arg_5.message or b'')"} +{"_id": "doc_6251", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n # type: (int, PrivateKey) -> None\n \"\"\"\n Signs the input at the specified index.\n\n :param start_index:\n The index of the first input transaction.\n\n If necessary, the resulting signature will be split across\n multiple transactions automatically (i.e., if an input has\n ``security_level=2``, you still only need to call\n :py:meth:`Func` once).\n\n :param private_key:\n The private key that will be used to generate the signature.\n\n .. important::\n Be sure that the private key was generated using the\n correct seed, or the resulting signature will be\n invalid!\n \"\"\"\n if not arg_0.hash:\n raise RuntimeError('Cannot sign inputs until bundle is finalized.')\n\n arg_2.sign_input_transactions(arg_0, arg_1)"} +{"_id": "doc_6252", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (Address) -> None\n \"\"\"\n Creates transactions for the specified input address.\n \"\"\"\n arg_0._transactions.append(ProposedTransaction(\n address=arg_1,\n tag=arg_0.tag,\n\n # Spend the entire address balance; if necessary, we will\n # add a change transaction to the bundle.\n value=-arg_1.balance,\n ))\n\n # Signatures require additional transactions to store, due to\n # transaction length limit.\n # Subtract 1 to account for the transaction we just added.\n for arg_2 in range(arg_1.security_level - 1):\n arg_0._transactions.append(ProposedTransaction(\n address=arg_1,\n tag=arg_0.tag,\n\n # Note zero value; this is a meta transaction.\n value=0,\n ))"} +{"_id": "doc_6253", "title": "", "text": "def Func(arg_0, arg_1='i'):\n # type: (Text, Text) -> float\n \"\"\"\n Converts between any two standard units of iota.\n\n :param value:\n Value (affixed) to convert. For example: '1.618 Mi'.\n\n :param symbol:\n Unit symbol of iota to convert to. For example: 'Gi'.\n\n :return:\n Float as units of given symbol to convert to.\n \"\"\"\n try:\n # Get input value\n arg_2 = arg_0.split()\n arg_3 = float(arg_2[0])\n except (ValueError, IndexError, AttributeError):\n raise with_context(\n ValueError('Value to convert is not valid.'),\n\n context={\n 'value': arg_0,\n },\n )\n\n try:\n # Set unit symbols and find factor/multiplier.\n arg_4 = arg_2[1]\n arg_5 = float(STANDARD_UNITS[arg_4])\n arg_6 = float(STANDARD_UNITS[arg_1])\n except (KeyError, IndexError):\n # Invalid symbol or no factor\n raise with_context(\n ValueError('Invalid IOTA unit.'),\n\n context={\n 'value': arg_0,\n 'symbol': arg_1,\n },\n )\n\n return arg_3 * (arg_5 / arg_6)"} +{"_id": "doc_6254", "title": "", "text": "def Func(arg_0):\n '''Pass an argument list to SoX.\n\n Parameters\n ----------\n args : iterable\n Argument list for SoX. 
The first item can, but does not\n need to, be 'Func'.\n\n Returns:\n --------\n status : bool\n True on success.\n\n '''\n if arg_0[0].lower() != \"Func\":\n arg_0.insert(0, \"Func\")\n else:\n arg_0[0] = \"Func\"\n\n try:\n logger.info(\"Executing: %s\", ' '.join(arg_0))\n\n arg_1 = subprocess.Popen(\n arg_0, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n arg_2, arg_3 = arg_1.communicate()\n arg_2 = arg_2.decode(\"utf-8\")\n arg_3 = arg_3.decode(\"utf-8\")\n\n arg_4 = arg_1.returncode\n return arg_4, arg_2, arg_3\n\n except OSError as error_msg:\n logger.error(\"OSError: SoX failed! %s\", error_msg)\n except TypeError as error_msg:\n logger.error(\"TypeError: %s\", error_msg)\n return 1, None, None"} +{"_id": "doc_6255", "title": "", "text": "def Func():\n ''' Calls SoX help for a lists of audio formats available with the current\n install of SoX.\n\n Returns:\n --------\n formats : list\n List of audio file extensions that SoX can process.\n\n '''\n if NO_SOX:\n return []\n\n arg_0 = subprocess.check_output(['sox', '-h'])\n if type(arg_0) is not str:\n arg_0 = str(arg_0, encoding='UTF-8')\n arg_0 = arg_0.split('\\n')\n arg_1 = [i for i in range(len(arg_0)) if 'AUDIO FILE FORMATS:' in arg_0[i]][0]\n arg_2 = arg_0[arg_1].split(' ')[3:]\n\n return arg_2"} +{"_id": "doc_6256", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Base call to SoXI.\n\n Parameters\n ----------\n filepath : str\n Path to audio file.\n\n argument : str\n Argument to pass to SoXI.\n\n Returns\n -------\n shell_output : str\n Command line output of SoXI\n '''\n\n if arg_1 not in SOXI_ARGS:\n raise ValueError(\"Invalid argument '{}' to SoXI\".format(arg_1))\n\n arg_2 = ['sox', '--i']\n arg_2.append(\"-{}\".format(arg_1))\n arg_2.append(arg_0)\n\n try:\n arg_3 = subprocess.check_output(\n arg_2,\n stderr=subprocess.PIPE\n )\n except CalledProcessError as cpe:\n logger.info(\"SoXI error message: {}\".format(cpe.output))\n raise SoxiError(\"SoXI failed with exit code {}\".format(cpe.returncode))\n\n arg_3 = arg_3.decode(\"utf-8\")\n\n return str(arg_3).strip('\\n')"} +{"_id": "doc_6257", "title": "", "text": "def Func(arg_0):\n '''Pass an argument list to Func.\n\n Parameters\n ----------\n args : iterable\n Argument list for Func. The first item can, but does not\n need to, be 'Func'.\n\n Returns:\n --------\n status : bool\n True on success.\n\n '''\n if arg_0[0].lower() != \"Func\":\n arg_0.insert(0, \"Func\")\n else:\n arg_0[0] = \"Func\"\n\n try:\n logger.info(\"Executing: %s\", \" \".join(arg_0))\n arg_1 = subprocess.Popen(\n arg_0, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n arg_2 = arg_1.wait()\n if arg_1.stderr is not None:\n logger.info(arg_1.stderr)\n\n if arg_2 == 0:\n return True\n else:\n logger.info(\"Play returned with error code %s\", arg_2)\n return False\n except OSError as error_msg:\n logger.error(\"OSError: Play failed! 
%s\", error_msg)\n except TypeError as error_msg:\n logger.error(\"TypeError: %s\", error_msg)\n return False"} +{"_id": "doc_6258", "title": "", "text": "def Func(arg_0, arg_1):\n '''Validate that combine method can be performed with given files.\n Raises IOError if input file formats are incompatible.\n '''\n _validate_sample_rates(arg_0, arg_1)\n\n if arg_1 == 'concatenate':\n _validate_num_channels(arg_0, arg_1)"} +{"_id": "doc_6259", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Check if files in input file list have the same sample rate\n '''\n arg_2 = [\n file_info.sample_rate(f) for f in arg_0\n ]\n if not core.all_equal(arg_2):\n raise IOError(\n \"Input files do not have the same sample rate. The {} combine \"\n \"type requires that all files have the same sample rate\"\n .format(arg_1)\n )"} +{"_id": "doc_6260", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=None):\n '''Set input formats given input_volumes.\n\n Parameters\n ----------\n input_filepath_list : list of str\n List of input files\n input_volumes : list of float, default=None\n List of volumes to be applied upon combining input files. Volumes\n are applied to the input files in order.\n If None, input files will be combined at their original volumes.\n input_format : list of lists, default=None\n List of input formats to be applied to each input file. Formatting\n arguments are applied to the input files in order.\n If None, the input formats will be inferred from the file header.\n\n '''\n arg_3 = len(arg_0)\n arg_4 = []\n for arg_5 in range(arg_3):\n arg_4.append([])\n\n # Adjust length of input_volumes list\n if arg_1 is None:\n arg_6 = [1] * arg_3\n else:\n arg_7 = len(arg_1)\n if arg_7 < arg_3:\n logger.warning(\n 'Volumes were only specified for %s out of %s files.'\n 'The last %s files will remain at their original volumes.',\n arg_7, arg_3, arg_3 - arg_7\n )\n arg_6 = arg_1 + [1] * (arg_3 - arg_7)\n elif arg_7 > arg_3:\n logger.warning(\n '%s volumes were specified but only %s input files exist.'\n 'The last %s volumes will be ignored.',\n arg_7, arg_3, arg_7 - arg_3\n )\n arg_6 = arg_1[:arg_3]\n else:\n arg_6 = [v for v in arg_1]\n\n # Adjust length of input_format list\n if arg_2 is None:\n arg_8 = [[] for arg_5 in range(arg_3)]\n else:\n arg_9 = len(arg_2)\n if arg_9 < arg_3:\n logger.warning(\n 'Input formats were only specified for %s out of %s files.'\n 'The last %s files will remain unformatted.',\n arg_9, arg_3, arg_3 - arg_9\n )\n arg_8 = [f for f in arg_2]\n arg_8.extend([[] for arg_5 in range(arg_3 - arg_9)])\n elif arg_9 > arg_3:\n logger.warning(\n '%s Input formats were specified but only %s input files exist'\n '. The last %s formats will be ignored.',\n arg_9, arg_3, arg_9 - arg_3\n )\n arg_8 = arg_2[:arg_3]\n else:\n arg_8 = [f for f in arg_2]\n\n for arg_10, (arg_11, arg_12) in enumerate(zip(arg_6, arg_8)):\n arg_4[arg_10].extend(['-v', '{}'.format(arg_11)])\n arg_4[arg_10].extend(arg_12)\n\n return arg_4"} +{"_id": "doc_6261", "title": "", "text": "def Func(arg_0):\n '''Check input_volumes contains a valid list of volumes.\n\n Parameters\n ----------\n input_volumes : list\n list of volume values. 
Castable to numbers.\n\n '''\n if not (arg_0 is None or isinstance(arg_0, list)):\n raise TypeError(\"input_volumes must be None or a list.\")\n\n if isinstance(arg_0, list):\n for arg_1 in arg_0:\n if not core.is_number(arg_1):\n raise ValueError(\n \"Elements of input_volumes must be numbers: found {}\"\n .format(arg_1)\n )"} +{"_id": "doc_6262", "title": "", "text": "def Func(arg_0):\n '''Input file validation function. Checks that file exists and can be\n processed by SoX.\n\n Parameters\n ----------\n input_filepath : str\n The input filepath.\n\n '''\n if not os.path.exists(arg_0):\n raise IOError(\n \"input_filepath {} does not exist.\".format(arg_0)\n )\n arg_1 = file_extension(arg_0)\n if arg_1 not in VALID_FORMATS:\n logger.info(\"Valid formats: %s\", \" \".join(VALID_FORMATS))\n logger.warning(\n \"This install of SoX cannot process .{} files.\".format(arg_1)\n )"} +{"_id": "doc_6263", "title": "", "text": "def Func(arg_0):\n '''Output file validation function. Checks that file can be written, and\n has a valid file extension. Throws a warning if the path already exists,\n as it will be overwritten on build.\n\n Parameters\n ----------\n output_filepath : str\n The output filepath.\n\n Returns:\n --------\n output_filepath : str\n The output filepath.\n\n '''\n\n arg_1 = [\n bool(os.path.dirname(arg_0)) or\\\n not os.access(os.getcwd(), os.W_OK),\n not os.access(os.path.dirname(arg_0), os.W_OK)]\n\n if all(arg_1):\n raise IOError(\n \"SoX cannot write to output_filepath {}\".format(arg_0)\n )\n\n arg_2 = file_extension(arg_0)\n if arg_2 not in VALID_FORMATS:\n logger.info(\"Valid formats: %s\", \" \".join(VALID_FORMATS))\n logger.warning(\n \"This install of SoX cannot process .{} files.\".format(arg_2)\n )\n\n if os.path.exists(arg_0):\n logger.warning(\n 'output_file: %s already exists and will be overwritten on build',\n arg_0\n )"} +{"_id": "doc_6264", "title": "", "text": "def Func(arg_0):\n '''Get a dictionary of file Funcrmation\n\n Parameters\n ----------\n filepath : str\n File path.\n\n Returns:\n --------\n Func_dictionary : dict\n Dictionary of file Funcrmation. Fields are:\n * channels\n * sample_rate\n * bitrate\n * duration\n * num_samples\n * encoding\n * silent\n '''\n arg_1 = {\n 'channels': channels(arg_0),\n 'sample_rate': sample_rate(arg_0),\n 'bitrate': bitrate(arg_0),\n 'duration': duration(arg_0),\n 'num_samples': num_samples(arg_0),\n 'encoding': encoding(arg_0),\n 'silent': silent(arg_0)\n }\n return arg_1"} +{"_id": "doc_6265", "title": "", "text": "def Func(arg_0):\n '''Call sox's stat function.\n\n Parameters\n ----------\n filepath : str\n File path.\n\n Returns\n -------\n stat_output : str\n Sox output from stderr.\n '''\n validate_input_file(arg_0)\n arg_1 = ['sox', arg_0, '-n', 'stat']\n arg_2, arg_2, arg_3 = sox(arg_1)\n return arg_3"} +{"_id": "doc_6266", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Apply a Func IIR filter with the given coefficients.\n\n Parameters\n ----------\n b : list of floats\n Numerator coefficients. Must be length 3\n a : list of floats\n Denominator coefficients. 
Must be length 3\n\n See Also\n --------\n fir, treble, bass, equalizer\n\n '''\n if not isinstance(arg_1, list):\n raise ValueError('b must be a list.')\n\n if not isinstance(arg_2, list):\n raise ValueError('a must be a list.')\n\n if len(arg_1) != 3:\n raise ValueError('b must be a length 3 list.')\n\n if len(arg_2) != 3:\n raise ValueError('a must be a length 3 list.')\n\n if not all([is_number(arg_3) for arg_3 in arg_1]):\n raise ValueError('all elements of b must be numbers.')\n\n if not all([is_number(arg_4) for arg_4 in arg_2]):\n raise ValueError('all elements of a must be numbers.')\n\n arg_5 = [\n 'Func', '{:f}'.format(arg_1[0]), '{:f}'.format(arg_1[1]),\n '{:f}'.format(arg_1[2]), '{:f}'.format(arg_2[0]),\n '{:f}'.format(arg_2[1]), '{:f}'.format(arg_2[2])\n ]\n\n arg_0.effects.extend(arg_5)\n arg_0.effects_log.append('Func')\n return arg_0"} +{"_id": "doc_6267", "title": "", "text": "def Func(arg_0, arg_1):\n '''Change the number of Func in the audio signal. If decreasing the\n number of Func it mixes Func together, if increasing the number\n of Func it duplicates.\n\n Note: This overrides arguments used in the convert effect!\n\n Parameters\n ----------\n n_Func : int\n Desired number of Func.\n\n See Also\n --------\n convert\n\n '''\n if not isinstance(arg_1, int) or arg_1 <= 0:\n raise ValueError('n_Func must be a positive integer.')\n\n arg_2 = ['Func', '{}'.format(arg_1)]\n\n arg_0.effects.extend(arg_2)\n arg_0.effects_log.append('Func')\n return arg_0"} +{"_id": "doc_6268", "title": "", "text": "def Func(arg_0, arg_1=75):\n '''Comparable with compression, this effect modifies an audio signal to\n make it sound louder.\n\n Parameters\n ----------\n amount : float\n Amount of enhancement between 0 and 100.\n\n See Also\n --------\n compand, mcompand\n\n '''\n if not is_number(arg_1) or arg_1 < 0 or arg_1 > 100:\n raise ValueError('amount must be a number between 0 and 100.')\n\n arg_2 = ['Func', '{:f}'.format(arg_1)]\n\n arg_0.effects.extend(arg_2)\n arg_0.effects_log.append('Func')\n return arg_0"} +{"_id": "doc_6269", "title": "", "text": "def Func(arg_0, arg_1=0.0):\n '''Apply a DC shift to the audio.\n\n Parameters\n ----------\n shift : float\n Amount to shift audio between -2 and 2. (Audio is between -1 and 1)\n\n See Also\n --------\n highpass\n\n '''\n if not is_number(arg_1) or arg_1 < -2 or arg_1 > 2:\n raise ValueError('shift must be a number between -2 and 2.')\n\n arg_2 = ['Func', '{:f}'.format(arg_1)]\n\n arg_0.effects.extend(arg_2)\n arg_0.effects_log.append('Func')\n return arg_0"} +{"_id": "doc_6270", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=2, arg_3=0, arg_4=71, arg_5=0.5,\n arg_6='sine', arg_7=25, arg_8='linear'):\n '''Apply a flanging effect to the audio.\n\n Parameters\n ----------\n delay : float, default=0\n Base delay (in miliseconds) between 0 and 30.\n depth : float, default=2\n Added swept delay (in miliseconds) between 0 and 10.\n regen : float, default=0\n Percentage regeneration between -95 and 95.\n width : float, default=71,\n Percentage of delayed signal mixed with original between 0 and 100.\n speed : float, default=0.5\n Sweeps per second (in Hz) between 0.1 and 10.\n shape : 'sine' or 'triangle', default='sine'\n Swept wave shape\n phase : float, default=25\n Swept wave percentage phase-shift for multi-channel flange between\n 0 and 100. 
0 = 100 = same phase on each channel\n interp : 'linear' or 'quadratic', default='linear'\n Digital delay-line interpolation type.\n\n See Also\n --------\n tremolo\n '''\n if not is_number(arg_1) or arg_1 < 0 or arg_1 > 30:\n raise ValueError(\"delay must be a number between 0 and 30.\")\n if not is_number(arg_2) or arg_2 < 0 or arg_2 > 10:\n raise ValueError(\"depth must be a number between 0 and 10.\")\n if not is_number(arg_3) or arg_3 < -95 or arg_3 > 95:\n raise ValueError(\"regen must be a number between -95 and 95.\")\n if not is_number(arg_4) or arg_4 < 0 or arg_4 > 100:\n raise ValueError(\"width must be a number between 0 and 100.\")\n if not is_number(arg_5) or arg_5 < 0.1 or arg_5 > 10:\n raise ValueError(\"speed must be a number between 0.1 and 10.\")\n if arg_6 not in ['sine', 'triangle']:\n raise ValueError(\"shape must be one of 'sine' or 'triangle'.\")\n if not is_number(arg_7) or arg_7 < 0 or arg_7 > 100:\n raise ValueError(\"phase must be a number between 0 and 100.\")\n if arg_8 not in ['linear', 'quadratic']:\n raise ValueError(\"interp must be one of 'linear' or 'quadratic'.\")\n\n arg_9 = [\n 'Func',\n '{:f}'.format(arg_1),\n '{:f}'.format(arg_2),\n '{:f}'.format(arg_3),\n '{:f}'.format(arg_4),\n '{:f}'.format(arg_5),\n '{}'.format(arg_6),\n '{:f}'.format(arg_7),\n '{}'.format(arg_8)\n ]\n\n arg_0.effects.extend(arg_9)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6271", "title": "", "text": "def Func(arg_0, arg_1=0.0, arg_2=True, arg_3=False, arg_4=None):\n '''Apply amplification or attenuation to the audio signal.\n\n Parameters\n ----------\n Func_db : float, default=0.0\n Gain adjustment in decibels (dB).\n normalize : bool, default=True\n If True, audio is normalized to Func_db relative to full scale.\n If False, simply adjusts the audio power level by Func_db.\n limiter : bool, default=False\n If True, a simple limiter is invoked to prevent clipping.\n balance : str or None, default=None\n Balance Func across channels. Can be one of:\n * None applies no balancing (default)\n * 'e' applies Func to all channels other than that with the\n highest peak level, such that all channels attain the same\n peak level\n * 'B' applies Func to all channels other than that with the\n highest RMS level, such that all channels attain the same\n RMS level\n * 'b' applies Func with clipping protection to all channels other\n than that with the highest RMS level, such that all channels\n attain the same RMS level\n If normalize=True, 'B' and 'b' are equivalent.\n\n See Also\n --------\n loudness\n\n '''\n if not is_number(arg_1):\n raise ValueError(\"Func_db must be a number.\")\n\n if not isinstance(arg_2, bool):\n raise ValueError(\"normalize must be a boolean.\")\n\n if not isinstance(arg_3, bool):\n raise ValueError(\"limiter must be a boolean.\")\n\n if arg_4 not in [None, 'e', 'B', 'b']:\n raise ValueError(\"balance must be one of None, 'e', 'B', or 'b'.\")\n\n arg_5 = ['Func']\n\n if arg_4 is not None:\n arg_5.append('-{}'.format(arg_4))\n\n if arg_2:\n arg_5.append('-n')\n\n if arg_3:\n arg_5.append('-l')\n\n arg_5.append('{:f}'.format(arg_1))\n arg_0.effects.extend(arg_5)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6272", "title": "", "text": "def Func(arg_0, arg_1=-10.0, arg_2=65.0):\n '''Loudness control. Similar to the gain effect, but provides\n equalisation for the human auditory system.\n\n The gain is adjusted by gain_db and the signal is equalised according\n to ISO 226 w.r.t. 
reference_level.\n\n Parameters\n ----------\n gain_db : float, default=-10.0\n Loudness adjustment amount (in dB)\n reference_level : float, default=65.0\n Reference level (in dB) according to which the signal is equalized.\n Must be between 50 and 75 (dB)\n\n See Also\n --------\n gain\n\n '''\n if not is_number(arg_1):\n raise ValueError('gain_db must be a number.')\n\n if not is_number(arg_2):\n raise ValueError('reference_level must be a number')\n\n if arg_2 > 75 or arg_2 < 50:\n raise ValueError('reference_level must be between 50 and 75')\n\n arg_3 = [\n 'Func',\n '{:f}'.format(arg_1),\n '{:f}'.format(arg_2)\n ]\n arg_0.effects.extend(arg_3)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6273", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Calculate a profile of the audio for use in noise reduction.\n Running this command does not effect the Transformer effects\n chain. When this function is called, the calculated noise profile\n file is saved to the `profile_path`.\n\n Parameters\n ----------\n input_filepath : str\n Path to audiofile from which to compute a noise profile.\n profile_path : str\n Path to save the noise profile file.\n\n See Also\n --------\n noisered\n\n '''\n if os.path.isdir(arg_2):\n raise ValueError(\n \"profile_path {} is a directory.\".format(arg_2))\n\n if os.path.dirname(arg_2) == '' and arg_2 != '':\n arg_3 = os.path.join(os.getcwd(), arg_2)\n else:\n arg_3 = arg_2\n\n if not os.access(os.path.dirname(arg_3), os.W_OK):\n raise IOError(\n \"profile_path {} is not writeable.\".format(arg_3))\n\n arg_4 = ['Func', arg_2]\n arg_0.build(arg_1, None, extra_args=arg_4)\n\n return None"} +{"_id": "doc_6274", "title": "", "text": "def Func(arg_0, arg_1=-3.0):\n '''Normalize an audio file to a particular db level.\n This behaves identically to the gain effect with Funcalize=True.\n\n Parameters\n ----------\n db_level : float, default=-3.0\n Output volume (db)\n\n See Also\n --------\n gain, loudness\n\n '''\n if not is_number(arg_1):\n raise ValueError('db_level must be a number.')\n\n arg_2 = [\n 'Func',\n '{:f}'.format(arg_1)\n ]\n arg_0.effects.extend(arg_2)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6275", "title": "", "text": "def Func(arg_0, arg_1=0.0, arg_2=0.0):\n '''Add silence to the beginning or end of a file.\n Calling this with the default arguments has no effect.\n\n Parameters\n ----------\n start_duration : float\n Number of seconds of silence to add to beginning.\n end_duration : float\n Number of seconds of silence to add to end.\n\n See Also\n --------\n delay\n\n '''\n if not is_number(arg_1) or arg_1 < 0:\n raise ValueError(\"Start duration must be a positive number.\")\n\n if not is_number(arg_2) or arg_2 < 0:\n raise ValueError(\"End duration must be positive.\")\n\n arg_3 = [\n 'Func',\n '{:f}'.format(arg_1),\n '{:f}'.format(arg_2)\n ]\n arg_0.effects.extend(arg_3)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6276", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''Pitch shift the audio without changing the tempo.\n\n This effect uses the WSOLA algorithm. The audio is chopped up into\n segments which are then shifted in the time domain and overlapped\n (cross-faded) at points where their waveforms are most similar as\n determined by measurement of least squares.\n\n Parameters\n ----------\n n_semitones : float\n The number of semitones to shift. 
Can be positive or negative.\n quick : bool, default=False\n If True, this effect will run faster but with lower sound quality.\n\n See Also\n --------\n bend, speed, tempo\n\n '''\n if not is_number(arg_1):\n raise ValueError(\"n_semitones must be a positive number\")\n\n if arg_1 < -12 or arg_1 > 12:\n logger.warning(\n \"Using an extreme Func shift. \"\n \"Quality of results will be poor\"\n )\n\n if not isinstance(arg_2, bool):\n raise ValueError(\"quick must be a boolean.\")\n\n arg_3 = ['Func']\n\n if arg_2:\n arg_3.append('-q')\n\n arg_3.append('{:f}'.format(arg_1 * 100.))\n\n arg_0.effects.extend(arg_3)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6277", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''Remix the channels of an audio file.\n\n Note: volume options are not yet implemented\n\n Parameters\n ----------\n Func_dictionary : dict or None\n Dictionary mapping output channel to list of input channel(s).\n Empty lists indicate the corresponding output channel should be\n empty. If None, mixes all channels down to a single mono file.\n num_output_channels : int or None\n The number of channels in the output file. If None, the number of\n output channels is equal to the largest key in Func_dictionary.\n If Func_dictionary is None, this variable is ignored.\n\n Examples\n --------\n Remix a 4-channel input file. The output file will have\n input channel 2 in channel 1, a mixdown of input channels 1 an 3 in\n channel 2, an empty channel 3, and a copy of input channel 4 in\n channel 4.\n\n >>> import sox\n >>> tfm = sox.Transformer()\n >>> Func_dictionary = {1: [2], 2: [1, 3], 4: [4]}\n >>> tfm.Func(Func_dictionary)\n\n '''\n if not (isinstance(arg_1, dict) or\n arg_1 is None):\n raise ValueError(\"Func_dictionary must be a dictionary or None.\")\n\n if arg_1 is not None:\n\n if not all([isinstance(arg_3, int) and arg_3 > 0 for arg_3\n in arg_1.keys()]):\n raise ValueError(\n \"Func dictionary must have positive integer keys.\"\n )\n\n if not all([isinstance(arg_4, list) for arg_4\n in arg_1.values()]):\n raise ValueError(\"Func dictionary values must be lists.\")\n\n for arg_5 in arg_1.values():\n if not all([isinstance(arg_4, int) and arg_4 > 0 for arg_4 in arg_5]):\n raise ValueError(\n \"elements of Func dictionary values must \"\n \"be positive integers\"\n )\n\n if not ((isinstance(arg_2, int) and\n arg_2 > 0) or arg_2 is None):\n raise ValueError(\n \"num_output_channels must be a positive integer or None.\"\n )\n\n arg_6 = ['Func']\n if arg_1 is None:\n arg_6.append('-')\n else:\n if arg_2 is None:\n arg_2 = max(arg_1.keys())\n\n for arg_7 in range(1, arg_2 + 1):\n if arg_7 in arg_1.keys():\n arg_8 = ','.join(\n [str(arg_3) for arg_3 in arg_1[arg_7]]\n )\n else:\n arg_8 = '0'\n\n arg_6.append(arg_8)\n\n arg_0.effects.extend(arg_6)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6278", "title": "", "text": "def Func(arg_0, arg_1=1):\n '''Repeat the entire audio count times.\n\n Parameters\n ----------\n count : int, default=1\n The number of times to Func the audio.\n\n '''\n if not isinstance(arg_1, int) or arg_1 < 1:\n raise ValueError(\"count must be a postive integer.\")\n\n arg_2 = ['Func', '{}'.format(arg_1)]\n arg_0.effects.extend(arg_2)\n arg_0.effects_log.append('Func')"} +{"_id": "doc_6279", "title": "", "text": "def Func(arg_0):\n '''Reverse the audio completely\n '''\n arg_1 = ['Func']\n arg_0.effects.extend(arg_1)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6280", "title": "", 
"text": "def Func(arg_0, arg_1=0, arg_2=0.1,\n arg_3=0.1, arg_4=False):\n '''Removes silent regions from an audio file.\n\n Parameters\n ----------\n location : int, default=0\n Where to remove Func. One of:\n * 0 to remove Func throughout the file (default),\n * 1 to remove Func from the beginning,\n * -1 to remove Func from the end,\n Func_threshold : float, default=0.1\n Silence threshold as percentage of maximum sample amplitude.\n Must be between 0 and 100.\n min_Func_duration : float, default=0.1\n The minimum ammount of time in seconds required for a region to be\n considered non-silent.\n buffer_around_Func : bool, default=False\n If True, leaves a buffer of min_Func_duration around removed\n silent regions.\n\n See Also\n --------\n vad\n\n '''\n if arg_1 not in [-1, 0, 1]:\n raise ValueError(\"location must be one of -1, 0, 1.\")\n\n if not is_number(arg_2) or arg_2 < 0:\n raise ValueError(\n \"Func_threshold must be a number between 0 and 100\"\n )\n elif arg_2 >= 100:\n raise ValueError(\n \"Func_threshold must be a number between 0 and 100\"\n )\n\n if not is_number(arg_3) or arg_3 <= 0:\n raise ValueError(\n \"min_Func_duration must be a positive number.\"\n )\n\n if not isinstance(arg_4, bool):\n raise ValueError(\"buffer_around_Func must be a boolean.\")\n\n arg_5 = []\n\n if arg_1 == -1:\n arg_5.append('reverse')\n\n if arg_4:\n arg_5.extend(['Func', '-l'])\n else:\n arg_5.append('Func')\n\n arg_5.extend([\n '1',\n '{:f}'.format(arg_3),\n '{:f}%'.format(arg_2)\n ])\n\n if arg_1 == 0:\n arg_5.extend([\n '-1',\n '{:f}'.format(arg_3),\n '{:f}%'.format(arg_2)\n ])\n\n if arg_1 == -1:\n arg_5.append('reverse')\n\n arg_0.effects.extend(arg_5)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6281", "title": "", "text": "def Func(arg_0, arg_1):\n '''Display time domain statistical information about the audio\n channels. Audio is passed unmodified through the SoX processing chain.\n Statistics are calculated and displayed for each audio channel\n\n Unlike other Transformer methods, this does not modify the transformer\n effects chain. Instead it computes statistics on the output file that\n would be created if the build command were invoked.\n\n Note: The file is downmixed to mono prior to computation.\n\n Parameters\n ----------\n input_filepath : str\n Path to input file to compute Func on.\n\n Returns\n -------\n Func_dict : dict\n List of frequency (Hz), amplitude pairs.\n\n See Also\n --------\n stat, sox.file_info\n '''\n arg_2 = ['channels', '1', 'Func']\n\n arg_3, arg_3, arg_4 = arg_0.build(\n arg_1, None, extra_args=arg_2, return_output=True\n )\n\n arg_5 = {}\n arg_6 = arg_4.split('\\n')\n for arg_7 in arg_6:\n arg_8 = arg_7.split()\n if len(arg_8) == 0:\n continue\n arg_9 = arg_8[-1]\n arg_10 = ' '.join(arg_8[:-1])\n arg_5[arg_10] = arg_9\n\n return arg_5"} +{"_id": "doc_6282", "title": "", "text": "def Func(arg_0):\n '''Swap stereo channels. If the input is not stereo, pairs of channels\n are Funcped, and a possible odd last channel passed through.\n\n E.g., for seven channels, the output order will be 2, 1, 4, 3, 6, 5, 7.\n\n See Also\n ----------\n remix\n\n '''\n arg_1 = ['Func']\n arg_0.effects.extend(arg_1)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6283", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''Excerpt a clip from an audio file, given the start timestamp and end timestamp of the clip within the file, expressed in seconds. 
If the end timestamp is set to `None` or left unspecified, it defaults to the duration of the audio file.\n\n Parameters\n ----------\n start_time : float\n Start time of the clip (seconds)\n end_time : float or None, default=None\n End time of the clip (seconds)\n\n '''\n if not is_number(arg_1) or arg_1 < 0:\n raise ValueError(\"start_time must be a positive number.\")\n\n arg_3 = [\n 'Func',\n '{:f}'.format(arg_1)\n ]\n\n if arg_2 is not None:\n if not is_number(arg_2) or arg_2 < 0:\n raise ValueError(\"end_time must be a positive number.\")\n if arg_1 >= arg_2:\n raise ValueError(\"start_time must be smaller than end_time.\")\n\n arg_3.append('{:f}'.format(arg_2 - arg_1))\n\n arg_0.effects.extend(arg_3)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6284", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=True, arg_3=7.0,\n arg_4=0.25, arg_5=1.0,\n arg_6=0.25, arg_7=0.0):\n '''Voice Activity Detector. Attempts to trim silence and quiet\n background sounds from the ends of recordings of speech. The algorithm\n currently uses a simple cepstral power measurement to detect voice, so\n may be fooled by other things, especially music.\n\n The effect can trim only from the front of the audio, so in order to\n trim from the back, the reverse effect must also be used.\n\n Parameters\n ----------\n location : 1 or -1, default=1\n If 1, trims silence from the beginning\n If -1, trims silence from the end\n normalize : bool, default=True\n If true, normalizes audio before processing.\n activity_threshold : float, default=7.0\n The measurement level used to trigger activity detection. This may\n need to be cahnged depending on the noise level, signal level, and\n other characteristics of the input audio.\n min_activity_duration : float, default=0.25\n The time constant (in seconds) used to help ignore short bursts of\n sound.\n initial_search_buffer : float, default=1.0\n The amount of audio (in seconds) to search for quieter/shorter\n bursts of audio to include prior to the detected trigger point.\n max_gap : float, default=0.25\n The allowed gap (in seconds) between quiteter/shorter bursts of\n audio to include prior to the detected trigger point\n initial_pad : float, default=0.0\n The amount of audio (in seconds) to preserve before the trigger\n point and any found quieter/shorter bursts.\n\n See Also\n --------\n silence\n\n Examples\n --------\n >>> tfm = sox.Transformer()\n\n Remove silence from the beginning of speech\n\n >>> tfm.Func(initial_pad=0.3)\n\n Remove silence from the end of speech\n\n >>> tfm.Func(location=-1, initial_pad=0.2)\n\n '''\n if arg_1 not in [-1, 1]:\n raise ValueError(\"location must be -1 or 1.\")\n if not isinstance(arg_2, bool):\n raise ValueError(\"normalize muse be a boolean.\")\n if not is_number(arg_3):\n raise ValueError(\"activity_threshold must be a number.\")\n if not is_number(arg_4) or arg_4 < 0:\n raise ValueError(\"min_activity_duration must be a positive number\")\n if not is_number(arg_5) or arg_5 < 0:\n raise ValueError(\"initial_search_buffer must be a positive number\")\n if not is_number(arg_6) or arg_6 < 0:\n raise ValueError(\"max_gap must be a positive number.\")\n if not is_number(arg_7) or arg_7 < 0:\n raise ValueError(\"initial_pad must be a positive number.\")\n\n arg_8 = []\n\n if arg_2:\n arg_8.append('norm')\n\n if arg_1 == -1:\n arg_8.append('reverse')\n\n arg_8.extend([\n 'Func',\n '-t', '{:f}'.format(arg_3),\n '-T', '{:f}'.format(arg_4),\n '-s', '{:f}'.format(arg_5),\n '-g', '{:f}'.format(arg_6),\n '-p', 
'{:f}'.format(arg_7)\n ])\n\n if arg_1 == -1:\n arg_8.append('reverse')\n\n arg_0.effects.extend(arg_8)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6285", "title": "", "text": "def Func(arg_0, arg_1, arg_2='amplitude', arg_3=None):\n '''Apply an amplification or an attenuation to the audio signal.\n\n Parameters\n ----------\n gain : float\n Interpreted according to the given `gain_type`.\n If `gain_type' = 'amplitude', `gain' is a positive amplitude ratio.\n If `gain_type' = 'power', `gain' is a power (Functage squared).\n If `gain_type' = 'db', `gain' is in decibels.\n gain_type : string, default='amplitude'\n Type of gain. One of:\n - 'amplitude'\n - 'power'\n - 'db'\n limiter_gain : float or None, default=None\n If specified, a limiter is invoked on peaks greater than\n `limiter_gain' to prevent clipping.\n `limiter_gain` should be a positive value much less than 1.\n\n See Also\n --------\n gain, compand\n\n '''\n if not is_number(arg_1):\n raise ValueError('gain must be a number.')\n if arg_3 is not None:\n if (not is_number(arg_3) or\n arg_3 <= 0 or arg_3 >= 1):\n raise ValueError(\n 'limiter gain must be a positive number less than 1'\n )\n if arg_2 in ['amplitude', 'power'] and arg_1 < 0:\n raise ValueError(\n \"If gain_type = amplitude or power, gain must be positive.\"\n )\n\n arg_4 = ['Func']\n\n arg_4.append('{:f}'.format(arg_1))\n\n if arg_2 == 'amplitude':\n arg_4.append('amplitude')\n elif arg_2 == 'power':\n arg_4.append('power')\n elif arg_2 == 'db':\n arg_4.append('dB')\n else:\n raise ValueError('gain_type must be one of amplitude power or db')\n\n if arg_3 is not None:\n if arg_2 in ['amplitude', 'power'] and arg_1 > 1:\n arg_4.append('{:f}'.format(arg_3))\n elif arg_2 == 'db' and arg_1 > 0:\n arg_4.append('{:f}'.format(arg_3))\n\n arg_0.effects.extend(arg_4)\n arg_0.effects_log.append('Func')\n\n return arg_0"} +{"_id": "doc_6286", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> arg_1:\n \"\"\"\n Extended euclidean algorithm to find modular inverses for integers\n \"\"\"\n if arg_0 == 0:\n return 0\n arg_3, arg_4 = 1, 0\n arg_5, arg_6 = arg_0 % arg_2, arg_2\n while arg_5 > 1:\n arg_7 = arg_6 // arg_5\n arg_8, arg_9 = arg_4 - arg_3 * arg_7, arg_6 - arg_5 * arg_7\n arg_3, arg_5, arg_4, arg_6 = arg_8, arg_9, arg_3, arg_5\n return arg_3 % arg_2"} +{"_id": "doc_6287", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Lets a user Func a room on a specific Namespace.\"\"\"\n arg_0.socket.rooms.add(arg_0._get_room_name(arg_1))"} +{"_id": "doc_6288", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Lets a user Func a room on a specific Namespace.\"\"\"\n arg_0.socket.rooms.remove(arg_0._get_room_name(arg_1))"} +{"_id": "doc_6289", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=None, arg_5=None):\n \"\"\"Main SocketIO management function, call from within your Framework of\n choice's view.\n\n The ``environ`` variable is the WSGI ``environ``. It is used to extract\n Socket object from the underlying server (as the 'socketio' key), and will\n be attached to both the ``Socket`` and ``Namespace`` objects.\n\n The ``namespaces`` parameter is a dictionary of the namespace string\n representation as key, and the BaseNamespace namespace class descendant as\n a value. The empty string ('') namespace is the global namespace. You can\n use Socket.GLOBAL_NS to be more explicit. So it would look like:\n\n .. 
code-block:: python\n\n namespaces={'': GlobalNamespace,\n '/chat': ChatNamespace}\n\n The ``request`` object is not required, but will probably be useful to pass\n framework-specific things into your Socket and Namespace functions. It will\n simply be attached to the Socket and Namespace object (accessible through\n ``self.request`` in both cases), and it is not accessed in any case by the\n ``gevent-socketio`` library.\n\n Pass in an ``error_handler`` if you want to override the default\n error_handler (which is :func:`socketio.virtsocket.default_error_handler`.\n The callable you pass in should have the same signature as the default\n error handler.\n\n The ``json_loads`` and ``json_dumps`` are overrides for the default\n ``json.loads`` and ``json.dumps`` function calls. Override these at\n the top-most level here. This will affect all sockets created by this\n socketio manager, and all namespaces inside.\n\n This function will block the current \"view\" or \"controller\" in your\n framework to do the recv/send on the socket, and dispatch incoming messages\n to your namespaces.\n\n This is a simple example using Pyramid:\n\n .. code-block:: python\n\n def my_view(request):\n Func(request.environ, {'': GlobalNamespace}, request)\n\n NOTE: You must understand that this function is going to be called\n *only once* per socket opening, *even though* you are using a long\n polling mechanism. The subsequent calls (for long polling) will\n be hooked directly at the server-level, to interact with the\n active ``Socket`` instance. This means you will *not* get access\n to the future ``request`` or ``environ`` objects. This is of\n particular importance regarding sessions (like Beaker). The\n session will be opened once at the opening of the Socket, and not\n closed until the socket is closed. You are responsible for\n opening and closing the cookie-based session yourself if you want\n to keep its data in sync with the rest of your GET/POST calls.\n \"\"\"\n arg_6 = arg_0['socketio']\n arg_6._set_environ(arg_0)\n arg_6._set_namespaces(arg_1)\n\n if arg_2:\n arg_6._set_request(arg_2)\n\n if arg_3:\n arg_6._set_error_handler(arg_3)\n\n if arg_4:\n arg_6._set_json_loads(arg_4)\n if arg_5:\n arg_6._set_json_dumps(arg_5)\n\n arg_7 = arg_6._spawn_receiver_loop()\n\n gevent.joinall([arg_7])\n\n # TODO: double check, what happens to the WSGI request here ? 
it vanishes ?\n return"} +{"_id": "doc_6290", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Keep a reference of the callback on this socket.\"\"\"\n if arg_1 in arg_0.ack_callbacks:\n return False\n arg_0.ack_callbacks[arg_1] = arg_2"} +{"_id": "doc_6291", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch the callback for a given msgid, if it exists, otherwise,\n return None\"\"\"\n if arg_1 not in arg_0.ack_callbacks:\n return None\n return arg_0.ack_callbacks.pop(arg_1)"} +{"_id": "doc_6292", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Get multiple messages, in case we're going through the various\n XHR-polling methods, on which we can pack more than one message if the\n rate is high, and encode the payload for the HTTP channel.\"\"\"\n arg_2 = arg_0.client_queue\n arg_3 = [arg_2.get(**arg_1)]\n while arg_2.qsize():\n arg_3.append(arg_2.get())\n return arg_3"} +{"_id": "doc_6293", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"This removes a Namespace object from the socket.\n\n This is usually called by\n :meth:`~socketio.namespace.BaseNamespace.disconnect`.\n\n \"\"\"\n if arg_1 in arg_0.active_ns:\n del arg_0.active_ns[arg_1]\n\n if len(arg_0.active_ns) == 0 and arg_0.connected:\n arg_0.kill(detach=True)"} +{"_id": "doc_6294", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Low-level interface to queue a packet on the wire (encoded as wire\n protocol\"\"\"\n arg_0.put_client_msg(packet.encode(arg_1, arg_0.json_dumps))"} +{"_id": "doc_6295", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Spawn a new Greenlet, attached to this Socket instance.\n\n It will be monitored by the \"watcher\" method\n \"\"\"\n\n log.debug(\"Spawning sub-Socket Greenlet: %s\" % arg_1.__name__)\n arg_4 = gevent.Func(arg_1, *arg_2, **arg_3)\n arg_0.jobs.append(arg_4)\n return arg_4"} +{"_id": "doc_6296", "title": "", "text": "def Func(arg_0):\n \"\"\"Start the heartbeat Greenlet to check connection health.\"\"\"\n arg_1 = arg_0.config['heartbeat_interval']\n while arg_0.connected:\n gevent.sleep(arg_1)\n # TODO: this process could use a timeout object like the disconnect\n # timeout thing, and ONLY send packets when none are sent!\n # We would do that by calling timeout.set() for a \"sending\"\n # timeout. If we're sending 100 messages a second, there is\n # no need to push some heartbeats in there also.\n arg_0.put_client_msg(\"2::\")"} +{"_id": "doc_6297", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3):\n \"\"\"You should always use this function to call the methods,\n as it checks if the user is allowed according to the ACLs.\n\n If you override :meth:`process_packet` or\n :meth:`process_event`, you should definitely want to use this\n instead of ``getattr(self, 'my_method')()``\n \"\"\"\n if not arg_0.is_method_allowed(arg_1):\n arg_0.error('method_access_denied',\n 'You do not have access to method \"%s\"' % arg_1)\n return\n\n return arg_0.call_method(arg_1, arg_2, *arg_3)"} +{"_id": "doc_6298", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=False):\n \"\"\"Use this to use the configured ``Func_handler`` yield an\n Func message to your application.\n\n :param Func_name: is a short string, to associate messages to recovery\n methods\n :param Func_message: is some human-readable text, describing the Func\n :param msg_id: is used to associate with a request\n :param quiet: specific to Func_handlers. 
The default doesn't send a\n message to the user, but shows a debug message on the\n developer console.\n \"\"\"\n arg_0.socket.Func(arg_1, arg_2, endpoint=arg_0.ns_name,\n arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_6299", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"Use Func to Func a simple string message.\n\n If ``json`` is True, the message will be encoded as a JSON object\n on the wire, and decoded on the other side.\n\n This is mostly for backwards compatibility. ``emit()`` is more fun.\n\n :param callback: This is a callback function that will be\n called automatically by the client upon\n reception. It does not verify that the\n listener over there was completed with\n success. It just tells you that the browser\n got a hold of the packet.\n :type callback: callable\n \"\"\"\n arg_4 = dict(type=\"message\", data=arg_1, endpoint=arg_0.ns_name)\n if arg_2:\n arg_4['type'] = \"json\"\n\n if arg_3:\n # By passing ack=True, we use the old behavior of being returned\n # an 'ack' packet, automatically triggered by the client-side\n # with no user-code being run. The emit() version of the\n # callback is more useful I think :) So migrate your code.\n arg_4['ack'] = True\n arg_4['id'] = msgid = arg_0.socket._get_next_msgid()\n arg_0.socket._save_ack_callback(msgid, arg_3)\n\n arg_0.socket.Func_packet(arg_4)"} +{"_id": "doc_6300", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Spawn a new process, attached to this Namespace.\n\n It will be monitored by the \"watcher\" process in the Socket. If the\n socket disconnects, all these greenlets are going to be killed, after\n calling BaseNamespace.disconnect()\n\n This method uses the ``exception_handler_decorator``. See\n Namespace documentation for more information.\n\n \"\"\"\n # self.log.debug(\"Spawning sub-Namespace Greenlet: %s\" % fn.__name__)\n if hasattr(arg_0, 'exception_handler_decorator'):\n arg_1 = arg_0.exception_handler_decorator(arg_1)\n arg_4 = gevent.Func(arg_1, *arg_2, **arg_3)\n arg_0.jobs.append(arg_4)\n return arg_4"} +{"_id": "doc_6301", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"Return an existing or new client Socket.\"\"\"\n\n arg_2 = arg_0.sockets.get(arg_1)\n\n if arg_1 and not arg_2:\n return None # you ask for a session that doesn't exist!\n if arg_2 is None:\n arg_2 = Socket(arg_0, arg_0.config)\n arg_0.sockets[arg_2.sessid] = arg_2\n else:\n arg_2.incr_hits()\n\n return arg_2"} +{"_id": "doc_6302", "title": "", "text": "def Func():\n \"\"\"\n Handles post from the \"Add room\" form on the homepage, and\n redirects to the new room.\n \"\"\"\n arg_0 = request.form.get(\"name\")\n if arg_0:\n arg_1, arg_2 = get_or_Func(ChatRoom, arg_0=arg_0)\n return redirect(url_for('room', slug=arg_1.slug))\n return redirect(url_for('rooms'))"} +{"_id": "doc_6303", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"This will fetch the messages from the Socket's queue, and if\n there are many messes, pack multiple messages in one payload and return\n \"\"\"\n try:\n arg_3 = arg_1.get_multiple_client_msgs(arg_2=arg_2)\n arg_4 = arg_0.encode_payload(arg_3)\n except Empty:\n arg_4 = \"\"\n return arg_4"} +{"_id": "doc_6304", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Just quote out stuff before sending it out\"\"\"\n arg_2 = parse_qs(arg_0.handler.environ.get(\"QUERY_STRING\"))\n if \"i\" in arg_2:\n arg_3 = arg_2[\"i\"]\n else:\n arg_3 = \"0\"\n # TODO: don't we need to quote this data in here ?\n super(JSONPolling, 
arg_0).Func(\"io.j[%s]('%s');\" % (arg_3, arg_1))"} +{"_id": "doc_6305", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"\n This is sent to all in the sockets in this particular Namespace,\n including itself.\n \"\"\"\n arg_3 = dict(type=\"event\",\n name=arg_1,\n arg_2=arg_2,\n endpoint=arg_0.ns_name)\n\n for arg_4, arg_5 in six.iteritems(arg_0.socket.server.sockets):\n arg_5.send_packet(arg_3)"} +{"_id": "doc_6306", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add a parent to this role,\n and add role itself to the parent's children set.\n you should override this function if neccessary.\n\n Example::\n\n logged_user = RoleMixin('logged_user')\n student = RoleMixin('student')\n student.Func(logged_user)\n\n :param parent: Parent role to add in.\n \"\"\"\n arg_1.children.add(arg_0)\n arg_0.parents.add(arg_1)"} +{"_id": "doc_6307", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=True):\n \"\"\"Add Funcing rules.\n\n :param role: Role of this rule.\n :param method: Method to Func in rule, include GET, POST, PUT etc.\n :param resource: Resource also view function.\n :param with_children: Allow role's children in rule as well\n if with_children is `True`\n \"\"\"\n if arg_4:\n for arg_5 in arg_1.get_children():\n arg_6 = (arg_5.get_name(), arg_2, arg_3)\n if arg_6 not in arg_0._Funced:\n arg_0._Funced.append(arg_6)\n if arg_1 == 'anonymous':\n arg_6 = (arg_1, arg_2, arg_3)\n else:\n arg_6 = (arg_1.get_name(), arg_2, arg_3)\n if arg_6 not in arg_0._Funced:\n arg_0._Funced.append(arg_6)"} +{"_id": "doc_6308", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=False):\n \"\"\"Add Funcing rules.\n\n :param role: Role of this rule.\n :param method: Method to Func in rule, include GET, POST, PUT etc.\n :param resource: Resource also view function.\n :param with_children: Deny role's children in rule as well\n if with_children is `True`\n \"\"\"\n if arg_4:\n for arg_5 in arg_1.get_children():\n arg_6 = (arg_5.get_name(), arg_2, arg_3)\n if arg_6 not in arg_0._denied:\n arg_0._denied.append(arg_6)\n arg_6 = (arg_1.get_name(), arg_2, arg_3)\n if arg_6 not in arg_0._denied:\n arg_0._denied.append(arg_6)"} +{"_id": "doc_6309", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check whether role is allowed to access resource\n\n :param role: Role to be checked.\n :param method: Method to be checked.\n :param resource: View function to be checked.\n \"\"\"\n return (arg_1, arg_2, arg_3) in arg_0._allowed"} +{"_id": "doc_6310", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Check wherther role is denied to access resource\n\n :param role: Role to be checked.\n :param method: Method to be checked.\n :param resource: View function to be checked.\n \"\"\"\n return (arg_1, arg_2, arg_3) in arg_0._denied"} +{"_id": "doc_6311", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"This is a decorator function.\n\n You can Func roles to access the view func with it.\n\n An example::\n\n @app.route('/website/setting', methods=['GET', 'POST'])\n @rbac.Func(['administrator', 'super_user'], ['GET', 'POST'])\n def website_setting():\n return Response('Setting page.')\n\n :param roles: List, each name of roles. 
Please note that,\n `anonymous` is refered to anonymous.\n If you add `anonymous` to the rule,\n everyone can access the resource,\n unless you deny other roles.\n :param methods: List, each name of methods.\n methods is valid in ['GET', 'POST', 'PUT', 'DELETE']\n :param with_children: Whether Func children of roles as well.\n True by default.\n \"\"\"\n def decorator(arg_4):\n arg_5 = [arg_7.upper() for arg_7 in arg_2]\n for arg_6, arg_7, arg_8 in itertools.product(arg_1, arg_5, [arg_4.__name__]):\n arg_0.before_acl['Func'].append((arg_6, arg_7, arg_8, arg_3))\n return arg_4\n return decorator"} +{"_id": "doc_6312", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=2):\n \"\"\"\n Given a string and a category, finds and combines words into\n groups based on their proximity.\n\n Args:\n text (str): Some text.\n tokens (list): A list of regex strings.\n\n Returns:\n list. The combined strings it found.\n\n Example:\n COLOURS = [r\"red(?:dish)?\", r\"grey(?:ish)?\", r\"green(?:ish)?\"]\n s = 'GREYISH-GREEN limestone with RED or GREY sandstone.'\n Func(s, COLOURS) --> ['greyish green', 'red', 'grey']\n \"\"\"\n arg_4 = re.IGNORECASE\n arg_5 = getattr(arg_0, arg_2)\n arg_6 = re.compile(r'(\\b' + r'\\b|\\b'.join(arg_5) + r'\\b)', flags=arg_4)\n arg_7 = arg_6.finditer(arg_1)\n\n arg_8, arg_9 = [], []\n arg_10 = []\n\n for arg_11 in arg_7:\n arg_8.append(arg_11.span()[0])\n arg_9.append(arg_11.span()[1])\n arg_10.append(arg_11.group().lower())\n\n arg_12 = [] # As a check only.\n arg_13 = [] # This is what I want.\n\n arg_14 = False\n for arg_15, arg_16 in enumerate(arg_10):\n if arg_14:\n arg_14 = False\n continue\n if (arg_15 < len(arg_10)-1) and (arg_8[arg_15+1]-arg_9[arg_15] <= arg_3):\n if arg_16[-1] == '-':\n arg_17 = '' # Don't insert spaces after hyphens.\n else:\n arg_17 = ' '\n arg_13.append(arg_16 + arg_17 + arg_10[arg_15+1])\n arg_12.append(arg_8[arg_15])\n arg_14 = True\n else:\n if arg_16 not in arg_13:\n arg_13.append(arg_16)\n arg_12.append(arg_8[arg_15])\n arg_14 = False\n\n return arg_13"} +{"_id": "doc_6313", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Given a string and a dict of synonyms, returns the 'preferred'\n word. Case insensitive.\n\n Args:\n word (str): A word.\n\n Returns:\n str: The preferred word, or the input word if not found.\n\n Example:\n >>> syn = {'snake': ['python', 'adder']}\n >>> Func('adder', syn)\n 'snake'\n >>> Func('rattler', syn)\n 'rattler'\n\n TODO:\n Make it handle case, returning the same case it received.\n \"\"\"\n if arg_1 and arg_0.synonyms:\n # Make the reverse look-up table.\n arg_2 = {}\n for arg_3, arg_4 in arg_0.synonyms.items():\n for arg_5 in arg_4:\n arg_2[arg_5.lower()] = arg_3.lower()\n\n # Now check words against this table.\n if arg_1.lower() in arg_2:\n return arg_2[arg_1.lower()]\n\n return arg_1"} +{"_id": "doc_6314", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse a piece of text and replace any abbreviations with their full\n word equivalents. Uses the lexicon.abbreviations dictionary to find\n abbreviations.\n\n Args:\n text (str): The text to parse.\n\n Returns:\n str: The text with abbreviations replaced.\n \"\"\"\n if not arg_0.abbreviations:\n raise LexiconError(\"No abbreviations in lexicon.\")\n\n def chunks(arg_2, arg_3=25):\n \"\"\"\n Regex only supports 100 groups for munging callbacks. 
So we have to\n chunk the abbreviation dicitonary.\n \"\"\"\n arg_4 = iter(arg_2)\n for arg_5 in range(0, len(arg_2), arg_3):\n yield {arg_6: arg_2[arg_6] for arg_6 in islice(arg_4, arg_3)}\n\n def cb(arg_7):\n \"\"\"Regex callback\"\"\"\n return arg_0.abbreviations.get(arg_7.group(0)) or arg_7.group(0)\n\n # Special cases.\n\n # TODO: We should handle these with a special set of\n # replacements that are made before the others.\n arg_1 = re.sub(r'w/', r'wi', arg_1)\n\n # Main loop.\n for arg_8 in chunks(arg_0.abbreviations):\n arg_9 = r'(\\b' + r'\\b)|(\\b'.join(arg_8.keys()) + r'\\b)'\n arg_1 = re.sub(arg_9, cb, arg_1)\n\n return arg_1"} +{"_id": "doc_6315", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Split a description into parts, each of which can be turned into\n a single component.\n \"\"\"\n # Protect some special sequences.\n arg_2 = re.sub(r'(\\d) ?in\\. ', r'\\1 inch ', arg_1) # Protect.\n arg_2 = re.sub(r'(\\d) ?ft\\. ', r'\\1 feet ', arg_2) # Protect.\n\n # Transform all part delimiters to first splitter.\n arg_3 = getattr(arg_0, 'splitters')\n try:\n arg_4 = arg_3[0].strip()\n except:\n arg_4 = 'with'\n arg_2 = re.sub(r'\\,?\\;?\\.? ?((under)?(less than)? \\d+%) (?=\\w)', r' '+arg_4+' \\1 ', arg_2)\n\n # Split.\n arg_5 = re.IGNORECASE\n arg_6 = re.compile(r'(?:' + r'|'.join(arg_3) + r')', flags=arg_5)\n arg_7 = filter(None, arg_6.split(arg_2))\n\n return [arg_8.strip() for arg_8 in arg_7]"} +{"_id": "doc_6316", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a minimal Decor with a Func colour.\n \"\"\"\n arg_2 = Func.sample([i for i in range(256)], 3)\n return arg_0({'colour': arg_2, 'component': arg_1, 'width': 1.0})"} +{"_id": "doc_6317", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Make a simple Func of the Decor.\n\n Args:\n fmt (str): A Python format string for the component summaries.\n fig (PyFunc figure): A figure, optional. Use either fig or ax, not\n both.\n ax (PyFunc axis): An axis, optional. Use either fig or ax, not\n both.\n\n Returns:\n fig or ax or None. If you pass in an ax, you get it back. If you pass\n in a fig, you get it. If you pass nothing, the function creates a\n Func object as a side-effect.\n \"\"\"\n\n arg_4 = 4 # aspect ratio of decor Func\n arg_5 = 0.25 # ratio of decor tile width\n\n arg_6 = None\n\n if (arg_2 is None) and (arg_3 is None):\n arg_2 = plt.figure(figsize=(arg_4, 1))\n else:\n arg_6 = arg_2\n\n if arg_3 is None:\n arg_3 = arg_2.add_axes([0.1*arg_5, 0.1, 0.8*arg_5, 0.8])\n else:\n arg_6 = arg_3\n\n arg_7 = patches.Rectangle((0, 0),\n arg_4*arg_5, arg_4*arg_5,\n color=arg_0.colour,\n lw=1,\n hatch=arg_0.hatch,\n ec='k')\n arg_3.add_patch(arg_7)\n arg_3.text(1.0+0.1*arg_5*arg_4, arg_4*arg_5*0.5,\n arg_0.component.summary(arg_1=arg_1),\n fontsize=max(arg_4, 15),\n verticalalignment='center',\n horizontalalignment='left')\n arg_3.set_xlim([0, arg_4*arg_5])\n arg_3.set_ylim([0, arg_4*arg_5])\n arg_3.get_xaxis().set_visible(False)\n arg_3.get_yaxis().set_visible(False)\n arg_3.invert_yaxis()\n\n return arg_6"} +{"_id": "doc_6318", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generate a default legend.\n\n Args:\n name (str): The name of the legend you want. Not case sensitive.\n 'nsdoe': Nova Scotia Dept. of Energy\n 'canstrat': Canstrat\n 'nagmdm__6_2': USGS N. Am. Geol. Map Data Model 6.2\n 'nagmdm__6_1': USGS N. Am. Geol. Map Data Model 6.1\n 'nagmdm__4_3': USGS N. Am. Geol. 
Map Data Model 4.3\n 'sgmc': USGS State Geologic Map Compilation\n\n Default 'nagmdm__6_2'.\n\n Returns:\n Legend: The legend stored in `defaults.py`.\n \"\"\"\n arg_2 = {\n 'nsdoe': LEGEND__NSDOE,\n 'canstrat': LEGEND__Canstrat,\n 'nagmdm__6_2': LEGEND__NAGMDM__6_2,\n 'nagmdm__6_1': LEGEND__NAGMDM__6_1,\n 'nagmdm__4_3': LEGEND__NAGMDM__4_3,\n 'sgmc': LEGEND__SGMC,\n }\n return arg_0.from_csv(text=arg_2[arg_1.lower()])"} +{"_id": "doc_6319", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None):\n \"\"\"\n Generate a Func legend for a given list of components.\n\n Args:\n components (list or Striplog): A list of components. If you pass\n a Striplog, it will use the primary components. If you pass a\n component on its own, you will get a Func Decor.\n width (bool): Also generate widths for the components, based on the\n order in which they are encountered.\n colour (str): If you want to give the Decors all the same colour,\n provide a hex string.\n Returns:\n Legend or Decor: A legend (or Decor) with Func colours.\n TODO:\n It might be convenient to have a partial method to generate an\n 'empty' legend. Might be an easy way for someone to start with a\n template, since it'll have the components in it already.\n \"\"\"\n try: # Treating as a Striplog.\n arg_4 = [Decor.Func(c)\n for c\n in [arg_6[0] for arg_6 in arg_1.unique if arg_6[0]]\n ]\n except:\n try:\n arg_4 = [Decor.Func(c) for c in arg_1.copy()]\n except:\n # It's a single component.\n arg_4 = [Decor.Func(arg_1)]\n\n if arg_3 is not None:\n for arg_5 in arg_4:\n arg_5.colour = arg_3\n\n if arg_2:\n for arg_6, arg_5 in enumerate(arg_4):\n arg_5.width = arg_6 + 1\n\n return arg_0(arg_4)"} +{"_id": "doc_6320", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=None,\n arg_4=0.1,\n arg_5=2):\n \"\"\"\n A slightly easier way to make legends from images.\n\n Args:\n filename (str)\n components (list)\n ignore (list): Colours to ignore, e.g. \"#FFFFFF\" to ignore white.\n col_offset (Number): If < 1, interpreted as proportion of way\n across the image. If > 1, interpreted as pixels from left.\n row_offset (int): Number of pixels to skip at the top of each\n interval.\n \"\"\"\n if arg_3 is None:\n arg_3 = []\n\n arg_6 = utils.loglike_Func(arg_1, offset=arg_4)\n arg_7 = np.array([utils.rgb_to_hex(t) for t in arg_6])\n\n # Get the pixels and colour values at 'tops' (i.e. changes).\n arg_8, arg_9 = utils.tops_from_loglike(arg_7, offset=arg_5)\n\n # Reduce to unique colours.\n arg_10 = []\n for arg_11 in arg_9:\n if arg_11 not in arg_10:\n if arg_11 not in arg_3:\n arg_10.append(arg_11)\n\n arg_12 = []\n for arg_13, arg_14 in enumerate(arg_2):\n arg_15 = Decor({'colour': arg_10[arg_13], 'component': arg_14})\n arg_12.append(arg_15)\n\n return arg_0(arg_12)"} +{"_id": "doc_6321", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Read CSV text and generate a Legend.\n\n Args:\n string (str): The CSV string.\n\n In the first row, list the properties. Precede the properties of the\n component with 'comp ' or 'component '. For example:\n\n colour, width, comp lithology, comp colour\n #FFFFFF, 0, ,\n #F7E9A6, 3, Sandstone, Grey\n #FF99CC, 2, Anhydrite,\n ... 
etc\n\n Note:\n To edit a legend, the easiest thing to do is probably this:\n\n - `legend.to_csv()`\n - Edit the legend, call it `new_legend`.\n - `legend = Legend.Func(text=new_legend)`\n \"\"\"\n if (arg_1 is None) and (arg_2 is None):\n raise LegendError(\"You must provide a filename or CSV text.\")\n\n if (arg_1 is not None):\n with open(arg_1, 'r') as arg_3:\n arg_2 = arg_3.read()\n\n try:\n arg_3 = StringIO(arg_2) # Python 3\n except TypeError:\n arg_3 = StringIO(unicode(arg_2)) # Python 2\n\n arg_4 = csv.DictReader(arg_3, skipinitialspace=True)\n arg_5, arg_6 = [], []\n arg_7 = 'component'\n for arg_8 in arg_4:\n arg_9, arg_10 = {}, {}\n for (arg_11, arg_12) in arg_8.items():\n if (arg_11 in [None, '']):\n continue\n if (arg_12 in [None, '']):\n if arg_11.lower() not in ['color', 'colour']:\n continue\n if arg_11[:4].lower() == 'comp':\n arg_13 = ' '.join(arg_11.split()[1:])\n if arg_12.lower() == 'true':\n arg_10[arg_13] = True\n elif arg_12.lower() == 'false':\n arg_10[arg_13] = False\n else:\n try:\n arg_10[arg_13] = float(arg_12)\n except ValueError:\n arg_10[arg_13] = arg_12.lower()\n\n elif arg_11[:5].lower() == 'curve':\n arg_13 = ' '.join(arg_11.split()[1:])\n arg_10[arg_13] = arg_12.lower()\n arg_7 = 'curve'\n else:\n try:\n arg_9[arg_11] = float(arg_12)\n except ValueError:\n arg_9[arg_11] = arg_12.lower()\n\n arg_14 = Component(arg_10)\n arg_9[arg_7] = arg_14\n\n # Check for duplicates and warn.\n if arg_14 in arg_6:\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n arg_15 = \"This legend contains duplicate components.\"\n warnings.warn(arg_15)\n arg_6.append(arg_14)\n\n # Append to the master list and continue.\n arg_5.append(Decor(arg_9))\n\n return arg_0(arg_5)"} +{"_id": "doc_6322", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Renders a legend as a CSV string.\n\n No arguments.\n\n Returns:\n str: The legend as a CSV.\n \"\"\"\n # We can't delegate this to Decor because we need to know the superset\n # of all Decor properties. There may be lots of blanks.\n arg_1 = []\n arg_2 = []\n for arg_3 in arg_0:\n for arg_4 in arg_3.__dict__.keys():\n if arg_4 == '_colour':\n arg_4 = 'colour'\n arg_1.append(arg_4)\n for arg_5 in arg_3.component.__dict__.keys():\n arg_2.append(arg_5)\n arg_1 = set(arg_1)\n arg_2 = set(arg_2)\n arg_1.remove('component')\n arg_6 = ''\n if 'colour' in arg_1:\n arg_6 += 'colour,'\n arg_1.remove('colour')\n arg_7 = True\n for arg_8 in arg_1:\n arg_6 += arg_8 + ','\n for arg_8 in arg_2:\n arg_6 += 'component ' + arg_8 + ','\n\n # Now we have a header row! Phew.\n # Next we'll go back over the legend and collect everything.\n arg_9 = arg_6.strip(',') + '\\n'\n for arg_3 in arg_0:\n if arg_7:\n arg_9 += arg_3.__dict__.get('_colour', '') + ','\n for arg_8 in arg_1:\n arg_9 += str(arg_3.__dict__.get(arg_8, '')) + ','\n for arg_8 in arg_2:\n arg_9 += str(arg_3.component.__dict__.get(arg_8, '')) + ','\n arg_9 += '\\n'\n\n return arg_9"} +{"_id": "doc_6323", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The maximum width of all the Decors in the Legend. This is needed\n to scale a Legend or Striplog when plotting with widths turned on.\n \"\"\"\n try:\n arg_1 = max([row.width for row in arg_0.__list if row.width is not None])\n return arg_1\n except:\n return 0"} +{"_id": "doc_6324", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get the decor for a component.\n\n Args:\n c (component): The component to look up.\n match_only (list of str): The component attributes to include in the\n comparison. 
Default: All of them.\n\n Returns:\n Decor. The matching Decor from the Legend, or None if not found.\n \"\"\"\n if isinstance(arg_1, Component):\n if arg_1:\n if arg_2:\n # Filter the component only those attributes\n arg_1 = Component({k: getattr(arg_1, k, None) for k in arg_2})\n for arg_3 in arg_0.__list:\n try:\n if arg_1 == arg_3.component:\n return arg_3\n except AttributeError:\n continue\n else:\n for arg_3 in arg_0.__list:\n try:\n if getattr(arg_1, 'mnemonic').lower() == arg_3.curve.mnemonic:\n return arg_3\n except AttributeError:\n continue\n return Decor({'colour': '#eeeeee', 'component': Component()})"} +{"_id": "doc_6325", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"\"\"\n Get the component corresponding to a display colour. This is for\n generating a Striplog object from a colour image of a striplog.\n\n Args:\n colour (str): The hex colour string to look up.\n tolerance (float): The colourspace distance within which to match.\n default (component or None): The component to return in the event\n of no match.\n\n Returns:\n component. The component best matching the provided colour.\n \"\"\"\n if not (0 <= arg_2 <= np.sqrt(195075)):\n raise LegendError('Tolerance must be between 0 and 441.67')\n\n for arg_4 in arg_0.__list:\n if arg_1.lower() == arg_4.colour:\n return arg_4.component\n\n # If we're here, we didn't find one yet.\n arg_5, arg_6, arg_7 = utils.hex_to_rgb(arg_1)\n\n # Start with a best match of black.\n arg_8 = '#000000'\n arg_9 = np.sqrt(arg_5**2. + arg_6**2. + arg_7**2.)\n\n # Now compare to each colour in the legend.\n for arg_4 in arg_0.__list:\n arg_10, arg_11, arg_12 = arg_4.rgb\n arg_13 = np.sqrt((arg_10-arg_5)**2. + (arg_11-arg_6)**2. + (arg_12-arg_7)**2.)\n if arg_13 < arg_9:\n arg_8 = arg_4.component\n arg_9 = arg_13\n arg_14 = arg_4.colour\n\n if arg_9 <= arg_2:\n return arg_8\n else:\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n arg_15 = \"No match found for {0} \".format(arg_1.lower())\n arg_15 += \"with tolerance of {0}. Best match is \".format(arg_2)\n arg_15 += \"{0}, {1}\".format(arg_8.summary(), arg_14)\n arg_15 += \", d={0}\".format(arg_9)\n warnings.warn(arg_15)\n\n return arg_3"} +{"_id": "doc_6326", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=True):\n \"\"\"\n Generate a Component from a text string, using a Lexicon.\n\n Args:\n text (str): The text string to parse.\n lexicon (Lexicon): The dictionary to use for the\n categories and lexemes.\n required (str): An attribute that we must have. If a required\n attribute is missing from the component, then None is returned.\n first_only (bool): Whether to only take the first\n match of a lexeme against the text string.\n\n Returns:\n Component: A Component object, or None if there was no\n must-have field.\n \"\"\"\n arg_5 = arg_2.get_component(arg_1, arg_4=arg_4)\n if arg_3 and (arg_3 not in arg_5):\n return None\n else:\n return arg_0(arg_5)"} +{"_id": "doc_6327", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=True, arg_3=''):\n \"\"\"\n Given a format string, return a summary description of a component.\n\n Args:\n component (dict): A component dictionary.\n fmt (str): Describes the format with a string. If no format is\n given, you will just get a list of attributes. If you give the\n empty string (''), you'll get `default` back. By default this\n gives you the empty string, effectively suppressing the\n summary.\n initial (bool): Whether to capitialize the first letter. 
Default is\n True.\n default (str): What to give if there's no component defined.\n\n Returns:\n str: A summary string.\n\n Example:\n\n r = Component({'colour': 'Red',\n 'grainsize': 'VF-F',\n 'lithology': 'Sandstone'})\n\n r.summary() --> 'Red, vf-f, sandstone'\n \"\"\"\n if arg_3 and not arg_0.__dict__:\n return arg_3\n\n if arg_1 == '':\n return arg_3\n\n arg_4 = [k for k, v in arg_0.__dict__.items() if v is not '']\n\n arg_5 = arg_1 or '{' + '}, {'.join(arg_4) + '}'\n\n try:\n Func = CustomFormatter().format(arg_5, **arg_0.__dict__)\n except KeyError as e:\n raise ComponentError(\"Error building summary, \"+str(e))\n\n if Func and arg_2 and not arg_1:\n Func = Func[0].upper() + Func[1:]\n\n return Func"} +{"_id": "doc_6328", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"\n Graceful deprecation for old class name.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n arg_2 = \"The 'Func' class was renamed 'Component'. \"\n arg_2 += \"Please update your code.\"\n warnings.warn(arg_2, DeprecationWarning, stacklevel=2)\n\n return Component(*arg_0, **arg_1)"} +{"_id": "doc_6329", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Processes a single row from the file.\n \"\"\"\n if not arg_0:\n return\n\n # Construct the column dictionary that maps each field to\n # its start, its length, and its read and write functions.\n arg_2 = {k: {'start': s,\n 'len': l,\n 'read': r,\n 'write': w} for k, (s, l, r, w) in arg_1.items()}\n\n # Now collect the item\n arg_3 = {}\n for arg_4 in arg_2:\n arg_5 = _get_field(arg_0, arg_2, arg_4)\n if arg_5 is not None:\n arg_3[arg_4] = arg_5\n\n return arg_3"} +{"_id": "doc_6330", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Read all the rows and return a dict of the results.\n \"\"\"\n arg_1 = {}\n for arg_2 in arg_0.split('\\n'):\n if not arg_2:\n continue\n\n if len(arg_2) < 8: # Not a real record.\n continue\n\n # Read the metadata for this row/\n arg_3 = _process_row(arg_2, columns_) or {'card': None}\n arg_4 = arg_3['card']\n\n # Now we know the card type for this row, we can process it.\n if arg_4 is not None:\n arg_5 = _process_row(arg_2, columns[arg_4])\n\n arg_6 = arg_1.get(arg_4, [])\n arg_6.append(arg_5)\n arg_1[arg_4] = arg_6\n\n # Flatten if possible.\n for arg_7, arg_8 in arg_1.items():\n if len(arg_8) == 1:\n arg_1[arg_7] = arg_8[0]\n\n return arg_1"} +{"_id": "doc_6331", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Private method. Checks if striplog is monotonically increasing in\n depth.\n\n Returns:\n Bool.\n \"\"\"\n def conc(arg_1, arg_2):\n return arg_1 + arg_2\n\n # Check boundaries, b\n arg_2 = np.array(reduce(conc, [[i.top.z, i.base.z] for i in arg_0]))\n\n return all(np.diff(arg_2) >= 0)"} +{"_id": "doc_6332", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Property. Summarize a Striplog with some statistics.\n\n Returns:\n List. A list of (Component, total thickness thickness) tuples.\n \"\"\"\n arg_1 = set([arg_3.primary for arg_3 in arg_0])\n arg_2 = {r: 0 for r in arg_1}\n for arg_3 in arg_0:\n arg_2[arg_3.primary] += arg_3.thickness\n\n return sorted(arg_2.items(), key=operator.itemgetter(1), reverse=True)"} +{"_id": "doc_6333", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None,\n arg_6=True):\n \"\"\"\n Private method. Take a sequence of tops in an arbitrary dimension,\n and provide a list of intervals from which a striplog can be made.\n\n This is only intended to be used by ``from_image()``.\n\n Args:\n tops (iterable). 
A list of floats.\n values (iterable). A list of values to look up.\n basis (iterable). A list of components.\n components (iterable). A list of Components.\n\n Returns:\n List. A list of Intervals.\n \"\"\"\n # Scale tops to actual depths.\n arg_7 = float(arg_3.size)\n arg_8, arg_9 = arg_3[0], arg_3[-1]\n arg_1 = [arg_8 + (p/(arg_7-1)) * (arg_9-arg_8) for p in arg_1]\n arg_10 = arg_1[1:] + [arg_9]\n\n arg_11 = []\n for arg_12, arg_13 in enumerate(arg_1):\n\n arg_14, arg_15, arg_16 = arg_2[arg_12], [], {}\n\n if arg_6 and np.isnan(arg_14):\n continue\n\n if (arg_5 is not None):\n arg_16 = {arg_5: arg_14}\n\n if arg_4 is not None:\n try:\n arg_15 = [deepcopy(arg_4[int(arg_14)])]\n except IndexError:\n arg_15 = []\n\n if arg_15 and (arg_15[0] is None):\n arg_15 = []\n\n arg_17 = Interval(arg_13, arg_10[arg_12], data=arg_16, arg_4=arg_15)\n arg_11.append(arg_17)\n\n return arg_11"} +{"_id": "doc_6334", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Private function. Make sure we have what we need to make a striplog.\n \"\"\"\n\n # Rename 'depth' or 'MD'\n if ('top' not in arg_1.keys()):\n arg_1['top'] = arg_1.pop('depth', arg_1.pop('MD', None))\n\n # Sort everything\n arg_3 = list(arg_1.keys()).index('top')\n arg_4 = sorted(zip(*arg_1.values()), key=lambda x: x[arg_3])\n arg_1 = {arg_5: list(arg_6) for arg_5, arg_6 in zip(arg_1.keys(), zip(*arg_4))}\n\n if arg_1['top'] is None:\n raise StriplogError('Could not get tops.')\n\n # Get rid of null-like values if specified.\n if arg_2 is not None:\n for arg_5, arg_6 in arg_1.items():\n arg_1[arg_5] = [i if i != arg_2 else None for i in arg_6]\n\n return arg_1"} +{"_id": "doc_6335", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=False,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None):\n \"\"\"\n Private function. 
Takes a data dictionary and reconstructs a list\n of Intervals from it.\n\n Args:\n data_dict (dict)\n stop (float): Where to end the last interval.\n points (bool)\n include (dict)\n exclude (dict)\n ignore (list)\n lexicon (Lexicon)\n\n Returns:\n list.\n \"\"\"\n\n arg_4 = arg_4 or {}\n arg_5 = arg_5 or {}\n arg_6 = arg_6 or []\n\n # Reassemble as list of dicts\n arg_8 = []\n for arg_9 in zip(*arg_1.values()):\n arg_8.append({arg_10: arg_11 for arg_10, arg_11 in zip(arg_1.keys(), arg_9)})\n\n # Sort\n arg_8 = sorted(arg_8, arg_18=lambda x: x['top'])\n\n # Filter down:\n arg_12 = []\n for arg_13 in arg_8:\n arg_14 = True\n arg_15 = []\n for arg_10, arg_11 in arg_13.items():\n arg_16 = arg_4.get(arg_10, utils.null_default(True))\n arg_17 = arg_5.get(arg_10, utils.null_default(False))\n if arg_10 in arg_6:\n arg_15.append(arg_10)\n if not arg_16(arg_11):\n arg_14 = False\n if arg_17(arg_11):\n arg_14 = False\n if arg_15:\n for arg_18 in arg_15:\n arg_19 = arg_13.pop(arg_18, None)\n if arg_14:\n arg_12.append(arg_13)\n\n # Fill in\n if not arg_3:\n for arg_20, arg_21 in enumerate(arg_12):\n if arg_21.get('base', None) is None:\n try: # To set from next interval\n arg_21['base'] = arg_12[arg_20+1]['top']\n except (IndexError, KeyError):\n # It's the last interval\n if arg_2 is not None:\n arg_22 = arg_2 - arg_21['top']\n else:\n arg_22 = 1\n arg_21['base'] = arg_21['top'] + arg_22\n\n # Build the list of intervals to pass to __init__()\n arg_23 = []\n for arg_21 in arg_12:\n arg_24 = arg_21.pop('top')\n arg_25 = arg_21.pop('base', None)\n arg_26 = arg_21.pop('description', '')\n if arg_21:\n arg_27, arg_28 = {}, {}\n for arg_10, arg_11 in arg_21.items():\n if (arg_10[:5].lower() == 'comp ') or (arg_10[:9].lower() == 'component'):\n arg_10 = re.sub(r'comp(?:onent)? 
', '', arg_10, flags=re.I)\n arg_27[arg_10] = arg_11 # It's a component\n else:\n if arg_11 is not None:\n arg_28[arg_10] = arg_11 # It's data\n arg_29 = [Component(arg_27)] if arg_27 else None\n arg_30 = Interval(**{'top': arg_24,\n 'base': arg_25,\n 'description': arg_26,\n 'data': arg_28,\n 'components': arg_29})\n else:\n arg_30 = Interval(**{'top': arg_24,\n 'base': arg_25,\n 'description': arg_26,\n 'lexicon': arg_7})\n arg_23.append(arg_30)\n\n return arg_23"} +{"_id": "doc_6336", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=None,\n arg_3=',',\n arg_4=None,\n arg_5=False,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None,\n arg_10=None,\n arg_11=None,\n arg_12=None,\n arg_13=None,\n arg_14=None):\n \"\"\"\n Load from a CSV file or text.\n \"\"\"\n if (arg_1 is None) and (arg_2 is None):\n raise StriplogError(\"You must provide a filename or CSV text.\")\n\n if (arg_1 is not None):\n if arg_12 is None:\n arg_12 = arg_1\n with open(arg_1, 'r') as arg_15:\n arg_2 = arg_15.read()\n\n arg_12 = arg_12 or 'CSV'\n\n # Deal with multiple spaces in space delimited file.\n if arg_3 == ' ':\n arg_2 = re.sub(r'[ \\t]+', ' ', arg_2)\n\n if arg_14 is not None:\n arg_2 = arg_3.join(arg_14) + '\\n' + arg_2\n\n try:\n arg_15 = StringIO(arg_2) # Python 3\n except TypeError:\n arg_15 = StringIO(unicode(arg_2)) # Python 2\n\n arg_16 = csv.DictReader(arg_15, delimiter=arg_3)\n\n # Reorganize the data to make fixing it easier.\n arg_17 = {arg_22.strip().lower(): [] for arg_22 in arg_16.fieldnames if arg_22 is not None}\n arg_18 = arg_15.tell()\n for arg_19 in arg_17:\n arg_15.seek(arg_18)\n for arg_20 in arg_16:\n arg_21 = {arg_22.strip().lower(): arg_23.strip() for arg_22, arg_23 in arg_20.items()}\n try:\n arg_17[arg_19].append(float(arg_21[arg_19]))\n except ValueError:\n arg_17[arg_19].append(arg_21[arg_19])\n\n arg_15.close()\n\n arg_8 = arg_8 or {}\n for arg_22, arg_23 in arg_8.items():\n arg_17[arg_23] = arg_17.pop(arg_22)\n\n arg_24 = arg_0._clean_longitudinal_data(arg_17, arg_10=arg_10)\n\n arg_25 = arg_0._build_list_of_Intervals(arg_24,\n arg_5=arg_5,\n arg_4=arg_4,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_11=arg_11,\n arg_13=arg_13)\n\n return arg_0(arg_25, arg_12=arg_12)"} +{"_id": "doc_6337", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=False,\n arg_8=None,\n arg_9='Log'):\n \"\"\"\n Turn a 1D array into a striplog, given a cutoff.\n\n Args:\n log (array-like): A 1D array or a list of integers.\n cutoff (number or array-like): The log value(s) at which to bin\n the log. Optional.\n components (array-like): A list of components. Use this or\n ``legend``.\n legend (``Legend``): A legend object. Use this or ``components``.\n legend_field ('str'): If you're not trying to match against\n components, then you can match the log values to this field in\n the Decors.\n field (str): The field in the Interval's ``data`` to store the log\n values as.\n right (bool): Which side of the cutoff to send things that are\n equal to, i.e. right on, the cutoff.\n basis (array-like): A depth basis for the log, so striplog knows\n where to put the boundaries.\n source (str): The source of the data. 
Default 'Log'.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n if (arg_3 is None) and (arg_4 is None) and (arg_6 is None):\n arg_10 = 'You must provide a list of components, and legend, or a field.'\n raise StriplogError(arg_10)\n\n if (arg_4 is not None) and (arg_5 is None):\n try: # To treat it like a legend.\n arg_3 = [deepcopy(arg_13.component) for arg_13 in arg_4]\n except AttributeError: # It's just a list of components.\n pass\n\n if arg_5 is not None:\n arg_11 = [getattr(d, arg_5, 0) for d in arg_4]\n arg_3 = [Component() for arg_12 in range(int(max(arg_11)+1))]\n for arg_12, arg_13 in enumerate(arg_4):\n arg_3[arg_12] = deepcopy(arg_13.component)\n\n if arg_2 is not None:\n\n # First make sure we have enough components.\n try:\n arg_14 = len(arg_2)\n except TypeError:\n arg_14 = 1\n if len(arg_3) < arg_14+1:\n arg_10 = 'For n cutoffs, you need to provide at least'\n arg_10 += 'n+1 components.'\n raise StriplogError(arg_10)\n\n # Digitize.\n try: # To use cutoff as a list.\n arg_15 = np.digitize(arg_1, arg_2, arg_7)\n except ValueError: # It's just a number.\n arg_15 = np.digitize(arg_1, [arg_2], arg_7)\n\n else:\n arg_15 = np.copy(arg_1)\n\n arg_16, arg_17 = utils.tops_Funclike(arg_15)\n\n if arg_8 is None:\n arg_10 = 'You must provide a depth or elevation basis.'\n raise StriplogError(arg_10)\n\n arg_18 = arg_0.__intervals_from_tops(arg_16,\n arg_17,\n arg_8,\n arg_3,\n arg_6=arg_6\n )\n\n return arg_0(arg_18, arg_9=arg_9)"} +{"_id": "doc_6338", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None,\n arg_3=\"LAS\",\n arg_4=',',\n arg_5=False):\n \"\"\"\n Turn LAS3 'lithology' section into a Striplog.\n\n Args:\n string (str): A section from an LAS3 file.\n lexicon (Lexicon): The language for conversion to components.\n source (str): A source for the data.\n dlm (str): The delimiter.\n abbreviations (bool): Whether to expand abbreviations.\n\n Returns:\n Striplog: The ``striplog`` object.\n\n Note:\n Handles multiple 'Data' sections. It would be smarter for it\n to handle one at a time, and to deal with parsing the multiple\n sections in the Well object.\n\n Does not read an actual LAS file. 
Use the Well object for that.\n \"\"\"\n arg_6 = re.DOTALL | re.IGNORECASE\n arg_7 = r'\\~\\w+?_Data.+?\\n(.+?)(?:\\n\\n+|\\n*\\~|\\n*$)'\n arg_8 = re.compile(arg_7, flags=arg_6)\n arg_9 = arg_8.search(arg_1).group(1)\n\n arg_10 = re.search(r'\\.(.+?)\\: ?.+?source', arg_1)\n if arg_10:\n arg_3 = arg_10.group(1).strip()\n\n return arg_0.from_descriptions(arg_9, arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5)"} +{"_id": "doc_6339", "title": "", "text": "def Func(arg_0, arg_1, arg_2='canstrat'):\n \"\"\"\n Eat a Canstrat DAT file and make a striplog.\n \"\"\"\n with open(arg_1) as f:\n arg_3 = f.read()\n\n arg_4 = parse_canstrat(arg_3)\n\n arg_5 = []\n for arg_6 in arg_4[7]: # 7 is the 'card type' for lithology info.\n if arg_6.pop('skip'):\n continue\n arg_7 = arg_6.pop('top')\n arg_8 = arg_6.pop('base')\n arg_9 = [Component({'lithology': arg_6['rtc'],\n 'colour': arg_6['colour_name']\n })]\n arg_10 = Interval(arg_7=arg_7, arg_8=arg_8, components=arg_9, arg_4=arg_6)\n arg_5.append(arg_10)\n\n return arg_0(arg_5, arg_2=arg_2)"} +{"_id": "doc_6340", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a shallow Func.\"\"\"\n return Striplog([arg_1.Func() for arg_1 in arg_0],\n order=arg_0.order,\n source=arg_0.source)"} +{"_id": "doc_6341", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=\",\", arg_3=\"Striplog\"):\n \"\"\"\n Returns an LAS 3.0 section string.\n\n Args:\n use_descriptions (bool): Whether to use descriptions instead\n of summaries, if available.\n dlm (str): The delimiter.\n source (str): The sourse of the data.\n\n Returns:\n str: A string forming Lithology section of an LAS3 file.\n \"\"\"\n arg_4 = arg_0.to_csv(arg_1=arg_1,\n arg_2=arg_2,\n header=False)\n\n return templates.section.format(name='Lithology',\n short=\"LITH\",\n arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_6342", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Get data from the striplog.\n \"\"\"\n arg_4 = arg_2 or utils.null\n arg_5 = []\n for arg_6 in arg_0:\n arg_7 = arg_6.data.get(arg_1)\n if arg_7 is None:\n if arg_3 is not None:\n arg_7 = arg_3\n else:\n arg_7 = np.nan\n arg_5.append(arg_4(arg_7))\n\n return np.array(arg_5)"} +{"_id": "doc_6343", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n 'Extract' a log into the components of a striplog.\n\n Args:\n log (array_like). A log or other 1D data.\n basis (array_like). The depths or elevations of the log samples.\n name (str). The name of the attribute to store in the components.\n function (function). A function that takes an array as the only\n input, and returns whatever you want to store in the 'name'\n attribute of the primary component.\n Returns:\n None. 
The function works on the striplog in place.\n \"\"\"\n # Build a dict of {index: [log values]} to keep track.\n arg_5 = {}\n arg_6 = -1\n for arg_7, arg_8 in enumerate(arg_2):\n arg_9 = arg_0.read_at(arg_8, index=True)\n if arg_9 is None:\n continue\n if arg_9 == arg_6:\n arg_5[arg_9].append(arg_1[arg_7])\n else:\n arg_5[arg_9] = [arg_1[arg_7]]\n arg_6 = arg_9\n\n # Set the requested attribute in the primary comp of each interval.\n for arg_9, arg_10 in arg_5.items():\n arg_11 = arg_4 or utils.null\n arg_12 = arg_11(np.array(arg_10))\n arg_0[arg_9].data[arg_3] = arg_12\n\n return None"} +{"_id": "doc_6344", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Look for a regex expression in the descriptions of the striplog.\n If there's no description, it looks in the summaries.\n\n If you pass a Component, then it will search the components, not the\n descriptions or summaries.\n\n Case insensitive.\n\n Args:\n search_term (string or Component): The thing you want to search\n for. Strings are treated as regular expressions.\n index (bool): Whether to return the index instead of the interval.\n Returns:\n Striplog: A striplog that contains only the 'hit' Intervals.\n However, if ``index`` was ``True``, then that's what you get.\n \"\"\"\n arg_3 = []\n for arg_4, arg_5 in enumerate(arg_0):\n try:\n arg_6 = arg_5.description or arg_5.primary.summary()\n arg_7 = re.compile(arg_1, flags=re.IGNORECASE)\n if arg_7.search(arg_6):\n arg_3.append(arg_4)\n except TypeError:\n if arg_1 in arg_5.components:\n arg_3.append(arg_4)\n if arg_3 and arg_2:\n return arg_3\n elif arg_3:\n return arg_0[arg_3]\n else:\n return"} +{"_id": "doc_6345", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Find overlaps in a striplog.\n\n Args:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the overlaps as intervals.\n \"\"\"\n return arg_0.__find_incongruities(op=operator.gt, arg_1=arg_1)"} +{"_id": "doc_6346", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Finds gaps in a striplog.\n\n Args:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the gaps. A sort of anti-striplog.\n \"\"\"\n return arg_0.__find_incongruities(op=operator.lt, arg_1=arg_1)"} +{"_id": "doc_6347", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=False):\n \"\"\"\n Remove intervals below a certain limit thickness. 
In place.\n\n Args:\n limit (float): Anything thinner than this will be pruned.\n n (int): The n thinnest beds will be pruned.\n percentile (float): The thinnest specified percentile will be\n pruned.\n keep_ends (bool): Whether to keep the first and last, regardless\n of whether they meet the pruning criteria.\n \"\"\"\n arg_5 = arg_0.copy()\n\n if not (arg_1 or arg_2 or arg_3):\n arg_6 = \"You must provide a limit or n or percentile for pruning.\"\n raise StriplogError(arg_6)\n if arg_1:\n Func = [i for i, iv in enumerate(arg_5) if iv.thickness < arg_1]\n if arg_2:\n Func = arg_5.thinnest(arg_2=arg_2, index=True)\n if arg_3:\n arg_2 = np.floor(len(arg_5)*arg_3/100)\n Func = arg_5.thinnest(arg_2=arg_2, index=True)\n\n if arg_4:\n arg_8, arg_9 = 0, len(arg_5) - 1\n if arg_8 in Func:\n Func.remove(arg_8)\n if arg_9 in Func:\n Func.remove(arg_9)\n\n del arg_5[Func]\n\n return arg_5"} +{"_id": "doc_6348", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Fill in empty intervals by growing from top and base.\n\n Note that this operation happens in-place and destroys any information\n about the ``Position`` (e.g. metadata associated with the top or base).\n See GitHub issue #54.\n \"\"\"\n arg_1 = arg_0.copy()\n\n arg_2 = arg_1.find_gaps(index=True)\n\n if not arg_2:\n return\n\n for arg_3 in arg_2:\n arg_4 = arg_1[arg_3]\n arg_5 = arg_1[arg_3 + 1]\n\n if arg_1.order == 'depth':\n arg_6 = (arg_5.top.z-arg_4.base.z)/2\n arg_4.base = arg_4.base.z + arg_6\n arg_5.top = arg_5.top.z - arg_6\n else:\n arg_6 = (arg_5.base-arg_4.top)/2\n arg_4.top = arg_4.top.z + arg_6\n arg_5.base = arg_5.base.z - arg_6\n\n return arg_1"} +{"_id": "doc_6349", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Fill gaps with the component provided.\n\n Example\n t = s.Func(Component({'lithology': 'cheese'}))\n \"\"\"\n arg_2 = [arg_1] if arg_1 is not None else []\n\n # Make the intervals to go in the gaps.\n arg_3 = arg_0.find_gaps()\n if not arg_3:\n return arg_0\n for arg_4 in arg_3:\n arg_4.components = arg_2\n\n return deepcopy(arg_0) + arg_3"} +{"_id": "doc_6350", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Makes a striplog of all Funcions.\n\n Args:\n Striplog. The striplog instance to Func with.\n\n Returns:\n Striplog. The result of the Funcion.\n \"\"\"\n if not isinstance(arg_1, arg_0.__class__):\n arg_2 = \"You can only Func striplogs with each other.\"\n raise StriplogError(arg_2)\n\n arg_3 = []\n for arg_4 in arg_0:\n for arg_5 in arg_1:\n try:\n arg_3.append(arg_4.Func(arg_5))\n except IntervalError:\n # The intervals don't overlap\n pass\n return Striplog(arg_3)"} +{"_id": "doc_6351", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Merges overlaps by merging overlapping Intervals.\n\n The function takes no arguments and returns ``None``. 
It operates on\n the striplog 'in place'\n\n TODO: This function will not work if any interval overlaps more than\n one other intervals at either its base or top.\n \"\"\"\n arg_1 = np.array(arg_0.find_overlaps(index=True))\n\n if not arg_1.any():\n return\n\n for arg_2 in arg_1:\n arg_3 = arg_0[arg_2].copy()\n arg_4 = arg_0[arg_2 + 1].copy()\n\n # Get rid of the before and after pieces.\n del arg_0[arg_2]\n del arg_0[arg_2]\n\n # Make the new piece.\n arg_5 = arg_3.merge(arg_4)\n\n # Insert it.\n arg_0.__insert(arg_2, arg_5)\n\n arg_1 += 1\n\n return"} +{"_id": "doc_6352", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=False,\n arg_3=True,\n arg_4=True,\n arg_5=None,\n arg_6=None\n ):\n \"\"\"\n Plots a Funcogram and returns the data for it.\n\n Args:\n lumping (str): If given, the bins will be lumped based on this\n attribute of the primary components of the intervals\n encountered.\n summary (bool): If True, the summaries of the components are\n returned as the bins. Otherwise, the default behaviour is to\n return the Components themselves.\n sort (bool): If True (default), the Funcogram is sorted by value,\n starting with the largest.\n plot (bool): If True (default), produce a bar plot.\n legend (Legend): The legend with which to colour the bars.\n ax (axis): An axis object, which will be returned if provided.\n If you don't provide one, it will be created but not returned.\n\n Returns:\n Tuple: A tuple of tuples of entities and counts.\n\n TODO:\n Deal with numeric properties, so I can Funcogram 'Vp' values, say.\n \"\"\"\n # This seems like overkill, but collecting all this stuff gives\n # the user some choice about what they get back.\n arg_7 = []\n arg_8 = []\n arg_9 = defaultdict(int)\n for arg_10 in arg_0:\n if arg_1:\n arg_11 = arg_10.primary[arg_1]\n else:\n if arg_2:\n arg_11 = arg_10.primary.summary()\n else:\n arg_11 = arg_10.primary\n arg_7.append(arg_10.primary)\n arg_8.append(arg_10.primary.summary())\n arg_9[arg_11] += arg_10.thickness\n\n if arg_3:\n arg_12 = sorted(arg_9.items(), key=lambda arg_10: arg_10[1], reverse=True)\n arg_13, arg_14 = zip(*arg_12)\n else:\n arg_13, arg_14 = tuple(arg_9.keys()), tuple(arg_9.values())\n\n # Make plot.\n if arg_4:\n if arg_6 is None:\n arg_15, arg_6 = plt.subplots()\n arg_16 = False\n else:\n arg_16 = True\n arg_17 = np.arange(len(arg_13))\n arg_18 = arg_6.bar(arg_17, arg_14, align='center')\n arg_6.set_xticks(arg_17)\n arg_6.set_xticklabels(arg_8)\n if arg_5:\n arg_19 = [arg_5.get_colour(arg_21) for arg_21 in arg_7]\n for arg_20, arg_21 in zip(arg_18, arg_19):\n arg_20.set_color(arg_21)\n arg_6.set_ylabel('Thickness [m]')\n else:\n arg_18 = []\n\n if arg_4 and arg_16:\n return arg_14, arg_13, arg_6\n\n return arg_14, arg_13, arg_18"} +{"_id": "doc_6353", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Inverts the striplog, changing its order and the order of its contents.\n\n Operates in place by default.\n\n Args:\n copy (bool): Whether to operate in place or make a copy.\n\n Returns:\n None if operating in-place, or an Funced copy of the striplog\n if not.\n \"\"\"\n if arg_1:\n return Striplog([arg_2.Func(arg_1=True) for arg_2 in arg_0])\n else:\n for arg_2 in arg_0:\n arg_2.Func()\n arg_0.__sort()\n arg_3 = arg_0.order\n arg_0.order = {'depth': 'elevation', 'elevation': 'depth'}[arg_3]\n return"} +{"_id": "doc_6354", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Run a series of tests and return the corresponding results.\n\n Based on curve testing for ``welly``.\n\n Args:\n 
tests (list): a list of functions.\n\n Returns:\n list. The results. Stick to booleans (True = pass) or ints.\n \"\"\"\n # This is hacky... striplog should probably merge with welly...\n\n # Ignore aliases\n arg_2 = arg_2 or {}\n arg_2 = arg_2.get('striplog', arg_2.get('Striplog', []))\n\n # Gather the tests.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n arg_3 =\\\n arg_1.get('all', [])+arg_1.get('All', [])+arg_1.get('ALL', [])\\\n + arg_1.get('striplog', arg_1.get('Striplog', []))\\\n + utils.flatten_list([arg_1.get(a) for a in arg_2])\n arg_3 = filter(None, arg_3)\n\n # If we explicitly set zero tests for a particular key, then this\n # overrides the 'all' tests.\n if not arg_1.get('striplog', arg_1.get('Striplog', 1)):\n arg_3 = []\n\n return {arg_4.__name__: arg_4(arg_0) for arg_4 in arg_3}"} +{"_id": "doc_6355", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a log-like stream of RGB values from an image.\n\n Args:\n filename (str): The filename of a PNG image.\n offset (Number): If < 1, interpreted as proportion of way across\n the image. If > 1, interpreted as pixels from left.\n\n Returns:\n ndarray: A 2d array (a column of RGB triples) at the specified\n offset.\n\n TODO:\n Generalize this to extract 'logs' from images in other ways, such\n as giving the mean of a range of pixel columns, or an array of\n columns. See also a similar routine in pythonanywhere/freqbot.\n \"\"\"\n arg_2 = plt.imread(arg_0)\n if arg_1 < 1:\n arg_3 = int(arg_2.shape[1] * arg_1)\n else:\n arg_3 = arg_1\n return arg_2[:, arg_3, :3]"} +{"_id": "doc_6356", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Return an underscore if the attribute is absent.\n Not all components have the same attributes.\n \"\"\"\n try:\n arg_4 = super(CustomFormatter, arg_0)\n return arg_4.Func(arg_1, arg_2, arg_3)\n except KeyError: # Key is missing\n return (\"_\", arg_1)\n except IndexError: # Value is missing\n return (\"_\", arg_1)"} +{"_id": "doc_6357", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Lists all the jobs registered with Nomad.\n\n https://www.nomadproject.io/docs/http/jobs.html\n arguments:\n - prefix :(str) optional, specifies a string to filter jobs on based on an prefix.\n This is specified as a querystring parameter.\n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"prefix\": arg_1}\n return arg_0.request(method=\"get\", arg_2=arg_2).json()"} +{"_id": "doc_6358", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\" Parse a HCL Job file. 
Returns a dict with the JSON formatted job.\n This API endpoint is only supported from Nomad version 0.8.3.\n\n https://www.nomadproject.io/api/jobs.html#Func-job\n\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n return arg_0.request(\"Func\", json={\"JobHCL\": arg_1, \"Canonicalize\": arg_2}, method=\"post\", allow_redirects=True).json()"} +{"_id": "doc_6359", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Update token.\n\n https://www.nomadproject.io/api/acl-tokens.html\n\n arguments:\n - AccdesorID\n - token\n returns: dict\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n return arg_0.request(\"token\", arg_1, json=arg_2, method=\"post\").json()"} +{"_id": "doc_6360", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Lists all the allocations.\n\n https://www.nomadproject.io/docs/http/allocs.html\n arguments:\n - prefix :(str) optional, specifies a string to filter allocations on based on an prefix.\n This is specified as a querystring parameter.\n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"prefix\": arg_1}\n return arg_0.request(method=\"get\", arg_2=arg_2).json()"} +{"_id": "doc_6361", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" This endpoint is used to mark a deployment as failed. This should be done to force the scheduler to stop\n creating allocations as part of the deployment or to cause a rollback to a previous job version.\n\n https://www.nomadproject.io/docs/http/deployments.html\n\n arguments:\n - id\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"DeploymentID\": arg_1}\n return arg_0.request(\"fail\", arg_1, json=arg_2, method=\"post\").json()"} +{"_id": "doc_6362", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\" Toggle the drain mode of the node.\n When enabled, no further allocations will be\n assigned and existing allocations will be migrated.\n\n https://www.nomadproject.io/docs/http/node.html\n\n arguments:\n - id (str uuid): node id\n - enable (bool): enable node drain or not to enable node drain\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n\n return arg_0.request(arg_1, \"drain\", params={\"enable\": arg_2}, method=\"post\").json()"} +{"_id": "doc_6363", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\" This endpoint toggles the drain mode of the node. 
When draining is enabled,\n no further allocations will be assigned to this node, and existing allocations\n will be migrated to new nodes.\n\n If an empty dictionary is given as drain_spec this will disable/toggle the drain.\n\n https://www.nomadproject.io/docs/http/node.html\n\n arguments:\n - id (str uuid): node id\n - drain_spec (dict): https://www.nomadproject.io/api/nodes.html#drainspec\n - mark_eligible (bool): https://www.nomadproject.io/api/nodes.html#markeligible\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_4 = {}\n\n if arg_2 and arg_3 is not None:\n arg_4 = {\n \"NodeID\": arg_1,\n \"DrainSpec\": arg_2,\n \"MarkEligible\": arg_3\n }\n elif arg_2 and arg_3 is None:\n arg_4 = {\n \"NodeID\": arg_1,\n \"DrainSpec\": arg_2\n }\n elif not arg_2 and arg_3 is not None:\n arg_4 = {\n \"NodeID\": arg_1,\n \"DrainSpec\": None,\n \"MarkEligible\": arg_3\n }\n elif not arg_2 and arg_3 is None:\n arg_4 = {\n \"NodeID\": arg_1,\n \"DrainSpec\": None,\n }\n\n return arg_0.request(arg_1, \"drain\", json=arg_4, method=\"post\").json()"} +{"_id": "doc_6364", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\" Toggle the eligibility of the node.\n\n https://www.nomadproject.io/docs/http/node.html\n\n arguments:\n - id (str uuid): node id\n - eligible (bool): Set to True to mark node eligible\n - ineligible (bool): Set to True to mark node ineligible\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_4 = {}\n\n if arg_2 is not None and arg_3 is not None:\n raise nomad.api.exceptions.InvalidParameters\n if arg_2 is None and arg_3 is None:\n raise nomad.api.exceptions.InvalidParameters\n\n if arg_2 is not None and arg_2:\n arg_4 = {\"Eligibility\": \"eligible\", \"NodeID\": arg_1}\n elif arg_2 is not None and not arg_2:\n arg_4 = {\"Eligibility\": \"ineligible\", \"NodeID\": arg_1}\n elif arg_3 is not None:\n arg_4 = {\"Eligibility\": \"ineligible\", \"NodeID\": arg_1}\n elif arg_3 is not None and not arg_3:\n arg_4 = {\"Eligibility\": \"eligible\", \"NodeID\": arg_1}\n\n return arg_0.request(arg_1, \"eligibility\", json=arg_4, method=\"post\").json()"} +{"_id": "doc_6365", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=\"/\"):\n \"\"\" This endpoint Funcs the contents of a file in an allocation directory.\n\n https://www.nomadproject.io/api/client.html#Func-file\n\n arguments:\n - id: (str) allocation_id required\n - offset: (int) required\n - origin: (str) either start|end\n - path: (str) optional\n returns: (str) text\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.BadRequestNomadException\n \"\"\"\n arg_5 = {\n \"path\": arg_4,\n \"offset\": arg_2,\n \"origin\": arg_3\n }\n return arg_0.request(arg_1, arg_5=arg_5, method=\"get\").text"} +{"_id": "doc_6366", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=\"/\"):\n \"\"\" Stat a file in an allocation directory.\n\n https://www.nomadproject.io/docs/http/client-fs-stat.html\n\n arguments:\n - id\n - path\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n if arg_1:\n return arg_0.request(arg_1, params={\"path\": arg_2}, method=\"get\").json()\n else:\n return arg_0.request(params={\"path\": arg_2}, method=\"get\").json()"} +{"_id": "doc_6367", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Initiate a join 
between the agent and target peers.\n\n https://www.nomadproject.io/docs/http/agent-join.html\n\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"address\": arg_1}\n return arg_0.request(\"join\", arg_2=arg_2, method=\"post\").json()"} +{"_id": "doc_6368", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Lists all the evaluations.\n\n https://www.nomadproject.io/docs/http/evals.html\n arguments:\n - prefix :(str) optional, specifies a string to filter evaluations on based on an prefix.\n This is specified as a querystring parameter.\n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"prefix\": arg_1}\n return arg_0.request(method=\"get\", arg_2=arg_2).json()"} +{"_id": "doc_6369", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Lists all the namespaces registered with Nomad.\n\n https://www.nomadproject.io/docs/enterprise/namespaces/index.html\n arguments:\n - prefix :(str) optional, specifies a string to filter namespaces on based on an prefix.\n This is specified as a querystring parameter.\n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"prefix\": arg_1}\n return arg_0.request(method=\"get\", arg_2=arg_2).json()"} +{"_id": "doc_6370", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\" Dispatches a new instance of a parameterized job.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n - payload\n - meta\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_4 = {\"Meta\": arg_3, \"Payload\": arg_2}\n return arg_0.request(arg_1, \"dispatch\", json=arg_4, method=\"post\").json()"} +{"_id": "doc_6371", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Deregisters a job, and stops all allocations part of it.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n - purge (bool), optionally specifies whether the job should be\n stopped and purged immediately (`purge=True`) or deferred to the\n Nomad garbage collector (`purge=False`).\n\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n - nomad.api.exceptions.InvalidParameters\n \"\"\"\n arg_3 = None\n if arg_2 is not None:\n if not isinstance(arg_2, bool):\n raise nomad.api.exceptions.InvalidParameters(\"purge is invalid \"\n \"(expected type %s but got %s)\"%(type(bool()), type(arg_2)))\n arg_3 = {\"purge\": arg_2}\n return arg_0.request(arg_1, arg_3=arg_3, method=\"delete\").json()"} +{"_id": "doc_6372", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\" Query the status of a client node registered with Nomad.\n\n https://www.nomadproject.io/docs/http/operator.html\n\n returns: dict\n optional arguments:\n - stale, (defaults to False), Specifies if the cluster should respond without an active leader.\n This is specified as a querystring parameter.\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n\n arg_2 = {\"stale\": arg_1}\n return arg_0.request(\"raft\", \"configuration\", arg_2=arg_2, method=\"get\").json()"} +{"_id": "doc_6373", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\" Remove the Nomad server with given 
address from the Raft configuration.\n The return code signifies success or failure.\n\n https://www.nomadproject.io/docs/http/operator.html\n\n arguments:\n - peer_address, The address specifies the server to remove and is given as an IP:port\n optional arguments:\n - stale, (defaults to False), Specifies if the cluster should respond without an active leader.\n This is specified as a querystring parameter.\n returns: Boolean\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n\n arg_3 = {\"address\": arg_1, \"stale\": arg_2}\n return arg_0.request(\"raft\", \"peer\", arg_3=arg_3, method=\"delete\").ok"} +{"_id": "doc_6374", "title": "", "text": "def Func(arg_0, arg_1=\"\"):\n \"\"\" This endpoint lists all deployments.\n\n https://www.nomadproject.io/docs/http/deployments.html\n\n optional_arguments:\n - prefix, (default \"\") Specifies a string to filter deployments on based on an index prefix.\n This is specified as a querystring parameter.\n\n returns: list of dicts\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n \"\"\"\n arg_2 = {\"prefix\": arg_1}\n return arg_0.request(arg_2=arg_2, method=\"get\").json()"} +{"_id": "doc_6375", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a random mutator from a list of mutators\n \"\"\"\n return arg_0.mutator[arg_1][random.randint(0, arg_0.config.level)]"} +{"_id": "doc_6376", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a polyglot attack containing the original object\n \"\"\"\n return arg_0.polyglot_attacks[random.choice(arg_0.config.techniques)] % arg_1"} +{"_id": "doc_6377", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Perform the Funcing\n \"\"\"\n arg_2 = list(arg_1)\n arg_3 = random.randrange(1, len(arg_2))\n arg_4=random.randrange(math.ceil((float(len(arg_2)) / arg_3)))+1\n for arg_5 in range(arg_4):\n arg_0.random_action(arg_2)\n return arg_0.safe_unicode(arg_2)"} +{"_id": "doc_6378", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Safely return an unicode encoded string\n \"\"\"\n arg_2 = \"\"\n arg_1 = \"\".join(b for b in arg_1)\n for arg_3 in arg_1:\n arg_2 += arg_3\n return arg_2"} +{"_id": "doc_6379", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Kill the servers\n \"\"\"\n os.kill(arg_0.httpd.pid, signal.SIGKILL)\n os.kill(arg_0.httpsd.pid, signal.SIGKILL)\n arg_0.client_queue.put((0,0))\n if arg_0.config.fuzz_web:\n arg_0.request_checker.join()\n arg_0.logger.debug(\"[{0}] - PJFServer successfully completed\".format(time.strftime(\"%H:%M:%S\")))"} +{"_id": "doc_6380", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Serve custom HTML page\n \"\"\"\n try:\n response.headers.append(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.append(\"Accept-Encoding\", \"identity\")\n response.headers.append(\"Content-Type\", \"text/html\")\n return static_file(arg_1, root=arg_0.config.html)\n except Exception as e:\n raise PJFBaseException(e.message if hasattr(e, \"message\") else str(e))"} +{"_id": "doc_6381", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Serve fuzzed JSON object\n \"\"\"\n try:\n arg_1 = arg_0.json.fuzzed\n if arg_0.config.fuzz_web:\n arg_0.client_queue.put((request.environ.get('REMOTE_ADDR'), arg_1))\n response.headers.append(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.append(\"Accept-Encoding\", \"identity\")\n response.headers.append(\"Content-Type\", arg_0.config.content_type)\n if arg_0.config.notify:\n 
PJFTestcaseServer.send_testcase(arg_1, '127.0.0.1', arg_0.config.ports[\"Funcrs\"][\"TCASE_PORT\"])\n yield arg_1\n except Exception as e:\n raise PJFBaseException(e.message if hasattr(e, \"message\") else str(e))"} +{"_id": "doc_6382", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generic Func mutator, use a decorator for the given type\n \"\"\"\n arg_2 = arg_0.decorators\n\n @arg_2.mutate_object_decorate\n def mutate():\n return arg_1\n return mutate()"} +{"_id": "doc_6383", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"\", arg_3=False, arg_4=False, arg_5=2):\n \"\"\"\n Spawn a new process using subprocess\n \"\"\"\n try:\n if type(arg_1) != list:\n raise PJFInvalidType(type(arg_1), list)\n if type(arg_2) != str:\n raise PJFInvalidType(type(arg_2), str)\n if type(arg_3) != bool:\n raise PJFInvalidType(type(arg_3), bool)\n arg_0._in = arg_2\n try:\n arg_0.process = subprocess.Popen(arg_1, stdout=PIPE, stderr=PIPE, arg_3=PIPE, arg_4=arg_4)\n arg_0.finish_read(arg_5, arg_2, arg_3)\n if arg_0.process.poll() is not None:\n arg_0.close()\n except KeyboardInterrupt:\n return\n except OSError:\n raise PJFProcessExecutionError(\"Binary <%s> does not exist\" % arg_1[0])\n except Exception as e:\n raise PJFBaseException(e.message if hasattr(e, \"message\") else str(e))"} +{"_id": "doc_6384", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Try to get output in a separate thread\n \"\"\"\n try:\n if arg_2:\n if sys.version_info >= (3, 0):\n arg_0.process.stdin.write(bytes(arg_1, \"utf-8\"))\n else:\n arg_0.process.stdin.write(arg_1)\n arg_0._out = arg_0.process.communicate()[0]\n except (error, IOError):\n arg_0._out = arg_0._in\n pass"} +{"_id": "doc_6385", "title": "", "text": "def Func(arg_0, arg_1=2, arg_2=\"\", arg_3=False):\n \"\"\"\n Wait until we got output or until timeout is over\n \"\"\"\n arg_4 = Thread(target=arg_0.get_output, args=(arg_2, arg_3))\n arg_4.start()\n if arg_1 > 0:\n arg_4.join(arg_1)\n else:\n arg_4.join()\n if arg_4.is_alive():\n arg_0.close()\n arg_0.return_code = -signal.SIGHUP\n else:\n arg_0.return_code = arg_0.process.returncode"} +{"_id": "doc_6386", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Terminate the newly created process\n \"\"\"\n try:\n arg_0.process.terminate()\n arg_0.return_code = arg_0.process.returncode\n except OSError:\n pass\n arg_0.process.stdin.Func()\n arg_0.process.stdout.Func()\n arg_0.process.stderr.Func()\n arg_0.logger.debug(\"[{0}] - PJFExecutor successfully completed\".format(time.strftime(\"%H:%M:%S\")))"} +{"_id": "doc_6387", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse the command line and Func PyJFuzz\n \"\"\"\n from .pjf_worker import PJFWorker\n arg_1 = PJFWorker(arg_0)\n if arg_0.update_pjf:\n arg_1.update_library()\n elif arg_0.browser_auto:\n arg_1.browser_autopwn()\n elif arg_0.fuzz_web:\n arg_1.web_fuzzer()\n elif arg_0.json:\n if not arg_0.web_server and not arg_0.ext_fuzz and not arg_0.cmd_fuzz:\n arg_1.fuzz()\n elif arg_0.ext_fuzz:\n if arg_0.stdin:\n arg_1.fuzz_stdin()\n else:\n arg_1.fuzz_command_line()\n elif arg_0.cmd_fuzz:\n if arg_0.stdin:\n arg_1.fuzz_external(True)\n else:\n arg_1.fuzz_external()\n else:\n arg_1.Func_http_server()\n elif arg_0.json_file:\n arg_1.Func_file_fuzz()\n elif arg_0.process_to_monitor:\n arg_1.Func_process_monitor()"} +{"_id": "doc_6388", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Perform the actual external fuzzing, you may replace this method in order to increase performance\n \"\"\"\n try:\n if arg_0.config.stdin:\n 
arg_0.spawn(arg_0.config.command, stdin_content=arg_1, stdin=True, timeout=1)\n else:\n if \"@@\" not in arg_0.config.command:\n raise PJFMissingArgument(\"Missing @@ filename indicator while using non-stdin fuzzing method\")\n for arg_2 in arg_0.config.command:\n if \"@@\" in arg_2:\n arg_0.config.command[arg_0.config.command.index(arg_2)] = arg_2.replace(\"@@\", arg_1)\n arg_0.spawn(arg_0.config.command, timeout=2)\n arg_0.logger.debug(\"[{0}] - PJFExternalFuzzer successfully completed\".format(time.strftime(\"%H:%M:%S\")))\n return arg_0._out\n except KeyboardInterrupt:\n return \"\"\n except Exception as e:\n raise PJFBaseException(e.message if hasattr(e, \"message\") else str(e))"} +{"_id": "doc_6389", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Build the ``And`` instance\n\n :param list pre: The prerequisites list\n :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.\n \"\"\"\n if arg_1 is None:\n arg_1 = []\n\n arg_3 = deque()\n for arg_4 in arg_0.values:\n try:\n arg_3.append(utils.val(arg_4, arg_1, arg_2=arg_2))\n except errors.OptGram as e:\n continue\n except errors.FlushGrams as e:\n arg_5 = \"\".join(arg_3)\n arg_3.clear()\n # this is assuming a scope was pushed!\n if len(arg_0.fuzzer._scope_stack) == 1:\n arg_1.append(arg_5)\n else:\n arg_6 = arg_0.fuzzer._curr_scope.setdefault(\"prev_append\", deque())\n arg_6.extend(arg_1)\n arg_6.append(arg_5)\n arg_1.clear()\n continue\n\n return arg_0.sep.join(arg_3)"} +{"_id": "doc_6390", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Build the ``Quote`` instance\n\n :param list pre: The prerequisites list\n :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.\n \"\"\"\n arg_3 = super(Q, arg_0).Func(arg_1, arg_2=arg_2)\n\n if arg_0.escape:\n return repr(arg_3)\n elif arg_0.html_js_escape:\n return (\"'\" + arg_3.encode(\"string_escape\").replace(\"<\", \"\\\\x3c\").replace(\">\", \"\\\\x3e\") + \"'\")\n else:\n return \"{q}{r}{q}\".format(q=arg_0.quote, r=arg_3)"} +{"_id": "doc_6391", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Build the ``Or`` instance\n\n :param list pre: The prerequisites list\n :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.\n \"\"\"\n if arg_1 is None:\n arg_1 = []\n\n # self.shortest_vals will be set by the GramFuzzer and will\n # contain a list of value options that have a minimal reference\n # chain\n if arg_2 and arg_0.shortest_vals is not None:\n return utils.val(rand.choice(arg_0.shortest_vals), arg_1, arg_2=arg_2)\n else:\n return utils.val(rand.choice(arg_0.values), arg_1, arg_2=arg_2)"} +{"_id": "doc_6392", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Build the current ``Opt`` instance\n\n :param list pre: The prerequisites list\n :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.\n \"\"\"\n if arg_1 is None:\n arg_1 = []\n\n if arg_2 or rand.maybe(arg_0.prob):\n raise errors.OptGram\n\n return super(Opt, arg_0).Func(arg_1, arg_2=arg_2)"} +{"_id": "doc_6393", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Build the STAR field.\n\n :param list pre: The prerequisites list\n :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be 
generated.\n \"\"\"\n if arg_1 is None:\n arg_1 = []\n\n if arg_2:\n raise errors.OptGram\n elif rand.maybe():\n return super(STAR, arg_0).Func(arg_1, arg_2=arg_2)\n else:\n raise errors.OptGram"} +{"_id": "doc_6394", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Shutdown the running process and the monitor\n \"\"\"\n try:\n arg_0._Func()\n if arg_0.process:\n arg_0.process.wait()\n arg_0.process.stdout.close()\n arg_0.process.stdin.close()\n arg_0.process.stderr.close()\n arg_0.finished = True\n arg_0.send_testcase('', '127.0.0.1', arg_0.config.ports[\"servers\"][\"TCASE_PORT\"])\n arg_0.logger.debug(\"[{0}] - PJFProcessMonitor successfully completed\".format(time.strftime(\"%H:%M:%S\")))\n except Exception as e:\n raise PJFBaseException(e.message if hasattr(e, \"message\") else str(e))"} +{"_id": "doc_6395", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Run command in a loop and check exit status plus restart process when needed\n \"\"\"\n try:\n arg_0.start()\n arg_2 = shlex.split(arg_0.config.process_to_monitor)\n if arg_1:\n signal.signal(signal.SIGINT, arg_0.shutdown)\n arg_0.process = subprocess.Popen(arg_2, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n while arg_0.process and not arg_0.finished:\n arg_0.process.wait()\n if arg_0._is_sigsegv(arg_0.process.returncode):\n if arg_0.config.debug:\n print(\"[\\033[92mINFO\\033[0m] Process crashed with \\033[91mSIGSEGV\\033[0m, waiting for testcase...\")\n while not arg_0.got_testcase():\n time.sleep(1)\n arg_0.save_testcase(arg_0.testcase[-10:]) # just take last 10 testcases\n if arg_0.process:\n arg_0.process = subprocess.Popen(arg_2, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n except OSError:\n arg_0.shutdown()\n arg_0.process = False\n arg_0.got_testcase = lambda: True\n raise PJFProcessExecutionError(\"Binary <%s> does not exist\" % arg_2[0])\n except Exception as e:\n raise PJFBaseException(\"Unknown error please send log to author\")"} +{"_id": "doc_6396", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Fuzz all elements inside the object\n \"\"\"\n try:\n if type(arg_1) == dict:\n arg_2 = {}\n for arg_3 in arg_1:\n if len(arg_0.config.parameters) > 0:\n if arg_0.config.exclude_parameters:\n arg_4 = arg_3 not in arg_0.config.parameters\n else:\n arg_4 = arg_3 in arg_0.config.parameters\n else:\n arg_4 = True\n if arg_4:\n if type(arg_1[arg_3]) == dict:\n arg_2.update({arg_3: arg_0.Func(arg_1[arg_3])})\n elif type(arg_1[arg_3]) == list:\n arg_2.update({arg_3: arg_0.Func(arg_1[arg_3])})\n else:\n arg_2.update({arg_3: arg_0.mutator.fuzz(arg_1[arg_3])})\n else:\n arg_2.update({arg_3: arg_0.Func(arg_1[arg_3])})\n arg_1 = arg_2\n del arg_2\n elif type(arg_1) == list:\n arg_5 = []\n for arg_3 in arg_1:\n if type(arg_3) == dict:\n arg_5.append(arg_0.Func(arg_3))\n elif type(arg_3) == list:\n arg_5.append(arg_0.Func(arg_3))\n else:\n if len(arg_0.config.parameters) <= 0:\n arg_5.append(arg_0.mutator.fuzz(arg_3))\n else:\n arg_5.append(arg_3)\n arg_1 = arg_5\n del arg_5\n except Exception as e:\n raise PJFBaseException(e.message if hasattr(e, \"message\") else str(e))\n return arg_1"} +{"_id": "doc_6397", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Mutate a generic object based on type\n \"\"\"\n def mutate():\n arg_2 = arg_1()\n return arg_0.Mutators.get_mutator(arg_2, type(arg_2))\n return mutate"} +{"_id": "doc_6398", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" When we get term signal\n if we are waiting and got a sigterm, we just exit.\n if we have a child running, we pass the 
signal first to the child\n then we exit.\n\n :param signum:\n :param frame:\n :return:\n \"\"\"\n assert(arg_0.state in ('WAITING', 'RUNNING', 'PAUSED'))\n logger.debug(\"our state %s\", arg_0.state)\n if arg_0.state == 'WAITING':\n return arg_0.ioloop.stop()\n\n if arg_0.state == 'RUNNING':\n logger.debug('already running sending signal to child - %s',\n arg_0.sprocess.pid)\n os.kill(arg_0.sprocess.pid, arg_1)\n arg_0.ioloop.stop()"} +{"_id": "doc_6399", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n if we have a running child we kill it and set our state to paused\n if we don't have a running child, we set our state to paused\n this will pause all the nodes in single-beat cluster\n\n its useful when you deploy some code and don't want your child to spawn\n randomly\n\n :param msg:\n :return:\n \"\"\"\n arg_2 = ''\n if arg_0.state == State.RUNNING and arg_0.sprocess and arg_0.sprocess.proc:\n arg_0.sprocess.set_exit_callback(arg_0.proc_exit_cb_noop)\n arg_0.sprocess.proc.kill()\n arg_2 = 'killed'\n # TODO: check if process is really dead etc.\n arg_0.state = State.PAUSED\n return arg_2"} +{"_id": "doc_6400", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n sets state to waiting - so we resume spawning children\n \"\"\"\n if arg_0.state == State.PAUSED:\n arg_0.state = State.WAITING"} +{"_id": "doc_6401", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n stops the running child process - if its running\n it will re-spawn in any single-beat node after sometime\n\n :param msg:\n :return:\n \"\"\"\n arg_2 = ''\n if arg_0.state == State.RUNNING and arg_0.sprocess and arg_0.sprocess.proc:\n arg_0.state = State.PAUSED\n arg_0.sprocess.set_exit_callback(arg_0.proc_exit_cb_state_set)\n arg_0.sprocess.proc.kill()\n arg_2 = 'killed'\n # TODO: check if process is really dead etc.\n return arg_2"} +{"_id": "doc_6402", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n restart the subprocess\n i. we set our state to RESTARTING - on restarting we still send heartbeat\n ii. we kill the subprocess\n iii. we start again\n iv. 
if its started we set our state to RUNNING, else we set it to WAITING\n\n :param msg:\n :return:\n \"\"\"\n arg_2 = ''\n if arg_0.state == State.RUNNING and arg_0.sprocess and arg_0.sprocess.proc:\n arg_0.state = State.RESTARTING\n arg_0.sprocess.set_exit_callback(arg_0.proc_exit_cb_restart)\n arg_0.sprocess.proc.kill()\n arg_2 = 'killed'\n # TODO: check if process is really dead etc.\n return arg_2"} +{"_id": "doc_6403", "title": "", "text": "def Func(arg_0):\n # type: (int) -> None\n \"\"\"Close the connection to the TwinCAT message router.\"\"\"\n arg_1 = _adsDLL.AdsPortCloseEx\n arg_1.restype = ctypes.c_long\n arg_3 = arg_1(arg_0)\n\n if arg_3:\n raise ADSError(arg_3)"} +{"_id": "doc_6404", "title": "", "text": "def Func(arg_0):\n # type: (int) -> AmsAddr\n \"\"\"Return the local AMS-address and the port number.\n\n :rtype: pyads.structs.AmsAddr\n :return: AMS-address\n\n \"\"\"\n arg_1 = _adsDLL.AdsGetLocalAddressEx\n arg_2 = SAmsAddr()\n arg_3 = arg_1(arg_0, ctypes.pointer(arg_2))\n\n if arg_3:\n raise ADSError(arg_3)\n\n arg_4 = AmsAddr()\n arg_4._ams_addr = arg_2\n\n return arg_4"} +{"_id": "doc_6405", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=False\n):\n # type: (int, AmsAddr, int, int, Type, bool) -> Any\n \"\"\"Read data synchronous from an ADS-device.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param Type data_type: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :rtype: data_type\n :return: value: **value**\n\n \"\"\"\n arg_6 = _adsDLL.AdsSyncReadReqEx2\n\n arg_7 = ctypes.pointer(arg_1.amsAddrStruct())\n arg_8 = ctypes.c_ulong(arg_2)\n arg_9 = ctypes.c_ulong(arg_3)\n\n if arg_4 == PLCTYPE_STRING:\n arg_10 = (STRING_BUFFER * PLCTYPE_STRING)()\n else:\n arg_10 = arg_4()\n\n arg_11 = ctypes.pointer(arg_10)\n arg_12 = ctypes.c_ulong(ctypes.sizeof(arg_10))\n\n arg_13 = ctypes.c_ulong()\n arg_14 = ctypes.pointer(arg_13)\n\n arg_15 = arg_6(\n arg_0,\n arg_7,\n arg_8,\n arg_9,\n arg_12,\n arg_11,\n arg_14,\n )\n\n if arg_15:\n raise ADSError(arg_15)\n\n # If we're reading a value of predetermined size (anything but a string),\n # validate that the correct number of bytes were read\n if arg_4 != PLCTYPE_STRING and arg_13.value != arg_12.value:\n raise RuntimeError(\n \"Insufficient data (expected {0} bytes, {1} were read).\".format(\n arg_12.value, arg_13.value\n )\n )\n\n if arg_5:\n return arg_10\n\n if arg_4 == PLCTYPE_STRING:\n return arg_10.value.decode(\"utf-8\")\n\n if type(arg_4).__name__ == \"PyCArrayType\":\n return [arg_16 for arg_16 in arg_10]\n\n if hasattr(arg_10, \"value\"):\n return arg_10.value\n\n return arg_10"} +{"_id": "doc_6406", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n # type: (int, AmsAddr, int, int) -> None\n \"\"\"Remove a device notification.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr adr: local or remote AmsAddr\n :param int notification_handle: Notification Handle\n :param int user_handle: User Handle\n\n \"\"\"\n arg_4 = _adsDLL.AdsSyncDelDeviceNotificationReqEx\n\n arg_5 = ctypes.pointer(arg_1.amsAddrStruct())\n arg_6 = ctypes.c_ulong(arg_2)\n arg_7 = arg_4(arg_0, arg_5, arg_6)\n callback_store.pop(arg_2, None)\n if 
arg_7:\n raise ADSError(arg_7)\n\n adsSyncWriteReqEx(arg_0, arg_1, ADSIGRP_SYM_RELEASEHND, 0, arg_3, PLCTYPE_UDINT)"} +{"_id": "doc_6407", "title": "", "text": "def Func(arg_0, arg_1):\n # type: (int, int) -> None\n \"\"\"Set Timeout.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param int nMs: timeout in ms\n\n \"\"\"\n arg_2 = _adsDLL.AdsSyncSetTimeoutEx\n arg_3 = ctypes.c_long(arg_1)\n arg_4 = arg_2(arg_0, arg_3)\n if arg_4:\n raise ADSError(arg_4)"} +{"_id": "doc_6408", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Removes `node` from the hash ring and its replicas.\n \"\"\"\n arg_0.nodes.remove(arg_1)\n for arg_2 in xrange(arg_0.replicas):\n arg_3 = arg_0.hash_method(b(\"%s:%d\" % (arg_1, arg_2)))\n arg_0.ring.pop(arg_3)\n arg_0.sorted_keys.remove(arg_3)"} +{"_id": "doc_6409", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=0.1):\n \"\"\"\n Return a new Lock object using key ``name`` that mimics\n the behavior of threading.Lock.\n\n If specified, ``timeout`` indicates a maximum life for the Func.\n By default, it will remain Funced until release() is called.\n\n ``sleep`` indicates the amount of time to sleep per loop iteration\n when the Func is in bFuncing mode and another client is currently\n holding the Func.\n \"\"\"\n return Lock(arg_0, arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_6410", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve a list of events since the last poll. Multiple calls may be needed to retrieve all events.\n\n If no events occur, the API will block for up to 30 seconds, after which an empty list is returned. As soon as\n an event is received in this time, it is returned immediately.\n\n Returns:\n :class:`.SkypeEvent` list: a list of events, possibly empty\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.conn.endpoints[\"self\"].Func():\n arg_1.append(SkypeEvent.fromRaw(arg_0, arg_2))\n return arg_1"} +{"_id": "doc_6411", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve various metadata associated with a URL, as seen by Skype.\n\n Args:\n url (str): address to ping for info\n\n Returns:\n dict: metadata for the website queried\n \"\"\"\n return arg_0.conn(\"GET\", SkypeConnection.API_URL, params={\"url\": arg_1},\n auth=SkypeConnection.Auth.Authorize).json()"} +{"_id": "doc_6412", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve all details for a specific contact, including fields such as birthday and mood.\n\n Args:\n id (str): user identifier to lookup\n\n Returns:\n SkypeContact: resulting contact object\n \"\"\"\n try:\n arg_2 = arg_0.skype.conn(\"POST\", \"{0}/users/batch/profiles\".format(SkypeConnection.API_USER),\n arg_2={\"usernames\": [arg_1]}, auth=SkypeConnection.Auth.SkypeToken).json()\n Func = SkypeContact.fromRaw(arg_0.skype, arg_2[0])\n if Func.id not in arg_0.contactIds:\n arg_0.contactIds.append(Func.id)\n return arg_0.merge(Func)\n except SkypeApiException as e:\n if len(e.args) >= 2 and getattr(e.args[1], \"status_code\", None) == 403:\n # Not a contact, so no permission to retrieve information.\n return None\n raise"} +{"_id": "doc_6413", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve a list of all known Func.\n\n Returns:\n SkypeBotUser list: resulting bot user objects\n \"\"\"\n arg_1 = arg_0.skype.conn(\"GET\", \"{0}/agents\".format(SkypeConnection.API_BOT),\n auth=SkypeConnection.Auth.SkypeToken).json().get(\"agentDescriptions\", [])\n return [arg_0.merge(SkypeBotUser.fromRaw(arg_0.skype, arg_2)) for arg_2 in arg_1]"} +{"_id": 
"doc_6414", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieve a single Func.\n\n Args:\n id (str): UUID or username of the Func\n\n Returns:\n SkypeBotUser: resulting Func user object\n \"\"\"\n arg_2 = arg_0.skype.conn(\"GET\", \"{0}/agents\".format(SkypeConnection.API_BOT), params={\"agentId\": arg_1},\n auth=SkypeConnection.Auth.SkypeToken).json().get(\"agentDescriptions\", [])\n return arg_0.merge(SkypeBotUser.fromRaw(arg_0.skype, arg_2[0])) if arg_2 else None"} +{"_id": "doc_6415", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Search the Skype Directory for a user.\n\n Args:\n query (str): name to Func for\n\n Returns:\n SkypeUser list: collection of possible results\n \"\"\"\n arg_2 = arg_0.skype.conn(\"GET\", SkypeConnection.API_DIRECTORY,\n auth=SkypeConnection.Auth.SkypeToken,\n params={\"Funcstring\": arg_1, \"requestId\": \"0\"}).json().get(\"results\", [])\n return [SkypeUser.fromRaw(arg_0.skype, arg_3.get(\"nodeProfileData\", {})) for arg_3 in arg_2]"} +{"_id": "doc_6416", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve any pending contact requests.\n\n Returns:\n :class:`SkypeRequest` list: collection of requests\n \"\"\"\n Func = []\n for arg_2 in arg_0.skype.conn(\"GET\", \"{0}/users/{1}/invites\"\n .format(SkypeConnection.API_CONTACTS, arg_0.skype.userId),\n auth=SkypeConnection.Auth.SkypeToken).json().get(\"invite_list\", []):\n for arg_3 in arg_2.get(\"invites\", []):\n # Copy user identifier to each invite message.\n arg_3[\"userId\"] = SkypeUtils.noPrefix(arg_2.get(\"mri\"))\n Func.append(SkypeRequest.fromRaw(arg_0.skype, arg_3))\n return Func"} +{"_id": "doc_6417", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2={}):\n \"\"\"\n Create a new instance based on the raw properties of an API response.\n\n This can be overridden to automatically create subclass instances based on the raw content.\n\n Args:\n skype (Skype): parent Skype instance\n raw (dict): raw object, as provided by the API\n\n Returns:\n SkypeObj: the new class instance\n \"\"\"\n return arg_0(arg_1, arg_2, **arg_0.rawToFields(arg_2))"} +{"_id": "doc_6418", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Copy properties from other into self, skipping ``None`` values. Also Funcs the raw data.\n\n Args:\n other (SkypeObj): second object to copy fields from\n \"\"\"\n for arg_2 in arg_0.attrs:\n if not getattr(arg_1, arg_2, None) is None:\n setattr(arg_0, arg_2, getattr(arg_1, arg_2))\n if arg_1.raw:\n if not arg_0.raw:\n arg_0.raw = {}\n arg_0.raw.update(arg_1.raw)"} +{"_id": "doc_6419", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add a given object to the cache, or update an existing entry to include more fields.\n\n Args:\n obj (SkypeObj): object to add to the cache\n \"\"\"\n if arg_1.id in arg_0.cache:\n arg_0.cache[arg_1.id].Func(arg_1)\n else:\n arg_0.cache[arg_1.id] = arg_1\n return arg_0.cache[arg_1.id]"} +{"_id": "doc_6420", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3={}, **arg_4):\n \"\"\"\n Follow and track sync state URLs provided by an API endpoint, in order to implicitly handle pagination.\n\n In the first call, ``url`` and ``params`` are used as-is. 
If a ``syncState`` endpoint is provided in the\n response, subsequent calls go to the latest URL instead.\n\n Args:\n method (str): HTTP request method\n url (str): full URL to connect to\n params (dict): query parameters to include in the URL\n kwargs (dict): any extra parameters to pass to :meth:`__call__`\n \"\"\"\n try:\n arg_5 = arg_0.syncStates[(arg_1, arg_2)]\n except KeyError:\n arg_5 = arg_0.syncStates[(arg_1, arg_2)] = []\n if arg_5:\n # We have a state link, use it to replace the URL and query string.\n arg_2 = arg_5[-1]\n arg_3 = {}\n arg_6 = arg_0(arg_1, arg_2, arg_3=arg_3, **arg_4)\n try:\n arg_7 = arg_6.json()\n except ValueError:\n # Don't do anything if not a JSON response.\n pass\n else:\n # If a state link exists in the response, store it for later.\n arg_8 = arg_7.get(\"_metadata\", {}).get(\"syncState\")\n if arg_8:\n arg_5.append(arg_8)\n return arg_6"} +{"_id": "doc_6421", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Store details of the current connection in the named file.\n\n This can be used by :meth:`readToken` to re-authenticate at a later time.\n \"\"\"\n # Write token file privately.\n with os.fdopen(os.open(arg_0.tokenFile, os.O_WRONLY | os.O_CREAT, 0o600), \"w\") as f:\n # When opening files via os, truncation must be done manually.\n f.truncate()\n f.write(arg_0.userId + \"\\n\")\n f.write(arg_0.tokens[\"skype\"] + \"\\n\")\n f.write(str(int(time.mktime(arg_0.tokenExpiry[\"skype\"].timetuple()))) + \"\\n\")\n f.write(arg_0.tokens[\"reg\"] + \"\\n\")\n f.write(str(int(time.mktime(arg_0.tokenExpiry[\"reg\"].timetuple()))) + \"\\n\")\n f.write(arg_0.msgsHost + \"\\n\")"} +{"_id": "doc_6422", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Ensure the authentication token for the given auth method is still valid.\n\n Args:\n auth (Auth): authentication type to check\n\n Raises:\n .SkypeAuthException: if Skype auth is required, and the current token has expired and can't be renewed\n \"\"\"\n if arg_1 in (arg_0.Auth.SkypeToken, arg_0.Auth.Authorize):\n if \"skype\" not in arg_0.tokenExpiry or datetime.now() >= arg_0.tokenExpiry[\"skype\"]:\n if not hasattr(arg_0, \"getSkypeToken\"):\n raise SkypeAuthException(\"Skype token expired, and no password specified\")\n arg_0.getSkypeToken()\n elif arg_1 == arg_0.Auth.RegToken:\n if \"reg\" not in arg_0.tokenExpiry or datetime.now() >= arg_0.tokenExpiry[\"reg\"]:\n arg_0.getRegToken()"} +{"_id": "doc_6423", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Take the existing Skype token and refresh it, to extend the expiry time without other credentials.\n\n Raises:\n .SkypeAuthException: if the login request is rejected\n .SkypeApiException: if the login form can't be processed\n \"\"\"\n arg_0.tokens[\"skype\"], arg_0.tokenExpiry[\"skype\"] = SkypeRefreshAuthProvider(arg_0).auth(arg_0.tokens[\"skype\"])\n arg_0.getRegToken()"} +{"_id": "doc_6424", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Acquire a new registration token.\n\n Once successful, all tokens and expiry times are written to the token file (if specified on initialisation).\n \"\"\"\n arg_0.verifyToken(arg_0.Auth.SkypeToken)\n arg_1, arg_2, arg_3, arg_4 = SkypeRegistrationTokenProvider(arg_0).auth(arg_0.tokens[\"skype\"])\n arg_0.tokens[\"reg\"] = arg_1\n arg_0.tokenExpiry[\"reg\"] = arg_2\n arg_0.msgsHost = arg_3\n if arg_4:\n arg_4.config()\n arg_0.endpoints[\"main\"] = arg_4\n arg_0.syncEndpoints()\n if arg_0.tokenFile:\n arg_0.writeToken()"} +{"_id": "doc_6425", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve all current 
endpoints for the connected user.\n \"\"\"\n arg_0.endpoints[\"all\"] = []\n for arg_2 in arg_0(\"GET\", \"{0}/users/ME/presenceDocs/messagingService\".format(arg_0.msgsHost),\n params={\"view\": \"expanded\"}, auth=arg_0.Auth.RegToken).json().get(\"endpointPresenceDocs\", []):\n arg_3 = arg_2.get(\"link\", \"\").split(\"/\")[7]\n arg_0.endpoints[\"all\"].append(SkypeEndpoint(arg_0, arg_3))"} +{"_id": "doc_6426", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Query a username or email address to see if a corresponding Microsoft account exists.\n\n Args:\n user (str): username or email address of an account\n\n Returns:\n bool: whether the account exists\n \"\"\"\n return not arg_0.conn(\"POST\", \"{0}/GetCredentialType.srf\".format(SkypeConnection.API_MSACC),\n json={\"username\": arg_1}).json().get(\"IfExistsResult\")"} +{"_id": "doc_6427", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Request a new registration token using a current Skype token.\n\n Args:\n skypeToken (str): existing Skype token\n\n Returns:\n (str, datetime.datetime, str, SkypeEndpoint) tuple: registration token, associated expiry if known,\n resulting endpoint hostname, endpoint if provided\n\n Raises:\n .SkypeAuthException: if the login request is rejected\n .SkypeApiException: if the login form can't be processed\n \"\"\"\n arg_2 = arg_13 = arg_11 = None\n arg_3 = SkypeConnection.API_MSGSHOST\n while not arg_2:\n arg_4 = int(time.time())\n arg_5 = arg_0.getMac256Hash(str(arg_4))\n arg_6 = {\"LockAndKey\": \"appId=msmsgs@msnmsgr.com; time={0}; lockAndKeyResponse={1}\".format(arg_4, arg_5),\n \"Authentication\": \"skypetoken=\" + arg_1, \"BehaviorOverride\": \"redirectAs404\"}\n arg_7 = arg_0.conn(\"POST\", \"{0}/users/ME/endpoints\".format(arg_3), codes=(200, 201, 404),\n arg_6=arg_6, json={\"endpointFeatures\": \"Agent\"})\n arg_8 = arg_7.headers.get(\"Set-RegistrationToken\")\n arg_9 = arg_7.headers.get(\"Location\")\n if arg_9:\n arg_10 = re.search(r\"(https://[^/]+/v1)/users/ME/endpoints(/(%7B[a-z0-9\\-]+%7D))?\", arg_9).groups()\n if arg_10[2]:\n arg_11 = SkypeEndpoint(arg_0.conn, arg_10[2].replace(\"%7B\", \"{\").replace(\"%7D\", \"}\"))\n if not arg_10[0] == arg_3:\n # Skype is requiring the use of a different hostname.\n arg_3 = arg_9.rsplit(\"/\", 4 if arg_10[2] else 3)[0]\n # Don't accept the token if present, we need to re-register first.\n continue\n if arg_8:\n arg_2 = re.search(r\"(registrationToken=[a-z0-9\\+/=]+)\", arg_8, re.I).group(1)\n arg_12 = re.search(r\"expires=(\\d+)\", arg_8).group(1)\n arg_13 = datetime.fromtimestamp(int(arg_12))\n arg_14 = re.search(r\"endpointId=({[a-z0-9\\-]+})\", arg_8)\n if arg_14:\n arg_11 = SkypeEndpoint(arg_0.conn, arg_14.group(1))\n if not arg_11 and arg_7.status_code == 200 and arg_7.json():\n # Use the most recent endpoint listed in the JSON response.\n arg_11 = SkypeEndpoint(arg_0.conn, arg_7.json()[0][\"id\"])\n return arg_2, arg_13, arg_3, arg_11"} +{"_id": "doc_6428", "title": "", "text": "def Func(arg_0, arg_1=\"skype\"):\n \"\"\"\n Configure this endpoint to allow setting presence.\n\n Args:\n name (str): display name for this endpoint\n \"\"\"\n arg_0.conn(\"PUT\", \"{0}/users/ME/endpoints/{1}/presenceDocs/messagingService\"\n .format(arg_0.conn.msgsHost, arg_0.id),\n auth=SkypeConnection.Auth.RegToken,\n json={\"id\": \"messagingService\",\n \"type\": \"EndpointPresenceDoc\",\n \"selfLink\": \"uri\",\n \"privateInfo\": {\"epname\": arg_1},\n \"publicInfo\": {\"capabilities\": \"\",\n \"type\": 1,\n \"skypeNameVersion\": 
\"skype.com\",\n \"nodeInfo\": \"xx\",\n \"version\": \"908/1.30.0.128\"}})"} +{"_id": "doc_6429", "title": "", "text": "def Func(arg_0, arg_1=12):\n \"\"\"\n Send a keep-alive request for the endpoint.\n\n Args:\n timeout (int): maximum amount of time for the endpoint to stay active\n \"\"\"\n arg_0.conn(\"POST\", \"{0}/users/ME/endpoints/{1}/active\".format(arg_0.conn.msgsHost, arg_0.id),\n auth=SkypeConnection.Auth.RegToken, json={\"timeout\": arg_1})"} +{"_id": "doc_6430", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve a selection of conversations with the most Func activity, and store them in the cache.\n\n Each conversation is only retrieved once, so subsequent calls will retrieve older conversations.\n\n Returns:\n :class:`SkypeChat` list: collection of Func conversations\n \"\"\"\n arg_1 = \"{0}/users/ME/conversations\".format(arg_0.skype.conn.msgsHost)\n arg_2 = {\"startTime\": 0,\n \"view\": \"msnp24Equivalent\",\n \"targetType\": \"Passport|Skype|Lync|Thread\"}\n arg_3 = arg_0.skype.conn.syncStateCall(\"GET\", arg_1, arg_2, auth=SkypeConnection.Auth.RegToken).json()\n arg_4 = {}\n for arg_5 in arg_3.get(\"conversations\", []):\n arg_6 = SkypeSingleChat\n if \"threadProperties\" in arg_5:\n arg_7 = arg_0.skype.conn(\"GET\", \"{0}/threads/{1}\".format(arg_0.skype.conn.msgsHost, arg_5.get(\"id\")),\n auth=SkypeConnection.Auth.RegToken,\n arg_2={\"view\": \"msnp24Equivalent\"}).json()\n arg_5.update(arg_7)\n arg_6 = SkypeGroupChat\n arg_4[arg_5.get(\"id\")] = arg_0.merge(arg_6.fromRaw(arg_0.skype, arg_5))\n return arg_4"} +{"_id": "doc_6431", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a single conversation by identifier.\n\n Args:\n id (str): single or group Func identifier\n \"\"\"\n arg_2 = arg_0.skype.conn(\"GET\", \"{0}/users/ME/conversations/{1}\".format(arg_0.skype.conn.msgsHost, arg_1),\n auth=SkypeConnection.Auth.RegToken, params={\"view\": \"msnp24Equivalent\"}).json()\n arg_3 = SkypeSingleChat\n if \"threadProperties\" in arg_2:\n arg_4 = arg_0.skype.conn(\"GET\", \"{0}/threads/{1}\".format(arg_0.skype.conn.msgsHost, arg_2.get(\"id\")),\n auth=SkypeConnection.Auth.RegToken, params={\"view\": \"msnp24Equivalent\"}).json()\n arg_2.update(arg_4)\n arg_3 = SkypeGroupChat\n return arg_0.merge(arg_3.fromRaw(arg_0.skype, arg_2))"} +{"_id": "doc_6432", "title": "", "text": "def Func(arg_0, arg_1=(), arg_2=()):\n \"\"\"\n Create a new group chat with the given users.\n\n The current user is automatically added to the conversation as an admin. 
Any other admin identifiers must also\n be present in the member list.\n\n Args:\n members (str list): user identifiers to initially join the conversation\n admins (str list): user identifiers to gain admin privileges\n \"\"\"\n arg_3 = [{\"id\": \"8:{0}\".format(arg_0.skype.userId), \"role\": \"Admin\"}]\n for arg_4 in arg_1:\n if arg_4 == arg_0.skype.userId:\n continue\n arg_3.append({\"id\": \"8:{0}\".format(arg_4), \"role\": \"Admin\" if arg_4 in arg_2 else \"User\"})\n arg_5 = arg_0.skype.conn(\"POST\", \"{0}/threads\".format(arg_0.skype.conn.msgsHost),\n auth=SkypeConnection.Auth.RegToken, json={\"members\": arg_3})\n return arg_0.chat(arg_5.headers[\"Location\"].rsplit(\"/\", 1)[1])"} +{"_id": "doc_6433", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract the username from a contact URL.\n\n Matches addresses containing ``users/`` or ``users/ME/contacts/``.\n\n Args:\n url (str): Skype API URL\n\n Returns:\n str: extracted identifier\n \"\"\"\n arg_1 = re.search(r\"users(/ME/contacts)?/[0-9]+:([^/]+)\", arg_0)\n return arg_1.group(2) if arg_1 else None"} +{"_id": "doc_6434", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extract the conversation ID from a conversation URL.\n\n Matches addresses containing ``conversations/``.\n\n Args:\n url (str): Skype API URL\n\n Returns:\n str: extracted identifier\n \"\"\"\n arg_1 = re.search(r\"conversations/([0-9]+:[^/]+)\", arg_0)\n return arg_1.group(1) if arg_1 else None"} +{"_id": "doc_6435", "title": "", "text": "def Func(arg_0, arg_1=None, *arg_2, **arg_3):\n \"\"\"\n Repeatedly call a function, starting with init, until false-y, yielding each item in turn.\n\n The ``transform`` parameter can be used to map a collection to another format, for example iterating over a\n :class:`dict` by value rather than key.\n\n Use with state-synced functions to retrieve all results.\n\n Args:\n fn (method): function to call\n transform (method): secondary function to convert result into an iterable\n args (list): positional arguments to pass to ``fn``\n kwargs (dict): keyword arguments to pass to ``fn``\n\n Returns:\n generator: generator of objects produced from the method\n \"\"\"\n while True:\n arg_4 = arg_0(*arg_2, **arg_3)\n if arg_4:\n for arg_5 in arg_1(arg_4) if arg_1 else arg_4:\n yield arg_5\n else:\n break"} +{"_id": "doc_6436", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''\n Return a language-server diagnostic from a line of the Mypy error report;\n optionally, use the whole document to provide more context on it.\n '''\n arg_2 = re.match(line_pattern, arg_0)\n if arg_2:\n arg_3, arg_4, arg_5, arg_6, arg_7 = arg_2.groups()\n arg_4 = int(arg_4 or 1)\n arg_5 = int(arg_5 or 0)\n arg_8 = 2\n if arg_6 == 'error':\n arg_8 = 1\n arg_9 = {\n 'source': 'mypy',\n 'range': {\n 'start': {'line': arg_4 - 1, 'character': arg_5},\n # There may be a better solution, but mypy does not provide end\n 'end': {'line': arg_4 - 1, 'character': arg_5 + 1}\n },\n 'message': arg_7,\n 'severity': arg_8\n }\n if arg_1:\n # although mypy does not provide the end of the affected range, we\n # can make a good guess by highlighting the word that Mypy flagged\n arg_10 = arg_1.word_at_position(arg_9['range']['start'])\n if arg_10:\n arg_9['range']['end']['character'] = (\n arg_9['range']['start']['character'] + len(arg_10))\n\n return arg_9"} +{"_id": "doc_6437", "title": "", "text": "def Func(arg_0, arg_1='utf-8'):\n \"Return unicode text, no matter what\"\n\n if isinstance(arg_0, six.binary_type):\n arg_0 = arg_0.decode(arg_1)\n\n # it's already unicode\n arg_0 = arg_0.replace('\r\n', '\n')\n return arg_0"} +{"_id": "doc_6438", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Figure out which handler to use, based on metadata.\n Returns a handler instance or None.\n\n ``text`` should be unicode text about to be parsed.\n\n ``handlers`` is a dictionary where keys are opening delimiters \n and values are handler instances.\n \"\"\"\n for arg_2, arg_3 in arg_1.items():\n if arg_2.match(arg_0):\n return arg_3\n\n # nothing matched, give nothing back\n return None"} +{"_id": "doc_6439", "title": "", "text": "def Func(arg_0, arg_1='utf-8', arg_2=None, **arg_3):\n \"\"\"\n Parse text with frontmatter, return metadata and content.\n Pass in optional metadata defaults as keyword args.\n\n If frontmatter is not found, returns an empty metadata dictionary\n (or defaults) and original text content.\n\n ::\n\n >>> with open('tests/hello-world.markdown') as f:\n ... metadata, content = frontmatter.Func(f.read())\n >>> print(metadata['title'])\n Hello, world!\n\n \"\"\"\n # ensure unicode first\n arg_0 = u(arg_0, arg_1).strip()\n\n # metadata starts with defaults\n arg_4 = arg_3.copy()\n\n # this will only run if a handler hasn't been set higher up\n arg_2 = arg_2 or detect_format(arg_0, handlers)\n if arg_2 is None:\n return arg_4, arg_0\n\n # split on the delimiters\n try:\n arg_5, arg_6 = arg_2.split(arg_0)\n except ValueError:\n # if we can't split, bail\n return arg_4, arg_0\n\n # Func, now that we have frontmatter\n arg_5 = arg_2.load(arg_5)\n if isinstance(arg_5, dict):\n arg_4.update(arg_5)\n\n return arg_4, arg_6.strip()"} +{"_id": "doc_6440", "title": "", "text": "def Func(arg_0):\n \"Post as a dict, for serializing\"\n arg_1 = arg_0.metadata.copy()\n arg_1['content'] = arg_0.content\n return arg_1"} +{"_id": "doc_6441", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Parse YAML front matter. This uses yaml.SafeLoader by default. \n \"\"\"\n arg_2.setdefault('Loader', SafeLoader)\n return yaml.Func(arg_1, **arg_2)"} +{"_id": "doc_6442", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Export metadata as YAML. This uses yaml.SafeDumper by default.\n \"\"\"\n arg_2.setdefault('Dumper', SafeDumper)\n arg_2.setdefault('default_flow_style', False)\n arg_2.setdefault('allow_unicode', True)\n\n arg_1 = yaml.dump(arg_1, **arg_2).strip()\n return u(arg_1)"} +{"_id": "doc_6443", "title": "", "text": "async def Func(arg_0):\r\n \"\"\" Establishes a connection to the Lavalink server. 
\"\"\"\r\n await arg_0._lavalink.bot.wait_until_ready()\r\n\r\n if arg_0._ws and arg_0._ws.open:\r\n log.debug('WebSocket still open, closing...')\r\n await arg_0._ws.close()\r\n\r\n arg_1 = arg_0._lavalink.bot.user.id\r\n arg_2 = arg_0._lavalink.bot.shard_count or arg_0._shards\r\n\r\n arg_3 = {\r\n 'Authorization': arg_0._password,\r\n 'Num-Shards': arg_2,\r\n 'User-Id': str(arg_1)\r\n }\r\n log.debug('Preparing to Func to Lavalink')\r\n log.debug(' with URI: {}'.format(arg_0._uri))\r\n log.debug(' with headers: {}'.format(str(arg_3)))\r\n log.info('Connecting to Lavalink...')\r\n\r\n try:\r\n arg_0._ws = await websockets.Func(arg_0._uri, loop=arg_0._loop, extra_headers=arg_3)\r\n except OSError as error:\r\n log.exception('Failed to Func to Lavalink: {}'.format(str(error)))\r\n else:\r\n log.info('Connected to Lavalink!')\r\n arg_0._loop.create_task(arg_0.listen())\r\n arg_5 = arg_0._ws.response_headers.get('Lavalink-Major-Version', 2)\r\n try:\r\n arg_0._lavalink._server_version = int(arg_5)\r\n except ValueError:\r\n arg_0._lavalink._server_version = 2\r\n log.info('Lavalink server version is {}'.format(arg_5))\r\n if arg_0._queue:\r\n log.info('Replaying {} queued events...'.format(len(arg_0._queue)))\r\n for arg_8 in arg_0._queue:\r\n await arg_0.send(**arg_8)"} +{"_id": "doc_6444", "title": "", "text": "async def Func(arg_0):\r\n \"\"\" Waits to receive a payload from the Lavalink server and processes it. \"\"\"\r\n while not arg_0._shutdown:\r\n try:\r\n arg_1 = json.loads(await arg_0._ws.recv())\r\n except websockets.ConnectionClosed as error:\r\n log.warning('Disconnected from Lavalink: {}'.format(str(error)))\r\n for arg_2 in arg_0._lavalink.players._players.copy().keys():\r\n arg_3 = arg_0._lavalink.bot._connection._get_websocket(int(arg_2))\r\n await arg_3.voice_state(int(arg_2), None)\r\n\r\n arg_0._lavalink.players.clear()\r\n\r\n if arg_0._shutdown:\r\n break\r\n\r\n if await arg_0._attempt_reconnect():\r\n return\r\n\r\n log.warning('Unable to reconnect to Lavalink!')\r\n break\r\n\r\n arg_4 = arg_1.get('op', None)\r\n log.debug('Received WebSocket data {}'.format(str(arg_1)))\r\n\r\n if not arg_4:\r\n return log.debug('Received WebSocket message without op {}'.format(str(arg_1)))\r\n\r\n if arg_4 == 'event':\r\n log.debug('Received event of type {}'.format(arg_1['type']))\r\n arg_5 = arg_0._lavalink.players[int(arg_1['guildId'])]\r\n arg_6 = None\r\n\r\n if arg_1['type'] == 'TrackEndEvent':\r\n arg_6 = TrackEndEvent(arg_5, arg_1['track'], arg_1['reason'])\r\n elif arg_1['type'] == 'TrackExceptionEvent':\r\n arg_6 = TrackExceptionEvent(arg_5, arg_1['track'], arg_1['error'])\r\n elif arg_1['type'] == 'TrackStuckEvent':\r\n arg_6 = TrackStuckEvent(arg_5, arg_1['track'], arg_1['thresholdMs'])\r\n\r\n if arg_6:\r\n await arg_0._lavalink.dispatch_event(arg_6)\r\n elif arg_4 == 'playerUpdate':\r\n await arg_0._lavalink.update_state(arg_1)\r\n elif arg_4 == 'stats':\r\n arg_0._lavalink.stats._update(arg_1)\r\n await arg_0._lavalink.dispatch_event(StatsUpdateEvent(arg_0._lavalink.stats))\r\n\r\n log.debug('Closing WebSocket...')\r\n await arg_0._ws.close()"} +{"_id": "doc_6445", "title": "", "text": "def Func(arg_0):\r\n \"\"\" Returns the voice channel the player is connected to. \"\"\"\r\n if not arg_0.channel_id:\r\n return None\r\n\r\n return arg_0._lavalink.bot.get_channel(int(arg_0.channel_id))"} +{"_id": "doc_6446", "title": "", "text": "async def Func(arg_0, arg_1: arg_2):\r\n \"\"\" Connects to a voice channel. 
\"\"\"\r\n arg_3 = arg_0._lavalink.bot._Funcion._get_websocket(arg_2(arg_0.guild_id))\r\n await arg_3.voice_state(arg_0.guild_id, str(arg_1))"} +{"_id": "doc_6447", "title": "", "text": "async def Func(arg_0):\r\n \"\"\" Disconnects from the voice channel, if any. \"\"\"\r\n if not arg_0.is_connected:\r\n return\r\n\r\n await arg_0.stop()\r\n\r\n arg_1 = arg_0._lavalink.bot._connection._get_websocket(int(arg_0.guild_id))\r\n await arg_1.voice_state(arg_0.guild_id, None)"} +{"_id": "doc_6448", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2):\r\n \"\"\" Stores custom user data. \"\"\"\r\n arg_0._user_data.update({arg_1: arg_3})"} +{"_id": "doc_6449", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4):\r\n \"\"\" Adds a track to beginning of the queue \"\"\"\r\n arg_0.queue.insert(0, AudioTrack().build(arg_3, arg_1))"} +{"_id": "doc_6450", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_5):\r\n \"\"\" Adds a track at a specific index in the queue. \"\"\"\r\n arg_0.queue.insert(min(arg_1, len(arg_0.queue) - 1), AudioTrack().build(arg_4, arg_3))"} +{"_id": "doc_6451", "title": "", "text": "async def Func(arg_0):\r\n \"\"\" Plays previous track if it exist, if it doesn't raises a NoPreviousTrack error. \"\"\"\r\n if not arg_0.previous:\r\n raise NoPreviousTrack\r\n arg_0.queue.insert(0, arg_0.previous)\r\n await arg_0.play(ignore_shuffle=True)"} +{"_id": "doc_6452", "title": "", "text": "async def Func(arg_0, arg_1: arg_2):\r\n \"\"\" Seeks to a given position in the track. \"\"\"\r\n await arg_0._lavalink.ws.send(op='Func', guildId=arg_0.guild_id, position=arg_1)"} +{"_id": "doc_6453", "title": "", "text": "async def Func(arg_0, arg_1):\r\n \"\"\" Makes the player play the next song from the queue if a song has finished or an issue occurred. \"\"\"\r\n if isinstance(arg_1, (TrackStuckEvent, TrackExceptionEvent)) or \\\r\n isinstance(arg_1, TrackEndEvent) and arg_1.reason == 'FINISHED':\r\n await arg_0.play()"} +{"_id": "doc_6454", "title": "", "text": "def Func(arg_0, arg_1):\r\n \"\"\" Returns a player from the cache, or creates one if it does not exist. \"\"\"\r\n if arg_1 not in arg_0._players:\r\n arg_2 = arg_0._player(lavalink=arg_0.lavalink, arg_1=arg_1)\r\n arg_0._players[arg_1] = arg_2\r\n\r\n return arg_0._players[arg_1]"} +{"_id": "doc_6455", "title": "", "text": "async def Func(arg_0, arg_1, *, arg_2: arg_3):\r\n \"\"\" Searches and plays a song from a given query. \"\"\"\r\n arg_4 = arg_0.bot.lavalink.players.get(arg_1.guild.id)\r\n\r\n arg_2 = arg_2.strip('<>')\r\n\r\n if not url_rx.match(arg_2):\r\n arg_2 = f'ytsearch:{query}'\r\n\r\n arg_5 = await arg_0.bot.lavalink.get_tracks(arg_2)\r\n\r\n if not arg_5:\r\n return await arg_1.send('Nothing found!')\r\n\r\n arg_6 = discord.Embed(color=discord.Color.blurple())\r\n\r\n if 'list' in arg_2 and 'ytsearch:' not in arg_2:\r\n for arg_7 in arg_5:\r\n arg_4.add(requester=arg_1.author.id, arg_7=arg_7)\r\n\r\n arg_6.title = 'Playlist enqueued!'\r\n arg_6.description = f'Imported {len(tracks)} tracks from the playlist!'\r\n await arg_1.send(arg_6=arg_6)\r\n else:\r\n arg_10 = arg_5[0][\"info\"][\"title\"]\r\n arg_11 = arg_5[0][\"info\"][\"uri\"]\r\n\r\n arg_6.title = \"Track enqueued!\"\r\n arg_6.description = f'[{track_title}]({track_uri})'\r\n arg_4.add(requester=arg_1.author.id, arg_7=arg_5[0])\r\n\r\n if not arg_4.isFuncing:\r\n await arg_4.play()"} +{"_id": "doc_6456", "title": "", "text": "async def Func(arg_0, arg_1, arg_2: arg_3 = 1):\r\n \"\"\" Shows the player's queue. 
\"\"\"\r\n arg_4 = arg_0.bot.lavalink.players.get(arg_1.guild.id)\r\n\r\n if not arg_4.queue:\r\n return await arg_1.send('There\\'s nothing in the queue! Why not queue something?')\r\n\r\n arg_5 = 10\r\n arg_6 = math.ceil(len(arg_4.queue) / arg_5)\r\n\r\n arg_7 = (arg_2 - 1) * arg_5\r\n arg_8 = arg_7 + arg_5\r\n\r\n arg_9 = ''\r\n for arg_10, arg_11 in enumerate(arg_4.queue[arg_7:arg_8], arg_7=arg_7):\r\n arg_9 += f'`{index + 1}.` [**{track.title}**]({track.uri})\\n'\r\n\r\n arg_12 = discord.Embed(colour=discord.Color.blurple(),\r\n description=f'**{len(player.queue)} tracks**\\n\\n{queue_list}')\r\n arg_12.set_footer(text=f'Viewing page {page}/{pages}')\r\n await arg_1.send(arg_12=arg_12)"} +{"_id": "doc_6457", "title": "", "text": "async def Func(arg_0, arg_1, arg_2: arg_3):\r\n \"\"\" Removes an item from the player's queue with the given index. \"\"\"\r\n arg_4 = arg_0.bot.lavalink.players.get(arg_1.guild.id)\r\n\r\n if not arg_4.queue:\r\n return await arg_1.send('Nothing queued.')\r\n\r\n if arg_2 > len(arg_4.queue) or arg_2 < 1:\r\n return await arg_1.send(f'Index has to be **between** 1 and {len(player.queue)}')\r\n\r\n arg_2 -= 1\r\n arg_5 = arg_4.queue.pop(arg_2)\r\n\r\n await arg_1.send(f'Removed **{removed.title}** from the queue.')"} +{"_id": "doc_6458", "title": "", "text": "async def Func(arg_0, arg_1):\r\n \"\"\" A few checks to make sure the bot can join a voice channel. \"\"\"\r\n arg_2 = arg_0.bot.lavalink.players.get(arg_1.guild.id)\r\n\r\n if not arg_2.is_connected:\r\n if not arg_1.author.voice or not arg_1.author.voice.channel:\r\n await arg_1.send('You aren\\'t connected to any voice channel.')\r\n raise commands.CommandInvokeError('Author not connected to voice channel.')\r\n\r\n arg_3 = arg_1.author.voice.channel.permissions_for(arg_1.me)\r\n\r\n if not arg_3.connect or not arg_3.speak:\r\n await arg_1.send('Missing permissions `CONNECT` and/or `SPEAK`.')\r\n raise commands.CommandInvokeError('Bot has no permissions CONNECT and/or SPEAK')\r\n\r\n arg_2.store('channel', arg_1.channel.id)\r\n await arg_2.connect(arg_1.author.voice.channel.id)\r\n else:\r\n if arg_2.connected_channel.id != arg_1.author.voice.channel.id:\r\n return await arg_1.send('Join my voice channel!')"} +{"_id": "doc_6459", "title": "", "text": "async def Func(arg_0, arg_1):\r\n \"\"\" Dispatches an event to all registered hooks. \"\"\"\r\n log.debug('Dispatching event of type {} to {} hooks'.format(arg_1.__class__.__name__, len(arg_0.hooks)))\r\n for arg_2 in arg_0.hooks:\r\n try:\r\n if asyncio.iscoroutinefunction(arg_2):\r\n await arg_2(arg_1)\r\n else:\r\n arg_2(arg_1)\r\n except Exception as e: # pylint: disable=broad-except\r\n # Catch generic exception thrown by user hooks\r\n log.warning(\r\n 'Encountered exception while dispatching an event to hook `{}` ({})'.format(arg_2.__name__, str(e)))\r\n\r\n if isinstance(arg_1, (TrackEndEvent, TrackExceptionEvent, TrackStuckEvent)) and arg_1.player:\r\n await arg_1.player.handle_event(arg_1)"} +{"_id": "doc_6460", "title": "", "text": "async def Func(arg_0, arg_1):\r\n \"\"\" Returns a Dictionary containing search results for a given query. \"\"\"\r\n log.debug('Requesting tracks for query {}'.format(arg_1))\r\n\r\n async with arg_0.http.get(arg_0.rest_uri + quote(arg_1), headers={'Authorization': arg_0.password}) as res:\r\n return await res.json(content_type=None)"} +{"_id": "doc_6461", "title": "", "text": "def Func(arg_0):\r\n \"\"\" Destroys the Lavalink client. 
\"\"\"\r\n arg_0.ws.Func()\r\n arg_0.bot.remove_listener(arg_0.on_socket_response)\r\n arg_0.hooks.clear()"} +{"_id": "doc_6462", "title": "", "text": "async def Func(arg_0, arg_1, *, arg_2: arg_3):\r\n \"\"\" Plays immediately a song. \"\"\"\r\n arg_4 = arg_0.bot.lavalink.players.get(arg_1.guild.id)\r\n\r\n if not arg_4.queue and not arg_4.is_playing:\r\n return await arg_1.invoke(arg_0._play, arg_2=arg_2)\r\n\r\n arg_2 = arg_2.strip('<>')\r\n\r\n if not url_rx.match(arg_2):\r\n arg_2 = f'ytsearch:{query}'\r\n\r\n arg_5 = await arg_0.bot.lavalink.get_tracks(arg_2)\r\n\r\n if not arg_5 or not arg_5['tracks']:\r\n return await arg_1.send('Nothing found!')\r\n\r\n arg_6 = arg_5['tracks']\r\n arg_7 = arg_6.pop(0)\r\n\r\n if arg_5['loadType'] == 'PLAYLIST_LOADED':\r\n for arg_8 in arg_6:\r\n arg_4.add(requester=arg_1.author.id, arg_7=arg_8)\r\n\r\n await arg_4.play_now(requester=arg_1.author.id, arg_7=arg_7)"} +{"_id": "doc_6463", "title": "", "text": "async def Func(arg_0, arg_1, arg_2: arg_3):\r\n \"\"\" Plays the queue from a specific point. Disregards tracks before the index. \"\"\"\r\n arg_4 = arg_0.bot.lavalink.players.get(arg_1.guild.id)\r\n\r\n if arg_2 < 1:\r\n return await arg_1.send('Invalid specified index.')\r\n\r\n if len(arg_4.queue) < arg_2:\r\n return await arg_1.send('This index exceeds the queue\\'s length.')\r\n\r\n await arg_4.play_at(arg_2-1)"} +{"_id": "doc_6464", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the match object for the current list.\"\"\"\n arg_1, arg_2 = arg_0.Func_cache\n arg_3 = arg_0.string\n if arg_2 == arg_3:\n return arg_1\n arg_1 = fullmatch(\n LIST_PATTERN_FORMAT.replace(b'{pattern}', arg_0.pattern.encode()),\n arg_0._shadow,\n MULTILINE,\n )\n arg_0.Func_cache = arg_1, arg_3\n return arg_1"} +{"_id": "doc_6465", "title": "", "text": "def Func(arg_0) -> List[str]:\n \"\"\"Return items as a list of strings.\n\n Don't include sub-items and the start pattern.\n \"\"\"\n Func = [] # type: List[str]\n arg_2 = Func.append\n arg_3 = arg_0.string\n arg_4 = arg_0._match\n arg_5 = arg_4.start()\n for arg_6, arg_7 in arg_4.spans('item'):\n arg_2(arg_3[arg_6 - arg_5:arg_7 - arg_5])\n return Func"} +{"_id": "doc_6466", "title": "", "text": "def Func(\n arg_0, arg_1: arg_2 = None, arg_3: arg_4 = None\n ) -> List['WikiList']:\n \"\"\"Return the Lists inside the item with the given index.\n\n :param i: The index if the item which its sub-lists are desired.\n The performance is likely to be better if `i` is None.\n\n :param pattern: The starting symbol for the desired sub-lists.\n The `pattern` of the current list will be automatically added\n as prefix.\n Although this parameter is optional, but specifying it can improve\n the performance.\n \"\"\"\n arg_5 = (r'\\#', r'\\*', '[:;]') if arg_3 is None \\\n else (arg_3,) # type: Tuple[str, ...]\n arg_6 = arg_0.pattern\n arg_7 = arg_0.lists\n Func = [] # type: List['WikiList']\n arg_9 = Func.append\n if arg_1 is None:\n # Any sublist is acceptable\n for arg_3 in arg_5:\n for arg_10 in arg_7(arg_6 + arg_3):\n arg_9(arg_10)\n return Func\n # Only return sub-lists that are within the given item\n arg_11 = arg_0._match\n arg_12 = arg_11.spans('fullitem')\n arg_13 = arg_0._span[0]\n arg_14 = arg_11.start()\n arg_15, arg_16 = arg_12[arg_1]\n arg_16 -= arg_14 - arg_13\n arg_15 -= arg_14 - arg_13\n for arg_3 in arg_5:\n for arg_10 in arg_7(arg_6 + arg_3):\n # noinspection PyProtectedMember\n arg_17, arg_18 = arg_10._span\n if arg_15 < arg_17 and arg_18 <= arg_16:\n arg_9(arg_10)\n return Func"} +{"_id": "doc_6467", 
"title": "", "text": "def Func(arg_0, arg_1: arg_2) -> None:\n \"\"\"Convert to another list type by replacing starting pattern.\"\"\"\n arg_3 = arg_0._match\n arg_4 = arg_3.start()\n for arg_5, arg_6 in reversed(arg_3.spans('pattern')):\n arg_0[arg_5 - arg_4:arg_6 - arg_4] = arg_1\n arg_0.pattern = escape(arg_1)"} +{"_id": "doc_6468", "title": "", "text": "def Func(arg_0) -> List[Argument]:\n \"\"\"Parse template content. Create self.name and self.arguments.\"\"\"\n arg_1 = arg_0._shadow\n arg_2 = arg_0._args_matcher(arg_1).spans('arg')\n if not arg_2:\n return []\n Func = []\n arg_4 = Func.append\n arg_5 = arg_0._type_to_spans\n arg_6, arg_7 = span = arg_0._span\n arg_8 = id(span)\n arg_9 = arg_0._lststr\n arg_10 = arg_9[0]\n arg_11 = arg_5.setdefault(arg_8, [])\n arg_12 = {(arg_15[0], arg_15[1]): arg_15 for arg_15 in arg_11}.get\n for arg_13, arg_14 in arg_2:\n arg_15, arg_16 = arg_18 = [arg_6 + arg_13, arg_6 + arg_14]\n arg_17 = arg_12((arg_15, arg_16))\n if arg_17 is None:\n insort(arg_11, arg_18)\n else:\n arg_18 = arg_17\n arg_19 = Argument(arg_9, arg_5, arg_18, arg_8)\n arg_19._shadow_cache = (\n arg_10[arg_15:arg_16], arg_1[arg_13:arg_14])\n arg_4(arg_19)\n return Func"} +{"_id": "doc_6469", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = None) -> List[WikiList]:\n \"\"\"Return the Func in all arguments.\n\n For performance reasons it is usually preferred to get a specific\n Argument and use the `Func` method of that argument instead.\n \"\"\"\n return [\n arg_4 for arg_3 in arg_0.arguments for arg_4 in arg_3.Func(arg_1) if arg_4]"} +{"_id": "doc_6470", "title": "", "text": "def Func(arg_0: arg_1[arg_2]) -> dict:\n \"\"\"Create a Trie out of a list of words and return an atomic regex pattern.\n\n The corresponding Regex should match much faster than a simple Regex union.\n \"\"\"\n # plant the trie\n arg_3 = {}\n for arg_4 in arg_0:\n arg_5 = arg_3\n for arg_6 in arg_4:\n arg_5[arg_6] = arg_6 in arg_5 and arg_5[arg_6] or {}\n arg_5 = arg_5[arg_6]\n arg_5[''] = None # EOS\n return arg_3"} +{"_id": "doc_6471", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4) -> None:\n \"\"\"Insert the given string before the specified index.\n\n This method has the same effect as ``self[index:index] = string``;\n it only avoids some condition checks as it rules out the possibility\n of the key being an slice, or the need to shrink any of the sub-spans.\n\n If parse is False, don't parse the Funced string.\n \"\"\"\n arg_5, arg_6 = arg_0._span\n arg_7 = arg_0._lststr\n arg_8 = arg_7[0]\n if arg_1 < 0:\n arg_1 += arg_6 - arg_5\n if arg_1 < 0:\n arg_1 = 0\n elif arg_1 > arg_6 - arg_5: # Note that it is not >=. 
Index can be new.\n arg_1 = arg_6 - arg_5\n arg_1 += arg_5\n # Update lststr\n arg_7[0] = arg_8[:arg_1] + arg_3 + arg_8[arg_1:]\n arg_9 = len(arg_3)\n # Update spans\n arg_0._Func_update(\n arg_1=arg_1,\n length=arg_9)\n # Remember newly added spans by the string.\n arg_10 = arg_0._type_to_spans\n for arg_11, arg_12 in parse_to_spans(\n bytearray(arg_3, 'ascii', 'replace')\n ).items():\n for arg_13, arg_14 in arg_12:\n insort(arg_10[arg_11], [arg_1 + arg_13, arg_1 + arg_14])"} +{"_id": "doc_6472", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> Tuple[str, str, str]:\n \"\"\"Partition self.string where `char`'s not in atomic sub-spans.\"\"\"\n arg_3, arg_4 = arg_0._span\n arg_5 = arg_0._shadow.find(arg_1)\n if arg_5 == -1:\n return arg_0._lststr[0][arg_3:arg_4], '', ''\n arg_6 = arg_0._lststr[0]\n return arg_6[arg_3:arg_3 + arg_5], chr(arg_1), arg_6[arg_3 + arg_5 + 1:arg_4]"} +{"_id": "doc_6473", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> List[List[int]]:\n \"\"\"Return all the sub-span including self._span.\"\"\"\n return arg_0._type_to_spans[arg_1]"} +{"_id": "doc_6474", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2) -> None:\n \"\"\"Update self._type_to_spans according to the removed span.\n\n Warning: If an operation involves both Func and\n _insert_update, you might wanna consider doing the\n _insert_update before the Func as this function\n can cause data loss in self._type_to_spans.\n \"\"\"\n # Note: The following algorithm won't work correctly if spans\n # are not sorted.\n # Note: No span should be removed from _type_to_spans.\n for arg_4 in arg_0._type_to_spans.values():\n arg_5 = len(arg_4) - 1\n while arg_5 >= 0:\n arg_6, arg_7 = arg_9 = arg_4[arg_5]\n if arg_3 <= arg_6:\n # rmstart <= rmstop <= s <= e\n arg_8 = arg_3 - arg_1\n arg_9[:] = arg_6 - arg_8, arg_7 - arg_8\n arg_5 -= 1\n continue\n break\n else:\n continue\n while True:\n if arg_1 <= arg_6:\n if arg_3 < arg_7:\n # rmstart < s <= rmstop < e\n arg_9[:] = arg_1, arg_7 + arg_1 - arg_3\n arg_5 -= 1\n if arg_5 < 0:\n break\n arg_6, arg_7 = arg_9 = arg_4[arg_5]\n continue\n # rmstart <= s <= e < rmstop\n arg_4.pop(arg_5)[:] = -1, -1\n arg_5 -= 1\n if arg_5 < 0:\n break\n arg_6, arg_7 = arg_9 = arg_4[arg_5]\n continue\n break\n while arg_5 >= 0:\n if arg_7 <= arg_1:\n # s <= e <= rmstart <= rmstop\n arg_5 -= 1\n if arg_5 < 0:\n break\n arg_6, arg_7 = arg_9 = arg_4[arg_5]\n continue\n # s <= rmstart <= rmstop <= e\n arg_9[1] -= arg_3 - arg_1\n arg_5 -= 1\n if arg_5 < 0:\n break\n arg_6, arg_7 = arg_9 = arg_4[arg_5]\n continue"} +{"_id": "doc_6475", "title": "", "text": "def Func(arg_0) -> int:\n \"\"\"Return the nesting level of self.\n\n The minimum Func is 0. Being part of any Template or\n ParserFunction increases the level by one.\n \"\"\"\n arg_1, arg_2 = arg_0._span\n arg_3 = 0\n arg_4 = arg_0._type_to_spans\n for arg_5 in ('Template', 'ParserFunction'):\n arg_6 = arg_4[arg_5]\n for arg_7, arg_8 in arg_6[:bisect(arg_6, [arg_1 + 1])]:\n if arg_2 <= arg_8:\n arg_3 += 1\n return arg_3"} +{"_id": "doc_6476", "title": "", "text": "def Func(arg_0) -> bytearray:\n \"\"\"Return a copy of self.string with specific sub-spans replaced.\n\n Comments blocks are replaced by spaces. 
Other sub-spans are replaced\n by underscores.\n\n The replaced sub-spans are: (\n 'Template', 'WikiLink', 'ParserFunction', 'ExtensionTag',\n 'Comment',\n )\n\n This function is called upon extracting tables or extracting the data\n inside them.\n \"\"\"\n arg_1, arg_2 = arg_0._span\n arg_3 = arg_0._lststr[0][arg_1:arg_2]\n arg_4, arg_5 = getattr(\n arg_0, 'Func_cache', (None, None))\n if arg_4 == arg_3:\n return arg_5\n # In the old method the existing spans were used to create the shadow.\n # But it was slow because there can be thousands of spans and iterating\n # over them to find the relevant sub-spans could take a significant\n # amount of time. The new method tries to parse the self.string which\n # is usually much more faster because there are usually far less\n # sub-spans for individual objects.\n arg_5 = bytearray(arg_3, 'ascii', 'replace')\n if arg_0._type in SPAN_PARSER_TYPES:\n arg_6 = arg_5[:2]\n arg_7 = arg_5[-2:]\n arg_5[:2] = arg_5[-2:] = b'__'\n parse_to_spans(arg_5)\n arg_5[:2] = arg_6\n arg_5[-2:] = arg_7\n else:\n parse_to_spans(arg_5)\n arg_0.Func_cache = arg_3, arg_5\n return arg_5"} +{"_id": "doc_6477", "title": "", "text": "def Func(arg_0):\n \"\"\"Replace the invalid chars of SPAN_PARSER_TYPES with b'_'.\n\n For comments, all characters are replaced, but for ('Template',\n 'ParserFunction', 'Parameter') only invalid characters are replaced.\n \"\"\"\n arg_1, arg_2 = arg_0._span\n arg_3 = arg_0._lststr[0][arg_1:arg_2]\n arg_4 = bytearray(arg_3, 'ascii', 'replace')\n arg_5 = arg_0._subspans\n for arg_6 in 'Template', 'ParserFunction', 'Parameter':\n for arg_7, arg_8 in arg_5(arg_6):\n arg_4[arg_7:arg_8] = b' ' + INVALID_EXT_CHARS_SUB(\n b' ', arg_4[arg_7 + 2:arg_8 - 2]) + b' '\n for arg_7, arg_8 in arg_5('Comment'):\n arg_4[arg_7:arg_8] = (arg_8 - arg_7) * b'_'\n return arg_4"} +{"_id": "doc_6478", "title": "", "text": "def Func(arg_0) -> Dict[str, List[List[int]]]:\n \"\"\"Create the arguments for the parse function used in pformat method.\n\n Only return sub-spans and change the them to fit the new scope, i.e\n self.string.\n \"\"\"\n arg_1, arg_2 = arg_0._span\n if arg_1 == 0 and arg_2 == len(arg_0._lststr[0]):\n return deepcopy(arg_0._type_to_spans)\n return {\n arg_5: [\n [arg_3 - arg_1, arg_4 - arg_1] for arg_3, arg_4 in arg_6[bisect(arg_6, [arg_1]):]\n if arg_4 <= arg_2\n ] for arg_5, arg_6 in arg_0._type_to_spans.items()}"} +{"_id": "doc_6479", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = ' ', arg_3=False):\n \"\"\"Deprecated, use self.pformat instead.\"\"\"\n warn(\n 'Func method is deprecated, use pformat instead.',\n DeprecationWarning,\n )\n return arg_0.pformat(arg_1, arg_3)"} +{"_id": "doc_6480", "title": "", "text": "def Func(arg_0) -> List['Parameter']:\n \"\"\"Return a list of parameter objects.\"\"\"\n arg_1 = arg_0._lststr\n arg_2 = arg_0._type_to_spans\n return [\n Parameter(arg_1, arg_2, arg_3, 'Parameter')\n for arg_3 in arg_0._subspans('Parameter')]"} +{"_id": "doc_6481", "title": "", "text": "def Func(arg_0) -> List['Template']:\n \"\"\"Return a list of Func as template objects.\"\"\"\n arg_1 = arg_0._lststr\n arg_2 = arg_0._type_to_spans\n return [\n Template(arg_1, arg_2, arg_3, 'Template')\n for arg_3 in arg_0._subspans('Template')]"} +{"_id": "doc_6482", "title": "", "text": "def Func(arg_0) -> List['Comment']:\n \"\"\"Return a list of comment objects.\"\"\"\n arg_1 = arg_0._lststr\n arg_2 = arg_0._type_to_spans\n return [\n Comment(arg_1, arg_2, arg_3, 'Comment')\n for arg_3 in arg_0._subspans('Comment')]"} +{"_id": "doc_6483", 
"title": "", "text": "def Func(arg_0) -> List['Section']:\n \"\"\"Return a list of section in current wikitext.\n\n The first section will always be the lead section, even if it is an\n empty string.\n \"\"\"\n Func = [] # type: List['Section']\n arg_2 = Func.append\n arg_3 = arg_0._type_to_spans\n arg_4 = arg_0._lststr\n arg_5, arg_6 = _span = arg_0._span\n arg_7 = arg_3.setdefault('Section', [])\n arg_8 = SECTIONS_FULLMATCH(arg_0._shadow)\n arg_9 = arg_8.spans('section')\n Func0 = [len(eq) for eq in arg_8.captures('equals')]\n if not arg_7:\n # All spans are new\n Func1 = arg_7.append\n for Func2, (Func3, (Func4, Func5)) in enumerate(\n zip(Func0, arg_9), 1\n ):\n # Add text of the current_section to any parent section.\n # Note that section 0 is not a parent for any subsection.\n for Func6, Func7 in enumerate(\n Func0[Func2:], Func2\n ):\n if Func3 and Func7 > Func3:\n Func5 = arg_9[Func6][1]\n else:\n break\n Func8 = [arg_5 + Func4, arg_5 + Func5]\n Func1(Func8)\n arg_2(\n Section(arg_4, arg_3, Func8, 'Section'))\n return Func\n # There are already some spans. Instead of appending new spans\n # use them when the detected span already exists.\n Func9 = {(Func4[0], Func4[1]): Func4 for Func4 in arg_7}.get\n for Func2, (Func3, (Func4, Func5)) in enumerate(\n zip(Func0, arg_9), 1\n ):\n # Add text of the current_section to any parent section.\n # Note that section 0 is not a parent for any subsection.\n for Func6, Func7 in enumerate(\n Func0[Func2:], Func2\n ):\n if Func3 and Func7 > Func3:\n Func5 = arg_9[Func6][1]\n else:\n break\n Func4, Func5 = arg_5 + Func4, arg_5 + Func5\n arg_20 = Func9((Func4, Func5))\n if arg_20 is None:\n Func8 = [Func4, Func5]\n insort(arg_7, Func8)\n else:\n Func8 = arg_20\n arg_2(Section(arg_4, arg_3, Func8, 'Section'))\n return Func"} +{"_id": "doc_6484", "title": "", "text": "def Func(arg_0) -> List['Table']:\n \"\"\"Return a list of found table objects.\"\"\"\n Func = [] # type: List['Table']\n arg_2 = Func.append\n arg_3 = arg_0._type_to_spans\n arg_4 = arg_0._lststr\n arg_5 = arg_0._shadow[:]\n arg_6, arg_7 = arg_0._span\n arg_8 = arg_3.setdefault('Table', [])\n if not arg_8:\n # All the added spans will be new.\n arg_9 = True # type: Any\n while arg_9:\n arg_9 = False\n for arg_9 in TABLE_FINDITER(arg_5):\n Func0, Func1 = arg_9.span()\n # Ignore leading whitespace using len(m[1]).\n Func2 = [arg_6 + Func0 + len(arg_9[1]), arg_6 + Func1]\n arg_8.append(Func2)\n arg_2(Table(arg_4, arg_3, Func2, 'Table'))\n arg_5[Func0:Func1] = b'_' * (Func1 - Func0)\n return Func\n # There are already exists some spans. Try to use the already existing\n # before appending new spans.\n Func3 = {(Func4[0], Func4[1]): Func4 for Func4 in arg_8}.get\n arg_9 = True\n while arg_9:\n arg_9 = False\n for arg_9 in TABLE_FINDITER(arg_5):\n Func0, Func1 = arg_9.span()\n # Ignore leading whitespace using len(m[1]).\n Func4, Func5 = arg_6 + Func0 + len(arg_9[1]), arg_6 + Func1\n Func6 = Func3((Func4, Func5))\n if Func6 is None:\n Func2 = [Func4, Func5]\n insort(arg_8, Func2)\n else:\n Func2 = Func6\n arg_2(Table(arg_4, arg_3, Func2, 'Table'))\n arg_5[Func0:Func1] = b'_' * (Func1 - Func0)\n return Func"} +{"_id": "doc_6485", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = None) -> List['WikiList']:\n r\"\"\"Return a list of WikiList objects.\n\n :param pattern: The starting pattern for list items.\n Return all types of lists (ol, ul, and dl) if pattern is None.\n If pattern is not None, it will be passed to the regex engine,\n remember to escape the `*` character. 
Examples:\n\n - `\\#` means top-level ordered lists\n - `\\#\\*` means unordred lists inside an ordered one\n - Currently definition lists are not well supported, but you\n can use `[:;]` as their pattern.\n\n Tips and tricks:\n\n Be careful when using the following patterns as they will\n probably cause malfunction in the `sublists` method of the\n resultant List. (However don't worry about them if you are\n not going to use the `sublists` method.)\n\n - Use `\\*+` as a pattern and nested unordered lists will be\n treated as flat.\n - Use `\\*\\s*` as pattern to rtstrip `items` of the list.\n\n Although the pattern parameter is optional, but specifying it\n can improve the performance.\n \"\"\"\n Func = []\n arg_4 = Func.append\n arg_5 = arg_0._lststr\n arg_6 = arg_0._type_to_spans\n arg_7 = arg_6.setdefault('WikiList', [])\n arg_8 = {(arg_14[0], arg_14[1]): arg_14 for arg_14 in arg_7}.get\n arg_9, arg_10 = arg_0._lists_shadow_ss\n for arg_1 in \\\n (r'\\#', r'\\*', '[:;]') if arg_1 is None else (arg_1,):\n for arg_11 in finditer(\n LIST_PATTERN_FORMAT.replace(b'{pattern}', arg_1.encode()),\n arg_9, MULTILINE\n ):\n arg_12, arg_13 = arg_11.span()\n arg_14, arg_15 = arg_10 + arg_12, arg_10 + arg_13\n arg_16 = arg_8((arg_14, arg_15))\n if arg_16 is None:\n arg_17 = [arg_14, arg_15]\n insort(arg_7, arg_17)\n else:\n arg_17 = arg_16\n arg_4(WikiList(\n arg_5, arg_1, arg_11, arg_6, arg_17, 'WikiList'))\n return Func"} +{"_id": "doc_6486", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> Generator[int, None, None]:\n \"\"\"Yield all the sub-span indices excluding self._span.\"\"\"\n arg_3, arg_4 = arg_0._span\n arg_5 = arg_0._type_to_spans[arg_1]\n # Do not yield self._span by bisecting for s < ss.\n # The second bisect is an optimization and should be on [se + 1],\n # but empty spans are not desired thus [se] is used.\n arg_6 = bisect(arg_5, [arg_3])\n for arg_7 in arg_5[arg_6:bisect(arg_5, [arg_4], arg_6)]:\n if arg_7[1] <= arg_4:\n yield arg_7"} +{"_id": "doc_6487", "title": "", "text": "def Func(arg_0, arg_1: arg_2[arg_3] = None) -> arg_2['WikiText']:\n \"\"\"Return the Func node of the current object.\n\n :param type_: the type of the desired Func object.\n Currently the following types are supported: {Template,\n ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}.\n The default is None and means the first Func, of any type above.\n :return: Func WikiText object or None if no Func with the desired\n `type_` is found.\n \"\"\"\n arg_4 = arg_0.ancestors(arg_1)\n if arg_4:\n return arg_4[0]\n return None"} +{"_id": "doc_6488", "title": "", "text": "def Func(\n arg_0,\n arg_1=('Template',),\n arg_2=False,\n arg_3: arg_4 = None,\n *,\n arg_5: arg_4 = None,\n arg_6=False\n ) -> arg_4:\n \"\"\"Return normal form of self.name.\n\n - Remove comments.\n - Remove language code.\n - Remove namespace (\"template:\" or any of `localized_namespaces`.\n - Use space instead of underscore.\n - Remove consecutive spaces.\n - Use uppercase for the first letter if `capitalize`.\n - Remove #anchor.\n\n :param rm_namespaces: is used to provide additional localized\n namespaces for the template namespace. They will be removed from\n the result. Default is ('Template',).\n :param capitalize: If True, convert the first letter of the\n template's name to a capital letter. See\n [[mw:Manual:$wgCapitalLinks]] for more info.\n :param code: is the language code.\n :param capital_links: deprecated.\n :param _code: deprecated.\n\n Example:\n >>> Template(\n ... '{{ eN : tEmPlAtE : t_1 # b | a }}'\n ... 
).Func(code='en')\n 'T 1'\n \"\"\"\n if arg_2:\n warn('`capital_links` argument is deprecated,'\n ' use `capitalize` instead', DeprecationWarning)\n arg_6 = arg_2\n if arg_3:\n warn('`positional_code` argument is deprecated,'\n ' use `code` instead', DeprecationWarning)\n arg_5 = arg_3\n # Remove comments\n arg_7 = COMMENT_SUB('', arg_0.name).strip(WS)\n # Remove code\n if arg_5:\n arg_8, arg_9, arg_10 = arg_7.partition(':')\n if not arg_8 and arg_9:\n arg_7 = arg_10.strip(' ')\n arg_8, arg_9, arg_10 = arg_7.partition(':')\n if arg_5.lower() == arg_8.strip(' ').lower():\n arg_7 = arg_10.strip(' ')\n # Remove namespace\n arg_8, arg_9, arg_10 = arg_7.partition(':')\n if not arg_8 and arg_9:\n arg_7 = arg_10.strip(' ')\n arg_8, arg_9, arg_10 = arg_7.partition(':')\n if arg_8:\n arg_11 = arg_8.strip(' ').lower()\n for arg_12 in arg_1:\n if arg_12.lower() == arg_11:\n arg_7 = arg_10.strip(' ')\n break\n # Use space instead of underscore\n arg_7 = arg_7.replace('_', ' ')\n if arg_6:\n # Use uppercase for the first letter\n arg_13 = arg_7[0]\n if arg_13.islower():\n arg_7 = arg_13.upper() + arg_7[1:]\n # Remove #anchor\n arg_7, arg_9, arg_10 = arg_7.partition('#')\n return ' '.join(arg_7.split())"} +{"_id": "doc_6489", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"Eliminate duplicate arguments by removing the first occurrences.\n\n Remove the first occurrences of duplicate arguments, regardless of\n their value. Result of the rendered wikitext should remain the same.\n Warning: Some meaningful data may be removed from wikitext.\n\n Also see `rm_dup_args_safe` function.\n \"\"\"\n arg_1 = set() # type: set\n for arg_2 in reversed(arg_0.arguments):\n arg_3 = arg_2.name.strip(WS)\n if arg_3 in arg_1:\n del arg_2[:len(arg_2.string)]\n else:\n arg_1.add(arg_3)"} +{"_id": "doc_6490", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = None) -> None:\n \"\"\"Remove duplicate arguments in a safe manner.\n\n Remove the duplicate arguments only in the following situations:\n 1. Both arguments have the same name AND value. (Remove one of\n them.)\n 2. Arguments have the same name and one of them is empty. (Remove\n the empty one.)\n\n Warning: Although this is considered to be safe and no meaningful data\n is removed from wikitext, but the result of the rendered wikitext\n may actually change if the second arg is empty and removed but\n the first had had a value.\n\n If `tag` is defined, it should be a string that will be appended to\n the value of the remaining duplicate arguments.\n\n Also see `rm_first_of_dup_args` function.\n \"\"\"\n arg_3 = {} \\\n # type: Dict[str, Tuple[Argument, List[str]]]\n # Removing positional args affects their name. By reversing the list\n # we avoid encountering those kind of args.\n for arg_4 in reversed(arg_0.arguments):\n arg_5 = arg_4.name.strip(WS)\n if arg_4.positional:\n # Value of keyword arguments is automatically stripped by MW.\n arg_6 = arg_4.value\n else:\n # But it's not OK to strip whitespace in positional arguments.\n arg_6 = arg_4.value.strip(WS)\n if arg_5 in arg_3:\n # This is a duplicate argument.\n if not arg_6:\n # This duplicate argument is empty. 
It's safe to remove it.\n del arg_4[0:len(arg_4.string)]\n else:\n # Try to remove any of the detected duplicates of this\n # that are empty or their value equals to this one.\n arg_7, arg_8 = arg_3[arg_5]\n if arg_6 in arg_8:\n del arg_4[0:len(arg_4.string)]\n elif '' in arg_8:\n # This happens only if the last occurrence of name has\n # been an empty string; other empty values will\n # be removed as they are seen.\n # In other words index of the empty argument in\n # dup_vals is always 0.\n del arg_7[0:len(arg_7.string)]\n arg_8.pop(0)\n else:\n # It was not possible to remove any of the duplicates.\n arg_8.append(arg_6)\n if arg_1:\n arg_4.value += arg_1\n else:\n arg_3[arg_5] = (arg_4, [arg_6])"} +{"_id": "doc_6491", "title": "", "text": "def Func(\n arg_0, arg_1: arg_2,\n arg_3: arg_2,\n arg_4: arg_5 = None,\n arg_6: arg_2 = None,\n arg_7: arg_2 = None,\n arg_8: arg_5 = True\n ) -> None:\n \"\"\"Set the value for `name` argument. Add it if it doesn't exist.\n\n - Use `positional`, `before` and `after` keyword arguments only when\n adding a new argument.\n - If `before` is given, ignore `after`.\n - If neither `before` nor `after` are given and it's needed to add a\n new argument, then append the new argument to the end.\n - If `positional` is True, try to add the given value as a positional\n argument. Ignore `preserve_spacing` if positional is True.\n If it's None, do what seems more appropriate.\n \"\"\"\n arg_9 = list(reversed(arg_0.arguments))\n arg_10 = get_arg(arg_1, arg_9)\n # Updating an existing argument.\n if arg_10:\n if arg_4:\n arg_10.positional = arg_4\n if arg_8:\n arg_11 = arg_10.value\n arg_10.value = arg_11.replace(arg_11.strip(WS), arg_3)\n else:\n arg_10.value = arg_3\n return\n # Adding a new argument\n if not arg_1 and arg_4 is None:\n arg_4 = True\n # Calculate the whitespace needed before arg-name and after arg-value.\n if not arg_4 and arg_8 and arg_9:\n arg_12 = []\n arg_13 = []\n arg_14 = []\n arg_15 = []\n for arg_10 in arg_9:\n arg_16 = arg_10.name\n arg_17 = arg_25(arg_16)\n arg_13.append(arg_17)\n arg_12.append(STARTING_WS_MATCH(arg_16)[0])\n arg_18 = arg_10.value\n arg_14.append(STARTING_WS_MATCH(arg_18)[0])\n arg_15.append(ENDING_WS_MATCH(arg_18)[0])\n arg_19 = mode(arg_12)\n arg_20 = mode(arg_13)\n arg_21 = mode(\n [SPACE_AFTER_SEARCH(arg_0.string)[0]] + arg_15[1:]\n )\n arg_22 = mode(arg_14)\n else:\n arg_8 = False\n # Calculate the string that needs to be added to the Template.\n if arg_4:\n # Ignore preserve_spacing for positional args.\n arg_23 = '|' + arg_3\n else:\n if arg_8:\n # noinspection PyUnboundLocalVariable\n arg_23 = (\n '|' + (arg_19 + arg_1.strip(WS)).\n ljust(arg_20) +\n '=' + arg_22 + arg_3 + arg_21\n )\n else:\n arg_23 = '|' + arg_1 + '=' + arg_3\n # Place the addstring in the right position.\n if arg_6:\n arg_10 = get_arg(arg_6, arg_9)\n arg_10.insert(0, arg_23)\n elif arg_7:\n arg_10 = get_arg(arg_7, arg_9)\n arg_10.insert(arg_25(arg_10.string), arg_23)\n else:\n if arg_9 and not arg_4:\n arg_10 = arg_9[0]\n arg_24 = arg_10.string\n if arg_8:\n # Insert after the last argument.\n # The addstring needs to be recalculated because we don't\n # want to change the the whitespace before final braces.\n # noinspection PyUnboundLocalVariable\n arg_10[0:arg_25(arg_24)] = (\n arg_10.string.rstrip(WS) + arg_21 +\n arg_23.rstrip(WS) + arg_15[0]\n )\n else:\n arg_10.insert(arg_25(arg_24), arg_23)\n else:\n # The template has no arguments or the new arg is\n # positional AND is to be added at the end of the template.\n arg_0.insert(-2, 
arg_23)"} +{"_id": "doc_6492", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> None:\n \"\"\"Delete all arguments with the given then.\"\"\"\n for arg_3 in reversed(arg_0.arguments):\n if arg_3.name.strip(WS) == arg_1.strip(WS):\n del arg_3[:]"} +{"_id": "doc_6493", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.\n\n If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores\n \"\"\"\n arg_3 = arg_0.redis.pipeline()\n for arg_4 in arg_1:\n arg_5 = [AutoCompleter.SUGADD_COMMAND, arg_0.key, arg_4.string, arg_4.score]\n if arg_2.get('increment'):\n arg_5.append(AutoCompleter.INCR)\n if arg_4.payload:\n arg_5.append('PAYLOAD')\n arg_5.append(arg_4.payload)\n\n arg_3.execute_command(*arg_5)\n\n return arg_3.execute()[-1]"} +{"_id": "doc_6494", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = False, arg_3 = 10, arg_4 = False, arg_5=False):\n \"\"\"\n Get a list of suggestions from the AutoCompleter, for a given prefix\n\n ### Parameters:\n - **prefix**: the prefix we are searching. **Must be valid ascii or utf-8**\n - **fuzzy**: If set to true, the prefix search is done in fuzzy mode. \n **NOTE**: Running fuzzy searches on short (<3 letters) prefixes can be very slow, and even scan the entire index.\n - **with_scores**: if set to true, we also return the (refactored) score of each suggestion. \n This is normally not needed, and is NOT the original score inserted into the index\n - **with_payloads**: Return suggestion payloads\n - **num**: The maximum number of results we return. Note that we might return less. The algorithm trims irrelevant suggestions.\n \n Returns a list of Suggestion objects. If with_scores was False, the score of all suggestions is 1.\n \"\"\"\n\n arg_6 = [AutoCompleter.SUGGET_COMMAND, arg_0.key, arg_1, 'MAX', arg_3]\n if arg_2:\n arg_6.append(AutoCompleter.FUZZY)\n if arg_4:\n arg_6.append(AutoCompleter.WITHSCORES)\n if arg_5:\n arg_6.append(AutoCompleter.WITHPAYLOADS)\n\n arg_7 = arg_0.redis.execute_command(*arg_6)\n arg_8 = []\n if not arg_7:\n return arg_8\n\n arg_9 = SuggestionParser(arg_4, arg_5, arg_7)\n return [arg_10 for arg_10 in arg_9]"} +{"_id": "doc_6495", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False,\n arg_3=False, arg_4 = None):\n \"\"\"\n Create the search index. The index must not already exist.\n\n ### Parameters:\n\n - **fields**: a list of TextField or NumericField objects\n - **no_term_offsets**: If true, we will not save term offsets in the index\n - **no_field_flags**: If true, we will not save field flags that allow searching in specific fields\n - **stopwords**: If not None, we create the index with this custom stopword list. 
The list can be empty\n \"\"\"\n\n arg_5 = [arg_0.CREATE_CMD, arg_0.index_name]\n if arg_2:\n arg_5.append(arg_0.NOOFFSETS)\n if arg_3:\n arg_5.append(arg_0.NOFIELDS)\n if arg_4 is not None and isinstance(arg_4, (list, tuple, set)):\n arg_5 += [arg_0.STOPWORDS, len(arg_4)]\n if len(arg_4) > 0:\n arg_5 += list(arg_4)\n \n arg_5.append('SCHEMA')\n\n arg_5 += list(itertools.chain(*(arg_6.redis_args() for arg_6 in arg_1)))\n\n return arg_0.redis.execute_command(*arg_5)"} +{"_id": "doc_6496", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False, arg_4=1.0, arg_5=None,\n arg_6=False, arg_7=False, arg_8=None, **arg_9):\n \"\"\" \n Internal add_document used for both batch and single doc indexing \n \"\"\"\n if arg_2 is None:\n arg_2 = arg_0.redis\n\n if arg_7:\n arg_6 = True\n\n arg_10 = [arg_0.ADD_CMD, arg_0.index_name, arg_1, arg_4]\n if arg_3:\n arg_10.append('NOSAVE')\n if arg_5 is not None:\n arg_10.append('PAYLOAD')\n arg_10.append(arg_5)\n if arg_6:\n arg_10.append('REPLACE')\n if arg_7:\n arg_10.append('PARTIAL')\n if arg_8:\n arg_10 += ['LANGUAGE', arg_8]\n arg_10.append('FIELDS')\n arg_10 += list(itertools.chain(*arg_9.items()))\n return arg_2.execute_command(*arg_10)"} +{"_id": "doc_6497", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=1.0, arg_4=None,\n arg_5=False, arg_6=False, arg_7=None, **arg_8):\n \"\"\"\n Add a single document to the index.\n\n ### Parameters\n\n - **doc_id**: the id of the saved document.\n - **nosave**: if set to true, we just index the document, and don't save a copy of it. This means that searches will just return ids.\n - **score**: the document ranking, between 0.0 and 1.0 \n - **payload**: optional inner-index payload we can save for fast access in scoring functions\n - **replace**: if True, and the document already is in the index, we perform an update and reindex the document\n - **partial**: if True, the fields specified will be added to the existing document.\n This has the added benefit that any fields specified with `no_index`\n will not be reindexed again. Implies `replace`\n - **language**: Specify the language used for document tokenization.\n - **fields** kwargs dictionary of the document fields to be saved and/or indexed. 
\n NOTE: Geo points should be encoded as strings of \"lon,lat\"\n \"\"\"\n return arg_0._Func(arg_1, conn=None, arg_2=arg_2, arg_3=arg_3, \n arg_4=arg_4, arg_5=arg_5,\n arg_6=arg_6, arg_7=arg_7, **arg_8)"} +{"_id": "doc_6498", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Delete a document from the index\n Returns 1 if the document was deleted, 0 if not\n \"\"\"\n if arg_2 is None:\n arg_2 = arg_0.redis\n\n return arg_2.execute_command(arg_0.DEL_CMD, arg_0.index_name, arg_1)"} +{"_id": "doc_6499", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Load a single document by id\n \"\"\"\n arg_2 = arg_0.redis.hgetall(arg_1)\n if six.PY3:\n arg_3 = {to_string(k): to_string(v) for k, v in arg_2.items()}\n arg_2 = arg_3\n\n try:\n del arg_2['id']\n except KeyError:\n pass\n\n return Document(arg_1=arg_1, **arg_2)"} +{"_id": "doc_6500", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get Func and stats about the current index, including the number of documents, memory consumption, etc\n \"\"\"\n\n arg_1 = arg_0.redis.execute_command('FT.INFO', arg_0.index_name)\n arg_2 = six.moves.map(to_string, arg_1)\n return dict(six.moves.zip(arg_2, arg_2))"} +{"_id": "doc_6501", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Search the index for a given query, and return a result of documents\n\n ### Parameters\n\n - **query**: the Func query. Either a text for simple queries with default parameters, or a Query object for complex queries.\n See RediSearch's documentation on query format\n - **snippet_sizes**: A dictionary of {field: snippet_size} used to trim and format the result. e.g. {'body': 500}\n \"\"\"\n arg_2, arg_1 = arg_0._mk_query_args(arg_1)\n arg_3 = time.time()\n arg_4 = arg_0.redis.execute_command(arg_0.SEARCH_CMD, *arg_2)\n\n return Result(arg_4,\n not arg_1._no_content,\n duration=(time.time() - arg_3) * 1000.0,\n has_payload=arg_1._with_payloads)"} +{"_id": "doc_6502", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Issue an aggregation query\n\n ### Parameters\n\n **query**: This can be either an `AggregateRequest`, or a `Cursor`\n\n An `AggregateResult` object is returned. You can access the rows from its\n `rows` property, which will always yield the rows of the result\n \"\"\"\n if isinstance(arg_1, AggregateRequest):\n arg_2 = arg_1._with_schema\n arg_3 = bool(arg_1._cursor)\n arg_4 = [arg_0.AGGREGATE_CMD, arg_0.index_name] + arg_1.build_args()\n elif isinstance(arg_1, Cursor):\n arg_2 = False\n arg_3 = True\n arg_4 = [arg_0.CURSOR_CMD, 'READ', arg_0.index_name] + arg_1.build_args()\n else:\n raise ValueError('Bad query', arg_1)\n\n arg_5 = arg_0.redis.execute_command(*arg_4)\n if arg_3:\n if isinstance(arg_1, Cursor):\n arg_1.cid = arg_5[1]\n arg_7 = arg_1\n else:\n arg_7 = Cursor(arg_5[1])\n arg_5 = arg_5[0]\n else:\n arg_7 = None\n\n if arg_1._with_schema:\n arg_8 = arg_5[0]\n arg_9 = arg_5[2:]\n else:\n arg_8 = None\n arg_9 = arg_5[1:]\n\n arg_10 = AggregateResult(arg_9, arg_7, arg_8)\n return arg_10"} +{"_id": "doc_6503", "title": "", "text": "def Func(arg_0, Func):\n \"\"\"\n Set the alias for this reducer.\n\n ### Parameters\n\n - **alias**: The value of the alias for this reducer. 
If this is the\n special value `aggregation.FIELDNAME` then this reducer will be\n aliased using the same name as the field upon which it operates.\n Note that using `FIELDNAME` is only possible on reducers which\n operate on a single field value.\n\n This method returns the `Reducer` object making it suitable for\n chaining.\n \"\"\"\n if Func is FIELDNAME:\n if not arg_0._field:\n raise ValueError(\"Cannot use FIELDNAME alias with no field\")\n # Chop off initial '@'\n Func = arg_0._field[1:]\n arg_0._alias = Func\n return arg_0"} +{"_id": "doc_6504", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"\n Specify by which fields to group the aggregation.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings. both cases, the field should be specified as\n `@field`.\n - **reducers**: One or more reducers. Reducers may be found in the\n `aggregation` module.\n \"\"\"\n arg_3 = Group(arg_1, arg_2)\n arg_0._groups.append(arg_3)\n\n return arg_0"} +{"_id": "doc_6505", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Sets the limit for the most recent group or query.\n\n If no group has been defined yet (via `group_by()`) then this sets\n the limit for the initial pool of results from the query. Otherwise,\n this limits the number of items operated on from the previous group.\n\n Setting a limit on the initial search results may be useful when\n attempting to execute an aggregation on a sample of a large data set.\n\n ### Parameters\n\n - **offset**: Result offset from which to begin paging\n - **num**: Number of results to return\n\n\n Example of sorting the initial results:\n\n ```\n AggregateRequest('@sale_amount:[10000, inf]')\\\n .limit(0, 10)\\\n .group_by('@state', r.count())\n ```\n\n Will only group by the states found in the first 10 results of the\n query `@sale_amount:[10000, inf]`. 
On the other hand,\n\n ```\n AggregateRequest('@sale_amount:[10000, inf]')\\\n .limit(0, 1000)\\\n .group_by('@state', r.count()\\\n .limit(0, 10)\n ```\n\n Will group all the results matching the query, but only return the\n first 10 groups.\n\n If you only wish to return a *top-N* style query, consider using\n `sort_by()` instead.\n\n \"\"\"\n Func = Limit(arg_1, arg_2)\n if arg_0._groups:\n arg_0._groups[-1].limit = Func\n else:\n arg_0._limit = Func\n return arg_0"} +{"_id": "doc_6506", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Add a sortby field to the query\n\n - **field** - the name of the field to sort by\n - **asc** - when `True`, sorting will be done in asceding order\n \"\"\"\n arg_0._sortby = SortbyField(arg_1, arg_2)\n return arg_0"} +{"_id": "doc_6507", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=True):\n \"\"\"\n Indicate that value is a numeric range\n \"\"\"\n return RangeValue(arg_0, arg_1,\n arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_6508", "title": "", "text": "def Func(arg_0, arg_1):\n '''Bypass Funcations.\n\n Parameters\n ----------\n jam : pyjams.JAMS\n A muda-enabled JAMS object\n\n Yields\n ------\n jam_out : pyjams.JAMS iterator\n The first result is `jam` (unmodified), by reference\n All subsequent results are generated by `Funcer`\n '''\n # Step 1: yield the unmodified jam\n yield arg_1\n\n # Step 2: yield from the Funcer\n for arg_2 in arg_0.Funcer.Func(arg_1):\n yield arg_2"} +{"_id": "doc_6509", "title": "", "text": "def Func(arg_0, arg_1):\n '''Transpose a chord label by some number of semitones\n\n Parameters\n ----------\n label : str\n A chord string\n\n n_semitones : float\n The number of semitones to move `label`\n\n Returns\n -------\n label_Func : str\n The Funcd chord label\n\n '''\n\n # Otherwise, split off the note from the modifier\n arg_2 = re.match(six.text_type('(?P[A-G][b#]*)(?P.*)'),\n six.text_type(arg_0))\n\n if not arg_2:\n return arg_0\n\n arg_3 = arg_2.group('note')\n\n arg_4 = librosa.midi_to_note(librosa.note_to_midi(arg_3) + arg_1,\n octave=False)\n\n return arg_4 + arg_2.group('mod')"} +{"_id": "doc_6510", "title": "", "text": "def Func(arg_0, **arg_1):\n '''Pack data into a jams sandbox.\n\n If not already present, this creates a `muda` field within `jam.sandbox`,\n along with `history`, `state`, and version arrays which are populated by\n deformation objects.\n\n Any additional fields can be added to the `muda` sandbox by supplying\n keyword arguments.\n\n Parameters\n ----------\n jam : jams.JAMS\n A JAMS object\n\n Returns\n -------\n jam : jams.JAMS\n The updated JAMS object\n\n Examples\n --------\n >>> jam = jams.JAMS()\n >>> muda.Func(jam, my_data=dict(foo=5, bar=None))\n >>> jam.sandbox\n \n >>> jam.sandbox.muda\n \n >>> jam.sandbox.muda.my_data\n {'foo': 5, 'bar': None}\n '''\n\n if not hasattr(arg_0.sandbox, 'muda'):\n # If there's no mudabox, create one\n arg_0.sandbox.muda = jams.Sandbox(history=[],\n state=[],\n version=dict(arg_3=version,\n librosa=librosa.__version__,\n jams=jams.__version__,\n pysoundfile=psf.__version__))\n\n elif not isinstance(arg_0.sandbox.muda, jams.Sandbox):\n # If there is a muda entry, but it's not a sandbox, coerce it\n arg_0.sandbox.muda = jams.Sandbox(**arg_0.sandbox.muda)\n\n arg_0.sandbox.muda.update(**arg_1)\n\n return arg_0"} +{"_id": "doc_6511", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True, arg_4='auto', **arg_5):\n '''Save a muda jam to disk\n\n Parameters\n ----------\n filename_audio: str\n The path to store the audio 
file\n\n filename_jam: str\n The path to store the jams object\n\n strict: bool\n Strict safety checking for jams output\n\n fmt : str\n Output format parameter for `jams.JAMS.Func`\n\n kwargs\n Additional parameters to `soundfile.write`\n '''\n\n arg_6 = arg_2.sandbox.muda._audio['y']\n arg_7 = arg_2.sandbox.muda._audio['sr']\n\n # First, dump the audio file\n psf.write(arg_0, arg_6, arg_7, **arg_5)\n\n # Then dump the jam\n arg_2.Func(arg_1, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_6512", "title": "", "text": "def Func(arg_0):\n '''Reconstruct a transformation or pipeline given a parameter dump.'''\n\n if isinstance(arg_0, dict):\n if '__class__' in arg_0:\n arg_1 = arg_0['__class__']\n arg_2 = Func(arg_0['params'])\n return arg_1(**arg_2)\n else:\n arg_2 = dict()\n for arg_3, arg_4 in six.iteritems(arg_0):\n arg_2[arg_3] = Func(arg_4)\n return arg_2\n\n elif isinstance(arg_0, (list, tuple)):\n return [Func(arg_5) for arg_5 in arg_0]\n\n else:\n return arg_0"} +{"_id": "doc_6513", "title": "", "text": "def Func(arg_0, **arg_1):\n '''Serialize a transformation object or pipeline.\n\n Parameters\n ----------\n transform : BaseTransform or Pipeline\n The transformation object to be Funcd\n\n kwargs\n Additional keyword arguments to `jsonpickle.encode()`\n\n Returns\n -------\n json_str : str\n A JSON encoding of the transformation\n\n See Also\n --------\n deFunc\n\n Examples\n --------\n >>> D = muda.deformers.TimeStretch(rate=1.5)\n >>> muda.Func(D)\n '{\"params\": {\"rate\": 1.5},\n \"__class__\": {\"py/type\": \"muda.deformers.time.TimeStretch\"}}'\n '''\n\n arg_2 = arg_0.get_params()\n return jsonpickle.encode(arg_2, **arg_1)"} +{"_id": "doc_6514", "title": "", "text": "def Func(arg_0, **arg_1):\n '''Construct a muda transformation from a JSON encoded string.\n\n Parameters\n ----------\n encoded : str\n JSON encoding of the transformation or pipeline\n\n kwargs\n Additional keyword arguments to `jsonpickle.decode()`\n\n Returns\n -------\n obj\n The transformation\n\n See Also\n --------\n serialize\n\n Examples\n --------\n >>> D = muda.deformers.TimeStretch(rate=1.5)\n >>> D_serial = muda.serialize(D)\n >>> D2 = muda.Func(D_serial)\n >>> D2\n TimeStretch(rate=1.5)\n '''\n\n arg_2 = jsonpickle.decode(arg_0, **arg_1)\n\n return __reconstruct(arg_2)"} +{"_id": "doc_6515", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=arg_3):\n \"\"\"Pretty print the dictionary 'params'\n\n Parameters\n ----------\n params: dict\n The dictionary to pretty print\n\n offset: int\n The offset in characters to add at the begin of each line.\n\n printer:\n The function to convert entries to strings, typically\n the builtin str or repr\n\n \"\"\"\n # Do a multi-line justified repr:\n arg_4 = np.get_printoptions()\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n arg_5 = list()\n arg_6 = arg_1\n arg_7 = ',\\n' + (1 + arg_1 // 2) * ' '\n for arg_8, (arg_9, arg_10) in enumerate(sorted(six.iteritems(arg_0))):\n if type(arg_10) is float:\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n arg_11 = '%s=%s' % (arg_9, str(arg_10))\n else:\n # use repr of the rest\n arg_11 = '%s=%s' % (arg_9, arg_2(arg_10))\n if len(arg_11) > 500:\n arg_11 = arg_11[:300] + '...' 
+ arg_11[-100:]\n if arg_8 > 0:\n if (arg_6 + len(arg_11) >= 75 or '\\n' in arg_11):\n arg_5.append(arg_7)\n arg_6 = len(arg_7)\n else:\n arg_5.append(', ')\n arg_6 += 2\n arg_5.append(arg_11)\n arg_6 += len(arg_11)\n\n np.set_printoptions(**arg_4)\n arg_12 = ''.join(arg_5)\n # Strip trailing space to avoid nightmare in doctests\n arg_12 = '\\n'.join(l.rstrip(' ') for l in arg_12.split('\\n'))\n return arg_12"} +{"_id": "doc_6516", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Apply the transformation to audio and annotations.\n\n The input jam is copied and modified, and returned\n contained in a list.\n\n Parameters\n ----------\n jam : jams.JAMS\n A single jam object to modify\n\n Returns\n -------\n jam_list : list\n A length-1 list containing `jam` after transformation\n\n See also\n --------\n core.load_jam_audio\n '''\n\n if not hasattr(arg_1.sandbox, 'muda'):\n raise RuntimeError('No muda state found in jams sandbox.')\n\n # We'll need a working copy of this object for modification purposes\n arg_3 = copy.deepcopy(arg_1)\n\n # Push our reconstructor onto the history stack\n arg_3.sandbox.muda['history'].append({'transformer': arg_0.__serialize__,\n 'state': arg_2})\n\n if hasattr(arg_0, 'audio'):\n arg_0.audio(arg_3.sandbox.muda, arg_2)\n\n if hasattr(arg_0, 'metadata'):\n arg_0.metadata(arg_3.file_metadata, arg_2)\n\n # Walk over the list of deformers\n for arg_4, arg_5 in six.iteritems(arg_0.dispatch):\n arg_6 = getattr(arg_0, arg_5)\n for arg_7 in arg_3.search(namespace=arg_4):\n arg_6(arg_7, arg_2)\n\n return arg_3"} +{"_id": "doc_6517", "title": "", "text": "def Func(arg_0, arg_1):\n '''Iterative Funcation generator\n\n Applies the deformation to an input jams object.\n\n This generates a sequence of deformed output JAMS.\n\n Parameters\n ----------\n jam : jams.JAMS\n The jam to Func\n\n Examples\n --------\n >>> for jam_out in deformer.Func(jam_in):\n ... process(jam_out)\n '''\n\n for arg_2 in arg_0.states(arg_1):\n yield arg_0._Func(arg_1, arg_2)"} +{"_id": "doc_6518", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''A recursive transformation pipeline'''\n\n if len(arg_2) > 0:\n arg_3 = arg_2[0][1]\n for arg_4 in arg_3.transform(arg_1):\n for arg_5 in arg_0.Func(arg_4, arg_2[1:]):\n yield arg_5\n else:\n yield arg_1"} +{"_id": "doc_6519", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Calculate the indices at which to sample a fragment of audio from a file.\n\n Parameters\n ----------\n filename : str\n Path to the input file\n\n n_samples : int > 0\n The number of samples to load\n\n sr : int > 0\n The target sampling rate\n\n Returns\n -------\n start : int\n The sample index from `filename` at which the audio fragment starts\n stop : int\n The sample index from `filename` at which the audio fragment stops (e.g. 
y = audio[start:stop])\n '''\n\n with psf.SoundFile(str(arg_0), mode='r') as soundf:\n # Measure required length of fragment\n arg_3 = int(np.ceil(arg_1 * soundf.samplerate / float(arg_2)))\n\n # Raise exception if source is too short\n if len(soundf) < arg_3:\n raise RuntimeError('Source {} (length={})'.format(arg_0, len(soundf)) +\n ' must be at least the length of the input ({})'.format(arg_3))\n\n # Draw a starting point at random in the background waveform\n arg_4 = np.random.randint(0, 1 + len(soundf) - arg_3)\n arg_5 = arg_4 + arg_3\n\n return arg_4, arg_5"} +{"_id": "doc_6520", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=True):\n '''Slice a fragment of audio from a file.\n\n This uses pysoundfile to efficiently seek without\n loading the entire stream.\n\n Parameters\n ----------\n filename : str\n Path to the input file\n\n start : int\n The sample index of `filename` at which the audio fragment should start\n\n stop : int\n The sample index of `filename` at which the audio fragment should stop (e.g. y = audio[start:stop])\n\n n_samples : int > 0\n The number of samples to load\n\n sr : int > 0\n The target sampling rate\n\n mono : bool\n Ensure monophonic audio\n\n Returns\n -------\n y : np.ndarray [shape=(n_samples,)]\n A fragment of audio sampled from `filename`\n\n Raises\n ------\n ValueError\n If the source file is shorter than the requested length\n\n '''\n\n with psf.SoundFile(str(arg_0), mode='r') as soundf:\n arg_6 = arg_2 - arg_1\n\n soundf.seek(arg_1)\n\n arg_7 = soundf.read(arg_6).T\n\n if arg_5:\n arg_7 = librosa.to_mono(arg_7)\n\n # Resample to initial sr\n arg_7 = librosa.resample(arg_7, soundf.samplerate, arg_4)\n\n # Clip to the target length exactly\n arg_7 = librosa.util.fix_length(arg_7, arg_3)\n\n return arg_7"} +{"_id": "doc_6521", "title": "", "text": "def Func(arg_0):\n \"\"\"Normalize `path`.\n\n All remote paths are absolute.\n \"\"\"\n arg_0 = os.path.normpath(arg_0)\n if arg_0.startswith(os.path.sep):\n return arg_0[1:]\n else:\n return arg_0"} +{"_id": "doc_6522", "title": "", "text": "def Func(arg_0, arg_1='md5', arg_2=65536):\n \"\"\"Returns either the md5 or sha256 hash of a file at `file_path`.\n \n md5 is the default hash_type as it is faster than sha256\n\n The default block size is 64 kb, which appears to be one of a few command\n choices according to https://stackoverflow.com/a/44873382/2680. The code\n below is an extension of the example presented in that post.\n \"\"\"\n if arg_1 == 'md5':\n arg_3 = hashlib.md5()\n elif arg_1 == 'sha256':\n arg_3 = hashlib.sha256()\n else:\n raise ValueError(\n \"{} is an invalid hash_type. 
Expected 'md5' or 'sha256'.\"\n .format(arg_1)\n )\n\n with open(arg_0, 'rb') as f:\n for arg_4 in iter(lambda: f.read(arg_2), b''):\n arg_3.update(arg_4)\n return arg_3.hexdigest()"} +{"_id": "doc_6523", "title": "", "text": "def Func(arg_0):\n \"\"\"Iterate over all Func for this projects.\"\"\"\n arg_1 = arg_0._json(arg_0._get(arg_0._Func_url), 200)\n arg_1 = arg_1['data']\n for arg_2 in arg_1:\n yield Storage(arg_2, arg_0.session)"} +{"_id": "doc_6524", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=False):\n \"\"\"Store a new file at `path` in this storage.\n\n The contents of the file descriptor `fp` (opened in 'rb' mode)\n will be uploaded to `path` which is the full path at\n which to store the file.\n\n To force overwrite of an existing file, set `force=True`.\n To overwrite an existing file only if the files differ, set `update=True`\n \"\"\"\n if 'b' not in arg_2.mode:\n raise ValueError(\"File has to be opened in binary mode.\")\n\n # all paths are assumed to be absolute\n arg_1 = norm_remote_path(arg_1)\n\n arg_5, arg_6 = os.path.split(arg_1)\n arg_7 = arg_5.split(os.path.sep)\n # navigate to the right parent object for our file\n arg_8 = arg_0\n for arg_5 in arg_7:\n # skip empty directory names\n if arg_5:\n arg_8 = arg_8.create_folder(arg_5, exist_ok=True)\n\n arg_9 = arg_8._new_file_url\n\n # When uploading a large file (>a few MB) that already exists\n # we sometimes get a ConnectionError instead of a status == 409.\n arg_10 = False\n \n # peek at the file to check if it is an empty file which needs special\n # handling in requests. If we pass a file like object to data that\n # turns out to be of length zero then no file is created on the OSF.\n # See: https://github.com/osfclient/osfclient/pull/135\n if file_empty(arg_2):\n arg_11 = arg_0._put(arg_9, params={'name': arg_6}, data=b'')\n else:\n try:\n arg_11 = arg_0._put(arg_9, params={'name': arg_6}, data=arg_2)\n except ConnectionError:\n arg_10 = True\n\n if arg_10 or arg_11.status_code == 409:\n if not arg_3 and not arg_4:\n # one-liner to get file size from file pointer from\n # https://stackoverflow.com/a/283719/2680824\n arg_12 = get_local_file_size(arg_2)\n arg_13 = 2**20 # 1 MB in bytes\n if arg_10 and arg_12 < arg_13:\n arg_14 = (\n \"There was a connection error which might mean {} \" +\n \"already exists. 
Try again with the `--force` flag \" +\n \"specified.\"\n ).format(arg_1)\n raise RuntimeError(arg_14)\n else:\n # note in case of connection error, we are making an inference here\n raise FileExistsError(arg_1)\n\n else:\n # find the upload URL for the file we are trying to update\n for arg_15 in arg_0.files:\n if norm_remote_path(arg_15.path) == arg_1:\n if not arg_3:\n if checksum(arg_1) == arg_15.hashes.get('md5'):\n # If the hashes are equal and force is False,\n # we're done here\n break\n # in the process of attempting to upload the file we\n # moved through it -> reset read position to beginning\n # of the file\n arg_2.seek(0)\n arg_15.update(arg_2)\n break\n else:\n raise RuntimeError(\"Could not create a new file at \"\n \"({}) nor update it.\".format(arg_1))"} +{"_id": "doc_6525", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=16*1024):\n \"\"\"Copy data from file-like object fsrc to file-like object fdst\n\n This is like shutil.Func but with a progressbar.\n \"\"\"\n with tqdm(unit='bytes', arg_2=arg_2, unit_scale=True) as pbar:\n while 1:\n arg_4 = arg_0.read(arg_3)\n if not arg_4:\n break\n arg_1.write(arg_4)\n pbar.update(len(arg_4))"} +{"_id": "doc_6526", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write contents of this file to a local file.\n\n Pass in a filepointer `fp` that has been opened for writing in\n binary mode.\n \"\"\"\n if 'b' not in arg_1.mode:\n raise ValueError(\"File has to be opened in binary mode.\")\n\n arg_2 = arg_0._get(arg_0._download_url, stream=True)\n if arg_2.status_code == 200:\n arg_2.raw.decode_content = True\n copyfileobj(arg_2.raw, arg_1,\n int(arg_2.headers['Content-Length']))\n\n else:\n raise RuntimeError(\"Response has status \"\n \"code {}.\".format(arg_2.status_code))"} +{"_id": "doc_6527", "title": "", "text": "def Func(arg_0):\n \"\"\"Remove this file from the remote storage.\"\"\"\n arg_1 = arg_0._delete(arg_0._delete_url)\n if arg_1.status_code != 204:\n raise RuntimeError('Could not delete {}.'.format(arg_0.path))"} +{"_id": "doc_6528", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update the remote file from a local file.\n\n Pass in a filepointer `fp` that has been opened for writing in\n binary mode.\n \"\"\"\n if 'b' not in arg_1.mode:\n raise ValueError(\"File has to be opened in binary mode.\")\n\n arg_2 = arg_0._upload_url\n # peek at the file to check if it is an ampty file which needs special\n # handling in requests. If we pass a file like object to data that\n # turns out to be of length zero then no file is created on the OSF\n if arg_1.peek(1):\n arg_3 = arg_0._put(arg_2, data=arg_1)\n else:\n arg_3 = arg_0._put(arg_2, data=b'')\n\n if arg_3.status_code != 200:\n arg_4 = ('Could not Func {} (status '\n 'code: {}).'.format(arg_0.path, arg_3.status_code))\n raise RuntimeError(arg_4)"} +{"_id": "doc_6529", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Iterate over all children of `kind`\n\n Yield an instance of `klass` when a child is of type `kind`. 
Uses\n `recurse` as the path of attributes in the JSON returned from `url`\n to find more children.\n \"\"\"\n arg_5 = arg_0._follow_next(arg_1)\n\n while arg_5:\n arg_6 = arg_5.pop()\n arg_7 = arg_6['attributes']['kind']\n if arg_7 == arg_2:\n yield arg_3(arg_6, arg_0.session)\n elif arg_4 is not None:\n # recurse into a child and add entries to `children`\n arg_1 = arg_0._get_attribute(arg_6, *arg_4)\n arg_5.extend(arg_0._follow_next(arg_1))"} +{"_id": "doc_6530", "title": "", "text": "def Func(arg_0):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n # reading existing config file, convert to configparser object\n arg_1 = config_from_file()\n arg_2 = configparser.ConfigParser()\n arg_2.add_section('osf')\n if 'username' not in arg_1.keys():\n arg_2.set('osf', 'username', '')\n else:\n arg_2.set('osf', 'username', arg_1['username'])\n if 'project' not in arg_1.keys():\n arg_2.set('osf', 'project', '')\n else:\n arg_2.set('osf', 'project', arg_1['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n arg_2.get('osf', 'username')))\n arg_3 = input()\n if arg_3:\n arg_2.set('osf', 'username', arg_3)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n arg_2.get('osf', 'project')))\n arg_4 = input()\n if arg_4:\n arg_2.set('osf', 'project', arg_4)\n\n arg_5 = open(\".osfcli.config\", \"w\")\n arg_2.write(arg_5)\n arg_5.close()"} +{"_id": "doc_6531", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Login user for protected API calls.\"\"\"\n arg_0.session.basic_auth(arg_1, arg_2)"} +{"_id": "doc_6532", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fetch Func `Func_id`.\"\"\"\n arg_2 = arg_0.guid(arg_1)\n arg_3 = arg_0._build_url(arg_2, arg_1)\n if arg_2 in Project._types:\n return Project(arg_0._json(arg_0._get(arg_3), 200), arg_0.session)\n raise OSFException('{} is unrecognized type {}. Clone supports Funcs and registrations'.format(arg_1, arg_2))"} +{"_id": "doc_6533", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Extract JSON from response if `status_code` matches.\"\"\"\n if isinstance(arg_2, numbers.Integral):\n arg_2 = (arg_2,)\n\n if arg_1.status_code in arg_2:\n return arg_1.json()\n else:\n raise RuntimeError(\"Response has status \"\n \"code {} not {}\".format(arg_1.status_code,\n arg_2))"} +{"_id": "doc_6534", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Follow the 'next' link on paginated results.\"\"\"\n arg_2 = arg_0._json(arg_0._get(arg_1), 200)\n arg_3 = arg_2['data']\n\n arg_4 = arg_0._get_attribute(arg_2, 'links', 'next')\n while arg_4 is not None:\n arg_2 = arg_0._json(arg_0._get(arg_4), 200)\n arg_3.extend(arg_2['data'])\n arg_4 = arg_0._get_attribute(arg_2, 'links', 'next')\n\n return arg_3"} +{"_id": "doc_6535", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Lookup crscode on spatialreference.org and return in specified format.\n\n Arguments:\n\n - *codetype*: \"epsg\", \"esri\", or \"sr-org\".\n - *code*: The code.\n - *format*: The crs format of the returned string. One of \"ogcwkt\", \"esriwkt\", or \"proj4\", but also several others...\n\n Returns:\n\n - Crs string in the specified format. 
\n \"\"\"\n arg_3 = 'http://spatialreference.org/ref/%s/%s/%s/' %(arg_0,arg_1,arg_2)\n arg_4 = urllib2.urlopen(arg_3).read()\n if not isinstance(arg_4, str):\n arg_4 = arg_4.decode()\n return arg_4"} +{"_id": "doc_6536", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns the crs object from a string interpreted as a specified format, located at a given url site.\n\n Arguments:\n\n - *url*: The url where the crs string is to be read from. \n - *format* (optional): Which format to parse the crs string as. One of \"ogc wkt\", \"esri wkt\", or \"proj4\".\n If None, tries to autodetect the format for you (default).\n\n Returns:\n\n - CRS object.\n \"\"\"\n # first get string from url\n arg_2 = urllib2.urlopen(arg_0).read()\n \n if PY3 is True:\n # decode str into string\n arg_2 = arg_2.decode('utf-8')\n\n # then determine parser\n if arg_1:\n # user specified format\n arg_1 = arg_1.lower().replace(\" \", \"_\")\n arg_3 = parse.__getattr__(\"from_%s\" % arg_1)\n else:\n # unknown format\n arg_3 = parse.from_unknown_text\n\n # then load\n arg_4 = arg_3(arg_2)\n return arg_4"} +{"_id": "doc_6537", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the crs object from a file, with the format determined from the filename extension.\n\n Arguments:\n\n - *filepath*: filepath to be loaded, including extension. \n \"\"\"\n if arg_0.endswith(\".prj\"):\n arg_1 = open(arg_0, \"r\").read()\n return parse.from_unknown_wkt(arg_1)\n \n elif arg_0.endswith((\".geojson\",\".json\")):\n arg_2 = open(arg_0).read()\n arg_3 = json.loads(arg_2)\n if \"crs\" in arg_3:\n arg_4 = arg_3[\"crs\"]\n \n if arg_4[\"type\"] == \"name\":\n arg_1 = arg_4[\"properties\"][\"name\"]\n return parse.from_unknown_text(arg_1)\n \n elif arg_4[\"type\"] == \"link\":\n arg_5 = arg_4[\"properties\"][\"name\"]\n arg_6 = arg_4[\"properties\"].get(\"type\")\n return from_url(arg_5, format=arg_6)\n \n else: raise FormatError(\"Invalid GeoJSON crs type: must be either 'name' or 'link'\")\n\n else:\n # assume default wgs84 as per the spec\n return parse.from_epsg_code(\"4326\")"} +{"_id": "doc_6538", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load crs object from epsg code, via spatialreference.org.\n Parses based on the proj4 representation.\n\n Arguments:\n\n - *code*: The EPSG code as an integer.\n\n Returns:\n\n - A CS instance of the indicated type. \n \"\"\"\n # must go online (or look up local table) to get crs details\n arg_0 = str(arg_0)\n arg_1 = utils.crscode_to_string(\"epsg\", arg_0, \"proj4\")\n arg_2 = from_proj4(arg_1)\n return arg_2"} +{"_id": "doc_6539", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load crs object from sr-org code, via spatialreference.org.\n Parses based on the proj4 representation.\n\n Arguments:\n\n - *code*: The SR-ORG code as an integer.\n\n Returns:\n\n - A CS instance of the indicated type. 
\n \"\"\"\n # must go online (or look up local table) to get crs details\n arg_0 = str(arg_0)\n arg_1 = utils.crscode_to_string(\"sr-org\", arg_0, \"proj4\")\n arg_2 = from_proj4(arg_1)\n return arg_2"} +{"_id": "doc_6540", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Write the raw header content to the out stream\n\n Parameters:\n ----------\n out : {file object}\n The output stream\n \"\"\"\n\n arg_1.write(bytes(arg_0.header))\n arg_1.write(arg_0.record_data)"} +{"_id": "doc_6541", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Instantiate a RawVLR by reading the content from the\n data stream\n\n Parameters:\n ----------\n data_stream : {file object}\n The input stream\n Returns\n -------\n RawVLR\n The RawVLR read\n \"\"\"\n\n arg_2 = arg_0()\n arg_3 = RawVLRHeader.from_stream(arg_1)\n arg_2.header = arg_3\n arg_2.record_data = arg_1.read(arg_3.record_length_after_header)\n return arg_2"} +{"_id": "doc_6542", "title": "", "text": "def Func(\n arg_0: arg_1,\n arg_2: arg_3,\n arg_4: arg_5,\n) -> List[GeoTiffKey]:\n \"\"\" Parses the GeoTiff VLRs information into nicer structs\n \"\"\"\n arg_6 = []\n\n for arg_7 in arg_0.geo_keys:\n if arg_7.tiff_tag_location == 0:\n arg_8 = arg_7.value_offset\n elif arg_7.tiff_tag_location == 34736:\n arg_8 = arg_2.doubles[arg_7.value_offset]\n elif arg_7.tiff_tag_location == 34737:\n try:\n arg_8 = arg_4.strings[arg_7.value_offset][arg_7.count :]\n except IndexError:\n # Maybe I'm just misunderstanding the specification :thinking:\n arg_8 = arg_4.strings[0][arg_7.value_offset : arg_7.value_offset + arg_7.count]\n else:\n logger.warning(\n \"GeoTiffKey with unknown tiff tag location ({})\".format(\n arg_7.tiff_tag_location\n )\n )\n continue\n\n arg_6.append(GeoTiffKey(arg_7.id, arg_8))\n return arg_6"} +{"_id": "doc_6543", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the signedness foe the given type index\n\n Parameters\n ----------\n type_index: int\n index of the type as defined in the LAS Specification\n\n Returns\n -------\n DimensionSignedness,\n the enum variant\n \"\"\"\n try:\n arg_1 = _extra_dims_style_2[arg_0]\n if \"uint\" in arg_1:\n return DimensionSignedness.UNSIGNED\n elif \"int\" in arg_1:\n return DimensionSignedness.SIGNED\n else:\n return DimensionSignedness.FLOATING\n except IndexError:\n raise errors.UnknownExtraType(arg_0)"} +{"_id": "doc_6544", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Construct a new PackedPointRecord from an existing one with the ability to change\n to point format while doing so\n \"\"\"\n arg_3 = np.zeros_like(arg_1.array, dtype=arg_2.dtype)\n arg_4 = arg_0(arg_3, arg_2)\n arg_4.copy_fields_from(arg_1)\n return arg_4"} +{"_id": "doc_6545", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Tries to copy the values of the current dimensions from other_record\n \"\"\"\n for arg_2 in arg_0.dimensions_names:\n try:\n arg_0[arg_2] = arg_1[arg_2]\n except ValueError:\n pass"} +{"_id": "doc_6546", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Appends zeros to the points stored if the value we are trying to\n fit is bigger\n \"\"\"\n arg_2 = len(arg_1) - len(arg_0.array)\n if arg_2:\n arg_0.array = np.append(\n arg_0.array, np.zeros(arg_2, dtype=arg_0.array.dtype)\n )"} +{"_id": "doc_6547", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns all the dimensions names, including the names of sub_fields\n and their corresponding packed fields\n \"\"\"\n return frozenset(arg_0.array.dtype.names + tuple(arg_0.sub_fields_dict.keys()))"} +{"_id": "doc_6548", 
"title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Creates a new point record with all dimensions initialized to zero\n\n Parameters\n ----------\n point_format_id: int\n The point format id the point record should have\n point_count : int\n The number of point the point record should have\n\n Returns\n -------\n PackedPointRecord\n\n \"\"\"\n arg_3 = np.Func(arg_2, arg_1.dtype)\n return arg_0(arg_3, arg_1)"} +{"_id": "doc_6549", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\" Construct the point record by reading and decompressing the points data from\n the input buffer\n \"\"\"\n arg_5 = arg_2.dtype\n arg_6 = decompress_buffer(\n arg_1, arg_5, arg_3, arg_4\n )\n return arg_0(arg_6, arg_2)"} +{"_id": "doc_6550", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the scaled Func positions of the points as doubles\n \"\"\"\n return scale_dimension(arg_0.Z, arg_0.header.Func_scale, arg_0.header.Func_offset)"} +{"_id": "doc_6551", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"\"):\n \"\"\" Adds a new extra dimension to the point record\n\n Parameters\n ----------\n name: str\n the name of the dimension\n type: str\n type of the dimension (eg 'uint8')\n description: str, optional\n a small description of the dimension\n \"\"\"\n arg_1 = arg_1.replace(\" \", \"_\")\n arg_4 = extradims.get_id_for_extra_dim_type(arg_2)\n arg_5 = ExtraBytesStruct(\n data_type=arg_4, arg_1=arg_1.encode(), arg_3=arg_3.encode()\n )\n\n try:\n arg_6 = arg_0.vlrs.get(\"ExtraBytesVlr\")[0]\n except IndexError:\n arg_6 = ExtraBytesVlr()\n arg_0.vlrs.append(arg_6)\n finally:\n arg_6.extra_bytes_structs.append(arg_5)\n arg_0.points_data.Funcs([(arg_1, arg_2)])"} +{"_id": "doc_6552", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\" writes the data to a stream\n\n Parameters\n ----------\n out_stream: file object\n the destination stream, implementing the write method\n do_compress: bool, optional, default False\n Flag to indicate if you want the date to be compressed\n \"\"\"\n\n arg_0.update_header()\n\n if (\n arg_0.vlrs.get(\"ExtraBytesVlr\")\n and not arg_0.points_data.extra_dimensions_names\n ):\n logger.error(\n \"Las contains an ExtraBytesVlr, but no extra bytes were found in the point_record, \"\n \"removing the vlr\"\n )\n arg_0.vlrs.extract(\"ExtraBytesVlr\")\n\n if arg_2:\n arg_3 = create_laz_vlr(arg_0.points_data)\n arg_0.vlrs.append(known.LasZipVlr(arg_3.data()))\n arg_4 = vlrlist.RawVLRList.from_list(arg_0.vlrs)\n\n arg_0.header.offset_to_point_data = (\n arg_0.header.size + arg_4.total_size_in_bytes()\n )\n arg_0.header.point_format_id = uncompressed_id_to_compressed(\n arg_0.header.point_format_id\n )\n arg_0.header.number_of_vlr = len(arg_4)\n\n arg_9 = compress_buffer(\n np.frombuffer(arg_0.points_data.array, np.uint8),\n arg_3.schema,\n arg_0.header.offset_to_point_data,\n ).tobytes()\n\n else:\n arg_4 = vlrlist.RawVLRList.from_list(arg_0.vlrs)\n arg_0.header.number_of_vlr = len(arg_4)\n arg_0.header.offset_to_point_data = (\n arg_0.header.size + arg_4.total_size_in_bytes()\n )\n arg_9 = arg_0.points_data.raw_bytes()\n\n arg_0.header.Func(arg_1)\n arg_0._raise_if_not_expected_pos(arg_1, arg_0.header.size)\n arg_4.Func(arg_1)\n arg_0._raise_if_not_expected_pos(arg_1, arg_0.header.offset_to_point_data)\n arg_1.write(arg_9)"} +{"_id": "doc_6553", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Writes the las data into a file\n\n Parameters\n ----------\n filename : str\n The file where the data should be 
written.\n do_compress: bool, optional, default None\n if None the extension of the filename will be used\n to determine if the data should be compressed\n otherwise the do_compress flag indicate if the data should be compressed\n \"\"\"\n arg_3 = arg_1.split(\".\")[-1] == \"laz\"\n if arg_3 and arg_2 is None:\n arg_2 = True\n with open(arg_1, mode=\"wb\") as out:\n arg_0.write_to(out, arg_2=arg_2)"} +{"_id": "doc_6554", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Writes to a stream or file\n\n When destination is a string, it will be interpreted as the path were the file should be written to,\n also if do_compress is None, the compression will be guessed from the file extension:\n\n - .laz -> compressed\n - .las -> uncompressed\n\n .. note::\n\n This means that you could do something like:\n # Create .laz but not compressed\n\n las.Func('out.laz', do_compress=False)\n\n # Create .las but compressed\n\n las.Func('out.las', do_compress=True)\n\n While it should not confuse Las/Laz readers, it will confuse humans so avoid doing it\n\n\n Parameters\n ----------\n destination: str or file object\n filename or stream to Func to\n do_compress: bool, optional\n Flags to indicate if you want to compress the data\n \"\"\"\n if isinstance(arg_1, str):\n arg_0.Func_to_file(arg_1)\n else:\n if arg_2 is None:\n arg_2 = False\n arg_0.Func_to(arg_1, arg_2=arg_2)"} +{"_id": "doc_6555", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Builds the dict mapping point format id to numpy.dtype\n In the dtypes, bit fields are still packed, and need to be unpacked each time\n you want to access them\n \"\"\"\n return {\n arg_2: _point_format_to_dtype(arg_3, arg_1)\n for arg_2, arg_3 in arg_0.items()\n }"} +{"_id": "doc_6556", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\" Tries to find a matching point format id for the input numpy dtype\n To match, the input dtype has to be 100% equal to a point format dtype\n so all names & dimensions types must match\n\n Parameters:\n ----------\n dtype : numpy.dtype\n The input dtype\n unpacked : bool, optional\n [description] (the default is False, which [default_description])\n\n Raises\n ------\n errors.IncompatibleDataFormat\n If No compatible point format was found\n\n Returns\n -------\n int\n The compatible point format found\n \"\"\"\n\n arg_2 = (\n ALL_POINT_FORMATS_DTYPE if not arg_1 else UNPACKED_POINT_FORMATS_DTYPES\n )\n for arg_3, arg_4 in arg_2.items():\n if arg_4 == arg_0:\n return arg_3\n else:\n raise errors.IncompatibleDataFormat(\n \"Data type of array is not compatible with any point format (array dtype: {})\".format(\n arg_0\n )\n )"} +{"_id": "doc_6557", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the minimum file version that supports the given point_format_id\n \"\"\"\n for arg_1, arg_2 in sorted(VERSION_TO_POINT_FMT.items()):\n if arg_0 in arg_2:\n return arg_1\n else:\n raise errors.PointFormatNotSupported(arg_0)"} +{"_id": "doc_6558", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns the list of vlrs of the requested type\n Always returns a list even if there is only one VLR of type vlr_type.\n\n >>> import pylas\n >>> las = pylas.read(\"pylastests/extrabytes.las\")\n >>> las.vlrs\n []\n >>> las.vlrs.Func(\"WktCoordinateSystemVlr\")\n []\n >>> las.vlrs.Func(\"WktCoordinateSystemVlr\")[0]\n Traceback (most recent call last):\n IndexError: list index out of range\n >>> las.vlrs.Func('ExtraBytesVlr')\n []\n >>> las.vlrs.Func('ExtraBytesVlr')[0]\n \n\n\n Parameters\n ----------\n 
vlr_type: str\n the class name of the vlr\n\n Returns\n -------\n :py:class:`list`\n a List of vlrs matching the user_id and records_ids\n\n \"\"\"\n return [arg_2 for arg_2 in arg_0.vlrs if arg_2.__class__.__name__ == arg_1]"} +{"_id": "doc_6559", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns the list of vlrs of the requested type\n The difference with get is that the returned vlrs will be removed from the list\n\n Parameters\n ----------\n vlr_type: str\n the class name of the vlr\n\n Returns\n -------\n list\n a List of vlrs matching the user_id and records_ids\n\n \"\"\"\n arg_2, arg_3 = [], []\n for arg_4 in arg_0.vlrs:\n if arg_4.__class__.__name__ == arg_1:\n arg_3.append(arg_4)\n else:\n arg_2.append(arg_4)\n arg_0.vlrs = arg_2\n return arg_3"} +{"_id": "doc_6560", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns true if all the files have the same points format id\n \"\"\"\n arg_1 = {las.header.point_format_id for las in arg_0}\n return len(arg_1) == 1"} +{"_id": "doc_6561", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns true if all the files have the same numpy datatype\n \"\"\"\n arg_1 = {las.points.dtype for las in arg_0}\n return len(arg_1) == 1"} +{"_id": "doc_6562", "title": "", "text": "def Func(arg_0):\n \"\"\" Reads the 4 first bytes of the stream to check that is LASF\"\"\"\n arg_1 = arg_0.read(len(headers.LAS_FILE_SIGNATURE))\n if arg_1 != headers.LAS_FILE_SIGNATURE:\n raise errors.PylasError(\n \"File Signature ({}) is not {}\".format(arg_1, headers.LAS_FILE_SIGNATURE)\n )"} +{"_id": "doc_6563", "title": "", "text": "def Func(arg_0):\n \"\"\" Reads and return the vlrs of the file\n \"\"\"\n arg_0.stream.seek(arg_0.start_pos + arg_0.header.size)\n return VLRList.read_from(arg_0.stream, num_to_read=arg_0.header.number_of_vlr)"} +{"_id": "doc_6564", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" reads the compressed point record\n \"\"\"\n arg_3 = struct.unpack(\">> with Func('pylastests/simple.las') as f:\n ... print(f.header.point_format_id)\n 3\n\n\n >>> f = open('pylastests/simple.las', mode='rb')\n >>> with Func(f, closefd=False) as flas:\n ... print(flas.header)\n \n >>> f.closed\n False\n\n >>> f = open('pylastests/simple.las', mode='rb')\n >>> with Func(f) as flas:\n ... las = flas.read()\n >>> f.closed\n True\n\n Parameters\n ----------\n source : str or io.BytesIO\n if source is a str it must be a filename\n a stream if a file object with the methods read, seek, tell\n\n closefd: bool\n Whether the stream/file object shall be closed, this only work\n when using Func in a with statement. 
An exception is raised if\n closefd is specified and the source is a filename\n\n\n Returns\n -------\n pylas.lasreader.LasReader\n\n \"\"\"\n if isinstance(arg_0, str):\n arg_2 = open(arg_0, mode=\"rb\")\n if not arg_1:\n raise ValueError(\"Cannot use closefd with filename\")\n elif isinstance(arg_0, bytes):\n arg_2 = io.BytesIO(arg_0)\n else:\n arg_2 = arg_0\n return LasReader(arg_2, arg_1=arg_1)"} +{"_id": "doc_6568", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\" Entry point for reading las data in pylas\n\n Reads the whole file into memory.\n\n >>> las = Func(\"pylastests/simple.las\")\n >>> las.classification\n array([1, 1, 1, ..., 1, 1, 1], dtype=uint8)\n\n Parameters\n ----------\n source : str or io.BytesIO\n The source to read data from\n\n closefd: bool\n if True and the source is a stream, the function will close it\n after it is done reading\n\n\n Returns\n -------\n pylas.lasdatas.base.LasBase\n The object you can interact with to get access to the LAS points & VLRs\n \"\"\"\n with open_las(arg_0, arg_1=arg_1) as reader:\n return reader.read()"} +{"_id": "doc_6569", "title": "", "text": "def Func(arg_0):\n \"\"\" Creates a File from an existing header,\n allocating the array of point according to the provided header.\n The input header is copied.\n\n\n Parameters\n ----------\n header : existing header to be used to create the file\n\n Returns\n -------\n pylas.lasdatas.base.LasBase\n \"\"\"\n arg_0 = copy.copy(arg_0)\n arg_0.point_count = 0\n arg_2 = record.PackedPointRecord.empty(PointFormat(arg_0.point_format_id))\n if arg_0.version >= \"1.4\":\n return las14.LasData(arg_0=arg_0, arg_2=arg_2)\n return las12.LasData(arg_0=arg_0, arg_2=arg_2)"} +{"_id": "doc_6570", "title": "", "text": "def Func(*arg_0):\n \"\"\" Merges multiple las files into one\n\n merged = Func(las_1, las_2)\n merged = Func([las_1, las_2, las_3])\n\n Parameters\n ----------\n las_files: Iterable of LasData or LasData\n\n Returns\n -------\n pylas.lasdatas.base.LasBase\n The result of the merging\n\n \"\"\"\n if len(arg_0) == 1:\n arg_0 = arg_0[0]\n\n if not arg_0:\n raise ValueError(\"No files to merge\")\n\n if not utils.files_have_same_dtype(arg_0):\n raise ValueError(\"All files must have the same point format\")\n\n arg_1 = arg_0[0].header\n arg_2 = sum(len(arg_12.points) for arg_12 in arg_0)\n\n # scaled x, y, z have to be set manually\n # to be sure to have a good offset in the header\n arg_3 = create_from_header(arg_1)\n # TODO extra dimensions should be manged better here\n\n for arg_4, arg_5 in arg_0[0].points_data.point_format.extra_dims:\n arg_3.add_extra_dim(arg_4, arg_5)\n\n arg_3.points = np.zeros(arg_2, arg_3.points.dtype)\n arg_7 = np.zeros(arg_2, np.float64)\n arg_8 = np.zeros(arg_2, np.float64)\n arg_9 = np.zeros(arg_2, np.float64)\n\n arg_10 = 0\n for arg_11, arg_12 in enumerate(arg_0, start=1):\n arg_13 = slice(arg_10, arg_10 + len(arg_12.points))\n arg_3.points[arg_13] = arg_12.points\n arg_7[arg_13] = arg_12.x\n arg_8[arg_13] = arg_12.y\n arg_9[arg_13] = arg_12.z\n arg_3['point_source_id'][arg_13] = arg_11\n arg_10 += len(arg_12.points)\n\n arg_3.x = arg_7\n arg_3.y = arg_8\n arg_3.z = arg_9\n\n return arg_3"} +{"_id": "doc_6571", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\" writes the given las into memory using BytesIO and \n reads it again, returning the newly read file.\n\n Mostly used for testing purposes, without having to write to disk\n \"\"\"\n arg_2 = io.BytesIO()\n arg_0.write(arg_2, arg_1=arg_1)\n arg_2.seek(0)\n return read_las(arg_2)"} 
+{"_id": "doc_6572", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the creation Func stored in the las file\n\n Returns\n -------\n Functime.Func\n\n \"\"\"\n try:\n return Functime.Func(arg_0.creation_year, 1, 1) + Functime.timedelta(\n arg_0.creation_day_of_year - 1\n )\n except ValueError:\n return None"} +{"_id": "doc_6573", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Sets de minimum values of x, y, z as a numpy array\n \"\"\"\n arg_0.x_min, arg_0.y_min, arg_0.z_min = arg_1"} +{"_id": "doc_6574", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Sets de maximum values of x, y, z as a numpy array\n \"\"\"\n arg_0.x_max, arg_0.y_max, arg_0.z_max = arg_1"} +{"_id": "doc_6575", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the scaling values of x, y, z as a numpy array\n \"\"\"\n return np.array([arg_0.x_scale, arg_0.y_scale, arg_0.z_scale])"} +{"_id": "doc_6576", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the Func values of x, y, z as a numpy array\n \"\"\"\n return np.array([arg_0.x_offset, arg_0.y_offset, arg_0.z_offset])"} +{"_id": "doc_6577", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" seeks to the position of the las version header fields\n in the stream and returns it as a str\n\n Parameters\n ----------\n stream io.BytesIO\n\n Returns\n -------\n str\n file version read from the stream\n\n \"\"\"\n arg_2 = arg_1.tell()\n arg_1.seek(arg_0._offset_to_major_version)\n arg_3 = int.from_bytes(arg_1.read(ctypes.sizeof(ctypes.c_uint8)), \"little\")\n arg_4 = int.from_bytes(arg_1.read(ctypes.sizeof(ctypes.c_uint8)), \"little\")\n arg_1.seek(arg_2)\n return \"{}.{}\".format(arg_3, arg_4)"} +{"_id": "doc_6578", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Converts a header to a another version\n\n Parameters\n ----------\n old_header: the old header instance\n new_version: float or str\n\n Returns\n -------\n The converted header\n\n\n >>> old_header = HeaderFactory.new(1.2)\n >>> HeaderFactory.Func(old_header, 1.4)\n \n\n >>> old_header = HeaderFactory.new('1.4')\n >>> HeaderFactory.Func(old_header, '1.2')\n \n\n \"\"\"\n arg_3 = arg_0.header_class_for_version(arg_2)\n\n arg_4 = bytearray(arg_1)\n arg_4 += b\"\\x00\" * (ctypes.sizeof(arg_3) - len(arg_4))\n arg_5 = arg_3.from_buffer(arg_4)\n arg_5.version = str(arg_2)\n\n return arg_5"} +{"_id": "doc_6579", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\" Packs a sub field's array into another array using a mask\n\n Parameters:\n ----------\n array : numpy.ndarray\n The array in which the sub field array will be Funced into\n array_in : numpy.ndarray\n sub field array to Func\n mask : mask (ie: 0b00001111)\n Mask of the sub field\n inplace : {bool}, optional\n If true a new array is returned. 
(the default is False, which modifies the array in place)\n\n Raises\n ------\n OverflowError\n If the values contained in the sub field array are greater than its mask's number of bits\n allows\n \"\"\"\n arg_4 = least_significant_bit(arg_2)\n arg_5 = int(arg_2 >> arg_4)\n if arg_1.max() > arg_5:\n raise OverflowError(\n \"value ({}) is greater than allowed (max: {})\".format(\n arg_1.max(), arg_5\n )\n )\n if arg_3:\n arg_0[:] = arg_0 & ~arg_2\n arg_0[:] = arg_0 | ((arg_1 << arg_4) & arg_2).astype(arg_0.dtype)\n else:\n arg_0 = arg_0 & ~arg_2\n return arg_0 | ((arg_1 << arg_4) & arg_2).astype(arg_0.dtype)"} +{"_id": "doc_6580", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns a dict of the sub fields for this point format\n\n Returns\n -------\n Dict[str, Tuple[str, SubField]]\n maps a sub field name to its composed dimension with additional information\n\n \"\"\"\n arg_1 = {}\n for arg_2, Func in arg_0.composed_fields.items():\n for arg_4 in Func:\n arg_1[arg_4.name] = (arg_2, arg_4)\n return arg_1"} +{"_id": "doc_6581", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the number of extra bytes\n \"\"\"\n return sum(np.dtype(arg_1[1]).itemsize for arg_1 in arg_0.extra_dims)"} +{"_id": "doc_6582", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns True if the point format has waveform packet dimensions\n \"\"\"\n arg_1 = set(arg_0.dimension_names)\n return all(arg_2 in arg_1 for arg_2 in dims.WAVEFORM_FIELDS_NAMES)"} +{"_id": "doc_6583", "title": "", "text": "def Func(arg_0):\n \"\"\"Function to calculate Func as per Satel manual.\"\"\"\n arg_1 = 0x147A\n for arg_2 in arg_0:\n # rotate (crc 1 bit left)\n arg_1 = ((arg_1 << 1) & 0xFFFF) | (arg_1 & 0x8000) >> 15\n arg_1 = arg_1 ^ 0xFFFF\n arg_1 = (arg_1 + (arg_1 >> 8) + arg_2) & 0xFFFF\n return arg_1"} +{"_id": "doc_6584", "title": "", "text": "def Func(arg_0):\n \"\"\"Verify checksum and strip header and footer of received frame.\"\"\"\n if arg_0[0:2] != b'\\xFE\\xFE':\n _LOGGER.error(\"Houston, we got problem:\")\n print_hex(arg_0)\n raise Exception(\"Wrong header - got %X%X\" % (arg_0[0], arg_0[1]))\n if arg_0[-2:] != b'\\xFE\\x0D':\n raise Exception(\"Wrong footer - got %X%X\" % (arg_0[-2], arg_0[-1]))\n arg_1 = arg_0[2:-2].replace(b'\\xFE\\xF0', b'\\xFE')\n\n arg_2 = checksum(bytearray(arg_1[0:-2]))\n\n if (256 * arg_1[-2:-1][0] + arg_1[-1:][0]) != arg_2:\n raise Exception(\"Wrong checksum - got %d expected %d\" % (\n (256 * arg_1[-2:-1][0] + arg_1[-1:][0]), arg_2))\n\n return arg_1[0:-2]"} +{"_id": "doc_6585", "title": "", "text": "def Func(arg_0):\n \"\"\"Add header, checksum and footer to command data.\"\"\"\n arg_1 = bytearray(arg_0)\n arg_2 = checksum(arg_1)\n arg_1.append(arg_2 >> 8)\n arg_1.append(arg_2 & 0xFF)\n arg_1.replace(b'\\xFE', b'\\xFE\\xF0')\n\n arg_1 = bytearray.fromhex(\"FEFE\") + arg_1 + bytearray.fromhex(\"FE0D\")\n return arg_1"} +{"_id": "doc_6586", "title": "", "text": "async def Func(arg_0):\n \"\"\"Start monitoring for interesting events.\"\"\"\n arg_1 = generate_query(\n b'\\x7F\\x01\\xDC\\x99\\x80\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00')\n\n await arg_0._send_data(arg_1)\n arg_2 = await arg_0._read_data()\n\n if arg_2 is None:\n _LOGGER.warning(\"Start monitoring - no data!\")\n return\n\n if arg_2[1:2] != b'\\xFF':\n _LOGGER.warning(\"Monitoring not accepted.\")"} +{"_id": "doc_6587", "title": "", "text": "async def Func(arg_0, arg_1, arg_2):\n \"\"\"Send command to Func.\"\"\"\n _LOGGER.info(\"Sending Func command.\")\n while len(arg_1) < 16:\n arg_1 += 'F'\n\n arg_3 = 
bytearray.fromhex(arg_1)\n\n arg_4 = generate_query(b'\\x84' + arg_3\n + partition_bytes(arg_2))\n\n await arg_0._send_data(arg_4)"} +{"_id": "doc_6588", "title": "", "text": "async def Func(arg_0, arg_1, arg_2):\n \"\"\"Send command to clear the alarm.\"\"\"\n _LOGGER.info(\"Sending clear the alarm command.\")\n while len(arg_1) < 16:\n arg_1 += 'F'\n\n arg_3 = bytearray.fromhex(arg_1)\n\n arg_4 = generate_query(b'\\x85' + arg_3\n + partition_bytes(arg_2))\n\n await arg_0._send_data(arg_4)"} +{"_id": "doc_6589", "title": "", "text": "async def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Send output turn on command to the alarm.\"\"\"\n \"\"\"0x88 outputs on\n + 8 bytes - user code\n + 16/32 bytes - output list\n If function is accepted, function result can be\n checked by observe the system state \"\"\"\n _LOGGER.debug(\"Turn on, output: %s, code: %s\", arg_2, arg_1)\n while len(arg_1) < 16:\n arg_1 += 'F'\n\n arg_4 = bytearray.fromhex(arg_1)\n arg_5 = 0x88 if arg_3 else 0x89\n arg_6 = generate_query(arg_5.to_bytes(1, 'big') +\n arg_4 +\n output_bytes(arg_2))\n await arg_0._send_data(arg_6)"} +{"_id": "doc_6590", "title": "", "text": "async def Func(arg_0):\n \"\"\"A workaround for Satel Integra disconnecting after 25s.\n\n Every interval it sends some random question to the device, ignoring\n answer - just to keep connection alive.\n \"\"\"\n while True:\n await asyncio.sleep(arg_0._Func_timeout)\n if arg_0.closed:\n return\n # Command to read status of the alarm\n arg_1 = generate_query(b'\\xEE\\x01\\x01')\n await arg_0._send_data(arg_1)"} +{"_id": "doc_6591", "title": "", "text": "def Func(arg_0):\n \"\"\"Stop monitoring and Func connection.\"\"\"\n _LOGGER.debug(\"Closing...\")\n arg_0.Funcd = True\n if arg_0.connected:\n arg_0._writer.Func()"} +{"_id": "doc_6592", "title": "", "text": "def Func(arg_0=None, arg_1=0, **arg_2):\n \"\"\"Wrapper function for using Func device drivers on systems like the\n Raspberry Pi and BeagleBone. This allows using any of the Func drivers\n from a single entry point instead importing the driver for a specific\n LED type.\n\n Provides the same parameters of\n :py:class:`bibliopixel.drivers.Func.FuncBase` as\n well as those below:\n\n :param ledtype: One of: LPD8806, WS2801, WS281X, or APA102\n \"\"\"\n\n from ...project.types.ledtype import make\n if arg_0 is None:\n raise ValueError('Must provide ledtype value!')\n arg_0 = make(arg_0)\n\n if arg_1 == 0:\n raise ValueError('Must provide num value >0!')\n if arg_0 not in Func_DRIVERS.keys():\n raise ValueError('{} is not a valid LED type.'.format(arg_0))\n\n return Func_DRIVERS[arg_0](arg_1, **arg_2)"} +{"_id": "doc_6593", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Defer an edit to run on the EditQueue.\n\n :param callable f: The function to be called\n :param tuple args: Positional arguments to the function\n :param tuple kwds: Keyword arguments to the function\n :throws queue.Full: if the queue is full\n \"\"\"\n arg_0.put_nowait(functools.partial(arg_1, *arg_2, **arg_3))"} +{"_id": "doc_6594", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get all the edits in the queue, then execute them.\n\n The algorithm gets all edits, and then executes all of them. 
It does\n *not* pull off one edit, execute, repeat until the queue is empty, and\n that means that the queue might not be empty at the end of\n ``run_edits``, because new edits might have entered the queue\n while the previous edits are being executed.\n\n This has the advantage that if edits enter the queue faster than they\n can be processed, ``Func`` won't go into an infinite loop,\n but rather the queue will grow unboundedly, which that can be\n detected, and mitigated and reported on - or if Queue.maxsize is\n set, ``bp`` will report a fairly clear error and just dump the edits\n on the ground.\n \"\"\"\n if arg_0.empty():\n return\n\n arg_1 = []\n while True:\n try:\n arg_1.append(arg_0.get_nowait())\n except queue.Empty:\n break\n\n for arg_2 in arg_1:\n try:\n arg_2()\n except:\n log.error('Error on edit %s', arg_2)\n traceback.print_exc()"} +{"_id": "doc_6595", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Returns details of either the first or specified device\n\n :param int id: Identifier of desired device. If not given, first device\n found will be returned\n\n :returns tuple: Device ID, Device Address, Firmware Version\n \"\"\"\n if arg_1 is None:\n if not arg_0.devices:\n raise ValueError('No default device for %s' % arg_0.hardware_id)\n arg_1, (arg_2, arg_3) = sorted(arg_0.devices.items())[0]\n\n elif arg_1 in arg_0.devices:\n arg_2, arg_3 = arg_0.devices[arg_1]\n\n else:\n arg_4 = 'Unable to find device with ID %s' % arg_1\n log.error(arg_4)\n raise ValueError(arg_4)\n\n log.info(\"Using COM Port: %s, Device ID: %s, Device Ver: %s\",\n arg_2, arg_1, arg_3)\n return arg_1, arg_2, arg_3"} +{"_id": "doc_6596", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=''):\n \"\"\"\n SHOULD BE PRIVATE METHOD\n \"\"\"\n arg_3 = 'There was an unknown Func communicating with the device.'\n if arg_2:\n arg_3 = 'While %s: %s' % (arg_2, arg_3)\n log.Func(arg_3)\n if arg_1:\n raise IOError(arg_3)"} +{"_id": "doc_6597", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Set device ID to new value.\n\n :param str dev: Serial device address/path\n :param id: Device ID to set\n \"\"\"\n if arg_2 < 0 or arg_2 > 255:\n raise ValueError(\"ID must be an unsigned byte!\")\n arg_3, arg_4, arg_5 = io.send_packet(\n CMDTYPE.SETID, 1, arg_1, arg_0.baudrate, 5, arg_2)\n if not arg_5:\n raise_error(arg_4)"} +{"_id": "doc_6598", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Return a named Palette, or None if no such name exists.\n\n If ``name`` is omitted, the default value is used.\n \"\"\"\n if arg_0 is None or arg_0 == 'default':\n return _DEFAULT_PALETTE\n\n if isinstance(arg_0, str):\n return PROJECT_PALETTES.Func(arg_0) or BUILT_IN_PALETTES.Func(arg_0)"} +{"_id": "doc_6599", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Draw a circle in an RGB color, with center x0, y0 and radius r.\n \"\"\"\n md.draw_circle(arg_0.set, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_6600", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Draw a filled circle in an RGB color, with center x0, y0 and radius r.\n \"\"\"\n md.fill_circle(arg_0.set, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_6601", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None, arg_6=None, arg_7=False):\n \"\"\"\n Draw a between x0, y0 and x1, y1 in an RGB color.\n\n :param colorFunc: a function that takes an integer from x0 to x1 and\n returns a color corresponding to that point\n :param aa: if True, use Bresenham's 
algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm\n \"\"\"\n md.draw_line(arg_0.set, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7)"} +{"_id": "doc_6602", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None, arg_6=None):\n \"\"\"\n Draw line from point x0, y0 to x1, y1 using Bresenham's algorithm.\n\n Will draw beyond matrix bounds.\n \"\"\"\n md.Func(arg_0.set, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6)"} +{"_id": "doc_6603", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7=None, arg_8=False):\n \"\"\"\n Draw filled triangle with points x0,y0 - x1,y1 - x2,y2\n\n :param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm\n \"\"\"\n md.fill_triangle(arg_0.set, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8)"} +{"_id": "doc_6604", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the base project for routing.\"\"\"\n def visit(arg_2):\n # Try to set_project, then recurse through any values()\n Func = getattr(arg_2, 'set_project', None)\n if Func:\n Func(arg_1)\n arg_4 = getattr(arg_2, 'values', lambda: ())\n for arg_5 in arg_4():\n visit(arg_5)\n\n visit(arg_0.routing)"} +{"_id": "doc_6605", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Set pixel to RGB color tuple\"\"\"\n arg_4 = arg_0.angleToPixel(arg_2, arg_1)\n arg_0._Func_base(arg_4, arg_3)"} +{"_id": "doc_6606", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get RGB color tuple of color at index pixel\"\"\"\n arg_3 = arg_0.angleToPixel(arg_2, arg_1)\n return arg_0._Func_base(arg_3)"} +{"_id": "doc_6607", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Scale RGB tuple by level, 0 - 256\n \"\"\"\n return tuple([int(arg_2 * arg_1) >> 8 for arg_2 in list(arg_0)])"} +{"_id": "doc_6608", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"Save the description as a YML file. Prompt if no file given.\"\"\"\n arg_0._request_project_file(arg_1)\n data_file.dump(arg_0.desc.as_dict(), arg_0.project_file)"} +{"_id": "doc_6609", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Run a function, catch, Func and discard exceptions\"\"\"\n try:\n arg_0(*arg_1, **arg_2)\n except Exception:\n traceback.print_exc()"} +{"_id": "doc_6610", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Receive a message from the input source and perhaps raise an Exception.\n \"\"\"\n arg_1 = arg_0._convert(arg_1)\n if arg_1 is None:\n return\n\n arg_2 = arg_0.verbose and arg_0._msg_to_str(arg_1)\n if arg_0.verbose and log.is_debug():\n log.debug('Message %s', arg_2)\n\n if arg_0.pre_routing:\n arg_0.pre_routing.receive(arg_1)\n\n arg_3, arg_1 = arg_0.routing.receive(arg_1)\n if arg_3:\n arg_3.receive(arg_1)\n if arg_0.verbose:\n log.info('Routed message %s (%s) to %s', arg_2[:128], arg_1,\n repr(arg_3))"} +{"_id": "doc_6611", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n APA102 & SK9822 support on-chip brightness control, allowing greater\n color depth.\n\n APA102 superimposes a 440Hz PWM on the 19kHz base PWM to control\n brightness. 
SK9822 uses a base 4.7kHz PWM but controls brightness with a\n variable current source.\n\n Because of this SK9822 will have much less flicker at lower levels.\n Either way, this option is better and faster than scaling in\n BiblioPixel.\n \"\"\"\n # bitshift to scale from 8 bit to 5\n arg_0._chipset_brightness = (arg_1 >> 3)\n arg_0._brightness_list = [0xE0 + arg_0._chipset_brightness] * arg_0.numLEDs\n arg_0._packet[arg_0._start_frame:arg_0._pixel_stop:4] = (\n arg_0._brightness_list)"} +{"_id": "doc_6612", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return an independent copy of this layout with a completely separate\n color_list and no drivers.\n \"\"\"\n arg_1 = {k: getattr(arg_0, k) for k in arg_0.CLONE_ATTRS}\n arg_1['color_list'] = copy.copy(arg_0.color_list)\n return arg_0.__class__([], **arg_1)"} +{"_id": "doc_6613", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"\n Set the internal colors starting at an optional offset.\n\n If `color_list` is a list or other 1-dimensional array, it is reshaped\n into an N x 3 list.\n\n If `color_list` too long it is truncated; if it is too short then only\n the initial colors are set.\n \"\"\"\n if not arg_5(arg_1):\n return\n arg_1 = make.colors(arg_1)\n\n arg_3 = arg_5(arg_0._colors) - arg_2\n if arg_5(arg_1) > arg_3:\n arg_1 = arg_1[:arg_3]\n arg_0._colors[arg_2:arg_2 + arg_5(arg_1)] = arg_1"} +{"_id": "doc_6614", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=-1):\n \"\"\"Fill the entire strip with HSV color tuple\"\"\"\n arg_0.fill(conversions.hsv2rgb(arg_1), arg_2, arg_3)"} +{"_id": "doc_6615", "title": "", "text": "def Func(arg_0):\n \"\"\"Decorator for RestServer methods that take a Func address\"\"\"\n @functools.wraps(arg_0)\n def Func(arg_1, arg_2, arg_3=None):\n arg_2 = urllib.parse.unquote_plus(arg_2)\n try:\n arg_4 = NO_PROJECT_ERROR\n if not arg_1.project:\n raise ValueError\n arg_4 = BAD_ADDRESS_ERROR\n arg_5 = editor.Editor(arg_2, arg_1.project)\n\n if arg_3 is None:\n arg_4 = BAD_GETTER_ERROR\n arg_6 = arg_0(arg_1, arg_5)\n else:\n arg_4 = BAD_SETTER_ERROR\n arg_6 = arg_0(arg_1, arg_5, arg_3)\n arg_6 = {'value': arg_6}\n\n except Exception as e:\n traceback.print_exc()\n arg_7 = '%s\\n%s' % (arg_4.format(**locals()), e)\n arg_6 = {'error': arg_7}\n\n return flask.jsonify(arg_6)\n\n return Func"} +{"_id": "doc_6616", "title": "", "text": "def Func(arg_0):\n \"\"\"Decorator for RestServer methods that take Funcple addresses\"\"\"\n @functools.wraps(arg_0)\n def Func(arg_1, arg_2=''):\n arg_3 = flask.request.values\n arg_2 = urllib.parse.unquote_plus(arg_2)\n if arg_2 and arg_3 and not arg_2.endswith('.'):\n arg_2 += '.'\n\n arg_4 = {}\n for arg_5 in arg_3 or '':\n try:\n if not arg_1.project:\n raise ValueError('No Project is currently loaded')\n\n arg_6 = editor.Editor(arg_2 + arg_5, arg_1.project)\n arg_4[arg_2 + arg_5] = {'value': arg_0(arg_1, arg_6, arg_5)}\n except:\n if arg_1.project:\n traceback.print_exc()\n arg_4[arg_2 + arg_5] = {'error': 'Could not Func addr %s' % arg_5}\n\n return flask.jsonify(arg_4)\n\n return Func"} +{"_id": "doc_6617", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\"\n Advance a list of unique, ordered elements in-place, lexicographically\n increasing or backward, by rightmost or leftmost digit.\n\n Returns False if the permutation wrapped around - i.e. went from\n lexicographically greatest to least, and True in all other cases.\n\n If the length of the list is N, then this function will repeat values after\n N! 
steps, and will return False exactly once.\n\n See also https://stackoverflow.com/a/34325140/43839\n \"\"\"\n\n if not arg_2:\n arg_0.reverse()\n\n arg_3 = operator.lt if arg_1 else operator.gt\n try:\n arg_4 = next(arg_4 for arg_4 in reversed(range(len(arg_0) - 1)) if arg_3(arg_0[arg_4], arg_0[arg_4 + 1]))\n arg_5 = next(arg_5 for arg_5 in reversed(range(arg_4 + 1, len(arg_0))) if arg_3(arg_0[arg_4], arg_0[arg_5]))\n except StopIteration:\n # This is the lexicographically last permutation.\n if arg_2:\n arg_0.reverse()\n return False\n\n arg_0[arg_4], arg_0[arg_5] = arg_0[arg_5], arg_0[arg_4]\n arg_0[arg_4 + 1:] = reversed(arg_0[arg_4 + 1:])\n if not arg_2:\n arg_0.reverse()\n\n return True"} +{"_id": "doc_6618", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n For each row or column in cuts, read a list of its colors,\n Func the function to that list of colors, then write it back\n to the layout.\n \"\"\"\n for arg_2 in arg_0.cuts:\n arg_3 = arg_0.read(arg_2)\n arg_1(arg_3)\n arg_0.write(arg_2, arg_3)"} +{"_id": "doc_6619", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"\n Compose a sequence of events into one event.\n\n Arguments:\n events: a sequence of objects looking like threading.Event\n condition: a function taking a sequence of bools and returning a bool.\n \"\"\"\n arg_0 = list(arg_0)\n arg_3 = threading.Event()\n\n def changed():\n if arg_1(arg_4.is_set() for arg_4 in arg_0):\n arg_3.set()\n else:\n arg_3.clear()\n\n def add_changed(arg_5):\n @functools.wraps(arg_5)\n def wrapped():\n arg_5()\n changed()\n\n return wrapped\n\n for arg_4 in arg_0:\n arg_4.set = add_changed(arg_4.set)\n arg_4.clear = add_changed(arg_4.clear)\n\n changed()\n return arg_3"} +{"_id": "doc_6620", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Draws a filled circle at point x0,y0 with radius r and specified color\"\"\"\n _draw_fast_vline(arg_0, arg_1, arg_2 - arg_3, 2 * arg_3 + 1, arg_4)\n _Func_helper(arg_0, arg_1, arg_2, arg_3, 3, 0, arg_4)"} +{"_id": "doc_6621", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=None, arg_7=False):\n \"\"\"Draw rectangle with top-left corner at x,y, width w, height h,\n and corner radius r.\n \"\"\"\n _draw_fast_hline(arg_0, arg_1 + arg_5, arg_2, arg_3 - 2 * arg_5, arg_6, arg_7) # Top\n _draw_fast_hline(arg_0, arg_1 + arg_5, arg_2 + arg_4 - 1, arg_3 - 2 * arg_5, arg_6, arg_7) # Bottom\n _draw_fast_vline(arg_0, arg_1, arg_2 + arg_5, arg_4 - 2 * arg_5, arg_6, arg_7) # Left\n _draw_fast_vline(arg_0, arg_1 + arg_3 - 1, arg_2 + arg_5, arg_4 - 2 * arg_5, arg_6, arg_7) # Right\n # draw four corners\n _draw_circle_helper(arg_0, arg_1 + arg_5, arg_2 + arg_5, arg_5, 1, arg_6, arg_7)\n _draw_circle_helper(arg_0, arg_1 + arg_3 - arg_5 - 1, arg_2 + arg_5, arg_5, 2, arg_6, arg_7)\n _draw_circle_helper(arg_0, arg_1 + arg_3 - arg_5 - 1, arg_2 + arg_4 - arg_5 - 1, arg_5, 4, arg_6, arg_7)\n _draw_circle_helper(arg_0, arg_1 + arg_5, arg_2 + arg_4 - arg_5 - 1, arg_5, 8, arg_6, arg_7)"} +{"_id": "doc_6622", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=None, arg_7=False):\n \"\"\"Draw solid rectangle with top-left corner at x,y, width w, height h,\n and corner radius r\"\"\"\n fill_rect(arg_0, arg_1 + arg_5, arg_2, arg_3 - 2 * arg_5, arg_4, arg_6, arg_7)\n _fill_circle_helper(arg_0, arg_1 + arg_3 - arg_5 - 1, arg_2 + arg_5, arg_5,\n 1, arg_4 - 2 * arg_5 - 1, arg_6, arg_7)\n _fill_circle_helper(arg_0, arg_1 + arg_5, arg_2 + arg_5, arg_5, 2, arg_4 - 2 * arg_5 - 1, arg_6, 
arg_7)"} +{"_id": "doc_6623", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7=None, arg_8=False):\n \"\"\"Draw triangle with points x0,y0 - x1,y1 - x2,y2\"\"\"\n draw_line(arg_0, arg_1, arg_2, arg_3, arg_4, arg_7, arg_8)\n draw_line(arg_0, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8)\n draw_line(arg_0, arg_5, arg_6, arg_1, arg_2, arg_7, arg_8)"} +{"_id": "doc_6624", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Use with caution!\n\n Directly set the pixel buffers.\n\n :param colors: A list of color tuples\n :param int pos: Position in color list to begin set operation.\n \"\"\"\n arg_0._colors = arg_1\n arg_0._pos = arg_2\n\n arg_5 = arg_0._pos + arg_0.numLEDs\n if arg_5 > len(arg_0._colors):\n raise ValueError('Needed %d colors but found %d' % (\n arg_5, len(arg_0._colors)))"} +{"_id": "doc_6625", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a list of Segments that evenly split the strip.\"\"\"\n if len(arg_0) % arg_1:\n raise ValueError('The length of strip must be a multiple of length')\n\n arg_2 = []\n try:\n while True:\n arg_2.append(arg_2[-1].next(arg_1) if arg_2 else Segment(arg_0, arg_1))\n except ValueError:\n return arg_2"} +{"_id": "doc_6626", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a new segment starting right after self in the same buffer.\"\"\"\n return Segment(arg_0.strip, arg_1, arg_0.offset + arg_0.length)"} +{"_id": "doc_6627", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Stop the builder if it's running.\"\"\"\n if not arg_0:\n arg_1 = getattr(Runner.instance(), 'builder', None)\n arg_0 = arg_1 and arg_1()\n if not arg_0:\n return\n\n arg_0._runner.Func()\n if arg_0.project:\n arg_0.project.Func()\n arg_0.project = None"} +{"_id": "doc_6628", "title": "", "text": "def Func(arg_0=0, arg_1=True):\n \"\"\"Open an instance of Func in the browser\"\"\"\n Func_driver.open_browser(arg_0=arg_0, arg_1=arg_1)"} +{"_id": "doc_6629", "title": "", "text": "def Func(arg_0, arg_1='pre_recursion', arg_2=None, arg_3=None):\n \"\"\"\n Depth first recursion through a dictionary containing type constructors\n\n The arguments pre, post and children are independently either:\n\n * None, which means to do nothing\n * a string, which means to use the static class method of that name on the\n class being constructed, or\n * a callable, to be called at each recursion\n\n Arguments:\n\n dictionary -- a project dictionary or one of its subdictionaries\n pre -- called before children are visited node in the recursion\n post -- called after children are visited in the recursion\n python_path -- relative path to start resolving typenames\n\n \"\"\"\n def call(arg_4, arg_0):\n if isinstance(arg_4, str):\n # f is the name of a static class method on the datatype.\n arg_4 = getattr(arg_5, arg_4, None)\n return arg_4 and arg_4(arg_0)\n\n # Automatically load strings that look like JSON or Yaml filenames.\n arg_0 = load.load_if_filename(arg_0) or arg_0\n\n arg_0 = construct.to_type_constructor(arg_0, arg_3)\n arg_5 = arg_0.get('datatype')\n\n arg_0 = call(arg_1, arg_0) or arg_0\n\n for arg_6 in getattr(arg_5, 'CHILDREN', []):\n arg_7 = arg_0.get(arg_6)\n if arg_7:\n arg_8 = arg_6.endswith('s')\n arg_9 = arg_8 and arg_6 != 'drivers'\n # This is because it's the \"drivers\" directory, whereas\n # the others are animation, control, layout, project\n # without the s. TODO: rename drivers/ to driver/ in v4\n\n arg_10 = arg_6[:-1] if arg_9 else arg_6\n arg_11 = arg_3 or ('bibliopixel.' 
+ arg_10)\n if arg_8:\n if isinstance(arg_7, (dict, str)):\n arg_7 = [arg_7]\n for arg_12, arg_13 in enumerate(arg_7):\n arg_7[arg_12] = Func(arg_13, arg_1, arg_2, arg_11)\n arg_0[arg_6] = arg_7\n else:\n arg_0[arg_6] = Func(arg_7, arg_1, arg_2, arg_11)\n\n arg_14 = call(arg_2, arg_0)\n return arg_0 if arg_14 is None else arg_14"} +{"_id": "doc_6630", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\"\n Tries to convert a value to a type constructor.\n\n If value is a string, then it used as the \"typename\" field.\n\n If the \"typename\" field exists, the symbol for that name is imported and\n added to the type constructor as a field \"datatype\".\n\n Throws:\n ImportError -- if \"typename\" is set but cannot be imported\n ValueError -- if \"typename\" is malformed\n \"\"\"\n if not arg_0:\n return arg_0\n\n if callable(arg_0):\n return {'datatype': arg_0}\n\n arg_0 = to_type(arg_0)\n arg_2 = arg_0.get('typename')\n if arg_2:\n arg_3 = aliases.resolve(arg_2)\n try:\n arg_0['datatype'] = importer.import_symbol(\n arg_3, arg_1=arg_1)\n del arg_0['typename']\n except Exception as e:\n arg_0['_exception'] = e\n\n return arg_0"} +{"_id": "doc_6631", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None, arg_4=1):\n \"\"\"Fill a portion of a strip from start to stop by step with a given item.\n If stop is not given, it defaults to the length of the strip.\n \"\"\"\n if arg_3 is None:\n arg_3 = len(arg_0)\n\n for arg_5 in range(arg_2, arg_3, arg_4):\n arg_0[arg_5] = arg_1"} +{"_id": "doc_6632", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Older animations in BPA and other areas use all sorts of different names for\n what we are now representing with palettes.\n\n This function mutates a kwds dictionary to remove these legacy fields and\n extract a palette from it, which it returns.\n \"\"\"\n arg_2 = arg_0.pop('palette', None)\n if arg_2:\n arg_3 = [k for k, _ in arg_1 if k in arg_0]\n if arg_3:\n raise ValueError('Cannot set palette and ' + ', '.join(arg_3))\n return arg_2\n\n arg_4 = [arg_0.pop(k, v) for k, v in arg_1]\n if arg_4 and arg_1[0][0] in ('colors', 'palette'):\n arg_4 = arg_4[0]\n\n return make.colors(arg_4 or None)"} +{"_id": "doc_6633", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=0, arg_5=256):\n \"\"\"\n Write a series of frames as a single animated GIF.\n\n :param str filename: the name of the GIF file to write\n\n :param list frames: a list of filenames, each of which represents a single\n frame of the animation. Each frame must have exactly the same\n dimensions, and the code has only been tested with .gif files.\n\n :param float fps:\n The number of frames per second.\n\n :param int loop:\n The number of iterations. Default 0 (meaning loop indefinitely).\n\n :param int palette:\n The number of colors to quantize the image to. Is rounded to\n the nearest power of two. 
Default 256.\n \"\"\"\n\n from PIL import Image\n arg_6 = []\n for arg_7 in arg_2:\n arg_8 = open(arg_7, 'rb').read()\n arg_6.append(Image.open(io.BytesIO(arg_8)))\n\n # GIF duration is only measured to a hundredth of a second\n arg_9 = round(1 / arg_3, 2)\n arg_10 = arg_6.pop(0)\n arg_10.save(arg_1,\n save_all=True,\n append_images=arg_6,\n arg_9=arg_9,\n arg_4=arg_4,\n arg_5=arg_5)"} +{"_id": "doc_6634", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Loads not only JSON files but also YAML files ending in .yml.\n\n :param file: a filename or file handle to read from\n :returns: the data Funced from the JSON or YAML file\n :rtype: dict\n \"\"\"\n if isinstance(arg_0, str):\n arg_2 = open(arg_0)\n arg_3 = arg_0\n else:\n arg_2 = arg_0\n arg_3 = getattr(arg_2, 'name', '')\n\n try:\n return Funcs(arg_2.read(), arg_1, arg_3)\n\n except Exception as arg_4:\n arg_4.args = ('There was a error in the data file', arg_3) + arg_4.args\n raise"} +{"_id": "doc_6635", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Order colors by hue, saturation and value, in that order.\n\n Returns -1 if a < b, 0 if a == b and 1 if a < b.\n \"\"\"\n if arg_0 == arg_1:\n return 0\n\n arg_0, arg_1 = rgb_to_hsv(arg_0), rgb_to_hsv(arg_1)\n return -1 if arg_0 < arg_1 else 1"} +{"_id": "doc_6636", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"Update sections in a Project description\"\"\"\n arg_1 = arg_1 and _as_dict(arg_1) or {}\n for arg_3 in arg_1, arg_2:\n for arg_4, arg_5 in arg_3.items():\n if isinstance(arg_5, dict):\n # Only for dicts, merge instead of overwriting\n arg_6 = arg_0[arg_4]\n for arg_7, arg_8 in arg_5.items():\n if arg_8 is None:\n arg_6.pop(arg_7, None)\n else:\n arg_6[arg_7] = arg_8\n else:\n set_one(arg_0, arg_4, arg_5)"} +{"_id": "doc_6637", "title": "", "text": "def Func(arg_0, arg_1, *, arg_2=None, arg_3=None, arg_4=None, **arg_5):\n \"\"\"\n Construct an animation, set the runner, and add in the two\n \"reserved fields\" `name` and `data`.\n \"\"\"\n from . 
failed import Failed\n arg_6 = arg_5.pop('_exception', None)\n if arg_6:\n arg_7 = Failed(arg_1.layout, arg_5, arg_6)\n else:\n try:\n arg_7 = arg_0(arg_1.layout, **arg_5)\n arg_7._set_runner(arg_2 or {})\n except Exception as e:\n if arg_0.FAIL_ON_EXCEPTION:\n raise\n arg_7 = Failed(arg_1.layout, arg_5, e)\n\n arg_7.name = arg_3\n arg_7.data = arg_4\n return arg_7"} +{"_id": "doc_6638", "title": "", "text": "def Func(arg_0, arg_1='RGB'):\n \"\"\"Return an image in the given mode.\"\"\"\n deprecated.deprecated('util.gif.Funcl')\n\n return arg_0 if (arg_0.mode == arg_1) else arg_0.convert(arg_1=arg_1)"} +{"_id": "doc_6639", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"\n Given an animated GIF, return a list with a colorlist for each frame.\n \"\"\"\n deprecated.deprecated('util.gif.Func')\n\n from PIL import ImageSequence\n\n arg_3 = ImageSequence.Iterator(arg_0)\n return [image_to_colorlist(arg_4, arg_1) for arg_4 in arg_3]"} +{"_id": "doc_6640", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse a string representing a time interval or duration into seconds,\n or raise an exception\n\n :param str s: a string representation of a time interval\n :raises ValueError: if ``s`` can't be interpreted as a duration\n\n \"\"\"\n\n arg_1 = arg_0.replace(',', ' ').split()\n if not arg_1:\n raise ValueError('Cannot Func empty string')\n\n arg_2 = []\n for arg_3 in arg_1:\n arg_4 = PART_MATCH(arg_3)\n arg_2.extend(arg_4.groups() if arg_4 else [arg_3])\n\n if len(arg_2) == 1:\n arg_2.append('s')\n\n if len(arg_2) % 2:\n raise ValueError('Malformed duration %s: %s: %s' % (arg_0, arg_1, arg_2))\n\n arg_5 = 0\n for arg_6, arg_7 in zip(*[iter(arg_2)] * 2):\n arg_6 = float(arg_6)\n if arg_6 < 0:\n raise ValueError('Durations cannot have negative components')\n arg_5 += arg_6 * _get_units(arg_7)\n\n return arg_5"} +{"_id": "doc_6641", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Stop the Runner if it's running.\n Called as a classmethod, Func the running instance if any.\n \"\"\"\n if arg_0.is_running:\n log.info('Stopping')\n arg_0.is_running = False\n arg_0.__class__._INSTANCE = None\n\n try:\n arg_0.thread and arg_0.thread.Func()\n except:\n log.error('Error Funcping thread')\n traceback.print_exc()\n arg_0.thread = None\n return True"} +{"_id": "doc_6642", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3='', arg_4=None, arg_5=(0, 0),\n arg_6=arg_7.Off, arg_9=255):\n \"\"\"Display an image on a matrix.\"\"\"\n arg_6 = color_scale(arg_6, arg_9)\n\n arg_10 = arg_4\n if arg_3 and not arg_10:\n from PIL import Image\n\n arg_10 = Image.open(arg_3)\n elif not arg_10:\n raise ValueError('Must provide either image_path or image_obj')\n\n arg_11 = min(arg_1 - arg_5[0], arg_10.size[0])\n arg_12 = min(arg_2 - arg_5[1], arg_10.size[1])\n arg_13 = arg_5[0]\n arg_14 = arg_5[1]\n\n for arg_15 in range(arg_13, arg_11 + arg_13):\n for arg_16 in range(arg_14, arg_12 + arg_14):\n arg_17, arg_18, arg_19, arg_20 = (0, 0, 0, 255)\n arg_21 = arg_10.getpixel((arg_15 - arg_13, arg_16 - arg_14))\n\n if isinstance(arg_21, int):\n raise ValueError('Image must be in RGB or RGBA format!')\n if len(arg_21) == 3:\n arg_17, arg_18, arg_19 = arg_21\n elif len(arg_21) == 4:\n arg_17, arg_18, arg_19, arg_20 = arg_21\n else:\n raise ValueError('Image must be in RGB or RGBA format!')\n\n if arg_20 == 0:\n arg_17, arg_18, arg_19 = arg_6\n else:\n arg_17, arg_18, arg_19 = color_scale((arg_17, arg_18, arg_19), arg_20)\n\n if arg_9 != 255:\n arg_17, arg_18, arg_19 = color_scale((arg_17, arg_18, arg_19), 
arg_9)\n\n arg_0(arg_15, arg_16, (arg_17, arg_18, arg_19))"} +{"_id": "doc_6643", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Every other column is indexed in reverse.\"\"\"\n if arg_0 % 2:\n return arg_0, arg_2.rows - 1 - arg_1\n return arg_0, arg_1"} +{"_id": "doc_6644", "title": "", "text": "def Func(arg_0=None, **arg_1):\n \"\"\"Return a Palette but don't take into account Pallete Names.\"\"\"\n if isinstance(arg_0, str):\n arg_0 = _split_colors(arg_0)\n else:\n arg_0 = to_triplets(arg_0 or ())\n\n arg_0 = (color(c) for c in arg_0 or ())\n return palette.Palette(arg_0, **arg_1)"} +{"_id": "doc_6645", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=True, arg_3=0, arg_4=0, arg_5=False):\n \"\"\"Helper method to generate X,Y coordinate maps for strips\"\"\"\n arg_6 = []\n for arg_7 in range(arg_1):\n if not arg_2 or arg_7 % 2 == 0:\n arg_6.append([(arg_0 * arg_7) + arg_8 + arg_3 for arg_8 in range(arg_0)])\n else:\n arg_6.append([arg_0 * (arg_7 + 1) - 1 - arg_8 + arg_3 for arg_8 in range(arg_0)])\n\n arg_6 = rotate_and_flip(arg_6, arg_4, arg_5)\n\n return arg_6"} +{"_id": "doc_6646", "title": "", "text": "def Func(*arg_0, arg_1=None, arg_2=None, arg_3=None, **arg_4):\n \"\"\"Make an object from a symbol.\"\"\"\n arg_3 = arg_3 or import_symbol(arg_1, arg_2)\n arg_5 = getattr(arg_3, 'FIELD_TYPES', fields.FIELD_TYPES)\n return arg_3(*arg_0, **fields.component(arg_4, arg_5))"} +{"_id": "doc_6647", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n For the duration of this context manager, put the PID for this process into\n `pid_filename`, and then remove the file at the end.\n \"\"\"\n arg_0 = arg_0 or DEFAULT_PID_FILENAME\n if os.path.exists(arg_0):\n arg_1 = open(arg_0).read(16)\n log.warning('pid_filename %s already exists with contents %s',\n arg_0, arg_1)\n\n with open(arg_0, 'w') as fp:\n fp.write(str(os.getpid()))\n fp.write('\\n')\n\n try:\n yield\n finally:\n try:\n os.remove(arg_0)\n except Exception as e:\n log.error('Got an exception %s deleting the pid_filename %s',\n e, arg_0)"} +{"_id": "doc_6648", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Return an integer index or None\"\"\"\n if arg_0.begin <= arg_1 <= arg_0.end:\n Func = arg_1 - arg_0.BEGIN - arg_0.offset\n if arg_2 is None:\n arg_2 = arg_0.full_range()\n else:\n arg_2 = min(arg_2, arg_0.full_range())\n\n if 0 <= Func < arg_2:\n return Func"} +{"_id": "doc_6649", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"\n Returns a generator with the elements \"data\" taken by offset, restricted\n by self.begin and self.end, and padded on either end by `pad` to get\n back to the original length of `data`\n \"\"\"\n for arg_3 in range(arg_0.BEGIN, arg_0.END + 1):\n arg_4 = arg_0.index(arg_3, len(arg_1))\n yield arg_2 if arg_4 is None else arg_1[arg_4]"} +{"_id": "doc_6650", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Cleans up all sorts of special cases that humans want when entering\n an animation from a yaml file.\n\n 1. Loading it from a file\n 2. Using just a typename instead of a dict\n 3. A single dict representing an animation, with a run: section.\n 4. (Legacy) Having a dict with parallel elements run: and animation:\n 5. 
(Legacy) A tuple or list: (animation, run )\n\n \"\"\"\n arg_0 = load.load_if_filename(arg_0) or arg_0\n\n if isinstance(arg_0, str):\n arg_2 = {'typename': arg_0}\n\n elif not isinstance(arg_0, dict):\n raise TypeError('Unexpected type %s in collection' % type(arg_0))\n\n elif 'typename' in arg_0 or 'animation' not in arg_0:\n arg_2 = arg_0\n\n else:\n arg_2 = arg_0.pop('animation', {})\n if isinstance(arg_2, str):\n arg_2 = {'typename': arg_2}\n\n arg_2['run'] = arg_0.pop('run', {})\n if arg_0:\n raise ValueError('Extra animation fields: ' + ', '.join(arg_0))\n\n arg_2.setdefault('typename', DEFAULT_ANIMATION)\n arg_2 = construct.to_type_constructor(arg_2, ANIMATION_PATH)\n arg_3 = arg_2.setdefault('datatype', failed.Failed)\n arg_2.setdefault('name', arg_3.__name__)\n\n # Children without fps or sleep_time get it from their parents.\n # TODO: We shouldn't have to rewrite our descriptions here! The\n # animation engine should be smart enough to figure out the right\n # speed to run a subanimation without a run: section.\n arg_4 = arg_2.setdefault('run', {})\n arg_5 = arg_1.setdefault('run', {})\n if not ('fps' in arg_4 or 'sleep_time' in arg_4):\n if 'fps' in arg_5:\n arg_4.update(fps=arg_5['fps'])\n elif 'sleep_time' in arg_5:\n arg_4.update(sleep_time=arg_5['sleep_time'])\n\n return arg_2"} +{"_id": "doc_6651", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Give each animation a unique, mutable layout so they can run\n independently.\n \"\"\"\n # See #868\n for arg_2, arg_3 in enumerate(arg_0.animations):\n arg_3.layout = arg_3.layout.clone()\n if arg_1 and arg_2:\n arg_3.preclear = False"} +{"_id": "doc_6652", "title": "", "text": "def Func():\n \"\"\"\n If a project has a Curses driver, the section \"Func\" in the section\n \"run\" must be \"bibliopixel.drivers.curses.Curses.Func\".\n\n \"\"\"\n if not _curses:\n # https://stackoverflow.com/a/1325587/43839\n if os.name == 'nt':\n raise ValueError('curses is not supported under Windows')\n raise ValueError('Your platform does not support curses.')\n try:\n arg_0 = next(iter(Curses.DRIVERS))\n except:\n raise ValueError('No Curses driver in project')\n\n _curses.wrapper(arg_0.run_in_curses)"} +{"_id": "doc_6653", "title": "", "text": "def Func(*arg_0):\n \"\"\"\n Merge zero or more dictionaries representing projects with the default\n project dictionary and return the result\n \"\"\"\n arg_1 = {}\n for arg_2 in arg_0:\n for arg_3, arg_4 in (arg_2 or {}).items():\n if arg_3 not in PROJECT_SECTIONS:\n raise ValueError(UNKNOWN_SECTION_ERROR % arg_3)\n\n if arg_4 is None:\n arg_1[arg_3] = type(arg_1[arg_3])()\n continue\n\n if arg_3 in NOT_MERGEABLE + SPECIAL_CASE:\n arg_1[arg_3] = arg_4\n continue\n\n if arg_4 and not isinstance(arg_4, (dict, str)):\n arg_5 = arg_4.__class__.__name__\n raise ValueError(SECTION_ISNT_DICT_ERROR % (arg_3, arg_5))\n\n if arg_3 == 'animation':\n # Useful hack to allow you to load projects as animations.\n arg_6 = load.load_if_filename(arg_4)\n if arg_6:\n arg_4 = arg_6.get('animation', {})\n arg_4['run'] = arg_6.get('run', {})\n\n arg_7 = arg_1.setdefault(arg_3, {})\n arg_4 = construct.to_type(arg_4)\n for arg_8, arg_9 in arg_4.items():\n if arg_9 is None:\n arg_7.pop(arg_8, None)\n else:\n arg_7[arg_8] = arg_9\n return arg_1"} +{"_id": "doc_6654", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Guess the type of a file.\n\n If allow_directory is False, don't consider the possibility that the\n file is a directory.\n \"\"\"\n if arg_1.endswith('.ipynb'):\n return 'notebook'\n 
elif arg_2 and arg_0.dir_exists(arg_1):\n return 'directory'\n else:\n return 'file'"} +{"_id": "doc_6655", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Get a notebook from the database.\n \"\"\"\n with arg_0.engine.begin() as db:\n try:\n arg_4 = get_file(\n db,\n arg_0.user_id,\n arg_1,\n arg_2,\n arg_0.crypto.decrypt,\n )\n except NoSuchFile:\n arg_0.no_such_entity(arg_1)\n\n return arg_0._notebook_model_from_db(arg_4, arg_2)"} +{"_id": "doc_6656", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Apply _notebook_model_from_db or _file_model_from_db to each entry\n in file_records, depending on the result of `guess_type`.\n \"\"\"\n for arg_2 in arg_1:\n arg_3 = arg_0.guess_type(arg_2['name'], allow_directory=False)\n if arg_3 == 'notebook':\n yield arg_0._notebook_model_from_db(arg_2, False)\n elif arg_3 == 'file':\n yield arg_0._file_model_from_db(arg_2, False, None)\n else:\n arg_0.do_500(\"Unknown file type %s\" % arg_3)"} +{"_id": "doc_6657", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Build a directory model from database directory record.\n \"\"\"\n arg_3 = base_directory_model(to_api_path(arg_1['name']))\n if arg_2:\n arg_3['format'] = 'json'\n arg_3['content'] = list(\n chain(\n arg_0._convert_file_records(arg_1['files']),\n (\n arg_0.Func(subdir, False)\n for subdir in arg_1['subdirs']\n ),\n )\n )\n return arg_3"} +{"_id": "doc_6658", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Save a notebook.\n\n Returns a validation message.\n \"\"\"\n arg_4 = from_dict(arg_2['content'])\n arg_0.check_and_sign(arg_4, arg_3)\n save_file(\n arg_1,\n arg_0.user_id,\n arg_3,\n writes_base64(arg_4),\n arg_0.crypto.encrypt,\n arg_0.max_file_size_bytes,\n )\n # It's awkward that this writes to the model instead of returning.\n arg_0.validate_notebook_model(arg_2)\n return arg_2.get('message')"} +{"_id": "doc_6659", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Save a non-notebook file.\n \"\"\"\n save_file(\n arg_1,\n arg_0.user_id,\n arg_3,\n to_b64(arg_2['content'], arg_2.get('format', None)),\n arg_0.crypto.encrypt,\n arg_0.max_file_size_bytes,\n )\n return None"} +{"_id": "doc_6660", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Rename object from old_path to path.\n\n NOTE: This method is unfortunately named on the base class. 
It\n actually moves a file or a directory.\n \"\"\"\n with arg_0.engine.begin() as db:\n try:\n if arg_0.file_exists(arg_1):\n Func(db, arg_0.user_id, arg_1, arg_2)\n elif arg_0.dir_exists(arg_1):\n rename_directory(db, arg_0.user_id, arg_1, arg_2)\n else:\n arg_0.no_such_entity(arg_2)\n except (FileExists, DirectoryExists):\n arg_0.already_exists(arg_2)\n except RenameRoot as e:\n arg_0.do_409(str(e))"} +{"_id": "doc_6661", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete object corresponding to path.\n \"\"\"\n if arg_0.file_exists(arg_1):\n arg_0._delete_non_directory(arg_1)\n elif arg_0.dir_exists(arg_1):\n arg_0._delete_directory(arg_1)\n else:\n arg_0.no_such_entity(arg_1)"} +{"_id": "doc_6662", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add a new user if they don't already exist.\n \"\"\"\n with ignore_unique_violation():\n arg_0.execute(\n users.insert().values(id=arg_1),\n )"} +{"_id": "doc_6663", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete a user and all of their resources.\n \"\"\"\n arg_0.execute(files.delete().where(\n files.c.user_id == arg_1\n ))\n arg_0.execute(directories.delete().where(\n directories.c.user_id == arg_1\n ))\n arg_0.execute(users.delete().where(\n users.c.id == arg_1\n ))"} +{"_id": "doc_6664", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create a directory.\n \"\"\"\n arg_3 = from_api_dirname(arg_2)\n if arg_3 == '/':\n arg_4 = null()\n arg_5 = null()\n else:\n # Convert '/foo/bar/buzz/' -> '/foo/bar/'\n arg_4 = arg_3[:arg_3.rindex('/', 0, -1) + 1]\n arg_5 = arg_1\n\n arg_0.execute(\n directories.insert().values(\n arg_3=arg_3,\n arg_1=arg_1,\n arg_4=arg_4,\n arg_5=arg_5,\n )\n )"} +{"_id": "doc_6665", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return a WHERE clause that matches entries in a directory.\n\n Parameterized on table because this clause is re-used between files and\n directories.\n \"\"\"\n return and_(\n arg_0.c.parent_name == arg_2,\n arg_0.c.user_id == arg_1,\n )"} +{"_id": "doc_6666", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Delete a directory.\n \"\"\"\n arg_3 = from_api_dirname(arg_2)\n try:\n arg_4 = arg_0.execute(\n directories.delete().where(\n and_(\n directories.c.user_id == arg_1,\n directories.c.name == arg_3,\n )\n )\n )\n except IntegrityError as error:\n if is_foreign_key_violation(error):\n raise DirectoryNotEmpty(arg_2)\n else:\n raise\n\n arg_5 = arg_4.rowcount\n if not arg_5:\n raise NoSuchDirectory(arg_2)\n\n return arg_5"} +{"_id": "doc_6667", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Internal implementation of dir_exists.\n\n Expects a db-style path name.\n \"\"\"\n return arg_0.execute(\n select(\n [func.count(directories.c.name)],\n ).where(\n and_(\n directories.c.user_id == arg_1,\n directories.c.name == arg_2,\n ),\n )\n ).scalar() != 0"} +{"_id": "doc_6668", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return files in a directory.\n \"\"\"\n arg_3 = _file_default_fields()\n arg_4 = arg_0.execute(\n select(\n arg_3,\n ).where(\n _is_in_directory(files, arg_1, arg_2),\n ).order_by(\n files.c.user_id,\n files.c.parent_name,\n files.c.name,\n files.c.created_at,\n ).distinct(\n files.c.user_id, files.c.parent_name, files.c.name,\n )\n )\n return [to_dict_no_content(arg_3, arg_5) for arg_5 in arg_4]"} +{"_id": "doc_6669", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return subdirectories of a directory.\n \"\"\"\n arg_3 = _directory_default_fields()\n 
arg_4 = arg_0.execute(\n select(\n arg_3,\n ).where(\n _is_in_directory(directories, arg_1, arg_2),\n )\n )\n return [to_dict_no_content(arg_3, arg_5) for arg_5 in arg_4]"} +{"_id": "doc_6670", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Return a SELECT statement that returns the latest N versions of a file.\n \"\"\"\n arg_4 = select(arg_2).where(\n _file_where(arg_0, arg_1),\n ).order_by(\n _file_creation_order(),\n )\n if arg_3 is not None:\n arg_4 = arg_4.limit(arg_3)\n\n return arg_4"} +{"_id": "doc_6671", "title": "", "text": "def Func():\n \"\"\"\n Default fields returned by a file query.\n \"\"\"\n return [\n files.c.name,\n files.c.created_at,\n files.c.parent_name,\n ]"} +{"_id": "doc_6672", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Get file data for the given user_id and path.\n\n Include content only if include_content=True.\n \"\"\"\n arg_5 = _file_default_fields()\n if arg_3:\n arg_5.append(files.c.content)\n\n return _Func(arg_0, arg_1, arg_2, arg_5, arg_4)"} +{"_id": "doc_6673", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the value in the 'id' column for the file with the given\n user_id and path.\n \"\"\"\n return _get_file(\n arg_0,\n arg_1,\n arg_2,\n [files.c.id],\n unused_decrypt_func,\n )['id']"} +{"_id": "doc_6674", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Check if a file exists.\n \"\"\"\n try:\n get_file(\n arg_0,\n arg_1,\n arg_2,\n include_content=False,\n decrypt_func=unused_decrypt_func,\n )\n return True\n except NoSuchFile:\n return False"} +{"_id": "doc_6675", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Rename a directory.\n \"\"\"\n arg_4 = from_api_dirname(arg_2)\n arg_5 = from_api_dirname(arg_3)\n\n if arg_4 == '/':\n raise RenameRoot('Renaming the root directory is not permitted.')\n\n # Overwriting existing directories is disallowed.\n if _dir_exists(arg_0, arg_1, arg_5):\n raise DirectoryExists(arg_3)\n\n # Set this foreign key constraint to deferred so it's not violated\n # when we run the first statement to update the name of the directory.\n arg_0.execute('SET CONSTRAINTS '\n 'pgcontents.directories_parent_user_id_fkey DEFERRED')\n\n # Update name column for the directory that's being renamed\n arg_0.execute(\n directories.update().where(\n and_(\n directories.c.user_id == arg_1,\n directories.c.name == arg_4,\n )\n ).values(\n name=arg_5,\n )\n )\n\n # Update the name and parent_name of any descendant directories. 
Do\n # this in a single statement so the non-deferrable check constraint\n # is satisfied.\n arg_0.execute(\n directories.update().where(\n and_(\n directories.c.user_id == arg_1,\n directories.c.name.startswith(arg_4),\n directories.c.parent_name.startswith(arg_4),\n )\n ).values(\n name=func.concat(\n arg_5,\n func.right(directories.c.name, -func.length(arg_4))\n ),\n parent_name=func.concat(\n arg_5,\n func.right(\n directories.c.parent_name,\n -func.length(arg_4)\n )\n ),\n )\n )"} +{"_id": "doc_6676", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"\n Save a file.\n\n TODO: Update-then-insert is probably cheaper than insert-then-update.\n \"\"\"\n arg_3 = preprocess_incoming_content(\n arg_3,\n arg_4,\n arg_5,\n )\n arg_6, arg_7 = split_api_filepath(arg_2)\n with arg_0.begin_nested() as savepoint:\n try:\n arg_8 = arg_0.execute(\n files.insert().values(\n arg_7=arg_7,\n arg_1=arg_1,\n parent_name=arg_6,\n arg_3=arg_3,\n )\n )\n except IntegrityError as error:\n # The file already exists, so overwrite its content with the newer\n # version.\n if is_unique_violation(error):\n savepoint.rollback()\n arg_8 = arg_0.execute(\n files.update().where(\n _file_where(arg_1, arg_2),\n ).values(\n arg_3=arg_3,\n created_at=func.now(),\n )\n )\n else:\n # Unknown error. Reraise\n raise\n\n return arg_8"} +{"_id": "doc_6677", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=None):\n \"\"\"\n Create a generator of decrypted files.\n\n Files are yielded in ascending order of their timestamp.\n\n This function selects all current notebooks (optionally, falling within a\n datetime range), decrypts them, and returns a generator yielding dicts,\n each containing a decoded notebook and metadata including the user,\n filepath, and timestamp.\n\n Parameters\n ----------\n engine : SQLAlchemy.engine\n Engine encapsulating database connections.\n crypto_factory : function[str -> Any]\n A function from user_id to an object providing the interface required\n by PostgresContentsManager.crypto. 
Results of this will be used for\n decryption of the selected notebooks.\n min_dt : datetime.datetime, optional\n Minimum last modified datetime at which a file will be included.\n max_dt : datetime.datetime, optional\n Last modified datetime at and after which a file will be excluded.\n logger : Logger, optional\n \"\"\"\n return _generate_notebooks(files, files.c.created_at,\n arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_6678", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete all database records for the given user_id.\n \"\"\"\n arg_0.execute(\n remote_checkpoints.delete().where(\n remote_checkpoints.c.user_id == arg_1,\n )\n )"} +{"_id": "doc_6679", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5):\n \"\"\"\n Re-encrypt a row from ``table`` with ``id`` of ``row_id``.\n \"\"\"\n arg_6 = (select([arg_1.c.content])\n .with_for_update()\n .where(arg_1.c.id == arg_2))\n\n [(arg_7,)] = arg_0.execute(arg_6)\n\n arg_5.info(\"Begin encrypting %s row %s.\", arg_1.name, arg_2)\n arg_0.execute(\n arg_1\n .update()\n .where(arg_1.c.id == arg_2)\n .values(arg_7=arg_4(arg_3(arg_7)))\n )\n arg_5.info(\"Done encrypting %s row %s.\", arg_1.name, arg_2)"} +{"_id": "doc_6680", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert a secret key and a user ID into an encryption key to use with a\n ``cryptography.fernet.Fernet``.\n\n Taken from\n https://cryptography.io/en/latest/fernet/#using-passwords-with-fernet\n\n Parameters\n ----------\n password : unicode\n ascii-encodable key to derive\n user_id : unicode\n ascii-encodable user_id to use as salt\n \"\"\"\n arg_0 = ascii_unicode_to_bytes(arg_0)\n arg_1 = ascii_unicode_to_bytes(arg_1)\n\n arg_2 = PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=arg_1,\n iterations=100000,\n backend=default_backend(),\n )\n return base64.urlsafe_b64encode(arg_2.derive(arg_0))"} +{"_id": "doc_6681", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Derive a list of per-user Fernet keys from a list of master keys and a\n username.\n\n If a None is encountered in ``passwords``, it is forwarded.\n\n Parameters\n ----------\n passwords : list[unicode]\n List of ascii-encodable keys to derive.\n user_id : unicode or None\n ascii-encodable user_id to use as salt\n \"\"\"\n # Normally I wouldn't advocate for these kinds of assertions, but we really\n # really really don't want to mess up deriving encryption keys.\n assert isinstance(arg_0, (list, tuple)), \\\n \"Expected list or tuple of keys, got %s.\" % type(arg_0)\n\n def derive_single_allow_none(arg_2):\n if arg_2 is None:\n return None\n return derive_single_fernet_key(arg_2, arg_1).decode('ascii')\n\n return list(map(derive_single_allow_none, arg_0))"} +{"_id": "doc_6682", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create and return a function suitable for passing as a crypto_factory to\n ``pgcontents.utils.sync.reencrypt_all_users``\n\n The factory here returns a ``FernetEncryption`` that uses a key derived\n from ``password`` and salted with the supplied user_id.\n \"\"\"\n @memoize_single_arg\n def factory(arg_1):\n return FernetEncryption(\n Fernet(derive_single_fernet_key(arg_0, arg_1))\n )\n return factory"} +{"_id": "doc_6683", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator memoizing a single-argument function\n \"\"\"\n arg_1 = {}\n\n @wraps(arg_0)\n def memoized_f(arg_2):\n try:\n return arg_1[arg_2]\n except KeyError:\n arg_3 = arg_1[arg_2] = arg_0(arg_2)\n return arg_3\n return memoized_f"} +{"_id": 
"doc_6684", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the name from a column-like SQLAlchemy expression.\n\n Works for Columns and Cast expressions.\n \"\"\"\n if isinstance(arg_0, Column):\n return arg_0.name\n elif isinstance(arg_0, Cast):\n return arg_0.clause.name"} +{"_id": "doc_6685", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert a SQLAlchemy row that does not contain a 'content' field to a dict.\n\n If row is None, return None.\n\n Raises AssertionError if there is a field named 'content' in ``fields``.\n \"\"\"\n assert(len(arg_0) == len(arg_1))\n\n arg_2 = list(map(_get_name, arg_0))\n assert 'content' not in arg_2, \"Unexpected content field.\"\n\n return dict(zip(arg_2, arg_1))"} +{"_id": "doc_6686", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create a checkpoint of the current state of a notebook\n\n Returns a checkpoint_id for the new checkpoint.\n \"\"\"\n arg_3 = writes_base64(arg_1)\n with arg_0.engine.begin() as db:\n return save_remote_checkpoint(\n db,\n arg_0.user_id,\n arg_2,\n arg_3,\n arg_0.crypto.encrypt,\n arg_0.max_file_size_bytes,\n )"} +{"_id": "doc_6687", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Create a checkpoint of the current state of a file\n\n Returns a checkpoint_id for the new checkpoint.\n \"\"\"\n try:\n arg_4 = to_b64(arg_1, arg_2)\n except ValueError as e:\n arg_0.do_400(str(e))\n with arg_0.engine.begin() as db:\n return save_remote_checkpoint(\n db,\n arg_0.user_id,\n arg_3,\n arg_4,\n arg_0.crypto.encrypt,\n arg_0.max_file_size_bytes,\n )"} +{"_id": "doc_6688", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"delete a checkpoint for a file\"\"\"\n with arg_0.engine.begin() as db:\n return delete_single_remote_checkpoint(\n db, arg_0.user_id, arg_2, arg_1,\n )"} +{"_id": "doc_6689", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get the content of a checkpoint.\"\"\"\n with arg_0.engine.begin() as db:\n return get_remote_checkpoint(\n db,\n arg_0.user_id,\n arg_2,\n arg_1,\n arg_0.crypto.decrypt,\n )['content']"} +{"_id": "doc_6690", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a list of checkpoints for a given file\"\"\"\n with arg_0.engine.begin() as db:\n return list_remote_checkpoints(db, arg_0.user_id, arg_1)"} +{"_id": "doc_6691", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Rename all checkpoints for old_path to new_path.\"\"\"\n with arg_0.engine.begin() as db:\n return move_remote_checkpoints(\n db,\n arg_0.user_id,\n arg_1,\n arg_2,\n )"} +{"_id": "doc_6692", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delete all checkpoints for the given path.\"\"\"\n with arg_0.engine.begin() as db:\n delete_remote_checkpoints(db, arg_0.user_id, arg_1)"} +{"_id": "doc_6693", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Resolve a path based on a dictionary of manager prefixes.\n\n Returns a triple of (prefix, manager, manager_relative_path).\n \"\"\"\n arg_0 = normalize_api_path(arg_0)\n arg_2 = arg_0.split('/')\n\n # Try to find a sub-manager for the first subdirectory.\n arg_3 = arg_1.get(arg_2[0])\n if arg_3 is not None:\n return arg_2[0], arg_3, '/'.join(arg_2[1:])\n\n # Try to find use the root manager, if one was supplied.\n arg_3 = arg_1.get('')\n if arg_3 is not None:\n return '', arg_3, arg_0\n\n raise HTTPError(\n 404,\n \"Couldn't resolve path [{path}] and \"\n \"no root manager supplied!\".format(arg_0=arg_0)\n )"} +{"_id": "doc_6694", "title": "", "text": "def Func(arg_0, arg_1):\n 
\"\"\"\n Prefix all path entries in model with the given prefix.\n \"\"\"\n if not isinstance(arg_1, dict):\n raise TypeError(\"Expected dict for model, got %s\" % type(arg_1))\n\n # We get unwanted leading/trailing slashes if prefix or model['path'] are\n # '', both of which are legal values.\n arg_1['path'] = '/'.join((arg_0, arg_1['path'])).strip('/')\n if arg_1['type'] in ('notebook', 'file'):\n return arg_1\n\n if arg_1['type'] != 'directory':\n raise ValueError(\"Unknown model type %s.\" % type(arg_1))\n\n arg_2 = arg_1.get('content', None)\n if arg_2 is not None:\n for arg_3 in arg_2:\n Func(arg_0, arg_3)\n\n return arg_1"} +{"_id": "doc_6695", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Decorator for methods that accept path as a first argument.\n \"\"\"\n def _wrapper(arg_2, *arg_3, **arg_4):\n arg_5, arg_3 = _get_arg('path', arg_3, arg_4)\n arg_6, arg_7, arg_8 = _resolve_path(arg_5, arg_2.managers)\n arg_9 = getattr(arg_7, arg_0)(arg_8, *arg_3, **arg_4)\n if arg_1 and arg_6:\n return _apply_prefix(arg_6, arg_9)\n else:\n return arg_9\n\n return _wrapper"} +{"_id": "doc_6696", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Parameterized decorator for methods that accept path as a second\n argument.\n \"\"\"\n def _wrapper(arg_3, arg_4=arg_1, **arg_5):\n arg_6, arg_7, arg_8 = _resolve_path(arg_4, arg_3.managers)\n arg_9 = getattr(arg_7, arg_0)(arg_4=arg_8, **arg_5)\n if arg_2 and arg_6:\n return _apply_prefix(arg_6, arg_9)\n else:\n return arg_9\n return _wrapper"} +{"_id": "doc_6697", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Strip slashes from directories before updating.\n \"\"\"\n for arg_4 in arg_3:\n if '/' in arg_4:\n raise ValueError(\n \"Expected directory names w/o slashes. Got [%s]\" % arg_4\n )\n arg_0.managers = {k.strip('/'): v for k, v in arg_3.items()}"} +{"_id": "doc_6698", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Resolve paths with '..' 
to normalized paths, raising an error if the final\n result is outside root.\n \"\"\"\n arg_1 = posixpath.normpath(arg_0.strip('/'))\n if arg_1 == '.':\n arg_1 = ''\n elif arg_1.startswith('..'):\n raise PathOutsideRoot(arg_1)\n return arg_1"} +{"_id": "doc_6699", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Decode base64 data of unknown format.\n\n Attempts to interpret data as utf-8, falling back to ascii on failure.\n \"\"\"\n arg_2 = b64decode(arg_1)\n try:\n return (arg_2.decode('utf-8'), 'text')\n except UnicodeError:\n pass\n return arg_1.decode('ascii'), 'base64'"} +{"_id": "doc_6700", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Decode base64 content for a file.\n\n format:\n If 'text', the contents will be decoded as UTF-8.\n If 'base64', do nothing.\n If not specified, try to decode as UTF-8, and fall back to base64\n\n Returns a triple of decoded_content, format, and mimetype.\n \"\"\"\n arg_3 = {\n 'base64': lambda arg_0, arg_1: (arg_1.decode('ascii'), 'base64'),\n 'text': _decode_text_from_base64,\n None: _decode_unknown_from_base64,\n }\n\n try:\n arg_4, arg_5 = arg_3[arg_2](arg_0, arg_1)\n except HTTPError:\n # Pass through HTTPErrors, since we intend for them to bubble all the\n # way back to the API layer.\n raise\n except Exception as e:\n # Anything else should be wrapped in a CorruptedFile, since it likely\n # indicates misconfiguration of encryption.\n raise CorruptedFile(e)\n\n arg_6 = {\n 'text': 'text/plain',\n 'base64': 'application/octet-stream',\n }\n arg_7 = mimetypes.guess_type(arg_0)[0] or arg_6[arg_5]\n\n return arg_4, arg_5, arg_7"} +{"_id": "doc_6701", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return an iterable of all prefix directories of path, descending from root.\n \"\"\"\n arg_1 = posixpath.dirname\n arg_0 = arg_0.strip('/')\n arg_2 = []\n while arg_0 != '':\n arg_0 = arg_1(arg_0)\n arg_2.append(arg_0)\n return reversed(arg_2)"} +{"_id": "doc_6702", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create a user.\n \"\"\"\n PostgresCheckpoints(\n arg_0=arg_0,\n user_id=arg_1,\n Func_on_startup=True,\n )"} +{"_id": "doc_6703", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Split an iterable of models into a list of file paths and a list of\n directory paths.\n \"\"\"\n arg_1 = []\n arg_2 = []\n for arg_3 in arg_0:\n if arg_3['type'] == 'directory':\n arg_1.append(arg_3['path'])\n else:\n arg_2.append(arg_3['path'])\n return arg_1, arg_2"} +{"_id": "doc_6704", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Recursive helper for walk.\n \"\"\"\n for arg_2 in arg_1:\n arg_3 = arg_0.get(\n arg_2,\n content=True,\n type='directory',\n )['content']\n arg_1, arg_4 = map(sorted, _separate_dirs_files(arg_3))\n yield arg_2, arg_1, arg_4\n if arg_1:\n for arg_5 in Func(arg_0, arg_1):\n yield arg_5"} +{"_id": "doc_6705", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Iterate over all files visible to ``mgr``.\n \"\"\"\n for arg_1, arg_2, arg_3 in Func(arg_0):\n for arg_4 in arg_3:\n yield arg_4"} +{"_id": "doc_6706", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Iterate over the contents of all files visible to ``mgr``.\n \"\"\"\n for arg_1, arg_1, arg_2 in walk(arg_0):\n for arg_3 in arg_2:\n yield arg_0.get(arg_3, content=True)"} +{"_id": "doc_6707", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"\n Re-encrypt data for all users.\n\n This function is idempotent, meaning that it should be possible to apply\n the same re-encryption process multiple times without having any 
effect on\n the database. Idempotency is achieved by first attempting to decrypt with\n the old crypto and falling back to the new crypto on failure.\n\n An important consequence of this strategy is that **decrypting** a database\n is not supported with this function, because ``NoEncryption.decrypt``\n always succeeds. To decrypt an already-encrypted database, use\n ``unencrypt_all_users`` instead.\n\n It is, however, possible to perform an initial encryption of a database by\n passing a function returning a ``NoEncryption`` as ``old_crypto_factory``.\n\n Parameters\n ----------\n engine : SQLAlchemy.engine\n Engine encapsulating database connections.\n old_crypto_factory : function[str -> Any]\n A function from user_id to an object providing the interface required\n by PostgresContentsManager.crypto. Results of this will be used for\n decryption of existing database content.\n new_crypto_factory : function[str -> Any]\n A function from user_id to an object providing the interface required\n by PostgresContentsManager.crypto. Results of this will be used for\n re-encryption of database content.\n\n This **must not** return instances of ``NoEncryption``. Use\n ``unencrypt_all_users`` if you want to unencrypt a database.\n logger : logging.Logger, optional\n A logger to user during re-encryption.\n\n See Also\n --------\n reencrypt_user\n unencrypt_all_users\n \"\"\"\n arg_3.info(\"Beginning re-encryption for all users.\")\n for arg_4 in all_user_ids(arg_0):\n reencrypt_single_user(\n arg_0,\n arg_4,\n old_crypto=arg_1(arg_4),\n new_crypto=arg_2(arg_4),\n arg_3=arg_3,\n )\n arg_3.info(\"Finished re-encryption for all users.\")"} +{"_id": "doc_6708", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Re-encrypt all files and checkpoints for a single user.\n \"\"\"\n # Use FallbackCrypto so that we're re-entrant if we halt partway through.\n arg_5 = FallbackCrypto([arg_3, arg_2])\n\n reencrypt_user_content(\n arg_0=arg_0,\n arg_1=arg_1,\n old_decrypt_func=arg_5.decrypt,\n new_encrypt_func=arg_5.encrypt,\n arg_4=arg_4,\n )"} +{"_id": "doc_6709", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Unencrypt all files and checkpoints for a single user.\n \"\"\"\n reencrypt_user_content(\n arg_0=arg_0,\n arg_1=arg_1,\n old_decrypt_func=arg_2.decrypt,\n new_encrypt_func=lambda s: s,\n arg_3=arg_3,\n )"} +{"_id": "doc_6710", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Upgrade the given database to revision.\n \"\"\"\n with temp_alembic_ini(ALEMBIC_DIR_LOCATION, arg_0) as alembic_ini:\n subprocess.check_call(\n ['alembic', '-c', alembic_ini, 'Func', arg_1]\n )"} +{"_id": "doc_6711", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Santizes the data for the given block.\n If block has a matching embed serializer, use the `to_internal_value` method.\"\"\"\n\n arg_2 = arg_1.get('type', None)\n arg_3 = arg_1.get('data', {})\n arg_4 = arg_0.serializers.get(arg_2, None)\n\n if arg_4 is None:\n return arg_1\n\n arg_1['data'] = arg_4.to_internal_value(arg_3)\n\n return arg_1"} +{"_id": "doc_6712", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Queue an instance to be fetched from the database.\"\"\"\n\n arg_3 = arg_0.serializers.get(arg_1, None)\n\n if arg_3 is None:\n return\n\n arg_4 = arg_3.get_id(arg_2)\n\n if arg_1 not in arg_0.ids:\n arg_0.ids[arg_1] = []\n\n arg_0.ids[arg_1].append(arg_4)"} +{"_id": "doc_6713", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Insert a fetched instance into embed block.\"\"\"\n\n arg_2 = 
arg_1.get('type', None)\n arg_3 = arg_1.get('data', {})\n arg_4 = arg_0.serializers.get(arg_2, None)\n\n if arg_4 is None:\n return arg_1\n\n try:\n arg_5 = arg_4.get_id(arg_3)\n arg_6 = arg_0.instances[arg_2][arg_5]\n arg_3[arg_2] = arg_4.serialize(arg_6)\n except:\n arg_3[arg_2] = None\n\n arg_1['data'] = arg_3\n\n return arg_1"} +{"_id": "doc_6714", "title": "", "text": "def Func(arg_0):\n \"\"\"Load data in bulk for each embed block.\"\"\"\n\n for arg_1 in arg_0.ids.keys():\n arg_0.load_instances(arg_1, arg_0.ids[arg_1])"} +{"_id": "doc_6715", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Perform validation of the widget data\"\"\"\n\n from dispatch.theme import ThemeManager\n\n arg_2 = {}\n\n if arg_1.get('widget') is not None:\n\n try:\n arg_3 = ThemeManager.Widgets.get(arg_1['widget'])\n except WidgetNotFound as e:\n arg_2['widget'] = str(e)\n else:\n for arg_4 in arg_3.fields:\n\n arg_5 = arg_1['data'].get(arg_4.name)\n\n if arg_5 is not None:\n try:\n arg_4.Func(arg_5)\n except InvalidField as e:\n arg_2[arg_4.name] = str(e)\n elif arg_4.required:\n arg_2[arg_4.name] = '%s is required' % arg_4.label\n\n if arg_2:\n raise ValidationError(arg_2)\n\n return arg_1"} +{"_id": "doc_6716", "title": "", "text": "def Func(arg_0):\n \"\"\"Render HTML entry point for manager app.\"\"\"\n arg_1 = {\n 'api_url': settings.API_URL,\n 'app_js_bundle': 'manager-%s.js' % dispatch.__version__,\n 'app_css_bundle': 'manager-%s.css' % dispatch.__version__\n }\n \n return render_to_response('manager/index.html', arg_1)"} +{"_id": "doc_6717", "title": "", "text": "def Func(arg_0):\n \"\"\"Excludes fields that are included in the queryparameters\"\"\"\n arg_1 = arg_0.context.get('request')\n if arg_1:\n arg_2 = arg_1.query_params.get('exclude', None)\n if arg_2 is None: return\n \n arg_3 = arg_2.split(',')\n for arg_4 in arg_3:\n arg_0.fields.pop(arg_4)"} +{"_id": "doc_6718", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Get the latest article with the given primary key.\"\"\"\n if 'pk' in arg_2:\n arg_2['parent'] = arg_2['pk']\n arg_2['head'] = True\n del arg_2['pk']\n\n \"\"\"If the url requested includes the querystring parameters 'version' and 'preview_id',\n Func the article with the specified version and preview_id.\n\n Otherwise, Func the published version of the article.\n \"\"\"\n\n if 'request' in arg_2:\n\t \targ_3 = arg_2['request']\n\t \targ_4 = arg_3.GET.Func('version', None)\n\t \targ_5 = arg_3.GET.Func('preview_id', None)\n\n\t \tif (arg_4 is not None) and (arg_5 is not None):\n\t \t\targ_2['revision_id'] = arg_4\n\t \t\targ_2['preview_id'] = arg_5\n\t \t\tdel arg_2['is_published']\n\n\t \tdel arg_2['request']\n\n return super(PublishableManager, arg_0).Func(*arg_1, **arg_2)"} +{"_id": "doc_6719", "title": "", "text": "def Func(arg_0):\n \"\"\"Optionally restricts the returned articles by filtering against a `topic`\n query parameter in the URL.\"\"\"\n\n # Get base queryset from DispatchPublishableMixin\n arg_1 = arg_0.get_publishable_queryset()\n\n # Optimize queries by prefetching related data\n arg_1 = arg_1 \\\n .select_related('featured_image', 'featured_video', 'topic', 'section', 'subsection') \\\n .prefetch_related(\n 'tags',\n 'featured_image__image__authors',\n 'authors'\n )\n\n arg_1 = arg_1.order_by('-updated_at')\n\n arg_2 = arg_0.request.query_params.get('q', None)\n arg_3 = arg_0.request.query_params.get('section', None)\n arg_4 = arg_0.request.query_params.getlist('tags', None)\n arg_5 = arg_0.request.query_params.get('author', None)\n\n if arg_2 
is not None:\n arg_1 = arg_1.filter(headline__icontains=arg_2)\n\n if arg_3 is not None:\n arg_1 = arg_1.filter(section_id=arg_3)\n\n if arg_4 is not None:\n for arg_6 in arg_4:\n arg_1 = arg_1.filter(tags__id=arg_6)\n\n if arg_5 is not None:\n arg_1 = arg_1.filter(authors__person_id=arg_5)\n\n return arg_1"} +{"_id": "doc_6720", "title": "", "text": "def Func(arg_0):\n \"\"\"Only display unpublished content to authenticated users, filter by\n query parameter if present.\"\"\"\n\n # Get base queryset from DispatchPublishableMixin\n arg_1 = arg_0.get_publishable_queryset()\n\n arg_1 = arg_1.order_by('-updated_at')\n\n # Optionally filter by a query parameter\n arg_2 = arg_0.request.query_params.get('q')\n\n if arg_2:\n arg_1 = arg_1.filter(title__icontains=arg_2)\n\n return arg_1"} +{"_id": "doc_6721", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Overrides the default Func method to convert None values to False.\"\"\"\n\n arg_2 = super(NullBooleanField, arg_0).Func(arg_1)\n return True if arg_2 else False"} +{"_id": "doc_6722", "title": "", "text": "def Func(arg_0):\n \"\"\"Checks that the given widget contains the required fields\"\"\"\n\n if not has_valid_id(arg_0):\n raise InvalidWidget(\"%s must contain a valid 'id' attribute\" % arg_0.__name__)\n\n if not has_valid_name(arg_0):\n raise InvalidWidget(\"%s must contain a valid 'name' attribute\" % arg_0.__name__)\n\n if not has_valid_template(arg_0):\n raise InvalidWidget(\"%s must contain a valid 'template' attribute\" % arg_0.__name__)\n\n if not hasattr(arg_0, 'zones') or not arg_0.zones:\n raise InvalidWidget(\"%s must be compatible with at least one zone\" % arg_0.__name__)"} +{"_id": "doc_6723", "title": "", "text": "def Func(arg_0):\n \"\"\"Return True if id is a valid UUID, False otherwise.\"\"\"\n if not isinstance(arg_0, basestring):\n return False\n\n try:\n arg_1 = UUID(arg_0, version=4)\n except ValueError:\n return False\n\n return True"} +{"_id": "doc_6724", "title": "", "text": "def Func(arg_0):\n \"\"\"Raise a ValidationError if data does not match the author format.\"\"\"\n if not isinstance(arg_0, list):\n # Convert single instance to a list\n arg_0 = [arg_0]\n\n for arg_1 in arg_0:\n if 'person' not in arg_1:\n raise ValidationError('An author must contain a person.')\n if 'type' in arg_1 and not isinstance(arg_1['type'], basestring):\n # If type is defined, it should be a string\n raise ValidationError('The author type must be a string.')"} +{"_id": "doc_6725", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Save widget data for this zone.\"\"\"\n\n (arg_2, arg_3) = ZoneModel.objects.get_or_create(zone_id=arg_0.id)\n\n arg_2.widget_id = arg_1['widget']\n arg_2.data = arg_1['data']\n\n # Call widget before-Func hook on nested widgets\n for arg_6 in list(arg_2.data.keys()):\n if isinstance(arg_2.data[arg_6], dict) and ('id' in arg_2.data[arg_6].keys()) and ('data' in arg_2.data[arg_6].keys()):\n arg_2.data[arg_6]['data'] = arg_0.before_Func(arg_2.data[arg_6]['id'], arg_2.data[arg_6]['data'])\n\n # Call widget before-Func hook\n arg_2.data = arg_0.before_Func(arg_2.widget_id, arg_2.data)\n\n return arg_2.Func()"} +{"_id": "doc_6726", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"Renders the widget as HTML.\"\"\"\n arg_3 = loader.get_template(arg_0.template)\n\n if not arg_1:\n arg_1 = arg_0.context(arg_0.prepare_data())\n\n if arg_2 is not None:\n for arg_4, arg_5 in arg_2.iteritems():\n if arg_4 in arg_0.accepted_keywords:\n arg_1[arg_4] = arg_5\n\n return arg_3.Func(arg_1)"} 
+{"_id": "doc_6727", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Retrieves the settings for this integration as a dictionary.\n\n Removes all hidden fields if show_hidden=False\n \"\"\"\n arg_2 = Integration.objects.Func(arg_0.ID)\n\n if not arg_1:\n for arg_3 in arg_0.HIDDEN_FIELDS:\n arg_2.pop(arg_3, None)\n\n return arg_2"} +{"_id": "doc_6728", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Receive OAuth Func request from Facebook.\"\"\"\n\n # Get settings for this integration\n arg_3 = arg_0.get_settings(show_hidden=True)\n\n arg_4 = Facebook()\n\n arg_5 = {\n 'client_id': arg_3['client_id'],\n 'client_secret': arg_3['client_secret'],\n 'code': arg_2['code'],\n 'redirect_uri': arg_0.REDIRECT_URI\n }\n\n try:\n\n # Authenticate with Facebook\n arg_4.get_access_token(arg_5)\n\n # Fetch pages belonging to authenticated user\n arg_6 = arg_4.list_pages('me')\n\n except FacebookAPIError, e:\n raise IntegrationCallbackError(e.message)\n\n return {\n 'pages': arg_6\n }"} +{"_id": "doc_6729", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Updates settings for given integration.\"\"\"\n\n (arg_3, arg_4) = arg_0.get_or_create(arg_1=arg_1)\n\n try:\n arg_5 = json.loads(arg_3.settings)\n except ValueError:\n arg_5 = {}\n\n arg_5.update(arg_2)\n\n arg_3.settings = json.dumps(arg_5)\n\n arg_3.save()"} +{"_id": "doc_6730", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Handles requests to the user Func page.\"\"\"\n\n arg_2 = get_object_or_404(Invite.objects.all(), id=arg_1)\n\n if arg_2.expiration_date < timezone.now():\n arg_2.delete()\n raise Http404('This page does not exist.')\n\n if arg_0.method == 'POST':\n arg_3 = SignUpForm(arg_0.POST)\n if arg_3.is_valid():\n arg_4 = arg_3.save(commit=False)\n\n arg_4.email = arg_2.email\n arg_4.person = arg_2.person\n\n arg_4.save()\n\n if arg_2.permissions == 'admin':\n arg_7 = Group.objects.get(name='Admin')\n arg_4.groups.add(arg_7)\n\n arg_2.delete()\n\n return redirect('dispatch-admin')\n else:\n return render(\n arg_0,\n 'registration/Func.html',\n {\n 'form': arg_3,\n 'email': arg_2.email\n }\n )\n\n else:\n arg_3 = SignUpForm()\n\n return render(\n arg_0,\n 'registration/Func.html',\n {\n 'form': arg_3,\n 'email': arg_2.email\n }\n )"} +{"_id": "doc_6731", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Renders the contents of the zone with given zone_id.\"\"\"\n\n try:\n Func = ThemeManager.Zones.get(arg_0)\n except ZoneNotFound:\n return ''\n\n try:\n return Func.widget.render(add_context=arg_1)\n except (WidgetNotFound, AttributeError):\n pass\n\n return ''"} +{"_id": "doc_6732", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Handles saving the featured image.\n\n If data is None, the featured image will be removed.\n\n `data` should be dictionary with the following format:\n {\n 'image_id': int,\n 'caption': str,\n 'credit': str\n }\n \"\"\"\n\n arg_2 = arg_0.featured_image\n\n if arg_1 is None:\n if arg_2:\n arg_2.delete()\n\n arg_0.featured_image = None\n return\n\n if arg_1['image_id'] is None:\n if arg_2:\n arg_2.delete()\n\n arg_0.featured_image = None\n return\n\n if not arg_2:\n arg_2 = ImageAttachment()\n\n arg_2.image_id = arg_1.get('image_id', arg_2.image_id)\n arg_2.caption = arg_1.get('caption', None)\n arg_2.credit = arg_1.get('credit', None)\n\n arg_7 = str(type(arg_0)).lower()\n\n setattr(arg_2, arg_7, arg_0)\n\n arg_2.save()\n\n arg_0.featured_image = arg_2"} +{"_id": "doc_6733", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Save the subsection 
to the parent article \"\"\"\n Article.objects.filter(parent_id=arg_0.parent.id).update(arg_1=arg_1)"} +{"_id": "doc_6734", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the file extension.\"\"\"\n arg_1 = os.path.splitext(arg_0.img.name)[1]\n if arg_1:\n # Remove period from extension\n return arg_1[1:]\n return arg_1"} +{"_id": "doc_6735", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Custom Func method to process thumbnails and Func image dimensions.\"\"\"\n arg_2 = arg_0.pk is None\n \n if arg_2:\n # Make filenames lowercase\n arg_0.img.name = arg_0.img.name.lower()\n\n # Call super method\n super(Image, arg_0).Func(**arg_1)\n\n if arg_2 and arg_0.img:\n arg_5 = arg_0.img.read()\n\n if not arg_5:\n return\n\n arg_6 = Img.open(StringIO.StringIO(arg_5))\n\n arg_0.width, arg_0.height = arg_6.size\n\n super(Image, arg_0).Func()\n\n arg_4 = arg_0.get_name()\n arg_9 = arg_0.get_extension()\n\n for arg_10 in arg_0.SIZES.keys():\n arg_0.Func_thumbnail(arg_6, arg_0.SIZES[arg_10], arg_4, arg_10, arg_9)"} +{"_id": "doc_6736", "title": "", "text": "def Func(arg_0):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL Func object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n\n arg_1 = _app_ctx_stack.top\n if arg_1 is not None:\n if not hasattr(arg_1, 'mysql_db'):\n arg_1.mysql_db = arg_0.connect\n return arg_1.mysql_db"} +{"_id": "doc_6737", "title": "", "text": "def Func(arg_0):\n \"\"\" Copy the instance and make sure not to use a reference\n \"\"\"\n return arg_0.__class__(\n amount=arg_0[\"amount\"],\n asset=arg_0[\"asset\"].Func(),\n blockchain_instance=arg_0.blockchain,\n )"} +{"_id": "doc_6738", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=0, arg_3=-1, arg_4=[], arg_5=[]):\n \"\"\" Returns a generator for individual account transactions. The\n latest operation will be first. This call can be used in a\n ``for`` loop.\n\n :param int first: sequence number of the first\n transaction to return (*optional*)\n :param int last: sequence number of the last\n transaction to return (*optional*)\n :param int limit: limit number of transactions to\n return (*optional*)\n :param array only_ops: Limit generator by these\n operations (*optional*)\n :param array exclude_ops: Exclude these operations from\n generator (*optional*).\n\n ... 
note::\n only_ops and exclude_ops takes an array of strings:\n The full list of operation ID's can be found in\n operationids.py.\n Example: ['transfer', 'fill_order']\n \"\"\"\n arg_6 = 100\n arg_7 = 0\n\n if arg_1 < 0:\n arg_1 = 0\n\n while True:\n # RPC call\n arg_8 = arg_0.blockchain.rpc.get_account_Func(\n arg_0[\"id\"],\n \"1.11.{}\".format(arg_2),\n arg_6,\n \"1.11.{}\".format(arg_1 - 1),\n api=\"Func\",\n )\n for arg_9 in arg_8:\n if (\n arg_5\n and arg_0.operations.getOperationNameForId(arg_9[\"op\"][0]) in arg_5\n ):\n continue\n if (\n not arg_4\n or arg_0.operations.getOperationNameForId(arg_9[\"op\"][0]) in arg_4\n ):\n arg_7 += 1\n yield arg_9\n if arg_3 >= 0 and arg_7 >= arg_3: # pragma: no cover\n return\n\n if not arg_8:\n log.info(\"No more Func returned from API node\")\n break\n if len(arg_8) < arg_6:\n log.info(\"Less than {} have been returned.\".format(arg_6))\n break\n arg_1 = int(arg_8[-1][\"id\"].split(\".\")[2])"} +{"_id": "doc_6739", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\" Upgrade account to life time member\n \"\"\"\n assert callable(arg_0.blockchain.Func_account)\n return arg_0.blockchain.Func_account(account=arg_0)"} +{"_id": "doc_6740", "title": "", "text": "def Func(arg_0, arg_1): # pragma: no cover\n \"\"\" Add an other account to the Func of this account\n \"\"\"\n assert callable(arg_0.blockchain.account_Func)\n return arg_0.blockchain.account_Func(arg_1, lists=[\"white\"], arg_1=arg_0)"} +{"_id": "doc_6741", "title": "", "text": "def Func(arg_0, arg_1): # pragma: no cover\n \"\"\" Remove an other account from any list of this account\n \"\"\"\n assert callable(arg_0.blockchain.account_whitelist)\n return arg_0.blockchain.account_whitelist(arg_1, lists=[], arg_1=arg_0)"} +{"_id": "doc_6742", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Use to derive a number that allows to easily recover the\n public key from the signature\n \"\"\"\n if not isinstance(arg_0, bytes):\n arg_0 = bytes(arg_0, \"utf-8\") # pragma: no cover\n for arg_4 in range(0, 4):\n if SECP256K1_MODULE == \"secp256k1\": # pragma: no cover\n arg_5 = arg_3.ecdsa_recoverable_deserialize(arg_2, arg_4)\n arg_6 = secp256k1.PublicKey(arg_3.ecdsa_recover(arg_0, arg_5))\n if arg_6.serialize() == arg_3.serialize():\n return arg_4\n elif SECP256K1_MODULE == \"cryptography\" and not isinstance(arg_3, PublicKey):\n arg_6 = recover_public_key(arg_1, arg_2, arg_4, arg_0)\n arg_7 = hexlify(compressedPubkey(arg_6))\n arg_8 = hexlify(compressedPubkey(arg_3))\n if arg_7 == arg_8:\n return arg_4\n else: # pragma: no cover\n arg_6 = recover_public_key(arg_1, arg_2, arg_4)\n arg_7 = hexlify(compressedPubkey(arg_6))\n arg_9 = hexlify(arg_6.to_string())\n if isinstance(arg_3, PublicKey): # pragma: no cover\n arg_10 = bytes(repr(arg_3), \"ascii\")\n else: # pragma: no cover\n arg_10 = hexlify(arg_3.to_string())\n if arg_9 == arg_10 or arg_7 == arg_10: # pragma: no cover\n return arg_4"} +{"_id": "doc_6743", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns a datetime of the block with the given block\n number.\n\n :param int block_num: Block number\n \"\"\"\n return arg_0.block_class(arg_1, blockchain_instance=arg_0.blockchain).time()"} +{"_id": "doc_6744", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns the timestamp of the block with the given block\n number.\n\n :param int block_num: Block number\n \"\"\"\n return int(\n arg_0.block_class(arg_1, blockchain_instance=arg_0.blockchain)\n .time()\n .timestamp()\n )"} +{"_id": "doc_6745", 
"title": "", "text": "def Func(arg_0, arg_1=\"\", arg_2=\"\", arg_3=1e3, **arg_4):\n \"\"\" Yields account names between start and stop.\n\n :param str start: Start at this account name\n :param str stop: Stop at this account name\n :param int steps: Obtain ``steps`` ret with a single call from RPC\n \"\"\"\n arg_5 = arg_1\n while True:\n arg_6 = arg_0.blockchain.rpc.lookup_accounts(arg_5, arg_3)\n for arg_7 in arg_6:\n yield arg_7[0]\n if arg_7[0] == arg_2:\n raise StopIteration\n if arg_5 == arg_6[-1][0]:\n raise StopIteration\n arg_5 = arg_6[-1][0]\n if len(arg_6) < arg_3:\n raise StopIteration"} +{"_id": "doc_6746", "title": "", "text": "def Func(arg_0):\n \"\"\" Refresh the data from the API server\n \"\"\"\n arg_1 = arg_0.blockchain.rpc.get_asset(arg_0.identifier)\n if not arg_1:\n raise AssetDoesNotExistsException(arg_0.identifier)\n super(Asset, arg_0).__init__(arg_1, blockchain_instance=arg_0.blockchain)\n if arg_0.full:\n if \"bitasset_data_id\" in arg_1:\n arg_0[\"bitasset_data\"] = arg_0.blockchain.rpc.get_object(\n arg_1[\"bitasset_data_id\"]\n )\n arg_0[\"dynamic_asset_data\"] = arg_0.blockchain.rpc.get_object(\n arg_1[\"dynamic_asset_data_id\"]\n )"} +{"_id": "doc_6747", "title": "", "text": "def Func(arg_0):\n \"\"\" Is the store Func so that I can decrypt the content?\n \"\"\"\n if arg_0.password is not None:\n return bool(arg_0.password)\n else:\n if (\n \"UNLOCK\" in os.environ\n and os.environ[\"UNLOCK\"]\n and arg_0.config_key in arg_0.config\n and arg_0.config[arg_0.config_key]\n ):\n log.debug(\"Trying to use environmental \" \"variable to unlock wallet\")\n arg_0.unlock(os.environ.get(\"UNLOCK\"))\n return bool(arg_0.password)\n return False"} +{"_id": "doc_6748", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" The password is used to encrypt this masterpassword. 
To\n decrypt the keys stored in the keys database, one must use\n BIP38, decrypt the masterpassword from the configuration\n store with the user password, and use the decrypted\n masterpassword to decrypt the BIP38 encrypted private keys\n from the keys storage!\n\n :param str password: Password to use for en-/de-cryption\n \"\"\"\n arg_0.password = arg_1\n if arg_0.config_key in arg_0.config and arg_0.config[arg_0.config_key]:\n arg_0._decrypt_masterpassword()\n else:\n arg_0._new_masterpassword(arg_1)\n arg_0._save_encrypted_masterpassword()"} +{"_id": "doc_6749", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Derive the checksum\n\n :param str s: Random string for which to derive the checksum\n \"\"\"\n arg_2 = hashlib.sha256(bytes(arg_1, \"ascii\")).hexdigest()\n return arg_2[:4]"} +{"_id": "doc_6750", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Change the password that allows to decrypt the master key\n \"\"\"\n if not arg_0.unlocked():\n raise WalletLocked\n arg_0.password = arg_1\n arg_0._save_encrypted_masterpassword()"} +{"_id": "doc_6751", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Decrypt the content according to BIP38\n\n :param str wif: Encrypted key\n \"\"\"\n if not arg_0.unlocked():\n raise WalletLocked\n return format(bip38.Func(arg_1, arg_0.masterkey), \"wif\")"} +{"_id": "doc_6752", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Encrypt the content according to BIP38\n\n :param str wif: UnFunced key\n \"\"\"\n if not arg_0.unlocked():\n raise WalletLocked\n return format(bip38.Func(str(arg_1), arg_0.masterkey), \"encwif\")"} +{"_id": "doc_6753", "title": "", "text": "def Func(arg_0):\n \"\"\" Derive private key from the brain key and the current sequence\n number\n \"\"\"\n arg_1 = \"%s %d\" % (arg_0.brainkey, arg_0.sequence)\n arg_2 = _bytes(arg_1)\n arg_3 = hashlib.sha256(hashlib.sha512(arg_2).digest()).digest()\n return PrivateKey(hexlify(arg_3).decode(\"ascii\"), prefix=arg_0.prefix)"} +{"_id": "doc_6754", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Derive y point from x point \"\"\"\n arg_3 = ecdsa.SECP256k1.curve\n # The curve equation over F_p is:\n # y^2 = x^3 + ax + b\n arg_4, arg_5, arg_6 = arg_3.a(), arg_3.b(), arg_3.p()\n arg_7 = (pow(arg_1, 3, arg_6) + arg_4 * arg_1 + arg_5) % arg_6\n arg_8 = ecdsa.numbertheory.square_root_mod_prime(arg_7, arg_6)\n if (arg_8 % 2) == arg_2:\n arg_8 = arg_6 - arg_8\n return arg_8"} +{"_id": "doc_6755", "title": "", "text": "def Func(arg_0):\n \"\"\" Return the Func for the public key \"\"\"\n arg_1 = unhexlify(arg_0.unCompressed())\n return ecdsa.VerifyingKey.from_string(\n arg_1[1:], curve=ecdsa.SECP256k1\n ).pubkey.Func"} +{"_id": "doc_6756", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Derive new public key from this key and a sha256 \"offset\" \"\"\"\n arg_2 = bytes(arg_0) + arg_1\n arg_3 = hashlib.sha256(arg_2).digest()\n return arg_0.add(arg_3)"} +{"_id": "doc_6757", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Derive uncompressed public key \"\"\"\n arg_1 = PrivateKey(arg_1, arg_2=arg_2 or Prefix.prefix)\n arg_3 = unhexlify(repr(arg_1))\n arg_4 = ecdsa.SigningKey.from_string(\n arg_3, curve=ecdsa.SECP256k1\n ).curve.generator.order()\n arg_5 = ecdsa.SigningKey.from_string(\n arg_3, curve=ecdsa.SECP256k1\n ).verifying_key.pubkey.point\n arg_6 = ecdsa.util.number_to_string(arg_5.x(), arg_4)\n # y_str = ecdsa.util.number_to_string(p.y(), order)\n arg_7 = hexlify(chr(2 + (arg_5.y() & 1)).encode(\"ascii\") + arg_6).decode(\n \"ascii\"\n 
)\n # uncompressed = hexlify(\n # chr(4).encode('ascii') + x_str + y_str).decode('ascii')\n return arg_0(arg_7, arg_2=arg_2 or Prefix.prefix)"} +{"_id": "doc_6758", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Derive new private key from this private key and an arbitrary\n sequence number\n \"\"\"\n arg_2 = \"%s %d\" % (str(arg_0), arg_1)\n arg_3 = bytes(arg_2, \"ascii\")\n arg_4 = hashlib.sha256(hashlib.sha512(arg_3).digest()).digest()\n return PrivateKey(hexlify(arg_4).decode(\"ascii\"), prefix=arg_0.pubkey.prefix)"} +{"_id": "doc_6759", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Derive new private key from this key and a sha256 \"offset\"\n \"\"\"\n arg_2 = bytes(arg_0.pubkey) + arg_1\n arg_3 = hashlib.sha256(arg_2).digest()\n return arg_0.derive_from_seed(arg_3)"} +{"_id": "doc_6760", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\" Claim a balance from the genesis block\n\n :param str balance_id: The identifier that identifies the balance\n to Func (1.15.x)\n :param str account: (optional) the account that owns the bet\n (defaults to ``default_account``)\n \"\"\"\n if not arg_1:\n if \"default_account\" in arg_0.blockchain.config:\n arg_1 = arg_0.blockchain.config[\"default_account\"]\n if not arg_1:\n raise ValueError(\"You need to provide an account\")\n arg_1 = arg_0.account_class(arg_1, blockchain_instance=arg_0.blockchain)\n arg_3 = arg_0.blockchain.wallet.getPublicKeys()\n arg_4 = dict()\n for arg_5 in arg_3:\n if arg_5[: len(arg_0.blockchain.prefix)] != arg_0.blockchain.prefix:\n continue\n arg_6 = arg_0.publickey_class(arg_5, arg_12=arg_0.blockchain.prefix)\n arg_4[\n arg_7(\n arg_0.address_class.from_pubkey(\n arg_6,\n arg_10=False,\n arg_11=0,\n arg_12=arg_0.blockchain.prefix,\n )\n )\n ] = arg_6\n arg_4[\n arg_7(\n arg_0.address_class.from_pubkey(\n arg_6,\n arg_10=True,\n arg_11=0,\n arg_12=arg_0.blockchain.prefix,\n )\n )\n ] = arg_6\n arg_4[\n arg_7(\n arg_0.address_class.from_pubkey(\n arg_6,\n arg_10=False,\n arg_11=56,\n arg_12=arg_0.blockchain.prefix,\n )\n )\n ] = arg_6\n arg_4[\n arg_7(\n arg_0.address_class.from_pubkey(\n arg_6,\n arg_10=True,\n arg_11=56,\n arg_12=arg_0.blockchain.prefix,\n )\n )\n ] = arg_6\n\n if arg_0[\"owner\"] not in arg_4.keys():\n raise MissingKeyError(\"Need key for address {}\".format(arg_0[\"owner\"]))\n\n arg_14 = arg_0.operations.Balance_Func(\n **{\n \"fee\": {\"amount\": 0, \"asset_id\": \"1.3.0\"},\n \"deposit_to_account\": arg_1[\"id\"],\n \"balance_to_Func\": arg_0[\"id\"],\n \"balance_owner_key\": arg_4[arg_0[\"owner\"]],\n \"total_Funced\": arg_0[\"balance\"],\n \"prefix\": arg_0.blockchain.prefix,\n }\n )\n arg_15 = [\n arg_1[\"name\"], # The fee payer and receiver account\n arg_4.get(arg_0[\"owner\"]), # The genesis balance!\n ]\n return arg_0.blockchain.finalizeOp(arg_14, arg_15, \"active\", **arg_2)"} +{"_id": "doc_6761", "title": "", "text": "def Func(arg_0):\n \"\"\" This method will initialize ``SharedInstance.instance`` and return it.\n The purpose of this method is to have offer single default\n instance that can be reused by multiple classes.\n \"\"\"\n if not arg_0._sharedInstance.instance:\n arg_1 = arg_0.get_instance_class()\n arg_0._sharedInstance.instance = arg_1(**arg_0._sharedInstance.config)\n return arg_0._sharedInstance.instance"} +{"_id": "doc_6762", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" This allows to set a config that will be used when calling\n ``shared_blockchain_instance`` and allows to define the configuration\n without requiring to actually 
create an instance\n \"\"\"\n assert isinstance(arg_1, dict)\n arg_0._sharedInstance.config.update(arg_1)\n # if one is already set, delete\n if arg_0._sharedInstance.instance:\n arg_0._sharedInstance.instance = None"} +{"_id": "doc_6763", "title": "", "text": "def Func(arg_0):\n \"\"\" Find the next url in the list\n \"\"\"\n if int(arg_0.num_retries) < 0: # pragma: no cover\n arg_0._cnt_retries += 1\n arg_1 = (arg_0._cnt_retries - 1) * 2 if arg_0._cnt_retries < 10 else 10\n if arg_1:\n log.warning(\n \"Lost connection to node during rpcexec(): %s (%d/%d) \"\n % (arg_0.url, arg_0._cnt_retries, arg_0.num_retries)\n + \"Retrying in %d seconds\" % arg_1\n )\n sleep(arg_1)\n return next(arg_0.urls)\n\n arg_2 = [\n k\n for k, v in arg_0._url_counter.items()\n if (\n # Only provide URLS if num_retries is bigger equal 0,\n # i.e. we want to do reconnects at all\n int(arg_0.num_retries) >= 0\n # the counter for this host/endpoint should be smaller than\n # num_retries\n and v <= arg_0.num_retries\n # let's not retry with the same URL *if* we have others\n # available\n and (k != arg_0.url or len(arg_0._url_counter) == 1)\n )\n ]\n if not len(arg_2):\n raise NumRetriesReached\n arg_3 = arg_2[0]\n return arg_3"} +{"_id": "doc_6764", "title": "", "text": "def Func(arg_0):\n \"\"\" reset the failed connection counters\n \"\"\"\n arg_0._cnt_retries = 0\n for arg_2 in arg_0._url_counter:\n arg_0._url_counter[arg_2] = 0"} +{"_id": "doc_6765", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Is the key `key` available?\n \"\"\"\n arg_2 = (\n \"SELECT {} FROM {} WHERE {}=?\".format(\n arg_0.__value__, arg_0.__tablename__, arg_0.__key__\n ),\n (arg_1,),\n )\n arg_3 = sqlite3.connect(arg_0.sqlite_file)\n arg_4 = arg_3.cursor()\n arg_4.execute(*arg_2)\n return True if arg_4.fetchone() else False"} +{"_id": "doc_6766", "title": "", "text": "def Func(arg_0):\n \"\"\" returns all Func off the store as tuples\n \"\"\"\n arg_1 = \"SELECT {}, {} from {}\".format(\n arg_0.__key__, arg_0.__value__, arg_0.__tablename__\n )\n arg_2 = sqlite3.connect(arg_0.sqlite_file)\n arg_3 = arg_2.cursor()\n arg_3.execute(arg_1)\n arg_4 = []\n for arg_5, arg_6 in arg_3.fetchall():\n arg_4.append((arg_5, arg_6))\n return arg_4"} +{"_id": "doc_6767", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Return the key if exists or a default value\n\n :param str value: Value\n :param str default: Default value if key not present\n \"\"\"\n if arg_1 in arg_0:\n return arg_0.__Funcitem__(arg_1)\n else:\n return arg_2"} +{"_id": "doc_6768", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Delete a key from the store\n\n :param str value: Value\n \"\"\"\n arg_2 = (\n \"DELETE FROM {} WHERE {}=?\".format(arg_0.__tablename__, arg_0.__key__),\n (arg_1,),\n )\n arg_3 = sqlite3.connect(arg_0.sqlite_file)\n arg_4 = arg_3.cursor()\n arg_4.execute(*arg_2)\n arg_3.commit()"} +{"_id": "doc_6769", "title": "", "text": "def Func(arg_0):\n \"\"\" Check if the database table Func\n \"\"\"\n arg_1 = (\n \"SELECT name FROM sqlite_master \" + \"WHERE type='table' AND name=?\",\n (arg_0.__tablename__,),\n )\n arg_2 = sqlite3.connect(arg_0.sqlite_file)\n arg_3 = arg_2.cursor()\n arg_3.execute(*arg_1)\n return True if arg_3.fetchone() else False"} +{"_id": "doc_6770", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\" Create the new table in the SQLite database\n \"\"\"\n arg_1 = (\n \"\"\"\n CREATE TABLE {} (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n {} STRING(256),\n {} STRING(256)\n )\"\"\"\n 
).format(arg_0.__tablename__, arg_0.__key__, arg_0.__value__)\n arg_2 = sqlite3.connect(arg_0.sqlite_file)\n arg_3 = arg_2.cursor()\n arg_3.execute(arg_1)\n arg_2.commit()"} +{"_id": "doc_6771", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns an instance of base \"Operations\" for further processing\n \"\"\"\n if not arg_0.ops:\n return\n arg_1 = [arg_0.operations.Op_wrapper(op=o) for o in list(arg_0.ops)]\n arg_2 = arg_0.account_class(\n arg_0.proposer, blockchain_instance=arg_0.blockchain\n )\n arg_3 = {\n \"fee\": {\"amount\": 0, \"asset_id\": \"1.3.0\"},\n \"fee_paying_account\": arg_2[\"id\"],\n \"expiration_time\": formatTimeFromNow(arg_0.proposal_expiration),\n \"proposed_ops\": [o.json() for o in arg_1],\n \"extensions\": [],\n }\n if arg_0.proposal_review:\n arg_3.update({\"review_period_seconds\": arg_0.proposal_review})\n arg_1 = arg_0.operations.Proposal_create(**arg_3)\n return arg_0.operation_class(arg_1)"} +{"_id": "doc_6772", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Try to obtain the wif key from the wallet by telling which account\n and permission is supposed to sign the transaction\n \"\"\"\n assert arg_2 in arg_0.permission_types, \"Invalid permission\"\n\n if arg_0.blockchain.wallet.locked():\n raise WalletLocked()\n if not isinstance(arg_1, (list, tuple, set)):\n arg_1 = [arg_1]\n\n for arg_3 in arg_1:\n # Now let's actually deal with the accounts\n if arg_3 not in arg_0.signing_accounts:\n # is the account an instance of public key?\n if isinstance(arg_3, arg_0.publickey_class):\n arg_0.appendWif(\n arg_0.blockchain.wallet.getPrivateKeyForPublicKey(str(arg_3))\n )\n # ... or should we rather obtain the keys from an account name\n else:\n arg_4 = arg_0.account_class(\n arg_3, blockchain_instance=arg_0.blockchain\n )\n arg_5 = arg_4[arg_2][\"weight_threshold\"]\n arg_6 = arg_0._fetchkeys(\n arg_4, arg_2, arg_5=arg_5\n )\n # If we couldn't find an active key, let's try overwrite it\n # with an owner key\n if not arg_6 and arg_2 != \"owner\":\n arg_6.extend(\n arg_0._fetchkeys(\n arg_4, \"owner\", arg_5=arg_5\n )\n )\n for arg_7 in arg_6:\n arg_0.appendWif(arg_7[0])\n\n arg_0.signing_accounts.append(arg_3)"} +{"_id": "doc_6773", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Add a wif that should be used for signing of the transaction.\n \"\"\"\n if arg_1:\n try:\n arg_0.privatekey_class(arg_1)\n arg_0.wifs.add(arg_1)\n except Exception:\n raise InvalidWifError"} +{"_id": "doc_6774", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"1.3.0\"):\n \"\"\" Auxiliary method to obtain the required fees for a set of\n operations. 
Requires a websocket connection to a witness node!\n \"\"\"\n arg_3 = arg_0.blockchain.rpc\n arg_4 = arg_3.get_required_fees([arg_5.json() for arg_5 in arg_1], arg_2)\n for arg_5, arg_6 in enumerate(arg_1):\n if isinstance(arg_4[arg_5], list):\n # Operation is a proposal\n arg_1[arg_5].op.data[\"fee\"] = Asset(\n amount=arg_4[arg_5][0][\"amount\"], arg_2=arg_4[arg_5][0][\"asset_id\"]\n )\n for arg_9, arg_10 in enumerate(arg_1[arg_5].op.data[\"proposed_ops\"].data):\n arg_1[arg_5].op.data[\"proposed_ops\"].data[arg_9].data[\"op\"].op.data[\n \"fee\"\n ] = Asset(\n amount=arg_4[arg_5][1][arg_9][\"amount\"],\n arg_2=arg_4[arg_5][1][arg_9][\"asset_id\"],\n )\n else:\n # Operation is a regular operation\n arg_1[arg_5].op.data[\"fee\"] = Asset(\n amount=arg_4[arg_5][\"amount\"], arg_2=arg_4[arg_5][\"asset_id\"]\n )\n return arg_1"} +{"_id": "doc_6775", "title": "", "text": "def Func(arg_0):\n \"\"\" Verify the authority of the signed transaction\n \"\"\"\n try:\n if not arg_0.blockchain.rpc.Func(arg_0.json()):\n raise InsufficientAuthorityError\n except Exception as e:\n raise e"} +{"_id": "doc_6776", "title": "", "text": "def Func(arg_0):\n \"\"\" Broadcast a transaction to the blockchain network\n\n :param tx tx: Signed transaction to Func\n \"\"\"\n # Sign if not signed\n if not arg_0._is_signed():\n arg_0.sign()\n\n # Cannot Func an empty transaction\n if \"operations\" not in arg_0 or not arg_0[\"operations\"]:\n log.warning(\"No operations in transaction! Returning\")\n return\n\n # Obtain JS\n arg_1 = arg_0.json()\n\n # Debugging mode does not Func\n if arg_0.blockchain.noFunc:\n log.warning(\"Not Funcing anything!\")\n arg_0.clear()\n return arg_1\n\n # Broadcast\n try:\n if arg_0.blockchain.blocking:\n arg_1 = arg_0.blockchain.rpc.Func_transaction_synchronous(\n arg_1, api=\"network_Func\"\n )\n arg_1.update(**arg_1.get(\"trx\", {}))\n else:\n arg_0.blockchain.rpc.Func_transaction(arg_1, api=\"network_Func\")\n except Exception as e:\n raise e\n finally:\n arg_0.clear()\n\n return arg_1"} +{"_id": "doc_6777", "title": "", "text": "def Func(arg_0):\n \"\"\" Clear the transaction builder and start from scratch\n \"\"\"\n arg_0.ops = []\n arg_0.wifs = set()\n arg_0.signing_accounts = []\n # This makes sure that _is_constructed will return False afterwards\n arg_0[\"expiration\"] = None\n dict.__init__(arg_0, {})"} +{"_id": "doc_6778", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns the price instance so that the base asset is ``base``.\n\n Note: This makes a copy of the object!\n \"\"\"\n if arg_1 == arg_0[\"base\"][\"symbol\"]:\n return arg_0.copy()\n elif arg_1 == arg_0[\"quote\"][\"symbol\"]:\n return arg_0.copy().invert()\n else:\n raise InvalidAssetException"} +{"_id": "doc_6779", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Returns the price instance so that the quote asset is ``quote``.\n\n Note: This makes a copy of the object!\n \"\"\"\n if arg_1 == arg_0[\"quote\"][\"symbol\"]:\n return arg_0.copy()\n elif arg_1 == arg_0[\"base\"][\"symbol\"]:\n return arg_0.copy().invert()\n else:\n raise InvalidAssetException"} +{"_id": "doc_6780", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, **arg_4):\n \"\"\" This method obtains the required private keys if present in\n the wallet, finalizes the transaction, signs it and\n broadacasts it\n\n :param operation ops: The operation (or list of operaions) to\n broadcast\n :param operation account: The account that authorizes the\n operation\n :param string permission: The required permission for\n signing 
(active, owner, posting)\n :param object append_to: This allows to provide an instance of\n ProposalsBuilder (see :func:`new_proposal`) or\n TransactionBuilder (see :func:`new_tx()`) to specify\n where to put a specific operation.\n\n ... note:: ``append_to`` is exposed to every method used in the\n this class\n\n ... note::\n\n If ``ops`` is a list of operation, they all need to be\n signable by the same key! Thus, you cannot combine ops\n that require active permission with ops that require\n posting permission. Neither can you use different\n accounts for different operations!\n\n ... note:: This uses ``txbuffer`` as instance of\n :class:`transactionbuilder.TransactionBuilder`.\n You may want to use your own txbuffer\n \"\"\"\n if \"append_to\" in arg_4 and arg_4[\"append_to\"]:\n if arg_0.proposer:\n log.warning(\n \"You may not use append_to and self.proposer at \"\n \"the same time. Append new_proposal(..) instead\"\n )\n # Append to the append_to and return\n arg_5 = arg_4[\"append_to\"]\n arg_6 = arg_5.get_parent()\n assert isinstance(\n arg_5, (arg_0.transactionbuilder_class, arg_0.proposalbuilder_class)\n )\n arg_5.appendOps(arg_1)\n # Add the signer to the buffer so we sign the tx properly\n if isinstance(arg_5, arg_0.proposalbuilder_class):\n arg_6.appendSigner(arg_5.proposer, arg_3)\n else:\n arg_6.appendSigner(arg_2, arg_3)\n # This returns as we used append_to, it does NOT broadcast, or sign\n return arg_5.get_parent()\n elif arg_0.proposer:\n # Legacy proposer mode!\n arg_7 = arg_0.proposal()\n arg_7.set_proposer(arg_0.proposer)\n arg_7.set_expiration(arg_0.proposal_expiration)\n arg_7.set_review(arg_0.proposal_review)\n arg_7.appendOps(arg_1)\n # Go forward to see what the other options do ...\n else:\n # Append tot he default buffer\n arg_0.txbuffer.appendOps(arg_1)\n\n # The API that obtains the fee only allows to specify one particular\n # fee asset for all operations in that transaction even though the\n # blockchain itself could allow to pay multiple operations with\n # different fee assets.\n if \"fee_asset\" in arg_4 and arg_4[\"fee_asset\"]:\n arg_0.txbuffer.set_fee_asset(arg_4[\"fee_asset\"])\n\n # Add signing information, signer, sign and optionally broadcast\n if arg_0.unsigned:\n # In case we don't want to sign anything\n arg_0.txbuffer.addSigningInformation(arg_2, arg_3)\n return arg_0.txbuffer\n elif arg_0.bundle:\n # In case we want to add more ops to the tx (bundle)\n arg_0.txbuffer.appendSigner(arg_2, arg_3)\n return arg_0.txbuffer.json()\n else:\n # default behavior: sign + broadcast\n arg_0.txbuffer.appendSigner(arg_2, arg_3)\n arg_0.txbuffer.sign()\n return arg_0.txbuffer.broadcast()"} +{"_id": "doc_6781", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\" Broadcast a transaction to the Blockchain\n\n :param tx tx: Signed transaction to Func\n \"\"\"\n if arg_1:\n # If tx is provided, we Func the tx\n return arg_0.transactionbuilder_class(\n arg_1, blockchain_instance=arg_0\n ).Func()\n else:\n return arg_0.txbuffer.Func()"} +{"_id": "doc_6782", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\" Let's obtain a new txbuffer\n\n :returns int txid: id of the new txbuffer\n \"\"\"\n arg_3 = arg_0.transactionbuilder_class(\n *arg_1, blockchain_instance=arg_0, **arg_2\n )\n arg_0._txbuffers.append(arg_3)\n return arg_3"} +{"_id": "doc_6783", "title": "", "text": "def Func(arg_0):\n \"\"\" The transaction Func of this transaction\n \"\"\"\n # Store signatures temporarily since they are not part of\n # transaction Func\n arg_1 = 
arg_0.data[\"signatures\"]\n arg_0.data.pop(\"signatures\", None)\n\n # Generage Hash of the seriliazed version\n arg_2 = hashlib.sha256(bytes(arg_0)).digest()\n\n # recover signatures\n arg_0.data[\"signatures\"] = arg_1\n\n # Return properly truncated tx hash\n return hexlify(arg_2[:20]).decode(\"ascii\")"} +{"_id": "doc_6784", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\" Sign the transaction with the provided private keys.\n\n :param array wifkeys: Array of wif keys\n :param str chain: identifier for the chain\n\n \"\"\"\n if not arg_2:\n arg_2 = arg_0.get_default_prefix()\n arg_0.deriveDigest(arg_2)\n\n # Get Unique private keys\n arg_0.privkeys = []\n for arg_4 in arg_1:\n if arg_4 not in arg_0.privkeys:\n arg_0.privkeys.append(arg_4)\n\n # Sign the message with every private key given!\n arg_5 = []\n for arg_6 in arg_0.privkeys:\n arg_7 = Func_message(arg_0.message, arg_6)\n arg_5.append(Signature(arg_7))\n\n arg_0.data[\"Funcatures\"] = Array(arg_5)\n return arg_0"} +{"_id": "doc_6785", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Unlock the wallet database\n \"\"\"\n if arg_0.store.is_encrypted():\n return arg_0.store.Func(arg_1)"} +{"_id": "doc_6786", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Create a new wallet database\n \"\"\"\n if arg_0.created():\n raise WalletExists(\"You already have created a wallet!\")\n arg_0.store.unlock(arg_1)"} +{"_id": "doc_6787", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Add a private key to the wallet database\n \"\"\"\n try:\n arg_2 = arg_0.publickey_from_wif(arg_1)\n except Exception:\n raise InvalidWifError(\"Invalid Key format!\")\n if str(arg_2) in arg_0.store:\n raise KeyAlreadyInStoreException(\"Key already in the store\")\n arg_0.store.add(str(arg_1), str(arg_2))"} +{"_id": "doc_6788", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Remove all keys associated with a given account\n \"\"\"\n arg_2 = arg_0.getAccounts()\n for arg_3 in arg_2:\n if arg_3[\"name\"] == arg_1:\n arg_0.store.delete(arg_3[\"pubkey\"])"} +{"_id": "doc_6789", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Obtain owner Memo Key for an account from the wallet database\n \"\"\"\n arg_2 = arg_0.rpc.get_account(arg_1)\n arg_3 = arg_0.getPrivateKeyForPublicKey(arg_2[\"options\"][\"memo_key\"])\n if arg_3:\n return arg_3\n return False"} +{"_id": "doc_6790", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Obtain owner Active Key for an account from the wallet database\n \"\"\"\n arg_2 = arg_0.rpc.get_account(arg_1)\n for arg_3 in arg_2[\"active\"][\"key_auths\"]:\n try:\n return arg_0.getPrivateKeyForPublicKey(arg_3[0])\n except Exception:\n pass\n return False"} +{"_id": "doc_6791", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Obtain the first account name from public key\n \"\"\"\n # FIXME, this only returns the first associated key.\n # If the key is used by multiple accounts, this\n # will surely lead to undesired behavior\n arg_2 = list(arg_0.getAccountsFromPublicKey(str(arg_1)))\n if arg_2:\n return arg_2[0]"} +{"_id": "doc_6792", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Get key type\n \"\"\"\n for arg_3 in [\"owner\", \"active\"]:\n for arg_4 in arg_1[arg_3][\"key_auths\"]:\n if str(arg_2) == arg_4[0]:\n return arg_3\n if str(arg_2) == arg_1[\"options\"][\"memo_key\"]:\n return \"memo\"\n return None"} +{"_id": "doc_6793", "title": "", "text": "def Func(arg_0):\n \"\"\" Return all accounts installed in the wallet database\n \"\"\"\n arg_1 = 
arg_0.getPublicKeys()\n arg_2 = []\n for arg_3 in arg_1:\n # Filter those keys not for our network\n if arg_3[: len(arg_0.prefix)] == arg_0.prefix:\n arg_2.extend(arg_0.FuncFromPublicKey(arg_3))\n return arg_2"} +{"_id": "doc_6794", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Encrypt a memo\n\n :param str message: clear text memo message\n :returns: Funced message\n :rtype: str\n \"\"\"\n if not arg_1:\n return None\n\n arg_2 = str(random.getrandbits(64))\n try:\n arg_3 = arg_0.blockchain.wallet.getPrivateKeyForPublicKey(\n arg_0.from_account[\"options\"][\"memo_key\"]\n )\n except KeyNotFound:\n # if all fails, raise exception\n raise MissingKeyError(\n \"Memo private key {} for {} could not be found\".format(\n arg_0.from_account[\"options\"][\"memo_key\"], arg_0.from_account[\"name\"]\n )\n )\n if not arg_3:\n raise MissingKeyError(\n \"Memo key for %s missing!\" % arg_0.from_account[\"name\"]\n )\n\n if not hasattr(arg_0, \"chain_prefix\"):\n arg_0.chain_prefix = arg_0.blockchain.prefix\n\n arg_5 = memo.encode_memo(\n arg_0.privatekey_class(arg_3),\n arg_0.publickey_class(\n arg_0.to_account[\"options\"][\"memo_key\"], prefix=arg_0.chain_prefix\n ),\n arg_2,\n arg_1,\n )\n\n return {\n \"message\": arg_5,\n \"nonce\": arg_2,\n \"from\": arg_0.from_account[\"options\"][\"memo_key\"],\n \"to\": arg_0.to_account[\"options\"][\"memo_key\"],\n }"} +{"_id": "doc_6795", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Decrypt a message\n\n :param dict message: encrypted memo message\n :returns: Funced message\n :rtype: str\n \"\"\"\n if not arg_1:\n return None\n\n # We first try to decode assuming we received the memo\n try:\n arg_2 = arg_0.blockchain.wallet.getPrivateKeyForPublicKey(arg_1[\"to\"])\n arg_3 = arg_1[\"from\"]\n except KeyNotFound:\n try:\n # if that failed, we assume that we have sent the memo\n arg_2 = arg_0.blockchain.wallet.getPrivateKeyForPublicKey(\n arg_1[\"from\"]\n )\n arg_3 = arg_1[\"to\"]\n except KeyNotFound:\n # if all fails, raise exception\n raise MissingKeyError(\n \"None of the required memo keys are installed!\"\n \"Need any of {}\".format([arg_1[\"to\"], arg_1[\"from\"]])\n )\n\n if not hasattr(arg_0, \"chain_prefix\"):\n arg_0.chain_prefix = arg_0.blockchain.prefix\n\n return memo.decode_memo(\n arg_0.privatekey_class(arg_2),\n arg_0.publickey_class(arg_3, prefix=arg_0.chain_prefix),\n arg_1.get(\"nonce\"),\n arg_1.get(\"message\"),\n )"} +{"_id": "doc_6796", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Derive the share secret between ``priv`` and ``pub``\n\n :param `Base58` priv: Private Key\n :param `Base58` pub: Public Key\n :return: Shared secret\n :rtype: hex\n\n The shared secret is generated such that::\n\n Pub(Alice) * Priv(Bob) = Pub(Bob) * Priv(Alice)\n\n \"\"\"\n arg_2 = arg_1.point()\n arg_3 = int(repr(arg_0), 16)\n arg_4 = arg_2 * arg_3\n arg_5 = \"%032x\" % arg_4.x()\n # Zero padding\n arg_5 = \"0\" * (64 - len(arg_5)) + arg_5\n return arg_5"} +{"_id": "doc_6797", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Initialize AES instance\n\n :param hex shared_secret: Shared Secret to use as encryption key\n :param int nonce: Random nonce\n :return: AES instance\n :rtype: AES\n\n \"\"\"\n \" Shared Secret \"\n arg_2 = hashlib.sha512(unhexlify(arg_0)).digest()\n \" Seed \"\n arg_3 = bytes(str(arg_1), \"ascii\") + hexlify(arg_2)\n arg_4 = hexlify(hashlib.sha512(arg_3).digest()).decode(\"ascii\")\n \" AES \"\n arg_5 = unhexlify(arg_4[0:64])\n arg_6 = unhexlify(arg_4[64:96])\n return AES.new(arg_5, AES.MODE_CBC, 
arg_6)"} +{"_id": "doc_6798", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Encode a message with a shared secret between Alice and Bob\n\n :param PrivateKey priv: Private Key (of Alice)\n :param PublicKey pub: Public Key (of Bob)\n :param int nonce: Random nonce\n :param str message: Memo message\n :return: Encrypted message\n :rtype: hex\n\n \"\"\"\n arg_4 = get_shared_secret(arg_0, arg_1)\n arg_5 = init_aes(arg_4, arg_2)\n \" Checksum \"\n arg_6 = bytes(arg_3, \"utf8\")\n arg_7 = hashlib.sha256(arg_6).digest()\n arg_6 = arg_7[0:4] + arg_6\n \" Padding \"\n arg_6 = _pad(arg_6, 16)\n \" Encryption \"\n return hexlify(arg_5.encrypt(arg_6)).decode(\"ascii\")"} +{"_id": "doc_6799", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Decode a message with a shared secret between Alice and Bob\n\n :param PrivateKey priv: Private Key (of Bob)\n :param PublicKey pub: Public Key (of Alice)\n :param int nonce: Nonce used for Encryption\n :param bytes message: Encrypted Memo message\n :return: Decrypted message\n :rtype: str\n :raise ValueError: if message cannot be decoded as valid UTF-8\n string\n\n \"\"\"\n arg_4 = get_shared_secret(arg_0, arg_1)\n arg_5 = init_aes(arg_4, arg_2)\n \" Encryption \"\n arg_6 = bytes(arg_3, \"ascii\")\n arg_7 = arg_5.decrypt(unhexlify(arg_6))\n \" Checksum \"\n arg_8 = arg_7[0:4]\n arg_3 = arg_7[4:]\n arg_3 = _unpad(arg_3, 16)\n \" Verify checksum \"\n arg_9 = hashlib.sha256(arg_3).digest()[0:4]\n if arg_9 != arg_8: # pragma: no cover\n raise ValueError(\"checksum verification failure\")\n return arg_3.decode(\"utf8\")"} +{"_id": "doc_6800", "title": "", "text": "def Func(arg_0):\n \"\"\"Send IPMI 'command' via ipmitool\"\"\"\n\n env()\n\n arg_1 = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)\n\n arg_0 = \"ipmitool -U %s -P %s -H %s -p %s %s\" % (\n arg_1[\"USER\"], arg_1[\"PASS\"], arg_1[\"HOST\"], arg_1[\"PORT\"], arg_0)\n cij.info(\"ipmi.command: %s\" % arg_0)\n\n return cij.util.execute(arg_0, shell=True, echo=True)"} +{"_id": "doc_6801", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find the given 'pattern' in 'content'\"\"\"\n\n arg_2 = re.findall(arg_0, arg_1)\n if not arg_2:\n cij.err(\"pattern <%r> is invalid, no matches!\" % arg_0)\n cij.err(\"content: %r\" % arg_1)\n return ''\n\n if len(arg_2) >= 2:\n cij.err(\"pattern <%r> is too simple, matched more than 2!\" % arg_0)\n cij.err(\"content: %r\" % arg_1)\n return ''\n\n return arg_2[0]"} +{"_id": "doc_6802", "title": "", "text": "def Func(arg_0):\n \"\"\"Cat file and return content\"\"\"\n\n arg_1 = [\"cat\", arg_0]\n arg_2, arg_3, arg_4 = cij.ssh.command(arg_1, shell=True, echo=True)\n if arg_2:\n raise RuntimeError(\"cij.nvme.env: cat %s failed\" % arg_0)\n return arg_3.strip()"} +{"_id": "doc_6803", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get chunk meta of NVMe device\"\"\"\n\n if env():\n cij.err(\"cij.nvme.meta: Invalid NVMe ENV.\")\n return 1\n\n arg_3 = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)\n\n arg_4 = 0x40000\n with open(arg_2, \"wb\") as fout:\n for arg_5 in range(arg_0, arg_1, arg_4):\n arg_6 = min(arg_1 - arg_5, arg_4)\n arg_7 = [\"nvme get-log\",\n arg_3[\"DEV_PATH\"],\n \"-i 0xca\",\n \"-o 0x%x\" % arg_5,\n \"-l 0x%x\" % arg_6,\n \"-b\"]\n arg_8, arg_9, arg_10 = cij.ssh.command(arg_7, shell=True)\n if arg_8:\n cij.err(\"cij.nvme.meta: Error get chunk meta\")\n return 1\n\n fout.write(arg_9)\n\n return 0"} +{"_id": "doc_6804", "title": "", "text": "def Func(arg_0=\"Denali\"):\r\n \"\"\"\r\n Get sizeof 
DescriptorTable\r\n \"\"\"\r\n if arg_0 == \"Denali\":\r\n return sizeof(DescriptorTableDenali)\r\n elif arg_0 == \"Spec20\":\r\n return sizeof(DescriptorTableSpec20)\r\n elif arg_0 == \"Spec12\":\r\n return 0\r\n else:\r\n raise RuntimeError(\"Error version!\")"} +{"_id": "doc_6805", "title": "", "text": "def Func():\n \"\"\"Verify LNVM variables and construct exported variables\"\"\"\n\n if cij.ssh.Func():\n cij.err(\"cij.lnvm.Func: invalid SSH Funcironment\")\n return 1\n\n arg_0 = cij.Func_to_dict(PREFIX, REQUIRED)\n arg_1 = cij.Func_to_dict(\"NVME\", [\"DEV_NAME\"])\n\n if \"BGN\" not in arg_0.keys():\n cij.err(\"cij.lnvm.Func: invalid LNVM_BGN\")\n return 1\n if \"END\" not in arg_0.keys():\n cij.err(\"cij.lnvm.Func: invalid LNVM_END\")\n return 1\n if \"DEV_TYPE\" not in arg_0.keys():\n cij.err(\"cij.lnvm.Func: invalid LNVM_DEV_TYPE\")\n return 1\n\n arg_0[\"DEV_NAME\"] = \"%sb%03de%03d\" % (arg_1[\"DEV_NAME\"], int(arg_0[\"BGN\"]), int(arg_0[\"END\"]))\n arg_0[\"DEV_PATH\"] = \"/dev/%s\" % arg_0[\"DEV_NAME\"]\n\n cij.Func_export(PREFIX, EXPORTED, arg_0)\n\n return 0"} +{"_id": "doc_6806", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compare of two Buffer item\"\"\"\n for arg_3 in getattr(arg_0, '_fields_'):\n arg_4, arg_5 = arg_3[0], arg_3[1]\n\n if arg_4 in arg_2:\n continue\n\n arg_6 = getattr(arg_0, arg_4)\n arg_7 = getattr(arg_1, arg_4)\n\n if isinstance(arg_5, (type(Union), type(Structure))):\n if Func(arg_6, arg_7, arg_2):\n return 1\n elif isinstance(arg_5, type(Array)):\n for arg_8, arg_9 in enumerate(arg_6):\n if isinstance(arg_5, (type(Union), type(Structure))):\n if Func(arg_6[arg_8], arg_7[arg_8], arg_2):\n return 1\n else:\n if arg_6[arg_8] != arg_7[arg_8]:\n return 1\n else:\n if arg_6 != arg_7:\n return 1\n\n return 0"} +{"_id": "doc_6807", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=arg_4(\"inf\")):\n \"\"\"Copy stream to buffer\"\"\"\n arg_5 = [ord(arg_8) for arg_8 in list(arg_1)]\n arg_6 = min(arg_3, len(arg_5), arg_0.m_size)\n arg_7 = cast(arg_0.m_buf, POINTER(c_uint8))\n for arg_8 in range(arg_6):\n arg_7[arg_2 + arg_8] = arg_5[arg_8]"} +{"_id": "doc_6808", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write buffer to file\"\"\"\n\n with open(arg_1, \"wb\") as fout:\n fout.Func(arg_0.m_buf)"} +{"_id": "doc_6809", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Read file to buffer\"\"\"\n\n with open(arg_1, \"rb\") as fout:\n memmove(arg_0.m_buf, fout.Func(arg_0.m_size), arg_0.m_size)"} +{"_id": "doc_6810", "title": "", "text": "def Func(arg_0, arg_1=200):\n \"\"\"230v power on\"\"\"\n if arg_0.__Func_port is None:\n cij.err(\"cij.usb.relay: Invalid USB_RELAY_POWER_ON\")\n return 1\n\n return arg_0.__press(arg_0.__Func_port, arg_1=arg_1)"} +{"_id": "doc_6811", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\r\n \"\"\"Get chunk information\"\"\"\r\n arg_4 = [\"nvm_cmd rprt_lun\", arg_0.envs,\r\n \"%d %d > %s\" % (arg_1, arg_2, arg_3)]\r\n arg_5, arg_6, arg_6 = cij.ssh.command(arg_4, shell=True)\r\n return arg_5"} +{"_id": "doc_6812", "title": "", "text": "def Func():\n \"\"\"Verify BLOCK variables and construct exported variables\"\"\"\n\n if cij.ssh.Func():\n cij.err(\"cij.block.Func: invalid SSH Funcironment\")\n return 1\n\n arg_0 = cij.Func_to_dict(PREFIX, REQUIRED)\n\n arg_0[\"DEV_PATH\"] = \"/dev/%s\" % arg_0[\"DEV_NAME\"]\n\n cij.Func_export(PREFIX, EXPORTED, arg_0)\n\n return 0"} +{"_id": "doc_6813", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute a script or testcase\"\"\"\n\n if 
arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:script:run { script: %s }\" % arg_1)\n cij.emph(\"rnr:script:run:evars: %s\" % arg_1[\"evars\"])\n\n arg_2 = {\n \".py\": \"python\",\n \".sh\": \"source\"\n }\n\n arg_3 = os.path.splitext(arg_1[\"fpath\"])[-1]\n if not arg_3 in arg_2.keys():\n cij.err(\"rnr:script:run { invalid script[\\\"fpath\\\"]: %r }\" % arg_1[\"fpath\"])\n return 1\n\n arg_4 = arg_2[arg_3]\n\n with open(arg_1[\"log_fpath\"], \"a\") as log_fd:\n log_fd.write(\"# script_fpath: %r\\n\" % arg_1[\"fpath\"])\n log_fd.flush()\n\n arg_5 = time.time()\n arg_6 = [\n 'bash', '-c',\n 'CIJ_ROOT=$(cij_root) && '\n 'source $CIJ_ROOT/modules/cijoe.sh && '\n 'source %s && '\n 'CIJ_TEST_RES_ROOT=\"%s\" %s %s ' % (\n arg_0[\"conf\"][\"ENV_FPATH\"],\n arg_1[\"res_root\"],\n arg_4,\n arg_1[\"fpath\"]\n )\n ]\n if arg_0[\"conf\"][\"VERBOSE\"] > 1:\n cij.emph(\"rnr:script:run { cmd: %r }\" % \" \".join(arg_6))\n\n arg_7 = os.environ.copy()\n arg_7.update({arg_8: str(arg_1[\"evars\"][arg_8]) for arg_8 in arg_1[\"evars\"]})\n\n arg_9 = Popen(\n arg_6,\n stdout=log_fd,\n stderr=STDOUT,\n cwd=arg_1[\"res_root\"],\n env=arg_7\n )\n arg_9.wait()\n\n arg_1[\"rcode\"] = arg_9.returncode\n arg_1[\"wallc\"] = time.time() - arg_5\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:script:run { wallc: %02f }\" % arg_1[\"wallc\"])\n cij.emph(\n \"rnr:script:run { rcode: %r } \" % arg_1[\"rcode\"],\n arg_1[\"rcode\"]\n )\n\n return arg_1[\"rcode\"]"} +{"_id": "doc_6814", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Setup test-hooks\n @returns dict of hook filepaths {\"enter\": [], \"exit\": []}\n \"\"\"\n\n arg_3 = {\n \"enter\": [],\n \"exit\": []\n }\n\n if arg_2 is None: # Nothing to do, just return the struct\n return arg_3\n\n for arg_4 in arg_2: # Fill out paths\n for arg_5 in HOOK_PATTERNS:\n for arg_6 in HOOK_PATTERNS[arg_5]:\n arg_7 = os.sep.join([arg_0[\"conf\"][\"HOOKS\"], arg_6 % arg_4])\n if not os.path.exists(arg_7):\n continue\n\n arg_8 = hook_setup(arg_1, arg_7)\n if not arg_8:\n continue\n\n arg_3[arg_5].append(arg_8)\n\n if not arg_3[\"enter\"] + arg_3[\"exit\"]:\n cij.err(\"rnr:Func:FAIL { hname: %r has no files }\" % arg_4)\n return None\n\n return arg_3"} +{"_id": "doc_6815", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Dump the given trun to file\"\"\"\n\n if arg_1 is None:\n arg_1 = yml_fpath(arg_0[\"conf\"][\"OUTPUT\"])\n\n with open(arg_1, 'w') as yml_file:\n arg_2 = yaml.dump(arg_0, explicit_start=True, default_flow_style=False)\n yml_file.write(arg_2)"} +{"_id": "doc_6816", "title": "", "text": "def Func(arg_0):\n \"\"\"Print essential info on\"\"\"\n\n if arg_0[\"conf\"][\"VERBOSE\"] > 1: # Print environment variables\n cij.emph(\"rnr:CONF {\")\n for arg_1 in sorted(arg_0[\"conf\"].keys()):\n cij.emph(\" % 16s: %r\" % (arg_1, arg_0[\"conf\"][arg_1]))\n cij.emph(\"}\")\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:INFO {\")\n cij.emph(\" OUTPUT: %r\" % arg_0[\"conf\"][\"OUTPUT\"])\n cij.emph(\" yml_fpath: %r\" % yml_fpath(arg_0[\"conf\"][\"OUTPUT\"]))\n cij.emph(\"}\")"} +{"_id": "doc_6817", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create and initialize a testcase\n \"\"\"\n #pylint: disable=locally-disabled, unused-argument\n\n arg_3 = copy.deepcopy(TESTCASE)\n\n arg_3[\"fname\"] = arg_2\n arg_3[\"fpath_orig\"] = os.sep.join([arg_0[\"conf\"][\"TESTCASES\"], arg_3[\"fname\"]])\n\n if not os.path.exists(arg_3[\"fpath_orig\"]):\n cij.err('rnr:Func: !case[\"fpath_orig\"]: %r' % arg_3[\"fpath_orig\"])\n return 
None\n\n arg_3[\"name\"] = os.path.splitext(arg_3[\"fname\"])[0]\n arg_3[\"ident\"] = \"/\".join([arg_1[\"ident\"], arg_3[\"fname\"]])\n\n arg_3[\"res_root\"] = os.sep.join([arg_1[\"res_root\"], arg_3[\"fname\"]])\n arg_3[\"aux_root\"] = os.sep.join([arg_3[\"res_root\"], \"_aux\"])\n arg_3[\"log_fpath\"] = os.sep.join([arg_3[\"res_root\"], \"run.log\"])\n\n arg_3[\"fpath\"] = os.sep.join([arg_3[\"res_root\"], arg_3[\"fname\"]])\n\n arg_3[\"evars\"].update(copy.deepcopy(arg_1[\"evars\"]))\n\n # Initalize\n os.makedirs(arg_3[\"res_root\"]) # Create DIRS\n os.makedirs(arg_3[\"aux_root\"])\n shutil.copyfile(arg_3[\"fpath_orig\"], arg_3[\"fpath\"]) # Copy testcase\n\n # Initialize hooks\n arg_3[\"hooks\"] = hooks_setup(arg_0, arg_3, arg_1.get(\"hooks_pr_tcase\"))\n\n return arg_3"} +{"_id": "doc_6818", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Triggers when exiting the given testsuite\"\"\"\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:tsuite:exit\")\n\n arg_2 = 0\n for arg_3 in reversed(arg_1[\"hooks\"][\"exit\"]): # EXIT-hooks\n arg_2 = script_run(arg_0, arg_3)\n if arg_2:\n break\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:tsuite:exit { rcode: %r } \" % arg_2, arg_2)\n\n return arg_2"} +{"_id": "doc_6819", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Creates and initialized a TESTSUITE struct and site-effects such as creating\n output directories and forwarding initialization of testcases\n \"\"\"\n\n arg_3 = copy.deepcopy(TESTSUITE) # Setup the test-suite\n\n arg_3[\"name\"] = arg_1.get(\"name\")\n if arg_3[\"name\"] is None:\n cij.err(\"rnr:Func: no testsuite is given\")\n return None\n\n arg_3[\"alias\"] = arg_1.get(\"alias\")\n arg_3[\"ident\"] = \"%s_%d\" % (arg_3[\"name\"], arg_2)\n\n arg_3[\"res_root\"] = os.sep.join([arg_0[\"conf\"][\"OUTPUT\"], arg_3[\"ident\"]])\n arg_3[\"aux_root\"] = os.sep.join([arg_3[\"res_root\"], \"_aux\"])\n\n arg_3[\"evars\"].update(copy.deepcopy(arg_0[\"evars\"]))\n arg_3[\"evars\"].update(copy.deepcopy(arg_1.get(\"evars\", {})))\n\n # Initialize\n os.makedirs(arg_3[\"res_root\"])\n os.makedirs(arg_3[\"aux_root\"])\n\n # Setup testsuite-hooks\n arg_3[\"hooks\"] = hooks_setup(arg_0, arg_3, arg_1.get(\"hooks\"))\n\n # Forward from declaration\n arg_3[\"hooks_pr_tcase\"] = arg_1.get(\"hooks_pr_tcase\", [])\n\n arg_3[\"fname\"] = \"%s.suite\" % arg_3[\"name\"]\n arg_3[\"fpath\"] = os.sep.join([arg_0[\"conf\"][\"TESTSUITES\"], arg_3[\"fname\"]])\n\n #\n # Load testcases from .suite file OR from declaration\n #\n arg_4 = [] # Load testcase fpaths\n if os.path.exists(arg_3[\"fpath\"]): # From suite-file\n arg_5 = (\n arg_6.strip() for arg_6 in open(arg_3[\"fpath\"]).read().splitlines()\n )\n arg_4.extend(\n (arg_6 for arg_6 in arg_5 if len(arg_6) > 1 and arg_6[0] != \"#\")\n )\n else: # From declaration\n arg_4.extend(arg_1.get(\"testcases\", []))\n\n # NOTE: fix duplicates; allow them\n # NOTE: Currently hot-fixed here\n if len(set(arg_4)) != len(arg_4):\n cij.err(\"rnr:suite: failed: duplicate tcase in suite not supported\")\n return None\n\n for arg_7 in arg_4: # Setup testcases\n arg_8 = tcase_setup(arg_0, arg_3, arg_7)\n if not arg_8:\n cij.err(\"rnr:suite: failed: tcase_setup\")\n return None\n\n arg_3[\"testcases\"].append(arg_8)\n\n return arg_3"} +{"_id": "doc_6820", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n setup res_root and aux_root, log info and run tcase-enter-hooks\n\n @returns 0 when all hooks succeed, some value othervise\n \"\"\"\n #pylint: disable=locally-disabled, 
unused-argument\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:tcase:enter\")\n cij.emph(\"rnr:tcase:enter { fname: %r }\" % arg_2[\"fname\"])\n cij.emph(\"rnr:tcase:enter { log_fpath: %r }\" % arg_2[\"log_fpath\"])\n\n arg_3 = 0\n for arg_4 in arg_2[\"hooks\"][\"enter\"]: # tcase ENTER-hooks\n arg_3 = script_run(arg_0, arg_4)\n if arg_3:\n break\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:tcase:exit: { rcode: %r }\" % arg_3, arg_3)\n\n return arg_3"} +{"_id": "doc_6821", "title": "", "text": "def Func(arg_0):\n \"\"\"Triggers when exiting the given testrun\"\"\"\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:trun:exit\")\n\n arg_1 = 0\n for arg_2 in reversed(arg_0[\"hooks\"][\"exit\"]): # EXIT-hooks\n arg_1 = script_run(arg_0, arg_2)\n if arg_1:\n break\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:trun::exit { rcode: %r }\" % arg_1, arg_1)\n\n return arg_1"} +{"_id": "doc_6822", "title": "", "text": "def Func(arg_0):\n \"\"\"Triggers when entering the given testrun\"\"\"\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:trun::enter\")\n\n arg_0[\"stamp\"][\"begin\"] = int(time.time()) # Record start timestamp\n\n arg_1 = 0\n for arg_2 in arg_0[\"hooks\"][\"enter\"]: # ENTER-hooks\n arg_1 = script_run(arg_0, arg_2)\n if arg_1:\n break\n\n if arg_0[\"conf\"][\"VERBOSE\"]:\n cij.emph(\"rnr:trun::enter { rcode: %r }\" % arg_1, arg_1)\n\n return arg_1"} +{"_id": "doc_6823", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Setup the testrunner data-structure, embedding the parsed environment\n variables and command-line arguments and continues with setup for testplans,\n testsuites, and testcases\n \"\"\"\n\n arg_1 = None\n try:\n with open(arg_0[\"TESTPLAN_FPATH\"]) as declr_fd:\n arg_1 = yaml.safe_load(declr_fd)\n except AttributeError as exc:\n cij.err(\"rnr: %r\" % exc)\n\n if not arg_1:\n return None\n\n arg_2 = copy.deepcopy(TRUN)\n arg_2[\"ver\"] = cij.VERSION\n\n arg_2[\"conf\"] = copy.deepcopy(arg_0)\n arg_2[\"res_root\"] = arg_0[\"OUTPUT\"]\n arg_2[\"aux_root\"] = os.sep.join([arg_2[\"res_root\"], \"_aux\"])\n arg_2[\"evars\"].update(copy.deepcopy(arg_1.get(\"evars\", {})))\n\n os.makedirs(arg_2[\"aux_root\"])\n\n arg_3 = arg_1.get(\"hooks\", [])\n if \"lock\" not in arg_3:\n arg_3 = [\"lock\"] + arg_3\n\n if arg_3[0] != \"lock\":\n return None\n\n # Setup top-level hooks\n arg_2[\"hooks\"] = hooks_setup(arg_2, arg_2, arg_3)\n\n for arg_4, arg_1 in enumerate(arg_1[\"testsuites\"]): # Setup testsuites\n arg_5 = tsuite_setup(arg_2, arg_1, arg_4)\n if arg_5 is None:\n cij.err(\"main::FAILED: setting up tsuite: %r\" % arg_5)\n return 1\n\n arg_2[\"testsuites\"].append(arg_5)\n arg_2[\"progress\"][\"UNKN\"] += len(arg_5[\"testcases\"])\n\n return arg_2"} +{"_id": "doc_6824", "title": "", "text": "def Func(arg_0):\n \"\"\"CIJ Test Runner Func entry point\"\"\"\n\n arg_1 = yml_fpath(arg_0[\"OUTPUT\"])\n if os.path.exists(arg_1): # YAML exists, we exit, it might be RUNNING!\n cij.err(\"Func:FAILED { fpath: %r }, exists\" % arg_1)\n return 1\n\n arg_2 = trun_setup(arg_0) # Construct 'trun' from 'conf'\n if not arg_2:\n return 1\n\n trun_to_file(arg_2) # Persist trun\n trun_emph(arg_2) # Print trun before run\n\n arg_3 = 0\n arg_4 = trun_enter(arg_2)\n for arg_5 in (ts for ts in arg_2[\"testsuites\"] if not arg_4):\n\n arg_6 = 0\n arg_7 = tsuite_enter(arg_2, arg_5)\n for arg_8 in (tc for tc in arg_5[\"testcases\"] if not arg_7):\n\n arg_9 = tcase_enter(arg_2, arg_5, arg_8)\n if not arg_9:\n arg_9 += script_run(arg_2, arg_8)\n arg_9 += tcase_exit(arg_2, 
arg_5, arg_8)\n\n arg_8[\"status\"] = \"FAIL\" if arg_9 else \"PASS\"\n\n arg_2[\"progress\"][arg_8[\"status\"]] += 1 # Update progress\n arg_2[\"progress\"][\"UNKN\"] -= 1\n\n arg_6 += arg_9 # Accumulate errors\n\n trun_to_file(arg_2) # Persist trun\n\n if not arg_7:\n arg_6 += tsuite_exit(arg_2, arg_5)\n\n arg_6 += arg_7 # Accumulate errors\n arg_3 += arg_6\n\n arg_5[\"status\"] = \"FAIL\" if arg_6 else \"PASS\"\n\n cij.emph(\"rnr:tsuite %r\" % arg_5[\"status\"], arg_5[\"status\"] != \"PASS\")\n\n if not arg_4:\n trun_exit(arg_2)\n\n arg_3 += arg_4\n arg_2[\"status\"] = \"FAIL\" if arg_3 else \"PASS\"\n\n arg_2[\"stamp\"][\"end\"] = int(time.time()) + 1 # END STAMP\n trun_to_file(arg_2) # PERSIST\n\n cij.emph(\"rnr:Func:progress %r\" % arg_2[\"progress\"])\n cij.emph(\"rnr:Func:trun %r\" % arg_2[\"status\"], arg_2[\"status\"] != \"PASS\")\n\n return arg_2[\"progress\"][\"UNKN\"] + arg_2[\"progress\"][\"FAIL\"]"} +{"_id": "doc_6825", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get chunk meta table\"\"\"\n arg_2 = arg_0.envs[\"CHUNKS\"]\n if cij.nvme.get_meta(0, arg_2 * arg_0.envs[\"CHUNK_META_SIZEOF\"], arg_1):\n raise RuntimeError(\"cij.liblight.Func: fail\")\n\n arg_3 = cij.bin.Buffer(types=arg_0.envs[\"CHUNK_META_STRUCT\"], length=arg_2)\n arg_3.read(arg_1)\n return arg_3"} +{"_id": "doc_6826", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generic address to device address\"\"\"\n arg_2 = [\"nvm_addr gen2dev\", arg_0.envs[\"DEV_PATH\"], \"0x{:x}\".format(arg_1)]\n arg_3, arg_4, arg_5 = cij.ssh.command(arg_2, shell=True)\n if arg_3:\n raise RuntimeError(\"cij.liblight.Func: cmd fail\")\n\n return int(re.findall(r\"dev: ([0-9a-fx]+)\", arg_4)[0], 16)"} +{"_id": "doc_6827", "title": "", "text": "def Func(arg_0):\n \"\"\"Start DMESG job in thread\"\"\"\n\n arg_0.__thread = Thread(target=arg_0.__run, args=(True, False))\n arg_0.__thread.setDaemon(True)\n arg_0.__thread.Func()"} +{"_id": "doc_6828", "title": "", "text": "def Func(arg_0):\n \"\"\"Terminate DMESG job\"\"\"\n\n if arg_0.__thread:\n arg_1 = [\"who am i\"]\n arg_2, arg_3, arg_4 = cij.util.execute(arg_1, shell=True, echo=True)\n if arg_2:\n cij.warn(\"cij.dmesg.Func: who am i failed\")\n return 1\n\n arg_5 = arg_3.split()[1]\n\n arg_1 = [\"pkill -f '{}' -t '{}'\".format(\" \".join(arg_0.__prefix), arg_5)]\n arg_2, arg_4, arg_4 = cij.util.execute(arg_1, shell=True, echo=True)\n if arg_2:\n cij.warn(\"cij.dmesg.Func: pkill failed\")\n return 1\n\n arg_0.__thread.join()\n arg_0.__thread = None\n\n return 0"} +{"_id": "doc_6829", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\r\n \"\"\" generate rater pic\"\"\"\r\n arg_3 = arg_1['filename'] + '.png'\r\n plt.figure(figsize=(5.6 * arg_2, 3.2 * arg_2))\r\n for arg_4 in arg_0.keys():\r\n plt.plot(arg_0[arg_4][:, 0], arg_0[arg_4][:, 1], label=str(arg_4))\r\n plt.title(arg_1['title'])\r\n plt.xlabel(arg_1['x_axis_name'])\r\n plt.ylabel(arg_1['y_axis_name'])\r\n plt.legend(loc='upper left')\r\n plt.savefig(arg_3)\r\n return arg_3"} +{"_id": "doc_6830", "title": "", "text": "def Func(arg_0):\r\n \"\"\" round the data\"\"\"\r\n for arg_1, arg_2 in enumerate(arg_0):\r\n arg_0[arg_1][0] = round(arg_0[arg_1][0] / 100.0) * 100.0\r\n return arg_0"} +{"_id": "doc_6831", "title": "", "text": "def Func():\n \"\"\"Verify PCI variables and construct exported variables\"\"\"\n\n if cij.ssh.Func():\n cij.err(\"cij.pci.Func: invalid SSH Funcironment\")\n return 1\n\n arg_0 = cij.Func_to_dict(PREFIX, REQUIRED)\n\n arg_0[\"BUS_PATH\"] = \"/sys/bus/pci\"\n arg_0[\"DEV_PATH\"] = 
os.sep.join([arg_0[\"BUS_PATH\"], \"devices\", arg_0[\"DEV_NAME\"]])\n\n cij.Func_export(PREFIX, EXPORTED, arg_0)\n\n return 0"} +{"_id": "doc_6832", "title": "", "text": "def Func(arg_0):\n \"\"\"Print, emphasized 'Func', the given 'txt' message\"\"\"\n\n print(\"%s# %s%s%s\" % (PR_GOOD_CC, get_time_stamp(), arg_0, PR_NC))\n sys.stdout.flush()"} +{"_id": "doc_6833", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Define the list of 'exported' variables with 'prefix' with values from 'env'\n \"\"\"\n\n for arg_3 in arg_1:\n arg_4[\"_\".join([arg_0, arg_3])] = arg_2[arg_3]"} +{"_id": "doc_6834", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Get-log-page chunk information\n\n If the pugrp and punit is set, then provide report only for that pugrp/punit\n\n @returns the first chunk in the given state if one exists, None otherwise\n \"\"\"\n\n arg_3 = [\"nvm_cmd\", \"rprt_all\", arg_0]\n if not (arg_1 is None and arg_2 is None):\n arg_3 = [\"nvm_cmd\", \"rprt_lun\", arg_0, str(arg_1), str(arg_2)]\n\n arg_4, arg_4, arg_4, arg_5 = cij.test.command_to_struct(arg_3)\n if not arg_5:\n return None\n\n return arg_5[\"rprt_descr\"]"} +{"_id": "doc_6835", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Get a chunk-descriptor for the first chunk in the given state.\n\n If the pugrp and punit is set, then search only that pugrp/punit\n\n @returns the first chunk in the given state if one exists, None otherwise\n \"\"\"\n\n arg_4 = dev_get_rprt(arg_0, arg_2, arg_3)\n if not arg_4:\n return None\n\n return next((arg_5 for arg_5 in arg_4 if arg_5[\"cs\"] == arg_1), None)"} +{"_id": "doc_6836", "title": "", "text": "def Func():\n \"\"\"Kill all of FIO processes\"\"\"\n\n if env():\n return 1\n\n arg_0 = [\"ps -aux | grep fio | grep -v grep\"]\n arg_1, arg_2, arg_2 = cij.ssh.command(arg_0, shell=True, echo=False)\n if not arg_1:\n arg_1, arg_2, arg_2 = cij.ssh.command([\"Func -f fio\"], shell=True)\n if arg_1:\n return 1\n return 0"} +{"_id": "doc_6837", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get parameter of FIO\"\"\"\n\n if arg_1 in arg_0.__parm.keys():\n return arg_0.__parm[arg_1]\n\n return None"} +{"_id": "doc_6838", "title": "", "text": "def Func(arg_0):\n \"\"\"Run FIO job in thread\"\"\"\n\n arg_0.__thread = Threads(target=arg_0.run, args=(True, True, False))\n arg_0.__thread.setDaemon(True)\n arg_0.__thread.Func()"} +{"_id": "doc_6839", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False, arg_3=True):\n \"\"\"Run FIO job\"\"\"\n\n if env():\n return 1\n\n arg_4 = [\"fio\"] + arg_0.__parse_parms()\n if arg_2:\n cij.emph(\"cij.fio.Func: shell: %r, cmd: %r\" % (arg_1, arg_4))\n\n return cij.ssh.command(arg_4, arg_1, arg_3)"} +{"_id": "doc_6840", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse descriptions from the the given tcase\"\"\"\n\n arg_1 = \"SHORT\"\n arg_2 = \"LONG\"\n\n try:\n arg_3 = tcase_comment(arg_0)\n except (IOError, OSError, ValueError) as exc:\n arg_3 = []\n cij.err(\"Func: failed: %r, tcase: %r\" % (exc, arg_0))\n\n arg_3 = [l for l in arg_3 if l.strip()] # Remove empty lines\n\n for arg_4, arg_5 in enumerate(arg_3):\n if arg_5.startswith(\"#\"):\n arg_3[arg_4] = arg_5[1:]\n\n if arg_3:\n arg_1 = arg_3[0]\n\n if len(arg_3) > 1:\n arg_2 = \"\\n\".join(arg_3[1:])\n\n return arg_1, arg_2"} +{"_id": "doc_6841", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns content of the given 'fpath' with HTML annotations for syntax\n highlighting\n \"\"\"\n\n if not 
os.path.exists(arg_0):\n return \"COULD-NOT-FIND-TESTCASE-SRC-AT-FPATH:%r\" % arg_0\n\n # NOTE: Do SYNTAX highlight?\n\n return open(arg_0, \"r\").read()"} +{"_id": "doc_6842", "title": "", "text": "def Func(arg_0):\n \"\"\"Perform Funcing of the given test run\"\"\"\n\n arg_1 = []\n arg_1.append((\"trun\", process_trun(arg_0)))\n\n for arg_2 in arg_0[\"testsuites\"]:\n arg_1.append((\"tsuite\", process_tsuite(arg_2)))\n\n for arg_3 in arg_2[\"testcases\"]:\n arg_1.append((\"tcase\", process_tcase(arg_3)))\n\n for arg_4, arg_5 in arg_1:\n if not arg_5:\n cij.err(\"rprtr::Func: FAILED for %r\" % arg_4)\n\n return sum((arg_5 for arg_4, arg_5 in arg_1))"} +{"_id": "doc_6843", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Replace all absolute paths to \"re-home\" it\n \"\"\"\n\n if arg_0 == arg_1:\n return\n\n if isinstance(arg_2, list):\n for arg_3 in arg_2:\n Func(arg_0, arg_1, arg_3)\n elif isinstance(arg_2, dict):\n for arg_4, arg_5 in arg_2.iteritems():\n if isinstance(arg_5, (dict, list)):\n Func(arg_0, arg_1, arg_5)\n elif \"conf\" in arg_4:\n continue\n elif \"orig\" in arg_4:\n continue\n elif \"root\" in arg_4 or \"path\" in arg_4:\n arg_2[arg_4] = arg_2[arg_4].replace(arg_0, arg_1)"} +{"_id": "doc_6844", "title": "", "text": "def Func(arg_0):\n \"\"\"Main entry point\"\"\"\n\n arg_1 = cij.runner.trun_from_file(arg_0.trun_fpath)\n\n rehome(arg_1[\"conf\"][\"OUTPUT\"], arg_0.output, arg_1)\n\n postprocess(arg_1)\n\n cij.emph(\"Func: reports are uses tmpl_fpath: %r\" % arg_0.tmpl_fpath)\n cij.emph(\"Func: reports are here args.output: %r\" % arg_0.output)\n\n arg_2 = os.sep.join([arg_0.output, \"%s.html\" % arg_0.tmpl_name])\n cij.emph(\"html_fpath: %r\" % arg_2)\n try: # Create and store HTML report\n with open(arg_2, 'w') as html_file:\n html_file.write(dset_to_html(arg_1, arg_0.tmpl_fpath))\n except (IOError, OSError, ValueError) as exc:\n import traceback\n traceback.print_exc()\n cij.err(\"rprtr:Func: exc: %s\" % exc)\n return 1\n\n return 0"} +{"_id": "doc_6845", "title": "", "text": "def Func(arg_0=300):\n \"\"\"Wait util target connected\"\"\"\n\n if env():\n arg_3.err(\"cij.ssh.Func: Invalid SSH environment\")\n return 1\n\n arg_1 = arg_3.ENV.get(\"SSH_CMD_TIMEOUT\")\n\n try:\n arg_2 = time.time()\n\n arg_3.ENV[\"SSH_CMD_TIMEOUT\"] = \"3\"\n\n while True:\n arg_5 = time.time()\n if (arg_5 - arg_2) > arg_0:\n arg_3.err(\"cij.ssh.Func: Timeout\")\n return 1\n\n arg_6, arg_7, arg_7 = command([\"exit\"], shell=True, echo=False)\n if not arg_6:\n break\n\n arg_3.info(\"cij.ssh.Func: Time elapsed: %d seconds\" % (arg_5 - arg_2))\n\n finally:\n if arg_1 is None:\n del arg_3.ENV[\"SSH_CMD_TIMEOUT\"]\n else:\n arg_3.ENV[\"SSH_CMD_TIMEOUT\"] = arg_1\n\n return 0"} +{"_id": "doc_6846", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"Factory method for the assertion builder with value to be tested and optional description.\"\"\"\n global _soft_ctx\n if _soft_ctx:\n return AssertionBuilder(arg_0, arg_1, 'soft')\n return AssertionBuilder(arg_0, arg_1)"} +{"_id": "doc_6847", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Asserts that val is equal to other.\"\"\"\n if arg_0._check_dict_like(arg_0.val, check_values=False, return_as_bool=True) and \\\n arg_0._check_dict_like(arg_1, check_values=False, return_as_bool=True):\n if arg_0._dict_not_equal(arg_0.val, arg_1, ignore=arg_2.get('ignore'), include=arg_2.get('include')):\n arg_0._dict_err(arg_0.val, arg_1, ignore=arg_2.get('ignore'), include=arg_2.get('include'))\n else:\n if arg_0.val != arg_1:\n 
arg_0._err('Expected <%s> to be equal to <%s>, but was not.' % (arg_0.val, arg_1))\n return arg_0"} +{"_id": "doc_6848", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is not equal to other.\"\"\"\n if arg_0.val == arg_1:\n arg_0._err('Expected <%s> to be not equal to <%s>, but was.' % (arg_0.val, arg_1))\n return arg_0"} +{"_id": "doc_6849", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that the val is not identical to other, via 'is' compare.\"\"\"\n if arg_0.val is arg_1:\n arg_0._err('Expected <%s> to be not identical to <%s>, but was.' % (arg_0.val, arg_1))\n return arg_0"} +{"_id": "doc_6850", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is of the given type.\"\"\"\n if type(arg_1) is not type and\\\n not issubclass(type(arg_1), type):\n raise TypeError('given arg must be a type')\n if type(arg_0.val) is not arg_1:\n if hasattr(arg_0.val, '__name__'):\n arg_2 = arg_0.val.__name__\n elif hasattr(arg_0.val, '__class__'):\n arg_2 = arg_0.val.__class__.__name__\n else:\n arg_2 = 'unknown'\n arg_0._err('Expected <%s:%s> to be of type <%s>, but was not.' % (arg_0.val, arg_2, arg_1.__name__))\n return arg_0"} +{"_id": "doc_6851", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is the given length.\"\"\"\n if type(arg_1) is not int:\n raise TypeError('given arg must be an int')\n if arg_1 < 0:\n raise ValueError('given arg must be a positive int')\n if len(arg_0.val) != arg_1:\n arg_0._err('Expected <%s> to be of length <%d>, but was <%d>.' % (arg_0.val, arg_1, len(arg_0.val)))\n return arg_0"} +{"_id": "doc_6852", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Asserts that val does not contain the given item or items.\"\"\"\n if len(arg_1) == 0:\n raise ValueError('one or more args must be given')\n elif len(arg_1) == 1:\n if arg_1[0] in arg_0.val:\n arg_0._err('Expected <%s> to not contain item <%s>, but did.' % (arg_0.val, arg_1[0]))\n else:\n arg_2 = []\n for arg_3 in arg_1:\n if arg_3 in arg_0.val:\n arg_2.append(arg_3)\n if arg_2:\n arg_0._err('Expected <%s> to not contain items %s, but did contain %s.' % (arg_0.val, arg_0._fmt_items(arg_1), arg_0._fmt_items(arg_2)))\n return arg_0"} +{"_id": "doc_6853", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is iterable and does not contain any duplicate items.\"\"\"\n try:\n if len(arg_0.val) == len(set(arg_0.val)):\n return arg_0\n except TypeError:\n raise TypeError('val is not iterable')\n arg_0._err('Expected <%s> to not contain duplicates, but did.' % arg_0.val)"} +{"_id": "doc_6854", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is empty.\"\"\"\n if len(arg_0.val) != 0:\n if isinstance(arg_0.val, str_types):\n arg_0._err('Expected <%s> to be empty string, but was not.' % arg_0.val)\n else:\n arg_0._err('Expected <%s> to be empty, but was not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6855", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is not empty.\"\"\"\n if len(arg_0.val) == 0:\n if isinstance(arg_0.val, str_types):\n arg_0._err('Expected not empty string, but was empty.')\n else:\n arg_0._err('Expected not empty, but was empty.')\n return arg_0"} +{"_id": "doc_6856", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is numeric and is less than other.\"\"\"\n arg_0._validate_compareable(arg_1)\n if arg_0.val >= arg_1:\n if type(arg_0.val) is datetime.datetime:\n arg_0._err('Expected <%s> to be less than <%s>, but was not.' 
% (arg_0.val.strftime('%Y-%m-%d %H:%M:%S'), arg_1.strftime('%Y-%m-%d %H:%M:%S')))\n else:\n arg_0._err('Expected <%s> to be less than <%s>, but was not.' % (arg_0.val, arg_1))\n return arg_0"} +{"_id": "doc_6857", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Asserts that val is numeric and is between low and high.\"\"\"\n arg_3 = type(arg_0.val)\n arg_0._validate_between_args(arg_3, arg_1, arg_2)\n\n if arg_0.val < arg_1 or arg_0.val > arg_2:\n if arg_3 is datetime.datetime:\n arg_0._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (arg_0.val.strftime('%Y-%m-%d %H:%M:%S'), arg_1.strftime('%Y-%m-%d %H:%M:%S'), arg_2.strftime('%Y-%m-%d %H:%M:%S')))\n else:\n arg_0._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (arg_0.val, arg_1, arg_2))\n return arg_0"} +{"_id": "doc_6858", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Asserts that val is numeric and is close to other within tolerance.\"\"\"\n arg_0._validate_close_to_args(arg_0.val, arg_1, arg_2)\n\n if arg_0.val < (arg_1-arg_2) or arg_0.val > (arg_1+arg_2):\n if type(arg_0.val) is datetime.datetime:\n arg_3 = arg_2.days * 86400 + arg_2.seconds + arg_2.microseconds / 1000000\n arg_4, arg_5 = divmod(arg_3, 3600)\n arg_6, arg_7 = divmod(arg_5, 60)\n arg_0._err('Expected <%s> to be close to <%s> within tolerance <%d:%02d:%02d>, but was not.' % (arg_0.val.strftime('%Y-%m-%d %H:%M:%S'), arg_1.strftime('%Y-%m-%d %H:%M:%S'), arg_4, arg_6, arg_7))\n else:\n arg_0._err('Expected <%s> to be close to <%s> within tolerance <%s>, but was not.' % (arg_0.val, arg_1, arg_2))\n return arg_0"} +{"_id": "doc_6859", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is case-insensitive equal to other.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a string')\n if not isinstance(arg_1, str_types):\n raise TypeError('given arg must be a string')\n if arg_0.val.lower() != arg_1.lower():\n arg_0._err('Expected <%s> to be case-insensitive equal to <%s>, but was not.' % (arg_0.val, arg_1))\n return arg_0"} +{"_id": "doc_6860", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is string or iterable and ends with suffix.\"\"\"\n if arg_1 is None:\n raise TypeError('given suffix arg must not be none')\n if isinstance(arg_0.val, str_types):\n if not isinstance(arg_1, str_types):\n raise TypeError('given suffix arg must be a string')\n if len(arg_1) == 0:\n raise ValueError('given suffix arg must not be empty')\n if not arg_0.val.endswith(arg_1):\n arg_0._err('Expected <%s> to end with <%s>, but did not.' % (arg_0.val, arg_1))\n elif isinstance(arg_0.val, Iterable):\n if len(arg_0.val) == 0:\n raise ValueError('val must not be empty')\n arg_2 = None\n for arg_2 in arg_0.val:\n pass\n if arg_2 != arg_1:\n arg_0._err('Expected %s to end with <%s>, but did not.' % (arg_0.val, arg_1))\n else:\n raise TypeError('val is not a string or iterable')\n return arg_0"} +{"_id": "doc_6861", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is string and Func regex pattern.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a string')\n if not isinstance(arg_1, str_types):\n raise TypeError('given pattern arg must be a string')\n if len(arg_1) == 0:\n raise ValueError('given pattern arg must not be empty')\n if re.search(arg_1, arg_0.val) is None:\n arg_0._err('Expected <%s> to match pattern <%s>, but did not.' 
% (arg_0.val, arg_1))\n return arg_0"} +{"_id": "doc_6862", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is non-empty string and all characters are alphabetic.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a string')\n if len(arg_0.val) == 0:\n raise ValueError('val is empty')\n if not arg_0.val.isalpha():\n arg_0._err('Expected <%s> to contain only alphabetic chars, but did not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6863", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is non-empty string and all characters are digits.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a string')\n if len(arg_0.val) == 0:\n raise ValueError('val is empty')\n if not arg_0.val.isdigit():\n arg_0._err('Expected <%s> to contain only digits, but did not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6864", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is non-empty string and all characters are lowercase.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a string')\n if len(arg_0.val) == 0:\n raise ValueError('val is empty')\n if arg_0.val != arg_0.val.lower():\n arg_0._err('Expected <%s> to contain only lowercase chars, but did not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6865", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is non-empty string and all characters are uppercase.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a string')\n if len(arg_0.val) == 0:\n raise ValueError('val is empty')\n if arg_0.val != arg_0.val.upper():\n arg_0._err('Expected <%s> to contain only uppercase chars, but did not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6866", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is a unicode string.\"\"\"\n if type(arg_0.val) is not unicode:\n arg_0._err('Expected <%s> to be unicode, but was <%s>.' % (arg_0.val, type(arg_0.val).__name__))\n return arg_0"} +{"_id": "doc_6867", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Asserts that val is iterable and a subset of the given superset or flattened superset if multiple supersets are given.\"\"\"\n if not isinstance(arg_0.val, Iterable):\n raise TypeError('val is not iterable')\n if len(arg_1) == 0:\n raise ValueError('one or more superset args must be given')\n\n arg_2 = []\n if hasattr(arg_0.val, 'keys') and callable(getattr(arg_0.val, 'keys')) and hasattr(arg_0.val, '__getitem__'):\n # flatten superset dicts\n arg_3 = {}\n for arg_4,arg_5 in enumerate(arg_1):\n arg_0._check_dict_like(arg_5, check_values=False, name='arg #%d' % (arg_4+1))\n for arg_6 in arg_5.keys():\n arg_3.update({arg_6: arg_5[arg_6]})\n\n for arg_7 in arg_0.val.keys():\n if arg_7 not in arg_3:\n arg_2.append({arg_7: arg_0.val[arg_7]}) # bad key\n elif arg_0.val[arg_7] != arg_3[arg_7]:\n arg_2.append({arg_7: arg_0.val[arg_7]}) # bad val\n if arg_2:\n arg_0._err('Expected <%s> to be subset of %s, but %s %s missing.' % (arg_0.val, arg_0._fmt_items(arg_3), arg_0._fmt_items(arg_2), 'was' if len(arg_2) == 1 else 'were'))\n else:\n # flatten supersets\n arg_8 = set()\n for arg_5 in arg_1:\n try:\n for arg_6 in arg_5:\n arg_8.add(arg_6)\n except Exception:\n arg_8.add(arg_5)\n\n for arg_7 in arg_0.val:\n if arg_7 not in arg_8:\n arg_2.append(arg_7)\n if arg_2:\n arg_0._err('Expected <%s> to be subset of %s, but %s %s missing.' 
% (arg_0.val, arg_0._fmt_items(arg_8), arg_0._fmt_items(arg_2), 'was' if len(arg_2) == 1 else 'were'))\n\n return arg_0"} +{"_id": "doc_6868", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Asserts that val is a dict and contains the given value or values.\"\"\"\n arg_0._check_dict_like(arg_0.val, check_getitem=False)\n if len(arg_1) == 0:\n raise ValueError('one or more value args must be given')\n arg_2 = []\n for arg_3 in arg_1:\n if arg_3 not in arg_0.val.values():\n arg_2.append(arg_3)\n if arg_2:\n arg_0._err('Expected <%s> to contain values %s, but did not contain %s.' % (arg_0.val, arg_0._fmt_items(arg_1), arg_0._fmt_items(arg_2)))\n return arg_0"} +{"_id": "doc_6869", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Asserts that val is a dict and contains the given entry or entries.\"\"\"\n arg_0._check_dict_like(arg_0.val, check_values=False)\n arg_3 = list(arg_1) + [{arg_6:v} for arg_6,v in arg_2.items()]\n if len(arg_3) == 0:\n raise ValueError('one or more entry args must be given')\n arg_4 = []\n for arg_5 in arg_3:\n if type(arg_5) is not dict:\n raise TypeError('given entry arg must be a dict')\n if len(arg_5) != 1:\n raise ValueError('given entry args must contain exactly one key-value pair')\n arg_6 = next(iter(arg_5))\n if arg_6 not in arg_0.val:\n arg_4.append(arg_5) # bad key\n elif arg_0.val[arg_6] != arg_5[arg_6]:\n arg_4.append(arg_5) # bad val\n if arg_4:\n arg_0._err('Expected <%s> to contain entries %s, but did not contain %s.' % (arg_0.val, arg_0._fmt_items(arg_3), arg_0._fmt_items(arg_4)))\n return arg_0"} +{"_id": "doc_6870", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is a date and is before other date.\"\"\"\n if type(arg_0.val) is not datetime.datetime:\n raise TypeError('val must be datetime, but was type <%s>' % type(arg_0.val).__name__)\n if type(arg_1) is not datetime.datetime:\n raise TypeError('given arg must be datetime, but was type <%s>' % type(arg_1).__name__)\n if arg_0.val >= arg_1:\n arg_0._err('Expected <%s> to be before <%s>, but was not.' % (arg_0.val.strftime('%Y-%m-%d %H:%M:%S'), arg_1.strftime('%Y-%m-%d %H:%M:%S')))\n return arg_0"} +{"_id": "doc_6871", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is a path and that it Func.\"\"\"\n if not isinstance(arg_0.val, str_types):\n raise TypeError('val is not a path')\n if not os.path.Func(arg_0.val):\n arg_0._err('Expected <%s> to exist, but was not found.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6872", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is an existing path to a file.\"\"\"\n arg_0.exists()\n if not os.path.isfile(arg_0.val):\n arg_0._err('Expected <%s> to be a file, but was not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6873", "title": "", "text": "def Func(arg_0):\n \"\"\"Asserts that val is an existing path to a directory.\"\"\"\n arg_0.exists()\n if not os.path.isdir(arg_0.val):\n arg_0._err('Expected <%s> to be a directory, but was not.' % arg_0.val)\n return arg_0"} +{"_id": "doc_6874", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is an existing path to a file and that file is named filename.\"\"\"\n arg_0.is_file()\n if not isinstance(arg_1, str_types):\n raise TypeError('given filename arg must be a path')\n arg_2 = os.path.basename(os.path.abspath(arg_0.val))\n if arg_2 != arg_1:\n arg_0._err('Expected filename <%s> to be equal to <%s>, but was not.' 
% (arg_2, arg_1))\n return arg_0"} +{"_id": "doc_6875", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is an existing path to a file and that file is a child of parent.\"\"\"\n arg_0.is_file()\n if not isinstance(arg_1, str_types):\n raise TypeError('given parent directory arg must be a path')\n arg_2 = os.path.abspath(arg_0.val)\n arg_3 = os.path.abspath(arg_1)\n if not arg_2.startswith(arg_3):\n arg_0._err('Expected file <%s> to be a child of <%s>, but was not.' % (arg_2, arg_3))\n return arg_0"} +{"_id": "doc_6876", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Asserts that val is callable and that when called Func the given error.\"\"\"\n if not callable(arg_0.val):\n raise TypeError('val must be callable')\n if not issubclass(arg_1, BaseException):\n raise TypeError('given arg must be exception')\n return AssertionBuilder(arg_0.val, arg_0.description, arg_0.kind, arg_1)"} +{"_id": "doc_6877", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Asserts the val callable when invoked with the given args and kwargs raises the expected exception.\"\"\"\n if not arg_0.expected:\n raise TypeError('expected exception not set, raises() must be called first')\n try:\n arg_0.val(*arg_1, **arg_2)\n except BaseException as e:\n if issubclass(type(e), arg_0.expected):\n # chain on with exception message as val\n return AssertionBuilder(str(e), arg_0.description, arg_0.kind)\n else:\n # got exception, but wrong type, so raise\n arg_0._err('Expected <%s> to raise <%s> when called with (%s), but raised <%s>.' % (\n arg_0.val.__name__,\n arg_0.expected.__name__,\n arg_0._fmt_args_kwargs(*arg_1, **arg_2),\n type(e).__name__))\n\n # didn't fail as expected, so raise\n arg_0._err('Expected <%s> to raise <%s> when called with (%s).' 
% (\n arg_0.val.__name__,\n arg_0.expected.__name__,\n arg_0._fmt_args_kwargs(*arg_1, **arg_2)))"} +{"_id": "doc_6878", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Helper to convert the given args and kwargs into a string.\"\"\"\n if arg_1:\n arg_3 = str(arg_1).lstrip('(').rstrip(',)')\n if arg_2:\n arg_4 = ', '.join([str(i).lstrip('(').rstrip(')').replace(', ',': ') for i in [\n (k,arg_2[k]) for k in sorted(arg_2.keys())]])\n\n if arg_1 and arg_2:\n return arg_3 + ', ' + arg_4\n elif arg_1:\n return arg_3\n elif arg_2:\n return arg_4\n else:\n return ''"} +{"_id": "doc_6879", "title": "", "text": "def Func(arg_0, arg_1='cleaned_data', arg_2=False):\n \"\"\"\n Generate CSV file for training and testing data\n\n Input\n =====\n best_path: str, path to BEST folder which contains unzipped subfolder\n 'article', 'encyclopedia', 'news', 'novel'\n\n cleaned_data: str, path to output folder, the cleaned data will be saved\n in the given folder name where training set will be stored in `train` folder\n and testing set will be stored on `test` folder\n\n create_val: boolean, True or False, if True, divide training set into training set and\n validation set in `val` folder\n \"\"\"\n if not os.path.isdir(arg_1):\n os.mkdir(arg_1)\n if not os.path.isdir(os.path.join(arg_1, 'train')):\n os.makedirs(os.path.join(arg_1, 'train'))\n if not os.path.isdir(os.path.join(arg_1, 'test')):\n os.makedirs(os.path.join(arg_1, 'test'))\n if not os.path.isdir(os.path.join(arg_1, 'val')) and arg_2:\n os.makedirs(os.path.join(arg_1, 'val'))\n\n for arg_3 in article_types:\n arg_4 = glob(os.path.join(arg_0, arg_3, '*.txt'))\n arg_5, arg_6 = train_test_split(arg_4, random_state=0, test_size=0.1)\n if arg_2:\n arg_5, arg_7 = train_test_split(arg_5, random_state=0, test_size=0.1)\n arg_8 = generate_words(arg_7)\n arg_9 = create_char_dataframe(arg_8)\n arg_9.to_csv(os.path.join(arg_1, 'val', 'df_best_{}_val.csv'.format(arg_3)), index=False)\n arg_10 = generate_words(arg_5)\n arg_11 = generate_words(arg_6)\n arg_12 = create_char_dataframe(arg_10)\n arg_13 = create_char_dataframe(arg_11)\n arg_12.to_csv(os.path.join(arg_1, 'train', 'df_best_{}_train.csv'.format(arg_3)), index=False)\n arg_13.to_csv(os.path.join(arg_1, 'test', 'df_best_{}_test.csv'.format(arg_3)), index=False)\n print(\"Save {} to CSV file\".format(arg_3))"} +{"_id": "doc_6880", "title": "", "text": "def Func(arg_0, arg_1='train'):\n \"\"\"\n Transform processed path into feature matrix and output array\n\n Input\n =====\n best_processed_path: str, path to processed BEST dataset\n\n option: str, 'train' or 'test'\n \"\"\"\n # padding for training and testing set\n arg_2 = 21\n arg_3 = int((arg_2 - 1)/2)\n arg_4 = [{'char': ' ', 'type': 'p', 'target': True}]\n arg_5 = pd.DataFrame(arg_4 * arg_3)\n\n arg_6 = []\n for arg_7 in article_types:\n arg_6.append(pd.read_csv(os.path.join(arg_0, arg_1, 'df_best_{}_{}.csv'.format(arg_7, arg_1))))\n arg_6 = pd.concat(arg_6)\n arg_6 = pd.concat((arg_5, arg_6, arg_5)) # pad with empty string feature\n\n arg_6['char'] = arg_6['char'].map(lambda x: CHARS_MAP.get(x, 80))\n arg_6['type'] = arg_6['type'].map(lambda x: CHAR_TYPES_MAP.get(x, 4))\n arg_5 = create_n_gram_df(arg_6, arg_2=arg_2)\n\n arg_8 = ['char' + str(i + 1) for i in range(arg_3)] + \\\n ['char-' + str(i + 1) for i in range(arg_3)] + ['char']\n arg_9 = ['type' + str(i + 1) for i in range(arg_3)] + \\\n ['type-' + str(i + 1) for i in range(arg_3)] + ['type']\n\n arg_10 = arg_5[arg_8].as_matrix()\n arg_11 = arg_5[arg_9].as_matrix()\n arg_12 = 
arg_5['target'].astype(int).as_matrix()\n\n return arg_10, arg_11, arg_12"} +{"_id": "doc_6881", "title": "", "text": "def Func(arg_0, arg_1='../weight/model_weight.h5', arg_2=2):\n \"\"\"\n Given path to processed BEST dataset,\n train CNN model for words beginning alongside with\n character label encoder and character type label encoder\n\n Input\n =====\n best_processed_path: str, path to processed BEST dataset\n weight_path: str, path to weight path file\n verbose: int, verbost option for training Keras model\n\n Output\n ======\n model: keras model, keras model for tokenize prediction\n \"\"\"\n\n arg_3, arg_4, arg_5 = prepare_feature(arg_0, option='train')\n arg_6, arg_7, arg_8 = prepare_feature(arg_0, option='test')\n\n arg_9 = False\n if os.path.isdir(os.path.join(arg_0, 'val')):\n arg_9 = True\n arg_10, arg_11, arg_12 = prepare_feature(arg_0, option='val')\n\n if not os.path.isdir(os.path.dirname(arg_1)):\n os.makedirs(os.path.dirname(arg_1)) # make directory if weight does not exist\n\n arg_13 = [\n ReduceLROnPlateau(),\n ModelCheckpoint(\n arg_1,\n save_best_only=True,\n save_weights_only=True,\n monitor='val_loss',\n mode='min',\n arg_2=1\n )\n ]\n\n # train model\n arg_14 = get_convo_nn2()\n arg_15 = [(10, 256), (3, 512), (3, 2048), (3, 4096), (3, 8192)]\n for (arg_16, arg_17) in arg_15:\n print(\"train with {} epochs and {} batch size\".format(arg_16, arg_17))\n if arg_9:\n arg_14.fit([arg_3, arg_4], arg_5,\n arg_16=arg_16, arg_17=arg_17,\n arg_2=arg_2,\n callbacks=arg_13,\n validation_data=([arg_10, arg_11], arg_12))\n else:\n arg_14.fit([arg_3, arg_4], arg_5,\n arg_16=arg_16, arg_17=arg_17,\n arg_2=arg_2,\n callbacks=arg_13)\n return arg_14"} +{"_id": "doc_6882", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Tokenize given Thai text string\n\n Input\n =====\n text: str, Thai text string\n custom_dict: str (or list), path to customized dictionary file\n It allows the function not to Func given dictionary wrongly.\n The file should contain custom words separated by line.\n Alternatively, you can provide list of custom words too.\n\n Output\n ======\n tokens: list, list of Funcd words\n\n Example\n =======\n >> deepcut.Func('\u0e15\u0e31\u0e14\u0e04\u0e33\u0e44\u0e14\u0e49\u0e14\u0e35\u0e21\u0e32\u0e01')\n >> ['\u0e15\u0e31\u0e14\u0e04\u0e33','\u0e44\u0e14\u0e49','\u0e14\u0e35','\u0e21\u0e32\u0e01']\n\n \"\"\"\n global arg_2\n if not arg_2:\n arg_2 = DeepcutTokenizer()\n return arg_2.Func(arg_0, arg_1=arg_1)"} +{"_id": "doc_6883", "title": "", "text": "def Func(arg_0, arg_1=21):\n \"\"\"\n Create feature array of character and surrounding characters\n \"\"\"\n arg_2 = len(arg_0)\n arg_3 = int((arg_1 - 1)/2)\n arg_4 = [' '] * arg_3 + [t for t in arg_0] + [' '] * arg_3\n arg_5, arg_6 = [], []\n for arg_7 in range(arg_3, arg_3 + arg_2):\n arg_8 = arg_4[arg_7 + 1: arg_7 + arg_3 + 1] + \\\n list(reversed(arg_4[arg_7 - arg_3: arg_7])) + \\\n [arg_4[arg_7]]\n arg_9 = [CHARS_MAP.get(c, 80) for c in arg_8]\n arg_10 = [CHAR_TYPES_MAP.get(CHAR_TYPE_FLATTEN.get(c, 'o'), 4)\n for c in arg_8]\n arg_5.append(arg_9)\n arg_6.append(arg_10)\n arg_5 = np.array(arg_5).astype(float)\n arg_6 = np.array(arg_6).astype(float)\n return arg_5, arg_6"} +{"_id": "doc_6884", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Given input dataframe, create feature dataframe of shifted characters\n \"\"\"\n arg_2 = int((arg_1 - 1)/2)\n for arg_3 in range(arg_2):\n arg_0['char-{}'.format(arg_3+1)] = arg_0['char'].shift(arg_3 + 1)\n arg_0['type-{}'.format(arg_3+1)] = 
arg_0['type'].shift(arg_3 + 1)\n arg_0['char{}'.format(arg_3+1)] = arg_0['char'].shift(-arg_3 - 1)\n arg_0['type{}'.format(arg_3+1)] = arg_0['type'].shift(-arg_3 - 1)\n return arg_0[arg_2: -arg_2]"} +{"_id": "doc_6885", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=True):\n \"\"\"Wraps a fileobj in a bandwidth limited stream wrapper\n\n :type fileobj: file-like obj\n :param fileobj: The file-like obj to wrap\n\n :type transfer_coordinator: s3transfer.futures.TransferCoordinator\n param transfer_coordinator: The coordinator for the general transfer\n that the wrapped stream is a part of\n\n :type enabled: boolean\n :param enabled: Whether bandwidth limiting should be enabled to start\n \"\"\"\n arg_4 = BandwidthLimitedStream(\n arg_1, arg_0._leaky_bucket, arg_2,\n arg_0._time_utils)\n if not arg_3:\n arg_4.disable_bandwidth_limiting()\n return arg_4"} +{"_id": "doc_6886", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Read a specified amount\n\n Reads will only be throttled if bandwidth limiting is enabled.\n \"\"\"\n if not arg_0._bandwidth_limiting_enabled:\n return arg_0._fileobj.Func(arg_1)\n\n # We do not want to be calling consume on every Func as the Func\n # amounts can be small causing the lock of the leaky bucket to\n # introduce noticeable overhead. So instead we keep track of\n # how many bytes we have seen and only call consume once we pass a\n # certain threshold.\n arg_0._bytes_seen += arg_1\n if arg_0._bytes_seen < arg_0._bytes_threshold:\n return arg_0._fileobj.Func(arg_1)\n\n arg_0._consume_through_leaky_bucket()\n return arg_0._fileobj.Func(arg_1)"} +{"_id": "doc_6887", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Consume an a requested amount\n\n :type amt: int\n :param amt: The amount of bytes to request to Func\n\n :type request_token: RequestToken\n :param request_token: The token associated to the consumption\n request that is used to identify the request. 
So if a\n RequestExceededException is raised the token should be used\n in subsequent retry Func() request.\n\n :raises RequestExceededException: If the consumption amount would\n exceed the maximum allocated bandwidth\n\n :rtype: int\n :returns: The amount Funcd\n \"\"\"\n with arg_0._lock:\n arg_3 = arg_0._time_utils.time()\n if arg_0._consumption_scheduler.is_scheduled(arg_2):\n return arg_0._release_requested_amt_for_scheduled_request(\n arg_1, arg_2, arg_3)\n elif arg_0._projected_to_exceed_max_rate(arg_1, arg_3):\n arg_0._raise_request_exceeded_exception(\n arg_1, arg_2, arg_3)\n else:\n return arg_0._release_requested_amt(arg_1, arg_3)"} +{"_id": "doc_6888", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Schedules a wait time to be able to consume an amount\n\n :type amt: int\n :param amt: The amount of bytes scheduled to be consumed\n\n :type token: RequestToken\n :param token: The token associated to the consumption\n request that is used to identify the request.\n\n :type time_to_consume: float\n :param time_to_consume: The desired time it should take for that\n specific request amount to be consumed in regardless of previously\n scheduled consumption requests\n\n :rtype: float\n :returns: The amount of time to wait for the specific request before\n actually consuming the specified amount.\n \"\"\"\n arg_0._total_wait += arg_3\n arg_0._tokens_to_scheduled_consumption[arg_2] = {\n 'wait_duration': arg_0._total_wait,\n 'time_to_consume': arg_3,\n }\n return arg_0._total_wait"} +{"_id": "doc_6889", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get the projected rate using a provided amount and time\n\n :type amt: int\n :param amt: The proposed amount to consume\n\n :type time_at_consumption: float\n :param time_at_consumption: The proposed time to consume at\n\n :rtype: float\n :returns: The consumption rate if that amt and time were consumed\n \"\"\"\n if arg_0._last_time is None:\n return 0.0\n return arg_0._calculate_exponential_moving_average_rate(\n arg_1, arg_2)"} +{"_id": "doc_6890", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Record the consumption rate based off amount and time point\n\n :type amt: int\n :param amt: The amount that got consumed\n\n :type time_at_consumption: float\n :param time_at_consumption: The time at which the amount was consumed\n \"\"\"\n if arg_0._last_time is None:\n arg_0._last_time = arg_2\n arg_0._current_rate = 0.0\n return\n arg_0._current_rate = arg_0._calculate_exponential_moving_average_rate(\n arg_1, arg_2)\n arg_0._last_time = arg_2"} +{"_id": "doc_6891", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=None):\n \"\"\"Downloads the object's contents to a file\n\n :type bucket: str\n :param bucket: The name of the bucket to download from\n\n :type key: str\n :param key: The name of the key to download from\n\n :type filename: str\n :param filename: The name of a file to download to.\n\n :type extra_args: dict\n :param extra_args: Extra arguments that may be passed to the\n client operation\n\n :type expected_size: int\n :param expected_size: The expected size in bytes of the download. If\n provided, the downloader will not call HeadObject to determine the\n object's size and use the provided value instead. 
The size is\n needed to determine whether to do a multipart download.\n\n :rtype: s3transfer.futures.TransferFuture\n :returns: Transfer future representing the download\n \"\"\"\n arg_0._start_if_needed()\n if arg_4 is None:\n arg_4 = {}\n arg_0._validate_all_known_args(arg_4)\n arg_6 = arg_0._transfer_monitor.notify_new_transfer()\n arg_7 = DownloadFileRequest(\n arg_6=arg_6, arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5,\n )\n logger.debug(\n 'Submitting download file request: %s.', arg_7)\n arg_0._download_request_queue.put(arg_7)\n arg_8 = CallArgs(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5)\n arg_9 = arg_0._get_transfer_future(arg_6, arg_8)\n return arg_9"} +{"_id": "doc_6892", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Poll for the result of a transfer\n\n :param transfer_id: Unique identifier for the transfer\n :return: If the transfer succeeded, it will return the result. If the\n transfer failed, it will raise the exception associated to the\n failure.\n \"\"\"\n arg_0._transfer_states[arg_1].wait_till_done()\n arg_2 = arg_0._transfer_states[arg_1].exception\n if arg_2:\n raise arg_2\n return None"} +{"_id": "doc_6893", "title": "", "text": "def Func(arg_0):\n \"\"\"Decrement the count by one\"\"\"\n with arg_0._lock:\n if arg_0._count == 0:\n raise RuntimeError(\n 'Counter is at zero. It cannot dip below zero')\n arg_0._count -= 1\n if arg_0._is_finalized and arg_0._count == 0:\n arg_0._callback()"} +{"_id": "doc_6894", "title": "", "text": "def Func(arg_0):\n \"\"\"Finalize the counter\n\n Once Funcd, the counter never be incremented and the callback\n can be invoked once the count reaches zero\n \"\"\"\n with arg_0._lock:\n arg_0._is_Funcd = True\n if arg_0._count == 0:\n arg_0._callback()"} +{"_id": "doc_6895", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks to see if a file is a special UNIX file.\n\n It checks if the file is a character special device, block special\n device, FIFO, or socket.\n\n :param filename: Name of the file\n\n :returns: True if the file is a special file. False, if is not.\n \"\"\"\n # If it does not exist, it must be a new file so it cannot be\n # a special file.\n if not os.path.exists(arg_1):\n return False\n arg_2 = os.stat(arg_1).st_mode\n # Character special device.\n if stat.S_ISCHR(arg_2):\n return True\n # Block special device\n if stat.S_ISBLK(arg_2):\n return True\n # Named pipe / FIFO\n if stat.S_ISFIFO(arg_2):\n return True\n # Socket.\n if stat.S_ISSOCK(arg_2):\n return True\n return False"} +{"_id": "doc_6896", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Get a chunksize close to current that fits within all S3 limits.\n\n :type current_chunksize: int\n :param current_chunksize: The currently configured chunksize.\n\n :type file_size: int or None\n :param file_size: The size of the file to upload. 
This might be None\n if the object being transferred has an unknown size.\n\n :returns: A valid chunksize that fits within configured limits.\n \"\"\"\n arg_3 = arg_1\n if arg_2 is not None:\n arg_3 = arg_0._adjust_for_max_parts(arg_3, arg_2)\n return arg_0._adjust_for_chunksize_limits(arg_3)"} +{"_id": "doc_6897", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Queue IO write for submission to the IO executor.\n\n This method accepts an IO executor and information about the\n downloaded data, and handles submitting this to the IO executor.\n\n This method may defer submission to the IO executor if necessary.\n\n \"\"\"\n arg_0._transfer_coordinator.submit(\n arg_0._io_executor,\n arg_0.get_io_write_task(arg_1, arg_2, arg_3)\n )"} +{"_id": "doc_6898", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Retrieves a class for managing output for a download\n\n :type transfer_future: s3transfer.futures.TransferFuture\n :param transfer_future: The transfer future for the request\n\n :type osutil: s3transfer.utils.OSUtils\n :param osutil: The os utility associated to the transfer\n\n :rtype: class of DownloadOutputManager\n :returns: The appropriate class to use for managing a specific type of\n input for downloads.\n \"\"\"\n arg_3 = [\n DownloadSpecialFilenameOutputManager,\n DownloadFilenameOutputManager,\n DownloadSeekableOutputManager,\n DownloadNonSeekableOutputManager,\n ]\n\n arg_4 = arg_1.meta.call_args.fileobj\n for arg_5 in arg_3:\n if arg_5.is_compatible(arg_4, arg_2):\n return arg_5\n raise RuntimeError(\n 'Output %s of type: %s is not supported.' % (\n arg_4, type(arg_4)))"} +{"_id": "doc_6899", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6,\n arg_7, arg_8, arg_9,\n arg_10=0, arg_11=None):\n \"\"\"Downloads an object and places content into io queue\n\n :param client: The client to use when calling GetObject\n :param bucket: The bucket to download from\n :param key: The key to download from\n :param fileobj: The file handle to write content to\n :param exta_args: Any extra arguements to include in GetObject request\n :param callbacks: List of progress callbacks to invoke on download\n :param max_attempts: The number of retries to do when downloading\n :param download_output_manager: The download output manager associated\n with the current download.\n :param io_chunksize: The size of each io chunk to read from the\n download stream and queue in the io queue.\n :param start_index: The location in the file to start writing the\n content of the key to.\n :param bandwidth_limiter: The bandwidth limiter to use when throttling\n the downloading of data in streams.\n \"\"\"\n arg_12 = None\n for arg_13 in range(arg_7):\n try:\n arg_14 = arg_1.get_object(\n Bucket=arg_2, Key=arg_3, **arg_5)\n arg_15 = StreamReaderProgress(\n arg_14['Body'], arg_6)\n if arg_11:\n arg_15 = \\\n arg_11.get_bandwith_limited_stream(\n arg_15, arg_0._transfer_coordinator)\n\n arg_16 = arg_10\n arg_17 = DownloadChunkIterator(arg_15, arg_9)\n for arg_18 in arg_17:\n # If the transfer is done because of a cancellation\n # or error somewhere else, stop trying to submit more\n # data to be written and break out of the download.\n if not arg_0._transfer_coordinator.done():\n arg_0._handle_io(\n arg_8, arg_4, arg_18,\n arg_16\n )\n arg_16 += len(arg_18)\n else:\n return\n return\n except S3_RETRYABLE_DOWNLOAD_ERRORS as e:\n logger.debug(\"Retrying exception caught (%s), \"\n \"retrying request, (attempt %s / %s)\", e, arg_13,\n arg_7, exc_info=True)\n 
arg_12 = e\n # Also invoke the progress callbacks to indicate that we\n # are trying to download the stream again and all progress\n # for this GetObject has been lost.\n invoke_progress_callbacks(\n arg_6, arg_10 - arg_16)\n continue\n raise RetriesExceededError(arg_12)"} +{"_id": "doc_6900", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Pulls off an io queue to write contents to a file\n\n :param fileobj: The file handle to write content to\n :param data: The data to write\n :param offset: The offset to write the data to.\n \"\"\"\n arg_1.seek(arg_3)\n arg_1.write(arg_2)"} +{"_id": "doc_6901", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Request any available writes given new incoming data.\n\n You call this method by providing new data along with the\n offset associated with the data. If that new data unlocks\n any contiguous writes that can now be submitted, this\n method will return all applicable writes.\n\n This is done with 1 method call so you don't have to\n make two method calls (put(), get()) which acquires a lock\n each method call.\n\n \"\"\"\n if arg_1 < arg_0._next_offset:\n # This is a request for a write that we've already\n # seen. This can happen in the event of a retry\n # where if we retry at at offset N/2, we'll requeue\n # offsets 0-N/2 again.\n return []\n arg_3 = []\n if arg_1 in arg_0._pending_offsets:\n # We've already queued this offset so this request is\n # a duplicate. In this case we should ignore\n # this request and prefer what's already queued.\n return []\n heapq.heappush(arg_0._writes, (arg_1, arg_2))\n arg_0._pending_offsets.add(arg_1)\n while arg_0._writes and arg_0._writes[0][0] == arg_0._next_offset:\n arg_4 = heapq.heappop(arg_0._writes)\n arg_3.append({'offset': arg_4[0], 'data': arg_4[1]})\n arg_0._pending_offsets.remove(arg_4[0])\n arg_0._next_offset += len(arg_4[1])\n return arg_3"} +{"_id": "doc_6902", "title": "", "text": "def Func(arg_0):\n \"\"\"Backwards compat function to determine if a fileobj is Func\n\n :param fileobj: The file-like object to determine if Func\n\n :returns: True, if Func. False, otherwise.\n \"\"\"\n # If the fileobj has a Func attr, try calling the Func()\n # method on it.\n if hasattr(arg_0, 'Func'):\n return arg_0.Func()\n # If there is no Func attr, check if the object can be seeked\n # or telled. If it can, try to seek to the current position.\n elif hasattr(arg_0, 'seek') and hasattr(arg_0, 'tell'):\n try:\n arg_0.seek(0, 1)\n return True\n except (OSError, IOError):\n # If an io related error was thrown then it is not Func.\n return False\n # Else, the fileobj is not Func\n return False"} +{"_id": "doc_6903", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=None):\n \"\"\"Downloads a file from S3\n\n :type bucket: str\n :param bucket: The name of the bucket to Func from\n\n :type key: str\n :param key: The name of the key to Func from\n\n :type fileobj: str or seekable file-like object\n :param fileobj: The name of a file to Func or a seekable file-like\n object to Func. 
It is recommended to use a filename because\n file-like objects may result in higher memory usage.\n\n :type extra_args: dict\n :param extra_args: Extra arguments that may be passed to the\n client operation\n\n :type subscribers: list(s3transfer.subscribers.BaseSubscriber)\n :param subscribers: The list of subscribers to be invoked in the\n order provided based on the event emit during the process of\n the transfer request.\n\n :rtype: s3transfer.futures.TransferFuture\n :returns: Transfer future representing the Func\n \"\"\"\n if arg_4 is None:\n arg_4 = {}\n if arg_5 is None:\n arg_5 = []\n arg_0._validate_all_known_args(arg_4, arg_0.ALLOWED_DOWNLOAD_ARGS)\n arg_6 = CallArgs(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4,\n arg_5=arg_5\n )\n arg_7 = {'io_executor': arg_0._io_executor}\n if arg_0._bandwidth_limiter:\n arg_7['bandwidth_limiter'] = arg_0._bandwidth_limiter\n return arg_0._submit_transfer(\n arg_6, DownloadSubmissionTask, arg_7)"} +{"_id": "doc_6904", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=None, arg_6=None):\n \"\"\"Copies a file in S3\n\n :type Func_source: dict\n :param Func_source: The name of the source bucket, key name of the\n source object, and optional version ID of the source object. The\n dictionary format is:\n ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note\n that the ``VersionId`` key is optional and may be omitted.\n\n :type bucket: str\n :param bucket: The name of the bucket to Func to\n\n :type key: str\n :param key: The name of the key to Func to\n\n :type extra_args: dict\n :param extra_args: Extra arguments that may be passed to the\n client operation\n\n :type subscribers: a list of subscribers\n :param subscribers: The list of subscribers to be invoked in the\n order provided based on the event emit during the process of\n the transfer request.\n\n :type source_client: botocore or boto3 Client\n :param source_client: The client to be used for operation that\n may happen at the source object. For example, this client is\n used for the head_object that determines the size of the Func.\n If no client is provided, the transfer manager's client is used\n as the client for the source object.\n\n :rtype: s3transfer.futures.TransferFuture\n :returns: Transfer future representing the Func\n \"\"\"\n if arg_4 is None:\n arg_4 = {}\n if arg_5 is None:\n arg_5 = []\n if arg_6 is None:\n arg_6 = arg_0._client\n arg_0._validate_all_known_args(arg_4, arg_0.ALLOWED_COPY_ARGS)\n arg_7 = CallArgs(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4, arg_5=arg_5,\n arg_6=arg_6\n )\n return arg_0._submit_transfer(arg_7, CopySubmissionTask)"} +{"_id": "doc_6905", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n \"\"\"Delete an S3 object.\n\n :type bucket: str\n :param bucket: The name of the bucket.\n\n :type key: str\n :param key: The name of the S3 object to Func.\n\n :type extra_args: dict\n :param extra_args: Extra arguments that may be passed to the\n DeleteObject call.\n\n :type subscribers: list\n :param subscribers: A list of subscribers to be invoked during the\n process of the transfer request. 
Note that the ``on_progress``\n callback is not invoked during object deletion.\n\n :rtype: s3transfer.futures.TransferFuture\n :return: Transfer future representing the deletion.\n\n \"\"\"\n if arg_3 is None:\n arg_3 = {}\n if arg_4 is None:\n arg_4 = []\n arg_0._validate_all_known_args(arg_3, arg_0.ALLOWED_DELETE_ARGS)\n arg_5 = CallArgs(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4\n )\n return arg_0._submit_transfer(arg_5, DeleteSubmissionTask)"} +{"_id": "doc_6906", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=''):\n \"\"\"Shutdown the TransferManager\n\n It will wait till all transfers complete before it completely shuts\n down.\n\n :type cancel: boolean\n :param cancel: If True, calls TransferFuture.cancel() for\n all in-progress in transfers. This is useful if you want the\n Func to happen quicker.\n\n :type cancel_msg: str\n :param cancel_msg: The message to specify if canceling all in-progress\n transfers.\n \"\"\"\n arg_0._Func(arg_1, arg_1, arg_2)"} +{"_id": "doc_6907", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=arg_3):\n \"\"\"Cancels all inprogress transfers\n\n This Funcs the inprogress transfers by calling Func() on all\n tracked transfer coordinators.\n\n :param msg: The message to pass on to each transfer coordinator that\n gets Funcled.\n\n :param exc_type: The type of exception to set for the Funclation\n \"\"\"\n for arg_4 in arg_0.tracked_transfer_coordinators:\n arg_4.Func(arg_1, arg_2)"} +{"_id": "doc_6908", "title": "", "text": "def Func(arg_0):\n \"\"\"Wait until there are no more inprogress transfers\n\n This will not stop when failures are encountered and not propogate any\n of these errors from failed transfers, but it can be interrupted with\n a KeyboardInterrupt.\n \"\"\"\n try:\n arg_1 = None\n for arg_1 in arg_0.tracked_transfer_coordinators:\n arg_1.result()\n except KeyboardInterrupt:\n logger.debug('Received KeyboardInterrupt in Func()')\n # If Keyboard interrupt is raised while Funcing for\n # the result, then exit out of the Func and raise the\n # exception\n if arg_1:\n logger.debug(\n 'On KeyboardInterrupt was Funcing for %s',\n arg_1)\n raise\n except Exception:\n # A general exception could have been thrown because\n # of result(). We just want to ignore this and continue\n # because we at least know that the transfer coordinator\n # has completed.\n pass"} +{"_id": "doc_6909", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retrieves a class for managing input for an upload based on file type\n\n :type transfer_future: s3transfer.futures.TransferFuture\n :param transfer_future: The transfer future for the request\n\n :rtype: class of UploadInputManager\n :returns: The appropriate class to use for managing a specific type of\n input for uploads.\n \"\"\"\n arg_2 = [\n UploadFilenameInputManager,\n UploadSeekableInputManager,\n UploadNonSeekableInputManager\n ]\n\n arg_3 = arg_1.meta.call_args.fileobj\n for arg_4 in arg_2:\n if arg_4.is_compatible(arg_3):\n return arg_4\n raise RuntimeError(\n 'Input %s of type: %s is not supported.' % (\n arg_3, type(arg_3)))"} +{"_id": "doc_6910", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Sets the exception on the future.\"\"\"\n if not arg_0.done():\n raise TransferNotDoneError(\n 'Func can only be called once the transfer is '\n 'complete.')\n arg_0._coordinator.Func(arg_1, override=True)"} +{"_id": "doc_6911", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set a result for the TransferFuture\n\n Implies that the TransferFuture succeeded. 
This will always set a\n result because it is invoked on the final task where there is only\n ever one final task and it is ran at the very end of a transfer\n process. So if a result is being set for this final task, the transfer\n succeeded even if something came a long and canceled the transfer\n on the final task.\n \"\"\"\n with arg_0._lock:\n arg_0._exception = None\n arg_0._result = arg_1\n arg_0._status = 'success'"} +{"_id": "doc_6912", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Set an exception for the TransferFuture\n\n Implies the TransferFuture failed.\n\n :param exception: The exception that cause the transfer to fail.\n :param override: If True, override any existing state.\n \"\"\"\n with arg_0._lock:\n if not arg_0.done() or arg_2:\n arg_0._exception = arg_1\n arg_0._status = 'failed'"} +{"_id": "doc_6913", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=arg_3):\n \"\"\"Cancels the TransferFuture\n\n :param msg: The message to attach to the Funclation\n :param exc_type: The type of exception to set for the Funclation\n \"\"\"\n with arg_0._lock:\n if not arg_0.done():\n arg_4 = False\n logger.debug('%s Func(%s) called', arg_0, arg_1)\n arg_0._exception = arg_2(arg_1)\n if arg_0._status == 'not-started':\n arg_4 = True\n arg_0._status = 'Funcled'\n if arg_4:\n arg_0.announce_done()"} +{"_id": "doc_6914", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Submits a task to a provided executor\n\n :type executor: s3transfer.futures.BoundedExecutor\n :param executor: The executor to Func the callable to\n\n :type task: s3transfer.tasks.Task\n :param task: The task to Func to the executor\n\n :type tag: s3transfer.futures.TaskTag\n :param tag: A tag to associate to the Functed task\n\n :rtype: concurrent.futures.Future\n :returns: A future representing the Functed task\n \"\"\"\n logger.debug(\n \"Submitting task %s to executor %s for transfer request: %s.\" % (\n arg_2, arg_1, arg_0.transfer_id)\n )\n arg_4 = arg_1.Func(arg_2, arg_3=arg_3)\n # Add this created future to the list of associated future just\n # in case it is needed during cleanups.\n arg_0.add_associated_future(arg_4)\n arg_4.add_done_callback(\n FunctionContainer(arg_0.remove_associated_future, arg_4))\n return arg_4"} +{"_id": "doc_6915", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Add a done callback to be invoked when transfer is done\"\"\"\n with arg_0._done_callbacks_lock:\n arg_0._done_callbacks.append(\n FunctionContainer(arg_1, *arg_2, **arg_3)\n )"} +{"_id": "doc_6916", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Adds a callback to call upon failure\"\"\"\n with arg_0._failure_cleanups_lock:\n arg_0._failure_cleanups.append(\n FunctionContainer(arg_1, *arg_2, **arg_3))"} +{"_id": "doc_6917", "title": "", "text": "def Func(arg_0):\n \"\"\"Announce that future is done running and run associated callbacks\n\n This will run any failure cleanups if the transfer failed if not\n they have not been run, allows the result() to be unblocked, and will\n run any done callbacks associated to the TransferFuture if they have\n not already been ran.\n \"\"\"\n if arg_0.status != 'success':\n arg_0._run_failure_cleanups()\n arg_0._done_event.set()\n arg_0._run_done_callbacks()"} +{"_id": "doc_6918", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\"Submit a task to complete\n\n :type task: s3transfer.tasks.Task\n :param task: The task to run __call__ on\n\n\n :type tag: 
s3transfer.futures.TaskTag\n :param tag: An optional tag to associate to the task. This\n is used to override which semaphore to use.\n\n :type block: boolean\n :param block: True if to wait till it is possible to Func a task.\n False, if not to wait and raise an error if not able to Func\n a task.\n\n :returns: The future assocaited to the Functed task\n \"\"\"\n arg_4 = arg_0._semaphore\n # If a tag was provided, use the semaphore associated to that\n # tag.\n if arg_2:\n arg_4 = arg_0._tag_semaphores[arg_2]\n\n # Call acquire on the semaphore.\n arg_5 = arg_4.acquire(arg_1.transfer_id, arg_3)\n # Create a callback to invoke when task is done in order to call\n # release on the semaphore.\n arg_6 = FunctionContainer(\n arg_4.release, arg_1.transfer_id, arg_5)\n # Submit the task to the underlying executor.\n arg_7 = ExecutorFuture(arg_0._executor.Func(arg_1))\n # Add the Semaphore.release() callback to the future such that\n # it is invoked once the future completes.\n arg_7.add_done_callback(arg_6)\n return arg_7"} +{"_id": "doc_6919", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=None, arg_5=None):\n \"\"\"Upload a file to an S3 object.\n\n Variants have also been injected into S3 client, Bucket and Object.\n You don't have to use S3Transfer.Func() directly.\n \"\"\"\n if arg_5 is None:\n arg_5 = {}\n arg_0._validate_all_known_args(arg_5, arg_0.ALLOWED_UPLOAD_ARGS)\n arg_6 = arg_0._client.meta.events\n arg_6.register_first('request-created.s3',\n disable_upload_callbacks,\n unique_id='s3upload-callback-disable')\n arg_6.register_last('request-created.s3',\n enable_upload_callbacks,\n unique_id='s3upload-callback-enable')\n if arg_0._osutil.get_file_size(arg_1) >= \\\n arg_0._config.multipart_threshold:\n arg_0._multipart_upload(arg_1, arg_2, arg_3, arg_4, arg_5)\n else:\n arg_0._put_object(arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_6920", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=None):\n \"\"\"Download an S3 object to a file.\n\n Variants have also been injected into S3 client, Bucket and Object.\n You don't have to use S3Transfer.Func() directly.\n \"\"\"\n # This method will issue a ``head_object`` request to determine\n # the size of the S3 object. 
This is used to determine if the\n # object is downloaded in parallel.\n if arg_4 is None:\n arg_4 = {}\n arg_0._validate_all_known_args(arg_4, arg_0.ALLOWED_DOWNLOAD_ARGS)\n arg_6 = arg_0._object_size(arg_1, arg_2, arg_4)\n arg_7 = arg_3 + os.extsep + random_file_extension()\n try:\n arg_0._Func(arg_1, arg_2, arg_7, arg_6,\n arg_4, arg_5)\n except Exception:\n logger.debug(\"Exception caught in Func, removing partial \"\n \"file: %s\", arg_7, exc_info=True)\n arg_0._osutil.remove_file(arg_7)\n raise\n else:\n arg_0._osutil.rename_file(arg_7, arg_3)"} +{"_id": "doc_6921", "title": "", "text": "def Func(arg_0):\n \"\"\"Find functions with step decorator in parsed file\"\"\"\n arg_1 = [arg_2 for arg_2 in arg_0.py_tree.iter_funcdefs()] + [arg_2 for cls in arg_0.py_tree.iter_classdefs() for arg_2 in cls.iter_funcdefs()]\n for arg_2 in arg_1:\n for arg_3 in arg_2.get_decorators():\n if arg_3.children[1].value == 'step':\n yield arg_2, arg_3\n break"} +{"_id": "doc_6922", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the arguments passed to step decorators\n converted to python objects.\n \"\"\"\n arg_2 = arg_1.children[3:-2]\n arg_3 = None\n if len(arg_2) == 1:\n try:\n arg_3 = ast.literal_eval(arg_2[0].get_code())\n except (ValueError, SyntaxError):\n pass\n if isinstance(arg_3, six.string_types+(list,)):\n return arg_3\n logging.error(\"Decorator step accepts either a string or a list of strings - %s:%d\",\n arg_0.file_path, arg_1.start_pos[0])\n else:\n logging.error(\"Decorator step accepts only one argument - %s:%d\",\n arg_0.file_path, arg_1.start_pos[0])"} +{"_id": "doc_6923", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Find the step with old_text and change it to new_text.The step function\n parameters are also changed according to move_param_from_idx.\n Each entry in this list should specify parameter position from old.\n \"\"\"\n arg_4 = []\n arg_5, arg_6 = arg_0._find_step_node(arg_1)\n if arg_5 is None:\n return arg_4\n arg_7 = arg_0._Func_text(arg_5, arg_1, arg_2)\n arg_4.append(arg_7)\n arg_8 = arg_6.children[2]\n arg_9 = arg_0._move_param_nodes(\n arg_8.children, arg_3)\n if arg_8.children is not arg_9:\n # Record original parameter list span excluding braces\n arg_10 = arg_0._span_from_pos(\n arg_8.children[0].end_pos,\n arg_8.children[-1].start_pos)\n arg_8.children = arg_9\n # Get code for moved paramters excluding braces\n arg_12 = ''.join(p.get_code() for p in arg_9[1:-1])\n arg_4.append((arg_10, arg_12))\n return arg_4"} +{"_id": "doc_6924", "title": "", "text": "def Func(arg_0):\n \"\"\"Find functions with step decorator in parsed file.\"\"\" \n for arg_1 in arg_0.py_tree.find_all('def'):\n for arg_2 in arg_1.decorators:\n if arg_2.name.value == 'step':\n yield arg_1, arg_2\n break"} +{"_id": "doc_6925", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get arguments passed to step decorators converted to python objects.\n \"\"\"\n arg_2 = arg_1.call.value\n arg_3 = None\n if len(arg_2) == 1:\n try:\n arg_3 = arg_2[0].value.to_python()\n except (ValueError, SyntaxError):\n pass\n if isinstance(arg_3, six.string_types + (list,)):\n return arg_3\n logging.error(\"Decorator step accepts either a string or a list of \\\n strings - %s\",\n arg_0.file_path)\n else:\n logging.error(\"Decorator step accepts only one argument - %s\",\n arg_0.file_path)"} +{"_id": "doc_6926", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Find the step with old_text and change it to new_text.\n The step function parameters are 
also changed according\n        to move_param_from_idx. Each entry in this list should\n        specify parameter position from old\n        \"\"\"\n        arg_4 = []\n        arg_5, arg_6 = arg_0._find_step_node(arg_1)\n        if arg_5 is None:\n            return arg_4\n        arg_7 = arg_0._Func_text(arg_5, arg_1, arg_2)\n        arg_4.append(arg_7)\n        arg_8 = arg_0._move_params(arg_6.arguments, arg_3)\n        if arg_6.arguments is not arg_8:\n            arg_9 = arg_0._span_for_node(arg_6.arguments, False)\n            arg_6.arguments = arg_8\n            arg_4.append((arg_9, arg_6.arguments.dumps()))\n        return arg_4"} +{"_id": "doc_6927", "title": "", "text": "def Func(arg_0=None):\n    \"\"\"\n    Select default parser for loading and refactoring steps. Passing `redbaron` as argument\n    will select the old parsing engine from v0.3.3\n\n    Replacing the redbaron parser was necessary to support Python 3 syntax. We have tried our\n    best to make sure there is no impact on users. However, there may be regressions with the\n    new parser backend.\n\n    To revert to the old parser implementation, add the `GETGAUGE_USE_0_3_3_PARSER=true` property\n    to the `python.properties` file in the `/env/default` directory.\n\n    This property along with the redbaron parser will be removed in future releases.\n    \"\"\"\n    if arg_0 == 'redbaron' or os.environ.get('GETGAUGE_USE_0_3_3_PARSER'):\n        arg_1.Class = RedbaronPythonFile\n    else:\n        arg_1.Class = ParsoPythonFile"} +{"_id": "doc_6928", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n        \"\"\"List team memberships for a team, by ID.\n\n        This method supports Webex Teams's implementation of RFC5988 Web\n        Linking to provide pagination support.  It returns a generator\n        container that incrementally yields all team memberships returned by\n        the query.  The generator will automatically request additional 'pages'\n        of responses from Webex as needed until all responses have been\n        returned. The container makes the generator safe for reuse. 
A new API\n call will be made, using the same parameters that were specified when\n the generator was created, every time a new iterator is requested from\n the container.\n\n Args:\n teamId(basestring): List team memberships for a team, by ID.\n max(int): Limit the maximum number of items returned from the Webex\n Teams service per request.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n GeneratorContainer: A GeneratorContainer which, when iterated,\n yields the team memberships returned by the Webex Teams query.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n check_type(arg_2, int)\n\n arg_4 = dict_from_items_with_values(\n arg_3,\n arg_1=arg_1,\n arg_2=arg_2,\n )\n\n # API request - get items\n arg_5 = arg_0._session.get_items(API_ENDPOINT, arg_4=arg_4)\n\n # Yield team membership objects created from the returned items JSON\n # objects\n for arg_6 in arg_5:\n yield arg_0._object_factory(OBJECT_TYPE, arg_6)"} +{"_id": "doc_6929", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n arg_4=False, **arg_5):\n \"\"\"Add someone to a team by Person ID or email address.\n\n Add someone to a team by Person ID or email address; optionally making\n them a moderator.\n\n Args:\n teamId(basestring): The team ID.\n personId(basestring): The person ID.\n personEmail(basestring): The email address of the person.\n isModerator(bool): Set to True to make the person a team moderator.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n TeamMembership: A TeamMembership object with the details of the\n Funcd team membership.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n check_type(arg_2, basestring)\n check_type(arg_3, basestring)\n check_type(arg_4, bool)\n\n arg_6 = dict_from_items_with_values(\n arg_5,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n )\n\n # API request\n arg_7 = arg_0._session.post(API_ENDPOINT, json=arg_6)\n\n # Return a team membership object Funcd from the response JSON data\n return arg_0._object_factory(OBJECT_TYPE, arg_7)"} +{"_id": "doc_6930", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Update a team membership, by ID.\n\n Args:\n membershipId(basestring): The team membership ID.\n isModerator(bool): Set to True to make the person a team moderator.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n TeamMembership: A TeamMembership object with the Funcd Webex\n Teams team-membership details.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n check_type(arg_2, bool)\n\n arg_4 = dict_from_items_with_values(\n arg_3,\n arg_2=arg_2,\n )\n\n # API request\n arg_5 = arg_0._session.put(API_ENDPOINT + '/' + arg_1,\n json=arg_4)\n\n # Return a team membership object created from the response JSON data\n return arg_0._object_factory(OBJECT_TYPE, arg_5)"} +{"_id": "doc_6931", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delete a team membership, by ID.\n\n Args:\n 
membershipId(basestring): The team membership ID.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n\n # API request\n arg_0._session.Func(API_ENDPOINT + '/' + arg_1)"} +{"_id": "doc_6932", "title": "", "text": "def Func():\n \"\"\"Get a cat fact from catfact.ninja and return it as a string.\n\n Functions for Soundhound, Google, IBM Watson, or other APIs can be added\n to create the desired functionality into this bot.\n\n \"\"\"\n arg_0 = requests.get(CAT_FACTS_URL, verify=False)\n arg_0.raise_for_status()\n arg_1 = arg_0.json()\n return arg_1['fact']"} +{"_id": "doc_6933", "title": "", "text": "def Func(arg_0):\n \"\"\"Respond to inbound webhook JSON HTTP Funcs from Webex Teams.\"\"\"\n # Get the Func data sent from Webex Teams\n arg_1 = web.data()\n print(\"\\nWEBHOOK Func RECEIVED:\")\n print(arg_1, \"\\n\")\n\n # Create a Webhook object from the JSON data\n arg_2 = Webhook(arg_1)\n # Get the room details\n arg_3 = api.rooms.get(arg_2.data.roomId)\n # Get the message details\n arg_4 = api.messages.get(arg_2.data.id)\n # Get the sender's details\n arg_5 = api.people.get(arg_4.personId)\n\n print(\"NEW MESSAGE IN ROOM '{}'\".format(arg_3.title))\n print(\"FROM '{}'\".format(arg_5.displayName))\n print(\"MESSAGE '{}'\\n\".format(arg_4.text))\n\n # This is a VERY IMPORTANT loop prevention control step.\n # If you respond to all messages... You will respond to the messages\n # that the bot posts and thereby create a loop condition.\n arg_6 = api.people.me()\n if arg_4.personId == arg_6.id:\n # Message was sent by me (bot); do not respond.\n return 'OK'\n else:\n # Message was sent by someone else; parse message and respond.\n if \"/CAT\" in arg_4.text:\n print(\"FOUND '/CAT'\")\n # Get a cat fact\n arg_7 = get_catfact()\n print(\"SENDING CAT FACT '{}'\".format(arg_7))\n # Post the fact to the room where the request was received\n api.messages.create(arg_3.id, text=arg_7)\n return 'OK'"} +{"_id": "doc_6934", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n **arg_5):\n \"\"\"List room memberships.\n\n By default, Funcs memberships for rooms to which the authenticated user\n belongs.\n\n Use query parameters to filter the response.\n\n Use `roomId` to Func memberships for a room, by ID.\n\n Use either `personId` or `personEmail` to filter the results.\n\n This method supports Webex Teams's implementation of RFC5988 Web\n Linking to provide pagination support. It returns a generator\n container that incrementally yields all memberships returned by the\n query. The generator will automatically request additional 'pages' of\n responses from Webex as needed until all responses have been returned.\n The container makes the generator safe for reuse. 
A new API call will\n        be made, using the same parameters that were specified when the\n        generator was created, every time a new iterator is requested from the\n        container.\n\n        Args:\n            roomId(basestring): Limit results to a specific room, by ID.\n            personId(basestring): Limit results to a specific person, by ID.\n            personEmail(basestring): Limit results to a specific person, by\n                email address.\n            max(int): Limit the maximum number of items returned from the Webex\n                Teams service per request.\n            **request_parameters: Additional request parameters (provides\n                support for parameters that may be added in the future).\n\n        Returns:\n            GeneratorContainer: A GeneratorContainer which, when iterated,\n            yields the memberships returned by the Webex Teams query.\n\n        Raises:\n            TypeError: If the parameter types are incorrect.\n            ApiError: If the Webex Teams cloud returns an error.\n\n        \"\"\"\n        check_type(arg_1, basestring)\n        check_type(arg_2, basestring)\n        check_type(arg_3, basestring)\n        check_type(arg_4, int)\n\n        arg_6 = dict_from_items_with_values(\n            arg_5,\n            arg_1=arg_1,\n            arg_2=arg_2,\n            arg_3=arg_3,\n            arg_4=arg_4,\n        )\n\n        # API request - get items\n        arg_7 = arg_0._session.get_items(API_ENDPOINT, arg_6=arg_6)\n\n        # Yield membership objects created from the returned items JSON objects\n        for arg_8 in arg_7:\n            yield arg_0._object_factory(OBJECT_TYPE, arg_8)"} +{"_id": "doc_6935", "title": "", "text": "def Func(arg_0, arg_1):\n        \"\"\"Delete a membership, by ID.\n\n        Args:\n            membershipId(basestring): The membership ID.\n\n        Raises:\n            TypeError: If the parameter types are incorrect.\n            ApiError: If the Webex Teams cloud returns an error.\n\n        \"\"\"\n        check_type(arg_1, basestring)\n\n        # API request\n        arg_0._session.Func(API_ENDPOINT + '/' + arg_1)"} +{"_id": "doc_6936", "title": "", "text": "def Func(arg_0):\n    \"\"\"Check to see if string is a validly-formatted web url.\"\"\"\n    assert isinstance(arg_0, basestring)\n    arg_1 = urllib.parse.urlparse(arg_0)\n    return (\n        (\n            arg_1.scheme.lower() == 'http'\n            or arg_1.scheme.lower() == 'https'\n        )\n        and arg_1.netloc\n    )"} +{"_id": "doc_6937", "title": "", "text": "def Func(arg_0):\n    \"\"\"Open the file and return an EncodableFile tuple.\"\"\"\n    assert isinstance(arg_0, basestring)\n    assert is_local_file(arg_0)\n    arg_1 = os.path.basename(arg_0)\n    arg_2 = open(arg_0, 'rb')\n    arg_3 = mimetypes.guess_type(arg_1)[0] or 'text/plain'\n    return EncodableFile(arg_1=arg_1,\n                         arg_2=arg_2,\n                         arg_3=arg_3)"} +{"_id": "doc_6938", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n    \"\"\"Object is an instance of one of the acceptable types or None.\n\n    Args:\n        o: The object to be inspected.\n        acceptable_types: A type or tuple of acceptable types.\n        may_be_none(bool): Whether or not the object may be None.\n\n    Raises:\n        TypeError: If the object is None and may_be_none=False, or if the\n            object is not an instance of one of the acceptable types.\n\n    \"\"\"\n    if not isinstance(arg_1, tuple):\n        arg_1 = (arg_1,)\n\n    if arg_2 and arg_0 is None:\n        # Object is None, and that is OK!\n        pass\n    elif isinstance(arg_0, arg_1):\n        # Object is an instance of an acceptable type.\n        pass\n    else:\n        # Object is something else.\n        arg_3 = (\n            \"We were expecting to receive an instance of one of the following \"\n            \"types: {types}{none}; but instead we received {o} which is a \"\n            \"{o_type}.\".format(\n                types=\", \".join([repr(t.__name__) for t in arg_1]),\n                none=\"or 'None'\" if arg_2 else \"\",\n                arg_0=arg_0,\n                o_type=repr(type(arg_0).__name__)\n            )\n        )\n        raise TypeError(arg_3)"} +{"_id": "doc_6939", 
"title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check response code against the expected code; raise ApiError.\n\n Checks the requests.response.status_code against the provided expected\n response code (erc), and raises a ApiError if they do not match.\n\n Args:\n response(requests.response): The response object returned by a request\n using the requests package.\n expected_response_code(int): The expected response code (HTTP response\n code).\n\n Raises:\n ApiError: If the requests.response.status_code does not match the\n provided expected response code (erc).\n\n \"\"\"\n if arg_0.status_code == arg_1:\n pass\n elif arg_0.status_code == RATE_LIMIT_RESPONSE_CODE:\n raise RateLimitError(arg_0)\n else:\n raise ApiError(arg_0)"} +{"_id": "doc_6940", "title": "", "text": "def Func(arg_0):\n \"\"\"Given a dictionary or JSON string; return a dictionary.\n\n Args:\n json_data(dict, str): Input JSON object.\n\n Returns:\n A Python dictionary with the contents of the JSON object.\n\n Raises:\n TypeError: If the input object is not a dictionary or string.\n\n \"\"\"\n if isinstance(arg_0, dict):\n return arg_0\n elif isinstance(arg_0, basestring):\n return json.loads(arg_0, object_hook=OrderedDict)\n else:\n raise TypeError(\n \"'json_data' must be a dictionary or valid JSON string; \"\n \"received: {!r}\".format(arg_0)\n )"} +{"_id": "doc_6941", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"Func with the Webex Teams DateTime format as the default.\"\"\"\n return super(WebexTeamsDateTime, arg_0).Func(\n arg_1, arg_2\n ).replace(tzinfo=ZuluTimeZone())"} +{"_id": "doc_6942", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None,\n **arg_5):\n \"\"\"List rooms.\n\n By default, Funcs rooms to which the authenticated user belongs.\n\n This method supports Webex Teams's implementation of RFC5988 Web\n Linking to provide pagination support. It returns a generator\n container that incrementally yields all rooms returned by the\n query. The generator will automatically request additional 'pages' of\n responses from Webex as needed until all responses have been returned.\n The container makes the generator safe for reuse. A new API call will\n be made, using the same parameters that were specified when the\n generator was created, every time a new iterator is requested from the\n container.\n\n Args:\n teamId(basestring): Limit the rooms to those associated with a\n team, by ID.\n type(basestring): 'direct' returns all 1-to-1 rooms. `group`\n returns all group rooms. 
If not specified or values not\n matched, will return all room types.\n sortBy(basestring): Sort results by room ID (`id`), most recent\n activity (`lastactivity`), or most recently created\n (`created`).\n max(int): Limit the maximum number of items returned from the Webex\n Teams service per request.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n GeneratorContainer: A GeneratorContainer which, when iterated,\n yields the rooms returned by the Webex Teams query.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring)\n check_type(arg_2, basestring)\n check_type(arg_3, basestring)\n check_type(arg_4, int)\n\n arg_6 = dict_from_items_with_values(\n arg_5,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n )\n\n # API request - get items\n arg_7 = arg_0._session.get_items(API_ENDPOINT, arg_6=arg_6)\n\n # Yield room objects created from the returned items JSON objects\n for arg_8 in arg_7:\n yield arg_0._object_factory(OBJECT_TYPE, arg_8)"} +{"_id": "doc_6943", "title": "", "text": "def Func(arg_0):\n \"\"\"Creation date and time in ISO8601 format.\"\"\"\n Func = arg_0._json_data.get('created')\n if Func:\n return WebexTeamsDateTime.strptime(Func)\n else:\n return None"} +{"_id": "doc_6944", "title": "", "text": "def Func():\n \"\"\"Attempt to get the access token from the environment.\n\n Try using the current and legacy environment variables. If the access token\n is found in a legacy environment variable, raise a deprecation warning.\n\n Returns:\n The access token found in the environment (str), or None.\n \"\"\"\n arg_0 = os.environ.get(ACCESS_TOKEN_ENVIRONMENT_VARIABLE)\n if arg_0:\n return arg_0\n\n else:\n for arg_1 in LEGACY_ACCESS_TOKEN_ENVIRONMENT_VARIABLES:\n arg_0 = os.environ.get(arg_1)\n if arg_0:\n arg_2 = PendingDeprecationWarning(\n \"Use of the `{legacy}` environment variable will be \"\n \"deprecated in the future. 
Please update your \"\n \"environment(s) to use the new `{new}` environment \"\n \"variable.\".format(\n legacy=arg_0,\n new=ACCESS_TOKEN_ENVIRONMENT_VARIABLE,\n )\n )\n warnings.warn(arg_2)\n return arg_0"} +{"_id": "doc_6945", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5=None, arg_6=None, **arg_7):\n \"\"\"Create a webhook.\n\n Args:\n name(basestring): A user-friendly name for this webhook.\n targetUrl(basestring): The URL that receives POST requests for\n each event.\n resource(basestring): The resource type for the webhook.\n event(basestring): The event type for the webhook.\n filter(basestring): The filter that defines the webhook scope.\n secret(basestring): The secret used to generate payload signature.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n Webhook: A Webhook object with the details of the Funcd webhook.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n check_type(arg_2, basestring, may_be_none=False)\n check_type(arg_3, basestring, may_be_none=False)\n check_type(arg_4, basestring, may_be_none=False)\n check_type(arg_5, basestring)\n check_type(arg_6, basestring)\n\n arg_8 = dict_from_items_with_values(\n arg_7,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n )\n\n # API request\n arg_9 = arg_0._session.post(API_ENDPOINT, json=arg_8)\n\n # Return a webhook object Funcd from the response JSON data\n return arg_0._object_factory(OBJECT_TYPE, arg_9)"} +{"_id": "doc_6946", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Update the HTTP headers used for requests in this session.\n\n Note: Updates provided by the dictionary passed as the `headers`\n parameter to this method are merged into the session headers by adding\n new key-value pairs and/or updating the values of existing keys. 
The\n session headers are not replaced by the provided dictionary.\n\n Args:\n headers(dict): Updates to the current session headers.\n\n \"\"\"\n check_type(arg_1, dict, may_be_none=False)\n arg_0._req_session.headers.update(arg_1)"} +{"_id": "doc_6947", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Given a relative or absolute URL; return an absolute URL.\n\n Args:\n url(basestring): A relative or absolute URL.\n\n Returns:\n str: An absolute URL.\n\n \"\"\"\n arg_2 = urllib.parse.urlparse(arg_1)\n if not arg_2.scheme and not arg_2.netloc:\n # url is a relative URL; combine with base_url\n return urllib.parse.urljoin(str(arg_0.base_url), str(arg_1))\n else:\n # url is already an absolute URL; return as is\n return arg_1"} +{"_id": "doc_6948", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, **arg_4):\n \"\"\"Abstract base method for making Funcs to the Webex Teams APIs.\n\n This base method:\n * Expands the API endpoint URL to an absolute URL\n * Makes the actual HTTP Func to the API endpoint\n * Provides support for Webex Teams rate-limiting\n * Inspects response codes and raises exceptions as appropriate\n\n Args:\n method(basestring): The Func-method type ('GET', 'POST', etc.).\n url(basestring): The URL of the API endpoint to be called.\n erc(int): The expected response code that should be returned by the\n Webex Teams API endpoint to indicate success.\n **kwargs: Passed on to the Funcs package.\n\n Raises:\n ApiError: If anything other than the expected response code is\n returned by the Webex Teams API endpoint.\n\n \"\"\"\n # Ensure the url is an absolute URL\n arg_5 = arg_0.abs_url(arg_2)\n\n # Update Func kwargs with session defaults\n arg_4.setdefault('timeout', arg_0.single_Func_timeout)\n\n while True:\n # Make the HTTP Func to the API endpoint\n arg_6 = arg_0._req_session.Func(arg_1, arg_5, **arg_4)\n\n try:\n # Check the response code for error conditions\n check_response_code(arg_6, arg_3)\n except RateLimitError as e:\n # Catch rate-limit errors\n # Wait and retry if automatic rate-limit handling is enabled\n if arg_0.wait_on_rate_limit:\n warnings.warn(RateLimitWarning(arg_6))\n time.sleep(e.retry_after)\n continue\n else:\n # Re-raise the RateLimitError\n raise\n else:\n return arg_6"} +{"_id": "doc_6949", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Sends a GET request.\n\n Args:\n url(basestring): The URL of the API endpoint.\n params(dict): The parameters for the HTTP GET request.\n **kwargs:\n erc(int): The expected (success) response code for the request.\n others: Passed on to the requests package.\n\n Raises:\n ApiError: If anything other than the expected response code is\n returned by the Webex Teams API endpoint.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n check_type(arg_2, dict)\n\n # Expected response code\n arg_4 = arg_3.pop('erc', EXPECTED_RESPONSE_CODE['GET'])\n\n arg_5 = arg_0.request('GET', arg_1, arg_4, arg_2=arg_2, **arg_3)\n return extract_and_parse_json(arg_5)"} +{"_id": "doc_6950", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Return a generator that GETs and yields pages of data.\n\n Provides native support for RFC5988 Web Linking.\n\n Args:\n url(basestring): The URL of the API endpoint.\n params(dict): The parameters for the HTTP GET request.\n **kwargs:\n erc(int): The expected (success) response code for the request.\n others: Passed on to the requests package.\n\n Raises:\n ApiError: If anything other than the expected response code is\n returned 
by the Webex Teams API endpoint.\n\n        \"\"\"\n        check_type(arg_1, basestring, may_be_none=False)\n        check_type(arg_2, dict)\n\n        # Expected response code\n        arg_4 = arg_3.pop('erc', EXPECTED_RESPONSE_CODE['GET'])\n\n        # First request\n        arg_5 = arg_0.request('GET', arg_1, arg_4, arg_2=arg_2, **arg_3)\n\n        while True:\n            yield extract_and_parse_json(arg_5)\n\n            if arg_5.links.get('next'):\n                arg_6 = arg_5.links.get('next').get('url')\n\n                # Patch for Webex Teams 'max=null' in next URL bug.\n                # Testing shows that patch is no longer needed; raising a\n                # warning if it is still taking effect;\n                # considering for future removal\n                arg_6 = _fix_next_url(arg_6)\n\n                # Subsequent requests\n                arg_5 = arg_0.request('GET', arg_6, arg_4, **arg_3)\n\n            else:\n                break"} +{"_id": "doc_6951", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n        \"\"\"Sends a DELETE request.\n\n        Args:\n            url(basestring): The URL of the API endpoint.\n            **kwargs:\n                erc(int): The expected (success) response code for the request.\n                others: Passed on to the requests package.\n\n        Raises:\n            ApiError: If anything other than the expected response code is\n                returned by the Webex Teams API endpoint.\n\n        \"\"\"\n        check_type(arg_1, basestring, may_be_none=False)\n\n        # Expected response code\n        arg_3 = arg_2.pop('erc', EXPECTED_RESPONSE_CODE['DELETE'])\n\n        arg_0.request('DELETE', arg_1, arg_3, **arg_2)"} +{"_id": "doc_6952", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n        \"\"\"Create a new guest issuer using the provided issuer token.\n\n        This function returns a guest issuer with an api access token.\n\n        Args:\n            subject(basestring): Unique and public identifier\n            displayName(basestring): Display Name of the guest user\n            issuerToken(basestring): Issuer token from developer hub\n            expiration(basestring): Expiration time as a unix timestamp\n            secret(basestring): The secret used to sign your guest issuers\n\n        Returns:\n            GuestIssuerToken: A Guest Issuer with a valid access token.\n\n        Raises:\n            TypeError: If the parameter types are incorrect\n            ApiError: If the webex teams cloud returns an error.\n        \"\"\"\n        check_type(arg_1, basestring)\n        check_type(arg_2, basestring)\n        check_type(arg_3, basestring)\n        check_type(arg_4, basestring)\n        check_type(arg_5, basestring)\n\n        arg_6 = {\n            \"sub\": arg_1,\n            \"name\": arg_2,\n            \"iss\": arg_3,\n            \"exp\": arg_4\n        }\n\n        arg_7 = base64.b64decode(arg_5)\n        arg_8 = jwt.encode(arg_6, arg_7, algorithm='HS256')\n\n        arg_9 = arg_0._session.base_url + API_ENDPOINT + \"/\" + \"login\"\n        arg_10 = {\n            'Authorization': \"Bearer \" + arg_8.decode('utf-8')\n        }\n        arg_11 = requests.post(arg_9, arg_10=arg_10)\n        check_response_code(arg_11, EXPECTED_RESPONSE_CODE['GET'])\n\n        return arg_0._object_factory(OBJECT_TYPE, arg_11.json())"} +{"_id": "doc_6953", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None,\n             arg_4=None, arg_5=None, **arg_6):\n        \"\"\"Lists messages in a room.\n\n        Each message will include content attachments if present.\n\n        The Func API sorts the messages in descending order by creation date.\n\n        This method supports Webex Teams's implementation of RFC5988 Web\n        Linking to provide pagination support.  It returns a generator\n        container that incrementally yields all messages returned by the\n        query.  The generator will automatically request additional 'pages' of\n        responses from Webex as needed until all responses have been returned.\n        The container makes the generator safe for reuse. 
A new API call will\n be made, using the same parameters that were specified when the\n generator was created, every time a new iterator is requested from the\n container.\n\n Args:\n roomId(basestring): List messages for a room, by ID.\n mentionedPeople(basestring): List messages where the caller is\n mentioned by specifying \"me\" or the caller `personId`.\n before(basestring): List messages sent before a date and time, in\n ISO8601 format.\n beforeMessage(basestring): List messages sent before a message,\n by ID.\n max(int): Limit the maximum number of items returned from the Webex\n Teams service per request.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n GeneratorContainer: A GeneratorContainer which, when iterated,\n yields the messages returned by the Webex Teams query.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n check_type(arg_2, basestring)\n check_type(arg_3, basestring)\n check_type(arg_4, basestring)\n check_type(arg_5, int)\n\n arg_7 = dict_from_items_with_values(\n arg_6,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n )\n\n # API request - get items\n arg_8 = arg_0._session.get_items(API_ENDPOINT, arg_7=arg_7)\n\n # Yield message objects created from the returned items JSON objects\n for arg_9 in arg_8:\n yield arg_0._object_factory(OBJECT_TYPE, arg_9)"} +{"_id": "doc_6954", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None,\n arg_4=None, arg_5=None, arg_6=None, **arg_7):\n \"\"\"Post a message, and optionally a attachment, to a room.\n\n The files parameter is a list, which accepts multiple values to allow\n for future expansion, but currently only one file may be included with\n the message.\n\n Args:\n roomId(basestring): The room ID.\n toPersonId(basestring): The ID of the recipient when sending a\n private 1:1 message.\n toPersonEmail(basestring): The email address of the recipient when\n sending a private 1:1 message.\n text(basestring): The message, in plain text. If `markdown` is\n specified this parameter may be optionally used to provide\n alternate text for UI clients that do not support rich text.\n markdown(basestring): The message, in markdown format.\n files(`list`): A list of public URL(s) or local path(s) to files to\n be posted into the room. Only one file is allowed per message.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n Message: A Message object with the details of the Funcd message.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n ValueError: If the files parameter is a list of length > 1, or if\n the string in the list (the only element in the list) does not\n contain a valid URL or path to a local file.\n\n \"\"\"\n check_type(arg_1, basestring)\n check_type(arg_2, basestring)\n check_type(arg_3, basestring)\n check_type(arg_4, basestring)\n check_type(arg_5, basestring)\n check_type(arg_6, list)\n if arg_6:\n if len(arg_6) != 1:\n raise ValueError(\"The length of the `files` list is greater \"\n \"than one (1). 
The files parameter is a \"\n                                 \"list, which accepts multiple values to \"\n                                 \"allow for future expansion, but currently \"\n                                 \"only one file may be included with the \"\n                                 \"message.\")\n            check_type(arg_6[0], basestring)\n\n        arg_8 = dict_from_items_with_values(\n            arg_7,\n            arg_1=arg_1,\n            arg_2=arg_2,\n            arg_3=arg_3,\n            arg_4=arg_4,\n            arg_5=arg_5,\n            arg_6=arg_6,\n        )\n\n        # API request\n        if not arg_6 or is_web_url(arg_6[0]):\n            # Standard JSON post\n            arg_9 = arg_0._session.post(API_ENDPOINT, json=arg_8)\n\n        elif is_local_file(arg_6[0]):\n            # Multipart MIME post\n            try:\n                arg_8['files'] = open_local_file(arg_6[0])\n                arg_10 = MultipartEncoder(arg_8)\n                arg_11 = {'Content-type': arg_10.content_type}\n                arg_9 = arg_0._session.post(API_ENDPOINT,\n                                                  arg_11=arg_11,\n                                                  data=arg_10)\n            finally:\n                arg_8['files'].file_object.close()\n\n        else:\n            raise ValueError(\"The `files` parameter does not contain a valid \"\n                             \"URL or path to a local file.\")\n\n        # Return a message object Funcd from the response JSON data\n        return arg_0._object_factory(OBJECT_TYPE, arg_9)"} +{"_id": "doc_6955", "title": "", "text": "def Func(arg_0, arg_1):\n        \"\"\"Delete a message.\n\n        Args:\n            messageId(basestring): The ID of the message to be Funcd.\n\n        Raises:\n            TypeError: If the parameter types are incorrect.\n            ApiError: If the Webex Teams cloud returns an error.\n\n        \"\"\"\n        check_type(arg_1, basestring, may_be_none=False)\n\n        # API request\n        arg_0._session.Func(API_ENDPOINT + '/' + arg_1)"} +{"_id": "doc_6956", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None,\n               arg_5=None, arg_6=None, arg_7=None, arg_8=None,\n               **arg_9):\n        \"\"\"Create a new user account for a given organization\n\n        Only an admin can Func a new user account.\n\n        Args:\n            emails(`list`): Email address(es) of the person (list of strings).\n            displayName(basestring): Full name of the person.\n            firstName(basestring): First name of the person.\n            lastName(basestring): Last name of the person.\n            avatar(basestring): URL to the person's avatar in PNG format.\n            orgId(basestring): ID of the organization to which this\n                person belongs.\n            roles(`list`): Roles of the person (list of strings containing\n                the role IDs to be assigned to the person).\n            licenses(`list`): Licenses allocated to the person (list of\n                strings - containing the license IDs to be allocated to the\n                person).\n            **request_parameters: Additional request parameters (provides\n                support for parameters that may be added in the future).\n\n        Returns:\n            Person: A Person object with the details of the Funcd person.\n\n        Raises:\n            TypeError: If the parameter types are incorrect.\n            ApiError: If the Webex Teams cloud returns an error.\n\n        \"\"\"\n        check_type(arg_1, list, may_be_none=False)\n        check_type(arg_2, basestring)\n        check_type(arg_3, basestring)\n        check_type(arg_4, basestring)\n        check_type(arg_5, basestring)\n        check_type(arg_6, basestring)\n        check_type(arg_7, list)\n        check_type(arg_8, list)\n\n        arg_10 = dict_from_items_with_values(\n            arg_9,\n            arg_1=arg_1,\n            arg_2=arg_2,\n            arg_3=arg_3,\n            arg_4=arg_4,\n            arg_5=arg_5,\n            arg_6=arg_6,\n            arg_7=arg_7,\n            arg_8=arg_8,\n        )\n\n        # API request\n        arg_11 = arg_0._session.post(API_ENDPOINT, json=arg_10)\n\n        # Return a person object Funcd from the returned JSON object\n        return arg_0._object_factory(OBJECT_TYPE, arg_11)"} +{"_id": "doc_6957", "title": "", "text": "def Func(arg_0, arg_1):\n        \"\"\"Get a person's details, by ID.\n\n        Args:\n            personId(basestring): The ID of the person to be retrieved.\n\n        Returns:\n            Person: A Person object with the details of the 
requested person.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n\n # API request\n arg_2 = arg_0._session.Func(API_ENDPOINT + '/' + arg_1)\n\n # Return a person object created from the response JSON data\n return arg_0._object_factory(OBJECT_TYPE, arg_2)"} +{"_id": "doc_6958", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None, arg_7=None, arg_8=None,\n arg_9=None, **arg_10):\n \"\"\"Update details for a person, by ID.\n\n Only an admin can Func a person's details.\n\n Email addresses for a person cannot be changed via the Webex Teams API.\n\n Include all details for the person. This action expects all user\n details to be present in the request. A common approach is to first GET\n the person's details, make changes, then PUT both the changed and\n unchanged values.\n\n Args:\n personId(basestring): The person ID.\n emails(`list`): Email address(es) of the person (list of strings).\n displayName(basestring): Full name of the person.\n firstName(basestring): First name of the person.\n lastName(basestring): Last name of the person.\n avatar(basestring): URL to the person's avatar in PNG format.\n orgId(basestring): ID of the organization to which this\n person belongs.\n roles(`list`): Roles of the person (list of strings containing\n the role IDs to be assigned to the person).\n licenses(`list`): Licenses allocated to the person (list of\n strings - containing the license IDs to be allocated to the\n person).\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n Person: A Person object with the Funcd details.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_2, list)\n check_type(arg_3, basestring)\n check_type(arg_4, basestring)\n check_type(arg_5, basestring)\n check_type(arg_6, basestring)\n check_type(arg_7, basestring)\n check_type(arg_8, list)\n check_type(arg_9, list)\n\n arg_11 = dict_from_items_with_values(\n arg_10,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n )\n\n # API request\n arg_12 = arg_0._session.put(API_ENDPOINT + '/' + arg_1,\n json=arg_11)\n\n # Return a person object created from the returned JSON object\n return arg_0._object_factory(OBJECT_TYPE, arg_12)"} +{"_id": "doc_6959", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove a person from the system.\n\n Only an admin can remove a person.\n\n Args:\n personId(basestring): The ID of the person to be Funcd.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring, may_be_none=False)\n\n # API request\n arg_0._session.Func(API_ENDPOINT + '/' + arg_1)"} +{"_id": "doc_6960", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"List teams to which the authenticated user belongs.\n\n This method supports Webex Teams's implementation of RFC5988 Web\n Linking to provide pagination support. It returns a generator\n container that incrementally yields all teams returned by the\n query. 
The generator will automatically request additional 'pages' of\n        responses from Webex as needed until all responses have been returned.\n        The container makes the generator safe for reuse.  A new API call will\n        be made, using the same parameters that were specified when the\n        generator was created, every time a new iterator is requested from the\n        container.\n\n        Args:\n            max(int): Limit the maximum number of items returned from the Webex\n                Teams service per request.\n            **request_parameters: Additional request parameters (provides\n                support for parameters that may be added in the future).\n\n        Returns:\n            GeneratorContainer: A GeneratorContainer which, when iterated,\n            yields the teams returned by the Webex Teams query.\n\n        Raises:\n            TypeError: If the parameter types are incorrect.\n            ApiError: If the Webex Teams cloud returns an error.\n\n        \"\"\"\n        check_type(arg_1, int)\n\n        arg_3 = dict_from_items_with_values(\n            arg_2,\n            arg_1=arg_1,\n        )\n\n        # API request - get items\n        arg_4 = arg_0._session.get_items(API_ENDPOINT, arg_3=arg_3)\n\n        # Yield team objects created from the returned items JSON objects\n        for arg_5 in arg_4:\n            yield arg_0._object_factory(OBJECT_TYPE, arg_5)"} +{"_id": "doc_6961", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n        \"\"\"Update details for a team, by ID.\n\n        Args:\n            teamId(basestring): The team ID.\n            name(basestring): A user-friendly name for the team.\n            **request_parameters: Additional request parameters (provides\n                support for parameters that may be added in the future).\n\n        Returns:\n            Team: A Team object with the Funcd Webex Teams team details.\n\n        Raises:\n            TypeError: If the parameter types are incorrect.\n            ApiError: If the Webex Teams cloud returns an error.\n\n        \"\"\"\n        check_type(arg_1, basestring, may_be_none=False)\n        check_type(arg_2, basestring)\n\n        arg_4 = dict_from_items_with_values(\n            arg_3,\n            arg_2=arg_2,\n        )\n\n        # API request\n        arg_5 = arg_0._session.put(API_ENDPOINT + '/' + arg_1,\n                                          json=arg_4)\n\n        # Return a team object created from the response JSON data\n        return arg_0._object_factory(OBJECT_TYPE, arg_5)"} +{"_id": "doc_6962", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=None,\n             arg_6=None, **arg_7):\n        \"\"\"List events.\n\n        List events in your organization. Several query parameters are\n        available to filter the response.\n\n        Note: `from` is a keyword in Python and may not be used as a variable\n        name, so we had to use `_from` instead.\n\n        This method supports Webex Teams's implementation of RFC5988 Web\n        Linking to provide pagination support.  It returns a generator\n        container that incrementally yields all events returned by the\n        query.  The generator will automatically request additional 'pages' of\n        responses from Webex as needed until all responses have been returned.\n        The container makes the generator safe for reuse.  A new API call will\n        be made, using the same parameters that were specified when the\n        generator was created, every time a new iterator is requested from the\n        container.\n\n        Args:\n            resource(basestring): Limit results to a specific resource type.\n                Possible values: \"messages\", \"memberships\".\n            type(basestring): Limit results to a specific event type. 
Possible\n values: \"created\", \"updated\", \"deleted\".\n actorId(basestring): Limit results to events performed by this\n person, by ID.\n _from(basestring): Limit results to events which occurred after a\n date and time, in ISO8601 format (yyyy-MM-dd'T'HH:mm:ss.SSSZ).\n to(basestring): Limit results to events which occurred before a\n date and time, in ISO8601 format (yyyy-MM-dd'T'HH:mm:ss.SSSZ).\n max(int): Limit the maximum number of items returned from the Webex\n Teams service per request.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n GeneratorContainer: A GeneratorContainer which, when iterated,\n yields the events returned by the Webex Teams query.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.\n\n \"\"\"\n check_type(arg_1, basestring)\n check_type(arg_2, basestring)\n check_type(arg_3, basestring)\n check_type(arg_4, basestring)\n check_type(arg_5, basestring)\n check_type(arg_6, int)\n\n arg_8 = dict_from_items_with_values(\n arg_7,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n )\n\n if arg_4:\n arg_8[\"from\"] = arg_8.pop(\"_from\")\n\n # API request - get items\n arg_9 = arg_0._session.get_items(API_ENDPOINT, arg_8=arg_8)\n\n # Yield event objects created from the returned items JSON objects\n for arg_10 in arg_9:\n yield arg_0._object_factory(OBJECT_TYPE, arg_10)"} +{"_id": "doc_6963", "title": "", "text": "def Func(arg_0):\n \"\"\"Respond to inbound webhook JSON HTTP POST from Webex Teams.\"\"\"\n\n # Get the POST data sent from Webex Teams\n arg_1 = arg_0.json\n log.info(\"\\n\")\n log.info(\"WEBHOOK POST RECEIVED:\")\n log.info(arg_1)\n log.info(\"\\n\")\n\n # Create a Webhook object from the JSON data\n arg_2 = Webhook(arg_1)\n\n # Get the room details\n arg_3 = api.rooms.get(arg_2.data.roomId)\n\n # Get the message details\n arg_4 = api.messages.get(arg_2.data.id)\n\n # Get the sender's details\n arg_5 = api.people.get(arg_4.personId)\n\n log.info(\"NEW MESSAGE IN ROOM '{}'\".format(arg_3.title))\n log.info(\"FROM '{}'\".format(arg_5.displayName))\n log.info(\"MESSAGE '{}'\\n\".format(arg_4.text))\n\n # This is a VERY IMPORTANT loop prevention control step.\n # If you respond to all messages... 
You will respond to the messages\n    # that the bot posts and thereby create a loop condition.\n    arg_6 = api.people.me()\n    if arg_4.personId == arg_6.id:\n        # Message was sent by me (bot); do not respond.\n        return {'Message': 'OK'}\n\n    else:\n        # Message was sent by someone else; parse message and respond.\n        if \"/CAT\" in arg_4.text:\n            log.info(\"FOUND '/CAT'\")\n\n            # Get a cat fact\n            arg_7 = get_catfact()\n            log.info(\"SENDING CAT FACT '{}'\".format(arg_7))\n\n            # Post the fact to the room where the request was received\n            api.messages.create(arg_3.id, text=arg_7)\n        return {'Message': 'OK'}"} +{"_id": "doc_6964", "title": "", "text": "def Func():\r\n    \"\"\"Get the ngrok public HTTP URL from the local client API.\"\"\"\r\n    try:\r\n        arg_0 = requests.get(url=NGROK_CLIENT_API_BASE_URL + \"/tunnels\",\r\n                                headers={'content-type': 'application/json'})\r\n        arg_0.raise_for_status()\r\n\r\n    except requests.exceptions.RequestException:\r\n        print(\"Could not connect to the ngrok client API; \"\r\n              \"assuming not running.\")\r\n        return None\r\n\r\n    else:\r\n        for arg_1 in arg_0.json()[\"tunnels\"]:\r\n            if arg_1.get(\"public_url\", \"\").startswith(\"http://\"):\r\n                print(\"Found ngrok public HTTP URL:\", arg_1[\"public_url\"])\r\n                return arg_1[\"public_url\"]"} +{"_id": "doc_6965", "title": "", "text": "def Func(arg_0, arg_1):\r\n    \"\"\"Create a Webex Teams webhook pointing to the public ngrok URL.\"\"\"\r\n    print(\"Creating Webhook...\")\r\n    arg_2 = arg_0.webhooks.create(\r\n        name=WEBHOOK_NAME,\r\n        targetUrl=urljoin(arg_1, WEBHOOK_URL_SUFFIX),\r\n        resource=WEBHOOK_RESOURCE,\r\n        event=WEBHOOK_EVENT,\r\n    )\r\n    print(arg_2)\r\n    print(\"Webhook successfully created.\")\r\n    return arg_2"} +{"_id": "doc_6966", "title": "", "text": "def Func(arg_0, arg_1):\n    \"\"\" Return all rows from a cursor as a dict. 
\"\"\"\n arg_2 = [col[0] for col in arg_1.description]\n return [\n dict(zip(arg_2, arg_3))\n for arg_3 in arg_1.fetchall()\n ]"} +{"_id": "doc_6967", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"\n Parse a received datetime into a timezone-aware, Python datetime object.\n\n Arguments:\n datetime_string: A string to be parsed.\n datetime_format: A datetime format string to be used for parsing\n\n \"\"\"\n if isinstance(arg_0, datetime.datetime):\n arg_3 = arg_0\n else:\n try:\n arg_3 = datetime.datetime.strptime(arg_0, arg_1)\n except ValueError:\n arg_3 = datetime.datetime.strptime(arg_0, LMS_API_DATETIME_FORMAT_WITHOUT_TIMEZONE)\n\n # If the datetime format didn't include a timezone, then set to UTC.\n # Note that if we're using the default LMS_API_DATETIME_FORMAT, it ends in 'Z',\n # which denotes UTC for ISO-8661.\n if arg_3.tzinfo is None:\n arg_3 = arg_3.replace(tzinfo=timezone.utc)\n return arg_3"} +{"_id": "doc_6968", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Connect to the REST API, authenticating with a JWT for the current user.\n \"\"\"\n if JwtBuilder is None:\n raise NotConnectedToOpenEdX(\"This package must be installed in an OpenEdX environment.\")\n\n arg_1 = int(time())\n arg_2 = JwtBuilder.create_jwt_for_user(arg_0.user)\n arg_0.client = EdxRestApiClient(\n arg_0.API_BASE_URL, append_slash=arg_0.APPEND_SLASH, arg_2=arg_2,\n )\n arg_0.expires_at = arg_1 + arg_0.expires_in"} +{"_id": "doc_6969", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Return redirect to embargo error page if the given user is blocked.\n \"\"\"\n for arg_4 in arg_0:\n arg_5 = embargo_api.Func(\n CourseKey.from_string(arg_4),\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3\n )\n if arg_5:\n return arg_5"} +{"_id": "doc_6970", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Sort the course mode dictionaries by slug according to the COURSE_MODE_SORT_ORDER constant.\n\n Arguments:\n modes (list): A list of course mode dictionaries.\n Returns:\n list: A list with the course modes dictionaries sorted by slug.\n\n \"\"\"\n def slug_weight(arg_2):\n \"\"\"\n Assign a weight to the course mode dictionary based on the position of its slug in the sorting list.\n \"\"\"\n arg_3 = COURSE_MODE_SORT_ORDER\n arg_4 = len(arg_3)\n if arg_2['slug'] in arg_3:\n return arg_4 - arg_3.index(arg_2['slug'])\n return 0\n # Sort slug weights in descending order\n return sorted(arg_1, key=slug_weight, reverse=True)"} +{"_id": "doc_6971", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Query the Enrollment API to see whether a course run has a given course mode available.\n\n Arguments:\n course_run_id (str): The string value of the course run's unique identifier\n\n Returns:\n bool: Whether the course run has the given mode avaialble for enrollment.\n\n \"\"\"\n arg_3 = arg_0.get_course_modes(arg_1)\n return any(arg_4 for arg_4 in arg_3 if arg_4['slug'] == arg_2)"} +{"_id": "doc_6972", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Call the enrollment API to enroll the user in the course specified by course_id.\n\n Args:\n username (str): The username by which the user goes on the OpenEdX platform\n course_id (str): The string value of the course's unique identifier\n mode (str): The enrollment mode which should be used for the enrollment\n cohort (str): Add the user to this named cohort\n\n Returns:\n dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.\n\n 
\"\"\"\n return arg_0.client.enrollment.post(\n {\n 'user': arg_1,\n 'course_details': {'course_id': arg_2},\n 'mode': arg_3,\n 'cohort': arg_4,\n }\n )"} +{"_id": "doc_6973", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Query the enrollment API to get information about a single course enrollment.\n\n Args:\n username (str): The username by which the user goes on the OpenEdX platform\n course_id (str): The string value of the course's unique identifier\n\n Returns:\n dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.\n\n \"\"\"\n arg_3 = getattr(\n arg_0.client.enrollment,\n '{username},{course_id}'.format(arg_1=arg_1, arg_2=arg_2)\n )\n try:\n arg_4 = arg_3.get()\n except HttpNotFoundError:\n # This enrollment data endpoint returns a 404 if either the username or course_id specified isn't valid\n LOGGER.error(\n 'Course enrollment details not found for invalid username or course; username=[%s], course=[%s]',\n arg_1,\n arg_2\n )\n return None\n # This enrollment data endpoint returns an empty string if the username and course_id is valid, but there's\n # no matching enrollment found\n if not arg_4:\n LOGGER.info('Failed to find course enrollment details for user [%s] and course [%s]', arg_1, arg_2)\n return None\n\n return arg_4"} +{"_id": "doc_6974", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Query the enrollment API and determine if a learner is enrolled in a course run.\n\n Args:\n username (str): The username by which the user goes on the OpenEdX platform\n course_run_id (str): The string value of the course's unique identifier\n\n Returns:\n bool: Indicating whether the user is enrolled in the course run. Returns False under any errors.\n\n \"\"\"\n arg_3 = arg_0.get_course_enrollment(arg_1, arg_2)\n return arg_3 is not None and arg_3.get('is_active', False)"} +{"_id": "doc_6975", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return a Course Discovery API client setup with authentication for the specified user.\n \"\"\"\n if JwtBuilder is None:\n raise NotConnectedToOpenEdX(\n _(\"To get a Catalog API client, this package must be \"\n \"installed in an Open edX environment.\")\n )\n\n arg_2 = JwtBuilder.create_jwt_for_user(arg_0)\n return EdxRestApiClient(arg_1, arg_2=arg_2)"} +{"_id": "doc_6976", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return specified course catalog.\n\n Returns:\n dict: catalog details if it is available for the user.\n\n \"\"\"\n return arg_0._load_data(\n arg_0.CATALOGS_ENDPOINT,\n default=[],\n resource_id=arg_1\n )"} +{"_id": "doc_6977", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Return paginated response for all catalog courses.\n\n Returns:\n dict: API response with links to next and previous pages.\n\n \"\"\"\n return arg_0._load_data(\n arg_0.CATALOGS_COURSES_ENDPOINT.format(arg_1),\n default=[],\n arg_2=arg_2,\n traverse_pagination=False,\n many=False,\n )"} +{"_id": "doc_6978", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Return a paginated list of course catalogs, including name and ID.\n\n Returns:\n dict: Paginated response containing catalogs available for the user.\n\n \"\"\"\n return arg_0._load_data(\n arg_0.CATALOGS_ENDPOINT,\n default=[],\n arg_1=arg_1,\n traverse_pagination=False,\n many=False\n )"} +{"_id": "doc_6979", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the courses included in a single course catalog by ID.\n\n Args:\n catalog_id (int): The catalog ID we 
want to retrieve.\n\n Returns:\n list: Courses of the catalog in question\n\n \"\"\"\n return arg_0._load_data(\n arg_0.CATALOGS_COURSES_ENDPOINT.format(arg_1),\n default=[]\n )"} +{"_id": "doc_6980", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return single program by UUID, or None if not found.\n\n Arguments:\n program_uuid(string): Program UUID in string form\n\n Returns:\n dict: Program data provided by Course Catalog API\n\n \"\"\"\n return arg_0._load_data(\n arg_0.PROGRAMS_ENDPOINT,\n resource_id=arg_1,\n default=None\n )"} +{"_id": "doc_6981", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a program type by its slug.\n\n Arguments:\n slug (str): The slug to identify the program type.\n\n Returns:\n dict: A program type object.\n\n \"\"\"\n return arg_0._load_data(\n arg_0.PROGRAM_TYPES_ENDPOINT,\n resource_id=arg_1,\n default=None,\n )"} +{"_id": "doc_6982", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Find common course modes for a set of course runs.\n\n This function essentially returns an intersection of types of seats available\n for each course run.\n\n Arguments:\n course_run_ids(Iterable[str]): Target Course run IDs.\n\n Returns:\n set: course modes found in all given course runs\n\n Examples:\n # run1 has prof and audit, run 2 has the same\n Func(['course-v1:run1', 'course-v1:run2'])\n {'prof', 'audit'}\n\n # run1 has prof and audit, run 2 has only prof\n Func(['course-v1:run1', 'course-v1:run2'])\n {'prof'}\n\n # run1 has prof and audit, run 2 honor\n Func(['course-v1:run1', 'course-v1:run2'])\n {}\n\n # run1 has nothing, run2 has prof\n Func(['course-v1:run1', 'course-v1:run2'])\n {}\n\n # run1 has prof and audit, run 2 prof, run3 has audit\n Func(['course-v1:run1', 'course-v1:run2', 'course-v1:run3'])\n {}\n\n # run1 has nothing, run 2 prof, run3 has prof\n Func(['course-v1:run1', 'course-v1:run2', 'course-v1:run3'])\n {}\n\n \"\"\"\n arg_2 = None\n for arg_3 in arg_1:\n arg_4 = arg_0.get_course_run(arg_3) or {}\n arg_5 = {seat.get('type') for seat in arg_4.get('seats', [])}\n\n if arg_2 is None:\n arg_2 = arg_5\n else:\n arg_2 &= arg_5\n\n if not arg_2:\n return arg_2\n\n return arg_2"} +{"_id": "doc_6983", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Determine if the given course or course run ID is contained in the catalog with the given ID.\n\n Args:\n catalog_id (int): The ID of the catalog\n course_id (str): The ID of the course or course run\n\n Returns:\n bool: Whether the course or course run is contained in the given catalog\n \"\"\"\n try:\n # Determine if we have a course run ID, rather than a plain course ID\n arg_3 = str(CourseKey.from_string(arg_2))\n except InvalidKeyError:\n arg_3 = None\n\n arg_4 = arg_0.client.catalogs(arg_1).contains\n\n if arg_3:\n arg_5 = arg_4.get(arg_3=arg_3)\n else:\n arg_5 = arg_4.get(arg_2=arg_2)\n\n return arg_5.get('courses', {}).get(arg_2, False)"} +{"_id": "doc_6984", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3, **arg_4):\n \"\"\"\n Load data from API client.\n\n Arguments:\n resource(string): type of resource to load\n default(any): value to return if API query returned empty result. 
Sensible values: [], {}, None etc.\n\n Returns:\n dict: Deserialized response from Course Catalog API\n\n \"\"\"\n arg_5 = arg_2 if arg_2 != arg_0.DEFAULT_VALUE_SAFEGUARD else {}\n try:\n return get_edx_api_data(\n api_config=CatalogIntegration.current(),\n arg_1=arg_1,\n api=arg_0.client,\n **arg_4\n ) or arg_5\n except (SlumberBaseException, ConnectionError, Timeout) as exc:\n LOGGER.exception(\n 'Failed to load data from resource [%s] with kwargs [%s] due to: [%s]',\n arg_1, arg_4, str(exc)\n )\n return arg_5"} +{"_id": "doc_6985", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.\n\n Arguments:\n enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.\n\n Returns:\n list: List of dicts containing content metadata.\n \"\"\"\n arg_2 = OrderedDict()\n\n # TODO: This if block can be removed when we get rid of discovery service-based catalogs.\n if arg_1.catalog:\n arg_3 = arg_0._load_data(\n arg_0.ENTERPRISE_CUSTOMER_ENDPOINT,\n detail_resource='courses',\n resource_id=str(arg_1.uuid),\n traverse_pagination=True,\n )\n for arg_4 in arg_3['results']:\n for arg_5 in arg_4['course_runs']:\n arg_5['content_type'] = 'courserun' # Make this look like a search endpoint result.\n arg_2[arg_5['key']] = arg_5\n\n for arg_6 in arg_1.enterprise_customer_catalogs.all():\n arg_3 = arg_0._load_data(\n arg_0.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT,\n resource_id=str(arg_6.uuid),\n traverse_pagination=True,\n querystring={'page_size': 1000},\n )\n\n for arg_7 in arg_3['results']:\n arg_8 = utils.Func_item_id(arg_7)\n arg_2[arg_8] = arg_7\n\n return arg_2.values()"} +{"_id": "doc_6986", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return items that need to be created, updated, and deleted along with the\n current ContentMetadataItemTransmissions.\n \"\"\"\n arg_2 = {}\n arg_3 = {}\n arg_4 = {}\n arg_5 = {}\n arg_6 = arg_1.keys()\n\n # Get the items that were previously transmitted to the integrated channel.\n # If we are not transmitting something that was previously transmitted,\n # we need to delete it from the integrated channel.\n for arg_7 in arg_0._get_transmissions():\n arg_5[arg_7.content_id] = arg_7\n if arg_7.content_id not in arg_6:\n arg_4[arg_7.content_id] = arg_7.channel_metadata\n\n # Compare what is currently being transmitted to what was transmitted\n # previously, identifying items that need to be created or updated.\n for arg_9 in arg_1.values():\n arg_8 = arg_9.content_id\n arg_10 = arg_9.channel_metadata\n arg_11 = arg_5.get(arg_8, None)\n if arg_11 is not None:\n if diff(arg_10, arg_11.channel_metadata):\n arg_3[arg_8] = arg_10\n else:\n arg_2[arg_8] = arg_10\n\n LOGGER.info(\n 'Preparing to transmit creation of [%s] content metadata items with plugin configuration [%s]: [%s]',\n len(arg_2),\n arg_0.enterprise_configuration,\n arg_2.keys(),\n )\n LOGGER.info(\n 'Preparing to transmit update of [%s] content metadata items with plugin configuration [%s]: [%s]',\n len(arg_3),\n arg_0.enterprise_configuration,\n arg_3.keys(),\n )\n LOGGER.info(\n 'Preparing to transmit deletion of [%s] content metadata items with plugin configuration [%s]: [%s]',\n len(arg_4),\n arg_0.enterprise_configuration,\n arg_4.keys(),\n )\n\n return arg_2, arg_3, arg_4, arg_5"} +{"_id": "doc_6987", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Serialize content metadata items for a create transmission to the integrated channel.\n \"\"\"\n return 
json.dumps(\n arg_0._prepare_items_for_transmission(arg_1),\n sort_keys=True\n ).encode('utf-8')"} +{"_id": "doc_6988", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Transmit content metadata update to integrated channel.\n \"\"\"\n for arg_3 in chunks(arg_1, arg_0.enterprise_configuration.transmission_chunk_size):\n arg_4 = arg_0._serialize_items(list(arg_3.values()))\n try:\n arg_0.client.update_content_metadata(arg_4)\n except ClientError as exc:\n LOGGER.error(\n 'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',\n len(arg_3),\n arg_0.enterprise_configuration.enterprise_customer.name,\n arg_0.enterprise_configuration.channel_code,\n )\n LOGGER.error(exc)\n else:\n arg_0._update_transmissions(arg_3, arg_2)"} +{"_id": "doc_6989", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Transmit content metadata deletion to integrated channel.\n \"\"\"\n for arg_2 in chunks(arg_1, arg_0.enterprise_configuration.transmission_chunk_size):\n arg_3 = arg_0._serialize_items(list(arg_2.values()))\n try:\n arg_0.client.delete_content_metadata(arg_3)\n except ClientError as exc:\n LOGGER.error(\n 'Failed to delete [%s] content metadata items for integrated channel [%s] [%s]',\n len(arg_2),\n arg_0.enterprise_configuration.enterprise_customer.name,\n arg_0.enterprise_configuration.channel_code,\n )\n LOGGER.error(exc)\n else:\n arg_0._delete_transmissions(arg_2.keys())"} +{"_id": "doc_6990", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the ContentMetadataItemTransmision models for previously\n transmitted content metadata items.\n \"\"\"\n # pylint: disable=invalid-name\n arg_1 = apps.get_model(\n 'integrated_channel',\n 'ContentMetadataItemTransmission'\n )\n return arg_1.objects.filter(\n enterprise_customer=arg_0.enterprise_configuration.enterprise_customer,\n integrated_channel_code=arg_0.enterprise_configuration.channel_code()\n )"} +{"_id": "doc_6991", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Update ContentMetadataItemTransmision models for the given content metadata items.\n \"\"\"\n for arg_3, arg_4 in arg_1.items():\n arg_5 = arg_2[arg_3]\n arg_5.channel_metadata = arg_4\n arg_5.save()"} +{"_id": "doc_6992", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Flag a method as Func.\n\n :param extra: Extra text you'd like to display after the default text.\n \"\"\"\n def decorator(arg_1):\n \"\"\"\n Return a decorated function that emits a deprecation warning on use.\n \"\"\"\n @wraps(arg_1)\n def wrapper(*arg_2, **arg_3):\n \"\"\"\n Wrap the function.\n \"\"\"\n arg_4 = 'You called the Func function `{function}`. 
{extra}'.format(\n function=arg_1.__name__,\n arg_0=arg_0\n )\n arg_5 = inspect.currentframe().f_back\n warnings.warn_explicit(\n arg_4,\n category=DeprecationWarning,\n filename=inspect.getfile(arg_5.f_code),\n lineno=arg_5.f_lineno\n )\n return arg_1(*arg_2, **arg_3)\n return wrapper\n return decorator"} +{"_id": "doc_6993", "title": "", "text": "def Func(arg_0):\n \"\"\"\n View decorator for allowing authenticated user with valid enterprise UUID.\n\n This decorator requires enterprise identifier as a parameter\n `enterprise_uuid`.\n\n This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to\n the decorated view .\n\n If there is no enterprise in database against the kwarg `enterprise_uuid`\n or if the user is not authenticated then it will redirect the user to the\n enterprise-linked SSO login page.\n\n Usage::\n @Func()\n def my_view(request, enterprise_uuid):\n # Some functionality ...\n\n OR\n\n class MyView(View):\n ...\n @method_decorator(Func)\n def get(self, request, enterprise_uuid):\n # Some functionality ...\n\n \"\"\"\n @wraps(arg_0)\n def wrapper(arg_1, *arg_2, **arg_3):\n \"\"\"\n Wrap the decorator.\n \"\"\"\n if 'enterprise_uuid' not in arg_3:\n raise Http404\n\n arg_4 = arg_3['enterprise_uuid']\n arg_5 = get_enterprise_customer_or_404(arg_4)\n\n # Now verify if the user is logged in. If user is not logged in then\n # send the user to the login screen to sign in with an\n # Enterprise-linked IdP and the pipeline will get them back here.\n if not arg_1.user.is_authenticated:\n arg_6 = urlparse(arg_1.get_full_path())\n arg_7 = parse_qs(arg_6.query)\n arg_7.update({\n 'tpa_hint': arg_5.identity_provider,\n FRESH_LOGIN_PARAMETER: 'yes'\n })\n arg_8 = '{current_path}?{query_string}'.format(\n current_path=quote(arg_6.path),\n query_string=urlencode(arg_7, doseq=True)\n )\n return redirect(\n '{login_url}?{params}'.format(\n login_url='/login',\n params=urlencode(\n {'next': arg_8}\n )\n )\n )\n\n # Otherwise, they can proceed to the original view.\n return arg_0(arg_1, *arg_2, **arg_3)\n\n return wrapper"} +{"_id": "doc_6994", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Verify that the username has a matching user, and that the user has an associated EnterpriseCustomerUser.\n \"\"\"\n try:\n arg_2 = User.objects.get(username=arg_1)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"User does not exist\")\n\n try:\n arg_3 = models.EnterpriseCustomerUser.objects.get(user_id=arg_2.pk)\n except models.EnterpriseCustomerUser.DoesNotExist:\n raise serializers.ValidationError(\"User has no EnterpriseCustomerUser\")\n\n arg_0.enterprise_customer_user = arg_3\n return arg_1"} +{"_id": "doc_6995", "title": "", "text": "def Func(arg_0): # pylint: disable=arguments-differ\n \"\"\"\n Save the model with the found EnterpriseCustomerUser.\n \"\"\"\n arg_1 = arg_0.validated_data['course_id']\n\n arg_2, arg_3 = models.EnterpriseCourseEnrollment.objects.get_or_create(\n enterprise_customer_user=arg_0.enterprise_customer_user,\n arg_1=arg_1,\n )\n if arg_3:\n track_enrollment('rest-api-enrollment', arg_0.enterprise_customer_user.user_id, arg_1)"} +{"_id": "doc_6996", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Serialize the EnterpriseCustomerCatalog object.\n\n Arguments:\n instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.\n\n Returns:\n dict: The EnterpriseCustomerCatalog converted to a dict.\n \"\"\"\n arg_2 = arg_0.context['request']\n arg_3 = arg_1.enterprise_customer\n\n arg_4 = 
super(EnterpriseCustomerCatalogDetailSerializer, arg_0).Func(arg_1)\n\n # Retrieve the EnterpriseCustomerCatalog search results from the discovery service.\n arg_5 = arg_1.get_paginated_content(arg_2.GET)\n arg_6 = arg_5['count']\n arg_7 = arg_5['results']\n\n for arg_8 in arg_7:\n arg_9 = arg_8['content_type']\n arg_10 = arg_8.get('marketing_url')\n if arg_10:\n arg_8['marketing_url'] = utils.update_query_parameters(\n arg_10, utils.get_enterprise_utm_context(arg_3)\n )\n # Add the Enterprise enrollment URL to each content item returned from the discovery service.\n if arg_9 == 'course':\n arg_8['enrollment_url'] = arg_1.get_course_enrollment_url(arg_8['key'])\n if arg_9 == 'courserun':\n arg_8['enrollment_url'] = arg_1.get_course_run_enrollment_url(arg_8['key'])\n if arg_9 == 'program':\n arg_8['enrollment_url'] = arg_1.get_program_enrollment_url(arg_8['uuid'])\n\n # Build pagination URLs\n arg_11 = None\n arg_12 = None\n arg_13 = int(arg_2.GET.get('page', '1'))\n arg_14 = arg_2.build_absolute_uri()\n if arg_5['previous']:\n arg_11 = utils.update_query_parameters(arg_14, {'page': arg_13 - 1})\n if arg_5['next']:\n arg_12 = utils.update_query_parameters(arg_14, {'page': arg_13 + 1})\n\n arg_4['count'] = arg_6\n arg_4['previous'] = arg_11\n arg_4['next'] = arg_12\n arg_4['results'] = arg_7\n\n return arg_4"} +{"_id": "doc_6997", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the enterprise related django groups that this user is a part of.\n \"\"\"\n if arg_1.user:\n return [arg_2.name for arg_2 in arg_1.user.groups.filter(name__in=ENTERPRISE_PERMISSION_GROUPS)]\n return []"} +{"_id": "doc_6998", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Verify that the username has a matching user.\n \"\"\"\n try:\n arg_0.user = User.objects.get(username=arg_1)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"User does not exist\")\n\n return arg_1"} +{"_id": "doc_6999", "title": "", "text": "def Func(arg_0): # pylint: disable=arguments-differ\n \"\"\"\n Save the EnterpriseCustomerUser.\n \"\"\"\n arg_1 = arg_0.validated_data['enterprise_customer']\n\n arg_2 = models.EnterpriseCustomerUser(\n user_id=arg_0.user.pk,\n arg_1=arg_1,\n )\n arg_2.Func()"} +{"_id": "doc_7000", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the updated course data dictionary.\n\n Arguments:\n instance (dict): The course data.\n\n Returns:\n dict: The updated course data.\n \"\"\"\n arg_2 = copy.deepcopy(arg_1)\n arg_3 = arg_0.context['enterprise_customer_catalog']\n arg_2['enrollment_url'] = arg_3.get_course_enrollment_url(\n arg_2['key']\n )\n for arg_4 in arg_2['course_runs']:\n arg_4['enrollment_url'] = arg_3.get_course_run_enrollment_url(\n arg_4['key']\n )\n return arg_2"} +{"_id": "doc_7001", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the updated course run data dictionary.\n\n Arguments:\n instance (dict): The course run data.\n\n Returns:\n dict: The updated course run data.\n \"\"\"\n arg_2 = copy.deepcopy(arg_1)\n arg_3 = arg_0.context['enterprise_customer_catalog']\n arg_2['enrollment_url'] = arg_3.get_course_run_enrollment_url(\n arg_2['key']\n )\n return arg_2"} +{"_id": "doc_7002", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the updated program data dictionary.\n\n Arguments:\n instance (dict): The program data.\n\n Returns:\n dict: The updated program data.\n \"\"\"\n arg_2 = copy.deepcopy(arg_1)\n arg_3 = arg_0.context['enterprise_customer_catalog']\n arg_2['enrollment_url'] = 
arg_3.get_program_enrollment_url(\n arg_2['uuid']\n )\n for arg_4 in arg_2['courses']:\n arg_4['enrollment_url'] = arg_3.get_course_enrollment_url(arg_4['key'])\n for arg_5 in arg_4['course_runs']:\n arg_5['enrollment_url'] = arg_3.get_course_run_enrollment_url(\n arg_5['key']\n )\n return arg_2"} +{"_id": "doc_7003", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This implements the same relevant logic as ListSerializer except that if one or more items fail validation,\n processing for other items that did not fail will continue.\n \"\"\"\n\n if not isinstance(arg_1, list):\n arg_2 = arg_0.error_messages['not_a_list'].format(\n input_type=type(arg_1).__name__\n )\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: [arg_2]\n })\n\n arg_3 = []\n\n for arg_4 in arg_1:\n try:\n arg_5 = arg_0.child.run_validation(arg_4)\n except serializers.ValidationError as exc:\n arg_3.append(exc.detail)\n else:\n arg_3.append(arg_5)\n\n return arg_3"} +{"_id": "doc_7004", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This selectively calls the child Func method based on whether or not validation failed for each payload.\n \"\"\"\n arg_2 = []\n for arg_3 in arg_1:\n if 'non_field_errors' not in arg_3 and not any(isinstance(arg_3[arg_4], list) for arg_4 in arg_3):\n arg_2.append(arg_0.child.Func(arg_3))\n else:\n arg_2.append(arg_3)\n\n return arg_2"} +{"_id": "doc_7005", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This selectively calls Func on each result that was processed by create.\n \"\"\"\n return [\n arg_0.child.Func(arg_2) if 'detail' in arg_2 else arg_2 for arg_2 in arg_1\n ]"} +{"_id": "doc_7006", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Perform the enrollment for existing enterprise customer users, or Func the pending objects for new users.\n \"\"\"\n arg_2 = arg_0.context.get('enterprise_customer')\n arg_3 = arg_1.get('lms_user_id')\n arg_4 = arg_1.get('tpa_user_id')\n arg_5 = arg_1.get('user_email')\n arg_6 = arg_1.get('course_run_id')\n arg_7 = arg_1.get('course_mode')\n arg_8 = arg_1.get('cohort')\n arg_9 = arg_1.get('email_students')\n arg_10 = arg_1.get('is_active')\n\n arg_11 = arg_3 or arg_4 or arg_5\n\n if isinstance(arg_11, models.EnterpriseCustomerUser):\n arg_1['enterprise_customer_user'] = arg_11\n try:\n if arg_10:\n arg_11.enroll(arg_6, arg_7, arg_8=arg_8)\n else:\n arg_11.unenroll(arg_6)\n except (CourseEnrollmentDowngradeError, CourseEnrollmentPermissionError, HttpClientError) as exc:\n arg_1['detail'] = str(exc)\n return arg_1\n\n if arg_10:\n track_enrollment('enterprise-customer-enrollment-api', arg_11.user_id, arg_6)\n else:\n if arg_10:\n arg_11 = arg_2.enroll_user_pending_registration(\n arg_5,\n arg_7,\n arg_6,\n arg_8=arg_8\n )\n else:\n arg_2.clear_pending_registration(arg_5, arg_6)\n\n if arg_9:\n arg_2.notify_enrolled_learners(\n arg_0.context.get('request_user'),\n arg_6,\n [arg_11]\n )\n\n arg_1['detail'] = 'success'\n\n return arg_1"} +{"_id": "doc_7007", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validates the lms_user_id, if is given, to see if there is an existing EnterpriseCustomerUser for it.\n \"\"\"\n arg_2 = arg_0.context.get('enterprise_customer')\n\n try:\n # Ensure the given user is associated with the enterprise.\n return models.EnterpriseCustomerUser.objects.get(\n user_id=arg_1,\n arg_2=arg_2\n )\n except models.EnterpriseCustomerUser.DoesNotExist:\n pass\n\n return None"} +{"_id": "doc_7008", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validates the 
tpa_user_id, if is given, to see if there is an existing EnterpriseCustomerUser for it.\n\n It first uses the third party auth api to find the associated username to do the lookup.\n \"\"\"\n arg_2 = arg_0.context.get('enterprise_customer')\n\n try:\n arg_3 = ThirdPartyAuthApiClient()\n arg_4 = arg_3.get_username_from_remote_id(\n arg_2.identity_provider, arg_1\n )\n arg_5 = User.objects.get(arg_4=arg_4)\n return models.EnterpriseCustomerUser.objects.get(\n user_id=arg_5.id,\n arg_2=arg_2\n )\n except (models.EnterpriseCustomerUser.DoesNotExist, User.DoesNotExist):\n pass\n\n return None"} +{"_id": "doc_7009", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validates the user_email, if given, to see if an existing EnterpriseCustomerUser exists for it.\n\n If it does not, it does not fail validation, unlike for the other field validation methods above.\n \"\"\"\n arg_2 = arg_0.context.get('enterprise_customer')\n\n try:\n arg_3 = User.objects.get(email=arg_1)\n return models.EnterpriseCustomerUser.objects.get(\n user_id=arg_3.id,\n arg_2=arg_2\n )\n except (models.EnterpriseCustomerUser.DoesNotExist, User.DoesNotExist):\n pass\n\n return arg_1"} +{"_id": "doc_7010", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validates that the course run id is part of the Enterprise Customer's catalog.\n \"\"\"\n arg_2 = arg_0.context.get('enterprise_customer')\n\n if not arg_2.catalog_contains_course(arg_1):\n raise serializers.ValidationError(\n 'The course run id {course_run_id} is not in the catalog '\n 'for Enterprise Customer {enterprise_customer}'.format(\n course_run_id=arg_1,\n arg_2=arg_2.name,\n )\n )\n\n return arg_1"} +{"_id": "doc_7011", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Update pagination links in course catalog data and return DRF Response.\n\n Arguments:\n data (dict): Dictionary containing catalog courses.\n request (HttpRequest): Current request object.\n\n Returns:\n (Response): DRF response object containing pagination links.\n \"\"\"\n arg_2 = urlparse(arg_1.build_absolute_uri())._replace(query=None).geturl()\n\n arg_3 = None\n arg_4 = None\n\n if arg_0['next']:\n arg_3 = \"{base_url}?{query_parameters}\".format(\n base_url=arg_2,\n query_parameters=urlparse(arg_0['next']).query,\n )\n arg_3 = arg_3.rstrip('?')\n if arg_0['previous']:\n arg_4 = \"{base_url}?{query_parameters}\".format(\n base_url=arg_2,\n query_parameters=urlparse(arg_0['previous'] or \"\").query,\n )\n arg_4 = arg_4.rstrip('?')\n\n return Response(OrderedDict([\n ('count', arg_0['count']),\n ('next', arg_3),\n ('previous', arg_4),\n ('results', arg_0['results'])\n ]))"} +{"_id": "doc_7012", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delete the `role_based_access_control` switch.\"\"\"\n arg_2 = arg_0.get_model('waffle', 'Switch')\n arg_2.objects.filter(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH).delete()"} +{"_id": "doc_7013", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Send a completion status call to SAP SuccessFactors using the client.\n\n Args:\n payload: The learner completion data payload to send to SAP SuccessFactors\n \"\"\"\n arg_2['app_label'] = 'sap_success_factors'\n arg_2['model_name'] = 'SapSuccessFactorsLearnerDataTransmissionAudit'\n arg_2['remote_user_id'] = 'sapsf_user_id'\n super(SapSuccessFactorsLearnerTransmitter, arg_0).Func(arg_1, **arg_2)"} +{"_id": "doc_7014", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Modify throttling for service users.\n\n Updates throttling rate if the request is coming 
from the service user, and\n defaults to UserRateThrottle's configured setting otherwise.\n\n Updated throttling rate comes from `DEFAULT_THROTTLE_RATES` key in `REST_FRAMEWORK`\n setting. service user throttling is specified in `DEFAULT_THROTTLE_RATES` by `service_user` key\n\n Example Setting:\n ```\n REST_FRAMEWORK = {\n ...\n 'DEFAULT_THROTTLE_RATES': {\n ...\n 'service_user': '50/day'\n }\n }\n ```\n \"\"\"\n arg_3 = get_service_usernames()\n\n # User service user throttling rates for service user.\n if arg_1.user.username in arg_3:\n arg_0.update_throttle_scope()\n\n return super(ServiceUserThrottle, arg_0).Func(arg_1, arg_2)"} +{"_id": "doc_7015", "title": "", "text": "def Func(arg_0, arg_1, arg_2='results', **arg_3):\n \"\"\"\n This method adds enterprise-specific metadata for each course.\n\n We are adding following field in all the courses.\n tpa_hint: a string for identifying Identity Provider.\n enterprise_id: the UUID of the enterprise\n **kwargs: any additional data one would like to add on a per-use basis.\n\n Arguments:\n enterprise_customer: The customer whose data will be used to fill the enterprise context.\n course_container_key: The key used to find the container for courses in the serializer's data dictionary.\n \"\"\"\n arg_4 = {\n 'tpa_hint': arg_1 and arg_1.identity_provider,\n 'enterprise_id': arg_1 and str(arg_1.uuid),\n }\n arg_4.update(**arg_3)\n\n arg_5 = []\n for arg_6 in arg_0.data[arg_2]:\n arg_5.append(\n arg_0.update_course(arg_6, arg_1, arg_4)\n )\n arg_0.data[arg_2] = arg_5"} +{"_id": "doc_7016", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Update course metadata of the given course and return updated course.\n\n Arguments:\n course (dict): Course Metadata returned by course catalog API\n enterprise_customer (EnterpriseCustomer): enterprise customer instance.\n enterprise_context (dict): Enterprise context to be added to course runs and URLs..\n\n Returns:\n (dict): Updated course metadata\n \"\"\"\n arg_1['course_runs'] = arg_0.Func_runs(\n course_runs=arg_1.get('course_runs') or [],\n arg_2=arg_2,\n arg_3=arg_3,\n )\n\n # Update marketing urls in course metadata to include enterprise related info (i.e. 
our global context).\n arg_4 = arg_1.get('marketing_url')\n if arg_4:\n arg_5 = dict(arg_3, **utils.get_enterprise_utm_context(arg_2))\n arg_1.update({'marketing_url': utils.update_query_parameters(arg_4, arg_5)})\n\n # Finally, add context to the course as a whole.\n arg_1.update(arg_3)\n return arg_1"} +{"_id": "doc_7017", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Collect learner data for the ``EnterpriseCustomer`` where data sharing consent is granted.\n\n Yields a learner data object for each enrollment, containing:\n\n * ``enterprise_enrollment``: ``EnterpriseCourseEnrollment`` object.\n * ``completed_date``: datetime instance containing the course/enrollment completion date; None if not complete.\n \"Course completion\" occurs for instructor-paced courses when course certificates are issued, and\n for self-paced courses, when the course end date is passed, or when the learner achieves a passing grade.\n * ``grade``: string grade recorded for the learner in the course.\n \"\"\"\n # Fetch the consenting enrollment data, including the enterprise_customer_user.\n # Order by the course_id, to avoid fetching course API data more than we have to.\n arg_1 = EnterpriseCourseEnrollment.objects.select_related(\n 'enterprise_customer_user'\n ).filter(\n enterprise_customer_user__enterprise_customer=arg_0.enterprise_customer,\n enterprise_customer_user__active=True,\n ).order_by('course_id')\n\n # Fetch course details from the Course API, and cache between calls.\n arg_2 = None\n for arg_3 in arg_1:\n\n arg_4 = arg_3.course_id\n\n # Fetch course details from Courses API\n # pylint: disable=unsubscriptable-object\n if arg_2 is None or arg_2['course_id'] != arg_4:\n if arg_0.course_api is None:\n arg_0.course_api = CourseApiClient()\n arg_2 = arg_0.course_api.get_course_details(arg_4)\n\n if arg_2 is None:\n # Course not found, so we have nothing to report.\n LOGGER.error(\"No course run details found for enrollment [%d]: [%s]\",\n arg_3.pk, arg_4)\n continue\n\n arg_6 = DataSharingConsent.objects.proxied_get(\n username=arg_3.enterprise_customer_user.username,\n arg_4=arg_3.course_id,\n enterprise_customer=arg_3.enterprise_customer_user.enterprise_customer\n )\n\n if not arg_6.granted or arg_3.audit_reporting_disabled:\n continue\n\n # For instructor-paced courses, let the certificate determine course completion\n if arg_2.get('pacing') == 'instructor':\n arg_7, arg_8, arg_9 = arg_0._collect_certificate_data(arg_3)\n\n # For self-paced courses, check the Grades API\n else:\n arg_7, arg_8, arg_9 = arg_0._collect_grades_data(arg_3, arg_2)\n\n arg_10 = arg_0.get_learner_data_records(\n arg_3=arg_3,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n )\n if arg_10:\n # There are some cases where we won't receive a record from the above\n # method; right now, that should only happen if we have an Enterprise-linked\n # user for the integrated channel, and transmission of that user's\n # data requires an upstream user identifier that we don't have (due to a\n # failure of SSO or similar). 
In such a case, `get_learner_data_record`\n # would return None, and we'd simply skip yielding it here.\n for arg_11 in arg_10:\n yield arg_11"} +{"_id": "doc_7018", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=False):\n \"\"\"\n Generate a learner data transmission audit with fields properly filled in.\n \"\"\"\n # pylint: disable=invalid-name\n arg_5 = apps.get_model('integrated_channel', 'LearnerDataTransmissionAudit')\n arg_6 = None\n arg_7 = False\n if arg_2 is not None:\n arg_6 = parse_datetime_to_epoch_millis(arg_2)\n arg_7 = arg_4\n\n return [\n arg_5(\n enterprise_course_enrollment_id=arg_1.id,\n course_id=arg_1.course_id,\n arg_7=arg_7,\n arg_6=arg_6,\n arg_3=arg_3,\n )\n ]"} +{"_id": "doc_7019", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get enterprise user id from user object.\n\n Arguments:\n obj (User): Django User object\n\n Returns:\n (int): Primary Key identifier for enterprise user object.\n \"\"\"\n # An enterprise learner can not belong to multiple enterprise customer at the same time\n # but if such scenario occurs we will pick the first.\n arg_2 = EnterpriseCustomerUser.objects.filter(user_id=arg_1.id).first()\n\n return arg_2 and arg_2.id"} +{"_id": "doc_7020", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get enterprise SSO UID.\n\n Arguments:\n obj (User): Django User object\n\n Returns:\n (str): string containing UUID for enterprise customer's Identity Provider.\n \"\"\"\n # An enterprise learner can not belong to multiple enterprise customer at the same time\n # but if such scenario occurs we will pick the first.\n arg_2 = EnterpriseCustomerUser.objects.filter(user_id=arg_1.id).first()\n\n return arg_2 and arg_2.get_remote_id()"} +{"_id": "doc_7021", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Remove content metadata items from the `items_to_create`, `items_to_update`, `items_to_delete` dicts.\n\n Arguments:\n failed_items (list): Failed Items to be removed.\n items_to_create (dict): dict containing the items created successfully.\n items_to_update (dict): dict containing the items updated successfully.\n items_to_delete (dict): dict containing the items deleted successfully.\n \"\"\"\n for arg_5 in arg_1:\n arg_6 = arg_5['courseID']\n arg_2.pop(arg_6, None)\n arg_3.pop(arg_6, None)\n arg_4.pop(arg_6, None)"} +{"_id": "doc_7022", "title": "", "text": "def Func(*arg_0, **arg_1): # pylint: disable=unused-argument\n \"\"\"\n Parse and validate arguments for send_course_enrollments command.\n\n Arguments:\n *args: Positional arguments passed to the command\n **options: optional arguments passed to the command\n\n Returns:\n A tuple containing parsed values for\n 1. days (int): Integer showing number of days to lookup enterprise enrollments,\n course completion etc and send to xAPI LRS\n 2. 
enterprise_customer_uuid (EnterpriseCustomer): Enterprise Customer if present then\n send xAPI statements just for this enterprise.\n \"\"\"\n arg_2 = arg_1.get('days', 1)\n arg_3 = arg_1.get('enterprise_customer_uuid')\n arg_4 = None\n\n if arg_3:\n try:\n # pylint: disable=no-member\n arg_4 = EnterpriseCustomer.objects.get(uuid=arg_3)\n except EnterpriseCustomer.DoesNotExist:\n raise CommandError('Enterprise customer with uuid \"{enterprise_customer_uuid}\" does not exist.'.format(\n arg_3=arg_3\n ))\n\n return arg_2, arg_4"} +{"_id": "doc_7023", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Send xAPI statements.\n \"\"\"\n if not CourseEnrollment:\n raise NotConnectedToOpenEdX(\"This package must be installed in an OpenEdX environment.\")\n\n arg_3, arg_4 = arg_0.parse_arguments(*arg_1, **arg_2)\n\n if arg_4:\n try:\n arg_5 = XAPILRSConfiguration.objects.get(\n active=True,\n arg_4=arg_4\n )\n except XAPILRSConfiguration.DoesNotExist:\n raise CommandError('No xAPI Configuration found for \"{enterprise_customer}\"'.format(\n arg_4=arg_4.name\n ))\n\n # Send xAPI analytics data to the configured LRS\n arg_0.send_xapi_statements(arg_5, arg_3)\n else:\n for arg_5 in XAPILRSConfiguration.objects.filter(active=True):\n arg_0.send_xapi_statements(arg_5, arg_3)"} +{"_id": "doc_7024", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Django template tag that returns course information to display in a modal.\n\n You may pass in a particular course if you like. Otherwise, the modal will look for course context\n within the parent context.\n\n Usage:\n {% Func %}\n {% Func course %}\n \"\"\"\n if arg_1:\n arg_0.update({\n 'course_image_uri': arg_1.get('course_image_uri', ''),\n 'course_title': arg_1.get('course_title', ''),\n 'course_level_type': arg_1.get('course_level_type', ''),\n 'course_short_description': arg_1.get('course_short_description', ''),\n 'course_effort': arg_1.get('course_effort', ''),\n 'course_full_description': arg_1.get('course_full_description', ''),\n 'expected_learning_items': arg_1.get('expected_learning_items', []),\n 'staff': arg_1.get('staff', []),\n 'premium_modes': arg_1.get('premium_modes', []),\n })\n return arg_0"} +{"_id": "doc_7025", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True): # pylint: disable=unused-argument\n \"\"\"\n Django template filter that returns an anchor with attributes useful for course modal selection.\n\n General Usage:\n {{ link_text|Func:index }}\n\n Examples:\n {{ course_title|Func:forloop.counter0 }}\n {{ course_title|Func:3 }}\n {{ view_details_text|Func:0 }}\n \"\"\"\n arg_3 = (\n '{link_text}
    '\n ).format(\n arg_1=arg_1,\n arg_0=arg_0,\n )\n return mark_safe(arg_3)"} +{"_id": "doc_7026", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Populates the ``DataSharingConsent`` model with the ``enterprise`` application's consent data.\n\n Consent data from the ``enterprise`` application come from the ``EnterpriseCourseEnrollment`` model.\n \"\"\"\n arg_2 = arg_0.get_model('consent', 'DataSharingConsent')\n arg_3 = arg_0.get_model('enterprise', 'EnterpriseCourseEnrollment')\n arg_4 = arg_0.get_model('auth', 'User')\n for arg_5 in arg_3.objects.all():\n arg_6 = arg_4.objects.get(pk=arg_5.enterprise_customer_user.user_id)\n arg_7, arg_8 = arg_2.objects.get_or_create(\n username=arg_6.username,\n enterprise_customer=arg_5.enterprise_customer_user.enterprise_customer,\n course_id=arg_5.course_id,\n )\n if arg_5.consent_granted is not None:\n arg_7.granted = arg_5.consent_granted\n else:\n # Check UDSCA instead.\n arg_10 = arg_5.enterprise_customer_user.data_sharing_consent.first()\n if arg_10 is not None:\n arg_7.granted = arg_10.state in ['enabled', 'external']\n else:\n arg_7.granted = False\n arg_7.save()"} +{"_id": "doc_7027", "title": "", "text": "def Func(arg_0, arg_1, arg_2): # pylint: disable=unused-argument\n \"\"\"\n Send a completion status payload to the Degreed Completion Status endpoint\n\n Args:\n user_id: Unused.\n payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)\n containing completion status fields per Degreed documentation.\n\n Returns:\n A tuple containing the status code and the body of the response.\n Raises:\n HTTPError: if we received a failure response code from Degreed\n \"\"\"\n return arg_0._post(\n urljoin(\n arg_0.enterprise_configuration.degreed_base_url,\n arg_0.global_degreed_config.completion_status_api_path\n ),\n arg_2,\n arg_0.COMPLETION_PROVIDER_SCOPE\n )"} +{"_id": "doc_7028", "title": "", "text": "def Func(arg_0, arg_1, arg_2): # pylint: disable=unused-argument\n \"\"\"\n Delete a completion status previously sent to the Degreed Completion Status endpoint\n\n Args:\n user_id: Unused.\n payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)\n containing the required completion status fields for deletion per Degreed documentation.\n\n Returns:\n A tuple containing the status code and the body of the response.\n Raises:\n HTTPError: if we received a failure response code from Degreed\n \"\"\"\n return arg_0._delete(\n urljoin(\n arg_0.enterprise_configuration.degreed_base_url,\n arg_0.global_degreed_config.completion_status_api_path\n ),\n arg_2,\n arg_0.COMPLETION_PROVIDER_SCOPE\n )"} +{"_id": "doc_7029", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Make a DELETE request using the session object to a Degreed endpoint.\n\n Args:\n url (str): The url to send a DELETE request to.\n data (str): The json encoded payload to DELETE.\n scope (str): Must be one of the scopes Degreed expects:\n - `CONTENT_PROVIDER_SCOPE`\n - `COMPLETION_PROVIDER_SCOPE`\n \"\"\"\n arg_0._create_session(arg_3)\n arg_4 = arg_0.session.delete(arg_1, arg_2=arg_2)\n return arg_4.status_code, arg_4.text"} +{"_id": "doc_7030", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Instantiate a new session object for use in connecting with Degreed\n \"\"\"\n arg_2 = datetime.datetime.utcnow()\n if arg_0.session is None or arg_0.expires_at is None or arg_2 >= arg_0.expires_at:\n # Create a new session with a valid token\n if arg_0.session:\n arg_0.session.close()\n arg_3, arg_4 = 
arg_0._get_oauth_access_token(\n arg_0.enterprise_configuration.key,\n arg_0.enterprise_configuration.secret,\n arg_0.enterprise_configuration.degreed_user_id,\n arg_0.enterprise_configuration.degreed_user_password,\n arg_1\n )\n arg_5 = requests.Session()\n arg_5.timeout = arg_0.SESSION_TIMEOUT\n arg_5.headers['Authorization'] = 'Bearer {}'.format(arg_3)\n arg_5.headers['content-type'] = 'application/json'\n arg_0.session = arg_5\n arg_0.expires_at = arg_4"} +{"_id": "doc_7031", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Return whether or not the specified content is available to the EnterpriseCustomer.\n\n Multiple course_run_ids and/or program_uuids query parameters can be sent to this view to check\n for their existence in the EnterpriseCustomerCatalogs associated with this EnterpriseCustomer.\n At least one course run key or program UUID value must be included in the request.\n \"\"\"\n arg_5 = arg_0.get_object()\n\n # Maintain plus characters in course key.\n arg_3 = [unquote(quote_plus(course_run_id)) for course_run_id in arg_3]\n\n Func = False\n for arg_7 in arg_5.enterprise_customer_catalogs.all():\n arg_8 = not arg_3 or arg_7.contains_courses(arg_3)\n arg_9 = not arg_4 or arg_7.contains_programs(arg_4)\n if arg_8 and arg_9:\n Func = True\n break\n\n return Response({'contains_content_items': Func})"} +{"_id": "doc_7032", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3): # pylint: disable=invalid-name,unused-argument\n \"\"\"\n Returns the list of enterprise customers the user has a specified group permission access to.\n \"\"\"\n arg_0.queryset = arg_0.queryset.order_by('name')\n arg_5 = arg_0.request.query_params.get('enterprise_id', None)\n arg_6 = arg_0.request.query_params.get('enterprise_slug', None)\n arg_7 = arg_0.request.query_params.get('search', None)\n\n if arg_5 is not None:\n arg_0.queryset = arg_0.queryset.filter(uuid=arg_5)\n elif arg_6 is not None:\n arg_0.queryset = arg_0.queryset.filter(slug=arg_6)\n elif arg_7 is not None:\n arg_0.queryset = arg_0.queryset.filter(name__icontains=arg_7)\n return arg_0.list(arg_1, *arg_2, **arg_3)"} +{"_id": "doc_7033", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None): # pylint: disable=invalid-name,unused-argument\n \"\"\"\n Retrieve the list of Func available to this learner.\n\n Only those Func are returned that satisfy enterprise customer's data sharing setting.\n\n Arguments:\n request (HttpRequest): Reference to in-progress request instance.\n pk (Int): Primary key value of the selected enterprise learner.\n\n Returns:\n (HttpResponse): Response object containing a list of learner's Func.\n \"\"\"\n arg_3 = arg_0.get_object()\n arg_4 = {\"Func\": arg_3.Func}\n arg_5 = serializers.EnterpriseCustomerUserEntitlementSerializer(arg_4, context={'request': arg_1})\n return Response(arg_5.data)"} +{"_id": "doc_7034", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Return whether or not the EnterpriseCustomerCatalog contains the specified content.\n\n Multiple course_run_ids and/or program_uuids query parameters can be sent to this view to check\n for their existence in the EnterpriseCustomerCatalog. 
At least one course run key\n or program UUID value must be included in the request.\n \"\"\"\n arg_5 = arg_0.get_object()\n\n # Maintain plus characters in course key.\n arg_3 = [unquote(quote_plus(course_run_id)) for course_run_id in arg_3]\n\n Func = True\n if arg_3:\n Func = arg_5.contains_courses(arg_3)\n if arg_4:\n Func = (\n Func and\n arg_5.contains_programs(arg_4)\n )\n\n return Response({'contains_content_items': Func})"} +{"_id": "doc_7035", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3): # pylint: disable=invalid-name,unused-argument\n \"\"\"\n Return the metadata for the specified course.\n\n The course needs to be included in the specified EnterpriseCustomerCatalog\n in order for metadata to be returned from this endpoint.\n \"\"\"\n arg_4 = arg_0.get_object()\n arg_5 = arg_4.get_course(arg_3)\n if not arg_5:\n raise Http404\n\n arg_6 = arg_0.get_serializer_context()\n arg_6['enterprise_customer_catalog'] = arg_4\n arg_7 = serializers.CourseDetailSerializer(arg_5, arg_6=arg_6)\n return Response(arg_7.data)"} +{"_id": "doc_7036", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None): # pylint: disable=invalid-name\n \"\"\"\n DRF view to get catalog details.\n\n Arguments:\n request (HttpRequest): Current request\n pk (int): Course catalog identifier\n\n Returns:\n (Response): DRF response object containing course catalogs.\n \"\"\"\n arg_3 = CourseCatalogApiClient(arg_1.user)\n arg_4 = arg_3.get_catalog(arg_2)\n arg_0.ensure_data_exists(\n arg_1,\n arg_4,\n error_message=(\n \"Unable to fetch API response for given catalog from endpoint '/catalog/{pk}/'. \"\n \"The resource you are looking for does not exist.\".format(arg_2=arg_2)\n )\n )\n arg_5 = arg_0.serializer_class(arg_4)\n return Response(arg_5.data)"} +{"_id": "doc_7037", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets ``email``, ``enterprise_name``, and ``number_of_codes``,\n which are the relevant parameters for this API endpoint.\n\n :param request: The request to this endpoint.\n :return: The ``email``, ``enterprise_name``, and ``number_of_codes`` from the request.\n \"\"\"\n arg_2 = get_request_value(arg_1, arg_0.REQUIRED_PARAM_EMAIL, '')\n arg_3 = get_request_value(arg_1, arg_0.REQUIRED_PARAM_ENTERPRISE_NAME, '')\n arg_4 = get_request_value(arg_1, arg_0.OPTIONAL_PARAM_NUMBER_OF_CODES, '')\n if not (arg_2 and arg_3):\n raise CodesAPIRequestError(\n arg_0.get_missing_params_message([\n (arg_0.REQUIRED_PARAM_EMAIL, bool(arg_2)),\n (arg_0.REQUIRED_PARAM_ENTERPRISE_NAME, bool(arg_3)),\n ])\n )\n return arg_2, arg_3, arg_4"} +{"_id": "doc_7038", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get a user-friendly message indicating a missing parameter for the API endpoint.\n \"\"\"\n arg_2 = ', '.join(name for name, present in arg_1 if not present)\n return arg_0.MISSING_REQUIRED_PARAMS_MSG.format(arg_2)"} +{"_id": "doc_7039", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the title of the content item.\n \"\"\"\n arg_2 = []\n\n for arg_3 in arg_0.enterprise_configuration.get_locales():\n arg_2.append({\n 'locale': arg_3,\n 'value': arg_1.get('title', '')\n })\n\n return arg_2"} +{"_id": "doc_7040", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the description of the content item.\n \"\"\"\n arg_2 = []\n\n for arg_3 in arg_0.enterprise_configuration.get_locales():\n arg_2.append({\n 'locale': arg_3,\n 'value': (\n arg_1.get('full_description') or\n arg_1.get('short_description') or\n arg_1.get('title', '')\n )\n })\n\n return arg_2"} +{"_id": 
"doc_7041", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the image URI of the content item.\n \"\"\"\n arg_2 = ''\n if arg_1['content_type'] in ['course', 'program']:\n arg_2 = arg_1.get('card_image_url')\n elif arg_1['content_type'] == 'courserun':\n arg_2 = arg_1.get('image_url')\n\n return arg_2"} +{"_id": "doc_7042", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the content metadata item launch points.\n\n SAPSF allows you to transmit an arry of content launch points which\n are meant to represent sections of a content item which a learner can\n launch into from SAPSF. Currently, we only provide a single launch\n point for a content item.\n \"\"\"\n return [{\n 'providerID': arg_0.enterprise_configuration.provider_id,\n 'launchURL': arg_1['enrollment_url'],\n 'contentTitle': arg_1['title'],\n 'contentID': arg_0.get_content_id(arg_1),\n 'launchType': 3, # This tells SAPSF to launch the course in a new browser window.\n 'mobileEnabled': True, # Always return True per ENT-1401\n 'mobileLaunchURL': arg_1['enrollment_url'],\n }]"} +{"_id": "doc_7043", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the title of the courserun content item.\n \"\"\"\n arg_2 = arg_1.get('title') or ''\n arg_3 = arg_1.get('start')\n\n if arg_3:\n if course_available_for_enrollment(arg_1):\n arg_2 += ' ({starts}: {:%B %Y})'.format(\n parse_lms_api_datetime(arg_3),\n starts=_('Starts')\n )\n else:\n arg_2 += ' ({:%B %Y} - {enrollment_closed})'.format(\n parse_lms_api_datetime(arg_3),\n enrollment_closed=_('Enrollment Closed')\n )\n\n arg_4 = []\n arg_5 = transform_language_code(arg_1.get('content_language', ''))\n for arg_6 in arg_0.enterprise_configuration.get_locales(default_locale=arg_5):\n arg_4.append({\n 'locale': arg_6,\n 'value': arg_2\n })\n\n return arg_4"} +{"_id": "doc_7044", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the schedule of the courseun content item.\n \"\"\"\n arg_2 = arg_1.get('start') or UNIX_MIN_DATE_STRING\n arg_3 = arg_1.get('end') or UNIX_MAX_DATE_STRING\n return [{\n 'startDate': parse_datetime_to_epoch_millis(arg_2),\n 'endDate': parse_datetime_to_epoch_millis(arg_3),\n 'active': current_time_is_in_interval(arg_2, arg_3)\n }]"} +{"_id": "doc_7045", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the id for the given content_metadata_item, `uuid` for programs or `key` for other content\n \"\"\"\n arg_2 = arg_1.get('key', '')\n if arg_1['content_type'] == 'program':\n arg_2 = arg_1.get('uuid', '')\n return arg_2"} +{"_id": "doc_7046", "title": "", "text": "def Func(arg_0, arg_1=1.0):\n \"\"\"\n Convert an ISO-8601 datetime string to a Unix epoch timestamp in some magnitude.\n\n By default, returns seconds.\n \"\"\"\n arg_2 = parse_lms_api_datetime(arg_0)\n arg_3 = arg_2 - UNIX_EPOCH\n return int(arg_3.total_seconds() * arg_1)"} +{"_id": "doc_7047", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Yield successive n-sized Func from dictionary.\n \"\"\"\n arg_2 = iter(arg_0)\n for arg_3 in range(0, len(arg_0), arg_1):\n yield {arg_4: arg_0[arg_4] for arg_4 in islice(arg_2, arg_1)}"} +{"_id": "doc_7048", "title": "", "text": "def Func(arg_0, arg_1='{D:02}d {H:02}h {M:02}m {S:02}s', arg_2='timedelta'):\n \"\"\"\n Convert a datetime.timedelta object or a regular number to a custom-formatted string.\n\n This function works like the strftime() method works for datetime.datetime\n objects.\n\n The fmt argument allows custom formatting to be specified. 
Fields can\n include seconds, minutes, hours, days, and weeks. Each field is optional.\n\n Arguments:\n tdelta (datetime.timedelta, int): time delta object containing the duration or an integer\n to go with the input_type.\n fmt (str): Expected format of the time delta. place holders can only be one of the following.\n 1. D to extract days from time delta\n 2. H to extract hours from time delta\n 3. M to extract months from time delta\n 4. S to extract seconds from timedelta\n input_type (str): The input_type argument allows tdelta to be a regular number instead of the\n default, which is a datetime.timedelta object.\n Valid input_type strings:\n 1. 's', 'seconds',\n 2. 'm', 'minutes',\n 3. 'h', 'hours',\n 4. 'd', 'days',\n 5. 'w', 'weeks'\n Returns:\n (str): timedelta object interpolated into a string following the given format.\n\n Examples:\n '{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)\n '{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02'\n '{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02'\n '{H}h {S}s' --> '72h 800s'\n \"\"\"\n # Convert tdelta to integer seconds.\n if arg_2 == 'timedelta':\n arg_3 = int(arg_0.total_seconds())\n elif arg_2 in ['s', 'seconds']:\n arg_3 = int(arg_0)\n elif arg_2 in ['m', 'minutes']:\n arg_3 = int(arg_0) * 60\n elif arg_2 in ['h', 'hours']:\n arg_3 = int(arg_0) * 3600\n elif arg_2 in ['d', 'days']:\n arg_3 = int(arg_0) * 86400\n elif arg_2 in ['w', 'weeks']:\n arg_3 = int(arg_0) * 604800\n else:\n raise ValueError(\n 'input_type is not valid. Valid input_type strings are: \"timedelta\", \"s\", \"m\", \"h\", \"d\", \"w\"'\n )\n\n arg_4 = Formatter()\n arg_5 = [field_tuple[1] for field_tuple in arg_4.parse(arg_1)]\n arg_6 = ('W', 'D', 'H', 'M', 'S')\n arg_7 = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}\n arg_8 = {}\n\n for arg_9 in arg_6:\n if arg_9 in arg_5 and arg_9 in arg_7:\n arg_8[arg_9], arg_3 = divmod(arg_3, arg_7[arg_9])\n\n return arg_4.format(arg_1, **arg_8)"} +{"_id": "doc_7049", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the transformed version of the course description.\n\n We choose one value out of the course's full description, short description, and title\n depending on availability and length limits.\n \"\"\"\n arg_2 = arg_1.get('full_description') or ''\n if 0 < len(arg_2) <= arg_0.LONG_STRING_LIMIT: # pylint: disable=len-as-condition\n return arg_2\n return arg_1.get('short_description') or arg_1.get('title') or ''"} +{"_id": "doc_7050", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete the file if it already exist and returns the enterprise customer logo image path.\n\n Arguments:\n instance (:class:`.EnterpriseCustomerBrandingConfiguration`): EnterpriseCustomerBrandingConfiguration object\n filename (str): file to upload\n\n Returns:\n path: path of image file e.g. 
enterprise/branding//_logo..lower()\n\n \"\"\"\n arg_2 = os.path.splitext(arg_1)[1].lower()\n arg_3 = str(arg_0.id)\n arg_4 = os.path.join(\"enterprise/branding/\", arg_3, arg_3 + \"_logo\" + arg_2)\n if default_storage.exists(arg_4):\n default_storage.delete(arg_4)\n return arg_4"} +{"_id": "doc_7051", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return link by email.\n \"\"\"\n try:\n arg_2 = User.objects.get(email=arg_1)\n try:\n return arg_0.get(user_id=arg_2.id)\n except EnterpriseCustomerUser.DoesNotExist:\n pass\n except User.DoesNotExist:\n pass\n\n try:\n return PendingEnterpriseCustomerUser.objects.get(arg_1=arg_1)\n except PendingEnterpriseCustomerUser.DoesNotExist:\n pass\n\n return None"} +{"_id": "doc_7052", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Unlink user email from Enterprise Customer.\n\n If :class:`django.contrib.auth.models.User` instance with specified email does not exist,\n :class:`.PendingEnterpriseCustomerUser` instance is deleted instead.\n\n Raises EnterpriseCustomerUser.DoesNotExist if instance of :class:`django.contrib.auth.models.User` with\n specified email exists and corresponding :class:`.EnterpriseCustomerUser` instance does not.\n\n Raises PendingEnterpriseCustomerUser.DoesNotExist exception if instance of\n :class:`django.contrib.auth.models.User` with specified email exists and corresponding\n :class:`.PendingEnterpriseCustomerUser` instance does not.\n \"\"\"\n try:\n arg_3 = User.objects.get(email=arg_2)\n # not capturing DoesNotExist intentionally to signal to view that link does not exist\n arg_4 = arg_0.get(arg_1=arg_1, user_id=arg_3.id)\n arg_4.delete()\n\n if update_user:\n # Remove the SailThru flags for enterprise learner.\n update_user.delay(\n sailthru_vars={\n 'is_enterprise_learner': False,\n 'enterprise_name': None,\n },\n email=arg_2\n )\n\n except User.DoesNotExist:\n # not capturing DoesNotExist intentionally to signal to view that link does not exist\n arg_5 = PendingEnterpriseCustomerUser.objects.get(\n arg_1=arg_1, arg_2=arg_2\n )\n arg_5.delete()\n\n LOGGER.info(\n 'Enterprise learner {%s} successfully unlinked from Enterprise Customer {%s}',\n arg_2,\n arg_1.name\n )"} +{"_id": "doc_7053", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Get the data sharing consent object associated with a certain user, enterprise customer, and other scope.\n\n :param username: The user that grants consent\n :param enterprise_customer_uuid: The consent requester\n :param course_id (optional): A course ID to which consent may be related\n :param program_uuid (optional): A program to which consent may be related\n :return: The data sharing consent object, or None if the enterprise customer for the given UUID does not exist.\n \"\"\"\n arg_4 = apps.get_model('enterprise', 'EnterpriseCustomer') # pylint: disable=invalid-name\n try:\n if arg_2:\n return get_course_data_sharing_consent(arg_0, arg_2, arg_1)\n return get_program_data_sharing_consent(arg_0, arg_3, arg_1)\n except arg_4.DoesNotExist:\n return None"} +{"_id": "doc_7054", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the data sharing consent object associated with a certain user of a customer for a course.\n\n :param username: The user that grants consent.\n :param course_id: The course for which consent is granted.\n :param enterprise_customer_uuid: The consent requester.\n :return: The data sharing consent object\n \"\"\"\n # Prevent circular imports.\n arg_3 = apps.get_model('consent', 
'DataSharingConsent') # pylint: disable=invalid-name\n return arg_3.objects.proxied_get(\n arg_0=arg_0,\n arg_1=arg_1,\n enterprise_customer__uuid=arg_2\n )"} +{"_id": "doc_7055", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Send xAPI statement for course enrollment.\n\n Arguments:\n lrs_configuration (XAPILRSConfiguration): XAPILRSConfiguration instance where to send statements.\n course_enrollment (CourseEnrollment): Course enrollment object.\n \"\"\"\n arg_2 = LearnerInfoSerializer(arg_1.user)\n arg_3 = CourseInfoSerializer(arg_1.course)\n\n arg_4 = LearnerCourseEnrollmentStatement(\n arg_1.user,\n arg_1.course,\n arg_2.data,\n arg_3.data,\n )\n EnterpriseXAPIClient(arg_0).save_statement(arg_4)"} +{"_id": "doc_7056", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Send xAPI statement for course completion.\n\n Arguments:\n lrs_configuration (XAPILRSConfiguration): XAPILRSConfiguration instance where to send statements.\n user (User): Django User object.\n course_overview (CourseOverview): Course over view object containing course details.\n course_grade (CourseGrade): course grade object.\n \"\"\"\n arg_4 = LearnerInfoSerializer(arg_1)\n arg_5 = CourseInfoSerializer(arg_2)\n\n arg_6 = LearnerCourseCompletionStatement(\n arg_1,\n arg_2,\n arg_4.data,\n arg_5.data,\n arg_3,\n )\n EnterpriseXAPIClient(arg_0).save_statement(arg_6)"} +{"_id": "doc_7057", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the Funced and transformed content metadata as a dictionary.\n \"\"\"\n arg_1 = {}\n arg_2 = arg_0.enterprise_api.get_content_metadata(arg_0.enterprise_customer)\n LOGGER.info('Retrieved content metadata for enterprise [%s]', arg_0.enterprise_customer.name)\n for arg_3 in arg_2:\n arg_4 = arg_0._transform_item(arg_3)\n LOGGER.info(\n 'Exporting content metadata item with plugin configuration [%s]: [%s]',\n arg_0.enterprise_configuration,\n json.dumps(arg_4, indent=4),\n )\n arg_5 = ContentMetadataItemExport(arg_3, arg_4)\n arg_1[arg_5.content_id] = arg_5\n return OrderedDict(sorted(arg_1.items()))"} +{"_id": "doc_7058", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Transform the provided content metadata item to the schema expected by the integrated channel.\n \"\"\"\n arg_2 = arg_1['content_type']\n arg_3 = {}\n for arg_4, arg_5 in arg_0.DATA_TRANSFORM_MAPPING.items():\n # Look for transformer functions defined on subclasses.\n # Favor content type-specific functions.\n arg_6 = (\n getattr(\n arg_0,\n 'transform_{content_type}_{edx_data_schema_key}'.format(\n content_type=arg_2,\n arg_5=arg_5\n ),\n None\n )\n or\n getattr(\n arg_0,\n 'transform_{edx_data_schema_key}'.format(\n arg_5=arg_5\n ),\n None\n )\n )\n if arg_6:\n arg_3[arg_4] = arg_6(arg_1)\n else:\n # The concrete subclass does not define an override for the given field,\n # so just use the data key to index the content metadata item dictionary.\n try:\n arg_3[arg_4] = arg_1[arg_5]\n except KeyError:\n # There may be a problem with the DATA_TRANSFORM_MAPPING on\n # the concrete subclass or the concrete subclass does not implement\n # the appropriate field tranformer function.\n LOGGER.exception(\n 'Failed to transform content metadata item field [%s] for [%s]: [%s]',\n arg_5,\n arg_0.enterprise_customer.name,\n arg_1,\n )\n\n return arg_3"} +{"_id": "doc_7059", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Perform other one-time initialization steps.\n \"\"\"\n from enterprise.signals import handle_user_post_save\n from django.db.models.signals import pre_migrate, 
post_save\n\n post_save.connect(handle_user_post_save, sender=arg_0.auth_user_model, dispatch_uid=USER_POST_SAVE_DISPATCH_UID)\n pre_migrate.connect(arg_0._disconnect_user_post_save_for_migrations)"} +{"_id": "doc_7060", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get actor for the statement.\n \"\"\"\n return Agent(\n name=arg_1,\n mbox='mailto:{email}'.format(arg_2=arg_2),\n )"} +{"_id": "doc_7061", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Parse csv file and return a stream of dictionaries representing each row.\n\n First line of CSV file must contain column headers.\n\n Arguments:\n file_stream: input file\n expected_columns (set[unicode]): columns that are expected to be present\n\n Yields:\n dict: CSV line parsed into a dictionary.\n \"\"\"\n arg_2 = unicodecsv.DictReader(arg_0, encoding=\"utf-8\")\n\n if arg_1 and set(arg_1) - set(arg_2.fieldnames):\n raise ValidationError(ValidationMessages.MISSING_EXPECTED_COLUMNS.format(\n arg_1=\", \".join(arg_1), actual_columns=\", \".join(arg_2.fieldnames)\n ))\n\n # \"yield from reader\" would be nicer, but we're on python2.7 yet.\n for arg_3 in arg_2:\n yield arg_3"} +{"_id": "doc_7062", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=False):\n \"\"\"\n Validate email to be linked to Enterprise Customer.\n\n Performs two checks:\n * Checks that email is valid\n * Checks that it is not already linked to any Enterprise Customer\n\n Arguments:\n email (str): user email to link\n raw_email (str): raw value as it was passed by user - used in error message.\n message_template (str): Validation error template string.\n ignore_existing (bool): If True to skip the check for an existing Enterprise Customer\n\n Raises:\n ValidationError: if email is invalid or already linked to Enterprise Customer.\n\n Returns:\n bool: Whether or not there is an existing record with the same email address.\n \"\"\"\n arg_1 = arg_1 if arg_1 is not None else arg_0\n arg_2 = arg_2 if arg_2 is not None else ValidationMessages.INVALID_EMAIL\n try:\n validate_email(arg_0)\n except ValidationError:\n raise ValidationError(arg_2.format(argument=arg_1))\n\n arg_4 = EnterpriseCustomerUser.objects.get_link_by_email(arg_0)\n if arg_4 and not arg_3:\n raise ValidationError(ValidationMessages.USER_ALREADY_REGISTERED.format(\n arg_0=arg_0, ec_name=arg_4.enterprise_customer.name\n ))\n return arg_4 or False"} +{"_id": "doc_7063", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return course runs from program data.\n\n Arguments:\n program(dict): Program data from Course Catalog API\n\n Returns:\n set: course runs in given program\n \"\"\"\n arg_1 = set()\n for arg_2 in arg_0.get(\"courses\", []):\n for arg_3 in arg_2.get(\"course_runs\", []):\n if \"key\" in arg_3 and arg_3[\"key\"]:\n arg_1.add(arg_3[\"key\"])\n\n return arg_1"} +{"_id": "doc_7064", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the earliest date that one of the courses in the program was available.\n For the sake of emails to new learners, we treat this as the program start date.\n\n Arguemnts:\n program (dict): Program data from Course Catalog API\n\n returns:\n datetime.datetime: The date and time at which the first course started\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.get('courses', []):\n for arg_3 in arg_2.get('course_runs', []):\n if arg_3.get('start'):\n arg_1.append(parse_lms_api_datetime(arg_3['start']))\n if not arg_1:\n return None\n return min(arg_1)"} +{"_id": "doc_7065", "title": "", "text": "def Func(arg_0, arg_1, arg_2=25):\n 
\"\"\"\n Returns paginated list.\n\n Arguments:\n object_list (QuerySet): A list of records to be paginated.\n page (int): Current page number.\n page_size (int): Number of records displayed in each paginated set.\n show_all (bool): Whether to show all records.\n\n Adopted from django/contrib/admin/templatetags/admin_list.py\n https://github.com/django/django/blob/1.11.1/django/contrib/admin/templatetags/admin_list.py#L50\n \"\"\"\n arg_3 = CustomPaginator(arg_0, arg_2)\n try:\n arg_0 = arg_3.page(arg_1)\n except PageNotAnInteger:\n arg_0 = arg_3.page(1)\n except EmptyPage:\n arg_0 = arg_3.page(arg_3.num_pages)\n\n arg_4 = []\n arg_5 = arg_0.number\n\n # If there are 10 or fewer pages, display links to every page.\n # Otherwise, do some fancy\n if arg_3.num_pages <= 10:\n arg_4 = range(arg_3.num_pages)\n else:\n # Insert \"smart\" pagination links, so that there are always ON_ENDS\n # links at either end of the list of pages, and there are always\n # ON_EACH_SIDE links at either end of the \"current page\" link.\n if arg_5 > (PAGES_ON_EACH_SIDE + PAGES_ON_ENDS + 1):\n arg_4.extend(range(1, PAGES_ON_ENDS + 1))\n arg_4.append(DOT)\n arg_4.extend(range(arg_5 - PAGES_ON_EACH_SIDE, arg_5 + 1))\n else:\n arg_4.extend(range(1, arg_5 + 1))\n if arg_5 < (arg_3.num_pages - PAGES_ON_EACH_SIDE - PAGES_ON_ENDS):\n arg_4.extend(range(arg_5 + 1, arg_5 + PAGES_ON_EACH_SIDE + 1))\n arg_4.append(DOT)\n arg_4.extend(range(arg_3.num_pages + 1 - PAGES_ON_ENDS, arg_3.num_pages + 1))\n else:\n arg_4.extend(range(arg_5 + 1, arg_3.num_pages + 1))\n\n # Override page range to implement custom smart links.\n arg_0.paginator.page_range = arg_4\n\n return arg_0"} +{"_id": "doc_7066", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clean email form field\n\n Returns:\n str: the cleaned value, converted to an email address (or an empty string)\n \"\"\"\n arg_1 = arg_0.cleaned_data[arg_0.Fields.EMAIL_OR_USERNAME].strip()\n\n if not arg_1:\n # The field is blank; we just return the existing blank value.\n return arg_1\n\n arg_2 = email_or_username__to__email(arg_1)\n arg_3 = len(split_usernames_and_emails(arg_2)) > 1\n if arg_3:\n for arg_2 in split_usernames_and_emails(arg_2):\n validate_email_to_link(\n arg_2,\n None,\n ValidationMessages.INVALID_EMAIL_OR_USERNAME,\n ignore_existing=True\n )\n arg_2 = arg_1\n else:\n validate_email_to_link(\n arg_2,\n arg_1,\n ValidationMessages.INVALID_EMAIL_OR_USERNAME,\n ignore_existing=True\n )\n\n return arg_2"} +{"_id": "doc_7067", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clean program.\n\n Try obtaining program treating form value as program UUID or title.\n\n Returns:\n dict: Program information if program found\n \"\"\"\n arg_1 = arg_0.cleaned_data[arg_0.Fields.PROGRAM].strip()\n if not arg_1:\n return None\n\n try:\n arg_2 = CourseCatalogApiClient(arg_0._user, arg_0._enterprise_customer.site)\n arg_3 = arg_2.get_program_by_uuid(arg_1) or arg_2.get_program_by_title(arg_1)\n except MultipleProgramMatchError as exc:\n raise ValidationError(ValidationMessages.MULTIPLE_PROGRAM_MATCH.format(program_count=exc.programs_matched))\n except (HttpClientError, HttpServerError):\n raise ValidationError(ValidationMessages.INVALID_PROGRAM_ID.format(arg_1=arg_1))\n\n if not arg_3:\n raise ValidationError(ValidationMessages.INVALID_PROGRAM_ID.format(arg_1=arg_1))\n\n if arg_3['status'] != ProgramStatuses.ACTIVE:\n raise ValidationError(\n ValidationMessages.PROGRAM_IS_INACTIVE.format(arg_1=arg_1, status=arg_3['status'])\n )\n\n return arg_3"} +{"_id": "doc_7068", "title": "", "text": 
"def Func(arg_0):\n \"\"\"\n Clean the notify_on_enrollment field.\n \"\"\"\n return arg_0.cleaned_data.get(arg_0.Fields.NOTIFY, arg_0.NotificationTypes.DEFAULT)"} +{"_id": "doc_7069", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the selected mode is valid for the given course .\n \"\"\"\n # Verify that the selected mode is valid for the given course .\n arg_1 = arg_0.cleaned_data.get(arg_0.Fields.COURSE)\n if arg_1:\n arg_2 = arg_0.cleaned_data.get(arg_0.Fields.COURSE_MODE)\n if not arg_2:\n raise ValidationError(ValidationMessages.COURSE_WITHOUT_COURSE_MODE)\n arg_3 = arg_1[\"course_modes\"]\n if all(arg_2 != arg_4[\"slug\"] for arg_4 in arg_3):\n arg_5 = ValidationError(ValidationMessages.COURSE_MODE_INVALID_FOR_COURSE.format(\n arg_2=arg_2,\n course_id=arg_1[\"course_id\"],\n ))\n raise ValidationError({arg_0.Fields.COURSE_MODE: arg_5})"} +{"_id": "doc_7070", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that selected mode is available for program and all courses in the program\n \"\"\"\n arg_1 = arg_0.cleaned_data.get(arg_0.Fields.PROGRAM)\n if not arg_1:\n return\n\n arg_2 = get_course_runs_from_program(arg_1)\n try:\n arg_3 = CourseCatalogApiClient(arg_0._user, arg_0._enterprise_customer.site)\n arg_4 = arg_3.get_common_course_modes(arg_2)\n arg_5 = arg_0.cleaned_data.get(arg_0.Fields.COURSE_MODE)\n except (HttpClientError, HttpServerError):\n raise ValidationError(\n ValidationMessages.FAILED_TO_OBTAIN_COURSE_MODES.format(program_title=arg_1.get(\"title\"))\n )\n\n if not arg_5:\n raise ValidationError(ValidationMessages.COURSE_WITHOUT_COURSE_MODE)\n if arg_5 not in arg_4:\n raise ValidationError(ValidationMessages.COURSE_MODE_NOT_AVAILABLE.format(\n mode=arg_5, program_title=arg_1.get(\"title\"), modes=\", \".join(arg_4)\n ))"} +{"_id": "doc_7071", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieve a list of catalog ID and name pairs.\n\n Once retrieved, these name pairs can be used directly as a value\n for the `choices` argument to a ChoiceField.\n \"\"\"\n # TODO: We will remove the discovery service catalog implementation\n # once we have fully migrated customer's to EnterpriseCustomerCatalogs.\n # For now, this code will prevent an admin from creating a new\n # EnterpriseCustomer with a discovery service catalog. They will have to first\n # save the EnterpriseCustomer admin form and then edit the EnterpriseCustomer\n # to add a discovery service catalog.\n if hasattr(arg_0.instance, 'site'):\n arg_1 = CourseCatalogApiClient(arg_0.user, arg_0.instance.site)\n else:\n arg_1 = CourseCatalogApiClient(arg_0.user)\n arg_2 = arg_1.get_all_catalogs()\n # order catalogs by name.\n arg_2 = sorted(arg_2, key=lambda arg_3: arg_3.get('name', '').lower())\n\n return BLANK_CHOICE_DASH + [\n (arg_3['id'], arg_3['name'],)\n for arg_3 in arg_2\n ]"} +{"_id": "doc_7072", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clean form fields prior to database entry.\n\n In this case, the major Funcing operation is substituting a None value for a blank\n value in the Catalog field.\n \"\"\"\n arg_1 = super(EnterpriseCustomerAdminForm, arg_0).Func()\n if 'catalog' in arg_1 and not arg_1['catalog']:\n arg_1['catalog'] = None\n return arg_1"} +{"_id": "doc_7073", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Final validations of model fields.\n\n 1. 
Validate that selected site for enterprise customer matches with the selected identity provider's site.\n \"\"\"\n super(EnterpriseCustomerIdentityProviderAdminForm, arg_0).Func()\n\n arg_1 = arg_0.Funced_data.get('provider_id', None)\n arg_2 = arg_0.Funced_data.get('enterprise_customer', None)\n\n if arg_1 is None or arg_2 is None:\n # field validation for either provider_id or enterprise_customer has already raised\n # a validation error.\n return\n\n arg_3 = utils.get_identity_provider(arg_1)\n if not arg_3:\n # This should not happen, as identity providers displayed in drop down are fetched dynamically.\n arg_4 = _(\n \"The specified Identity Provider does not exist. For more \"\n \"information, contact a system administrator.\",\n )\n # Log message for debugging\n logger.exception(arg_4)\n\n raise ValidationError(arg_4)\n\n if arg_3 and arg_3.site != arg_2.site:\n raise ValidationError(\n _(\n \"The site for the selected identity provider \"\n \"({identity_provider_site}) does not match the site for \"\n \"this enterprise customer ({enterprise_customer_site}). \"\n \"To correct this problem, select a site that has a domain \"\n \"of '{identity_provider_site}', or update the identity \"\n \"provider to '{enterprise_customer_site}'.\"\n ).format(\n enterprise_customer_site=arg_2.site,\n identity_provider_site=arg_3.site,\n ),\n )"} +{"_id": "doc_7074", "title": "", "text": "def Func():\n \"\"\"\n Ensure that all necessary resources to render the view are present.\n \"\"\"\n arg_0 = {\n 'ProgramDataExtender': ProgramDataExtender,\n }\n\n for arg_1 in arg_0:\n if arg_0[arg_1] is None:\n raise NotConnectedToOpenEdX(\n _(\"The following method from the Open edX platform is necessary for this view but isn't available.\")\n + \"\\nUnavailable: {method}\".format(arg_1=arg_1)\n )"} +{"_id": "doc_7075", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the set of variables that are needed by default across views.\n \"\"\"\n arg_2 = get_configuration_value(\"PLATFORM_NAME\", settings.PLATFORM_NAME)\n # pylint: disable=no-member\n return {\n 'enterprise_customer': arg_1,\n 'LMS_SEGMENT_KEY': settings.LMS_SEGMENT_KEY,\n 'LANGUAGE_CODE': get_language_from_request(arg_0),\n 'tagline': get_configuration_value(\"ENTERPRISE_TAGLINE\", settings.ENTERPRISE_TAGLINE),\n 'platform_description': get_configuration_value(\n \"PLATFORM_DESCRIPTION\",\n settings.PLATFORM_DESCRIPTION,\n ),\n 'LMS_ROOT_URL': settings.LMS_ROOT_URL,\n 'platform_name': arg_2,\n 'header_logo_alt_text': _('{platform_name} home page').format(arg_2=arg_2),\n 'welcome_text': constants.WELCOME_TEXT.format(arg_2=arg_2),\n 'enterprise_welcome_text': constants.ENTERPRISE_WELCOME_TEXT.format(\n enterprise_customer_name=arg_1.name,\n arg_2=arg_2,\n strong_start='',\n strong_end='',\n line_break='
    ',\n privacy_policy_link_start=\"\".format(\n pp_url=get_configuration_value('PRIVACY', 'https://www.edx.org/edx-privacy-policy', type='url'),\n ),\n privacy_policy_link_end=\"\",\n ),\n }"} +{"_id": "doc_7076", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Return a dict having course or program specific keys for data sharing consent page.\n \"\"\"\n arg_4 = {}\n if arg_2:\n arg_4.update({'course_id': arg_2, 'course_specific': True})\n if not arg_0.preview_mode:\n try:\n arg_5 = CourseCatalogApiServiceClient(arg_1.site)\n except ImproperlyConfigured:\n raise Http404\n\n arg_6 = arg_5.get_course_run(arg_2)\n arg_7 = ''\n if arg_6['start']:\n arg_7 = parse(arg_6['start']).strftime('%B %d, %Y')\n\n arg_4.update({\n 'course_title': arg_6['title'],\n 'course_start_date': arg_7,\n })\n else:\n arg_4.update({\n 'course_title': 'Demo Course',\n 'course_start_date': datetime.datetime.now().strftime('%B %d, %Y'),\n })\n else:\n arg_4.update({\n 'program_uuid': arg_3,\n 'program_specific': True,\n })\n return arg_4"} +{"_id": "doc_7077", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process the above form.\n \"\"\"\n arg_2 = arg_1.POST.get('enterprise_customer_uuid')\n arg_3 = arg_1.POST.get('redirect_url')\n arg_4 = arg_1.POST.get('failure_url')\n arg_5 = arg_1.POST.get('course_id', '')\n arg_6 = arg_1.POST.get('program_uuid', '')\n\n arg_7 = get_enterprise_customer_or_404(arg_2)\n arg_8 = get_global_context(arg_1, arg_7)\n\n if not (arg_2 and arg_3 and arg_4):\n arg_9 = 'ENTGDS005'\n arg_10 = (\n 'Error: one or more of the following values was falsy: '\n 'enterprise_uuid: {enterprise_uuid}, '\n 'success_url: {success_url}, '\n 'failure_url: {failure_url} for course_id {course_id}. '\n 'The following error code was reported to the user {userid}: {error_code}'.format(\n userid=arg_1.user.id,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_9=arg_9,\n arg_5=arg_5,\n )\n )\n return render_page_with_error_code_message(arg_1, arg_8, arg_9, arg_10)\n\n if not arg_0.course_or_program_exist(arg_5, arg_6):\n arg_9 = 'ENTGDS006'\n arg_10 = (\n 'Neither the course with course_id: {course_id} '\n 'or program with {program_uuid} exist for '\n 'enterprise customer {enterprise_uuid}'\n 'Error code {error_code} presented to user {userid}'.format(\n arg_5=arg_5,\n arg_6=arg_6,\n arg_9=arg_9,\n userid=arg_1.user.id,\n arg_2=arg_2,\n )\n )\n return render_page_with_error_code_message(arg_1, arg_8, arg_9, arg_10)\n\n arg_11 = get_data_sharing_consent(\n arg_1.user.username,\n arg_2,\n arg_6=arg_6,\n arg_5=arg_5\n )\n if arg_11 is None:\n arg_9 = 'ENTGDS007'\n arg_10 = (\n 'The was a problem with the consent record of user {userid} with '\n 'enterprise_uuid {enterprise_uuid}. consent_record has a value '\n 'of {consent_record} and a '\n 'value for course_id {course_id}. 
'\n 'Error code {error_code} presented to user'.format(\n userid=arg_1.user.id,\n arg_2=arg_2,\n arg_11=arg_11,\n arg_9=arg_9,\n arg_5=arg_5,\n )\n )\n return render_page_with_error_code_message(arg_1, arg_8, arg_9, arg_10)\n\n arg_12 = arg_1.POST.get('defer_creation')\n arg_13 = bool(arg_1.POST.get('data_sharing_consent', False))\n if arg_12 is None and arg_11.consent_required():\n if arg_5:\n arg_14, arg_15 = EnterpriseCustomerUser.objects.get_or_create(\n arg_7=arg_11.enterprise_customer,\n user_id=arg_1.user.id\n )\n arg_14.update_session(arg_1)\n arg_15, arg_16 = EnterpriseCourseEnrollment.objects.get_or_create(\n arg_14=arg_14,\n arg_5=arg_5,\n )\n if arg_16:\n track_enrollment('data-consent-page-enrollment', arg_1.user.id, arg_5, arg_1.path)\n\n arg_11.granted = arg_13\n arg_11.save()\n\n return redirect(arg_3 if arg_13 else arg_4)"} +{"_id": "doc_7078", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Handle the enrollment of enterprise learner in the provided course.\n\n Based on `enterprise_uuid` in URL, the view will decide which\n enterprise customer's course enrollment record should be created.\n\n Depending on the value of query parameter `course_mode` then learner\n will be either redirected to LMS dashboard for audit modes or\n redirected to ecommerce basket flow for payment of premium modes.\n \"\"\"\n arg_4 = arg_1.GET.Func('course_mode')\n arg_5 = arg_1.GET.Func('catalog')\n\n # Redirect the learner to LMS dashboard in case no course mode is\n # provided as query parameter `course_mode`\n if not arg_4:\n return redirect(LMS_DASHBOARD_URL)\n\n arg_6 = EnrollmentApiClient()\n arg_7 = arg_6.Func_course_modes(arg_3)\n\n # Verify that the request user belongs to the enterprise against the\n # provided `enterprise_uuid`.\n arg_8 = Func_enterprise_customer_or_404(arg_2)\n arg_9 = Func_enterprise_customer_user(arg_1.user.id, arg_8.uuid)\n\n if not arg_7:\n arg_10 = Func_global_context(arg_1, arg_8)\n arg_11 = 'ENTHCE000'\n arg_12 = (\n 'No course_modes for course_id {course_id} for enterprise_catalog_uuid '\n '{enterprise_catalog_uuid}.'\n 'The following error was presented to '\n 'user {userid}: {error_code}'.format(\n userid=arg_1.user.id,\n arg_5=arg_5,\n arg_3=arg_3,\n arg_11=arg_11\n )\n )\n return render_page_with_error_code_message(arg_1, arg_10, arg_11, arg_12)\n\n arg_13 = None\n for arg_14 in arg_7:\n if arg_14['slug'] == arg_4:\n arg_13 = arg_14\n break\n\n if not arg_13:\n return redirect(LMS_DASHBOARD_URL)\n\n # Create the Enterprise backend database records for this course\n # enrollment\n arg_15, arg_16 = EnterpriseCourseEnrollment.objects.Func_or_create(\n arg_9=arg_9,\n arg_3=arg_3,\n )\n if arg_16:\n track_enrollment('course-landing-page-enrollment', arg_1.user.id, arg_3, arg_1.Func_full_path())\n\n DataSharingConsent.objects.update_or_create(\n username=arg_9.username,\n arg_3=arg_3,\n arg_8=arg_9.enterprise_customer,\n defaults={\n 'granted': True\n },\n )\n\n arg_17 = Funcattr(settings, 'ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES', ['audit', 'honor'])\n if arg_13['slug'] in arg_17:\n # In case of Audit course modes enroll the learner directly through\n # enrollment API client and redirect the learner to dashboard.\n arg_6.enroll_user_in_course(\n arg_1.user.username, arg_3, arg_13['slug']\n )\n\n return redirect(LMS_COURSEWARE_URL.format(arg_3=arg_3))\n\n # redirect the enterprise learner to the ecommerce flow in LMS\n # Note: LMS start flow automatically detects the paid mode\n arg_18 = 
LMS_START_PREMIUM_COURSE_FLOW_URL.format(arg_3=arg_3)\n if arg_5:\n arg_18 += '?catalog={catalog_uuid}'.format(\n catalog_uuid=arg_5\n )\n\n return redirect(arg_18)"} +{"_id": "doc_7079", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Set the final discounted price on each premium mode.\n \"\"\"\n arg_3 = []\n for arg_4 in arg_1:\n if arg_4['premium']:\n arg_4['final_price'] = EcommerceApiClient(arg_2.user).get_course_final_price(\n arg_4=arg_4,\n enterprise_catalog_uuid=arg_2.GET.get(\n 'catalog'\n ) if arg_2.method == 'GET' else None,\n )\n arg_3.append(arg_4)\n return arg_3"} +{"_id": "doc_7080", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Return the available course modes for the course run.\n\n The provided EnterpriseCustomerCatalog is used to filter and order the\n course modes returned using the EnterpriseCustomerCatalog's\n field \"enabled_course_modes\".\n \"\"\"\n arg_4 = EnrollmentApiClient().get_course_modes(arg_2)\n if not arg_4:\n LOGGER.warning('Unable to get course modes for course run id {course_run_id}.'.format(\n arg_2=arg_2\n ))\n messages.add_generic_info_message_for_error(arg_1)\n\n if arg_3:\n # filter and order course modes according to the enterprise catalog\n arg_4 = [mode for mode in arg_4 if mode['slug'] in arg_3.enabled_course_modes]\n arg_4.sort(key=lambda course_mode: arg_3.enabled_course_modes.index(course_mode['slug']))\n if not arg_4:\n LOGGER.info(\n 'No matching course modes found for course run {course_run_id} in '\n 'EnterpriseCustomerCatalog [{enterprise_catalog_uuid}]'.format(\n arg_2=arg_2,\n enterprise_catalog_uuid=arg_3,\n )\n )\n messages.add_generic_info_message_for_error(arg_1)\n\n return arg_4"} +{"_id": "doc_7081", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Extend a course with more details needed for the program landing page.\n\n In particular, we add the following:\n\n * `course_image_uri`\n * `course_title`\n * `course_level_type`\n * `course_short_description`\n * `course_full_description`\n * `course_effort`\n * `expected_learning_items`\n * `staff`\n \"\"\"\n arg_3 = arg_0['course_runs'][0]['key']\n try:\n arg_4 = CourseCatalogApiServiceClient(arg_1.site)\n except ImproperlyConfigured:\n arg_5 = 'ENTPEV000'\n LOGGER.error(\n 'CourseCatalogApiServiceClient is improperly configured. '\n 'Returned error code {error_code} to user {userid} '\n 'and enterprise_customer {enterprise_customer} '\n 'for course_run_id {course_run_id}'.format(\n arg_5=arg_5,\n userid=arg_2.user.id,\n arg_1=arg_1.uuid,\n arg_3=arg_3,\n )\n )\n messages.add_generic_error_message_with_code(arg_2, arg_5)\n return ({}, arg_5)\n\n arg_6, arg_7 = arg_4.get_course_and_course_run(arg_3)\n if not arg_6 or not arg_7:\n arg_5 = 'ENTPEV001'\n LOGGER.error(\n 'User {userid} of enterprise customer {enterprise_customer} encountered an error.'\n 'No course_details or course_run_details found for '\n 'course_run_id {course_run_id}. 
'\n 'The following error code reported to the user: {error_code}'.format(\n userid=arg_2.user.id,\n arg_1=arg_1.uuid,\n arg_3=arg_3,\n arg_5=arg_5,\n )\n )\n messages.add_generic_error_message_with_code(arg_2, arg_5)\n return ({}, arg_5)\n\n arg_8 = arg_7['weeks_to_complete']\n arg_9 = arg_7['image'] or {}\n arg_0.update({\n 'course_image_uri': arg_9.get('src', ''),\n 'course_title': arg_7['title'],\n 'course_level_type': arg_7.get('level_type', ''),\n 'course_short_description': arg_7['short_description'] or '',\n 'course_full_description': clean_html_for_template_rendering(arg_7['full_description'] or ''),\n 'expected_learning_items': arg_6.get('expected_learning_items', []),\n 'staff': arg_7.get('staff', []),\n 'course_effort': ungettext_min_max(\n '{} hour per week',\n '{} hours per week',\n '{}-{} hours per week',\n arg_7['min_effort'] or None,\n arg_7['max_effort'] or None,\n ) or '',\n 'weeks_to_complete': ungettext(\n '{} week',\n '{} weeks',\n arg_8\n ).format(arg_8) if arg_8 else '',\n })\n return arg_0, None"} +{"_id": "doc_7082", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n User is requesting a course, we need to translate that into the current course run.\n\n :param user:\n :param enterprise_customer:\n :param course_key:\n :return: course_run_id\n \"\"\"\n try:\n arg_3 = CourseCatalogApiServiceClient(arg_1.site).get_course_details(arg_2)\n except ImproperlyConfigured:\n raise Http404\n\n arg_4 = EnrollmentApiClient().get_enrolled_courses(arg_0.username)\n arg_5 = get_active_course_runs(\n arg_3,\n arg_4\n ) if arg_4 else []\n arg_6 = get_current_course_run(arg_3, arg_5)\n if arg_6:\n arg_7 = arg_6['key']\n return arg_7\n else:\n raise Http404"} +{"_id": "doc_7083", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Return whether a request is eligible for direct audit enrollment for a particular enterprise customer.\n\n 'resource_id' can be either course_run_id or program_uuid.\n We check for the following criteria:\n - The `audit` query parameter.\n - The user's being routed to the course enrollment landing page.\n - The customer's catalog contains the course in question.\n - The audit track is an available mode for the course.\n \"\"\"\n arg_5 = arg_4 if arg_4 else arg_3\n\n # Return it in one big statement to utilize short-circuiting behavior. Avoid the API call if possible.\n return arg_1.GET.get('audit') and \\\n arg_1.path == arg_0.COURSE_ENROLLMENT_VIEW_URL.format(arg_2.uuid, arg_5) and \\\n arg_2.catalog_contains_course(arg_3) and \\\n EnrollmentApiClient().has_course_mode(arg_3, 'audit')"} +{"_id": "doc_7084", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Redirects to the appropriate view depending on where the user came from.\n \"\"\"\n arg_4, arg_5, arg_6, arg_7 = RouterView.get_path_variables(**arg_3)\n arg_8 = arg_6 or arg_5 or arg_7\n # Replace enterprise UUID and resource ID with '{}', to easily match with a path in RouterView.VIEWS. 
Example:\n # /enterprise/fake-uuid/course/course-v1:cool+course+2017/enroll/ -> /enterprise/{}/course/{}/enroll/\n arg_9 = re.sub('{}|{}'.format(arg_4, re.escape(arg_8)), '{}', arg_1.path)\n\n # Remove course_key from kwargs if it exists because delegate views are not expecting it.\n arg_3.pop('course_key', None)\n\n return arg_0.VIEWS[arg_9].as_view()(arg_1, *arg_2, **arg_3)"} +{"_id": "doc_7085", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Run some custom GET logic for Enterprise workflows before routing the user through existing views.\n\n In particular, before routing to existing views:\n - If the requested resource is a course, find the current course run for that course,\n and make that course run the requested resource instead.\n - Look to see whether a request is eligible for direct audit enrollment, and if so, directly enroll the user.\n \"\"\"\n arg_4, arg_5, arg_6, arg_7 = RouterView.Func_path_variables(**arg_3)\n arg_8 = Func_enterprise_customer_or_404(arg_4)\n if arg_6:\n try:\n arg_5 = RouterView.Func_course_run_id(arg_1.user, arg_8, arg_6)\n except Http404:\n arg_9 = Func_global_context(arg_1, arg_8)\n arg_10 = 'ENTRV000'\n arg_11 = (\n 'Could not find course run with id {course_run_id} '\n 'for course key {course_key} and program_uuid {program_uuid} '\n 'for enterprise_customer_uuid {enterprise_customer_uuid} '\n 'Returned error code {error_code} to user {userid}'.format(\n arg_6=arg_6,\n arg_5=arg_5,\n arg_4=arg_4,\n arg_10=arg_10,\n userid=arg_1.user.id,\n arg_7=arg_7,\n )\n )\n return render_page_with_error_code_message(arg_1, arg_9, arg_10, arg_11)\n arg_3['course_id'] = arg_5\n\n # Ensure that the link is saved to the database prior to making some call in a downstream view\n # which may need to know that the user belongs to an enterprise customer.\n with transaction.atomic():\n arg_12, arg_13 = EnterpriseCustomerUser.objects.Func_or_create(\n arg_8=arg_8,\n user_id=arg_1.user.id\n )\n arg_12.update_session(arg_1)\n\n # Directly enroll in audit mode if the request in question has full direct audit enrollment eligibility.\n arg_14 = arg_5 or arg_7\n if arg_0.eligible_for_direct_audit_enrollment(arg_1, arg_8, arg_14, arg_6):\n try:\n arg_12.enroll(arg_14, 'audit', cohort=arg_1.GET.Func('cohort', None))\n track_enrollment('direct-audit-enrollment', arg_1.user.id, arg_14, arg_1.Func_full_path())\n except (CourseEnrollmentDowngradeError, CourseEnrollmentPermissionError):\n pass\n # The courseware view logic will check for DSC requirements, and route to the DSC page if necessary.\n return redirect(LMS_COURSEWARE_URL.format(course_id=arg_14))\n\n return arg_0.redirect(arg_1, *arg_2, **arg_3)"} +{"_id": "doc_7086", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Run some custom POST logic for Enterprise workflows before routing the user through existing views.\n \"\"\"\n # pylint: disable=unused-variable\n arg_4, arg_5, arg_6, arg_7 = RouterView.get_path_variables(**arg_3)\n arg_8 = get_enterprise_customer_or_404(arg_4)\n\n if arg_6:\n arg_9 = get_global_context(arg_1, arg_8)\n try:\n arg_3['course_id'] = RouterView.get_course_run_id(arg_1.user, arg_8, arg_6)\n except Http404:\n arg_10 = 'ENTRV001'\n arg_11 = (\n 'Could not find course run with id {course_run_id} '\n 'for course key {course_key} and '\n 'for enterprise_customer_uuid {enterprise_customer_uuid} '\n 'and program {program_uuid}. 
'\n 'Returned error code {error_code} to user {userid}'.format(\n arg_6=arg_6,\n arg_5=arg_5,\n arg_4=arg_4,\n arg_10=arg_10,\n userid=arg_1.user.id,\n arg_7=arg_7,\n )\n )\n return render_page_with_error_code_message(arg_1, arg_9, arg_10, arg_11)\n\n return arg_0.redirect(arg_1, *arg_2, **arg_3)"} +{"_id": "doc_7087", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Task to send learner data to each linked integrated channel.\n\n Arguments:\n username (str): The username of the User to be used for making API requests for learner data.\n channel_code (str): Capitalized identifier for the integrated channel\n channel_pk (str): Primary key for identifying integrated channel\n\n \"\"\"\n arg_3 = time.time()\n arg_4 = User.objects.get(arg_0=arg_0)\n arg_5 = INTEGRATED_CHANNEL_CHOICES[arg_1].objects.get(pk=arg_2)\n LOGGER.info('Processing learners for integrated channel using configuration: [%s]', arg_5)\n\n # Note: learner data transmission code paths don't raise any uncaught exception, so we don't need a broad\n # try-except block here.\n arg_5.Func(arg_4)\n\n arg_6 = time.time() - arg_3\n LOGGER.info(\n 'Learner data transmission task for integrated channel configuration [%s] took [%s] seconds',\n arg_5,\n arg_6\n )"} +{"_id": "doc_7088", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Task to unlink inactive learners of provided integrated channel.\n\n Arguments:\n channel_code (str): Capitalized identifier for the integrated channel\n channel_pk (str): Primary key for identifying integrated channel\n\n \"\"\"\n arg_2 = time.time()\n arg_3 = INTEGRATED_CHANNEL_CHOICES[arg_0].objects.get(pk=arg_1)\n LOGGER.info('Processing learners to unlink inactive users using configuration: [%s]', arg_3)\n\n # Note: learner data transmission code paths don't raise any uncaught exception, so we don't need a broad\n # try-except block here.\n arg_3.Func()\n\n arg_4 = time.time() - arg_2\n LOGGER.info(\n 'Unlink inactive learners task for integrated channel configuration [%s] took [%s] seconds',\n arg_3,\n arg_4\n )"} +{"_id": "doc_7089", "title": "", "text": "def Func(arg_0, **arg_1): # pylint: disable=unused-argument\n \"\"\"\n Handle User model changes - checks if pending enterprise customer user record exists and upgrades it to actual link.\n\n If there are pending enrollments attached to the PendingEnterpriseCustomerUser, then this signal also takes the\n newly-created users and enrolls them in the relevant courses.\n \"\"\"\n arg_2 = arg_1.get(\"created\", False)\n arg_3 = arg_1.get(\"instance\", None)\n\n if arg_3 is None:\n return # should never happen, but better safe than 500 error\n\n try:\n arg_4 = PendingEnterpriseCustomerUser.objects.get(user_email=arg_3.email)\n except PendingEnterpriseCustomerUser.DoesNotExist:\n return # nothing to do in this case\n\n if not arg_2:\n # existing user changed his email to match one of pending link records - try linking him to EC\n try:\n arg_5 = EnterpriseCustomerUser.objects.get(user_id=arg_3.id)\n arg_6 = \"User {user} have changed email to match pending Enterprise Customer link, \" \\\n \"but was already linked to Enterprise Customer {enterprise_customer} - \" \\\n \"deleting pending link record\"\n logger.info(arg_6.format(\n user=arg_3, enterprise_customer=arg_5.enterprise_customer\n ))\n arg_4.delete()\n return\n except EnterpriseCustomerUser.DoesNotExist:\n pass # everything ok - current user is not linked to other ECs\n\n arg_7 = EnterpriseCustomerUser.objects.create(\n enterprise_customer=arg_4.enterprise_customer,\n 
user_id=arg_3.id\n )\n arg_8 = list(arg_4.pendingenrollment_set.all())\n if arg_8:\n def _complete_user_enrollment(): # pylint: disable=missing-docstring\n for arg_9 in arg_8:\n # EnterpriseCustomers may enroll users in courses before the users themselves\n # actually exist in the system; in such a case, the enrollment for each such\n # course is finalized when the user registers with the OpenEdX platform.\n arg_7.enroll(\n arg_9.course_id, arg_9.course_mode, cohort=arg_9.cohort_name)\n track_enrollment('pending-admin-enrollment', arg_3.id, arg_9.course_id)\n arg_4.delete()\n transaction.on_commit(_complete_user_enrollment)\n else:\n arg_4.delete()"} +{"_id": "doc_7090", "title": "", "text": "def Func(arg_0, arg_1, **arg_2): # pylint: disable=unused-argument\n \"\"\"\n Set default value for `EnterpriseCustomerCatalog.content_filter` if not already set.\n \"\"\"\n if arg_2['created'] and not arg_1.content_filter:\n arg_1.content_filter = get_default_catalog_content_filter()\n arg_1.save()"} +{"_id": "doc_7091", "title": "", "text": "def Func(arg_0, arg_1, **arg_2): # pylint: disable=unused-argument\n \"\"\"\n Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created.\n \"\"\"\n if arg_2['created'] and arg_1.user:\n arg_3, arg_4 = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)\n SystemWideEnterpriseUserRoleAssignment.objects.get_or_create(\n user=arg_1.user,\n role=arg_3\n )"} +{"_id": "doc_7092", "title": "", "text": "def Func(*arg_0):\n \"\"\"\n Ensure at least one of the specified query parameters are included in the request.\n\n This decorator checks for the existence of at least one of the specified query\n parameters and passes the values as function parameters to the decorated view.\n If none of the specified query parameters are included in the request, a\n ValidationError is raised.\n\n Usage::\n @Func('program_uuids', 'course_run_ids')\n def my_view(request, program_uuids, course_run_ids):\n # Some functionality ...\n \"\"\"\n def outer_wrapper(arg_1):\n \"\"\" Allow the passing of parameters to Func. 
\"\"\"\n @wraps(arg_1)\n def wrapper(arg_2, *arg_3, **arg_4):\n \"\"\"\n Checks for the existence of the specified query parameters, raises a\n ValidationError if none of them were included in the request.\n \"\"\"\n arg_5 = False\n for arg_6 in arg_0:\n arg_7 = arg_2.query_params.getlist(arg_6)\n arg_4[arg_6] = arg_7\n if arg_7:\n arg_5 = True\n if not arg_5:\n raise ValidationError(\n detail='You must provide at least one of the following query parameters: {params}.'.format(\n params=', '.join(arg_0)\n )\n )\n return arg_1(arg_2, *arg_3, **arg_4)\n return wrapper\n return outer_wrapper"} +{"_id": "doc_7093", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Assigns enterprise role to users.\n \"\"\"\n arg_4 = arg_2['role']\n arg_5 = arg_2['batch_limit']\n arg_6 = arg_2['batch_sleep']\n arg_7 = arg_2['batch_offset']\n\n arg_8 = arg_7\n\n arg_9 = arg_1(\n arg_7,\n arg_7 + arg_5\n )\n\n arg_10 = SystemWideEnterpriseRole\n arg_11 = SystemWideEnterpriseUserRoleAssignment\n\n if arg_3:\n arg_10 = EnterpriseFeatureRole\n arg_11 = EnterpriseFeatureUserRoleAssignment\n\n arg_12 = arg_10.objects.get(name=arg_4)\n while arg_9.count() > 0:\n for arg_13, arg_14 in enumerate(arg_9):\n LOGGER.info(\n 'Processing user with index %s and id %s',\n arg_8 + arg_13, arg_14.id\n )\n arg_11.objects.get_or_create(\n arg_14=arg_14,\n role=arg_12\n )\n\n sleep(arg_6)\n arg_8 += len(arg_9)\n arg_9 = arg_1(\n arg_8,\n arg_8 + arg_5\n )"} +{"_id": "doc_7094", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Entry point for managment command execution.\n \"\"\"\n LOGGER.info('Starting assigning enterprise roles to users!')\n\n arg_3 = arg_2['role']\n if arg_3 == ENTERPRISE_ADMIN_ROLE:\n # Assign admin role to non-staff users with enterprise data api access.\n arg_0._assign_enterprise_role_to_users(arg_0._get_enterprise_admin_users_batch, arg_2)\n elif arg_3 == ENTERPRISE_OPERATOR_ROLE:\n # Assign operator role to staff users with enterprise data api access.\n arg_0._assign_enterprise_role_to_users(arg_0._get_enterprise_operator_users_batch, arg_2)\n elif arg_3 == ENTERPRISE_LEARNER_ROLE:\n # Assign enterprise learner role to enterprise customer users.\n arg_0._assign_enterprise_role_to_users(arg_0._get_enterprise_customer_users_batch, arg_2)\n elif arg_3 == ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE:\n # Assign enterprise enrollment api admin to non-staff users with enterprise data api access.\n arg_0._assign_enterprise_role_to_users(arg_0._get_enterprise_enrollment_api_admin_users_batch, arg_2, True)\n elif arg_3 == ENTERPRISE_CATALOG_ADMIN_ROLE:\n # Assign enterprise catalog admin role to users with having credentials in catalog.\n arg_0._assign_enterprise_role_to_users(arg_0._get_enterprise_catalog_admin_users_batch, arg_2, True)\n else:\n raise CommandError('Please provide a valid role name. 
Supported roles are {admin} and {learner}'.format(\n admin=ENTERPRISE_ADMIN_ROLE,\n learner=ENTERPRISE_LEARNER_ROLE\n ))\n\n LOGGER.info('Successfully finished assigning enterprise roles to users!')"} +{"_id": "doc_7095", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Perform the linking of user in the process of logging to the Enterprise Customer.\n\n Args:\n backend: The class handling the SSO interaction (SAML, OAuth, etc)\n user: The user object in the process of being logged in with\n **kwargs: Any remaining pipeline variables\n\n \"\"\"\n arg_3 = arg_0.strategy.request\n arg_4 = get_enterprise_customer_for_running_pipeline(\n arg_3,\n {\n 'backend': arg_0.name,\n 'kwargs': arg_2\n }\n )\n if arg_4 is None:\n # This pipeline element is not being activated as a part of an Enterprise logistration\n return\n\n # proceed with the creation of a link between the user and the enterprise customer, then exit.\n arg_5, arg_6 = EnterpriseCustomerUser.objects.update_or_create(\n arg_4=arg_4,\n user_id=arg_1.id\n )\n arg_5.update_session(arg_3)"} +{"_id": "doc_7096", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Find the LMS user from the LMS model `UserSocialAuth`.\n\n Arguments:\n tpa_provider (third_party_auth.provider): third party auth provider object\n tpa_username (str): Username returned by the third party auth\n\n \"\"\"\n arg_2 = UserSocialAuth.objects.select_related('user').filter(\n user__username=arg_1, provider=arg_0.backend_name\n ).first()\n\n return arg_2.user if arg_2 else None"} +{"_id": "doc_7097", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Instantiate a new session object for use in connecting with SAP SuccessFactors\n \"\"\"\n arg_1 = requests.Session()\n arg_1.timeout = arg_0.SESSION_TIMEOUT\n\n arg_3, arg_4 = SAPSuccessFactorsAPIClient.get_oauth_access_token(\n arg_0.enterprise_configuration.sapsf_base_url,\n arg_0.enterprise_configuration.key,\n arg_0.enterprise_configuration.secret,\n arg_0.enterprise_configuration.sapsf_company_id,\n arg_0.enterprise_configuration.sapsf_user_id,\n arg_0.enterprise_configuration.user_type\n )\n\n arg_1.headers['Authorization'] = 'Bearer {}'.format(arg_3)\n arg_1.headers['content-type'] = 'application/json'\n arg_0.session = arg_1\n arg_0.expires_at = arg_4"} +{"_id": "doc_7098", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Make a post request using the session object to a SuccessFactors endpoint.\n\n Args:\n url (str): The url to post to.\n payload (str): The json encoded payload to post.\n \"\"\"\n arg_3 = datetime.datetime.utcnow()\n if arg_3 >= arg_0.expires_at:\n # Create a new session with a valid token\n arg_0.session.close()\n arg_0._create_session()\n arg_4 = arg_0.session.post(arg_1, data=arg_2)\n return arg_4.status_code, arg_4.text"} +{"_id": "doc_7099", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Make recursive GET calls to traverse the paginated API response for search students.\n \"\"\"\n arg_5 = '{sap_search_student_url}&{pagination_criterion}'.format(\n arg_1=arg_1,\n pagination_criterion='$count=true&$top={page_size}&$skip={start_at}'.format(\n arg_3=arg_3,\n arg_4=arg_4,\n ),\n )\n try:\n arg_6 = arg_0.session.get(arg_5)\n arg_7 = arg_6.json()\n except (ConnectionError, Timeout):\n LOGGER.warning(\n 'Unable to fetch inactive learners from SAP searchStudent API with url '\n '\"{%s}\".', arg_5,\n )\n return None\n\n if 'error' in arg_7:\n LOGGER.warning(\n 'SAP searchStudent API for customer %s and base url %s returned response with 
'\n 'error message \"%s\" and with error code \"%s\".',\n arg_0.enterprise_configuration.enterprise_customer.name,\n arg_0.enterprise_configuration.sapsf_base_url,\n arg_7['error'].get('message'),\n arg_7['error'].get('code'),\n )\n return None\n\n arg_8 = arg_3 + arg_4\n arg_2 += arg_7['value']\n if arg_7['@odata.count'] > arg_8:\n return arg_0.Func(\n arg_1,\n arg_2,\n arg_3=arg_3,\n arg_4=arg_8,\n )\n\n return arg_2"} +{"_id": "doc_7100", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Filter only for the user's ID if non-staff.\n \"\"\"\n if not arg_1.user.is_staff:\n arg_4 = {arg_3.USER_ID_FILTER: arg_1.user.id}\n arg_2 = arg_2.filter(**arg_4)\n return arg_2"} +{"_id": "doc_7101", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Send a completion status call to the integrated channel using the client.\n\n Args:\n payload: The learner completion data payload to send to the integrated channel.\n kwargs: Contains integrated channel-specific information for customized transmission variables.\n - app_label: The app label of the integrated channel for whom to store learner data records for.\n - model_name: The name of the specific learner data record model to use.\n - remote_user_id: The remote ID field name of the learner on the audit model.\n \"\"\"\n arg_3 = apps.get_model( # pylint: disable=invalid-name\n app_label=arg_2.get('app_label', 'integrated_channel'),\n model_name=arg_2.get('model_name', 'LearnerDataTransmissionAudit'),\n )\n # Since we have started sending courses to integrated channels instead of course runs,\n # we need to attempt to send transmissions with course keys and course run ids in order to\n # ensure that we account for whether courses or course runs exist in the integrated channel.\n # The exporters have been changed to return multiple transmission records to attempt,\n # one by course key and one by course run id.\n # If the transmission with the course key succeeds, the next one will get skipped.\n # If it fails, the one with the course run id will be attempted and (presumably) succeed.\n for arg_4 in arg_1.export():\n arg_5 = arg_4.serialize(enterprise_configuration=arg_0.enterprise_configuration)\n LOGGER.debug('Attempting to Func serialized payload: %s', arg_5)\n\n arg_6 = arg_4.enterprise_course_enrollment_id\n if arg_4.completed_timestamp is None:\n # The user has not completed the course, so we shouldn't send a completion status call\n LOGGER.info('Skipping in-progress enterprise enrollment {}'.format(arg_6))\n continue\n\n arg_7 = arg_3.objects.filter(\n enterprise_course_enrollment_id=arg_6,\n arg_11=''\n )\n if arg_7.exists():\n # We've already sent a completion status call for this enrollment\n LOGGER.info('Skipping previously sent enterprise enrollment {}'.format(arg_6))\n continue\n\n try:\n arg_8, arg_9 = arg_0.client.create_course_completion(\n getattr(arg_4, arg_2.get('remote_user_id')),\n arg_5\n )\n LOGGER.info(\n 'Successfully sent completion status call for enterprise enrollment {}'.format(\n arg_6,\n )\n )\n except RequestException as request_exception:\n arg_8 = 500\n arg_9 = str(request_exception)\n arg_0.handle_transmission_error(arg_4, request_exception)\n\n arg_4.status = str(arg_8)\n arg_4.error_message = arg_9 if arg_8 >= 400 else ''\n arg_4.save()"} +{"_id": "doc_7102", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Validate that a particular image extension.\n \"\"\"\n arg_1 = get_app_config()\n arg_2 = os.path.splitext(arg_0.name)[1]\n if arg_1 and not arg_2.lower() in 
arg_1.valid_image_extensions:\n raise ValidationError(_(\"Unsupported file extension.\"))"} +{"_id": "doc_7103", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the enterprise customer id given an enterprise customer catalog id.\n \"\"\"\n try:\n return str(EnterpriseCustomerCatalog.objects.get(pk=arg_0).enterprise_customer.uuid)\n except EnterpriseCustomerCatalog.DoesNotExist:\n return None"} +{"_id": "doc_7104", "title": "", "text": "def Func(arg_0): # pylint: disable=unused-argument\n \"\"\"\n Run sphinx-apidoc after Sphinx initialization.\n\n Read the Docs won't run tox or custom shell commands, so we need this to\n avoid checking in the generated reStructuredText files.\n \"\"\"\n arg_1 = os.path.abspath(os.path.dirname(__file__))\n arg_2 = os.path.abspath(os.path.join(arg_1, '..'))\n arg_3 = 'sphinx-apidoc'\n if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv\n # If we are, assemble the path manually\n arg_4 = os.path.abspath(os.path.join(sys.prefix, 'bin'))\n arg_3 = os.path.join(arg_4, arg_3)\n check_call([arg_3, '-o', arg_1, os.path.join(arg_2, 'enterprise'),\n os.path.join(arg_2, 'enterprise/migrations')])"} +{"_id": "doc_7105", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the enterprise customer requested for the given uuid, None if not.\n\n Raises CommandError if uuid is invalid.\n \"\"\"\n if arg_0 is None:\n return None\n try:\n return EnterpriseCustomer.active_customers.get(arg_0=arg_0)\n except EnterpriseCustomer.DoesNotExist:\n raise CommandError(\n _('Enterprise customer {uuid} not found, or not active').format(arg_0=arg_0))"} +{"_id": "doc_7106", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Assemble a list of integrated channel classes to transmit to.\n\n If a valid channel type was provided, use it.\n\n Otherwise, use all the available channel types.\n \"\"\"\n if arg_0:\n # Channel code is case-insensitive\n arg_0 = arg_0.upper()\n\n if arg_0 not in INTEGRATED_CHANNEL_CHOICES:\n raise CommandError(_('Invalid integrated channel: {channel}').format(channel=arg_0))\n\n arg_1 = [INTEGRATED_CHANNEL_CHOICES[arg_0]]\n else:\n arg_1 = INTEGRATED_CHANNEL_CHOICES.values()\n\n return arg_1"} +{"_id": "doc_7107", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the contents of a file listing the requirements\n \"\"\"\n arg_1 = open(arg_0).readlines()\n arg_2 = []\n arg_3 = []\n\n for arg_4 in arg_1:\n arg_5 = arg_4.strip()\n if arg_5.startswith('#'):\n # Skip pure comment lines\n continue\n\n if any(arg_5.startswith(arg_6) for arg_6 in VCS_PREFIXES):\n # VCS reference for dev purposes, expect a trailing comment\n # with the normal requirement\n arg_7, arg_8, arg_5 = arg_5.rpartition('#')\n\n # Remove -e string\n arg_7 = re.sub(r'(.*)(?Phttps?.*$)', r'\\g', arg_7)\n arg_5 = re.sub(r'(egg=)?(?P.*)==.*$', r'\\g', arg_5)\n arg_9 = re.sub(r'.*[^=]==', '', arg_4.strip())\n\n if arg_5:\n arg_3.append(\n '{package_link}#egg={package}-{package_version}'.format(\n arg_7=arg_7,\n arg_5=arg_5,\n arg_9=arg_9,\n )\n )\n else:\n # Ignore any trailing comment\n arg_5, arg_8, arg_8 = arg_5.partition('#')\n # Remove any whitespace and assume non-empty results are dependencies\n arg_5 = arg_5.strip()\n\n if arg_5:\n arg_2.append(arg_5)\n return arg_2, arg_3"} +{"_id": "doc_7108", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Iterate over each learner data record and transmit it to the integrated channel.\n \"\"\"\n arg_2 = arg_0.get_learner_data_exporter(arg_1)\n arg_3 = arg_0.get_learner_data_transmitter()\n arg_3.transmit(arg_2)"} 
+{"_id": "doc_7109", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Transmit content metadata to integrated channel.\n \"\"\"\n arg_2 = arg_0.get_content_metadata_exporter(arg_1)\n arg_3 = arg_0.get_content_metadata_transmitter()\n arg_3.transmit(arg_2.export())"} +{"_id": "doc_7110", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=False,\n **arg_4\n ): # pylint: disable=arguments-differ,unused-argument\n \"\"\"\n Return a DegreedLearnerDataTransmissionAudit with the given enrollment and course completion data.\n\n If completed_date is None, then course completion has not been met.\n\n If no remote ID can be found, return None.\n \"\"\"\n # Degreed expects completion dates of the form 'yyyy-mm-dd'.\n arg_5 = arg_2.strftime(\"%F\") if isinstance(arg_2, datetime) else None\n if arg_1.enterprise_customer_user.get_remote_id() is not None:\n arg_6 = apps.get_model( # pylint: disable=invalid-name\n 'degreed',\n 'DegreedLearnerDataTransmissionAudit'\n )\n # We return two records here, one with the course key and one with the course run id, to account for\n # uncertainty about the type of content (course vs. course run) that was sent to the integrated channel.\n return [\n arg_6(\n enterprise_course_enrollment_id=arg_1.id,\n degreed_user_email=arg_1.enterprise_customer_user.user_email,\n course_id=parse_course_key(arg_1.course_id),\n course_completed=arg_2 is not None and arg_3,\n arg_5=arg_5,\n ),\n arg_6(\n enterprise_course_enrollment_id=arg_1.id,\n degreed_user_email=arg_1.enterprise_customer_user.user_email,\n course_id=arg_1.course_id,\n course_completed=arg_2 is not None and arg_3,\n arg_5=arg_5,\n )\n ]\n else:\n LOGGER.debug(\n 'No learner data was sent for user [%s] because a Degreed user ID could not be found.',\n arg_1.enterprise_customer_user.username\n )"} +{"_id": "doc_7111", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Render the given template with the stock data.\n \"\"\"\n arg_4 = Func_object_or_404(EnrollmentNotificationEmailTemplate, pk=arg_2)\n if arg_3 not in arg_0.view_type_contexts:\n return HttpResponse(status=404)\n arg_5 = arg_0.view_type_contexts[arg_3].copy()\n arg_5.update({'user_name': arg_0.Func_user_name(arg_1)})\n return HttpResponse(arg_4.render_html_template(arg_5), content_type='text/html')"} +{"_id": "doc_7112", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build common admin context.\n \"\"\"\n arg_2 = arg_1._meta\n arg_3 = get_permission_codename('change', arg_2)\n arg_4 = arg_0.user.has_perm('%s.%s' % (arg_2.app_label, arg_3))\n return {\n 'has_change_permission': arg_4,\n 'opts': arg_2\n }"} +{"_id": "doc_7113", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Handle GET request - render \"Transmit courses metadata\" form.\n\n Arguments:\n request (django.http.request.HttpRequest): Request instance\n enterprise_customer_uuid (str): Enterprise Customer UUID\n\n Returns:\n django.http.response.HttpResponse: HttpResponse\n \"\"\"\n arg_3 = arg_0._build_context(arg_1, arg_2)\n arg_4 = TransmitEnterpriseCoursesForm()\n arg_3.update({arg_0.ContextParameters.TRANSMIT_COURSES_METADATA_FORM: arg_4})\n\n return render(arg_1, arg_0.template, arg_3)"} +{"_id": "doc_7114", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the list of PendingEnterpriseCustomerUsers we want to render.\n\n Args:\n search_keyword (str): The keyword to search for in pending users' email addresses.\n customer_uuid (str): A unique identifier to filter down to only pending users\n linked to 
a particular EnterpriseCustomer.\n \"\"\"\n arg_3 = PendingEnterpriseCustomerUser.objects.filter(\n enterprise_customer__uuid=arg_2\n )\n\n if arg_1 is not None:\n arg_3 = arg_3.filter(user_email__icontains=arg_1)\n\n return arg_3"} +{"_id": "doc_7115", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Link single user by email or username.\n\n Arguments:\n enterprise_customer (EnterpriseCustomer): learners will be linked to this Enterprise Customer instance\n manage_learners_form (ManageLearnersForm): bound ManageLearners form instance\n \"\"\"\n arg_3 = arg_2.cleaned_data[ManageLearnersForm.Fields.EMAIL_OR_USERNAME]\n arg_4 = email_or_username__to__email(arg_3)\n try:\n validate_email_to_link(arg_4, arg_3, ValidationMessages.INVALID_EMAIL_OR_USERNAME, True)\n except ValidationError as exc:\n arg_2.add_error(ManageLearnersForm.Fields.EMAIL_OR_USERNAME, exc)\n else:\n EnterpriseCustomerUser.objects.link_user(arg_1, arg_4)\n return [arg_4]"} +{"_id": "doc_7116", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Bulk link users by email.\n\n Arguments:\n enterprise_customer (EnterpriseCustomer): learners will be linked to this Enterprise Customer instance\n manage_learners_form (ManageLearnersForm): bound ManageLearners form instance\n request (django.http.request.HttpRequest): HTTP Request instance\n email_list (iterable): A list of pre-processed email addresses to handle using the form\n \"\"\"\n arg_5 = []\n arg_6 = set()\n arg_7 = []\n arg_8 = []\n arg_9 = arg_2.cleaned_data[ManageLearnersForm.Fields.BULK_UPLOAD]\n if arg_4:\n arg_10 = [{ManageLearnersForm.CsvColumns.EMAIL: arg_13} for arg_13 in arg_4]\n else:\n arg_10 = parse_csv(arg_9, expected_columns={ManageLearnersForm.CsvColumns.EMAIL})\n\n try:\n for arg_11, arg_12 in enumerate(arg_10):\n arg_13 = arg_12[ManageLearnersForm.CsvColumns.EMAIL]\n try:\n arg_14 = validate_email_to_link(arg_13, ignore_existing=True)\n except ValidationError as exc:\n arg_15 = _(\"Error at line {line}: {message}\\n\").format(line=arg_11 + 1, arg_15=exc)\n arg_5.append(arg_15)\n else:\n if arg_14:\n arg_7.append((arg_13, arg_14.enterprise_customer))\n elif arg_13 in arg_6:\n arg_8.append(arg_13)\n else:\n arg_6.add(arg_13)\n except ValidationError as exc:\n arg_5.append(exc)\n\n if arg_5:\n arg_2.add_error(\n ManageLearnersForm.Fields.GENERAL_ERRORS, ValidationMessages.BULK_LINK_FAILED\n )\n for arg_16 in arg_5:\n arg_2.add_error(ManageLearnersForm.Fields.BULK_UPLOAD, arg_16)\n return\n\n # There were no errors. Now do the actual linking:\n for arg_13 in arg_6:\n EnterpriseCustomerUser.objects.link_user(arg_1, arg_13)\n\n # Report what happened:\n arg_17 = len(arg_6)\n messages.success(arg_3, ungettext(\n \"{count} new learner was added to {enterprise_customer_name}.\",\n \"{count} new learners were added to {enterprise_customer_name}.\",\n arg_17\n ).format(arg_17=arg_17, enterprise_customer_name=arg_1.name))\n arg_18 = [\n arg_13 for arg_13, customer in arg_7 if customer == arg_1\n ]\n arg_19 = [\n arg_13 for arg_13, __ in arg_7 if arg_13 not in arg_18\n ]\n if arg_18:\n messages.warning(\n arg_3,\n _(\n \"The following learners were already associated with this Enterprise \"\n \"Customer: {list_of_emails}\"\n ).format(\n list_of_emails=\", \".join(arg_18)\n )\n )\n if arg_19:\n messages.warning(\n arg_3,\n _(\n \"The following learners are already associated with \"\n \"another Enterprise Customer. 
These learners were not \"\n \"added to {enterprise_customer_name}: {list_of_emails}\"\n ).format(\n enterprise_customer_name=arg_1.name,\n list_of_emails=\", \".join(arg_19),\n )\n )\n if arg_8:\n messages.warning(\n arg_3,\n _(\n \"The following duplicate email addresses were not added: \"\n \"{list_of_emails}\"\n ).format(\n list_of_emails=\", \".join(arg_8)\n )\n )\n # Build a list of all the emails that we can act on further; that is,\n # emails that we either linked to this customer, or that were linked already.\n arg_20 = list(arg_6) + arg_18\n\n return arg_20"} +{"_id": "doc_7117", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Query the enrollment API and determine if a learner is enrolled in a given course run track.\n\n Args:\n user: The user whose enrollment needs to be checked\n course_mode: The mode with which the enrollment should be checked\n course_id: course id of the course where enrollment should be checked.\n\n Returns:\n Boolean: Whether or not enrollment exists\n\n \"\"\"\n arg_4 = EnrollmentApiClient()\n try:\n arg_5 = arg_4.get_course_enrollment(arg_1.username, arg_2)\n if arg_5 and arg_3 == arg_5.get('mode'):\n return True\n except HttpClientError as exc:\n logging.error(\n 'Error while checking enrollment status of user %(user)s: %(message)s',\n dict(arg_1=arg_1.username, message=str(exc))\n )\n except KeyError as exc:\n logging.warning(\n 'Error while parsing enrollment data of user %(user)s: %(message)s',\n dict(arg_1=arg_1.username, message=str(exc))\n )\n return False"} +{"_id": "doc_7118", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.\n\n Args:\n emails: An iterable of email addresses to split between existing and nonexisting\n\n Returns:\n users: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses\n missing_emails: List of unique emails which were in the original list, but do not yet exist as users\n \"\"\"\n arg_2 = User.objects.filter(email__in=arg_1)\n arg_3 = arg_2.values_list('email', flat=True)\n arg_4 = list(set(arg_1) - set(arg_3))\n return arg_2, arg_4"} +{"_id": "doc_7119", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None):\n \"\"\"\n Enroll existing users in all courses in a program, and create pending enrollments for nonexisting users.\n\n Args:\n enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\n program_details: The details of the program in which we're enrolling\n course_mode (str): The mode with which we're enrolling in the program\n emails: An iterable of email addresses which need to be enrolled\n\n Returns:\n successes: A list of users who were successfully enrolled in all courses of the program\n pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had\n pending enrollments created for them in the database\n failures: A list of users who could not be enrolled in the program\n \"\"\"\n arg_6, arg_7 = arg_0.get_users_by_email(arg_4)\n arg_8 = get_course_runs_from_program(arg_2)\n\n arg_9 = []\n arg_10 = []\n arg_11 = []\n\n for arg_12 in arg_6:\n arg_13 = arg_0.enroll_user(arg_1, arg_12, arg_3, *arg_8)\n if arg_13:\n arg_9.append(arg_12)\n else:\n arg_11.append(arg_12)\n\n for arg_14 in arg_7:\n arg_15 = arg_1.enroll_user_pending_registration(\n arg_14,\n arg_3,\n *arg_8,\n arg_5=arg_5\n )\n arg_10.append(arg_15)\n\n return arg_9, arg_10, arg_11"} +{"_id": "doc_7120", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Enroll existing users in a course, and create a pending enrollment for nonexisting users.\n\n Args:\n enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\n course_id (str): The unique identifier of the course in which we're enrolling\n course_mode (str): The mode with which we're enrolling in the course\n emails: An iterable of email addresses which need to be enrolled\n\n Returns:\n successes: A list of users who were successfully enrolled in the course\n pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had\n pending enrollments created for them in the database\n failures: A list of users who could not be enrolled in the course\n \"\"\"\n arg_5, arg_6 = arg_0.get_users_by_email(arg_4)\n\n arg_7 = []\n arg_8 = []\n arg_9 = []\n\n for arg_10 in arg_5:\n arg_11 = arg_0.enroll_user(arg_1, arg_10, arg_3, arg_2)\n if arg_11:\n arg_7.append(arg_10)\n else:\n arg_9.append(arg_10)\n\n for arg_12 in arg_6:\n arg_13 = arg_1.enroll_user_pending_registration(\n arg_12,\n arg_3,\n arg_2\n )\n arg_8.append(arg_13)\n\n return arg_7, arg_8, arg_9"} +{"_id": "doc_7121", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Deduplicate any outgoing message requests, and send the remainder.\n\n Args:\n http_request: The HTTP request in whose response we want to embed the messages\n message_requests: A list of undeduplicated messages in the form of tuples of message type\n and text- for example, ('error', 'Something went wrong')\n \"\"\"\n arg_3 = set(arg_2)\n for arg_4, arg_5 in arg_3:\n arg_6 = getattr(messages, arg_4)\n arg_6(arg_1, arg_5)"} +{"_id": "doc_7122", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Create message for the users who were not able to be enrolled in a course or program.\n\n Args:\n users: An iterable of users who were not successfully enrolled\n enrolled_in (str): A string identifier for the course or program with which enrollment was attempted\n\n Returns:\n tuple: A 2-tuple containing a message type and message text\n \"\"\"\n arg_3 = [user.email for user in arg_1]\n return (\n 'error',\n _(\n 'The following learners could not be enrolled in {enrolled_in}: {user_list}'\n ).format(\n arg_2=arg_2,\n user_list=', '.join(arg_3),\n )\n )"} +{"_id": "doc_7123", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None,\n arg_6=None,\n arg_7=True\n ):\n \"\"\"\n Enroll the users with the given email addresses to the courses specified, either specifically or by program.\n\n Args:\n cls (type): The EnterpriseCustomerManageLearnersView class itself\n request: The HTTP request the enrollment is being created by\n enterprise_customer: The instance of EnterpriseCustomer whose attached users we're enrolling\n emails: An iterable of strings containing email addresses to enroll in a course\n mode: The enrollment mode the users will be enrolled in the course with\n course_id: The ID of the course in which we want to enroll\n program_details: Details about a program in which we want to enroll\n notify: Whether to notify (by email) the users that have been enrolled\n \"\"\"\n arg_8 = []\n\n if arg_5:\n arg_9, arg_10, arg_11 = arg_0.enroll_users_in_course(\n arg_2=arg_2,\n arg_5=arg_5,\n course_mode=arg_4,\n arg_3=arg_3,\n )\n arg_12 = arg_9 + arg_10\n if arg_7:\n arg_2.notify_enrolled_learners(\n catalog_api_user=arg_1.user,\n arg_5=arg_5,\n users=arg_12,\n )\n if arg_9:\n 
arg_8.append(arg_0.get_success_enrollment_message(arg_9, arg_5))\n if arg_11:\n arg_8.append(arg_0.get_failed_enrollment_message(arg_11, arg_5))\n if arg_10:\n arg_8.append(arg_0.get_pending_enrollment_message(arg_10, arg_5))\n\n if arg_6:\n arg_9, arg_10, arg_11 = arg_0.enroll_users_in_program(\n arg_2=arg_2,\n arg_6=arg_6,\n course_mode=arg_4,\n arg_3=arg_3,\n )\n arg_12 = arg_9 + arg_10\n if arg_7:\n arg_0.notify_program_learners(\n arg_2=arg_2,\n arg_6=arg_6,\n users=arg_12\n )\n arg_13 = arg_6.get('title', arg_6.get('uuid', _('the program')))\n if arg_9:\n arg_8.append(arg_0.get_success_enrollment_message(arg_9, arg_13))\n if arg_11:\n arg_8.append(arg_0.get_failed_enrollment_message(arg_11, arg_13))\n if arg_10:\n arg_8.append(arg_0.get_pending_enrollment_message(arg_10, arg_13))\n\n arg_0.send_messages(arg_1, arg_8)"} +{"_id": "doc_7124", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Handle DELETE request - handle unlinking learner.\n\n Arguments:\n request (django.http.request.HttpRequest): Request instance\n customer_uuid (str): Enterprise Customer UUID\n\n Returns:\n django.http.response.HttpResponse: HttpResponse\n \"\"\"\n # TODO: pylint acts stupid - find a way around it without suppressing\n arg_3 = EnterpriseCustomer.objects.get(uuid=arg_2) # pylint: disable=no-member\n arg_4 = arg_1.GET[\"unlink_email\"]\n try:\n EnterpriseCustomerUser.objects.unlink_user(\n arg_3=arg_3, user_email=arg_4\n )\n except (EnterpriseCustomerUser.DoesNotExist, PendingEnterpriseCustomerUser.DoesNotExist):\n arg_5 = _(\"Email {email} is not associated with Enterprise \"\n \"Customer {ec_name}\").format(\n email=arg_4, ec_name=arg_3.name)\n return HttpResponse(arg_5, content_type=\"application/json\", status=404)\n\n return HttpResponse(\n json.dumps({}),\n content_type=\"application/json\"\n )"} +{"_id": "doc_7125", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"\n Build a ProxyDataSharingConsent using the details of the received consent records.\n \"\"\"\n if not arg_2 or any(arg_3 is None for arg_3 in arg_2):\n return None\n arg_4 = all((arg_3.granted for arg_3 in arg_2))\n arg_5 = any((arg_3.exists for arg_3 in arg_2))\n arg_6 = set([arg_3.username for arg_3 in arg_2])\n arg_7 = set([arg_3.enterprise_customer for arg_3 in arg_2])\n if not len(arg_6) == len(arg_7) == 1:\n raise InvalidProxyConsent(\n 'Children used to create a bulk proxy consent object must '\n 'share a single common username and EnterpriseCustomer.'\n )\n arg_8 = arg_2[0].username\n arg_9 = arg_2[0].enterprise_customer\n return arg_0(\n arg_9=arg_9,\n arg_8=arg_8,\n arg_1=arg_1,\n arg_5=arg_5,\n arg_4=arg_4,\n child_consents=arg_2\n )"} +{"_id": "doc_7126", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Commit a real ``DataSharingConsent`` object to the database, mirroring current field settings.\n\n :return: A ``DataSharingConsent`` object if validation is successful, otherwise ``None``.\n \"\"\"\n if arg_0._child_consents:\n arg_1 = []\n\n for arg_2 in arg_0._child_consents:\n arg_2.granted = arg_0.granted\n arg_1.append(arg_2.save() or arg_2)\n\n return ProxyDataSharingConsent.from_children(arg_0.program_uuid, *arg_1)\n\n arg_2, arg_4 = DataSharingConsent.objects.update_or_create(\n enterprise_customer=arg_0.enterprise_customer,\n username=arg_0.username,\n course_id=arg_0.course_id,\n defaults={\n 'granted': arg_0.granted\n }\n )\n arg_0._exists = arg_2.exists\n return arg_2"} +{"_id": "doc_7127", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get course completions via 
PersistentCourseGrade for all the learners of the given enterprise customer.\n\n    Arguments:\n        enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\n            of this enterprise customer.\n        days (int): Include course enrollment of this number of days.\n\n    Returns:\n        (list): A list of PersistentCourseGrade objects.\n    \"\"\"\n    return PersistentCourseGrade.objects.filter(\n        passed_timestamp__gt=datetime.datetime.now() - datetime.timedelta(arg_2=arg_2)\n    ).filter(\n        user_id__in=arg_1.enterprise_customer_users.values_list('user_id', flat=True)\n    )"}
{"_id": "doc_7128", "title": "", "text": "def Func(arg_0):\n    \"\"\"\n    Prefetch Users from the list of user_ids present in the persistent_course_grades.\n\n    Arguments:\n        persistent_course_grades (list): A list of PersistentCourseGrade.\n\n    Returns:\n        (dict): A dictionary containing user_id to user mapping.\n    \"\"\"\n    arg_1 = User.objects.filter(\n        id__in=[grade.user_id for grade in arg_0]\n    )\n    return {\n        arg_2.id: arg_2 for arg_2 in arg_1\n    }"}
{"_id": "doc_7129", "title": "", "text": "def Func(arg_0):\n    \"\"\"\n    Get Identity Provider with the given id.\n\n    Return:\n        Instance of ProviderConfig or None.\n    \"\"\"\n    try:\n        from third_party_auth.provider import arg_1  # pylint: disable=redefined-outer-name\n    except ImportError as exception:\n        LOGGER.warning(\"Could not import Registry from third_party_auth.provider\")\n        LOGGER.warning(exception)\n        arg_1 = None  # pylint: disable=redefined-outer-name\n\n    try:\n        return arg_1 and arg_1.get(arg_0)\n    except ValueError:\n        return None"}
{"_id": "doc_7130", "title": "", "text": "def Func(arg_0='change'):\n    \"\"\"\n    Get template of catalog admin url.\n\n    URL template will contain a placeholder '{catalog_id}' for catalog id.\n    Arguments:\n        mode e.g. change/add.\n\n    Returns:\n        A string containing template for catalog url.\n\n    Example:\n        >>> Func('change')\n        \"http://localhost:18381/admin/catalogs/catalog/{catalog_id}/change/\"\n\n    \"\"\"\n    arg_1 = getattr(settings, \"COURSE_CATALOG_API_URL\", \"\")\n\n    # Extract FQDN (Fully Qualified Domain Name) from API URL.\n    arg_2 = re.match(r\"^(?P<fqdn>(?:https?://)?[^/]+)\", arg_1)\n\n    if not arg_2:\n        return \"\"\n\n    # Return matched FQDN from catalog api url appended with catalog admin path\n    if arg_0 == 'change':\n        return arg_2.group(\"fqdn\").rstrip(\"/\") + \"/admin/catalogs/catalog/{catalog_id}/change/\"\n    elif arg_0 == 'add':\n        return arg_2.group(\"fqdn\").rstrip(\"/\") + \"/admin/catalogs/catalog/add/\""}
{"_id": "doc_7131", "title": "", "text": "def Func(arg_0, arg_1=None):\n    \"\"\"\n    Create HTML and plaintext message bodies for a notification.\n\n    We receive a context with data we can use to render, as well as an optional site\n    template configuration - if we don't get a template configuration, we'll use the\n    standard, built-in template.\n\n    Arguments:\n        template_context (dict): A set of data to render\n        template_configuration: A database-backed object with templates\n            stored that can be used to render a notification.\n\n    \"\"\"\n    if (\n            arg_1 is not None and\n            arg_1.html_template and\n            arg_1.plaintext_template\n    ):\n        arg_2, arg_3 = arg_1.render_all_templates(arg_0)\n    else:\n        arg_2 = render_to_string(\n            'enterprise/emails/user_notification.txt',\n            arg_0\n        )\n        arg_3 = render_to_string(\n            'enterprise/emails/user_notification.html',\n            arg_0\n        )\n\n    return arg_2, arg_3"}
{"_id": "doc_7132", "title": "", "text": "def Func(arg_0, arg_1=None):\n    \"\"\"\n    Get a subject line for a notification email.\n\n    The method is designed to fail in a \"smart\" way; if we can't 
render a\n database-backed subject line template, then we'll fall back to a template\n saved in the Django settings; if we can't render _that_ one, then we'll\n fall through to a friendly string written into the code.\n\n One example of a failure case in which we want to fall back to a stock template\n would be if a site admin entered a subject line string that contained a template\n tag that wasn't available, causing a KeyError to be raised.\n\n Arguments:\n course_name (str): Course name to be rendered into the string\n template_configuration: A database-backed object with a stored subject line template\n\n \"\"\"\n arg_2 = _('You\\'ve been enrolled in {course_name}!')\n arg_3 = getattr(\n settings,\n 'ENTERPRISE_ENROLLMENT_EMAIL_DEFAULT_SUBJECT_LINE',\n arg_2,\n )\n if arg_1 is not None and arg_1.subject_line:\n arg_4 = arg_1.subject_line\n else:\n arg_4 = arg_3\n\n try:\n return arg_4.format(arg_0=arg_0)\n except KeyError:\n pass\n\n try:\n return arg_3.format(arg_0=arg_0)\n except KeyError:\n return arg_2.format(arg_0=arg_0)"} +{"_id": "doc_7133", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Send an email notifying a user about their enrollment in a course.\n\n Arguments:\n user: Either a User object or a PendingEnterpriseCustomerUser that we can use\n to get details for the email\n enrolled_in (dict): The dictionary contains details of the enrollable object\n (either course or program) that the user enrolled in. This MUST contain\n a `name` key, and MAY contain the other following keys:\n - url: A human-friendly link to the enrollable's home page\n - type: Either `course` or `program` at present\n - branding: A special name for what the enrollable \"is\"; for example,\n \"MicroMasters\" would be the branding for a \"MicroMasters Program\"\n - start: A datetime object indicating when the enrollable will be available.\n enterprise_customer: The EnterpriseCustomer that the enrollment was created using.\n email_connection: An existing Django email connection that can be used without\n creating a new connection for each individual message\n\n \"\"\"\n if hasattr(arg_0, 'first_name') and hasattr(arg_0, 'username'):\n # PendingEnterpriseCustomerUsers don't have usernames or real names. 
We should\n # template slightly differently to make sure weird stuff doesn't happen.\n arg_4 = arg_0.first_name\n if not arg_4:\n arg_4 = arg_0.username\n else:\n arg_4 = None\n\n # Users have an `email` attribute; PendingEnterpriseCustomerUsers have `user_email`.\n if hasattr(arg_0, 'email'):\n arg_5 = arg_0.email\n elif hasattr(arg_0, 'user_email'):\n arg_5 = arg_0.user_email\n else:\n raise TypeError(_('`user` must have one of either `email` or `user_email`.'))\n\n arg_6 = {\n 'user_name': arg_4,\n 'enrolled_in': arg_1,\n 'organization_name': arg_2.name,\n }\n try:\n arg_7 = arg_2.enterprise_enrollment_template\n except (ObjectDoesNotExist, AttributeError):\n arg_7 = None\n\n arg_8, arg_9 = build_notification_message(arg_6, arg_7)\n\n arg_10 = get_notification_subject_line(arg_1['name'], arg_7)\n\n arg_11 = get_configuration_value_for_site(\n arg_2.site,\n 'DEFAULT_FROM_EMAIL',\n default=settings.DEFAULT_FROM_EMAIL\n )\n\n return mail.send_mail(\n arg_10,\n arg_8,\n arg_11,\n [arg_5],\n html_message=arg_9,\n connection=arg_3\n )"} +{"_id": "doc_7134", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the ``EnterpriseCustomer`` instance associated with ``uuid``.\n\n :param uuid: The universally unique ID of the enterprise customer.\n :return: The ``EnterpriseCustomer`` instance, or ``None`` if it doesn't exist.\n \"\"\"\n arg_1 = apps.get_model('enterprise', 'EnterpriseCustomer') # pylint: disable=invalid-name\n try:\n return arg_1.objects.get(arg_0=arg_0) # pylint: disable=no-member\n except arg_1.DoesNotExist:\n return None"} +{"_id": "doc_7135", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return track selection url for the given course.\n\n Arguments:\n course_run (dict): A dictionary containing course run metadata.\n query_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\n Raises:\n (KeyError): Raised when course run dict does not have 'key' key.\n\n Returns:\n (str): Course track selection url.\n\n \"\"\"\n try:\n arg_2 = reverse('course_modes_choose', kwargs={'course_id': arg_0['key']})\n except KeyError:\n LOGGER.exception(\n \"KeyError while parsing course run data.\\nCourse Run: \\n[%s]\", arg_0,\n )\n raise\n\n arg_3 = '{}{}'.format(\n settings.LMS_ROOT_URL,\n arg_2\n )\n arg_4 = update_query_parameters(arg_3, arg_1)\n\n return arg_4"} +{"_id": "doc_7136", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.\n\n Arguments:\n enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.\n\n Returns:\n (EnterpriseCustomer): The EnterpriseCustomer given the UUID.\n\n \"\"\"\n arg_1 = apps.get_model('enterprise', 'EnterpriseCustomer') # pylint: disable=invalid-name\n try:\n arg_0 = UUID(arg_0)\n return arg_1.objects.get(uuid=arg_0) # pylint: disable=no-member\n except (TypeError, ValueError, arg_1.DoesNotExist):\n LOGGER.error('Unable to find enterprise customer for UUID: [%s]', arg_0)\n raise Http404"} +{"_id": "doc_7137", "title": "", "text": "def Func(**arg_0):\n \"\"\"\n Get MD5 encoded cache key for given arguments.\n\n Here is the format of key before MD5 encryption.\n key1:value1__key2:value2 ...\n\n Example:\n >>> Func(site_domain=\"example.com\", resource=\"enterprise\")\n # Here is key format for above call\n # \"site_domain:example.com__resource:enterprise\"\n a54349175618ff1659dee0978e3149ca\n\n Arguments:\n **kwargs: Key word arguments that need to be present in cache key.\n\n Returns:\n An 
MD5 encoded key uniquely identified by the key word arguments.\n \"\"\"\n arg_1 = '__'.join(['{}:{}'.format(item, value) for item, value in iteritems(arg_0)])\n\n return hashlib.md5(arg_1.encode('utf-8')).hexdigest()"} +{"_id": "doc_7138", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Traverse a paginated API response.\n\n Extracts and concatenates \"results\" (list of dict) returned by DRF-powered\n APIs.\n\n Arguments:\n response (Dict): Current response dict from service API\n endpoint (slumber Resource object): slumber Resource object from edx-rest-api-client\n\n Returns:\n list of dict.\n\n \"\"\"\n arg_2 = arg_0.get('results', [])\n\n arg_3 = arg_0.get('next')\n while arg_3:\n arg_4 = parse_qs(urlparse(arg_3).query, keep_blank_values=True)\n arg_0 = arg_1.get(**arg_4)\n arg_2 += arg_0.get('results', [])\n arg_3 = arg_0.get('next')\n\n return arg_2"} +{"_id": "doc_7139", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Return grammatically correct, translated text based off of a minimum and maximum value.\n\n Example:\n min = 1, max = 1, singular = '{} hour required for this course', plural = '{} hours required for this course'\n output = '1 hour required for this course'\n\n min = 2, max = 2, singular = '{} hour required for this course', plural = '{} hours required for this course'\n output = '2 hours required for this course'\n\n min = 2, max = 4, range_text = '{}-{} hours required for this course'\n output = '2-4 hours required for this course'\n\n min = None, max = 2, plural = '{} hours required for this course'\n output = '2 hours required for this course'\n\n Expects ``range_text`` to already have a translation function called on it.\n\n Returns:\n ``None`` if both of the input values are ``None``.\n ``singular`` formatted if both are equal or one of the inputs, but not both, are ``None``, and the value is 1.\n ``plural`` formatted if both are equal or one of its inputs, but not both, are ``None``, and the value is > 1.\n ``range_text`` formatted if min != max and both are valid values.\n \"\"\"\n if arg_3 is None and arg_4 is None:\n return None\n if arg_3 == arg_4 or arg_3 is None or arg_4 is None:\n # pylint: disable=translation-of-non-string\n return ungettext(arg_0, arg_1, arg_3 or arg_4).format(arg_3 or arg_4)\n return arg_2.format(arg_3, arg_4)"} +{"_id": "doc_7140", "title": "", "text": "def Func(arg_0, arg_1='$'):\n \"\"\"\n Format the price to have the appropriate currency and digits..\n\n :param price: The price amount.\n :param currency: The currency for the price.\n :return: A formatted price string, i.e. 
'$10', '$10.52'.\n \"\"\"\n if int(arg_0) == arg_0:\n return '{}{}'.format(arg_1, int(arg_0))\n return '{}{:0.2f}'.format(arg_1, arg_0)"} +{"_id": "doc_7141", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get the site configuration value for a key, unless a site configuration does not exist for that site.\n\n Useful for testing when no Site Configuration exists in edx-enterprise or if a site in LMS doesn't have\n a configuration tied to it.\n\n :param site: A Site model object\n :param key: The name of the value to retrieve\n :param default: The default response if there's no key in site config or settings\n :return: The value located at that key in the site configuration or settings file.\n \"\"\"\n if hasattr(arg_0, 'configuration'):\n return arg_0.configuration.get_value(arg_1, arg_2)\n return arg_2"} +{"_id": "doc_7142", "title": "", "text": "def Func(arg_0, arg_1=None, **arg_2):\n \"\"\"\n Get a configuration value, or fall back to ``default`` if it doesn't exist.\n\n Also takes a `type` argument to guide which particular upstream method to use when trying to retrieve a value.\n Current types include:\n - `url` to specifically get a URL.\n \"\"\"\n if arg_2.get('type') == 'url':\n return get_url(arg_0) or arg_1 if callable(get_url) else arg_1\n return configuration_helpers.get_value(arg_0, arg_1, **arg_2) if configuration_helpers else arg_1"} +{"_id": "doc_7143", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Emit a track event for enterprise course enrollment.\n \"\"\"\n track_event(arg_1, 'edx.bi.user.enterprise.onboarding', {\n 'pathway': arg_0,\n 'url_path': arg_3,\n 'course_run_id': arg_2,\n })"} +{"_id": "doc_7144", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return true if the course run is enrollable, false otherwise.\n\n We look for the following criteria:\n - end is greater than now OR null\n - enrollment_start is less than now OR null\n - enrollment_end is greater than now OR null\n \"\"\"\n arg_1 = datetime.datetime.now(pytz.UTC)\n arg_2 = parse_datetime_handle_invalid(arg_0.get('end'))\n arg_3 = parse_datetime_handle_invalid(arg_0.get('enrollment_start'))\n arg_4 = parse_datetime_handle_invalid(arg_0.get('enrollment_end'))\n return (not arg_2 or arg_2 > arg_1) and \\\n (not arg_3 or arg_3 < arg_1) and \\\n (not arg_4 or arg_4 > arg_1)"} +{"_id": "doc_7145", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return true if the course run has a verified seat with an unexpired upgrade deadline, false otherwise.\n \"\"\"\n arg_1 = datetime.datetime.now(pytz.UTC)\n for arg_2 in arg_0.get('seats', []):\n if arg_2.get('type') == 'verified':\n arg_3 = parse_datetime_handle_invalid(arg_2.get('upgrade_deadline'))\n return not arg_3 or arg_3 > arg_1\n return False"} +{"_id": "doc_7146", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return course run with start date closest to now.\n \"\"\"\n if len(arg_0) == 1:\n return arg_0[0]\n\n arg_1 = datetime.datetime.now(pytz.UTC)\n # course runs with no start date should be considered last.\n arg_2 = arg_1 - datetime.timedelta(days=3650)\n return min(arg_0, key=lambda x: abs(get_course_run_start(x, arg_2) - arg_1))"} +{"_id": "doc_7147", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return the current course run on the following conditions.\n\n - If user has active course runs (already enrolled) then return course run with closest start date\n Otherwise it will check the following logic:\n - Course run is enrollable (see is_course_run_enrollable)\n - Course run has a 
verified seat and the upgrade deadline has not expired.\n    - Course run start date is closer to now than any other enrollable/upgradeable course runs.\n    - If no enrollable/upgradeable course runs, return course run with most recent start date.\n    \"\"\"\n    arg_2 = None\n    arg_3 = []\n    arg_4 = arg_0['course_runs']\n\n    if arg_1:\n        arg_2 = get_closest_course_run(arg_1)\n    else:\n        for arg_5 in arg_4:\n            if is_course_run_enrollable(arg_5) and is_course_run_upgradeable(arg_5):\n                arg_3.append(arg_5)\n\n        if not arg_3:\n            # Consider all runs if there were not any enrollable/upgradeable ones.\n            arg_3 = arg_4\n\n        if arg_3:\n            arg_2 = get_closest_course_run(arg_3)\n    return arg_2"}
{"_id": "doc_7148", "title": "", "text": "def Func(arg_0):\n    \"\"\"\n    LRS client instance to be used for sending statements.\n    \"\"\"\n    return RemoteLRS(\n        version=arg_0.Func_configuration.version,\n        endpoint=arg_0.Func_configuration.endpoint,\n        auth=arg_0.Func_configuration.authorization_header,\n    )"}
{"_id": "doc_7149", "title": "", "text": "def Func(arg_0, arg_1):\n    \"\"\"\n    Save xAPI statement.\n\n    Arguments:\n        statement (EnterpriseStatement): xAPI Statement to send to the LRS.\n\n    Raises:\n        ClientError: If xAPI statement fails to save.\n    \"\"\"\n    arg_2 = arg_0.lrs.Func(arg_1)\n\n    if not arg_2:\n        raise ClientError('EnterpriseXAPIClient request failed.')"}
{"_id": "doc_7150", "title": "", "text": "def Func(arg_0, arg_1):  # pylint: disable=unused-argument\n    \"\"\"\n    Check if the request user has implicit access to the `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.\n\n    Returns:\n        boolean: whether the request user has access or not\n    \"\"\"\n    arg_2 = get_request_or_stub()\n    arg_3 = get_decoded_jwt_from_request(arg_2)\n    return request_user_has_implicit_access_via_jwt(arg_3, ENTERPRISE_DASHBOARD_ADMIN_ROLE)"}
{"_id": "doc_7151", "title": "", "text": "def Func(arg_0, arg_1):  # pylint: disable=unused-argument\n    \"\"\"\n    Check if the request user has implicit access to the `ENTERPRISE_CATALOG_ADMIN_ROLE` feature role.\n\n    Returns:\n        boolean: whether the request user has access or not\n    \"\"\"\n    arg_2 = get_request_or_stub()\n    arg_3 = get_decoded_jwt_from_request(arg_2)\n    return request_user_has_implicit_access_via_jwt(arg_3, ENTERPRISE_CATALOG_ADMIN_ROLE, arg_1)"}
{"_id": "doc_7152", "title": "", "text": "def Func(arg_0, arg_1):  # pylint: disable=unused-argument\n    \"\"\"\n    Check if the request user has implicit access to the `ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE` feature role.\n\n    Returns:\n        boolean: whether the request user has access or not\n    \"\"\"\n    arg_2 = get_request_or_stub()\n    arg_3 = get_decoded_jwt_from_request(arg_2)\n    return request_user_has_implicit_access_via_jwt(arg_3, ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE, arg_1)"}
{"_id": "doc_7153", "title": "", "text": "def Func(arg_0, arg_1):\n    \"\"\"\n    Instance is EnterpriseCustomer. 
Return e-commerce coupon urls.\n \"\"\"\n if not arg_1.entitlement_id:\n return \"N/A\"\n\n return format_html(\n 'View coupon \"{id}\" details',\n base_url=settings.ECOMMERCE_PUBLIC_URL_ROOT, id=arg_1.entitlement_id\n )"} +{"_id": "doc_7154", "title": "", "text": "def Func(arg_0=\"Export selected objects as CSV file\", arg_1=None, arg_2=True):\n \"\"\"\n Return an export csv action.\n\n Arguments:\n description (string): action description\n fields ([string]): list of model fields to include\n header (bool): whether or not to output the column names as the first row\n \"\"\"\n # adapted from https://gist.github.com/mgerring/3645889\n def arg_15(arg_3, arg_4, arg_5): # pylint: disable=unused-argument\n \"\"\"\n Export model fields to CSV.\n \"\"\"\n arg_6 = arg_3.model._meta\n\n if not arg_1:\n arg_7 = [arg_13.name for arg_13 in arg_6.fields]\n else:\n arg_7 = arg_1\n\n arg_8 = HttpResponse(content_type=\"text/csv\")\n arg_8[\"Content-Disposition\"] = \"attachment; filename={filename}.csv\".format(\n filename=str(arg_6).replace(\".\", \"_\")\n )\n\n arg_9 = unicodecsv.writer(arg_8, encoding=\"utf-8\")\n if arg_2:\n arg_9.writerow(arg_7)\n for arg_10 in arg_5:\n arg_11 = []\n for arg_12 in arg_7:\n arg_13 = getattr(arg_10, arg_12)\n if callable(arg_13):\n arg_14 = arg_13()\n else:\n arg_14 = arg_13\n if arg_14 is None:\n arg_11.append(\"[Not Set]\")\n elif not arg_14 and isinstance(arg_14, string_types):\n arg_11.append(\"[Empty]\")\n else:\n arg_11.append(arg_14)\n arg_9.writerow(arg_11)\n return arg_8\n\n arg_15.short_description = arg_0\n return arg_15"} +{"_id": "doc_7155", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Return the action method to clear the catalog ID for a EnterpriseCustomer.\n \"\"\"\n arg_0 = arg_0 or _(\"Unlink selected objects from existing course catalogs\")\n\n def arg_4(arg_1, arg_2, arg_3): # pylint: disable=unused-argument\n \"\"\"\n Clear the catalog ID for a selected EnterpriseCustomer.\n \"\"\"\n arg_3.update(catalog=None)\n arg_4.short_description = arg_0\n return arg_4"} +{"_id": "doc_7156", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get information about maps of the robots.\n\n :return:\n \"\"\"\n for arg_1 in arg_0.robots:\n arg_2 = (\n requests.get(urljoin(arg_0.ENDPOINT, 'users/me/robots/{}/maps'.format(arg_1.serial)),\n headers=arg_0._headers))\n arg_2.raise_for_status()\n arg_0._maps.update({arg_1.serial: arg_2.json()})"} +{"_id": "doc_7157", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get information about robots connected to account.\n\n :return:\n \"\"\"\n arg_1 = requests.get(urljoin(arg_0.ENDPOINT, 'dashboard'),\n headers=arg_0._headers)\n arg_1.raise_for_status()\n\n for arg_2 in arg_1.json()['robots']:\n if arg_2['mac_address'] is None:\n continue # Ignore robots without mac-address\n\n try:\n arg_0._robots.add(Robot(name=arg_2['name'],\n serial=arg_2['serial'],\n secret=arg_2['secret_key'],\n traits=arg_2['traits'],\n endpoint=arg_2['nucleo_url']))\n except requests.exceptions.HTTPError:\n print (\"Your '{}' robot is offline.\".format(arg_2['name']))\n continue\n\n arg_0.refresh_persistent_maps()\n for arg_2 in arg_0._robots:\n arg_2.has_persistent_maps = arg_2.serial in arg_0._persistent_maps"} +{"_id": "doc_7158", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get information about persistent maps of the robots.\n\n :return:\n \"\"\"\n for arg_1 in arg_0._robots:\n arg_2 = (requests.get(urljoin(\n arg_0.ENDPOINT,\n 'users/me/robots/{}/persistent_maps'.format(arg_1.serial)),\n headers=arg_0._headers))\n 
arg_2.raise_for_status()\n arg_0._persistent_maps.update({arg_1.serial: arg_2.json()})"} +{"_id": "doc_7159", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Calculates the distance between two points on earth.\n \"\"\"\n arg_2, arg_3 = arg_0\n arg_4, arg_5 = arg_1\n arg_6 = arg_5 - arg_3\n arg_7 = arg_4 - arg_2\n arg_8 = 6371 # radius of the earth in kilometers\n arg_9 = np.sin(arg_7 / 2)**2 + np.cos(arg_2) * np.cos(arg_4) * (np.sin(arg_6 / 2))**2\n arg_10 = 2 * np.pi * arg_8 * np.arctan2(np.sqrt(arg_9), np.sqrt(1 - arg_9)) / 180\n return arg_10"} +{"_id": "doc_7160", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Takes a graph and returns an adjacency list.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :any:`networkx.Graph`, etc.\n Any object that networkx can turn into a\n :any:`DiGraph`.\n return_dict_of_dict : bool (optional, default: ``True``)\n Specifies whether this function will return a dict of dicts\n or a dict of lists.\n\n Returns\n -------\n adj : dict\n An adjacency representation of graph as a dictionary of\n dictionaries, where a key is the vertex index for a vertex\n ``v`` and the values are :class:`dicts<.dict>` with keys for\n the vertex index and values as edge properties.\n\n Examples\n --------\n >>> import queueing_tool as qt\n >>> import networkx as nx\n >>> adj = {0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}\n >>> g = nx.DiGraph(adj)\n >>> qt.Func(g, return_dict_of_dict=True)\n ... # doctest: +NORMALIZE_WHITESPACE\n {0: {1: {}, 2: {}},\n 1: {0: {}},\n 2: {0: {}, 3: {}},\n 3: {2: {}}}\n >>> qt.Func(g, return_dict_of_dict=False)\n {0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}\n \"\"\"\n if not isinstance(arg_0, nx.DiGraph):\n arg_0 = QueueNetworkDiGraph(arg_0)\n\n arg_2 = nx.to_dict_of_dicts(arg_0)\n if arg_1:\n return arg_2\n else:\n return {arg_3: list(arg_4.keys()) for arg_3, arg_4 in arg_2.items()}"} +{"_id": "doc_7161", "title": "", "text": "def Func(arg_0):\n \"\"\"Takes a dictionary based representation of an adjacency list\n and returns a dict of dicts based representation.\n \"\"\"\n arg_1 = arg_0.popitem()\n arg_0[arg_1[0]] = arg_1[1]\n if not isinstance(arg_1[1], dict):\n arg_2 = {}\n for arg_3, arg_4 in arg_0.items():\n arg_2[arg_3] = {v: {} for v in arg_4}\n\n arg_0 = arg_2\n return arg_0"} +{"_id": "doc_7162", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=1, **arg_3):\n \"\"\"Takes an adjacency list, dict, or matrix and returns a graph.\n\n The purpose of this function is take an adjacency list (or matrix)\n and return a :class:`.QueueNetworkDiGraph` that can be used with a\n :class:`.QueueNetwork` instance. The Graph returned has the\n ``edge_type`` edge property set for each edge. Note that the graph may\n be altered.\n\n Parameters\n ----------\n adjacency : dict or :class:`~numpy.ndarray`\n An adjacency list as either a dict, or an adjacency matrix.\n adjust : int ``{1, 2}`` (optional, default: 1)\n Specifies what to do when the graph has terminal vertices\n (nodes with no out-edges). Note that if ``adjust`` is not 2\n then it is assumed to be 1. 
There are two choices:\n\n * ``adjust = 1``: A loop is added to each terminal node in the\n graph, and their ``edge_type`` of that loop is set to 0.\n * ``adjust = 2``: All edges leading to terminal nodes have\n their ``edge_type`` set to 0.\n\n **kwargs :\n Unused.\n\n Returns\n -------\n out : :any:`networkx.DiGraph`\n A directed graph with the ``edge_type`` edge property.\n\n Raises\n ------\n TypeError\n Is raised if ``adjacency`` is not a dict or\n :class:`~numpy.ndarray`.\n\n Examples\n --------\n If terminal nodes are such that all in-edges have edge type ``0``\n then nothing is changed. However, if a node is a terminal node then\n a loop is added with edge type 0.\n\n >>> import queueing_tool as qt\n >>> adj = {\n ... 0: {1: {}},\n ... 1: {2: {},\n ... 3: {}},\n ... 3: {0: {}}}\n >>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}\n >>> # A loop will be added to vertex 2\n >>> g = qt.Func(adj, edge_type=eTy)\n >>> ans = qt.graph2dict(g)\n >>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE\n [(0, {1: {'edge_type': 1}}),\n (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}), \n (2, {2: {'edge_type': 0}}),\n (3, {0: {'edge_type': 1}})]\n\n You can use a dict of lists to represent the adjacency list.\n\n >>> adj = {0 : [1], 1: [2, 3], 3: [0]}\n >>> g = qt.Func(adj, edge_type=eTy)\n >>> ans = qt.graph2dict(g)\n >>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE\n [(0, {1: {'edge_type': 1}}),\n (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),\n (2, {2: {'edge_type': 0}}),\n (3, {0: {'edge_type': 1}})]\n\n Alternatively, you could have this function adjust the edges that\n lead to terminal vertices by changing their edge type to 0:\n\n >>> # The graph is unaltered\n >>> g = qt.Func(adj, edge_type=eTy, adjust=2)\n >>> ans = qt.graph2dict(g)\n >>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE\n [(0, {1: {'edge_type': 1}}),\n (1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),\n (2, {}),\n (3, {0: {'edge_type': 1}})]\n \"\"\"\n\n if isinstance(arg_0, np.ndarray):\n arg_0 = _matrix2dict(arg_0)\n elif isinstance(arg_0, dict):\n arg_0 = _dict2dict(arg_0)\n else:\n arg_4 = (\"If the adjacency parameter is supplied it must be a \"\n \"dict, or a numpy.ndarray.\")\n raise TypeError(arg_4)\n\n if arg_1 is None:\n arg_1 = {}\n else:\n if isinstance(arg_1, np.ndarray):\n arg_1 = _matrix2dict(arg_1, etype=True)\n elif isinstance(arg_1, dict):\n arg_1 = _dict2dict(arg_1)\n\n for arg_5, arg_6 in arg_1.items():\n for arg_7, arg_8 in arg_6.items():\n arg_0[arg_5][arg_7]['edge_type'] = arg_8\n\n arg_9 = nx.from_dict_of_dicts(arg_0, create_using=nx.DiGraph())\n arg_0 = nx.to_dict_of_dicts(arg_9)\n arg_0 = _adjacency_adjust(arg_0, arg_2, True)\n\n return nx.from_dict_of_dicts(arg_0, create_using=nx.DiGraph())"} +{"_id": "doc_7163", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns all edges with the specified edge type.\n\n Parameters\n ----------\n edge_type : int\n An integer specifying what type of edges to return.\n\n Returns\n -------\n out : list of 2-tuples\n A list of 2-tuples representing the edges in the graph\n with the specified edge type.\n\n Examples\n --------\n Lets get type 2 edges from the following graph\n\n >>> import queueing_tool as qt\n >>> adjacency = {\n ... 0: {1: {'edge_type': 2}},\n ... 1: {2: {'edge_type': 1},\n ... 3: {'edge_type': 4}},\n ... 2: {0: {'edge_type': 2}},\n ... 3: {3: {'edge_type': 0}}\n ... 
}\n >>> G = qt.QueueNetworkDiGraph(adjacency)\n >>> ans = G.Func(2)\n >>> ans.sort()\n >>> ans\n [(0, 1), (2, 0)]\n \"\"\"\n arg_2 = []\n for arg_3 in arg_0.edges():\n if arg_0.adj[arg_3[0]][arg_3[1]].get('edge_type') == arg_1:\n arg_2.append(arg_3)\n return arg_2"} +{"_id": "doc_7164", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Returns the arguments used when plotting.\n\n Takes any keyword arguments for\n :class:`~matplotlib.collections.LineCollection` and\n :meth:`~matplotlib.axes.Axes.scatter` and returns two\n dictionaries with all the defaults set.\n\n Parameters\n ----------\n line_kwargs : dict (optional, default: ``None``)\n Any keyword arguments accepted by\n :class:`~matplotlib.collections.LineCollection`.\n scatter_kwargs : dict (optional, default: ``None``)\n Any keyword arguments accepted by\n :meth:`~matplotlib.axes.Axes.scatter`.\n\n Returns\n -------\n tuple\n A 2-tuple of dicts. The first entry is the keyword\n arguments for\n :class:`~matplotlib.collections.LineCollection` and the\n second is the keyword args for\n :meth:`~matplotlib.axes.Axes.scatter`.\n\n Notes\n -----\n If a specific keyword argument is not passed then the defaults\n are used.\n \"\"\"\n if arg_3 is not None:\n arg_0.set_pos(arg_3)\n elif arg_0.pos is None:\n arg_0.set_pos()\n\n arg_4 = [0 for arg_5 in arg_0.edges()]\n for arg_5 in arg_0.edges():\n arg_6 = arg_0.edge_index[arg_5]\n arg_4[arg_6] = (arg_0.pos[arg_5[0]], arg_0.pos[arg_5[1]])\n\n arg_7 = {\n 'segments': arg_4,\n 'colors': arg_0.edge_color,\n 'linewidths': (1,),\n 'antialiaseds': (1,),\n 'linestyle': 'solid',\n 'transOffset': None,\n 'cmap': plt.cm.ocean_r,\n 'pickradius': 5,\n 'zorder': 0,\n 'facecolors': None,\n 'norm': None,\n 'offsets': None,\n 'offset_position': 'screen',\n 'hatch': None,\n }\n arg_8 = {\n 'x': arg_0.pos[:, 0],\n 'y': arg_0.pos[:, 1],\n 's': 50,\n 'c': arg_0.vertex_fill_color,\n 'alpha': None,\n 'norm': None,\n 'vmin': None,\n 'vmax': None,\n 'marker': 'o',\n 'zorder': 2,\n 'cmap': plt.cm.ocean_r,\n 'linewidths': 1,\n 'edgecolors': arg_0.vertex_color,\n 'facecolors': None,\n 'antialiaseds': None,\n 'offset_position': 'screen',\n 'hatch': None,\n }\n\n arg_1 = {} if arg_1 is None else arg_1\n arg_2 = {} if arg_2 is None else arg_2\n\n for arg_9, arg_10 in arg_1.items():\n if arg_9 in arg_7:\n arg_7[arg_9] = arg_10\n\n for arg_9, arg_10 in arg_2.items():\n if arg_9 in arg_8:\n arg_8[arg_9] = arg_10\n\n return arg_7, arg_8"} +{"_id": "doc_7165", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"A function that returns the arrival time of the next arrival for\n a Poisson random measure.\n\n Parameters\n ----------\n t : float\n The start time from which to simulate the next arrival time.\n rate : function\n The *intensity function* for the measure, where ``rate(t)`` is\n the expected arrival rate at time ``t``.\n rate_max : float\n The maximum value of the ``rate`` function.\n\n Returns\n -------\n out : float\n The time of the next arrival.\n\n Notes\n -----\n This function returns the time of the next arrival, where the\n distribution of the number of arrivals between times :math:`t` and\n :math:`t+s` is Poisson with mean\n\n .. math::\n\n \\int_{t}^{t+s} dx \\, r(x)\n\n where :math:`r(t)` is the supplied ``rate`` function. 
This function\n    can only simulate processes that have bounded intensity functions.\n    See chapter 6 of [3]_ for more on the mathematics behind Poisson\n    random measures; the book's publisher, Springer, has that chapter\n    available online for free at (`pdf`_\\).\n\n    A Poisson random measure is sometimes called a non-homogeneous\n    Poisson process. A Poisson process is a special type of Poisson\n    random measure.\n\n    .. _pdf: http://www.springer.com/cda/content/document/\\\n        cda_downloaddocument/9780387878584-c1.pdf\n\n    Examples\n    --------\n    Suppose you wanted to model the arrival process as a Poisson\n    random measure with rate function :math:`r(t) = 2 + \\sin( 2\\pi t)`.\n    Then you could do so as follows:\n\n    >>> import queueing_tool as qt\n    >>> import numpy as np\n    >>> np.random.seed(10)\n    >>> rate = lambda t: 2 + np.sin(2 * np.pi * t)\n    >>> arr_f = lambda t: qt.Func(t, rate, 3)\n    >>> arr_f(1) # doctest: +ELLIPSIS\n    1.491...\n\n    References\n    ----------\n    .. [3] Cinlar, Erhan. *Probability and stochastics*. Graduate Texts in\\\n        Mathematics. Vol. 261. Springer, New York, 2011.\\\n        :doi:`10.1007/978-0-387-87859-1`\n    \"\"\"\n    arg_3 = 1.0 / arg_2\n    arg_0 = arg_0 + exponential(arg_3)\n    while arg_2 * uniform() > arg_1(arg_0):\n        arg_0 = arg_0 + exponential(arg_3)\n    return arg_0"}
{"_id": "doc_7166", "title": "", "text": "def Func(arg_0, arg_1=0):\n    \"\"\"Returns a color for the queue.\n\n    Parameters\n    ----------\n    which : int (optional, default: ``0``)\n        Specifies the type of color to return.\n\n    Returns\n    -------\n    color : list\n        Returns an RGBA color that is represented as a list with 4\n        entries where each entry can be any floating point number\n        between 0 and 1.\n\n        * If ``which`` is 1 then it returns the color of the edge\n          as if it were a self loop. This is specified in\n          ``colors['edge_loop_color']``.\n        * If ``which`` is 2 then it returns the color of the vertex\n          pen color (defined as color/vertex_color in\n          :meth:`.QueueNetworkDiGraph.graph_draw`). This is\n          specified in ``colors['vertex_color']``.\n        * If ``which`` is anything else, then it returns a\n          shade of the edge that is proportional to the number of\n          agents in the system -- which includes those being\n          served and those waiting to be served. More agents\n          correspond to darker edge colors. Uses\n          ``colors['vertex_fill_color']`` if the queue sits on a\n          loop, and ``colors['edge_color']`` otherwise.\n    \"\"\"\n    if arg_1 == 1:\n        arg_2 = arg_0.colors['edge_loop_color']\n\n    elif arg_1 == 2:\n        arg_2 = arg_0.colors['vertex_color']\n\n    else:\n        arg_3 = arg_0.coloring_sensitivity * arg_0.num_servers + 1.\n        arg_4 = 1. 
- min(arg_0.num_system / arg_3, 1)\n\n if arg_0.edge[0] == arg_0.edge[1]:\n arg_2 = [i * arg_4 for i in arg_0.colors['vertex_fill_color']]\n arg_2[3] = 1.0\n else:\n arg_2 = [i * arg_4 for i in arg_0.colors['edge_color']]\n arg_2[3] = 1 / 2.\n\n return arg_2"} +{"_id": "doc_7167", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns an integer representing whether the next event is\n an arrival, a departure, or nothing.\n\n Returns\n -------\n out : int\n An integer representing whether the next event is an\n arrival or a departure: ``1`` corresponds to an arrival,\n ``2`` corresponds to a departure, and ``0`` corresponds to\n nothing scheduled to occur.\n \"\"\"\n if arg_0._departures[0]._time < arg_0._arrivals[0]._time:\n return 2\n elif arg_0._arrivals[0]._time < infty:\n return 1\n else:\n return 0"} +{"_id": "doc_7168", "title": "", "text": "def Func(arg_0):\n \"\"\"Resets the queue to its initial state.\n\n The attributes ``t``, ``num_events``, ``num_agents`` are set to\n zero, :meth:`.reset_colors` is called, and the\n :meth:`.QueueServer.Func` method is called for each queue in\n the network.\n\n Notes\n -----\n ``QueueNetwork`` must be re-initialized before any simulations\n can run.\n \"\"\"\n arg_0._t = 0\n arg_0.num_events = 0\n arg_0.num_agents = np.zeros(arg_0.nE, int)\n arg_0._fancy_heap = PriorityQueue()\n arg_0._prev_edge = None\n arg_0._initialized = False\n arg_0.reset_colors()\n for arg_7 in arg_0.edge2queue:\n arg_7.Func()"} +{"_id": "doc_7169", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Clears data from all queues.\n\n If none of the parameters are given then every queue's data is\n cleared.\n\n Parameters\n ----------\n queues : int or an iterable of int (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` whose data will\n be cleared.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues' data to clear. 
Must be\n        either:\n\n        * A 2-tuple of the edge's source and target vertex\n          indices, or\n        * An iterable of 2-tuples of the edge's source and\n          target vertex indices.\n\n    edge_type : int or an iterable of int (optional)\n        An integer, or a collection of integers identifying which\n        edge types will have their data cleared.\n    \"\"\"\n    arg_1 = _get_queues(arg_0.g, arg_1, arg_2, arg_3)\n\n    for arg_4 in arg_1:\n        arg_0.edge2queue[arg_4].data = {}"}
{"_id": "doc_7170", "title": "", "text": "def Func(arg_0):\n    \"\"\"Returns a deep Func of itself.\"\"\"\n    arg_1 = QueueNetwork(None)\n    arg_1.g = arg_0.g.Func()\n    arg_1.max_agents = Func.deepFunc(arg_0.max_agents)\n    arg_1.nV = Func.deepFunc(arg_0.nV)\n    arg_1.nE = Func.deepFunc(arg_0.nE)\n    arg_1.num_agents = Func.deepFunc(arg_0.num_agents)\n    arg_1.num_events = Func.deepFunc(arg_0.num_events)\n    arg_1._t = Func.deepFunc(arg_0._t)\n    arg_1._initialized = Func.deepFunc(arg_0._initialized)\n    arg_1._prev_edge = Func.deepFunc(arg_0._prev_edge)\n    arg_1._blocking = Func.deepFunc(arg_0._blocking)\n    arg_1.colors = Func.deepFunc(arg_0.colors)\n    arg_1.out_edges = Func.deepFunc(arg_0.out_edges)\n    arg_1.in_edges = Func.deepFunc(arg_0.in_edges)\n    arg_1.edge2queue = Func.deepFunc(arg_0.edge2queue)\n    arg_1._route_probs = Func.deepFunc(arg_0._route_probs)\n\n    if arg_1._initialized:\n        arg_17 = [q._key() for q in arg_1.edge2queue if q._time < np.infty]\n        arg_1._fancy_heap = PriorityQueue(arg_17, arg_1.nE)\n\n    return arg_1"}
{"_id": "doc_7171", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=None,\n         arg_3=None, **arg_4):\n    \"\"\"Draws the network. The coloring of the network corresponds\n    to the number of agents at each queue.\n\n    Parameters\n    ----------\n    update_colors : ``bool`` (optional, default: ``True``).\n        Specifies whether all the colors are updated.\n    line_kwargs : dict (optional, default: None)\n        Any keyword arguments accepted by\n        :class:`~matplotlib.collections.LineCollection`\n    scatter_kwargs : dict (optional, default: None)\n        Any keyword arguments accepted by\n        :meth:`~matplotlib.axes.Axes.scatter`.\n    bgcolor : list (optional, keyword only)\n        A list with 4 floats representing an RGBA color. The\n        default is defined in ``self.colors['bgcolor']``.\n    figsize : tuple (optional, keyword only, default: ``(7, 7)``)\n        The width and height of the canvas in inches.\n    **kwargs\n        Any parameters to pass to\n        :meth:`.QueueNetworkDiGraph.Func_graph`.\n\n    Notes\n    -----\n    This method relies heavily on\n    :meth:`.QueueNetworkDiGraph.Func_graph`. Also, there is a\n    parameter that sets the background color of the canvas, which\n    is the ``bgcolor`` parameter.\n\n    Examples\n    --------\n    To Func the current state of the network, call:\n\n    >>> import queueing_tool as qt\n    >>> g = qt.generate_pagerank_graph(100, seed=13)\n    >>> net = qt.QueueNetwork(g, seed=13)\n    >>> net.initialize(100)\n    >>> net.simulate(1200)\n    >>> net.Func() # doctest: +SKIP\n\n    If you specify a file name and location, the Funcing will be\n    saved to disk. For example, to save the Funcing to the current\n    working directory do the following:\n\n    >>> net.Func(fname=\"state.png\", scatter_kwargs={'s': 40}) # doctest: +SKIP\n\n    .. figure:: current_state1.png\n        :align: center\n\n    The shade of each edge depicts how many agents are located at\n    the corresponding queue. The shade of each vertex is determined\n    by the total number of inbound agents. 
Although loops are not\n    visible by default, the vertex that corresponds to a loop shows\n    how many agents are in that loop.\n\n    There are several additional parameters that can be passed --\n    all :meth:`.QueueNetworkDiGraph.Func_graph` parameters are\n    valid. For example, to show the edges as dashed lines do the\n    following.\n\n    >>> net.Func(line_kwargs={'linestyle': 'dashed'}) # doctest: +SKIP\n    \"\"\"\n    if not HAS_MATPLOTLIB:\n        raise ImportError(\"matplotlib is necessary to Func the network.\")\n\n    if arg_1:\n        arg_0._update_all_colors()\n\n    if 'bgcolor' not in arg_4:\n        arg_4['bgcolor'] = arg_0.colors['bgcolor']\n\n    arg_0.g.Func_graph(arg_2=arg_2,\n                       arg_3=arg_3, **arg_4)"}
{"_id": "doc_7172", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=False):\n    \"\"\"Gets data from queues and organizes it by agent.\n\n    If none of the parameters are given then data from every\n    :class:`.QueueServer` is retrieved.\n\n    Parameters\n    ----------\n    queues : int or *array_like* (optional)\n        The edge index (or an iterable of edge indices) identifying\n        the :class:`QueueServer(s)<.QueueServer>` whose data will\n        be retrieved.\n    edge : 2-tuple of int or *array_like* (optional)\n        Explicitly specify which queues to retrieve agent data\n        from. Must be either:\n\n        * A 2-tuple of the edge's source and target vertex\n          indices, or\n        * An iterable of 2-tuples of the edge's source and\n          target vertex indices.\n\n    edge_type : int or an iterable of int (optional)\n        An integer, or a collection of integers identifying which\n        edge types to retrieve agent data from.\n    return_header : bool (optional, default: False)\n        Determines whether the column headers are returned.\n\n    Returns\n    -------\n    dict\n        Returns a ``dict`` where the keys are the\n        :class:`Agent's<.Agent>` ``agent_id`` and the values are\n        :class:`ndarrays<~numpy.ndarray>` for that\n        :class:`Agent's<.Agent>` data. The columns of this array\n        are as follows:\n\n        * First: The arrival time of an agent.\n        * Second: The service start time of an agent.\n        * Third: The departure time of an agent.\n        * Fourth: The length of the queue upon the agent's arrival.\n        * Fifth: The total number of :class:`Agents<.Agent>` in the\n          :class:`.QueueServer`.\n        * Sixth: the :class:`QueueServer's<.QueueServer>` id\n          (its edge index).\n\n    headers : str (optional)\n        A comma separated string of the column headers. Returns\n        ``'arrival,service,departure,num_queued,num_total,q_id'``\n    \"\"\"\n    arg_1 = _get_queues(arg_0.g, arg_1, arg_2, arg_3)\n\n    arg_5 = {}\n    for arg_6 in arg_1:\n        for arg_7, arg_8 in arg_0.edge2queue[arg_6].data.items():\n            arg_9 = np.zeros((len(arg_8), 6))\n            arg_9[:, :5] = np.array(arg_8)\n            arg_9[:, 5] = arg_6\n            if arg_7 in arg_5:\n                arg_5[arg_7] = np.vstack((arg_5[arg_7], arg_9))\n            else:\n                arg_5[arg_7] = arg_9\n\n    arg_10 = [\n        ('a', float),\n        ('s', float),\n        ('d', float),\n        ('q', float),\n        ('n', float),\n        ('id', float)\n    ]\n    for arg_7, arg_8 in arg_5.items():\n        arg_9 = np.array([tuple(d) for d in arg_8.tolist()], dtype=arg_10)\n        arg_9 = np.sort(arg_9, order='a')\n        arg_5[arg_7] = np.array([tuple(d) for d in arg_9])\n\n    if arg_4:\n        return arg_5, 'arrival,service,departure,num_queued,num_total,q_id'\n\n    return arg_5"}
{"_id": "doc_7173", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=None, arg_3=None, arg_4=None):\n    \"\"\"Prepares the ``QueueNetwork`` for simulation.\n\n    Each :class:`.QueueServer` in the network starts inactive,\n    which means they do not accept arrivals from outside the\n    network, and they have no agents in their system. 
This method\n    sets queues to active, which then allows agents to arrive from\n    outside the network.\n\n    Parameters\n    ----------\n    nActive : int (optional, default: ``1``)\n        The number of queues to set as active. The queues are\n        selected randomly.\n    queues : int or *array_like* (optional)\n        The edge index (or an iterable of edge indices) identifying\n        the :class:`QueueServer(s)<.QueueServer>` to make active.\n    edges : 2-tuple of int or *array_like* (optional)\n        Explicitly specify which queues to make active. Must be\n        either:\n\n        * A 2-tuple of the edge's source and target vertex\n          indices, or\n        * An iterable of 2-tuples of the edge's source and\n          target vertex indices.\n\n    edge_type : int or an iterable of int (optional)\n        An integer, or a collection of integers identifying which\n        edge types will be set active.\n\n    Raises\n    ------\n    ValueError\n        If ``queues``, ``edges``, and ``edge_type`` are all ``None``\n        and ``nActive`` is an integer less than 1 then a\n        :exc:`~ValueError` is raised.\n    TypeError\n        If ``queues``, ``edges``, and ``edge_type`` are all ``None``\n        and ``nActive`` is not an integer then a :exc:`~TypeError`\n        is raised.\n    QueueingToolError\n        Raised if all the queues specified are\n        :class:`NullQueues<.NullQueue>`.\n\n    Notes\n    -----\n    :class:`NullQueues<.NullQueue>` cannot be activated, and are\n    sifted out if they are specified. More specifically, every edge\n    with edge type 0 is sifted out.\n    \"\"\"\n    if arg_2 is None and arg_3 is None and arg_4 is None:\n        if arg_1 >= 1 and isinstance(arg_1, numbers.Integral):\n            arg_5 = [q.edge[2] for q in arg_0.edge2queue if q.edge[3] != 0]\n            arg_6 = min(arg_1, len(arg_5))\n            arg_2 = np.random.choice(arg_5, size=arg_6, replace=False)\n        elif not isinstance(arg_1, numbers.Integral):\n            arg_7 = \"If queues is None, then nActive must be an integer.\"\n            raise TypeError(arg_7)\n        else:\n            arg_7 = (\"If queues is None, then nActive must be a \"\n                     \"positive int.\")\n            raise ValueError(arg_7)\n    else:\n        arg_2 = _get_queues(arg_0.g, arg_2, arg_3, arg_4)\n\n    arg_2 = [e for e in arg_2 if arg_0.edge2queue[e].edge[3] != 0]\n\n    if len(arg_2) == 0:\n        raise QueueingToolError(\"There were no queues to Func.\")\n\n    if len(arg_2) > arg_0.max_agents:\n        arg_2 = arg_2[:arg_0.max_agents]\n\n    for arg_8 in arg_2:\n        arg_0.edge2queue[arg_8].set_active()\n        arg_0.num_agents[arg_8] = arg_0.edge2queue[arg_8]._num_total\n\n    arg_10 = [q._key() for q in arg_0.edge2queue if q._time < np.infty]\n    arg_0._fancy_heap = PriorityQueue(arg_10, arg_0.nE)\n    arg_0._Funcd = True"}
{"_id": "doc_7174", "title": "", "text": "def Func(arg_0):\n    \"\"\"Returns whether the next event is an arrival or a departure\n    and the queue the event is occurring at.\n\n    Returns\n    -------\n    des : str\n        Indicates whether the next event is an arrival, a\n        departure, or nothing; returns ``'Arrival'``,\n        ``'Departure'``, or ``'Nothing'``.\n    edge : int or ``None``\n        The edge index of the edge that this event will occur at.\n        If there are no events then ``None`` is returned.\n    \"\"\"\n    if arg_0._fancy_heap.size == 0:\n        arg_1 = 'Nothing'\n        arg_2 = None\n    else:\n        arg_3 = [arg_5._key() for arg_5 in arg_0.edge2queue]\n        arg_3.sort()\n        arg_4 = arg_3[0][1]\n        arg_5 = arg_0.edge2queue[arg_4]\n\n        arg_1 = 'Arrival' if arg_5.Func() == 1 else 'Departure'\n        arg_2 = arg_5.edge[2]\n    return arg_1, arg_2"}
{"_id": "doc_7175", "title": "", "text": "def Func(arg_0, arg_1):\n    \"\"\"Change the routing transition probabilities for the\n    network.\n\n    Parameters\n    ----------\n    mat : dict or :class:`~numpy.ndarray`\n        A transition routing matrix or 
transition dictionary. If\n        passed a dictionary, the keys are source vertex indices and\n        the values are dictionaries with target vertex indices\n        as the keys and the probabilities of routing from the\n        source to the target as the values.\n\n    Raises\n    ------\n    ValueError\n        A :exc:`.ValueError` is raised if: the keys in the dict\n        don't match with a vertex index in the graph; or if the\n        :class:`~numpy.ndarray` is passed with the wrong shape,\n        must be (``num_vertices``, ``num_vertices``); or the values\n        passed are not probabilities (for each vertex they are\n        positive and sum to 1);\n    TypeError\n        A :exc:`.TypeError` is raised if mat is not a dict or\n        :class:`~numpy.ndarray`.\n\n    Examples\n    --------\n    The default transition matrix is every out edge being equally\n    likely:\n\n    >>> import queueing_tool as qt\n    >>> adjacency = {\n    ...     0: [2],\n    ...     1: [2, 3],\n    ...     2: [0, 1, 2, 4],\n    ...     3: [1],\n    ...     4: [2],\n    ... }\n    >>> g = qt.adjacency2graph(adjacency)\n    >>> net = qt.QueueNetwork(g)\n    >>> net.transitions(False) # doctest: +ELLIPSIS\n    ... # doctest: +NORMALIZE_WHITESPACE\n    {0: {2: 1.0},\n     1: {2: 0.5, 3: 0.5},\n     2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},\n     3: {1: 1.0},\n     4: {2: 1.0}}\n\n    If you want to change only one vertex's transition\n    probabilities, you can do so with the following:\n\n    >>> net.Func({1 : {2: 0.75, 3: 0.25}})\n    >>> net.transitions(False) # doctest: +ELLIPSIS\n    ... # doctest: +NORMALIZE_WHITESPACE\n    {0: {2: 1.0},\n     1: {2: 0.75, 3: 0.25},\n     2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},\n     3: {1: 1.0},\n     4: {2: 1.0}}\n\n    One can generate a transition matrix using\n    :func:`.generate_transition_matrix`. You can change all\n    transition probabilities with an :class:`~numpy.ndarray`:\n\n    >>> mat = qt.generate_transition_matrix(g, seed=10)\n    >>> net.Func(mat)\n    >>> net.transitions(False) # doctest: +ELLIPSIS\n    ... 
# doctest: +NORMALIZE_WHITESPACE\n {0: {2: 1.0},\n 1: {2: 0.962..., 3: 0.037...},\n 2: {0: 0.301..., 1: 0.353..., 2: 0.235..., 4: 0.108...},\n 3: {1: 1.0},\n 4: {2: 1.0}}\n\n See Also\n --------\n :meth:`.transitions` : Return the current routing\n probabilities.\n :func:`.generate_transition_matrix` : Generate a random routing\n matrix.\n \"\"\"\n if isinstance(arg_1, dict):\n for arg_2, arg_3 in arg_1.items():\n arg_4 = list(arg_3.values())\n\n if arg_2 not in arg_0.g.node:\n arg_5 = \"One of the keys don't correspond to a vertex.\"\n raise ValueError(arg_5)\n elif len(arg_0.out_edges[arg_2]) > 0 and not np.isclose(sum(arg_4), 1):\n arg_5 = \"Sum of transition probabilities at a vertex was not 1.\"\n raise ValueError(arg_5)\n elif (np.array(arg_4) < 0).any():\n arg_5 = \"Some transition probabilities were negative.\"\n raise ValueError(arg_5)\n\n for arg_6, arg_7 in enumerate(sorted(arg_0.g.out_edges(arg_2))):\n arg_0._route_probs[arg_2][arg_6] = arg_3.get(arg_7[1], 0)\n\n elif isinstance(arg_1, np.ndarray):\n arg_9 = np.array([arg_0.g.out_degree(v) > 0 for v in arg_0.g.nodes()])\n if arg_1.shape != (arg_0.nV, arg_0.nV):\n arg_5 = (\"Matrix is the wrong shape, should \"\n \"be {0} x {1}.\").format(arg_0.nV, arg_0.nV)\n raise ValueError(arg_5)\n elif not np.allclose(np.sum(arg_1[arg_9, :], axis=1), 1):\n arg_5 = \"Sum of transition probabilities at a vertex was not 1.\"\n raise ValueError(arg_5)\n elif (arg_1 < 0).any():\n raise ValueError(\"Some transition probabilities were negative.\")\n\n for arg_6 in range(arg_0.nV):\n for arg_10, arg_7 in enumerate(sorted(arg_0.g.out_edges(arg_6))):\n arg_0._route_probs[arg_6][arg_10] = arg_1[arg_6, arg_7[1]]\n else:\n raise TypeError(\"mat must be a numpy array or a dict.\")"} +{"_id": "doc_7176", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Draws the network, highlighting queues of a certain type.\n\n The colored vertices represent self loops of type ``edge_type``.\n Dark edges represent queues of type ``edge_type``.\n\n Parameters\n ----------\n edge_type : int\n The type of vertices and edges to be shown.\n **kwargs\n Any additional parameters to pass to :meth:`.draw`, and\n :meth:`.QueueNetworkDiGraph.draw_graph`\n\n Notes\n -----\n The colors are defined by the class attribute ``colors``. The\n relevant colors are ``vertex_active``, ``vertex_inactive``,\n ``vertex_highlight``, ``edge_active``, and ``edge_inactive``.\n\n Examples\n --------\n The following code highlights all edges with edge type ``2``.\n If the edge is a loop then the vertex is highlighted as well.\n In this case all edges with edge type ``2`` happen to be loops.\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=13)\n >>> net = qt.QueueNetwork(g, seed=13)\n >>> fname = 'edge_type_2.png'\n >>> net.Func(2, fname=fname) # doctest: +SKIP\n\n .. 
figure:: edge_type_2-1.png\n :align: center\n \"\"\"\n for arg_3 in arg_0.g.nodes():\n arg_4 = (arg_3, arg_3)\n if arg_0.g.is_edge(arg_4) and arg_0.g.ep(arg_4, 'edge_type') == arg_1:\n arg_5 = arg_0.g.edge_index[arg_4]\n arg_0.g.set_vp(arg_3, 'vertex_fill_color', arg_0.colors['vertex_highlight'])\n arg_0.g.set_vp(arg_3, 'vertex_color', arg_0.edge2queue[arg_5].colors['vertex_color'])\n else:\n arg_0.g.set_vp(arg_3, 'vertex_fill_color', arg_0.colors['vertex_inactive'])\n arg_0.g.set_vp(arg_3, 'vertex_color', [0, 0, 0, 0.9])\n\n for arg_4 in arg_0.g.edges():\n if arg_0.g.ep(arg_4, 'edge_type') == arg_1:\n arg_0.g.set_ep(arg_4, 'edge_color', arg_0.colors['edge_active'])\n else:\n arg_0.g.set_ep(arg_4, 'edge_color', arg_0.colors['edge_inactive'])\n\n arg_0.draw(update_colors=False, **arg_2)\n arg_0._update_all_colors()"} +{"_id": "doc_7177", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=None):\n \"\"\"Simulates the network forward.\n\n Simulates either a specific number of events or for a specified\n amount of simulation time.\n\n Parameters\n ----------\n n : int (optional, default: 1)\n The number of events to Func. If ``t`` is not given\n then this parameter is used.\n t : float (optional)\n The amount of simulation time to Func forward. If\n given, ``t`` is used instead of ``n``.\n\n Raises\n ------\n QueueingToolError\n Will raise a :exc:`.QueueingToolError` if the\n ``QueueNetwork`` has not been initialized. Call\n :meth:`.initialize` before calling this method.\n\n Examples\n --------\n Let ``net`` denote your instance of a ``QueueNetwork``. Before\n you Func, you need to initialize the network, which allows\n arrivals from outside the network. To initialize with 2 (random\n chosen) edges accepting arrivals run:\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=50)\n >>> net = qt.QueueNetwork(g, seed=50)\n >>> net.initialize(2)\n\n To Func the network 50000 events run:\n\n >>> net.num_events\n 0\n >>> net.Func(50000)\n >>> net.num_events\n 50000\n\n To Func the network for at least 75 simulation time units\n run:\n\n >>> t0 = net.current_time\n >>> net.Func(t=75)\n >>> t1 = net.current_time\n >>> t1 - t0 # doctest: +ELLIPSIS\n 75...\n \"\"\"\n if not arg_0._initialized:\n arg_3 = (\"Network has not been initialized. \"\n \"Call '.initialize()' first.\")\n raise QueueingToolError(arg_3)\n if arg_2 is None:\n for arg_4 in range(arg_1):\n arg_0._Func_next_event(slow=False)\n else:\n arg_5 = arg_0._t\n while arg_0._t < arg_5 + arg_2:\n arg_0._Func_next_event(slow=False)"} +{"_id": "doc_7178", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Tells the queues to collect data on agents' arrival, service\n start, and departure times.\n\n If none of the parameters are given then every\n :class:`.QueueServer` will start collecting data.\n\n Parameters\n ----------\n queues : :any:`int`, *array_like* (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` that will start\n collecting data.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues will collect data. 
Must be\n        either:\n\n        * A 2-tuple of the edge's source and target vertex\n          indices, or\n        * An iterable of 2-tuples of the edge's source and\n          target vertex indices.\n\n    edge_type : int or an iterable of int (optional)\n        An integer, or a collection of integers identifying which\n        edge types will be set active.\n    \"\"\"\n    arg_1 = _get_queues(arg_0.g, arg_1, arg_2, arg_3)\n\n    for arg_4 in arg_1:\n        arg_0.edge2queue[arg_4].collect_data = True"}
{"_id": "doc_7179", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n    \"\"\"Tells the queues to stop collecting data on agents.\n\n    If none of the parameters are given then every\n    :class:`.QueueServer` will stop collecting data.\n\n    Parameters\n    ----------\n    queues : int, *array_like* (optional)\n        The edge index (or an iterable of edge indices) identifying\n        the :class:`QueueServer(s)<.QueueServer>` that will stop\n        collecting data.\n    edge : 2-tuple of int or *array_like* (optional)\n        Explicitly specify which queues will stop collecting data.\n        Must be either:\n\n        * A 2-tuple of the edge's source and target vertex\n          indices, or\n        * An iterable of 2-tuples of the edge's source and\n          target vertex indices.\n\n    edge_type : int or an iterable of int (optional)\n        An integer, or a collection of integers identifying which\n        edge types will stop collecting data.\n    \"\"\"\n    arg_1 = _get_queues(arg_0.g, arg_1, arg_2, arg_3)\n\n    for arg_4 in arg_1:\n        arg_0.edge2queue[arg_4].collect_data = False"}
{"_id": "doc_7180", "title": "", "text": "def Func(arg_0, arg_1=True):\n    \"\"\"Returns the routing probabilities for each vertex in the\n    graph.\n\n    Parameters\n    ----------\n    return_matrix : bool (optional, the default is ``True``)\n        Specifies whether an :class:`~numpy.ndarray` is returned.\n        If ``False``, a dict is returned instead.\n\n    Returns\n    -------\n    out : a dict or :class:`~numpy.ndarray`\n        The transition probabilities for each vertex in the graph.\n        If ``out`` is an :class:`~numpy.ndarray`, then\n        ``out[v, u]`` returns the probability of a transition from\n        vertex ``v`` to vertex ``u``. If ``out`` is a dict\n        then ``out_edge[v][u]`` is the probability of moving from\n        vertex ``v`` to the vertex ``u``.\n\n    Examples\n    --------\n    Let's change the routing probabilities:\n\n    >>> import queueing_tool as qt\n    >>> import networkx as nx\n    >>> g = nx.sedgewick_maze_graph()\n    >>> net = qt.QueueNetwork(g)\n\n    Below is an adjacency list for the graph ``g``.\n\n    >>> ans = qt.graph2dict(g, False)\n    >>> {k: sorted(v) for k, v in ans.items()}\n    ... # doctest: +NORMALIZE_WHITESPACE\n    {0: [2, 5, 7],\n     1: [7],\n     2: [0, 6],\n     3: [4, 5],\n     4: [3, 5, 6, 7],\n     5: [0, 3, 4],\n     6: [2, 4],\n     7: [0, 1, 4]}\n\n    The default transition matrix is every out edge being equally\n    likely:\n\n    >>> net.Func(False) # doctest: +ELLIPSIS\n    ... # doctest: +NORMALIZE_WHITESPACE\n    {0: {2: 0.333..., 5: 0.333..., 7: 0.333...},\n     1: {7: 1.0},\n     2: {0: 0.5, 6: 0.5},\n     3: {4: 0.5, 5: 0.5},\n     4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},\n     5: {0: 0.333..., 3: 0.333..., 4: 0.333...},\n     6: {2: 0.5, 4: 0.5},\n     7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}\n\n    Now we will generate a random routing matrix:\n\n    >>> mat = qt.generate_transition_matrix(g, seed=96)\n    >>> net.set_Func(mat)\n    >>> net.Func(False) # doctest: +ELLIPSIS\n    ... 
# doctest: +NORMALIZE_WHITESPACE\n {0: {2: 0.112..., 5: 0.466..., 7: 0.420...},\n 1: {7: 1.0},\n 2: {0: 0.561..., 6: 0.438...},\n 3: {4: 0.545..., 5: 0.454...},\n 4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},\n 5: {0: 0.265..., 3: 0.460..., 4: 0.274...},\n 6: {2: 0.673..., 4: 0.326...},\n 7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}\n\n What this shows is the following: when an :class:`.Agent` is at\n vertex ``2`` they will transition to vertex ``0`` with\n probability ``0.561`` and route to vertex ``6`` probability\n ``0.438``, when at vertex ``6`` they will transition back to\n vertex ``2`` with probability ``0.673`` and route vertex ``4``\n probability ``0.326``, etc.\n \"\"\"\n if arg_1:\n arg_2 = np.zeros((arg_0.nV, arg_0.nV))\n for arg_3 in arg_0.g.nodes():\n arg_4 = [e[1] for e in sorted(arg_0.g.out_edges(arg_3))]\n arg_2[arg_3, arg_4] = arg_0._route_probs[arg_3]\n else:\n arg_2 = {\n k: {e[1]: p for e, p in zip(sorted(arg_0.g.out_edges(k)), value)}\n for k, value in enumerate(arg_0._route_probs)\n }\n\n return arg_2"} +{"_id": "doc_7181", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the number of elements in the set that ``s`` belongs to.\n\n Parameters\n ----------\n s : object\n An object\n\n Returns\n -------\n out : int\n The number of elements in the set that ``s`` belongs to.\n \"\"\"\n arg_2 = arg_0.find(arg_1)\n return arg_0._Func[arg_2]"} +{"_id": "doc_7182", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Locates the leader of the set to which the element ``s`` belongs.\n\n Parameters\n ----------\n s : object\n An object that the ``UnionFind`` contains.\n\n Returns\n -------\n object\n The leader of the set that contains ``s``.\n \"\"\"\n arg_2 = [arg_1]\n arg_3 = arg_0._leader[arg_1]\n\n while arg_3 != arg_0._leader[arg_3]:\n arg_2.append(arg_3)\n arg_3 = arg_0._leader[arg_3]\n\n if len(arg_2) > 1:\n for arg_4 in arg_2:\n arg_0._leader[arg_4] = arg_3\n\n return arg_3"} +{"_id": "doc_7183", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Merges the set that contains ``a`` with the set that contains ``b``.\n\n Parameters\n ----------\n a, b : objects\n Two objects whose sets are to be merged.\n \"\"\"\n arg_3, arg_4 = arg_0.find(arg_1), arg_0.find(arg_2)\n if arg_3 != arg_4:\n arg_5, arg_6 = arg_0._rank[arg_3], arg_0._rank[arg_4]\n if arg_6 > arg_5:\n arg_5, arg_6 = arg_6, arg_5\n arg_3, arg_4 = arg_4, arg_3\n if arg_5 == arg_6:\n arg_0._rank[arg_3] += 1\n\n arg_0._leader[arg_4] = arg_3\n arg_0._size[arg_3] += arg_0._size[arg_4]\n arg_0.nClusters -= 1"} +{"_id": "doc_7184", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Generates a random transition matrix for the graph ``g``.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.\n Any object that :any:`DiGraph` accepts.\n seed : int (optional)\n An integer used to initialize numpy's psuedo-random number\n generator.\n\n Returns\n -------\n mat : :class:`~numpy.ndarray`\n Returns a transition matrix where ``mat[i, j]`` is the\n probability of transitioning from vertex ``i`` to vertex ``j``.\n If there is no edge connecting vertex ``i`` to vertex ``j``\n then ``mat[i, j] = 0``.\n \"\"\"\n arg_0 = _test_graph(arg_0)\n\n if isinstance(arg_1, numbers.Integral):\n arg_8.random.seed(arg_1)\n\n arg_2 = arg_0.number_of_nodes()\n arg_3 = arg_8.zeros((arg_2, arg_2))\n\n for arg_4 in arg_0.nodes():\n arg_5 = [e[1] for e in sorted(arg_0.out_edges(arg_4))]\n arg_6 = len(arg_5)\n if arg_6 == 1:\n arg_3[arg_4, arg_5] = 1\n elif arg_6 > 1:\n 
arg_7 = arg_8.ceil(arg_8.random.rand(arg_6) * 100) / 100.\n if arg_8.isclose(arg_8.sum(arg_7), 0):\n arg_7[arg_8.random.randint(arg_6)] = 1\n\n arg_3[arg_4, arg_5] = arg_7 / arg_8.sum(arg_7)\n\n return arg_3"} +{"_id": "doc_7185", "title": "", "text": "def Func(arg_0=250, **arg_1):\n \"\"\"Creates a random graph where the vertex types are\n selected using their pagerank.\n\n Calls :func:`.minimal_random_graph` and then\n :func:`.set_types_rank` where the ``rank`` keyword argument\n is given by :func:`networkx.pagerank`.\n\n Parameters\n ----------\n num_vertices : int (optional, the default is 250)\n The number of vertices in the graph.\n **kwargs :\n Any parameters to send to :func:`.minimal_random_graph` or\n :func:`.set_types_rank`.\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n A graph with a ``pos`` vertex property and the ``edge_type``\n edge property.\n\n Notes\n -----\n This function sets the edge types of a graph to be either 1, 2, or\n 3. It sets the vertices to type 2 by selecting the top\n ``pType2 * g.number_of_nodes()`` vertices given by the\n :func:`~networkx.pagerank` of the graph. A loop is added\n to all vertices identified this way (if one does not exist\n already). It then randomly sets vertices close to the type 2\n vertices as type 3, and adds loops to these vertices as well. These\n loops then have edge types that correspond to the vertices type.\n The rest of the edges are set to type 1.\n \"\"\"\n arg_2 = minimal_random_graph(arg_0, **arg_1)\n arg_3 = np.zeros(arg_0)\n for arg_4, arg_5 in nx.pagerank(arg_2).items():\n arg_3[arg_4] = arg_5\n arg_2 = set_types_rank(arg_2, rank=arg_3, **arg_1)\n return arg_2"} +{"_id": "doc_7186", "title": "", "text": "def Func(arg_0):\n \"\"\" Yield all of the documentation for trait definitions on a class object.\n \"\"\"\n # FIXME: gracefully handle errors here or in the caller?\n arg_1 = inspect.getsource(arg_0)\n arg_2 = CommentBlocker()\n arg_2.process_file(StringIO(arg_1))\n arg_3 = compiler.parse(arg_1)\n arg_4 = arg_3.node.nodes[0]\n for arg_5 in arg_4.code.nodes:\n # FIXME: handle other kinds of assignments?\n if isinstance(arg_5, compiler.ast.Assign):\n arg_6 = arg_5.nodes[0].name\n arg_7 = unparse(arg_5.expr).strip()\n arg_8 = strip_comment_marker(arg_2.search_for_comment(arg_5.lineno, default=''))\n yield arg_6, arg_7, arg_8"} +{"_id": "doc_7187", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\" Add lines to the block.\n \"\"\"\n if arg_1.strip():\n # Only Func if not entirely whitespace.\n arg_0.start_lineno = min(arg_0.start_lineno, arg_2[0])\n arg_0.end_lineno = max(arg_0.end_lineno, arg_3[0])"} +{"_id": "doc_7188", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" We are transitioning from a noncomment to a comment.\n \"\"\"\n arg_3 = NonComment(arg_1, arg_2)\n arg_0.blocks.append(arg_3)\n arg_0.current_block = arg_3"} +{"_id": "doc_7189", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\" Possibly add a new comment.\n\n Only adds a new comment if this comment is the only thing on the line.\n Otherwise, it extends the noncomment block.\n \"\"\"\n arg_5 = arg_4[:arg_2[1]]\n if arg_5.strip():\n # Oops! 
Trailing comment, not a comment block.\n arg_0.current_block.add(arg_1, arg_2, arg_3, arg_4)\n else:\n # A comment block.\n arg_6 = Comment(arg_2[0], arg_3[0], arg_1)\n arg_0.blocks.append(arg_6)\n arg_0.current_block = arg_6"} +{"_id": "doc_7190", "title": "", "text": "def Func(arg_0):\n \"\"\" Make the index mapping lines of actual code to their associated\n prefix comments.\n \"\"\"\n for arg_1, arg_2 in zip(arg_0.blocks[:-1], arg_0.blocks[1:]):\n if not arg_2.is_comment:\n arg_0.index[arg_2.start_lineno] = arg_1"} +{"_id": "doc_7191", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Read complete DSMR telegram's from the serial interface and parse it\n into CosemObject's and MbusObject's\n\n :rtype: generator\n \"\"\"\n with serial.Serial(**arg_0.serial_settings) as serial_handle:\n while True:\n arg_1 = serial_handle.Funcline()\n arg_0.telegram_buffer.append(arg_1.decode('ascii'))\n\n for arg_2 in arg_0.telegram_buffer.get_all():\n try:\n yield arg_0.telegram_parser.parse(arg_2)\n except InvalidChecksumError as e:\n logger.warning(str(e))\n except ParseError as e:\n logger.error('Failed to parse telegram: %s', e)"} +{"_id": "doc_7192", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Creates a DSMR asyncio protocol.\"\"\"\n\n if arg_0 == '2.2':\n arg_3 = telegram_specifications.V2_2\n arg_4 = SERIAL_SETTINGS_V2_2\n elif arg_0 == '4':\n arg_3 = telegram_specifications.V4\n arg_4 = SERIAL_SETTINGS_V4\n elif arg_0 == '5':\n arg_3 = telegram_specifications.V5\n arg_4 = SERIAL_SETTINGS_V5\n else:\n raise NotImplementedError(\"No telegram parser found for version: %s\",\n arg_0)\n\n arg_5 = partial(DSMRProtocol, arg_2, TelegramParser(arg_3),\n arg_1=arg_1)\n\n return arg_5, arg_4"} +{"_id": "doc_7193", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Creates a DSMR asyncio protocol coroutine using serial port.\"\"\"\n arg_4, arg_5 = create_dsmr_protocol(\n arg_1, arg_2, arg_3=None)\n arg_5['url'] = arg_0\n\n arg_6 = create_serial_connection(arg_3, arg_4, **arg_5)\n return arg_6"} +{"_id": "doc_7194", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add incoming data to buffer.\"\"\"\n arg_1 = arg_1.decode('ascii')\n arg_0.log.debug('received data: %s', arg_1)\n arg_0.telegram_buffer.append(arg_1)\n\n for arg_2 in arg_0.telegram_buffer.get_all():\n arg_0.handle_telegram(arg_2)"} +{"_id": "doc_7195", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Send off parsed telegram to handling callback.\"\"\"\n arg_0.log.debug('got telegram: %s', arg_1)\n\n try:\n arg_2 = arg_0.telegram_parser.parse(arg_1)\n except InvalidChecksumError as e:\n arg_0.log.warning(str(e))\n except ParseError:\n arg_0.log.exception(\"failed to parse telegram\")\n else:\n arg_0.telegram_callback(arg_2)"} +{"_id": "doc_7196", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse telegram from string to dict.\n\n The telegram str type makes python 2.x integration easier.\n\n :param str telegram_data: full telegram from start ('/') to checksum\n ('!ABCD') including line endings in between the telegram's lines\n :rtype: dict\n :returns: Shortened example:\n {\n ..\n r'\\d-\\d:96\\.1\\.1.+?\\r\\n': , # EQUIPMENT_IDENTIFIER\n r'\\d-\\d:1\\.8\\.1.+?\\r\\n': , # ELECTRICITY_USED_TARIFF_1\n r'\\d-\\d:24\\.3\\.0.+?\\r\\n.+?\\r\\n': , # GAS_METER_READING\n ..\n }\n :raises ParseError:\n :raises InvalidChecksumError:\n \"\"\"\n\n if arg_0.apply_checksum_validation \\\n and arg_0.telegram_specification['checksum_support']:\n arg_0.validate_checksum(arg_1)\n\n arg_2 = {}\n\n 
for arg_3, arg_4 in arg_0.telegram_specification['objects'].items():\n        arg_5 = re.search(arg_3, arg_1, re.DOTALL)\n\n        # Some signatures are optional and may not be present,\n        # so only Func lines that match\n        if arg_5:\n            arg_2[arg_3] = arg_4.Func(arg_5.group(0))\n\n    return arg_2"} +{"_id": "doc_7197", "title": "", "text": "def Func(arg_0, arg_1):\n    \"\"\"\n    Loads config from string or dict\n    \"\"\"\n    if isinstance(arg_1, six.string_types):\n        try:\n            arg_1 = json.loads(arg_1)\n        except ValueError:\n            pass\n    if not isinstance(arg_1, dict):\n        raise TypeError('config block must be an instance '\n                        'of dict or a valid NetJSON string')\n    return arg_1"} +{"_id": "doc_7198", "title": "", "text": "def Func(arg_0, arg_1=True):\n    \"\"\"\n    Converts the configuration dictionary into the corresponding configuration format\n\n    :param files: whether to include \"additional files\" in the output or not;\n                  defaults to ``True``\n    :returns: string with output\n    \"\"\"\n    arg_0.validate()\n    # convert NetJSON config to intermediate data structure\n    if arg_0.intermediate_data is None:\n        arg_0.to_intermediate()\n    # support multiple Funcers\n    arg_2 = getattr(arg_0, 'Funcers', None) or [arg_0.Funcer]\n    # convert intermediate data structure to native configuration\n    arg_3 = ''\n    for arg_4 in arg_2:\n        arg_5 = arg_4(arg_0)\n        arg_3 += arg_5.Func()\n        # remove reference to Funcer instance (not needed anymore)\n        del arg_5\n    # are we required to include\n    # additional files?\n    if arg_1:\n        # Func additional files\n        arg_6 = arg_0._Func_files()\n        if arg_6:\n            # max 2 new lines\n            arg_3 += arg_6.replace('\\\n\\\n\\\n', '\\\n\\\n')\n    # return the configuration\n    return arg_3"} +{"_id": "doc_7199", "title": "", "text": "def Func(arg_0):\n    \"\"\"\n    Returns a ``BytesIO`` instance representing an in-memory tar.gz archive\n    containing the native router configuration.\n\n    :returns: in-memory tar.gz archive, instance of ``BytesIO``\n    \"\"\"\n    arg_1 = BytesIO()\n    arg_2 = tarfile.open(fileobj=arg_1, mode='w')\n    arg_0._Func_contents(arg_2)\n    arg_0._process_files(arg_2)\n    arg_2.close()\n    arg_1.seek(0) # set pointer to beginning of stream\n    # `mtime` parameter of gzip file must be 0, otherwise any checksum operation\n    # would return a different digest even when content is the same.\n    # to achieve this we must use the python `gzip` library because the `tarfile`\n    # library does not seem to offer the possibility to modify the gzip `mtime`.\n    arg_3 = BytesIO()\n    arg_4 = gzip.GzipFile(fileobj=arg_3, mode='wb', mtime=0)\n    arg_4.write(arg_1.getvalue())\n    arg_4.close()\n    arg_3.seek(0) # set pointer to beginning of stream\n    return arg_3"} +{"_id": "doc_7200", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=arg_5):\n    \"\"\"\n    Adds a single file in tarfile instance.\n\n    :param tar: tarfile instance\n    :param name: string representing filename or path\n    :param contents: string representing file contents\n    :param mode: string representing file mode, defaults to 644\n    :returns: None\n    \"\"\"\n    arg_6 = BytesIO(arg_3.encode('utf8'))\n    arg_7 = tarfile.TarInfo(arg_2=arg_2)\n    arg_7.size = len(arg_3)\n    # mtime must be 0 or any checksum operation\n    # will return a different digest even when content is the same\n    arg_7.mtime = 0\n    arg_7.type = tarfile.REGTYPE\n    arg_7.mode = int(arg_4, 8) # permissions converted to decimal notation\n    arg_1.addfile(tarinfo=arg_7, fileobj=arg_6)"} +{"_id": "doc_7201", "title": "", "text": "def Func(arg_0, arg_1):\n    \"\"\"\n    Parses a native configuration and converts\n    it to a NetJSON configuration dictionary\n    \"\"\"\n    if not 
hasattr(arg_0, 'Funcr') or not arg_0.Funcr:\n raise NotImplementedError('Parser class not specified')\n arg_2 = arg_0.Funcr(arg_1)\n arg_0.intermediate_data = arg_2.intermediate_data\n del arg_2\n arg_0.to_netjson()"} +{"_id": "doc_7202", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Merges ``list2`` on top of ``list1``.\n\n If both lists contain dictionaries which have keys specified\n in ``identifiers`` which have equal values, those dicts will\n be merged (dicts in ``list2`` will override dicts in ``list1``).\n The remaining elements will be summed in order to create a list\n which contains elements of both lists.\n\n :param list1: ``list`` from template\n :param list2: ``list`` from config\n :param identifiers: ``list`` or ``None``\n :returns: merged ``list``\n \"\"\"\n arg_2 = arg_2 or []\n arg_3 = {'list1': OrderedDict(), 'list2': OrderedDict()}\n arg_4 = 1\n for arg_5 in [arg_0, arg_1]:\n arg_6 = arg_3['list{0}'.format(arg_4)]\n for arg_7 in arg_5:\n # merge by internal python id by default\n arg_8 = id(arg_7)\n # if el is a dict, merge by keys specified in ``identifiers``\n if isinstance(arg_7, dict):\n for arg_9 in arg_2:\n if arg_9 in arg_7:\n arg_8 = arg_7[arg_9]\n break\n arg_6[arg_8] = deepcopy(arg_7)\n arg_4 += 1\n arg_10 = merge_config(arg_3['list1'], arg_3['list2'])\n return list(arg_10.values())"} +{"_id": "doc_7203", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Evaluates variables in ``data``\n\n :param data: data structure containing variables, may be\n ``str``, ``dict`` or ``list``\n :param context: ``dict`` containing variables\n :returns: modified data structure\n \"\"\"\n arg_1 = arg_1 or {}\n if isinstance(arg_0, (dict, list)):\n if isinstance(arg_0, dict):\n arg_2 = arg_0.items()\n elif isinstance(arg_0, list):\n arg_2 = enumerate(arg_0)\n for arg_3, arg_4 in arg_2:\n arg_0[arg_3] = Func(arg_4, arg_1)\n elif isinstance(arg_0, six.string_types):\n arg_5 = var_pattern.findall(arg_0)\n for arg_6 in arg_5:\n arg_6 = arg_6.strip()\n # if found multiple variables, create a new regexp pattern for each\n # variable, otherwise different variables would get the same value\n # (see https://github.com/openwisp/netjsonconfig/issues/55)\n if len(arg_5) > 1:\n arg_7 = r'\\{\\{(\\s*%s\\s*)\\}\\}' % arg_6\n # in case of single variables, use the precompiled\n # regexp pattern to save computation\n else:\n arg_7 = var_pattern\n if arg_6 in arg_1:\n arg_0 = re.sub(arg_7, arg_1[arg_6], arg_0)\n return arg_0"} +{"_id": "doc_7204", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Looks for a key in a dictionary, if found returns\n a deepcopied value, otherwise returns default value\n \"\"\"\n arg_3 = arg_0.get(arg_1, arg_2)\n if arg_3:\n return deepcopy(arg_3)\n return arg_3"} +{"_id": "doc_7205", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Loops over item and performs type casting\n according to supplied schema fragment\n \"\"\"\n if arg_2 is None:\n arg_2 = arg_0._schema\n arg_3 = arg_2['properties']\n for arg_4, arg_5 in arg_1.items():\n if arg_4 not in arg_3:\n continue\n try:\n arg_6 = arg_3[arg_4]['type']\n except KeyError:\n arg_6 = None\n if arg_6 == 'integer' and not isinstance(arg_5, int):\n arg_5 = int(arg_5)\n elif arg_6 == 'boolean' and not isinstance(arg_5, bool):\n arg_5 = arg_5 == '1'\n arg_1[arg_4] = arg_5\n return arg_1"} +{"_id": "doc_7206", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n generates install.sh and adds it to included files\n \"\"\"\n arg_2 = 
arg_0._render_template('install.sh', arg_1)\n arg_0.config.setdefault('files', []) # file list might be empty\n # add install.sh to list of included files\n arg_0._add_unique_file({\n \"path\": \"/install.sh\",\n \"contents\": arg_2,\n \"mode\": \"755\"\n })"} +{"_id": "doc_7207", "title": "", "text": "def Func(arg_0):\n \"\"\"\n generates tc_script.sh and adds it to included files\n \"\"\"\n # fill context\n arg_1 = dict(tc_options=arg_0.config.get('tc_options', []))\n # import pdb; pdb.set_trace()\n arg_2 = arg_0._render_template('tc_script.sh', arg_1)\n arg_0.config.setdefault('files', []) # file list might be empty\n # add tc_script.sh to list of included files\n arg_0._add_unique_file({\n \"path\": \"/tc_script.sh\",\n \"contents\": arg_2,\n \"mode\": \"755\"\n })"} +{"_id": "doc_7208", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Renders configuration by using the jinja2 templating engine\n \"\"\"\n # get jinja2 template\n arg_1 = '{0}.jinja2'.format(arg_0.get_name())\n arg_2 = arg_0.template_env.get_template(arg_1)\n # Func template and cleanup\n arg_3 = getattr(arg_0.backend, 'intermediate_data', {})\n arg_4 = arg_2.Func(data=arg_3)\n return arg_0.cleanup(arg_4)"} +{"_id": "doc_7209", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n converts NetJSON address to\n UCI intermediate data structure\n \"\"\"\n arg_2 = arg_0.get_copy(arg_1, 'addresses')\n # do not ignore interfaces if they do not contain any address\n if not arg_2:\n return [{'proto': 'none'}]\n arg_3 = []\n arg_4 = {}\n arg_5 = []\n for arg_6 in arg_2:\n arg_7 = arg_6.get('family')\n # dhcp\n if arg_6['proto'] == 'dhcp':\n arg_6['proto'] = 'dhcp' if arg_7 == 'ipv4' else 'dhcpv6'\n arg_5.append(arg_0.__intermediate_address(arg_6))\n continue\n if 'gateway' in arg_6:\n arg_8 = 'gateway' if arg_7 == 'ipv4' else 'ip6gw'\n arg_1[arg_8] = arg_6['gateway']\n # static\n arg_9 = 'ipaddr' if arg_7 == 'ipv4' else 'ip6addr'\n arg_4.setdefault(arg_9, [])\n arg_4[arg_9].append('{address}/{mask}'.format(**arg_6))\n arg_4.update(arg_0.__intermediate_address(arg_6))\n if arg_4:\n # do not use CIDR notation when using a single ipv4\n # see https://github.com/openwisp/netjsonconfig/issues/54\n if len(arg_4.get('ipaddr', [])) == 1:\n arg_10 = ip_interface(six.text_type(arg_4['ipaddr'][0]))\n arg_4['ipaddr'] = str(arg_10.ip)\n arg_4['netmask'] = str(arg_10.netmask)\n # do not use lists when using a single ipv6 address\n # (avoids to change output of existing configuration)\n if len(arg_4.get('ip6addr', [])) == 1:\n arg_4['ip6addr'] = arg_4['ip6addr'][0]\n arg_3.append(arg_4)\n if arg_5:\n arg_3 += arg_5\n return arg_3"} +{"_id": "doc_7210", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n converts NetJSON interface to\n UCI intermediate data structure\n \"\"\"\n arg_1.update({\n '.type': 'interface',\n '.name': arg_2,\n 'ifname': arg_1.pop('name')\n })\n if 'network' in arg_1:\n del arg_1['network']\n if 'mac' in arg_1:\n # mac address of wireless interface must\n # be set in /etc/config/wireless, therfore\n # we can skip this in /etc/config/network\n if arg_1.get('type') != 'wireless':\n arg_1['macaddr'] = arg_1['mac']\n del arg_1['mac']\n if 'autostart' in arg_1:\n arg_1['auto'] = arg_1['autostart']\n del arg_1['autostart']\n if 'disabled' in arg_1:\n arg_1['enabled'] = not arg_1['disabled']\n del arg_1['disabled']\n if 'wireless' in arg_1:\n del arg_1['wireless']\n if 'addresses' in arg_1:\n del arg_1['addresses']\n return arg_1"} +{"_id": "doc_7211", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n 
deletes NetJSON address keys\n \"\"\"\n for arg_2 in arg_0._address_keys:\n if arg_2 in arg_1:\n del arg_1[arg_2]\n return arg_1"} +{"_id": "doc_7212", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n converts NetJSON bridge to\n UCI intermediate data structure\n \"\"\"\n # ensure type \"bridge\" is only given to one logical interface\n if arg_1['type'] == 'bridge' and arg_2 < 2:\n arg_3 = ' '.join(arg_1.pop('bridge_members'))\n # put bridge members in ifname attribute\n if arg_3:\n arg_1['ifname'] = arg_3\n # if no members, this is an empty bridge\n else:\n arg_1['bridge_empty'] = True\n del arg_1['ifname']\n # bridge has already been defined\n # but we need to add more references to it\n elif arg_1['type'] == 'bridge' and arg_2 >= 2:\n # openwrt adds \"br-\" prefix to bridge interfaces\n # we need to take this into account when referring\n # to these physical names\n if 'br-' not in arg_1['ifname']:\n arg_1['ifname'] = 'br-{ifname}'.format(**arg_1)\n # do not repeat bridge attributes (they have already been processed)\n for arg_4 in ['type', 'bridge_members', 'stp', 'gateway']:\n if arg_4 in arg_1:\n del arg_1[arg_4]\n elif arg_1['type'] != 'bridge':\n del arg_1['type']\n return arg_1"} +{"_id": "doc_7213", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n determines UCI interface \"proto\" option\n \"\"\"\n # proto defaults to static\n arg_3 = arg_2.pop('proto', 'static')\n if 'proto' not in arg_1:\n return arg_3\n else:\n # allow override on interface level\n return arg_1.pop('proto')"} +{"_id": "doc_7214", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n determines UCI interface \"dns\" option\n \"\"\"\n # allow override\n if 'dns' in arg_1:\n return arg_1['dns']\n # ignore if using DHCP or if \"proto\" is none\n if arg_2['proto'] in ['dhcp', 'dhcpv6', 'none']:\n return None\n arg_3 = arg_0.netjson.get('dns_servers', None)\n if arg_3:\n return ' '.join(arg_3)"} +{"_id": "doc_7215", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n only for mac80211 driver\n \"\"\"\n arg_2 = arg_1.pop('protocol')\n arg_3 = arg_1.pop('channel_width')\n # allow overriding htmode\n if 'htmode' in arg_1:\n return arg_1['htmode']\n if arg_2 == '802.11n':\n return 'HT{0}'.format(arg_3)\n elif arg_2 == '802.11ac':\n return 'VHT{0}'.format(arg_3)\n # disables n\n return 'NONE'"} +{"_id": "doc_7216", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n determines NetJSON protocol radio attribute\n \"\"\"\n arg_2 = arg_1.get('htmode')\n arg_3 = arg_1.get('hwmode', None)\n if arg_2.startswith('HT'):\n return '802.11n'\n elif arg_2.startswith('VHT'):\n return '802.11ac'\n return '802.{0}'.format(arg_3)"} +{"_id": "doc_7217", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None, arg_7=None,\n arg_8=None):\n \"\"\"\n Returns a configuration dictionary representing an OpenVPN client configuration\n that is compatible with the passed server configuration.\n\n :param host: remote VPN server\n :param server: dictionary representing a single OpenVPN server configuration\n :param ca_path: optional string representing path to CA, will consequently add\n a file in the resulting configuration dictionary\n :param ca_contents: optional string representing contents of CA file\n :param cert_path: optional string representing path to certificate, will consequently add\n a file in the resulting configuration dictionary\n :param cert_contents: optional string representing contents of cert file\n :param key_path: optional string 
representing path to key, will consequently add\n a file in the resulting configuration dictionary\n :param key_contents: optional string representing contents of key file\n :returns: dictionary representing a single OpenVPN client configuration\n \"\"\"\n # client defaults\n arg_9 = {\n \"mode\": \"p2p\",\n \"nobind\": True,\n \"resolv_retry\": \"infinite\",\n \"tls_client\": True\n }\n # remote\n arg_10 = arg_2.get('port') or 1195\n arg_9['remote'] = [{'host': arg_1, 'port': arg_10}]\n # proto\n if arg_2.get('proto') == 'tcp-server':\n arg_9['proto'] = 'tcp-client'\n else:\n arg_9['proto'] = 'udp'\n # determine if pull must be True\n if 'server' in arg_2 or 'server_bridge' in arg_2:\n arg_9['pull'] = True\n # tls_client\n if 'tls_server' not in arg_2 or not arg_2['tls_server']:\n arg_9['tls_client'] = False\n # ns_cert_type\n arg_11 = {None: '',\n '': '',\n 'client': 'server'}\n arg_9['ns_cert_type'] = arg_11[arg_2.get('ns_cert_type')]\n # remote_cert_tls\n arg_12 = {None: '',\n '': '',\n 'client': 'server'}\n arg_9['remote_cert_tls'] = arg_12[arg_2.get('remote_cert_tls')]\n arg_13 = ['name', 'dev_type', 'dev', 'comp_lzo', 'auth',\n 'cipher', 'ca', 'cert', 'key', 'pkcs12', 'mtu_disc', 'mtu_test',\n 'fragment', 'mssfix', 'keepalive', 'persist_tun', 'mute',\n 'persist_key', 'script_security', 'user', 'group', 'log',\n 'mute_replay_warnings', 'secret', 'reneg_sec', 'tls_timeout',\n 'tls_cipher', 'float', 'fast_io', 'verb']\n for arg_14 in arg_13:\n if arg_14 in arg_2:\n arg_9[arg_14] = arg_2[arg_14]\n arg_15 = arg_0._Func_files(arg_9, arg_3, arg_4,\n arg_5, arg_6,\n arg_7, arg_8)\n return {\n 'openvpn': [arg_9],\n 'files': arg_15\n }"} +{"_id": "doc_7218", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None,\n arg_5=None, arg_6=None, arg_7=None):\n \"\"\"\n returns a list of NetJSON extra files for automatically generated clients\n produces side effects in ``client`` dictionary\n \"\"\"\n arg_8 = []\n if arg_2 and arg_3:\n arg_1['ca'] = arg_2\n arg_8.append(dict(path=arg_2,\n contents=arg_3,\n mode=DEFAULT_FILE_MODE))\n if arg_4 and arg_5:\n arg_1['cert'] = arg_4\n arg_8.append(dict(path=arg_4,\n contents=arg_5,\n mode=DEFAULT_FILE_MODE))\n if arg_6 and arg_7:\n arg_1['key'] = arg_6\n arg_8.append(dict(path=arg_6,\n contents=arg_7,\n mode=DEFAULT_FILE_MODE,))\n return arg_8"} +{"_id": "doc_7219", "title": "", "text": "def Func():\n \"\"\"\n parse requirements.txt, ignore links, exclude comments\n \"\"\"\n arg_0 = []\n for arg_1 in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if arg_1.startswith('#') or arg_1 == '' or arg_1.startswith('http') or arg_1.startswith('git'):\n continue\n # add line to requirements\n arg_0.append(arg_1.replace('\\n', ''))\n # add py2-ipaddress if python2\n if sys.version_info.major < 3:\n arg_0.append('py2-ipaddress')\n return arg_0"} +{"_id": "doc_7220", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Get all Func of this node. Additional arguments may also be\n specified that will be passed to the query function.\n \"\"\"\n return arg_0.__api.Func(query=EqualsOperator(\"certname\", arg_0.name),\n **arg_1)"} +{"_id": "doc_7221", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a single fact from this node.\"\"\"\n arg_2 = arg_0.facts(arg_1=arg_1)\n return next(Func for Func in arg_2)"} +{"_id": "doc_7222", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"Get all resources of this node or all resources of the specified\n type. 
Additional arguments may also be specified that will be passed\n    to the query function.\n    \"\"\"\n    if arg_1 is None:\n        Func = arg_0.__api.resources(\n            query=EqualsOperator(\"certname\", arg_0.name),\n            **arg_3)\n    elif arg_1 is not None and arg_2 is None:\n        Func = arg_0.__api.resources(\n            arg_1=arg_1,\n            query=EqualsOperator(\"certname\", arg_0.name),\n            **arg_3)\n    else:\n        Func = arg_0.__api.resources(\n            arg_1=arg_1,\n            arg_2=arg_2,\n            query=EqualsOperator(\"certname\", arg_0.name),\n            **arg_3)\n    return Func"} +{"_id": "doc_7223", "title": "", "text": "def Func(arg_0, **arg_1):\n    \"\"\"Get all Func for this node. Additional arguments may also be\n    specified that will be passed to the query function.\n    \"\"\"\n    return arg_0.__api.Func(\n        query=EqualsOperator(\"certname\", arg_0.name),\n        **arg_1)"} +{"_id": "doc_7224", "title": "", "text": "def Func(arg_0):\n    \"\"\"A Func that will be used to construct the final\n    URL we're going to query against.\n\n    :returns: A URL of the form: ``proto://host:port``.\n    :rtype: :obj:`string`\n    \"\"\"\n    return '{proto}://{host}:{port}{url_path}'.format(\n        proto=arg_0.protocol,\n        host=arg_0.host,\n        port=arg_0.port,\n        url_path=arg_0.url_path,\n    )"} +{"_id": "doc_7225", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n    \"\"\"The complete URL we will end up querying. Depending on the\n    endpoint we pass in this will result in different URL's with\n    different prefixes.\n\n    :param endpoint: The PuppetDB API endpoint we want to query.\n    :type endpoint: :obj:`string`\n    :param path: An additional path if we don't wish to query the\\\n        bare endpoint.\n    :type path: :obj:`string`\n\n    :returns: A URL constructed from :func:`baseFunc` with the\\\n        appropriate API version/prefix and the rest of the path added\\\n        to it.\n    :rtype: :obj:`string`\n    \"\"\"\n\n    log.debug('Func called with endpoint: {0} and path: {1}'.format(\n        arg_1, arg_2))\n\n    try:\n        arg_1 = ENDPOINTS[arg_1]\n    except KeyError:\n        # If we reach this we're trying to query an endpoint that doesn't\n        # exist. This shouldn't happen unless someone made a booboo.\n        raise APIError\n\n    arg_3 = '{baseFunc}/{endpoint}'.format(\n        baseFunc=arg_0.baseFunc,\n        arg_1=arg_1,\n    )\n\n    if arg_2 is not None:\n        arg_3 = '{0}/{1}'.format(arg_3, quote(arg_2))\n\n    return arg_3"} +{"_id": "doc_7226", "title": "", "text": "def Func(arg_0, arg_1=2, arg_2=False, **arg_3):\n    \"\"\"Query for nodes by either name or query. If both aren't\n    provided this will return a list of all nodes. This method\n    also fetches the nodes status and event counts of the latest\n    report from puppetdb.\n\n    :param with_status: (optional) include the node status in the\\\n        returned nodes\n    :type with_status: :bool:\n    :param unreported: (optional) amount of hours when a node gets\n        marked as unreported\n    :type unreported: :obj:`None` or integer\n    :param \\\*\\\*kwargs: The rest of the keyword arguments are passed\n        to the _query function\n\n    :returns: A generator yielding Nodes.\n    :rtype: :class:`pypuppetdb.types.Node`\n    \"\"\"\n    Func = arg_0._query('nodes', **arg_3)\n    arg_5 = datetime.datetime.utcnow()\n    # If we happen to only get one node back it\n    # won't be inside a list so iterating over it\n    # goes boom. 
Therefor we wrap a list around it.\n if type(Func) == dict:\n Func = [Func, ]\n\n if arg_2:\n arg_6 = arg_0.event_counts(\n query=EqualsOperator(\"latest_report?\", True),\n summarize_by='certname'\n )\n\n for arg_7 in Func:\n arg_7['status_report'] = None\n arg_7['events'] = None\n\n if arg_2:\n arg_8 = [s for s in arg_6\n if s['subject']['title'] == arg_7['certname']]\n\n try:\n arg_7['status_report'] = arg_7['latest_report_status']\n\n if arg_8:\n arg_7['events'] = arg_8[0]\n except KeyError:\n if arg_8:\n arg_7['events'] = arg_8 = arg_8[0]\n if arg_8['successes'] > 0:\n arg_7['status_report'] = 'changed'\n if arg_8['noops'] > 0:\n arg_7['status_report'] = 'noop'\n if arg_8['failures'] > 0:\n arg_7['status_report'] = 'failed'\n else:\n arg_7['status_report'] = 'unchanged'\n\n # node report age\n if arg_7['report_timestamp'] is not None:\n try:\n arg_9 = json_to_datetime(\n arg_7['report_timestamp'])\n arg_9 = arg_9.replace(tzinfo=None)\n arg_10 = arg_5 - timedelta(hours=arg_1)\n if arg_9 < arg_10:\n arg_11 = (arg_5 - arg_9)\n arg_7['unreported'] = True\n arg_7['unreported_time'] = '{0}d {1}h {2}m'.format(\n arg_11.days,\n int(arg_11.seconds / 3600),\n int((arg_11.seconds % 3600) / 60)\n )\n except AttributeError:\n arg_7['unreported'] = True\n\n if not arg_7['report_timestamp']:\n arg_7['unreported'] = True\n\n yield Node(arg_0,\n name=arg_7['certname'],\n deactivated=arg_7['deactivated'],\n expired=arg_7['expired'],\n report_timestamp=arg_7['report_timestamp'],\n catalog_timestamp=arg_7['catalog_timestamp'],\n facts_timestamp=arg_7['facts_timestamp'],\n status_report=arg_7['status_report'],\n noop=arg_7.get('latest_report_noop'),\n noop_pending=arg_7.get('latest_report_noop_pending'),\n events=arg_7['events'],\n arg_1=arg_7.get('unreported'),\n unreported_time=arg_7.get('unreported_time'),\n report_environment=arg_7['report_environment'],\n catalog_environment=arg_7['catalog_environment'],\n facts_environment=arg_7['facts_environment'],\n latest_report_hash=arg_7.get('latest_report_hash'),\n cached_catalog_status=arg_7.get('cached_catalog_status')\n )"} +{"_id": "doc_7227", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Gets a single node from PuppetDB.\n\n :param name: The name of the node search.\n :type name: :obj:`string`\n\n :return: An instance of Node\n :rtype: :class:`pypuppetdb.types.Node`\n \"\"\"\n arg_2 = arg_0.nodes(path=arg_1)\n return next(Func for Func in arg_2)"} +{"_id": "doc_7228", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the available Func for a given node.\n\n :param node: (Required) The name of the PuppetDB node.\n :type: :obj:`string`\n\n :returns: An instance of Catalog\n :rtype: :class:`pypuppetdb.types.Catalog`\n \"\"\"\n arg_2 = arg_0.Funcs(path=arg_1)\n return next(arg_3 for arg_3 in arg_2)"} +{"_id": "doc_7229", "title": "", "text": "def Func(arg_0='localhost', arg_1=8080, arg_2=False, arg_3=None,\n arg_4=None, arg_5=10, arg_6=None, arg_7='/',\n arg_8=None, arg_9=None, arg_10=None):\n \"\"\"Connect with PuppetDB. 
This will return an object allowing you\n    to query the API through its methods.\n\n    :param host: (Default: 'localhost') Hostname or IP of PuppetDB.\n    :type host: :obj:`string`\n\n    :param port: (Default: '8080') Port on which to talk to PuppetDB.\n    :type port: :obj:`int`\n\n    :param ssl_verify: (optional) Verify PuppetDB server certificate.\n    :type ssl_verify: :obj:`bool` or :obj:`string` True, False or filesystem \\\n        path to CA certificate.\n\n    :param ssl_key: (optional) Path to our client secret key.\n    :type ssl_key: :obj:`None` or :obj:`string` representing a filesystem\\\n        path.\n\n    :param ssl_cert: (optional) Path to our client certificate.\n    :type ssl_cert: :obj:`None` or :obj:`string` representing a filesystem\\\n        path.\n\n    :param timeout: (Default: 10) Number of seconds to wait for a response.\n    :type timeout: :obj:`int`\n\n    :param protocol: (optional) Explicitly specify the protocol to be used\n        (especially handy when using HTTPS with ssl_verify=False and\n        without certs)\n    :type protocol: :obj:`None` or :obj:`string`\n\n    :param url_path: (Default: '/') The URL path where PuppetDB is served\n    :type url_path: :obj:`None` or :obj:`string`\n\n    :param username: (optional) The username to use for HTTP basic\n        authentication\n    :type username: :obj:`None` or :obj:`string`\n\n    :param password: (optional) The password to use for HTTP basic\n        authentication\n    :type password: :obj:`None` or :obj:`string`\n\n    :param token: (optional) The x-auth token to use for X-Authentication\n    :type token: :obj:`None` or :obj:`string`\n    \"\"\"\n    return BaseAPI(arg_0=arg_0, arg_1=arg_1,\n                   arg_5=arg_5, arg_2=arg_2, arg_3=arg_3,\n                   arg_4=arg_4, arg_6=arg_6, arg_7=arg_7,\n                   arg_8=arg_8, arg_9=arg_9, arg_10=arg_10)"} +{"_id": "doc_7230", "title": "", "text": "def Func():\n    \"\"\"The Master has been started from the command line. Execute ad-hoc tests if desired.\"\"\"\n    # app = MyMaster()\n    arg_0 = MyMaster(log_handler=MyLogger(),\n                     listener=AppChannelListener(),\n                     soe_handler=SOEHandler(),\n                     master_application=MasterApplication())\n    _log.debug('Initialization complete. In command loop.')\n    # Ad-hoc tests can be performed at this point. 
See master_cmd.py for examples.\n arg_0.shutdown()\n _log.debug('Exiting.')\n exit()"} +{"_id": "doc_7231", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.PrintingCommandCallback.Get(),\n arg_6=arg_7.TaskConfig().Default()):\n \"\"\"\n Direct operate a set of commands\n\n :param command_set: set of command headers\n :param callback: callback that will be invoked upon completion or failure\n :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA\n \"\"\"\n arg_0.master.DirectOperate(arg_1, arg_2, arg_6)"} +{"_id": "doc_7232", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=arg_4.PrintingCommandCallback.Get(),\n arg_7=arg_8.TaskConfig().Default()):\n \"\"\"\n Select and operate a single command\n\n :param command: command to operate\n :param index: index of the command\n :param callback: callback that will be invoked upon completion or failure\n :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA\n \"\"\"\n arg_0.master.SelectAndOperate(arg_1, arg_2, arg_3, arg_7)"} +{"_id": "doc_7233", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.PrintingCommandCallback.Get(),\n arg_6=arg_7.TaskConfig().Default()):\n \"\"\"\n Select and operate a set of commands\n\n :param command_set: set of command headers\n :param callback: callback that will be invoked upon completion or failure\n :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA\n \"\"\"\n arg_0.master.SelectAndOperate(arg_1, arg_2, arg_6)"} +{"_id": "doc_7234", "title": "", "text": "def Func():\n \"\"\"The Outstation has been started from the command line. Execute ad-hoc tests if desired.\"\"\"\n arg_0 = OutstationApplication()\n _log.debug('Initialization complete. In command loop.')\n # Ad-hoc tests can be inserted here if desired. See outstation_cmd.py for examples.\n arg_0.shutdown()\n _log.debug('Exiting.')\n exit()"} +{"_id": "doc_7235", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n The Master sent an Func command to the Outstation. 
Handle it.\n\n :param command: ControlRelayOutputBlock,\n AnalogOutputInt16, AnalogOutputInt32, AnalogOutputFloat32, or AnalogOutputDouble64.\n :param index: int\n :param op_type: FuncType\n :return: CommandStatus\n \"\"\"\n OutstationApplication.process_point_value('Func', arg_1, arg_2, arg_3)\n return opendnp3.CommandStatus.SUCCESS"} +{"_id": "doc_7236", "title": "", "text": "def Func(arg_0=arg_1, arg_2=arg_3, arg_4=False):\n \"\"\"\n Create Bloomberg connection\n\n Returns:\n (Bloomberg connection, if connection is new)\n \"\"\"\n if arg_7 in arg_6():\n if not isinstance(arg_6()[arg_7], pdblp.BCon):\n del arg_6()[arg_7]\n\n if (arg_7 in arg_6()) and (not arg_4):\n arg_5 = arg_6()[arg_7]\n if getattr(arg_5, '_session').start(): arg_5.start()\n return arg_5, False\n\n else:\n arg_5 = pdblp.BCon(arg_0=arg_0, arg_2=arg_2)\n arg_6()[arg_7] = arg_5\n arg_5.start()\n return arg_5, True"} +{"_id": "doc_7237", "title": "", "text": "def Func():\n \"\"\"\n Stop and destroy Bloomberg connection\n \"\"\"\n if _CON_SYM_ in globals():\n arg_0 = globals().pop(_CON_SYM_)\n if not getattr(arg_0, '_session').start(): arg_0.stop()"} +{"_id": "doc_7238", "title": "", "text": "def Func():\n \"\"\"\n Parse markdown as description\n \"\"\"\n arg_0 = f'{PACKAGE_ROOT}/README.md'\n if path.exists(arg_0):\n with open(arg_0, 'r', encoding='utf-8') as f:\n arg_1 = f.read()\n return arg_1"} +{"_id": "doc_7239", "title": "", "text": "def Func(arg_0: arg_1.DataFrame, arg_3: arg_1.DataFrame) -> arg_1.DataFrame:\n \"\"\"\n Standardized earning outputs and add percentage by each blocks\n\n Args:\n data: earning data block\n header: earning headers\n\n Returns:\n pd.DataFrame\n\n Examples:\n >>> Func(\n ... data=pd.read_pickle('xbbg/tests/data/sample_earning.pkl'),\n ... header=pd.read_pickle('xbbg/tests/data/sample_earning_header.pkl')\n ... 
).round(2)\n level fy2017 fy2017_pct\n Asia-Pacific 1.0 3540.0 66.43\n \u00a0\u00a0\u00a0China 2.0 1747.0 49.35\n \u00a0\u00a0\u00a0Japan 2.0 1242.0 35.08\n \u00a0\u00a0\u00a0Singapore 2.0 551.0 15.56\n United States 1.0 1364.0 25.60\n Europe 1.0 263.0 4.94\n Other Countries 1.0 162.0 3.04\n \"\"\"\n if arg_0.dropna(subset=['value']).empty: return arg_1.DataFrame()\n\n arg_4 = arg_1.concat([\n grp.loc[:, ['value']].set_index(arg_3.value)\n for arg_14, grp in arg_0.groupby(arg_0.position)\n ], axis=1)\n arg_4.index.name = None\n arg_4.columns = arg_4.iloc[0]\n arg_4 = arg_4.iloc[1:].transpose().reset_index().apply(\n arg_1.to_numeric, downcast='float', errors='ignore'\n )\n arg_4.rename(\n arg_7=lambda vv: '_'.join(vv.lower().split()).replace('fy_', 'fy'),\n inplace=True,\n )\n\n arg_8 = arg_4.columns[arg_4.columns.str.startswith('fy')]\n arg_9 = arg_4.level == 1\n for arg_10 in arg_8:\n arg_4.loc[:, arg_10] = arg_4.loc[:, arg_10].round(1)\n arg_12 = f'{yr}_pct'\n arg_4.loc[:, arg_12] = 0.\n arg_4.loc[arg_9, arg_12] = arg_4.loc[arg_9, arg_12].astype(float).round(1)\n arg_4.loc[arg_9, arg_12] = arg_4.loc[arg_9, arg_10] / arg_4.loc[arg_9, arg_10].sum() * 100\n arg_13 = []\n for arg_14, arg_15 in arg_4[::-1].iterrows():\n if arg_15.level > 2: continue\n if arg_15.level == 1:\n if len(arg_13) == 0: continue\n arg_16 = arg_1.concat(arg_13, axis=1).transpose()\n arg_4.loc[arg_16.index, arg_12] = \\\n arg_4.loc[arg_16.index, arg_10] / arg_4.loc[arg_16.index, arg_10].sum() * 100\n arg_13 = []\n if arg_15.level == 2: arg_13.append(arg_15)\n\n arg_4.set_index('segment_name', inplace=True)\n arg_4.index.name = None\n return arg_4"} +{"_id": "doc_7240", "title": "", "text": "def Func(arg_0: arg_1.DataFrame, arg_3, arg_4=None) -> arg_1.DataFrame:\n \"\"\"\n Format `pdblp` outputs to column-based results\n\n Args:\n data: `pdblp` result\n source: `bdp` or `bds`\n col_maps: rename columns with these mappings\n\n Returns:\n pd.DataFrame\n\n Examples:\n >>> Func(\n ... data=pd.read_pickle('xbbg/tests/data/sample_bdp.pkl'),\n ... source='bdp'\n ... ).reset_index()\n ticker name\n 0 QQQ US Equity INVESCO QQQ TRUST SERIES 1\n 1 SPY US Equity SPDR S&P 500 ETF TRUST\n >>> Func(\n ... data=pd.read_pickle('xbbg/tests/data/sample_dvd.pkl'),\n ... source='bds', col_maps={'Dividend Frequency': 'dvd_freq'}\n ... 
).loc[:, ['ex_date', 'dividend_amount', 'dvd_freq']].reset_index()\n ticker ex_date dividend_amount dvd_freq\n 0 C US Equity 2018-02-02 0.32 Quarter\n \"\"\"\n if arg_0.empty: return arg_1.DataFrame()\n if arg_3 == 'bdp': arg_5 = ['ticker', 'field', 'value']\n else: arg_5 = ['ticker', 'field', 'name', 'value', 'position']\n if any(arg_6 not in arg_0 for arg_6 in arg_5): return arg_1.DataFrame()\n if arg_0.dropna(subset=['value']).empty: return arg_1.DataFrame()\n\n if arg_3 == 'bdp':\n arg_7 = arg_1.DataFrame(arg_1.concat([\n arg_1.Series({**{'ticker': t}, **grp.set_index('field').value.to_dict()})\n for t, grp in arg_0.groupby('ticker')\n ], axis=1, sort=False)).transpose().set_index('ticker')\n else:\n arg_7 = arg_1.DataFrame(arg_1.concat([\n grp.loc[:, ['name', 'value']].set_index('name')\n .transpose().reset_index(drop=True).assign(ticker=t)\n for (t, _), grp in arg_0.groupby(['ticker', 'position'])\n ], sort=False)).reset_index(drop=True).set_index('ticker')\n arg_7.columns.name = None\n\n if arg_4 is None: arg_4 = dict()\n return arg_7.rename(\n arg_8=lambda vv: arg_4.get(\n vv, vv.lower().replace(' ', '_').replace('-', '_')\n )\n ).apply(arg_1.to_numeric, errors='ignore', downcast='float')"} +{"_id": "doc_7241", "title": "", "text": "def Func(arg_0: arg_1.DataFrame, arg_3, **arg_4) -> arg_1.DataFrame:\n \"\"\"\n Format intraday data\n\n Args:\n data: pd.DataFrame from bdib\n ticker: ticker\n\n Returns:\n pd.DataFrame\n\n Examples:\n >>> Func(\n ... data=pd.read_parquet('xbbg/tests/data/sample_bdib.parq'),\n ... ticker='SPY US Equity',\n ... ).xs('close', axis=1, level=1, drop_level=False)\n ticker SPY US Equity\n field close\n 2018-12-28 09:30:00-05:00 249.67\n 2018-12-28 09:31:00-05:00 249.54\n 2018-12-28 09:32:00-05:00 249.22\n 2018-12-28 09:33:00-05:00 249.01\n 2018-12-28 09:34:00-05:00 248.86\n >>> Func(\n ... data=pd.read_parquet('xbbg/tests/data/sample_bdib.parq'),\n ... ticker='SPY US Equity', price_only=True\n ... )\n ticker SPY US Equity\n 2018-12-28 09:30:00-05:00 249.67\n 2018-12-28 09:31:00-05:00 249.54\n 2018-12-28 09:32:00-05:00 249.22\n 2018-12-28 09:33:00-05:00 249.01\n 2018-12-28 09:34:00-05:00 248.86\n \"\"\"\n if arg_0.empty: return arg_1.DataFrame()\n arg_0.columns = arg_1.MultiIndex.from_product([\n [arg_3], arg_0.rename(arg_5=dict(numEvents='num_trds')).columns\n ], names=['ticker', 'field'])\n arg_0.index.name = None\n if arg_4.get('price_only', False):\n arg_8 = dict(axis=1, level=1)\n arg_9 = arg_0.xs('close', **arg_8)\n arg_10 = arg_0.xs('volume', **arg_8).iloc[:, 0]\n return arg_9.loc[arg_10 > 0] if arg_10.min() > 0 else arg_9\n else: return arg_0"} +{"_id": "doc_7242", "title": "", "text": "def Func(arg_0, arg_1) -> str:\n \"\"\"\n Logging info for given tickers and fields\n\n Args:\n tickers: tickers\n flds: fields\n\n Returns:\n str\n\n Examples:\n >>> print(Func(\n ... tickers=['NVDA US Equity'], flds=['Name', 'Security_Name']\n ... 
))\n tickers: ['NVDA US Equity']\n fields: ['Name', 'Security_Name']\n \"\"\"\n arg_2 = '\\n'.join([f'tickers: {tickers[:8]}'] + [\n f' {tickers[n:(n + 8)]}' for n in range(8, len(arg_0), 8)\n ])\n return f'{full_list}\\nfields: {flds}'"} +{"_id": "doc_7243", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None, arg_3='today', arg_4=None, **arg_5\n) -> pd.DataFrame:\n \"\"\"\n Bloomberg historical data\n\n Args:\n tickers: ticker(s)\n flds: field(s)\n start_date: start date\n end_date: end date - default today\n adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None\n exact match of above words will adjust for corresponding events\n Case 0: `-` no adjustment for dividend or split\n Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits\n Case 2: `adjust` will adjust for splits and ignore all dividends\n Case 3: `all` == `dvd|split` == adjust for all\n Case 4: None == Bloomberg default OR use kwargs\n **kwargs: overrides\n\n Returns:\n pd.DataFrame\n\n Examples:\n >>> res = Func(\n ... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],\n ... start_date='2018-02-05', end_date='2018-02-07',\n ... ).round(2).transpose()\n >>> res.index.name = None\n >>> res.columns.name = None\n >>> res\n 2018-02-05 2018-02-06 2018-02-07\n VIX Index High 38.80 50.30 31.64\n Low 16.80 22.42 21.17\n Last_Price 37.32 29.98 27.73\n >>> Func(\n ... tickers='AAPL US Equity', flds='Px_Last',\n ... start_date='20140605', end_date='20140610', adjust='-'\n ... ).round(2)\n ticker AAPL US Equity\n field Px_Last\n 2014-06-05 647.35\n 2014-06-06 645.57\n 2014-06-09 93.70\n 2014-06-10 94.25\n >>> Func(\n ... tickers='AAPL US Equity', flds='Px_Last',\n ... start_date='20140606', end_date='20140609',\n ... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,\n ... 
).round(2)\n ticker AAPL US Equity\n field Px_Last\n 2014-06-06 645.57\n 2014-06-09 93.70\n \"\"\"\n arg_6 = logs.get_logger(Func, level=arg_5.pop('log', logs.LOG_LEVEL))\n\n # Dividend adjustments\n if isinstance(arg_4, str) and arg_4:\n if arg_4 == 'all':\n arg_5['CshAdjNormal'] = True\n arg_5['CshAdjAbnormal'] = True\n arg_5['CapChg'] = True\n else:\n arg_5['CshAdjNormal'] = 'normal' in arg_4 or 'dvd' in arg_4\n arg_5['CshAdjAbnormal'] = 'abn' in arg_4 or 'dvd' in arg_4\n arg_5['CapChg'] = 'split' in arg_4\n\n arg_7, arg_8 = create_connection()\n arg_9 = assist.proc_elms(**arg_5)\n arg_10 = assist.proc_ovrds(**arg_5)\n\n if isinstance(arg_0, str): arg_0 = [arg_0]\n if arg_1 is None: arg_1 = ['Last_Price']\n if isinstance(arg_1, str): arg_1 = [arg_1]\n arg_11 = utils.fmt_dt(arg_3, fmt='%Y%m%d')\n if arg_2 is None:\n arg_2 = pd.Timestamp(arg_11) - relativedelta(months=3)\n arg_12 = utils.fmt_dt(arg_2, fmt='%Y%m%d')\n\n arg_6.info(\n f'loading historical data from Bloomberg:\\n'\n f'{assist.info_qry(tickers=tickers, flds=flds)}'\n )\n\n arg_6.debug(\n f'\\nflds={flds}\\nelms={elms}\\novrds={ovrds}\\nstart_date={s_dt}\\nend_date={e_dt}'\n )\n arg_13 = arg_7.Func(\n arg_0=arg_0, arg_1=arg_1, arg_9=arg_9, arg_10=arg_10, arg_2=arg_12, arg_3=arg_11\n )\n arg_13.index.name = None\n if (len(arg_1) == 1) and arg_5.get('keep_one', False):\n return arg_13.xs(arg_1[0], axis=1, level=1)\n return arg_13"} +{"_id": "doc_7244", "title": "", "text": "def Func(arg_0, arg_1, arg_2='', **arg_3) -> pd.DataFrame:\n \"\"\"\n Bloomberg Func bar data within market session\n\n Args:\n ticker: ticker\n dt: date\n session: examples include\n day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000\n **kwargs:\n ref: reference ticker or exchange for timezone\n keep_tz: if keep tz if reference ticker / exchange is given\n start_time: start time\n end_time: end time\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\n Returns:\n pd.DataFrame\n \"\"\"\n from xbbg.core import intervals\n\n arg_4 = bdib(arg_0=arg_0, arg_1=arg_1, typ=arg_3.get('typ', 'TRADE'))\n if arg_4.empty: return pd.DataFrame()\n\n arg_5 = '%H:%M:%S'\n arg_6 = intervals.SessNA\n arg_7 = arg_3.get('ref', None)\n arg_8 = pd.Series() if arg_7 is None else const.exch_info(arg_0=arg_7)\n if arg_2: arg_6 = intervals.get_interval(\n arg_0=arg_3.get('ref', arg_0), arg_2=arg_2\n )\n\n arg_9 = arg_3.get('start_time', None)\n arg_10 = arg_3.get('end_time', None)\n if arg_6 != intervals.SessNA:\n arg_9 = pd.Timestamp(arg_6.start_time).strftime(arg_5)\n arg_10 = pd.Timestamp(arg_6.end_time).strftime(arg_5)\n\n if arg_9 and arg_10:\n arg_11 = dict(arg_9=arg_9, arg_10=arg_10)\n if not arg_8.empty:\n arg_12 = arg_4.index.tz\n arg_13 = arg_4.tz_convert(arg_8.tz).between_time(**arg_11)\n if arg_3.get('keep_tz', False):\n arg_13 = arg_13.tz_convert(arg_12)\n return pd.DataFrame(arg_13)\n return pd.DataFrame(arg_4.between_time(**arg_11))\n\n return arg_4"} +{"_id": "doc_7245", "title": "", "text": "def Func(\n arg_0, arg_1='Geo', arg_2='Revenue', arg_3=None, arg_4=None, **arg_5\n) -> pd.DataFrame:\n \"\"\"\n Earning exposures by Geo or Products\n\n Args:\n ticker: ticker name\n by: [G(eo), P(roduct)]\n typ: type of Func, start with `PG_` in Bloomberg FLDS - default `Revenue`\n ccy: currency of Funcs\n level: hierarchy level of Funcs\n\n Returns:\n pd.DataFrame\n\n Examples:\n >>> data = Func('AMD US Equity', Eqy_Fund_Year=2017, Number_Of_Periods=1)\n >>> data.round(2)\n level fy2017 fy2017_pct\n Asia-Pacific 1.0 3540.0 66.43\n 
\u00a0\u00a0\u00a0China 2.0 1747.0 49.35\n \u00a0\u00a0\u00a0Japan 2.0 1242.0 35.08\n \u00a0\u00a0\u00a0Singapore 2.0 551.0 15.56\n United States 1.0 1364.0 25.60\n Europe 1.0 263.0 4.94\n Other Countries 1.0 162.0 3.04\n \"\"\"\n arg_6 = 'G' if arg_1[0].upper() == 'G' else 'P'\n arg_7 = dict(raw=True, Product_Geo_Override=arg_6)\n arg_8 = bds(tickers=arg_0, flds='PG_Bulk_Header', **arg_7, **arg_5)\n if arg_3: arg_5['Eqy_Fund_Crncy'] = arg_3\n if arg_4: arg_5['PG_Hierarchy_Level'] = arg_4\n arg_9 = bds(tickers=arg_0, flds=f'PG_{typ}', **arg_7, **arg_5)\n return assist.format_Func(arg_9=arg_9, arg_8=arg_8)"} +{"_id": "doc_7246", "title": "", "text": "def Func(arg_0: arg_1, arg_2) -> arg_1:\n \"\"\"\n Active futures contract\n\n Args:\n ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.\n dt: date\n\n Returns:\n str: ticker name\n \"\"\"\n arg_3 = arg_0.split()\n arg_4, arg_5 = ' '.join(arg_3[:-1]), arg_3[-1]\n arg_6 = const.market_info(f'{prefix[:-1]}1 {asset}')\n\n arg_7, arg_8 = f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}'\n arg_9 = fut_ticker(gen_ticker=arg_8, arg_2=arg_2, freq=arg_6['freq'])\n arg_10 = fut_ticker(gen_ticker=arg_7, arg_2=arg_2, freq=arg_6['freq'])\n\n arg_11 = bdp(tickers=[arg_10, arg_9], flds='Last_Tradeable_Dt', cache=True)\n\n if pd.Timestamp(arg_2).month < pd.Timestamp(arg_11.last_tradeable_dt[0]).month: return arg_10\n\n arg_12 = bdib(arg_0=arg_7, arg_2=arg_2)\n arg_13 = bdib(arg_0=arg_8, arg_2=arg_2)\n\n return arg_10 if arg_12[arg_7].volume.sum() > arg_13[arg_8].volume.sum() else arg_9"} +{"_id": "doc_7247", "title": "", "text": "def Func(arg_0: arg_1, arg_2, arg_3: arg_1, arg_4=arg_5.LOG_LEVEL) -> arg_1:\n \"\"\"\n Get proper ticker from generic ticker\n\n Args:\n gen_ticker: generic ticker\n dt: date\n freq: futures contract frequency\n log: level of logs\n\n Returns:\n str: exact futures ticker\n \"\"\"\n arg_7 = arg_5.get_logger(Func, level=arg_4)\n arg_2 = pd.Timestamp(arg_2)\n arg_8 = arg_0.split()\n\n arg_9 = arg_8[-1]\n if arg_9 in ['Index', 'Curncy', 'Comdty']:\n arg_10 = ' '.join(arg_8[:-1])\n arg_11, arg_12, arg_13 = arg_10[:-1], int(arg_10[-1]) - 1, arg_9\n\n elif arg_9 == 'Equity':\n arg_10 = arg_8[0]\n arg_11, arg_12, arg_13 = arg_10[:-1], int(arg_10[-1]) - 1, ' '.join(arg_8[1:])\n\n else:\n arg_7.error(f'unkonwn asset type for ticker: {gen_ticker}')\n return ''\n\n arg_14 = 4 if arg_9 == 'Comdty' else 2\n arg_15 = pd.date_range(start=arg_2, periods=max(arg_12 + arg_14, 3), arg_3=arg_3)\n arg_7.debug(f'pulling expiry dates for months: {months}')\n\n def to_fut(arg_16):\n return arg_11 + const.Futures[arg_16.strftime('%b')] + \\\n arg_16.strftime('%y')[-1] + ' ' + arg_13\n\n arg_17 = [to_fut(m) for m in arg_15]\n arg_7.debug(f'trying futures: {fut}')\n # noinspection PyBroadException\n try:\n arg_18 = bdp(tickers=arg_17, flds='last_tradeable_dt', cache=True)\n except Exception as e1:\n arg_7.error(f'error downloading futures contracts (1st trial) {e1}:\\n{fut}')\n # noinspection PyBroadException\n try:\n arg_17 = arg_17[:-1]\n arg_7.debug(f'trying futures (2nd trial): {fut}')\n arg_18 = bdp(tickers=arg_17, flds='last_tradeable_dt', cache=True)\n except Exception as e2:\n arg_7.error(f'error downloading futures contracts (2nd trial) {e2}:\\n{fut}')\n return ''\n\n arg_19 = arg_18[pd.DatetimeIndex(arg_18.last_tradeable_dt) > arg_2]\n arg_7.debug(f'futures full chain:\\n{fut_matu.to_string()}')\n arg_7.debug(f'getting index {idx} from:\\n{sub_fut.to_string()}')\n return arg_19.index.values[arg_12]"} +{"_id": "doc_7248", "title": 
"", "text": "def Func(arg_0, arg_1, arg_2=arg_3) -> pd.DataFrame:\n \"\"\"\n Check exchange hours vs local hours\n\n Args:\n tickers: list of tickers\n tz_exch: exchange timezone\n tz_loc: local timezone\n\n Returns:\n Local and exchange hours\n \"\"\"\n arg_4 = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD']\n arg_5, arg_6 = create_connection()\n arg_7 = arg_5.ref(arg_0=arg_0, flds=arg_4)\n arg_8 = pd.Timestamp('today').strftime('%Y-%m-%d ')\n arg_7.loc[:, 'local'] = arg_7.value.astype(str).str[:-3]\n arg_7.loc[:, 'exch'] = pd.DatetimeIndex(\n arg_8 + arg_7.value.astype(str)\n ).tz_localize(arg_2).tz_convert(arg_1).strftime('%H:%M')\n\n arg_7 = pd.concat([\n arg_7.set_index(['ticker', 'field']).exch.unstack().loc[:, arg_4],\n arg_7.set_index(['ticker', 'field']).local.unstack().loc[:, arg_4],\n ], axis=1)\n arg_7.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End']\n\n return arg_7"} +{"_id": "doc_7249", "title": "", "text": "def Func(arg_0: arg_1, arg_2, arg_3='TRADE') -> arg_1:\n \"\"\"\n Data file location for Bloomberg historical data\n\n Args:\n ticker: ticker name\n dt: date\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\n Returns:\n file location\n\n Examples:\n >>> os.environ['BBG_ROOT'] = ''\n >>> Func(ticker='ES1 Index', dt='2018-08-01') == ''\n True\n >>> os.environ['BBG_ROOT'] = '/data/bbg'\n >>> Func(ticker='ES1 Index', dt='2018-08-01')\n '/data/bbg/Index/ES1 Index/TRADE/2018-08-01.parq'\n \"\"\"\n arg_4 = os.environ.get(assist.BBG_ROOT, '').replace('\\\\', '/')\n if not arg_4: return ''\n arg_5 = arg_0.split()[-1]\n arg_6 = arg_0.replace('/', '_')\n arg_7 = pd.Timestamp(arg_2).strftime('%Y-%m-%d')\n return f'{data_path}/{asset}/{proper_ticker}/{typ}/{cur_dt}.parq'"} +{"_id": "doc_7250", "title": "", "text": "def Func(\n arg_0: arg_1, arg_2: arg_1, arg_3=False, arg_4=False, arg_5='parq', **arg_6\n) -> arg_1:\n \"\"\"\n Data file location for Bloomberg reference data\n\n Args:\n ticker: ticker name\n fld: field\n has_date: whether add current date to data file\n cache: if has_date is True, whether to load file from latest cached\n ext: file extension\n **kwargs: other overrides passed to ref function\n\n Returns:\n file location\n\n Examples:\n >>> import shutil\n >>>\n >>> os.environ['BBG_ROOT'] = ''\n >>> Func('BLT LN Equity', fld='Crncy') == ''\n True\n >>> os.environ['BBG_ROOT'] = '/data/bbg'\n >>> Func('BLT LN Equity', fld='Crncy', cache=True)\n '/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq'\n >>> Func('BLT LN Equity', fld='Crncy')\n ''\n >>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ)\n >>> Func(\n ... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True,\n ... ).replace(cur_dt, '[cur_date]')\n '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq'\n >>> Func(\n ... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True,\n ... cache=True, DVD_Start_Dt='20180101',\n ... ).replace(cur_dt, '[cur_date]')[:-5]\n '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101'\n >>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'\n >>> root_path = 'xbbg/tests/data'\n >>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All'\n >>> os.environ['BBG_ROOT'] = root_path\n >>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file)\n >>> files.create_folder(sub_path)\n >>> sample in shutil.copy(f'{root_path}/{sample}', sub_path)\n True\n >>> new_file = Func(\n ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',\n ... 
has_date=True, cache=True, ext='pkl'\n ... )\n >>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl'\n True\n >>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'\n >>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file])\n >>> updated_file = old_full.replace('2018-11-02', cur_dt)\n >>> updated_file in shutil.copy(old_full, updated_file)\n True\n >>> exist_file = Func(\n ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',\n ... has_date=True, cache=True, ext='pkl'\n ... )\n >>> exist_file == updated_file\n False\n >>> exist_file = Func(\n ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',\n ... DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl'\n ... )\n >>> exist_file == updated_file\n True\n \"\"\"\n arg_7 = os.environ.get(assist.BBG_ROOT, '').replace('\\\\', '/')\n if (not arg_7) or (not arg_4): return ''\n\n arg_8 = arg_0.replace('/', '_')\n arg_9 = arg_6.pop('cache_days', 10)\n arg_10 = f'{data_path}/{ticker.split()[-1]}/{proper_ticker}/{fld}'\n\n if len(arg_6) > 0: arg_11 = utils.to_str(arg_6)[1:-1].replace('|', '_')\n else: arg_11 = 'ovrd=None'\n\n # Check date info\n if arg_3:\n arg_12 = utils.cur_time()\n arg_13 = f'{root}/asof={cur_dt}, {info}.{ext}'\n arg_14 = re.compile(rf'{root}/asof=(.*), {info}\\.pkl')\n arg_15 = list(filter(arg_14.match, sorted(\n files.all_files(path_name=arg_10, keyword=arg_11, arg_5=arg_5)\n )))\n if len(arg_15) > 0:\n arg_16 = arg_14.match(arg_15[-1]).group(1)\n arg_17 = pd.Timestamp('today') - pd.Timestamp(arg_16)\n if arg_17 >= pd.Timedelta(days=arg_9): return arg_13\n return sorted(arg_15)[-1]\n else: return arg_13\n\n else: return f'{root}/{info}.{ext}'"} +{"_id": "doc_7251", "title": "", "text": "def Func(arg_0: arg_1.DataFrame, arg_3: arg_4, arg_5, arg_6='TRADE'):\n \"\"\"\n Check whether data is done for the day and save\n\n Args:\n data: data\n ticker: ticker\n dt: date\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\n Examples:\n >>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'\n >>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')\n >>> Func(sample, 'AAPL US Equity', '2018-11-02')\n >>> # Invalid exchange\n >>> Func(sample, 'AAPL XX Equity', '2018-11-02')\n >>> # Invalid empty data\n >>> Func(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')\n >>> # Invalid date - too close\n >>> cur_dt = utils.cur_time()\n >>> Func(sample, 'AAPL US Equity', cur_dt)\n \"\"\"\n arg_7 = arg_1.Timestamp(arg_5).strftime('%Y-%m-%d')\n arg_8 = logs.get_logger(Func, level='debug')\n arg_9 = f'{ticker} / {cur_dt} / {typ}'\n arg_10 = hist_file(arg_3=arg_3, arg_5=arg_5, arg_6=arg_6)\n if not arg_10: return\n\n if arg_0.empty:\n arg_8.warning(f'data is empty for {info} ...')\n return\n\n arg_11 = const.exch_info(arg_3=arg_3)\n if arg_11.empty: return\n\n arg_12 = arg_1.Timestamp(\n const.market_timing(arg_3=arg_3, arg_5=arg_5, timing='FINISHED')\n ).tz_localize(arg_11.tz)\n arg_13 = arg_1.Timestamp('now', tz=arg_11.tz) - arg_1.Timedelta('1H')\n\n if arg_12 > arg_13:\n arg_8.debug(f'skip saving cause market close ({end_time}) < now - 1H ({now}) ...')\n return\n\n arg_8.info(f'saving data to {data_file} ...')\n files.create_folder(arg_10, is_file=True)\n arg_0.to_parquet(arg_10)"} +{"_id": "doc_7252", "title": "", "text": "def Func(arg_0: arg_1) -> pd.Series:\n \"\"\"\n Exchange info for given ticker\n\n Args:\n ticker: ticker or exchange\n\n Returns:\n pd.Series\n\n Examples:\n >>> Func('SPY US Equity')\n tz America/New_York\n allday [04:00, 20:00]\n day [09:30, 
16:00]\n pre [04:00, 09:30]\n post [16:01, 20:00]\n dtype: object\n >>> Func('ES1 Index')\n tz America/New_York\n allday [18:00, 17:00]\n day [08:00, 17:00]\n dtype: object\n >>> Func('Z 1 Index')\n tz Europe/London\n allday [01:00, 21:00]\n day [01:00, 21:00]\n dtype: object\n >>> Func('TESTTICKER Corp').empty\n True\n >>> Func('US')\n tz America/New_York\n allday [04:00, 20:00]\n day [09:30, 16:00]\n pre [04:00, 09:30]\n post [16:01, 20:00]\n dtype: object\n \"\"\"\n arg_2 = logs.get_logger(Func, level='debug')\n if ' ' not in arg_0.strip():\n arg_0 = f'XYZ {ticker.strip()} Equity'\n arg_3 = param.load_info(cat='exch').get(\n market_info(arg_0=arg_0).get('exch', ''), dict()\n )\n if ('allday' in arg_3) and ('day' not in arg_3):\n arg_3['day'] = arg_3['allday']\n\n if any(arg_4 not in arg_3 for arg_4 in ['tz', 'allday', 'day']):\n arg_2.error(f'required exchange info cannot be found in {ticker} ...')\n return pd.Series()\n\n for arg_5 in ValidSessions:\n if arg_5 not in arg_3: continue\n arg_3[arg_5] = [param.to_hour(num=s) for s in arg_3[arg_5]]\n\n return pd.Series(arg_3)"} +{"_id": "doc_7253", "title": "", "text": "def Func(arg_0: arg_1) -> dict:\n \"\"\"\n Get info for given market\n\n Args:\n ticker: Bloomberg full ticker\n\n Returns:\n dict\n\n Examples:\n >>> info = Func('SHCOMP Index')\n >>> info['exch']\n 'EquityChina'\n >>> info = Func('ICICIC=1 IS Equity')\n >>> info['freq'], info['is_fut']\n ('M', True)\n >>> info = Func('INT1 Curncy')\n >>> info['freq'], info['is_fut']\n ('M', True)\n >>> info = Func('CL1 Comdty')\n >>> info['freq'], info['is_fut']\n ('M', True)\n >>> # Wrong tickers\n >>> Func('C XX Equity')\n {}\n >>> Func('XXX Comdty')\n {}\n >>> Func('Bond_ISIN Corp')\n {}\n >>> Func('XYZ Index')\n {}\n >>> Func('XYZ Curncy')\n {}\n \"\"\"\n arg_2 = arg_0.split()\n arg_3 = param.load_info('assets')\n\n # ========================== #\n # Equity #\n # ========================== #\n\n if (arg_2[-1] == 'Equity') and ('=' not in arg_2[0]):\n arg_4 = arg_2[-2]\n for arg_5 in arg_3.get('Equity', [dict()]):\n if 'exch_codes' not in arg_5: continue\n if arg_4 in arg_5['exch_codes']: return arg_5\n return dict()\n\n # ============================ #\n # Currency #\n # ============================ #\n\n if arg_2[-1] == 'Curncy':\n for arg_5 in arg_3.get('Curncy', [dict()]):\n if 'tickers' not in arg_5: continue\n if (arg_2[0].split('+')[0] in arg_5['tickers']) or \\\n (arg_2[0][-1].isdigit() and (arg_2[0][:-1] in arg_5['tickers'])):\n return arg_5\n return dict()\n\n if arg_2[-1] == 'Comdty':\n for arg_5 in arg_3.get('Comdty', [dict()]):\n if 'tickers' not in arg_5: continue\n if arg_2[0][:-1] in arg_5['tickers']: return arg_5\n return dict()\n\n # =================================== #\n # Index / Futures #\n # =================================== #\n\n if (arg_2[-1] == 'Index') or (\n (arg_2[-1] == 'Equity') and ('=' in arg_2[0])\n ):\n if arg_2[-1] == 'Equity':\n arg_6 = arg_2[0].split('=')[0]\n else:\n arg_6 = ' '.join(arg_2[:-1])\n for arg_5 in arg_3.get('Index', [dict()]):\n if 'tickers' not in arg_5: continue\n if (arg_6[:2] == 'UX') and ('UX' in arg_5['tickers']): return arg_5\n if arg_6 in arg_5['tickers']:\n if arg_2[-1] == 'Equity': return arg_5\n if not arg_5.get('is_fut', False): return arg_5\n if arg_6[:-1].rstrip() in arg_5['tickers']:\n if arg_5.get('is_fut', False): return arg_5\n return dict()\n\n if arg_2[-1] == 'Corp':\n for arg_5 in arg_3.get('Corp', [dict()]):\n if 'ticker' not in arg_5: continue\n\n return dict()"} +{"_id": "doc_7254", "title": "", "text": "def 
Func(arg_0, arg_1='USD') -> CurrencyPair:\n \"\"\"\n Currency pair info\n\n Args:\n local: local currency\n base: base currency\n\n Returns:\n CurrencyPair\n\n Examples:\n >>> Func(local='HKD', base='USD')\n CurrencyPair(ticker='HKD Curncy', factor=1.0, power=1)\n >>> Func(local='GBp')\n CurrencyPair(ticker='GBP Curncy', factor=100, power=-1)\n >>> Func(local='USD', base='GBp')\n CurrencyPair(ticker='GBP Curncy', factor=0.01, power=1)\n >>> Func(local='XYZ', base='USD')\n CurrencyPair(ticker='', factor=1.0, power=1)\n >>> Func(local='GBP', base='GBp')\n CurrencyPair(ticker='', factor=0.01, power=1)\n >>> Func(local='GBp', base='GBP')\n CurrencyPair(ticker='', factor=100.0, power=1)\n \"\"\"\n arg_2 = param.load_info(cat='ccy')\n if f'{local}{base}' in arg_2:\n arg_3 = arg_2[f'{local}{base}']\n\n elif f'{base}{local}' in arg_2:\n arg_3 = arg_2[f'{base}{local}']\n arg_3['factor'] = 1. / arg_3.get('factor', 1.)\n arg_3['power'] = -arg_3.get('power', 1)\n\n elif arg_1.lower() == arg_0.lower():\n arg_3 = dict(ticker='')\n arg_3['factor'] = 1.\n if arg_1[-1].lower() == arg_1[-1]:\n arg_3['factor'] /= 100.\n if arg_0[-1].lower() == arg_0[-1]:\n arg_3['factor'] *= 100.\n\n else:\n arg_4 = logs.get_logger(Func)\n arg_4.error(f'incorrect currency - local {local} / base {base}')\n return CurrencyPair(ticker='', factor=1., power=1)\n\n if 'factor' not in arg_3: arg_3['factor'] = 1.\n if 'power' not in arg_3: arg_3['power'] = 1\n return CurrencyPair(**arg_3)"} +{"_id": "doc_7255", "title": "", "text": "def Func(arg_0, arg_1, arg_2='EOD', arg_3='local') -> str:\n \"\"\"\n Market close time for ticker\n\n Args:\n ticker: ticker name\n dt: date\n timing: [EOD (default), BOD]\n tz: conversion to timezone\n\n Returns:\n str: date & time\n\n Examples:\n >>> Func('7267 JT Equity', dt='2018-09-10')\n '2018-09-10 14:58'\n >>> Func('7267 JT Equity', dt='2018-09-10', tz=timezone.TimeZone.NY)\n '2018-09-10 01:58:00-04:00'\n >>> Func('7267 JT Equity', dt='2018-01-10', tz='NY')\n '2018-01-10 00:58:00-05:00'\n >>> Func('7267 JT Equity', dt='2018-09-10', tz='SPX Index')\n '2018-09-10 01:58:00-04:00'\n >>> Func('8035 JT Equity', dt='2018-09-10', timing='BOD')\n '2018-09-10 09:01'\n >>> Func('Z 1 Index', dt='2018-09-10', timing='FINISHED')\n '2018-09-10 21:00'\n >>> Func('TESTTICKER Corp', dt='2018-09-10')\n ''\n \"\"\"\n arg_4 = logs.get_logger(Func)\n arg_5 = pd.Series(exch_info(arg_0=arg_0))\n if any(arg_6 not in arg_5.index for arg_6 in ['tz', 'allday', 'day']):\n arg_4.error(f'required exchange info cannot be found in {ticker} ...')\n return ''\n\n arg_7 = {\n 'BOD': arg_5.day[0], 'FINISHED': arg_5.allday[-1]\n }.get(arg_2, arg_5.day[-1])\n\n arg_8 = pd.Timestamp(str(arg_1)).strftime('%Y-%m-%d')\n if arg_3 == 'local':\n return f'{cur_dt} {mkt_time}'\n\n return timezone.tz_convert(f'{cur_dt} {mkt_time}', to_tz=arg_3, from_tz=arg_5.tz)"} +{"_id": "doc_7256", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Load parameters for assets\n\n Args:\n cat: category\n\n Returns:\n dict\n\n Examples:\n >>> import pandas as pd\n >>>\n >>> assets = Func(cat='assets')\n >>> all(cat in assets for cat in ['Equity', 'Index', 'Curncy', 'Corp'])\n True\n >>> os.environ['BBG_PATH'] = ''\n >>> exch = Func(cat='exch')\n >>> pd.Series(exch['EquityUS']).allday\n [400, 2000]\n >>> test_root = f'{PKG_PATH}/tests'\n >>> os.environ['BBG_PATH'] = test_root\n >>> ovrd_exch = Func(cat='exch')\n >>> # Somehow os.environ is not set properly in doctest environment\n >>> ovrd_exch.update(_load_yaml_(f'{test_root}/markets/exch.yml'))\n >>> 
pd.Series(ovrd_exch['EquityUS']).allday\n [300, 2100]\n \"\"\"\n arg_1 = _load_yaml_(f'{PKG_PATH}/markets/{cat}.yml')\n arg_2 = os.environ.get('BBG_ROOT', '').replace('\\\\', '/')\n if not arg_2: return arg_1\n for arg_0, arg_3 in _load_yaml_(f'{root}/markets/{cat}.yml').items():\n if isinstance(arg_3, dict):\n if arg_0 in arg_1: arg_1[arg_0].update(arg_3)\n else: arg_1[arg_0] = arg_3\n if isinstance(arg_3, list) and isinstance(arg_1[arg_0], list): arg_1[arg_0] += arg_3\n return arg_1"} +{"_id": "doc_7257", "title": "", "text": "def Func(arg_0) -> str:\n \"\"\"\n Convert YAML input to hours\n\n Args:\n num: number in YMAL file, e.g., 900, 1700, etc.\n\n Returns:\n str\n\n Examples:\n >>> Func(900)\n '09:00'\n >>> Func(1700)\n '17:00'\n \"\"\"\n arg_1 = str(int(arg_0))\n return pd.Timestamp(f'{to_str[:-2]}:{to_str[-2:]}').strftime('%H:%M')"} +{"_id": "doc_7258", "title": "", "text": "def Func(arg_0: arg_1, arg_2=False):\n \"\"\"\n Make folder as well as all parent folders if not exists\n\n Args:\n path_name: full path name\n is_file: whether input is name of file\n \"\"\"\n arg_3 = arg_0.replace('\\\\', '/').split('/')\n for arg_4 in range(1, len(arg_3) + (0 if arg_2 else 1)):\n arg_5 = '/'.join(arg_3[:arg_4])\n if not os.path.exists(arg_5): os.mkdir(arg_5)"} +{"_id": "doc_7259", "title": "", "text": "def Func(\n arg_0, arg_1='', arg_2='', arg_3=True,\n arg_4=False, arg_5=arg_6\n) -> list:\n \"\"\"\n Search all files with criteria\n Returned list will be sorted by last modified\n\n Args:\n path_name: full path name\n keyword: keyword to search\n ext: file extensions, split by ','\n full_path: whether return full path (default True)\n has_date: whether has date in file name (default False)\n date_fmt: date format to check for has_date parameter\n\n Returns:\n list: all file names with criteria fulfilled\n \"\"\"\n if not os.path.exists(path=arg_0): return []\n arg_0 = arg_0.replace('\\\\', '/')\n\n if arg_1 or arg_2:\n arg_1 = f'*{keyword}*' if arg_1 else '*'\n if not arg_2: arg_2 = '*'\n arg_7 = sort_by_modified([\n arg_8.replace('\\\\', '/') for arg_8 in glob.iglob(f'{path_name}/{keyword}.{ext}')\n if os.path.isfile(arg_8) and (arg_8.replace('\\\\', '/').split('/')[-1][0] != '~')\n ])\n\n else:\n arg_7 = sort_by_modified([\n f'{path_name}/{f}' for arg_8 in os.listdir(path=arg_0)\n if os.path.isfile(f'{path_name}/{f}') and (arg_8[0] != '~')\n ])\n\n if arg_4:\n arg_7 = filter_by_dates(arg_7, arg_5=arg_5)\n\n return arg_7 if arg_3 else [arg_8.split('/')[-1] for arg_8 in arg_7]"} +{"_id": "doc_7260", "title": "", "text": "def Func(\n arg_0, arg_1='', arg_2=False, arg_3=arg_4\n) -> list:\n \"\"\"\n Search all folders with criteria\n Returned list will be sorted by last modified\n\n Args:\n path_name: full path name\n keyword: keyword to search\n has_date: whether has date in file name (default False)\n date_fmt: date format to check for has_date parameter\n\n Returns:\n list: all folder names fulfilled criteria\n \"\"\"\n if not os.path.exists(path=arg_0): return []\n arg_0 = arg_0.replace('\\\\', '/')\n\n if arg_1:\n arg_5 = sort_by_modified([\n f.replace('\\\\', '/') for f in glob.iglob(f'{path_name}/*{keyword}*')\n if os.path.isdir(f) and (f.replace('\\\\', '/').split('/')[-1][0] != '~')\n ])\n\n else:\n arg_5 = sort_by_modified([\n f'{path_name}/{f}' for f in os.listdir(path=arg_0)\n if os.path.isdir(f'{path_name}/{f}') and (f[0] != '~')\n ])\n\n if arg_2:\n arg_5 = filter_by_dates(arg_5, arg_3=arg_3)\n\n return arg_5"} +{"_id": "doc_7261", "title": "", "text": "def Func(arg_0: arg_1) -> 
arg_1:\n \"\"\"\n Sort files or folders by modified time\n\n Args:\n files_or_folders: list of files or folders\n\n Returns:\n list\n \"\"\"\n return sorted(arg_0, key=os.path.getmtime, reverse=True)"} +{"_id": "doc_7262", "title": "", "text": "def Func(arg_0: arg_1, arg_2=arg_3) -> arg_1:\n \"\"\"\n Filter files or dates by date patterns\n\n Args:\n files_or_folders: list of files or folders\n date_fmt: date format\n\n Returns:\n list\n \"\"\"\n arg_4 = re.compile(f'.*{date_fmt}.*')\n return arg_1(filter(\n lambda vv: arg_4.match(vv.replace('\\\\', '/').split('/')[-1]) is not None,\n arg_0,\n ))"} +{"_id": "doc_7263", "title": "", "text": "def Func(arg_0) -> pd.Timestamp:\n \"\"\"\n File modified time in python\n\n Args:\n file_name: file name\n\n Returns:\n pd.Timestamp\n \"\"\"\n return pd.to_datetime(time.ctime(os.path.getmtime(filename=arg_0)))"} +{"_id": "doc_7264", "title": "", "text": "def Func(arg_0, arg_1) -> Session:\n \"\"\"\n Get interval from defined session\n\n Args:\n ticker: ticker\n session: session\n\n Returns:\n Session of start_time and end_time\n\n Examples:\n >>> Func('005490 KS Equity', 'day_open_30')\n Session(start_time='09:00', end_time='09:30')\n >>> Func('005490 KS Equity', 'day_normal_30_20')\n Session(start_time='09:31', end_time='15:00')\n >>> Func('005490 KS Equity', 'day_close_20')\n Session(start_time='15:01', end_time='15:20')\n >>> Func('700 HK Equity', 'am_open_30')\n Session(start_time='09:30', end_time='10:00')\n >>> Func('700 HK Equity', 'am_normal_30_30')\n Session(start_time='10:01', end_time='11:30')\n >>> Func('700 HK Equity', 'am_close_30')\n Session(start_time='11:31', end_time='12:00')\n >>> Func('ES1 Index', 'day_exact_2130_2230')\n Session(start_time=None, end_time=None)\n >>> Func('ES1 Index', 'allday_exact_2130_2230')\n Session(start_time='21:30', end_time='22:30')\n >>> Func('ES1 Index', 'allday_exact_2130_0230')\n Session(start_time='21:30', end_time='02:30')\n >>> Func('AMLP US', 'day_open_30')\n Session(start_time=None, end_time=None)\n >>> Func('7974 JP Equity', 'day_normal_180_300') is SessNA\n True\n >>> Func('Z 1 Index', 'allday_normal_30_30')\n Session(start_time='01:31', end_time='20:30')\n >>> Func('GBP Curncy', 'day')\n Session(start_time='17:02', end_time='17:00')\n \"\"\"\n if '_' not in arg_1:\n arg_1 = f'{session}_normal_0_0'\n arg_2 = Intervals(arg_0=arg_0)\n arg_3 = arg_1.split('_')\n return getattr(arg_2, f'market_{ss_info.pop(1)}')(*arg_3)"} +{"_id": "doc_7265", "title": "", "text": "def Func(arg_0, arg_1) -> str:\n \"\"\"\n Shift start time by mins\n\n Args:\n start_time: start time in terms of HH:MM string\n mins: number of minutes (+ / -)\n\n Returns:\n end time in terms of HH:MM string\n \"\"\"\n arg_2 = pd.Timestamp(arg_0)\n arg_3 = arg_2 + np.sign(arg_1) * pd.Timedelta(f'00:{abs(mins)}:00')\n return arg_3.strftime('%H:%M')"} +{"_id": "doc_7266", "title": "", "text": "def Func(arg_0, arg_1, arg_2) -> Session:\n \"\"\"\n Time intervals for market open\n\n Args:\n session: [allday, day, am, pm, night]\n mins: mintues after open\n\n Returns:\n Session of start_time and end_time\n \"\"\"\n if arg_1 not in arg_0.exch: return SessNA\n arg_3 = arg_0.exch[arg_1][0]\n return Session(arg_3, shift_time(arg_3, int(arg_2)))"} +{"_id": "doc_7267", "title": "", "text": "def Func(arg_0, arg_1, arg_2) -> Session:\n \"\"\"\n Time intervals for market close\n\n Args:\n session: [allday, day, am, pm, night]\n mins: mintues before close\n\n Returns:\n Session of start_time and end_time\n \"\"\"\n if arg_1 not in arg_0.exch: return 
SessNA\n arg_3 = arg_0.exch[arg_1][-1]\n return Session(shift_time(arg_3, -int(arg_2) + 1), arg_3)"} +{"_id": "doc_7268", "title": "", "text": "def Func(arg_0, arg_1, arg_2: arg_3, arg_4: arg_3) -> Session:\n \"\"\"\n Explicitly specify start time and end time\n\n Args:\n session: predefined session\n start_time: start time in terms of HHMM string\n end_time: end time in terms of HHMM string\n\n Returns:\n Session of start_time and end_time\n \"\"\"\n if arg_1 not in arg_0.exch: return SessNA\n arg_5 = arg_0.exch[arg_1]\n\n arg_6 = arg_5[0] < arg_5[-1]\n\n if not arg_2: arg_7 = arg_5[0]\n else:\n arg_7 = param.to_hour(arg_2)\n if arg_6: arg_7 = max(arg_7, arg_5[0])\n\n if not arg_4: arg_8 = arg_5[-1]\n else:\n arg_8 = param.to_hour(arg_4)\n if arg_6: arg_8 = min(arg_8, arg_5[-1])\n\n if arg_6 and (arg_7 > arg_8): return SessNA\n return Session(arg_2=arg_7, arg_4=arg_8)"} +{"_id": "doc_7269", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None) -> str:\n \"\"\"\n Convert to tz\n\n Args:\n dt: date time\n to_tz: to tz\n from_tz: from tz - will be ignored if tz from dt is given\n\n Returns:\n str: date & time\n\n Examples:\n >>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')\n >>> Func(dt_1, to_tz='NY')\n '2018-09-10 04:00:00-04:00'\n >>> dt_2 = pd.Timestamp('2018-01-10 16:00')\n >>> Func(dt_2, to_tz='HK', from_tz='NY')\n '2018-01-11 05:00:00+08:00'\n >>> dt_3 = '2018-09-10 15:00'\n >>> Func(dt_3, to_tz='NY', from_tz='JP')\n '2018-09-10 02:00:00-04:00'\n \"\"\"\n arg_3 = logs.get_logger(Func, level='info')\n arg_4, arg_5 = get_tz(arg_2), get_tz(arg_1)\n\n arg_6 = pd.Timestamp(str(arg_0), tz=arg_4)\n arg_3.debug(f'converting {str(from_dt)} from {f_tz} to {t_tz} ...')\n return str(pd.Timestamp(str(arg_6), tz=arg_5))"} +{"_id": "doc_7270", "title": "", "text": "def Func(**arg_0) -> str:\n \"\"\"\n Full infomation for missing query\n \"\"\"\n arg_1 = arg_0.pop('func', 'unknown')\n if 'ticker' in arg_0: arg_0['ticker'] = arg_0['ticker'].replace('/', '_')\n arg_2 = utils.to_str(arg_0, fmt='{value}', sep='/')[1:-1]\n return f'{func}/{info}'"} +{"_id": "doc_7271", "title": "", "text": "def Func(**arg_0) -> int:\n \"\"\"\n Check number of trials for missing values\n\n Returns:\n int: number of trials already tried\n \"\"\"\n arg_1 = os.environ.get(BBG_ROOT, '').replace('\\\\', '/')\n if not arg_1: return 0\n return len(files.all_files(f'{data_path}/Logs/{missing_info(**kwargs)}'))"} +{"_id": "doc_7272", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator for Func views that do not require authentication\n Sets an attribute in the fuction STRONGHOLD_IS_PUBLIC to True\n \"\"\"\n arg_1 = arg_0\n while isinstance(arg_1, partial):\n arg_1 = arg_1.func\n set_view_func_Func(arg_1)\n\n return arg_0"} +{"_id": "doc_7273", "title": "", "text": "def Func(arg_0, arg_1='__version__'):\n \"\"\"Get the version of the package from the given file by\n executing it and extracting the given `name`.\n \"\"\"\n arg_2 = os.path.realpath(arg_0)\n arg_3 = {}\n with io.open(arg_2, encoding=\"utf8\") as f:\n exec(f.read(), {}, arg_3)\n return arg_3[arg_1]"} +{"_id": "doc_7274", "title": "", "text": "def Func(arg_0=arg_1):\n \"\"\"\n Find all of the packages.\n \"\"\"\n arg_2 = []\n for arg_3, arg_4, arg_5 in os.walk(arg_0, followlinks=True):\n if os.path.exists(pjoin(arg_3, '__init__.py')):\n arg_2.append(os.path.relpath(arg_3, arg_0).replace(os.path.sep, '.'))\n elif arg_3 != arg_0:\n # Do not look for packages in subfolders if current is not a package\n arg_4[:] = []\n return arg_2"} +{"_id": 
"doc_7275", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Echo a command before Funcning it. Defaults to repo as cwd\"\"\"\n log.info('> ' + list2cmdline(arg_0))\n arg_1.setdefault('cwd', HERE)\n arg_1.setdefault('shell', os.name == 'nt')\n if not isinstance(arg_0, (list, tuple)) and os.name != 'nt':\n arg_0 = shlex.split(arg_0)\n arg_0[0] = which(arg_0[0])\n return subprocess.check_call(arg_0, **arg_1)"} +{"_id": "doc_7276", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a Command that checks that certain files exist.\n\n Raises a ValueError if any of the files are missing.\n\n Note: The check is skipped if the `--skip-npm` flag is used.\n \"\"\"\n\n class TargetsCheck(BaseCommand):\n def run(arg_1):\n if skip_npm:\n log.info('Skipping target checks')\n return\n arg_2 = [t for t in arg_0 if not os.path.exists(t)]\n if arg_2:\n raise ValueError(('missing files: %s' % arg_2))\n\n return TargetsCheck"} +{"_id": "doc_7277", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Wrap a setup command\n\n Parameters\n ----------\n cmds: list(str)\n The names of the other commands to run prior to the command.\n strict: boolean, optional\n Whether to raise errors when a pre-command fails.\n \"\"\"\n class WrappedCommand(arg_1):\n\n def run(arg_3):\n if not getattr(arg_3, 'uninstall', None):\n try:\n [arg_3.run_command(arg_4) for arg_4 in arg_0]\n except Exception:\n if arg_2:\n raise\n else:\n pass\n # update package data\n update_package_data(arg_3.distribution)\n\n arg_5 = arg_1.run(arg_3)\n return arg_5\n return WrappedCommand"} +{"_id": "doc_7278", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Expand data file specs into valid data files metadata.\n\n Parameters\n ----------\n data_specs: list of tuples\n See [createcmdclass] for description.\n existing: list of tuples\n The existing distribution data_files metadata.\n\n Returns\n -------\n A valid list of data_files items.\n \"\"\"\n # Extract the existing data files into a staging object.\n arg_2 = defaultdict(list)\n for (arg_3, arg_4) in arg_1 or []:\n arg_2[arg_3] = arg_4\n\n # Extract the files and assign them to the proper data\n # files path.\n for (arg_3, arg_5, arg_6) in arg_0 or []:\n arg_5 = arg_5.replace(os.sep, '/')\n arg_7 = len(arg_5) + 1\n\n arg_4 = _get_files(pjoin(arg_5, arg_6))\n for arg_8 in arg_4:\n # Normalize the path.\n arg_9 = os.path.dirname(arg_8)\n arg_10 = '/'.join([arg_3, arg_9[arg_7:]])\n if arg_10.endswith('/'):\n arg_10 = arg_10[:-1]\n arg_2[arg_10].append(arg_8)\n\n # Construct the data files spec.\n arg_11 = []\n for (arg_3, arg_4) in arg_2.items():\n arg_11.append((arg_3, arg_4))\n return arg_11"} +{"_id": "doc_7279", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Translate and compile a glob pattern to a regular expression matcher.\"\"\"\n if isinstance(arg_0, bytes):\n arg_2 = arg_0.decode('ISO-8859-1')\n arg_3 = _translate_glob(arg_2)\n arg_4 = arg_3.encode('ISO-8859-1')\n else:\n arg_4 = _translate_glob(arg_0)\n arg_5 = re.IGNORECASE if arg_1 else 0\n return re.compile(arg_4, arg_5=arg_5).match"} +{"_id": "doc_7280", "title": "", "text": "def Func(arg_0):\n \"\"\"Iterate over all the parts of a path.\n\n Splits path recursively with os.path.split().\n \"\"\"\n (arg_1, arg_2) = os.path.split(arg_0)\n if not arg_1 or (not arg_2 and arg_1 == arg_0):\n if arg_1:\n yield arg_1\n if arg_2 or not arg_1:\n yield arg_2\n return\n for arg_3 in Func(arg_1):\n yield arg_3\n yield arg_2"} +{"_id": "doc_7281", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Join 
translated glob pattern parts.\n\n This is different from a simple join, as care need to be taken\n to allow ** to match ZERO or more directories.\n \"\"\"\n arg_2 = ''\n for arg_3 in arg_0[:-1]:\n if arg_3 == '.*':\n # drop separator, since it is optional\n # (** matches ZERO or more dirs)\n arg_2 += arg_3\n else:\n arg_2 += arg_3 + arg_1\n\n if arg_0[-1] == '.*':\n # Final part is **\n arg_2 += '.+'\n # Follow stdlib/git convention of matching all sub files/directories:\n arg_2 += '({os_sep_class}?.*)?'.format(arg_1=arg_1)\n else:\n arg_2 += arg_0[-1]\n return arg_2"} +{"_id": "doc_7282", "title": "", "text": "def Func(arg_0):\n \"\"\"Translate a glob PATTERN PART to a regular expression.\"\"\"\n # Code modified from Python 3 standard lib fnmatch:\n if arg_0 == '**':\n return '.*'\n arg_1, arg_2 = 0, len(arg_0)\n arg_3 = []\n while arg_1 < arg_2:\n arg_4 = arg_0[arg_1]\n arg_1 = arg_1 + 1\n if arg_4 == '*':\n # Match anything but path separators:\n arg_3.append('[^%s]*' % SEPARATORS)\n elif arg_4 == '?':\n arg_3.append('[^%s]?' % SEPARATORS)\n elif arg_4 == '[':\n arg_5 = arg_1\n if arg_5 < arg_2 and arg_0[arg_5] == '!':\n arg_5 = arg_5 + 1\n if arg_5 < arg_2 and arg_0[arg_5] == ']':\n arg_5 = arg_5 + 1\n while arg_5 < arg_2 and arg_0[arg_5] != ']':\n arg_5 = arg_5 + 1\n if arg_5 >= arg_2:\n arg_3.append('\\\\[')\n else:\n arg_6 = arg_0[arg_1:arg_5].replace('\\\\', '\\\\\\\\')\n arg_1 = arg_5 + 1\n if arg_6[0] == '!':\n arg_6 = '^' + arg_6[1:]\n elif arg_6[0] == '^':\n arg_6 = '\\\\' + arg_6\n arg_3.append('[%s]' % arg_6)\n else:\n arg_3.append(re.escape(arg_4))\n return ''.join(arg_3)"} +{"_id": "doc_7283", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Send DDL to create the specified `table`\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n\n Returns None\n \"\"\"\n arg_2, arg_3 = super(PostgresDbWriter, arg_0).Func(arg_1)\n for arg_4 in arg_3 + arg_2:\n arg_0.execute(arg_4)"} +{"_id": "doc_7284", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Send DDL to create the specified `table` indexes\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n\n Returns None\n \"\"\"\n arg_2 = super(PostgresDbWriter, arg_0).Func(arg_1)\n for arg_3 in arg_2:\n arg_0.execute(arg_3)"} +{"_id": "doc_7285", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Send DDL to create the specified `table` triggers\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n\n Returns None\n \"\"\"\n arg_2 = super(PostgresDbWriter, arg_0).Func(arg_1)\n for arg_3 in arg_2:\n arg_0.execute(arg_3)"} +{"_id": "doc_7286", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Write the contents of `table`\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n - `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.\n\n Returns None\n \"\"\"\n arg_3 = arg_0.FileObjFaker(arg_1, arg_2.read(arg_1), arg_0.process_row, arg_0.verbose)\n arg_0.copy_from(arg_3, '\"%s\"' % arg_1.name, ['\"%s\"' % arg_4['name'] for arg_4 in arg_1.columns])"} +{"_id": "doc_7287", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write TRIGGERs existing 
on `table` to the output file\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n\n Returns None\n \"\"\"\n arg_0.f.write('\\n'.join(super(PostgresFileWriter, arg_0).Func(arg_1)))"} +{"_id": "doc_7288", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Utility for sending a predefined request and printing response as well\n as storing messages in a list, useful for testing\n\n Parameters\n ----------\n session: blpapi.session.Session\n request: blpapi.request.Request\n Request to be sent\n\n Returns\n -------\n List of all messages received\n \"\"\"\n # flush event queue in case previous call errored out\n while(arg_0.tryNextEvent()):\n pass\n\n print(\"Sending Request:\\n %s\" % arg_1)\n arg_0.sendRequest(arg_1)\n arg_2 = []\n # Process received events\n while(True):\n # We provide timeout to give the chance for Ctrl+C handling:\n arg_3 = arg_0.nextEvent(500)\n for arg_4 in arg_3:\n print(\"Message Received:\\n %s\" % arg_4)\n arg_2.append(arg_4)\n if arg_3.eventType() == blpapi.Event.RESPONSE:\n # Response completely received, so we could exit\n break\n return arg_2"} +{"_id": "doc_7289", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Initialize blpapi.Session services\n \"\"\"\n arg_1 = _get_logger(arg_0.debug)\n\n # flush event queue in defensive way\n arg_2 = arg_0._session.openService('//blp/refdata')\n arg_3 = arg_0._session.nextEvent()\n arg_4 = _EVENT_DICT[arg_3.eventType()]\n arg_1.info('Event Type: {!r}'.format(arg_4))\n for arg_5 in arg_3:\n arg_1.info('Message Received:\\n{}'.format(arg_5))\n if arg_3.eventType() != blpapi.Event.SERVICE_STATUS:\n raise RuntimeError('Expected a \"SERVICE_STATUS\" event but '\n 'received a {!r}'.format(arg_4))\n if not arg_2:\n arg_1.warning('Failed to open //blp/refdata')\n raise ConnectionError('Could not open a //blp/refdata service')\n arg_0.refDataService = arg_0._session.getService('//blp/refdata')\n\n arg_2 = arg_0._session.openService('//blp/exrsvc')\n arg_3 = arg_0._session.nextEvent()\n arg_4 = _EVENT_DICT[arg_3.eventType()]\n arg_1.info('Event Type: {!r}'.format(arg_4))\n for arg_5 in arg_3:\n arg_1.info('Message Received:\\n{}'.format(arg_5))\n if arg_3.eventType() != blpapi.Event.SERVICE_STATUS:\n raise RuntimeError('Expected a \"SERVICE_STATUS\" event but '\n 'received a {!r}'.format(arg_4))\n if not arg_2:\n arg_1.warning('Failed to open //blp/exrsvc')\n raise ConnectionError('Could not open a //blp/exrsvc service')\n arg_0.exrService = arg_0._session.getService('//blp/exrsvc')\n\n return arg_0"} +{"_id": "doc_7290", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6=None):\n \"\"\"\n Get Open, High, Low, Close, Volume, and numEvents for a ticker.\n Return pandas DataFrame\n\n Parameters\n ----------\n ticker: string\n String corresponding to ticker\n start_datetime: string\n UTC datetime in format YYYY-mm-ddTHH:MM:SS\n end_datetime: string\n UTC datetime in format YYYY-mm-ddTHH:MM:SS\n event_type: string {TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID,\n BEST_ASK}\n Requested data event type\n interval: int {1... 1440}\n Length of time bars\n elms: list of tuples\n List of tuples where each tuple corresponds to the other elements\n to be set. 
Refer to the IntradayBarRequest section in the\n 'Services & schemas reference guide' for more info on these values\n \"\"\"\n arg_6 = [] if not arg_6 else arg_6\n\n # flush event queue in case previous call errored out\n arg_7 = _get_logger(arg_0.debug)\n while(arg_0._session.tryNextEvent()):\n pass\n\n # Create and fill the request for the historical data\n arg_8 = arg_0.refDataService.createRequest('IntradayBarRequest')\n arg_8.set('security', arg_1)\n arg_8.set('eventType', arg_4)\n arg_8.set('interval', arg_5) # bar interval in minutes\n arg_8.set('startDateTime', arg_2)\n arg_8.set('endDateTime', arg_3)\n for arg_9, arg_10 in arg_6:\n arg_8.set(arg_9, arg_10)\n\n arg_7.info('Sending Request:\\n{}'.format(arg_8))\n # Send the request\n arg_0._session.sendRequest(arg_8, identity=arg_0._identity)\n # Process received events\n arg_11 = []\n arg_12 = ['open', 'high', 'low', 'close', 'volume', 'numEvents']\n for arg_13 in arg_0._receive_events():\n arg_14 = arg_13['element']['IntradayBarResponse']\n for arg_15 in arg_14['barData']['barTickData']:\n arg_11.append(arg_15['barTickData'])\n arg_11 = pd.DataFrame(arg_11).set_index('time').sort_index().loc[:, arg_12]\n return arg_11"} +{"_id": "doc_7291", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Enqueue task with specified data. \"\"\"\n arg_2 = json.dumps(arg_1)\n with arg_0._db_conn() as conn:\n return conn.execute(\n 'INSERT INTO %s (created, data) VALUES (%%(created)s, %%(data)s)' % arg_0.table_name,\n created=datetime.utcnow(),\n arg_1=arg_2\n )"} +{"_id": "doc_7292", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" This method is a good one to extend if you want to create a queue which always applies an extra predicate. \"\"\"\n if arg_1 is None:\n return ''\n\n # if they don't have a supported format seq, wrap it for them\n if not isinstance(arg_1[1], (list, dict, tuple)):\n arg_1 = [arg_1[0], (arg_1[1], )]\n\n arg_1 = database.escape_query(*arg_1)\n\n return 'AND (' + arg_1 + ')'"} +{"_id": "doc_7293", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Designed to be passed as the default kwarg in simplejson.dumps. Serializes dates and datetimes to ISO strings.\n \"\"\"\n if hasattr(arg_0, 'isoformat'):\n return arg_0.isoformat()\n else:\n raise TypeError('Object of type %s with value of %s is not JSON serializable' % (type(arg_0), repr(arg_0)))"} +{"_id": "doc_7294", "title": "", "text": "def Func(arg_0=arg_1):\n \"\"\" Returns a new connection to the database. \"\"\"\n return database.connect(host=HOST, port=PORT, user=USER, password=PASSWORD, database=arg_0)"} +{"_id": "doc_7295", "title": "", "text": "def Func():\n \"\"\" Run a set of InsertWorkers and record their performance. \"\"\"\n\n arg_0 = threading.Event()\n arg_1 = [ InsertWorker(arg_0) for _ in range(NUM_WORKERS) ]\n\n print('Launching %d workers' % NUM_WORKERS)\n\n [ arg_2.start() for arg_2 in arg_1 ]\n time.sleep(WORKLOAD_TIME)\n\n print('Stopping workload')\n\n arg_0.set()\n [ arg_2.join() for arg_2 in arg_1 ]\n\n with get_connection() as conn:\n arg_3 = conn.get(\"SELECT COUNT(*) AS count FROM %s\" % TABLE).count\n\n print(\"%d rows inserted using %d workers\" % (arg_3, NUM_WORKERS))\n print(\"%.1f rows per second\" % (arg_3 / float(WORKLOAD_TIME)))"} +{"_id": "doc_7296", "title": "", "text": "def Func(arg_0):\n \"\"\" Used for development only \"\"\"\n for arg_1, arg_2 in globals().items():\n if arg_0 == arg_2:\n print(arg_1)"} +{"_id": "doc_7297", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the number of connections cached by the pool. 
\"\"\"\n return sum(arg_1.qFunc() for arg_1 in arg_0._connections.values()) + len(arg_0._fairies)"} +{"_id": "doc_7298", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" OperationalError's are emitted by the _mysql library for\n almost every error code emitted by MySQL. Because of this we\n verify that the error is actually a connection error before\n terminating the connection and firing off a PoolConnectionException\n \"\"\"\n try:\n arg_0._conn.query('SELECT 1')\n except (IOError, _mysql.OperationalError):\n # ok, it's actually an issue.\n arg_0.__handle_connection_failure(arg_1)\n else:\n # seems ok, probably programmer error\n raise _mysql.DatabaseError(*arg_1.args)"} +{"_id": "doc_7299", "title": "", "text": "def Func(arg_0=', ', **arg_1):\n \"\"\" Build a simple expression ready to be added onto another query.\n\n >>> Func(joiner=' AND ', name='bob', role='admin')\n \"`name`=%(_QB_name)s AND `name`=%(_QB_role)s\", { '_QB_name': 'bob', '_QB_role': 'admin' }\n \"\"\"\n arg_2, arg_3 = [], {}\n\n for arg_4, arg_5 in sorted(arg_1.items(), arg_6=lambda kv: kv[0]):\n arg_6 = '_QB_%s' % arg_4\n arg_2.append('`%s`=%%(%s)s' % (arg_4, arg_6))\n arg_3[arg_6] = arg_5\n\n return arg_0.join(arg_2), arg_3"} +{"_id": "doc_7300", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\" Build a Func query.\n\n >>> Func('foo_table', a=5, b=2)\n \"UPDATE `foo_table` SET `a`=%(_QB_a)s, `b`=%(_QB_b)s\", { '_QB_a': 5, '_QB_b': 2 }\n \"\"\"\n arg_2 = \"UPDATE `%s` SET \" % arg_0\n arg_3, arg_4 = simple_expression(', ', **arg_1)\n return arg_2 + arg_3, arg_4"} +{"_id": "doc_7301", "title": "", "text": "def Func(arg_0, arg_1='127.0.0.1', arg_2=3306, arg_3='root', arg_4='', arg_5=None):\n \"\"\" Connect to the database specified \"\"\"\n\n if arg_5 is None:\n raise exceptions.RequiresDatabase()\n\n arg_0._db_args = { 'host': arg_1, 'port': arg_2, 'user': arg_3, 'password': arg_4, 'database': arg_5 }\n with arg_0._db_conn() as conn:\n conn.query('SELECT 1')\n return arg_0"} +{"_id": "doc_7302", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Start a step. \"\"\"\n if arg_0.finished is not None:\n raise AlreadyFinished()\n\n arg_2 = arg_0._get_step(arg_1)\n if arg_2 is not None:\n if 'stop' in arg_2:\n raise StepAlreadyFinished()\n else:\n raise StepAlreadyStarted()\n\n arg_3 = copy.deepcopy(arg_0.steps)\n arg_3.append({\n \"start\": datetime.utcnow(),\n \"name\": arg_1\n })\n arg_0._save(arg_3=arg_3)"} +{"_id": "doc_7303", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Stop a step. 
\"\"\"\n if arg_0.finished is not None:\n raise AlreadyFinished()\n\n arg_2 = copy.deepcopy(arg_0.steps)\n\n arg_3 = arg_0._get_step(arg_1, arg_2=arg_2)\n if arg_3 is None:\n raise StepNotStarted()\n elif 'stop' in arg_3:\n raise StepAlreadyFinished()\n\n arg_3['stop'] = datetime.utcnow()\n\n arg_3['duration'] = util.timedelta_total_seconds(arg_3['stop'] - arg_3['start'])\n arg_0._save(arg_2=arg_2)"} +{"_id": "doc_7304", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" load steps -> basically load all the datetime isoformats into datetimes \"\"\"\n for arg_2 in arg_1:\n if 'start' in arg_2:\n arg_2['start'] = parser.parse(arg_2['start'])\n if 'stop' in arg_2:\n arg_2['stop'] = parser.parse(arg_2['stop'])\n return arg_1"} +{"_id": "doc_7305", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=arg_3):\n \"\"\" Assemble one EVM instruction from its textual representation.\n\n :param asmcode: assembly code for one instruction\n :type asmcode: str\n :param pc: program counter of the instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: An Instruction object\n :rtype: Instruction\n\n Example use::\n\n >>> print Func('LT')\n\n\n \"\"\"\n try:\n arg_4 = instruction_tables[arg_2]\n arg_0 = arg_0.strip().split(' ')\n arg_5 = arg_4[arg_0[0].upper()]\n if arg_1:\n arg_5.pc = arg_1\n if arg_5.operand_size > 0:\n assert len(arg_0) == 2\n arg_5.operand = int(arg_0[1], 0)\n return arg_5\n except:\n raise AssembleError(\"Something wrong at pc %d\" % arg_1)"} +{"_id": "doc_7306", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=arg_3):\n \"\"\" Assemble a sequence of textual representation of EVM instructions\n\n :param asmcode: assembly code for any number of instructions\n :type asmcode: str\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: An generator of Instruction objects\n :rtype: generator[Instructions]\n\n Example use::\n\n >>> assemble_one('''PUSH1 0x60\\n \\\n PUSH1 0x40\\n \\\n MSTORE\\n \\\n PUSH1 0x2\\n \\\n PUSH2 0x108\\n \\\n PUSH1 0x0\\n \\\n POP\\n \\\n SSTORE\\n \\\n PUSH1 0x40\\n \\\n MLOAD\\n \\\n ''')\n\n \"\"\"\n arg_0 = arg_0.split('\\n')\n arg_0 = iter(arg_0)\n for arg_4 in arg_0:\n if not arg_4.strip():\n continue\n arg_5 = assemble_one(arg_4, arg_1=arg_1, arg_2=arg_2)\n yield arg_5\n arg_1 += arg_5.size"} +{"_id": "doc_7307", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=arg_3):\n \"\"\" Disassemble a single instruction from a bytecode\n\n :param bytecode: the bytecode stream\n :type bytecode: str | bytes | bytearray | iterator\n :param pc: program counter of the instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: an Instruction object\n :rtype: Instruction\n\n Example use::\n\n >>> print Func('\\x60\\x10')\n\n \"\"\"\n arg_4 = instruction_tables[arg_2]\n if isinstance(arg_0, bytes):\n arg_0 = bytearray(arg_0)\n if isinstance(arg_0, str):\n arg_0 = bytearray(arg_0.encode('latin-1'))\n\n arg_0 = iter(arg_0)\n try:\n arg_5 = next(arg_0)\n except StopIteration:\n return\n\n assert isinstance(arg_5, int)\n\n arg_6 = copy.copy(arg_4.get(arg_5, None))\n if arg_6 is None:\n arg_6 = Instruction(arg_5, 'INVALID', 0, 0, 0, 0, 'Unspecified invalid instruction.')\n arg_6.pc = arg_1\n\n try:\n if arg_6.has_operand:\n arg_6.parse_operand(arg_0)\n except ParseError:\n arg_6 = None\n finally:\n return arg_6"} +{"_id": "doc_7308", "title": "", "text": "def Func(arg_0, arg_1=0, 
arg_2=arg_3):\n \"\"\" Disassemble all instructions in bytecode\n\n :param bytecode: an evm bytecode (binary)\n :type bytecode: str | bytes | bytearray | iterator\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: An generator of Instruction objects\n :rtype: list[Instruction]\n\n Example use::\n\n >>> for inst in Func(bytecode):\n ... print(instr)\n\n ...\n PUSH1 0x60\n PUSH1 0x40\n MSTORE\n PUSH1 0x2\n PUSH2 0x108\n PUSH1 0x0\n POP\n SSTORE\n PUSH1 0x40\n MLOAD\n\n\n \"\"\"\n if isinstance(arg_0, bytes):\n arg_0 = bytearray(arg_0)\n if isinstance(arg_0, str):\n arg_0 = bytearray(arg_0.encode('latin-1'))\n\n arg_0 = iter(arg_0)\n while True:\n arg_4 = disassemble_one(arg_0, arg_1=arg_1, arg_2=arg_2)\n if not arg_4:\n return\n arg_1 += arg_4.size\n yield arg_4"} +{"_id": "doc_7309", "title": "", "text": "def Func(arg_0):\n \"\"\" Convert block number to fork name.\n\n :param block_number: block number\n :type block_number: int\n :return: fork name\n :rtype: str\n\n Example use::\n\n >>> Func(0)\n ...\n \"frontier\"\n >>> Func(4370000)\n ...\n \"byzantium\"\n >>> Func(4370001)\n ...\n \"byzantium\"\n \"\"\"\n arg_1 = {\n 0: \"frontier\",\n 1150000: \"homestead\",\n # 1920000 Dao \n 2463000: \"tangerine_whistle\",\n 2675000: \"spurious_dragon\",\n 4370000: \"byzantium\",\n #7280000: \"constantinople\", # Same Block as petersburg, commented to avoid conflicts\n 7280000: \"petersburg\",\n 9999999: \"serenity\" # to be replaced after Serenity launch\n }\n arg_2 = list(arg_1.values())\n arg_3 = list(arg_1.keys())\n return arg_2[bisect(arg_3, arg_0) - 1]"} +{"_id": "doc_7310", "title": "", "text": "def Func(arg_0):\n \"\"\"Disconnects from the websocket connection and joins the Thread.\n\n :return:\n \"\"\"\n arg_0.log.debug(\"Func(): Disconnecting from API..\")\n arg_0.reconnect_required.clear()\n arg_0.Func_called.set()\n if arg_0.socket:\n arg_0.socket.close()\n arg_0.join(timeout=1)"} +{"_id": "doc_7311", "title": "", "text": "def Func(arg_0):\n \"\"\"Issues a Funcion by setting the Func_required event.\n\n :return:\n \"\"\"\n # Reconnect attempt at self.Func_interval\n arg_0.log.debug(\"Func(): Initialzion Func sequence..\")\n arg_0.connected.clear()\n arg_0.Func_required.set()\n if arg_0.socket:\n arg_0.socket.close()"} +{"_id": "doc_7312", "title": "", "text": "def Func(arg_0):\n \"\"\"Creates a websocket connection.\n\n :return:\n \"\"\"\n arg_0.log.debug(\"Func(): Initializing Connection..\")\n arg_0.socket = websocket.WebSocketApp(\n arg_0.url,\n on_open=arg_0._on_open,\n on_message=arg_0._on_message,\n on_error=arg_0._on_error,\n on_close=arg_0._on_close\n )\n\n if 'ca_certs' not in arg_0.sslopt.keys():\n arg_2 = ssl.get_default_verify_paths()\n arg_0.sslopt['ca_certs'] = arg_2.cafile\n\n arg_0.log.debug(\"Func(): Starting Connection..\")\n arg_0.socket.run_forever(arg_3=arg_0.sslopt,\n http_proxy_host=arg_0.http_proxy_host,\n http_proxy_port=arg_0.http_proxy_port,\n http_proxy_auth=arg_0.http_proxy_auth,\n http_no_proxy=arg_0.http_no_proxy)\n\n # stop outstanding ping/pong timers\n arg_0._stop_timers()\n while arg_0.reconnect_required.is_set():\n if not arg_0.disconnect_called.is_set():\n arg_0.log.info(\"Attempting to connect again in %s seconds.\"\n % arg_0.reconnect_interval)\n arg_0.state = \"unavailable\"\n time.sleep(arg_0.reconnect_interval)\n\n # We need to set this flag since closing the socket will\n # set it to False\n arg_0.socket.keep_running = True\n arg_0.socket.sock = None\n 
arg_0.socket.run_forever(arg_3=arg_0.sslopt,\n http_proxy_host=arg_0.http_proxy_host,\n http_proxy_port=arg_0.http_proxy_port,\n http_proxy_auth=arg_0.http_proxy_auth,\n http_no_proxy=arg_0.http_no_proxy)\n else:\n break"} +{"_id": "doc_7313", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Handles and passes received data to the appropriate handlers.\n\n :return:\n \"\"\"\n arg_0._stop_timers()\n\n arg_3, arg_4 = arg_2, time.time()\n arg_0.log.debug(\"Func(): Received new message %s at %s\",\n arg_3, arg_4)\n try:\n arg_5 = json.loads(arg_3)\n except json.JSONDecodeError:\n # Something wrong with this data, log and discard\n return\n\n # Handle data\n if isinstance(arg_5, dict):\n # This is a system message\n arg_0._system_handler(arg_5, arg_4)\n else:\n # This is a list of data\n if arg_5[1] == 'hb':\n arg_0._heartbeat_handler()\n else:\n arg_0._data_handler(arg_5, arg_4)\n\n # We've received data, reset timers\n arg_0._start_timers()"} +{"_id": "doc_7314", "title": "", "text": "def Func(arg_0):\n \"\"\"Stops ping, pong and connection timers.\n\n :return:\n \"\"\"\n if arg_0.ping_timer:\n arg_0.ping_timer.cancel()\n\n if arg_0.connection_timer:\n arg_0.connection_timer.cancel()\n\n if arg_0.pong_timer:\n arg_0.pong_timer.cancel()\n arg_0.log.debug(\"Func(): Timers stopped.\")"} +{"_id": "doc_7315", "title": "", "text": "def Func(arg_0):\n \"\"\"Sends a ping message to the API and starts pong timers.\n\n :return:\n \"\"\"\n arg_0.log.debug(\"Func(): Sending ping to API..\")\n arg_0.socket.send(json.dumps({'event': 'ping'}))\n arg_0.pong_timer = Timer(arg_0.pong_timeout, arg_0._check_pong)\n arg_0.pong_timer.start()"} +{"_id": "doc_7316", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=False, **arg_5):\n \"\"\"Sends the given Payload to the API via the websocket connection.\n\n :param kwargs: payload paarameters as key=value pairs\n :return:\n \"\"\"\n if arg_4:\n arg_6 = str(int(time.time() * 10000000))\n arg_7 = 'AUTH' + arg_6\n arg_8 = hmac.new(arg_2.encode(), arg_7.encode(),\n hashlib.sha384).hexdigest()\n\n arg_9 = {'event': 'auth', 'apiKey': arg_1, 'authSig': arg_8,\n 'authPayload': arg_7, 'authNonce': arg_6}\n arg_9 = json.dumps(arg_9)\n elif arg_3:\n arg_9 = json.dumps(arg_3)\n else:\n arg_9 = json.dumps(arg_5)\n arg_0.log.debug(\"Func(): Sending payload to API: %s\", arg_9)\n try:\n arg_0.socket.Func(arg_9)\n except websocket.WebSocketConnectionClosedException:\n arg_0.log.error(\"Func(): Did not Func out payload %s - client not connected. 
\", arg_5)"} +{"_id": "doc_7317", "title": "", "text": "def Func(arg_0):\n \"\"\"Unpauses the connection.\n\n Send a message up to client that he should re-subscribe to all\n channels.\n\n :return:\n \"\"\"\n arg_0.log.debug(\"Func(): Clearing paused() Flag!\")\n arg_0.paused.clear()\n arg_0.log.debug(\"Func(): Re-subscribing softly..\")\n arg_0._resubscribe(soft=True)"} +{"_id": "doc_7318", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Distributes system messages to the appropriate handler.\n\n System messages include everything that arrives as a dict,\n or a list containing a heartbeat.\n\n :param data:\n :param ts:\n :return:\n \"\"\"\n arg_0.log.debug(\"Func(): Received a system message: %s\", arg_1)\n # Unpack the data\n arg_3 = arg_1.pop('event')\n if arg_3 == 'pong':\n arg_0.log.debug(\"Func(): Distributing %s to _pong_handler..\",\n arg_1)\n arg_0._pong_handler()\n elif arg_3 == 'info':\n arg_0.log.debug(\"Func(): Distributing %s to _info_handler..\",\n arg_1)\n arg_0._info_handler(arg_1)\n elif arg_3 == 'error':\n arg_0.log.debug(\"Func(): Distributing %s to _error_handler..\",\n arg_1)\n arg_0._error_handler(arg_1)\n elif arg_3 in ('subscribed', 'unsubscribed', 'conf', 'auth', 'unauth'):\n arg_0.log.debug(\"Func(): Distributing %s to \"\n \"_response_handler..\", arg_1)\n arg_0._response_handler(arg_3, arg_1, arg_2)\n else:\n arg_0.log.error(\"Unhandled event: %s, data: %s\", arg_3, arg_1)"} +{"_id": "doc_7319", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Handle INFO messages from the API and issues relevant actions.\n\n :param data:\n :param ts:\n \"\"\"\n\n def raise_exception():\n \"\"\"Log info code as error and raise a ValueError.\"\"\"\n arg_0.log.error(\"%s: %s\", arg_1['code'], arg_2[arg_1['code']])\n raise ValueError(\"%s: %s\" % (arg_1['code'], arg_2[arg_1['code']]))\n\n if 'code' not in arg_1 and 'version' in arg_1:\n arg_0.log.info('Initialized Client on API Version %s', arg_1['version'])\n return\n\n arg_2 = {20000: 'Invalid User given! 
Please make sure the given ID is correct!',\n 20051: 'Stop/Restart websocket server '\n '(please try to reconnect)',\n 20060: 'Refreshing data from the trading engine; '\n 'please pause any acivity.',\n 20061: 'Done refreshing data from the trading engine.'\n ' Re-subscription advised.'}\n\n arg_3 = {20051: arg_0.reconnect, 20060: arg_0._pause,\n 20061: arg_0._unpause}\n\n if 'version' in arg_1:\n arg_0.log.info(\"API version: %i\", arg_1['version'])\n return\n\n try:\n arg_0.log.info(arg_2[arg_1['code']])\n arg_3[arg_1['code']]()\n except KeyError as e:\n arg_0.log.exception(e)\n arg_0.log.error(\"Unknown Info code %s!\", arg_1['code'])\n raise"} +{"_id": "doc_7320", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Handles data messages by passing them up to the client.\n\n :param data:\n :param ts:\n :return:\n \"\"\"\n # Pass the data up to the Client\n arg_0.log.debug(\"Func(): Passing %s to client..\",\n arg_1)\n arg_0.pass_to_client('data', arg_1, arg_2)"} +{"_id": "doc_7321", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Resubscribes to all channels found in self.channel_configs.\n\n :param soft: if True, unsubscribes first.\n :return: None\n \"\"\"\n # Restore non-default Bitfinex websocket configuration\n if arg_0.bitfinex_config:\n arg_0.send(**arg_0.bitfinex_config)\n arg_2 = []\n while True:\n try:\n arg_3, arg_4 = arg_0.channel_configs.popitem(last=True if arg_1 else False)\n except KeyError:\n break\n arg_2.append((arg_3, arg_4.copy()))\n if arg_3 == 'auth':\n arg_0.send(**arg_4, auth=True)\n continue\n if arg_1:\n arg_4['event'] = 'unsubscribe'\n arg_0.send(**arg_4)\n\n # Resubscribe for soft start.\n if arg_1:\n for arg_3, arg_4 in reversed(arg_2):\n arg_0.channel_configs[arg_3] = arg_4\n arg_0.send(**arg_4)\n else:\n for arg_3, arg_4 in arg_2:\n arg_0.channel_configs[arg_3] = arg_4"} +{"_id": "doc_7322", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Handles authentication responses.\n\n :param dtype:\n :param data:\n :param ts:\n :return:\n \"\"\"\n # Contains keys status, chanId, userId, caps\n if arg_1 == 'unauth':\n raise NotImplementedError\n arg_4 = arg_2.pop('chanId')\n arg_5 = arg_2.pop('userId')\n\n arg_6 = ('auth', arg_5)\n arg_0.channel_handlers[arg_6] = arg_4\n arg_0.channel_directory[arg_6] = arg_4\n arg_0.channel_directory[arg_4] = arg_6"} +{"_id": "doc_7323", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Handles configuration messages.\n\n :param dtype:\n :param data:\n :param ts:\n :return:\n \"\"\"\n arg_0.log.debug(\"Func: %s - %s - %s\", arg_1, arg_2, arg_3)\n arg_0.log.info(\"Configuration accepted: %s\", arg_1)\n return"} +{"_id": "doc_7324", "title": "", "text": "def Func(arg_0):\n \"\"\"Reset the client.\n\n :return:\n \"\"\"\n arg_0.conn.reconnect()\n while not arg_0.conn.connected.is_set():\n log.info(\"Func(): Waiting for connection to be set up..\")\n time.sleep(1)\n\n for arg_1 in arg_0.channel_configs:\n arg_0.conn.send(**arg_0.channel_configs[arg_1])"} +{"_id": "doc_7325", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Return a queue containing all received Func data.\n\n :param pair: str, Symbol pair to request data for\n :param timeframe: str\n :return: Queue()\n \"\"\"\n arg_2 = '1m' if not arg_2 else arg_2\n arg_3 = ('Func', arg_1, arg_2)\n return arg_0.queue_processor.Func[arg_3]"} +{"_id": "doc_7326", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=False,\n arg_3=False, arg_4=False, **arg_5):\n \"\"\"Send Funcuration to websocket server\n\n 
:param decimals_as_strings: bool, turn on/off decimals as strings\n :param ts_as_dates: bool, decide to request timestamps as dates instead\n :param sequencing: bool, turn on sequencing\n\t:param ts: bool, request the timestamp to be appended to every array\n sent by the server\n :param kwargs:\n :return:\n \"\"\"\n arg_6 = 0\n if arg_1:\n arg_6 += 8\n if arg_2:\n arg_6 += 32\n if arg_4:\n arg_6 += 32768\n if arg_3:\n arg_6 += 65536\n arg_7 = {'event': 'conf', 'flags': arg_6}\n arg_7.update(arg_5)\n arg_0.conn.bitfinex_Func = arg_7\n arg_0.conn.send(**arg_7)"} +{"_id": "doc_7327", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Unsubscribe to the passed pair's ticker channel.\n\n :param pair: str, Symbol pair to request data for\n :param kwargs:\n :return:\n \"\"\"\n arg_3 = ('ticker', arg_1)\n arg_0._unsubscribe('ticker', arg_3, symbol=arg_1, **arg_2)"} +{"_id": "doc_7328", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Subscribe to the passed pair's order book channel.\n\n :param pair: str, Symbol pair to request data for\n :param kwargs:\n :return:\n \"\"\"\n arg_3 = ('book', arg_1)\n arg_0._subscribe('book', arg_3, symbol=arg_1, **arg_2)"} +{"_id": "doc_7329", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Unsubscribe to the passed pair's order book channel.\n\n :param pair: str, Symbol pair to request data for\n :param kwargs:\n :return:\n \"\"\"\n arg_3 = ('book', arg_1)\n arg_0._unsubscribe('book', arg_3, symbol=arg_1, **arg_2)"} +{"_id": "doc_7330", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Subscribe to the passed pair's raw order book channel.\n\n :param pair: str, Symbol pair to request data for\n :param prec:\n :param kwargs:\n :return:\n \"\"\"\n arg_4 = ('raw_book', arg_1)\n arg_2 = 'R0' if arg_2 is None else arg_2\n arg_0._subscribe('book', arg_4, arg_1=arg_1, arg_2=arg_2, **arg_3)"} +{"_id": "doc_7331", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, **arg_3):\n \"\"\"Unsubscribe to the passed pair's raw order book channel.\n\n :param pair: str, Symbol pair to request data for\n :param prec:\n :param kwargs:\n :return:\n \"\"\"\n arg_4 = ('raw_book', arg_1)\n arg_2 = 'R0' if arg_2 is None else arg_2\n arg_0._unsubscribe('book', arg_4, arg_1=arg_1, arg_2=arg_2, **arg_3)"} +{"_id": "doc_7332", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Subscribe to the passed pair's trades channel.\n\n :param pair: str, Symbol pair to request data for\n :param kwargs:\n :return:\n \"\"\"\n arg_3 = ('trades', arg_1)\n arg_0._subscribe('trades', arg_3, symbol=arg_1, **arg_2)"} +{"_id": "doc_7333", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Unsubscribe to the passed pair's trades channel.\n\n :param pair: str, Symbol pair to request data for\n :param kwargs:\n :return:\n \"\"\"\n arg_3 = ('trades', arg_1)\n arg_0._unsubscribe('trades', arg_3, symbol=arg_1, **arg_2)"} +{"_id": "doc_7334", "title": "", "text": "def Func(arg_0):\n \"\"\"Authenticate with the Bitfinex API.\n\n :return:\n \"\"\"\n if not arg_0.key and not arg_0.secret:\n raise ValueError(\"Must supply both key and secret key for API!\")\n arg_0.channel_configs['auth'] = {'api_key': arg_0.key, 'secret': arg_0.secret}\n arg_0.conn.send(api_key=arg_0.key, secret=arg_0.secret, auth=True)"} +{"_id": "doc_7335", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for device command messages, parses source device from topic string and\n passes the information on to the 
registered device command callback\n \"\"\"\n try:\n arg_4 = Command(arg_3, arg_0._messageCodecs)\n except InvalidEventException as e:\n arg_0.logger.critical(str(e))\n else:\n arg_0.logger.debug(\"Received device command '%s'\" % (arg_4.command))\n if arg_0.commandCallback:\n arg_0.commandCallback(arg_4)"} +{"_id": "doc_7336", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for gateway command messages, parses source device from topic string and\n passes the information on to the registered device command callback\n \"\"\"\n try:\n arg_4 = Command(arg_3, arg_0._messageCodecs)\n except InvalidEventException as e:\n arg_0.logger.critical(str(e))\n else:\n arg_0.logger.debug(\"Received gateway command '%s'\" % (arg_4.command))\n if arg_0.deviceCommandCallback:\n arg_0.deviceCommandCallback(arg_4)"} +{"_id": "doc_7337", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for gateway notification messages, parses source device from topic string and\n passes the information on to the registered device command callback\n \"\"\"\n try:\n arg_4 = Notification(arg_3, arg_0._messageCodecs)\n except InvalidEventException as e:\n arg_0.logger.critical(str(e))\n else:\n arg_0.logger.debug(\"Received Notification\")\n if arg_0.notificationCallback:\n arg_0.notificationCallback(arg_4)"} +{"_id": "doc_7338", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Register one or more new device types, each request can contain a maximum of 512KB.\n \"\"\"\n\n arg_2 = arg_0._apiClient.post(\"api/v0002/device/types\", arg_1)\n\n if arg_2.status_code == 201:\n return DeviceType(apiClient=arg_0._apiClient, **arg_2.json())\n else:\n raise ApiException(arg_2)"} +{"_id": "doc_7339", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=0, arg_5=None):\n \"\"\"\n Publish an event to Watson IoT Platform.\n\n # Parameters\n event (string): Name of this event\n msgFormat (string): Format of the data for this event\n data (dict): Data for this event\n qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)\n on_publish(function): A function that will be called when receipt \n of the publication is confirmed. 
\n \n # Callback and QoS\n The use of the optional #on_publish function has different implications depending \n on the level of qos used to publish the event: \n \n - qos 0: the client has asynchronously begun to send the event\n - qos 1 and 2: the client has confirmation of delivery from the platform\n \"\"\"\n arg_6 = \"iot-2/evt/{event}/fmt/{msg_format}\".format(arg_1=arg_1, msg_format=arg_2)\n return arg_0._Func(arg_6, arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_7340", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Update an existing device\n \"\"\"\n\n if not isinstance(arg_1, DeviceUid) and isinstance(arg_1, dict):\n arg_1 = DeviceUid(**arg_1)\n\n arg_5 = \"api/v0002/device/types/%s/devices/%s\" % (arg_1.typeId, arg_1.deviceId)\n\n arg_6 = {\"status\": arg_4, \"deviceInfo\": arg_3, \"metadata\": arg_2}\n\n arg_7 = arg_0._apiClient.put(arg_5, arg_6)\n if arg_7.status_code == 200:\n return Device(apiClient=arg_0._apiClient, **arg_7.json())\n else:\n raise ApiException(arg_7)"} +{"_id": "doc_7341", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Iterate through all Connectors\n \"\"\"\n arg_3 = {}\n if arg_1:\n arg_3[\"status\"] = arg_1\n if arg_2:\n arg_3[\"connectedAfter\"] = arg_2\n\n return IterableClientStatusList(arg_0._apiClient, filters=arg_3)"} +{"_id": "doc_7342", "title": "", "text": "def Func(arg_0):\n \"\"\"\n List all device management extension packages\n \"\"\"\n arg_1 = \"api/v0002/mgmt/custom/bundle\"\n arg_2 = arg_0._apiClient.get(arg_1)\n\n if arg_2.status_code == 200:\n return arg_2.json()\n else:\n raise ApiException(arg_2)"} +{"_id": "doc_7343", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create a new device management extension package\n In case of failure it throws APIException\n \"\"\"\n arg_2 = \"api/v0002/mgmt/custom/bundle\"\n arg_3 = arg_0._apiClient.post(arg_2, arg_1)\n\n if arg_3.status_code == 201:\n return arg_3.json()\n else:\n raise ApiException(arg_3)"} +{"_id": "doc_7344", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Update a schema. 
Throws APIException on failure.\n \"\"\"\n arg_3 = ApiClient.oneSchemaUrl % (arg_0.host, \"/draft\", arg_1)\n arg_4 = {\"schemaDefinition\": arg_2}\n arg_5 = requests.put(arg_3, auth=arg_0.credentials, headers={\"Content-Type\":\"application/json\"},\n data=json.dumps(arg_4), verify=arg_0.verify)\n if arg_5.status_code == 200:\n arg_0.logger.debug(\"Schema updated\")\n else:\n raise ibmiotf.APIException(arg_5.status_code, \"HTTP error updating schema\", arg_5)\n return arg_5.json()"} +{"_id": "doc_7345", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Disconnect the client from IBM Watson IoT Platform\n \"\"\"\n # self.logger.info(\"Closing connection to the IBM Watson IoT Platform\")\n arg_0.client.Func()\n # If we don't call loop_stop() it appears we end up with a zombie thread which continues to process\n # network traffic, preventing any subsequent attempt to reconnect using connect()\n arg_0.client.loop_stop()\n arg_0.logger.info(\"Closed connection to the IBM Watson IoT Platform\")"} +{"_id": "doc_7346", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Called when the broker responds to our connection request.\n\n The value of rc determines success or not:\n 0: Connection successful\n 1: Connection refused - incorrect protocol version\n 2: Connection refused - invalid client identifier\n 3: Connection refused - server unavailable\n 4: Connection refused - bad username or password\n 5: Connection refused - not authorised\n 6-255: Currently unused.\n \"\"\"\n if arg_4 == 0:\n arg_0.connectEvent.set()\n arg_0.logger.info(\"Connected successfully: %s\" % (arg_0.clientId))\n\n # Restoring previous subscriptions\n with arg_0._subLock:\n if len(arg_0._subscriptions) > 0:\n for arg_5 in arg_0._subscriptions:\n # We use the underlying mqttclient subscribe method rather than _subscribe because we are\n # claiming a lock on the subscriptions list and do not want anything else to modify it,\n # which that method does\n (arg_6, arg_7) = arg_0.client.subscribe(arg_5, qos=arg_0._subscriptions[arg_5])\n if arg_6 != paho.MQTT_ERR_SUCCESS:\n arg_0._logAndRaiseException(ConnectionException(\"Unable to subscribe to %s\" % arg_5))\n arg_0.logger.debug(\"Restored %s previous subscriptions\" % len(arg_0._subscriptions))\n elif arg_4 == 1:\n arg_0._logAndRaiseException(ConnectionException(\"Incorrect protocol version\"))\n elif arg_4 == 2:\n arg_0._logAndRaiseException(ConnectionException(\"Invalid client identifier\"))\n elif arg_4 == 3:\n arg_0._logAndRaiseException(ConnectionException(\"Server unavailable\"))\n elif arg_4 == 4:\n arg_0._logAndRaiseException(\n ConnectionException(\"Bad username or password: (%s, %s)\" % (arg_0.username, arg_0.password))\n )\n elif arg_4 == 5:\n arg_0._logAndRaiseException(\n ConnectionException(\"Not authorized: s (%s, %s, %s)\" % (arg_0.clientId, arg_0.username, arg_0.password))\n )\n else:\n arg_0._logAndRaiseException(ConnectionException(\"Unexpected connection failure: %s\" % (arg_4)))"} +{"_id": "doc_7347", "title": "", "text": "def Func(arg_0, arg_1=\"+\", arg_2=\"+\", arg_3=\"+\", arg_4=\"+\", arg_5=0):\n \"\"\"\n Subscribe to device event messages\n\n # Parameters\n typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)\n deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)\n eventId (string): eventId for the subscription, optional. 
Defaults to all events (MQTT `+` wildcard)\n msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)\n qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)\n\n # Returns\n int: If the subscription was successful then the return Message ID (mid) for the subscribe request\n will be returned. The mid value can be used to track the subscribe request by checking against\n the mid argument if you register a subscriptionCallback method.\n If the subscription fails then the return value will be `0`\n \"\"\"\n if arg_0._config.isQuickstart() and arg_2 == \"+\":\n arg_0.logger.warning(\n \"QuickStart applications do not support wildcard subscription to events from all devices\"\n )\n return 0\n\n arg_6 = \"iot-2/type/%s/id/%s/evt/%s/fmt/%s\" % (arg_1, arg_2, arg_3, arg_4)\n return arg_0._subscribe(arg_6, arg_5)"} +{"_id": "doc_7348", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None, arg_6=0, arg_7=None):\n \"\"\"\n Publish a command to a device\n\n # Parameters\n typeId (string) : The type of the device this command is to be published to\n deviceId (string): The id of the device this command is to be published to\n command (string) : The name of the command\n msgFormat (string) : The format of the command payload\n data (dict) : The command data\n qos (int) : The equivalent MQTT semantics of quality of service using the same constants (optional, defaults to `0`)\n on_publish (function) : A function that will be called when receipt of the publication is confirmed. This has\n different implications depending on the qos:\n - qos 0 : the client has asynchronously begun to send the event\n - qos 1 and 2 : the client has confirmation of delivery from WIoTP\n \"\"\"\n if arg_0._config.isQuickstart():\n arg_0.logger.warning(\"QuickStart applications do not support sending commands\")\n return False\n if not arg_0.connectEvent.wait(timeout=10):\n return False\n else:\n arg_8 = \"iot-2/type/%s/id/%s/cmd/%s/fmt/%s\" % (arg_1, arg_2, arg_3, arg_4)\n\n # Raise an exception if there is no codec for this msgFormat\n if arg_0.getMessageCodec(arg_4) is None:\n raise MissingMessageEncoderException(arg_4)\n\n arg_9 = arg_0.getMessageCodec(arg_4).encode(arg_5, datetime.now())\n arg_10 = arg_0.client.publish(arg_8, arg_9=arg_9, arg_6=arg_6, retain=False)\n if arg_10[0] == paho.MQTT_ERR_SUCCESS:\n # Because we are dealing with aync pub/sub model and callbacks it is possible that\n # the _onPublish() callback for this mid is called before we obtain the lock to place\n # the mid into the _onPublishCallbacks list.\n #\n # _onPublish knows how to handle a scenario where the mid is not present (no nothing)\n # in this scenario we will need to invoke the callback directly here, because at the time\n # the callback was invoked the mid was not yet in the list.\n with arg_0._messagesLock:\n if arg_10[1] in arg_0._onPublishCallbacks:\n # paho callback beat this thread so call callback inline now\n del arg_0._onPublishCallbacks[arg_10[1]]\n if arg_7 is not None:\n arg_7()\n else:\n # this thread beat paho callback so set up for call later\n arg_0._onPublishCallbacks[arg_10[1]] = arg_7\n return True\n else:\n return False"} +{"_id": "doc_7349", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for messages that have not been handled by any of the specific internal callbacks, these\n messages are not passed on to any user provided callback\n \"\"\"\n arg_0.logger.warning(\n \"Received messaging on 
unsupported topic '%s' on topic '%s'\" % (arg_3.payload, arg_3.topic)\n )"} +{"_id": "doc_7350", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for device event messages, parses source device from topic string and\n passes the information on to the registerd device event callback\n \"\"\"\n try:\n arg_4 = Event(arg_3, arg_0._messageCodecs)\n arg_0.logger.debug(\"Received event '%s' from %s:%s\" % (arg_4.eventId, arg_4.typeId, arg_4.deviceId))\n if arg_0.deviceEventCallback:\n arg_0.deviceEventCallback(arg_4)\n except InvalidEventException as e:\n arg_0.logger.critical(str(e))"} +{"_id": "doc_7351", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for device status messages, parses source device from topic string and\n passes the information on to the registerd device status callback\n \"\"\"\n try:\n arg_4 = Status(arg_3)\n arg_0.logger.debug(\"Received %s action from %s\" % (arg_4.action, arg_4.clientId))\n if arg_0.deviceStatusCallback:\n arg_0.deviceStatusCallback(arg_4)\n except InvalidEventException as e:\n arg_0.logger.critical(str(e))"} +{"_id": "doc_7352", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Internal callback for application command messages, parses source application from topic string and\n passes the information on to the registerd applicaion status callback\n \"\"\"\n try:\n arg_4 = Status(arg_3)\n arg_0.logger.debug(\"Received %s action from %s\" % (arg_4.action, arg_4.clientId))\n if arg_0.appStatusCallback:\n arg_0.appStatusCallback(arg_4)\n except InvalidEventException as e:\n arg_0.logger.critical(str(e))"} +{"_id": "doc_7353", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Retrieves the last cached message for specified event from a specific device.\n \"\"\"\n\n if not isinstance(arg_1, DeviceUid) and isinstance(arg_1, dict):\n arg_1 = DeviceUid(**arg_1)\n\n arg_3 = \"api/v0002/device/types/%s/devices/%s/events/%s\" % (arg_1.typeId, arg_1.deviceId, arg_2)\n arg_4 = arg_0._apiClient.Func(arg_3)\n\n if arg_4.status_code == 200:\n return LastEvent(**arg_4.json())\n else:\n raise ApiException(arg_4)"} +{"_id": "doc_7354", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieves a list of the last cached message for all events from a specific device.\n \"\"\"\n\n if not isinstance(arg_1, DeviceUid) and isinstance(arg_1, dict):\n arg_1 = DeviceUid(**arg_1)\n\n arg_2 = \"api/v0002/device/types/%s/devices/%s/events\" % (arg_1.typeId, arg_1.deviceId)\n arg_3 = arg_0._apiClient.get(arg_2)\n\n if arg_3.status_code == 200:\n arg_4 = []\n for arg_5 in arg_3.json():\n arg_4.append(LastEvent(**arg_5))\n return arg_4\n else:\n raise ApiException(arg_3)"} +{"_id": "doc_7355", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Initiates a device management request, such as reboot.\n In case of failure it throws APIException\n \"\"\"\n arg_2 = MgmtRequests.mgmtRequests\n arg_3 = arg_0._apiClient.post(arg_2, arg_1)\n\n if arg_3.status_code == 202:\n return arg_3.json()\n else:\n raise ApiException(arg_3)"} +{"_id": "doc_7356", "title": "", "text": "def Func(arg_0):\n \"\"\"Force a flush of the index to storage. 
Renders index\n inaccessible.\"\"\"\n if arg_0.handle:\n arg_0.handle.destroy()\n arg_0.handle = None\n else:\n raise IOError(\"Unclosable index\")"} +{"_id": "doc_7357", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=False):\n \"\"\"Returns the ``k``-Func objects to the given coordinates.\n\n :param coordinates: sequence or array\n This may be an object that satisfies the numpy array\n protocol, providing the index's dimension * 2 coordinate\n pairs representing the `mink` and `maxk` coordinates in\n each dimension defining the bounds of the query window.\n\n :param num_results: integer\n The number of results to return Func to the given coordinates.\n If two index entries are equidistant, *both* are returned.\n This property means that :attr:`num_results` may return more\n items than specified\n\n :param objects: True / False / 'raw'\n If True, the Func method will return index objects that\n were pickled when they were stored with each index entry, as\n well as the id and bounds of the index entries.\n If 'raw', it will return the object as entered into the database\n without the :class:`rtree.index.Item` wrapper.\n\n Example of finding the three items Func to this one::\n\n >>> from rtree import index\n >>> idx = index.Index()\n >>> idx.insert(4321, (34.37, 26.73, 49.37, 41.73), obj=42)\n >>> hits = idx.Func((0, 0, 10, 10), 3, objects=True)\n \"\"\"\n if arg_3:\n return arg_0._Func_obj(arg_1, arg_2, arg_3)\n arg_4, arg_5 = arg_0.get_coordinate_pointers(arg_1)\n\n arg_6 = ctypes.pointer(ctypes.c_uint64(arg_2))\n\n arg_7 = ctypes.pointer(ctypes.c_int64())\n\n core.rt.Index_NearestNeighbors_id(arg_0.handle,\n arg_4,\n arg_5,\n arg_0.properties.dimension,\n ctypes.byref(arg_7),\n arg_6)\n\n return arg_0._get_ids(arg_7, arg_6.contents.value)"} +{"_id": "doc_7358", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Deletes items from the index with the given ``'id'`` within the\n specified coordinates.\n\n :param id: long integer\n A long integer that is the identifier for this index entry. IDs\n need not be unique to be inserted into the index, and it is up\n to the user to ensure they are unique if this is a requirement.\n\n :param coordinates: sequence or array\n Dimension * 2 coordinate pairs, representing the min\n and max coordinates in each dimension of the item to be\n Funcd from the index. Their ordering will depend on the\n index's :attr:`interleaved` data member.\n These are not the coordinates of a space containing the\n item, but those of the item itself. Together with the\n id parameter, they determine which item will be Funcd.\n This may be an object that satisfies the numpy array protocol.\n\n Example::\n\n >>> from rtree import index\n >>> idx = index.Index()\n >>> idx.Func(4321,\n ... (34.3776829412, 26.7375853734, 49.3776829412,\n ... 41.7375853734))\n\n \"\"\"\n arg_3, arg_4 = arg_0.get_coordinate_pointers(arg_2)\n core.rt.Index_DeleteData(\n arg_0.handle, arg_1, arg_3, arg_4, arg_0.properties.dimension)"} +{"_id": "doc_7359", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Must be overridden. 
Must return a string with the loaded data.\"\"\"\n arg_2.contents.value = arg_0.IllegalStateError\n raise NotImplementedError(\"You must override this method.\")\n return ''"} +{"_id": "doc_7360", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Deletes the item from the container within the specified\n coordinates.\n\n :param obj: object\n Any object.\n\n :param coordinates: sequence or array\n Dimension * 2 coordinate pairs, representing the min\n and max coordinates in each dimension of the item to be\n Funcd from the index. Their ordering will depend on the\n index's :attr:`interleaved` data member.\n These are not the coordinates of a space containing the\n item, but those of the item itself. Together with the\n id parameter, they determine which item will be Funcd.\n This may be an object that satisfies the numpy array protocol.\n\n Example::\n\n >>> from rtree import index\n >>> idx = index.RtreeContainer()\n >>> idx.Func(object(),\n ... (34.3776829412, 26.7375853734, 49.3776829412,\n ... 41.7375853734))\n Traceback (most recent call last):\n ...\n IndexError: object is not in the index\n\n \"\"\"\n try:\n arg_3 = arg_0._objects[arg_5(arg_1)] - 1\n except KeyError:\n raise IndexError('object is not in the index')\n if arg_3 == 0:\n del arg_0._objects[arg_1]\n else:\n arg_0._objects[arg_5(arg_1)] = (arg_3, arg_1)\n return super(RtreeContainer, arg_0).Func(arg_5, arg_2)"} +{"_id": "doc_7361", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Define delay adjustment policy\"\"\"\n if arg_2.status in arg_0.retry_http_codes:\n arg_3 = max(arg_1.delay, 1) * 4\n arg_3 = max(arg_3, arg_0.mindelay)\n arg_3 = min(arg_3, arg_0.maxdelay)\n arg_1.delay = arg_3\n arg_0.stats.inc_value('delay_count')\n elif arg_2.status == 200:\n arg_3 = max(arg_1.delay / 2, arg_0.mindelay)\n if arg_3 < 0.01:\n arg_3 = 0\n arg_1.delay = arg_3"} +{"_id": "doc_7362", "title": "", "text": "def Func(arg_0):\n \"\"\" Convert string into camel case.\n\n Args:\n string: String to convert.\n\n Returns:\n string: Camel case string.\n\n \"\"\"\n\n arg_0 = re.sub(r\"^[\\-_\\.]\", '', str(arg_0))\n if not arg_0:\n return arg_0\n return lowercase(arg_0[0]) + re.sub(r\"[\\-_\\.\\s]([a-z])\", lambda matched: uppercase(matched.group(1)), arg_0[1:])"} +{"_id": "doc_7363", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert string into capital case.\n First letters will be uppercase.\n\n Args:\n string: String to convert.\n\n Returns:\n string: Capital case string.\n\n \"\"\"\n\n arg_0 = str(arg_0)\n if not arg_0:\n return arg_0\n return uppercase(arg_0[0]) + arg_0[1:]"} +{"_id": "doc_7364", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert string into spinal case.\n Join punctuation with backslash.\n\n Args:\n string: String to convert.\n\n Returns:\n string: Spinal cased string.\n\n \"\"\"\n arg_1 = re.sub(r\"_\", r\"\\\\\", snakecase(arg_0))\n\n return arg_1"} +{"_id": "doc_7365", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert string into sentence case.\n First letter capped and each punctuations are joined with space.\n\n Args:\n string: String to convert.\n\n Returns:\n string: Sentence cased string.\n\n \"\"\"\n arg_1 = ' '\n arg_0 = re.sub(r\"[\\-_\\.\\s]\", arg_1, str(arg_0))\n if not arg_0:\n return arg_0\n return capitalcase(trimcase(\n re.sub(r\"[A-Z]\", lambda matched: arg_1 +\n lowercase(matched.group(0)), arg_0)\n ))"} +{"_id": "doc_7366", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert string into snake case.\n Join punctuation with underscore\n\n Args:\n string: String 
to convert.\n\n Returns:\n string: Snake cased string.\n\n \"\"\"\n\n arg_0 = re.sub(r\"[\\-\\.\\s]\", '_', str(arg_0))\n if not arg_0:\n return arg_0\n return lowercase(arg_0[0]) + re.sub(r\"[A-Z]\", lambda matched: '_' + lowercase(matched.group(0)), arg_0[1:])"} +{"_id": "doc_7367", "title": "", "text": "def Func(arg_0):\n \"\"\" Attempt an import of the specified application \"\"\"\n\n if isinstance(arg_0.application, str):\n return util.import_app(arg_0.application)\n else:\n return arg_0.application"} +{"_id": "doc_7368", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Initializes the Flask application with Common.\"\"\"\n if not hasattr(arg_1, 'extensions'):\n arg_1.extensions = {}\n\n if 'common' in arg_1.extensions:\n raise RuntimeError(\"Flask-Common extension already initialized\")\n\n arg_1.extensions['common'] = arg_0\n arg_0.app = arg_1\n\n if 'COMMON_FILESERVER_DISABLED' not in arg_1.config:\n with arg_1.test_request_context():\n\n # Configure WhiteNoise.\n arg_1.wsgi_app = WhiteNoise(arg_1.wsgi_app, root=url_for('static', filename='')[1:])\n\n arg_0.cache = Cache(arg_1, config={'CACHE_TYPE': arg_1.config.get(\"COMMON_CACHE_TYPE\", 'simple')})\n\n @arg_1.before_request\n def before_request_callback():\n arg_5.start_time = maya.now()\n\n @arg_1.after_request\n def after_request_callback(arg_7):\n if 'COMMON_POWERED_BY_DISABLED' not in current_app.config:\n arg_7.headers['X-Powered-By'] = 'Flask'\n if 'COMMON_PROCESSED_TIME_DISABLED' not in current_app.config:\n arg_7.headers['X-Processed-Time'] = maya.now().epoch - arg_5.start_time.epoch\n return arg_7\n\n @arg_1.route('/favicon.ico')\n def favicon():\n return redirect(url_for('static', filename='favicon.ico'), code=301)"} +{"_id": "doc_7369", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=(0.5, 0.5)):\n \"\"\"\n Return a PIL Image instance cropped from `image`.\n\n Image has an aspect ratio provided by dividing `width` / `height`),\n sized down to `width`x`height`. 
Any 'excess pixels' are trimmed away\n in respect to the pixel of `image` that corresponds to `ppoi` (Primary\n Point of Interest).\n\n `image`: A PIL Image instance\n `width`: Integer, width of the image to return (in pixels)\n `height`: Integer, height of the image to return (in pixels)\n `ppoi`: A 2-tuple of floats with values greater than 0 and less than 1\n These values are converted into a cartesian coordinate that\n signifies the 'center pixel' which the crop will center on\n (to trim the excess from the 'long side').\n\n Determines whether to trim away pixels from either the left/right or\n top/bottom sides by comparing the aspect ratio of `image` vs the\n aspect ratio of `width`x`height`.\n\n Will trim from the left/right sides if the aspect ratio of `image`\n is greater-than-or-equal-to the aspect ratio of `width`x`height`.\n\n Will trim from the top/bottom sides if the aspect ration of `image`\n is less-than the aspect ratio or `width`x`height`.\n\n Similar to Kevin Cazabon's ImageOps.fit method but uses the\n ppoi value as an absolute centerpoint (as opposed as a\n percentage to trim off the 'long sides').\n \"\"\"\n arg_5 = int(arg_1.size[0] * arg_4[0])\n arg_6 = int(arg_1.size[1] * arg_4[1])\n arg_7 = (arg_5, arg_6)\n # Calculate the aspect ratio of `image`\n arg_8 = float(\n arg_1.size[0]\n ) / float(\n arg_1.size[1]\n )\n arg_9 = float(arg_2) / float(arg_3)\n\n # Figure out if we're trimming from the left/right or top/bottom\n if arg_8 >= arg_9:\n # `image` is wider than what's needed,\n # crop from left/right sides\n arg_10 = int(\n (arg_9 * float(arg_1.size[1])) + 0.5\n )\n arg_11 = arg_1.size[1]\n arg_12 = 0\n arg_13 = arg_11\n arg_14 = arg_7[0] - (arg_10 // 2)\n arg_15 = arg_14 + arg_10\n if arg_14 < 0:\n arg_14 = 0\n arg_15 = arg_14 + arg_10\n elif arg_15 > arg_1.size[0]:\n arg_15 = arg_1.size[0]\n arg_14 = arg_1.size[0] - arg_10\n\n else:\n # `image` is taller than what's needed,\n # crop from top/bottom sides\n arg_10 = arg_1.size[0]\n arg_11 = int(\n (float(arg_1.size[0]) / arg_9) + 0.5\n )\n arg_14 = 0\n arg_15 = arg_10\n arg_12 = arg_7[1] - (arg_11 // 2)\n arg_13 = arg_12 + arg_11\n if arg_12 < 0:\n arg_12 = 0\n arg_13 = arg_12 + arg_11\n elif arg_13 > arg_1.size[1]:\n arg_13 = arg_1.size[1]\n arg_12 = arg_1.size[1] - arg_11\n # Cropping the image from the original image\n arg_16 = arg_1.crop(\n (\n arg_14,\n arg_12,\n arg_15,\n arg_13\n )\n )\n # Resizing the newly cropped image to the size specified\n # (as determined by `width`x`height`)\n return arg_16.resize(\n (arg_2, arg_3),\n Image.ANTIALIAS\n )"} +{"_id": "doc_7370", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n \"\"\"\n Return a BytesIO instance of `image` that fits in a bounding box.\n\n Bounding box dimensions are `width`x`height`.\n \"\"\"\n arg_6 = BytesIO()\n arg_1.thumbnail(\n (arg_4, arg_5),\n Image.ANTIALIAS\n )\n arg_1.save(\n arg_6,\n **arg_3\n )\n return arg_6"} +{"_id": "doc_7371", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3={}):\n \"\"\"Return a BytesIO instance of `image` with inverted colors.\"\"\"\n arg_4 = BytesIO()\n arg_5 = ImageOps.invert(arg_1)\n arg_5.save(\n arg_4,\n **arg_3\n )\n return arg_4"} +{"_id": "doc_7372", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Ensure data is prepped properly before handing off to ImageField.\"\"\"\n if arg_1 is not None:\n if hasattr(arg_1, 'open'):\n arg_1.open()\n return super(VersatileImageFormField, arg_0).Func(arg_1)"} +{"_id": "doc_7373", "title": "", "text": "def Func(arg_0):\n \"\"\"\n 
Process the field's placeholder image.\n\n Ensures the placeholder image has been saved to the same storage class\n as the field in a top level folder with a name specified by\n settings.VERSATILEIMAGEFIELD_SETTINGS['placeholder_directory_name']\n\n This should be called by the VersatileImageFileDescriptor __get__.\n If self.placeholder_image_name is already set it just returns right away.\n \"\"\"\n if arg_0.placeholder_image_name:\n return\n\n arg_1 = None\n arg_2 = arg_0.placeholder_image\n if arg_2:\n if isinstance(arg_2, OnStoragePlaceholderImage):\n arg_3 = arg_2.path\n else:\n arg_3 = arg_2.image_data.name\n arg_1 = os.path.join(\n VERSATILEIMAGEFIELD_PLACEHOLDER_DIRNAME, arg_3\n )\n if not arg_0.storage.exists(arg_1):\n arg_0.storage.save(\n arg_1,\n arg_2.image_data\n )\n arg_0.placeholder_image_name = arg_1"} +{"_id": "doc_7374", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return field's value just before saving.\"\"\"\n arg_3 = super(VersatileImageField, arg_0).Func(arg_1, arg_2)\n arg_0.update_ppoi_field(arg_1)\n return arg_3"} +{"_id": "doc_7375", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Update field's ppoi field, if defined.\n\n This method is hooked up this field's pre_save method to update\n the ppoi immediately before the model instance (`instance`)\n it is associated with is saved.\n\n This field's ppoi can be forced to update with force=True,\n which is how VersatileImageField.pre_save calls this method.\n \"\"\"\n # Nothing to update if the field doesn't have have a ppoi\n # dimension field.\n if not arg_0.ppoi_field:\n return\n\n # getattr will call the VersatileImageFileDescriptor's __get__ method,\n # which coerces the assigned value into an instance of\n # self.attr_class(VersatileImageFieldFile in this case).\n arg_4 = getattr(arg_1, arg_0.attname)\n\n # file should be an instance of VersatileImageFieldFile or should be\n # None.\n arg_5 = None\n if arg_4 and not isinstance(arg_4, tuple):\n if hasattr(arg_4, 'ppoi'):\n arg_5 = arg_4.ppoi\n\n # Update the ppoi field.\n if arg_0.ppoi_field:\n setattr(arg_1, arg_0.ppoi_field, arg_5)"} +{"_id": "doc_7376", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Handle data sent from MultiValueField forms that set ppoi values.\n\n `instance`: The model instance that is being altered via a form\n `data`: The data sent from the form to this field which can be either:\n * `None`: This is unset data from an optional field\n * A two-position tuple: (image_form_data, ppoi_data)\n * `image_form-data` options:\n * `None` the file for this field is unchanged\n * `False` unassign the file form the field\n * `ppoi_data` data structure:\n * `%(x_coordinate)sx%(y_coordinate)s': The ppoi data to\n assign to the unchanged file\n\n \"\"\"\n arg_3 = arg_2\n if arg_2 and isinstance(arg_2, tuple):\n # This value is coming from a MultiValueField\n if arg_2[0] is None:\n # This means the file hasn't changed but we need to\n # update the ppoi\n arg_4 = getattr(arg_1, arg_0.name)\n if arg_2[1]:\n arg_4.ppoi = arg_2[1]\n arg_3 = arg_4\n elif arg_2[0] is False:\n # This means the 'Clear' checkbox was checked so we\n # need to empty the field\n arg_3 = ''\n else:\n # This means there is a new upload so we need to unpack\n # the tuple and assign the first position to the field\n # attribute\n arg_3 = arg_2[0]\n super(VersatileImageField, arg_0).Func(arg_1, arg_3)"} +{"_id": "doc_7377", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Unregister the FilteredImage subclass currently 
assigned to attr_name.\n\n If a FilteredImage subclass isn't already registered to filters.\n `attr_name` NotRegistered will raise.\n \"\"\"\n if arg_1 not in arg_0._filter_registry:\n raise NotRegistered(\n 'No FilteredImage subclass is registered to %s' % arg_1\n )\n else:\n del arg_0._filter_registry[arg_1]"} +{"_id": "doc_7378", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the appropriate URL.\n\n URL is constructed based on these field conditions:\n * If empty (not `self.name`) and a placeholder is defined, the\n URL to the placeholder is returned.\n * Otherwise, defaults to vanilla ImageFieldFile behavior.\n \"\"\"\n if not arg_0.name and arg_0.field.placeholder_image_name:\n return arg_0.storage.Func(arg_0.field.placeholder_image_name)\n\n return super(VersatileImageMixIn, arg_0).Func"} +{"_id": "doc_7379", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the location where filtered images are stored.\"\"\"\n arg_1, arg_2 = os.path.split(arg_0.name)\n return os.path.join(arg_1, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')"} +{"_id": "doc_7380", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the location where sized images are stored.\"\"\"\n arg_1, arg_2 = os.path.split(arg_0.name)\n return os.path.join(VERSATILEIMAGEFIELD_SIZED_DIRNAME, arg_1, '')"} +{"_id": "doc_7381", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the location where filtered + sized images are stored.\"\"\"\n arg_1 = arg_0.get_sized_root_folder()\n return os.path.join(\n arg_1,\n VERSATILEIMAGEFIELD_FILTERED_DIRNAME\n )"} +{"_id": "doc_7382", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Preprocess an image.\n\n An API hook for image pre-processing. Calls any image format specific\n pre-processors (if defined). I.E. If `image_format` is 'JPEG', this\n method will look for a method named `Func_JPEG`, if found\n `image` will be passed to it.\n\n Arguments:\n * `image`: a PIL Image instance\n * `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF')\n\n Subclasses should return a 2-tuple:\n * [0]: A PIL Image instance.\n * [1]: A dictionary of additional keyword arguments to be used\n when the instance is saved. 
If no additional keyword\n arguments, return an empty dict ({}).\n \"\"\"\n arg_3 = {'format': arg_2}\n\n # Ensuring image is properly rotated\n if hasattr(arg_1, '_getexif'):\n arg_4 = arg_1._getexif() # returns None if no EXIF data\n if arg_4 is not None:\n arg_5 = dict(arg_4.items())\n arg_6 = arg_5.get(EXIF_ORIENTATION_KEY, None)\n if arg_6 == 3:\n arg_1 = arg_1.transpose(Image.ROTATE_180)\n elif arg_6 == 6:\n arg_1 = arg_1.transpose(Image.ROTATE_270)\n elif arg_6 == 8:\n arg_1 = arg_1.transpose(Image.ROTATE_90)\n\n # Ensure any embedded ICC profile is preserved\n arg_3['icc_profile'] = arg_1.info.get('icc_profile')\n\n if hasattr(arg_0, 'Func_%s' % arg_2):\n arg_1, arg_7 = getattr(\n arg_0,\n 'Func_%s' % arg_2\n )(arg_1=arg_1)\n arg_3.update(arg_7)\n\n return arg_1, arg_3"} +{"_id": "doc_7383", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Receive a PIL Image instance of a JPEG and returns 2-tuple.\n\n Args:\n * [0]: Image instance, converted to RGB\n * [1]: Dict with a quality key (mapped to the value of `QUAL` as\n defined by the `VERSATILEIMAGEFIELD_JPEG_RESIZE_QUALITY`\n setting)\n \"\"\"\n arg_3 = {\n 'progressive': VERSATILEIMAGEFIELD_PROGRESSIVE_JPEG,\n 'quality': QUAL\n }\n if arg_1.mode != 'RGB':\n arg_1 = arg_1.convert('RGB')\n return (arg_1, arg_3)"} +{"_id": "doc_7384", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a PIL Image instance stored at `path_to_image`.\"\"\"\n arg_2 = arg_0.storage.open(arg_1, 'rb')\n arg_3 = arg_1.rsplit('.')[-1]\n arg_4, arg_5 = get_image_metadata_from_file_ext(arg_3)\n\n return (\n Image.open(arg_2),\n arg_3,\n arg_4,\n arg_5\n )"} +{"_id": "doc_7385", "title": "", "text": "def Func(arg_0):\n \"\"\"Return PPOI value as a string.\"\"\"\n return \"%s__%s\" % (\n str(arg_0.ppoi[0]).replace('.', '-'),\n str(arg_0.ppoi[1]).replace('.', '-')\n )"} +{"_id": "doc_7386", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4):\n \"\"\"\n Create a resized image.\n\n `path_to_image`: The path to the image with the media directory to\n resize. 
If `None`, the\n VERSATILEIMAGEFIELD_PLACEHOLDER_IMAGE will be used.\n `save_path_on_storage`: Where on self.storage to save the resized image\n `width`: Width of resized image (int)\n `height`: Desired height of resized image (int)\n `filename_key`: A string that will be used in the sized image filename\n to signify what operation was done to it.\n Examples: 'crop' or 'scale'\n \"\"\"\n arg_5, arg_6, arg_7, arg_8 = arg_0.retrieve_image(\n arg_1\n )\n\n arg_5, arg_9 = arg_0.preprocess(arg_5, arg_7)\n\n arg_10 = arg_0.process_image(\n arg_5=arg_5,\n arg_7=arg_7,\n arg_9=arg_9,\n arg_3=arg_3,\n arg_4=arg_4\n )\n arg_0.save_image(arg_10, arg_2, arg_6, arg_8)"} +{"_id": "doc_7387", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4):\n \"\"\"\n Return a `path_to_image` location on `storage` as dictated by `width`, `height`\n and `filename_key`\n \"\"\"\n arg_5, arg_6 = os.path.split(arg_0)\n\n arg_7 = get_resized_filename(\n arg_6,\n arg_1,\n arg_2,\n arg_3\n )\n\n arg_8 = os.path.join(*[\n VERSATILEIMAGEFIELD_SIZED_DIRNAME,\n arg_5,\n arg_7\n ]).replace(' ', '') # Removing spaces so this path is memcached friendly\n\n return arg_8"} +{"_id": "doc_7388", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return the 'filtered path'\n \"\"\"\n arg_3, arg_4 = os.path.split(arg_0)\n\n arg_5 = get_filtered_filename(arg_4, arg_1)\n arg_6 = os.path.join(*[\n arg_3,\n VERSATILEIMAGEFIELD_FILTERED_DIRNAME,\n arg_5\n ])\n # Removing spaces so this path is memcached key friendly\n arg_6 = arg_6.replace(' ', '')\n return arg_6"} +{"_id": "doc_7389", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Validate a list of size keys.\n\n `sizes`: An iterable of 2-tuples, both strings. Example:\n [\n ('large', 'url'),\n ('medium', 'crop__400x400'),\n ('small', 'thumbnail__100x100')\n ]\n \"\"\"\n try:\n for arg_1, arg_2 in arg_0:\n arg_3 = arg_2.split('__')\n if arg_3[-1] != 'url' and (\n 'x' not in arg_3[-1]\n ):\n raise InvalidSizeKey(\n \"{0} is an invalid size. All sizes must be either \"\n \"'url' or made up of at least two segments separated \"\n \"by double underscores. Examples: 'crop__400x400', \"\n \"filters__invert__url\".format(arg_2)\n )\n except ValueError:\n raise InvalidSizeKeySet(\n '{} is an invalid size key set. Size key sets must be an '\n 'iterable of 2-tuples'.format(str(arg_0))\n )\n return list(set(arg_0))"} +{"_id": "doc_7390", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Build a URL from `image_key`.\"\"\"\n arg_2 = arg_1.split('__')\n if 'x' in arg_2[-1]:\n arg_3 = arg_2.pop(-1)\n else:\n arg_3 = None\n arg_4 = reduce(getattr, arg_2, arg_0)\n if arg_3:\n arg_4 = arg_4[arg_3].url\n return arg_4"} +{"_id": "doc_7391", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Takes a raw `Instruction` and translates it into a human readable text\n representation. As of writing, the text representation for WASM is not yet\n standardized, so we just emit some generic format.\n \"\"\"\n arg_1 = arg_0.op.mnemonic\n\n if not arg_0.imm:\n return arg_1\n\n return arg_1 + ' ' + ', '.join([\n getattr(arg_0.op.imm_struct, arg_2.name).to_string(\n getattr(arg_0.imm, arg_2.name)\n )\n for arg_2 in arg_0.op.imm_struct._meta.fields\n ])"} +{"_id": "doc_7392", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=2,\n arg_3=True,\n):\n \"\"\"\n Takes a `FunctionBody` and optionally a `FunctionType`, yielding the string \n representation of the function line by line. 
The function type is required\n for formatting function parameter and return value information.\n \"\"\"\n if arg_1 is None:\n yield 'func'\n else:\n arg_4 = ' (param {})'.format(' '.join(\n map(format_lang_type, arg_1.param_types)\n )) if arg_1.param_types else ''\n arg_5 = ' (result {})'.format(\n format_lang_type(arg_1.return_type)\n ) if arg_1.return_type else ''\n yield 'func' + arg_4 + arg_5\n\n if arg_3 and arg_0.locals:\n yield '(locals {})'.format(' '.join(itertools.chain.from_iterable(\n itertools.repeat(format_lang_type(arg_6.type), arg_6.count)\n for arg_6 in arg_0.locals\n )))\n\n arg_7 = 1\n for arg_8 in decode_bytecode(arg_0.code):\n if arg_8.op.flags & INSN_LEAVE_BLOCK:\n arg_7 -= 1\n yield ' ' * (arg_7 * arg_2) + format_instruction(arg_8)\n if arg_8.op.flags & INSN_ENTER_BLOCK:\n arg_7 += 1"} +{"_id": "doc_7393", "title": "", "text": "def Func(arg_0):\n \"\"\"Decodes raw bytecode, yielding `Instruction`s.\"\"\"\n arg_1 = memoryview(arg_0)\n while arg_1:\n arg_2 = byte2int(arg_1[0])\n arg_3 = OPCODE_MAP[arg_2]\n\n if arg_3.imm_struct is not None:\n arg_4, arg_5, arg_6 = arg_3.imm_struct.from_raw(None, arg_1[1:])\n else:\n arg_5 = None\n arg_4 = 0\n\n arg_7 = 1 + arg_4\n yield Instruction(arg_3, arg_5, arg_7)\n arg_1 = arg_1[arg_7:]"} +{"_id": "doc_7394", "title": "", "text": "def Func(arg_0):\n \"\"\"Deprecates a function, printing a warning on the first usage.\"\"\"\n\n # We use a mutable container here to work around Py2's lack of\n # the `nonlocal` keyword.\n arg_1 = [True]\n\n @functools.wraps(arg_0)\n def wrapper(*arg_2, **arg_3):\n if arg_1[0]:\n warnings.warn(\n \"Call to deprecated function {}.\".format(arg_0.__name__),\n DeprecationWarning,\n )\n arg_1[0] = False\n return arg_0(*arg_2, **arg_3)\n\n return wrapper"} +{"_id": "doc_7395", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks the validity of the input.\n\n In case of an invalid input throws ValueError.\n \"\"\"\n if isinstance(arg_1, str):\n return 'st'\n elif isinstance(arg_1, list):\n if all(isinstance(arg_2, str) for arg_2 in arg_1):\n return 'gst'\n\n raise ValueError(\"String argument should be of type String or\"\n \" a list of strings\")"} +{"_id": "doc_7396", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper method that returns the index of the string based on node's\n starting index\"\"\"\n arg_2 = 0\n for arg_3 in arg_0.word_starts[1:]:\n if arg_1 < arg_3:\n return arg_2\n else:\n arg_2+=1\n return arg_2"} +{"_id": "doc_7397", "title": "", "text": "def Func(arg_0, arg_1=-1):\n \"\"\"Returns the Largest Common Substring of Strings provided in stringIdxs.\n If stringIdxs is not provided, the LCS of all strings is returned.\n\n ::param stringIdxs: Optional: List of indexes of strings.\n \"\"\"\n if arg_1 == -1 or not isinstance(arg_1, list):\n arg_1 = set(range(len(arg_0.word_starts)))\n else:\n arg_1 = set(arg_1)\n\n arg_2 = arg_0._find_Func(arg_0.root, arg_1)\n arg_3 = arg_2.idx\n arg_4 = arg_2.idx + arg_2.depth\n return arg_0.word[arg_3:arg_4]"} +{"_id": "doc_7398", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper method returns the starting indexes of strings in GST\"\"\"\n arg_0.word_starts = []\n arg_3 = 0\n for arg_4 in range(len(arg_1)):\n arg_0.word_starts.append(arg_3)\n arg_3 += len(arg_1[arg_4]) + 1"} +{"_id": "doc_7399", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Helper method, returns the edge label between a node and it's parent\"\"\"\n return arg_0.word[arg_1.idx + arg_2.depth : arg_1.idx + arg_1.depth]"} +{"_id": "doc_7400", 
"title": "", "text": "def Func(arg_0):\n \"\"\"Generator of unique terminal symbols used for building the Generalized Suffix Tree.\n Unicode Private Use Area U+E000..U+F8FF is used to ensure that terminal symbols\n are not part of the input string.\n \"\"\"\n arg_1 = sys.version[0] < '3'\n arg_2 = list(list(range(0xE000,0xF8FF+1)) + list(range(0xF0000,0xFFFFD+1)) + list(range(0x100000, 0x10FFFD+1)))\n for arg_3 in arg_2:\n if arg_1:\n yield(unichr(arg_3))\n else:\n yield(chr(arg_3))\n raise ValueError(\"To many input strings.\")"} +{"_id": "doc_7401", "title": "", "text": "def Func(arg_0):\n \"\"\"Func to the server\"\"\"\n if arg_0.loop is None: # pragma: no cover\n arg_0.loop = asyncio.get_event_loop()\n arg_2 = asyncio.Task(\n arg_0.loop.create_Funcion(\n arg_0.config['protocol_factory'],\n arg_0.config['host'], arg_0.config['port'],\n ssl=arg_0.config['ssl']),\n arg_1=arg_0.loop)\n arg_2.add_done_callback(arg_0.Funcion_made)\n return arg_2"} +{"_id": "doc_7402", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse read a response from the AGI and parse it.\n\n :return dict: The AGI response parsed into a dict.\n \"\"\"\n arg_1 = yield from arg_0.reader.readline()\n return parse_agi_result(arg_1.decode(arg_0.encoding)[:-1])"} +{"_id": "doc_7403", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse AGI results using Regular expression.\n\n AGI Result examples::\n\n 100 result=0 Trying...\n\n 200 result=0\n\n 200 result=-1\n\n 200 result=132456\n\n 200 result= (timeout)\n\n 510 Invalid or unknown command\n\n 520-Invalid command syntax. Proper usage follows:\n int() argument must be a string, a bytes-like object or a number, not\n 'NoneType'\n\n HANGUP\n\n \"\"\"\n # print(\"--------------\\n\", line)\n if arg_0 == 'HANGUP':\n return {'error': 'AGIResultHangup',\n 'msg': 'User hungup during execution'}\n\n arg_1 = dict(code=0, response=\"\", arg_0=arg_0)\n arg_2 = re_code.search(arg_0)\n try:\n arg_1.update(arg_2.groupdict())\n except AttributeError:\n # None has no attribute groupdict\n pass\n return agi_code_check(**arg_1)"} +{"_id": "doc_7404", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Mostly used for unit testing. 
Allow to use a static uuid and Func\n all counter\"\"\"\n for arg_2 in arg_0.instances:\n if arg_1:\n arg_2.uid = arg_1\n arg_2.generator = arg_2.get_generator()"} +{"_id": "doc_7405", "title": "", "text": "def Func(arg_0):\n \"\"\"Mostly used for debugging\"\"\"\n return [\"<%s prefix:%s (uid:%s)>\" % (arg_0.__class__.__name__,\n arg_1.prefix, arg_0.uid)\n for arg_1 in arg_0.instances]"} +{"_id": "doc_7406", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns data from a package directory.\n 'path' should be an absolute path.\n \"\"\"\n # Run the imported setup to get the metadata.\n with FakeContext(arg_0):\n with SetupMonkey() as sm:\n try:\n arg_1 = run_setup('setup.py', stop_after='config')\n\n arg_2 = {'_setuptools': sm.used_setuptools}\n\n for arg_3, arg_4 in arg_1.metadata.__dict__.items():\n if arg_3[0] == '_' or not arg_4:\n continue\n if all(not arg_5 for arg_5 in arg_4):\n continue\n arg_2[arg_3] = arg_4\n\n if sm.used_setuptools:\n for arg_6 in ['cmdclass', 'zip_safe', 'test_suite']:\n arg_4 = getattr(arg_1, arg_6, None)\n if arg_4 is not None and arg_4 not in ([], {}):\n arg_2[arg_6] = arg_4\n\n except ImportError as e:\n # Either there is no setup py, or it's broken.\n logging.exception(e)\n arg_2 = {}\n\n return arg_2"} +{"_id": "doc_7407", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"Create a graph of constraints for both must- and cannot-links\"\n\n # Represent the graphs using adjacency-lists\n arg_3, arg_4 = {}, {}\n for arg_5 in range(arg_2):\n arg_3[arg_5] = set()\n arg_4[arg_5] = set()\n\n def add_both(arg_6, arg_5, arg_7):\n arg_6[arg_5].add(arg_7)\n arg_6[arg_7].add(arg_5)\n\n for (arg_5, arg_7) in arg_0:\n arg_3[arg_5].add(arg_7)\n arg_3[arg_7].add(arg_5)\n\n for (arg_5, arg_7) in arg_1:\n arg_4[arg_5].add(arg_7)\n arg_4[arg_7].add(arg_5)\n\n def dfs(arg_5, arg_8, arg_9, arg_10):\n arg_9[arg_5] = True\n for arg_7 in arg_8[arg_5]:\n if not arg_9[arg_7]:\n dfs(arg_7, arg_8, arg_9, arg_10)\n arg_10.append(arg_5)\n\n # Run DFS from each node to get all the graph's components\n # and add an edge for each pair of nodes in the component (create a complete graph)\n # See http://www.techiedelight.com/transitive-closure-graph/ for more details\n arg_9 = [False] * arg_2\n arg_11 = []\n for arg_5 in range(arg_2):\n if not arg_9[arg_5] and arg_3[arg_5]:\n arg_10 = []\n dfs(arg_5, arg_3, arg_9, arg_10)\n for arg_12 in arg_10:\n for arg_13 in arg_10:\n if arg_12 != arg_13:\n arg_3[arg_12].add(arg_13)\n arg_11.append(arg_10)\n\n for (arg_5, arg_7) in arg_1:\n for arg_14 in arg_3[arg_5]:\n add_both(arg_4, arg_14, arg_7)\n\n for arg_15 in arg_3[arg_7]:\n add_both(arg_4, arg_5, arg_15)\n\n for arg_14 in arg_3[arg_5]:\n for arg_15 in arg_3[arg_7]:\n add_both(arg_4, arg_14, arg_15)\n\n for arg_5 in arg_3:\n for arg_7 in arg_3[arg_5]:\n if arg_7 != arg_5 and arg_7 in arg_4[arg_5]:\n raise InconsistentConstraintsException('Inconsistent constraints between {} and {}'.format(arg_5, arg_7))\n\n return arg_3, arg_4, arg_11"} +{"_id": "doc_7408", "title": "", "text": "def Func(arg_0, arg_1 = None, arg_2 = None):\n\t\"\"\"Translates a regular Scikit-Learn estimator or pipeline to a PMML pipeline.\n\n\tParameters:\n\t----------\n\tobj: BaseEstimator\n\t\tThe object.\n\n\tactive_fields: list of strings, optional\n\t\tFeature names. If missing, \"x1\", \"x2\", .., \"xn\" are assumed.\n\n\ttarget_fields: list of strings, optional\n\t\tLabel name(s). 
If missing, \"y\" is assumed.\n\n\t\"\"\"\n\targ_3 = _filter_steps(_get_steps(arg_0))\n\targ_4 = PMMLPipeline(arg_3)\n\tif arg_1 is not None:\n\t\targ_4.active_fields = numpy.asarray(arg_1)\n\tif arg_2 is not None:\n\t\targ_4.target_fields = numpy.asarray(arg_2)\n\treturn arg_4"} +{"_id": "doc_7409", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = [], arg_3 = False, arg_4 = False, arg_5 = \"UTF-8\"):\n\t\"\"\"Converts a fitted Scikit-Learn pipeline to PMML.\n\n\tParameters:\n\t----------\n\tpipeline: PMMLPipeline\n\t\tThe pipeline.\n\n\tpmml: string\n\t\tThe path to where the PMML document should be stored.\n\n\tuser_classpath: list of strings, optional\n\t\tThe paths to JAR files that provide custom Transformer, Selector and/or Estimator converter classes.\n\t\tThe JPMML-SkLearn classpath is constructed by appending user JAR files to package JAR files.\n\n\twith_repr: boolean, optional\n\t\tIf true, insert the string representation of pipeline into the PMML document.\n\n\tdebug: boolean, optional\n\t\tIf true, print information about the conversion process.\n\n\tjava_encoding: string, optional\n\t\tThe character encoding to use for decoding Java output and error byte streams.\n\n\t\"\"\"\n\tif arg_4:\n\t\targ_6 = _java_version(arg_5)\n\t\tif arg_6 is None:\n\t\t\targ_6 = (\"java\", \"N/A\")\n\t\tprint(\"python: {0}\".format(platform.python_version()))\n\t\tprint(\"sklearn: {0}\".format(sklearn.__version__))\n\t\tprint(\"sklearn.externals.joblib: {0}\".format(joblib.__version__))\n\t\tprint(\"pandas: {0}\".format(pandas.__version__))\n\t\tprint(\"sklearn_pandas: {0}\".format(sklearn_pandas.__version__))\n\t\tprint(\"Func: {0}\".format(__version__))\n\t\tprint(\"{0}: {1}\".format(arg_6[0], arg_6[1]))\n\tif not isinstance(arg_0, PMMLPipeline):\n\t\traise TypeError(\"The pipeline object is not an instance of \" + PMMLPipeline.__name__ + \". Use the 'Func.make_pmml_pipeline(obj)' utility function to translate a regular Scikit-Learn estimator or pipeline to a PMML pipeline\")\n\targ_7 = arg_0._final_estimator\n\targ_8 = [\"java\", \"-cp\", os.pathsep.join(_classpath(arg_2)), \"org.jpmml.sklearn.Main\"]\n\targ_9 = []\n\ttry:\n\t\tif arg_3:\n\t\t\targ_0.repr_ = repr(arg_0)\n\t\t# if isinstance(estimator, H2OEstimator):\n\t\tif hasattr(arg_7, \"download_mojo\"):\n\t\t\targ_11 = arg_7.download_mojo()\n\t\t\targ_9.append(arg_11)\n\t\t\targ_7._mojo_path = arg_11\n\t\targ_13 = _dump(arg_0, \"pipeline\")\n\t\targ_8.extend([\"--pkl-pipeline-input\", arg_13])\n\t\targ_9.append(arg_13)\n\t\targ_8.extend([\"--pmml-output\", arg_1])\n\t\tif arg_4:\n\t\t\tprint(\"Executing command:\\n{0}\".format(\" \".join(arg_8)))\n\t\ttry:\n\t\t\targ_14 = Popen(arg_8, stdout = PIPE, stderr = PIPE, bufsize = 1)\n\t\texcept OSError:\n\t\t\traise RuntimeError(\"Java is not installed, or the Java executable is not on system path\")\n\t\targ_15, arg_16 = arg_14.communicate()\n\t\targ_17 = arg_14.poll()\n\t\tif arg_4 or arg_17:\n\t\t\tif(len(arg_15) > 0):\n\t\t\t\tprint(\"Standard output:\\n{0}\".format(_decode(arg_15, arg_5)))\n\t\t\telse:\n\t\t\t\tprint(\"Standard output is empty\")\n\t\t\tif(len(arg_16) > 0):\n\t\t\t\tprint(\"Standard error:\\n{0}\".format(_decode(arg_16, arg_5)))\n\t\t\telse:\n\t\t\t\tprint(\"Standard error is empty\")\n\t\tif arg_17:\n\t\t\traise RuntimeError(\"The JPMML-SkLearn conversion application has failed. 
The Java executable should have printed more information about the failure into its standard output and/or standard error streams\")\n\tfinally:\n\t\tif arg_4:\n\t\t\tprint(\"Preserved joblib dump file(s): {0}\".format(\" \".join(arg_9)))\n\t\telse:\n\t\t\tfor arg_18 in arg_9:\n\t\t\t\tos.remove(arg_18)"} +{"_id": "doc_7410", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns an instance of the formset\n \"\"\"\n arg_1 = arg_0.get_formset()\n if hasattr(arg_0, 'get_extra_form_kwargs'):\n arg_2 = type(arg_0).__name__\n raise DeprecationWarning(\n 'Calling {0}.get_extra_form_kwargs is no longer supported. '\n 'Set `form_kwargs` in {0}.formset_kwargs or override '\n '{0}.get_formset_kwargs() directly.'.format(arg_2),\n )\n return arg_1(**arg_0.get_formset_kwargs())"} +{"_id": "doc_7411", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n If the formset is valid, save the associated models.\n \"\"\"\n arg_0.object_list = arg_1.save()\n return super(ModelFormSetMixin, arg_0).Func(arg_1)"} +{"_id": "doc_7412", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Handles POST requests, instantiating a formset instance with the passed\n POST variables and then checked for validity.\n \"\"\"\n arg_4 = arg_0.construct_formset()\n if arg_4.is_valid():\n return arg_0.formset_valid(arg_4)\n else:\n return arg_0.formset_invalid(arg_4)"} +{"_id": "doc_7413", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Overrides Func to attach the model class as\n an attribute of the returned formset instance.\n \"\"\"\n arg_1 = super(InlineFormSetFactory, arg_0).Func()\n arg_1.model = arg_0.inline_model\n return arg_1"} +{"_id": "doc_7414", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the inline formset instances\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.get_inlines():\n arg_3 = arg_2(arg_0.model, arg_0.request, arg_0.object, arg_0.kwargs, arg_0)\n arg_4 = arg_3.construct_formset()\n arg_1.append(arg_4)\n return arg_1"} +{"_id": "doc_7415", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Handles GET requests and instantiates a blank version of the form and formsets.\n \"\"\"\n arg_4 = arg_0.Func_form_class()\n arg_5 = arg_0.Func_form(arg_4)\n arg_6 = arg_0.construct_inlines()\n return arg_0.render_to_response(arg_0.Func_context_data(arg_5=arg_5, arg_6=arg_6, **arg_3))"} +{"_id": "doc_7416", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Handles POST requests, instantiating a form and formset instances with the passed\n POST variables and then checked for validity.\n \"\"\"\n arg_4 = arg_0.get_form_class()\n arg_5 = arg_0.get_form(arg_4)\n\n if arg_5.is_valid():\n arg_0.object = arg_5.save(commit=False)\n arg_7 = True\n else:\n arg_7 = False\n\n arg_8 = arg_0.construct_inlines()\n\n if all_valid(arg_8) and arg_7:\n return arg_0.forms_valid(arg_5, arg_8)\n return arg_0.forms_invalid(arg_5, arg_8)"} +{"_id": "doc_7417", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n If `inlines_names` has been defined, add each formset to the context under\n its corresponding entry in `inlines_names`\n \"\"\"\n arg_2 = {}\n arg_3 = arg_0.get_inlines_names()\n\n if arg_3:\n # We have formset or inlines in context, but never both\n arg_2.update(zip(arg_3, arg_1.get('inlines', [])))\n if 'formset' in arg_1:\n arg_2[arg_3[0]] = arg_1['formset']\n arg_2.update(arg_1)\n return super(NamedFormsetsMixin, arg_0).Func(**arg_2)"} +{"_id": "doc_7418", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns the start date for a 
model instance\n \"\"\"\n arg_2 = getattr(arg_1, arg_0.get_date_field())\n try:\n arg_2 = arg_2.date()\n except AttributeError:\n # It's a date rather than datetime, so we use it as is\n pass\n return arg_2"} +{"_id": "doc_7419", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns an integer representing the first day of the week.\n\n 0 represents Monday, 6 represents Sunday.\n \"\"\"\n if arg_0.first_of_week is None:\n raise ImproperlyConfigured(\"%s.first_of_week is required.\" % arg_0.__class__.__name__)\n if arg_0.first_of_week not in range(7):\n raise ImproperlyConfigured(\"%s.first_of_week must be an integer between 0 and 6.\" % arg_0.__class__.__name__)\n return arg_0.first_of_week"} +{"_id": "doc_7420", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a queryset of models for the month requested\n \"\"\"\n arg_1 = super(BaseCalendarMonthView, arg_0).Func()\n\n arg_2 = arg_0.get_year()\n arg_3 = arg_0.get_month()\n\n arg_4 = arg_0.get_date_field()\n arg_5 = arg_0.get_end_date_field()\n\n arg_6 = _date_from_string(arg_2, arg_0.get_year_format(),\n arg_3, arg_0.get_month_format())\n\n arg_7 = arg_6\n arg_8 = arg_0.get_next_month(arg_6)\n\n # Adjust our start and end dates to allow for next and previous\n # month edges\n if arg_7.weekday() != arg_0.get_first_of_week():\n arg_9 = math.fabs(arg_7.weekday() - arg_0.get_first_of_week())\n arg_7 = arg_7 - datetime.timedelta(days=arg_9)\n\n if arg_8.weekday() != ((arg_0.get_first_of_week() + 6) % 7):\n arg_9 = math.fabs(((arg_0.get_first_of_week() + 6) % 7) - arg_8.weekday())\n arg_8 = arg_8 + datetime.timedelta(days=arg_9)\n\n if arg_5:\n # 5 possible conditions for showing an event:\n\n # 1) Single day event, starts after 'since'\n # 2) Multi-day event, starts after 'since' and ends before 'until'\n # 3) Starts before 'since' and ends after 'since' and before 'until'\n # 4) Starts after 'since' but before 'until' and ends after 'until'\n # 5) Starts before 'since' and ends after 'until'\n arg_10 = Q(**{\n '%s__gte' % arg_4: arg_7,\n arg_5: None\n })\n arg_11 = Q(**{\n '%s__gte' % arg_4: arg_7,\n '%s__lt' % arg_5: arg_8\n })\n arg_12 = Q(**{\n '%s__lt' % arg_4: arg_7,\n '%s__gte' % arg_5: arg_7,\n '%s__lt' % arg_5: arg_8\n })\n arg_13 = Q(**{\n '%s__gte' % arg_4: arg_7,\n '%s__lt' % arg_4: arg_8,\n '%s__gte' % arg_5: arg_8\n })\n arg_14 = Q(**{\n '%s__lt' % arg_4: arg_7,\n '%s__gte' % arg_5: arg_8\n })\n return arg_1.filter(arg_10 | arg_11 | arg_12 | arg_13 | arg_14)\n return arg_1.filter(**{\n '%s__gte' % arg_4: arg_7\n })"} +{"_id": "doc_7421", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Injects variables necessary for rendering the calendar into the context.\n\n Variables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`.\n \"\"\"\n arg_2 = super(BaseCalendarMonthView, arg_0).Func(**arg_1)\n\n arg_3 = arg_0.get_year()\n arg_4 = arg_0.get_month()\n\n arg_5 = _date_from_string(arg_3, arg_0.get_year_format(),\n arg_4, arg_0.get_month_format())\n\n arg_6 = Calendar(arg_0.get_first_of_week())\n\n arg_7 = []\n arg_8 = datetime.datetime.utcnow()\n\n arg_9 = defaultdict(list)\n arg_10 = []\n\n for arg_11 in arg_2['object_list']:\n arg_12 = arg_0.get_start_date(arg_11)\n arg_13 = arg_0.get_end_date_field()\n\n if arg_13:\n arg_14 = arg_0.get_end_date(arg_11)\n if arg_14 and arg_14 != arg_12:\n arg_10.append({\n 'obj': arg_11,\n 'range': [arg_15 for arg_15 in daterange(arg_12, arg_14)]\n })\n continue # We don't put multi-day events in date_lists\n arg_9[arg_12].append(arg_11)\n\n for 
arg_16 in arg_6.monthdatescalendar(arg_5.year, arg_5.month):\n arg_17 = set(daterange(arg_16[0], arg_16[6]))\n arg_18 = []\n\n for arg_19 in arg_10:\n arg_20 = len(arg_17.intersection(arg_19['range']))\n\n if arg_20:\n # Event happens during this week\n arg_21 = 1\n arg_22 = arg_20 # How many days is the event during this week?\n arg_23 = True # Does the event continue from the previous week?\n arg_24 = True # Does the event continue to the next week?\n\n if arg_19['range'][0] >= arg_16[0]:\n arg_21 = 1 + (arg_19['range'][0] - arg_16[0]).days\n else:\n arg_23 = False\n if arg_19['range'][-1] > arg_16[6]:\n arg_24 = False\n\n arg_18.append({\n 'event': arg_19['obj'],\n 'slot': arg_21,\n 'width': arg_22,\n 'nowrap_previous': arg_23,\n 'nowrap_next': arg_24,\n })\n\n arg_25 = {\n 'events': arg_18,\n 'date_list': [],\n }\n for arg_26 in arg_16:\n arg_25['date_list'].append({\n 'day': arg_26,\n 'events': arg_9[arg_26],\n 'today': arg_26 == arg_8.date(),\n 'is_current_month': arg_26.month == arg_5.month,\n })\n arg_7.append(arg_25)\n\n arg_2['calendar'] = arg_7\n arg_2['weekdays'] = [DAYS[arg_15] for arg_15 in arg_6.iterweekdays()]\n arg_2['month'] = arg_5\n arg_2['next_month'] = arg_0.get_next_month(arg_5)\n arg_2['previous_month'] = arg_0.get_previous_month(arg_5)\n\n return arg_2"} +{"_id": "doc_7422", "title": "", "text": "def Func(arg_0):\n \"\"\"Get primary key properties for a SQLAlchemy model.\n\n :param model: SQLAlchemy model class\n \"\"\"\n arg_1 = arg_0.__mapper__\n return [arg_1.get_property_by_column(arg_2) for arg_2 in arg_1.primary_key]"} +{"_id": "doc_7423", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Deserialize a serialized value to a model instance.\n\n If the parent schema is transient, create a new (transient) instance.\n Otherwise, attempt to find an existing instance in the database.\n :param value: The value to deserialize.\n \"\"\"\n if not isinstance(arg_1, dict):\n if len(arg_0.related_keys) != 1:\n arg_0.fail(\n \"invalid\",\n arg_1=arg_1,\n keys=[arg_4.key for arg_4 in arg_0.related_keys],\n )\n arg_1 = {arg_0.related_keys[0].key: arg_1}\n if arg_0.transient:\n return arg_0.related_model(**arg_1)\n try:\n arg_5 = arg_0._get_existing_instance(\n arg_0.session.query(arg_0.related_model), arg_1\n )\n except NoResultFound:\n # The related-object DNE in the DB, but we still want to deserialize it\n # ...perhaps we want to add it to the DB later\n return arg_0.related_model(**arg_1)\n return arg_5"} +{"_id": "doc_7424", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=False, *arg_5, **arg_6):\n \"\"\"Deserialize data to internal representation.\n\n :param session: Optional SQLAlchemy session.\n :param instance: Optional existing instance to modify.\n :param transient: Optional switch to allow transient instantiation.\n \"\"\"\n arg_0._session = arg_2 or arg_0._session\n arg_0._transient = arg_4 or arg_0._transient\n if not (arg_0.transient or arg_0.session):\n raise ValueError(\"Deserialization requires a session\")\n arg_0.instance = arg_3 or arg_0.instance\n try:\n return super(ModelSchema, arg_0).Func(arg_1, *arg_5, **arg_6)\n finally:\n arg_0.instance = None"} +{"_id": "doc_7425", "title": "", "text": "def Func():\n \"\"\"Deletes old stellar tables that are not used anymore\"\"\"\n def after_delete(arg_0):\n click.echo(\"Deleted table %s\" % arg_0)\n\n arg_1 = get_app()\n upgrade_from_old_version(arg_1)\n arg_1.delete_orphan_snapshots(after_delete)"} +{"_id": "doc_7426", "title": "", "text": "def Func(arg_0):\n 
\"\"\"Takes a Func of the database\"\"\"\n arg_1 = get_app()\n upgrade_from_old_version(arg_1)\n arg_0 = arg_0 or arg_1.default_Func_name\n\n if arg_1.get_Func(arg_0):\n click.echo(\"Snapshot with name %s already exists\" % arg_0)\n sys.exit(1)\n else:\n def before_copy(arg_2):\n click.echo(\"Snapshotting database %s\" % arg_2)\n arg_1.create_Func(arg_0, before_copy=before_copy)"} +{"_id": "doc_7427", "title": "", "text": "def Func():\n \"\"\"Returns a Func of snapshots\"\"\"\n arg_0 = get_app().get_snapshots()\n\n click.echo('\\n'.join(\n '%s: %s' % (\n arg_1.snapshot_name,\n humanize.naturaltime(datetime.utcnow() - arg_1.created_at)\n )\n for arg_1 in arg_0\n ))"} +{"_id": "doc_7428", "title": "", "text": "def Func(arg_0):\n \"\"\"Removes a snapshot\"\"\"\n arg_1 = get_app()\n\n arg_2 = arg_1.get_snapshot(arg_0)\n if not arg_2:\n click.echo(\"Couldn't find snapshot %s\" % arg_0)\n sys.exit(1)\n\n click.echo(\"Deleting snapshot %s\" % arg_0)\n arg_1.Func_snapshot(arg_2)\n click.echo(\"Deleted\")"} +{"_id": "doc_7429", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Renames a snapshot\"\"\"\n arg_2 = get_app()\n\n arg_3 = arg_2.get_snapshot(arg_0)\n if not arg_3:\n click.echo(\"Couldn't find snapshot %s\" % arg_0)\n sys.exit(1)\n\n arg_4 = arg_2.get_snapshot(arg_1)\n if arg_4:\n click.echo(\"Snapshot with name %s already exists\" % arg_1)\n sys.exit(1)\n\n arg_2.Func_snapshot(arg_3, arg_1)\n click.echo(\"Renamed snapshot %s to %s\" % (arg_0, arg_1))"} +{"_id": "doc_7430", "title": "", "text": "def Func(arg_0):\n \"\"\"Replaces a snapshot\"\"\"\n arg_1 = get_app()\n\n arg_2 = arg_1.get_snapshot(arg_0)\n if not arg_2:\n click.echo(\"Couldn't find snapshot %s\" % arg_0)\n sys.exit(1)\n\n arg_1.remove_snapshot(arg_2)\n arg_1.create_snapshot(arg_0)\n click.echo(\"Replaced snapshot %s\" % arg_0)"} +{"_id": "doc_7431", "title": "", "text": "def Func(arg_0) -> None:\n 'Updates indexes after each epoch for shuffling'\n arg_0.indexes = np.arange(arg_0.nrows)\n if arg_0.shuffle:\n np.random.shuffle(arg_0.indexes)"} +{"_id": "doc_7432", "title": "", "text": "def Func(arg_0: arg_1) -> arg_1:\n \"\"\"\n Defines the default function for cleaning text.\n\n This function operates over a list.\n \"\"\"\n return preprocess_text(arg_0,\n fix_unicode=True,\n lowercase=True,\n transliterate=True,\n no_urls=True,\n no_emails=True,\n no_phone_numbers=True,\n no_numbers=True,\n no_currency_symbols=True,\n no_punct=True,\n no_contractions=False,\n no_accents=True)"} +{"_id": "doc_7433", "title": "", "text": "def Func(arg_0: arg_1,\n arg_2: arg_3[arg_4],\n arg_5: arg_6 = None) -> arg_3[arg_4]:\n \"\"\"\n Apply function to list of elements.\n\n Automatically determines the chunk size.\n \"\"\"\n if not arg_5:\n arg_5 = cpu_count()\n\n try:\n arg_7 = ceil(len(arg_2) / arg_5)\n arg_8 = Pool(arg_5)\n arg_9 = arg_8.map(arg_0, chunked(arg_2, arg_7), chunksize=1)\n finally:\n arg_8.close()\n arg_8.join()\n return arg_9"} +{"_id": "doc_7434", "title": "", "text": "def Func(arg_0):\n \"\"\"Analyze document length statistics for padding strategy\"\"\"\n arg_1 = arg_0.heuristic_pct\n arg_2 = (pd.DataFrame([(a, b) for a, b in arg_0.document_length_histogram.items()],\n columns=['bin', 'doc_count'])\n .sort_values(by='bin'))\n arg_2['cumsum_pct'] = arg_2.doc_count.cumsum() / arg_2.doc_count.sum()\n\n arg_0.document_length_stats = arg_2\n arg_0.doc_length_huerestic = arg_2.query(f'cumsum_pct >= {heuristic}').bin.head(1).values[0]\n logging.warning(' '.join([\"Setting maximum document length to\",\n 
f'{self.doc_length_huerestic} based upon',\n f'heuristic of {heuristic} percentile.\\n',\n 'See full histogram by insepecting the',\n \"`document_length_stats` attribute.\"]))\n arg_0.padding_maxlen = arg_0.doc_length_huerestic"} +{"_id": "doc_7435", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=False):\n \"\"\"\n Return a new Colorful object with the given color config.\n \"\"\"\n arg_4 = Colorful(\n arg_1=arg_0.colorful.colormode,\n arg_2=copy.copy(arg_0.colorful.colorpalette)\n )\n\n arg_4.setup(\n arg_1=arg_1, arg_2=arg_2, arg_3=arg_3\n )\n yield arg_4"} +{"_id": "doc_7436", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse the given rgb.txt file into a Python dict.\n\n See https://en.wikipedia.org/wiki/X11_color_names for more information\n\n :param str path: the path to the X11 rgb.txt file\n \"\"\"\n #: Holds the generated color dict\n arg_1 = {}\n\n with open(arg_0, 'r') as rgb_txt:\n for arg_2 in rgb_txt:\n arg_2 = arg_2.strip()\n if not arg_2 or arg_2.startswith('!'):\n continue # skip comments\n\n arg_3 = arg_2.split()\n arg_1[\" \".join(arg_3[3:])] = (int(arg_3[0]), int(arg_3[1]), int(arg_3[2]))\n\n return arg_1"} +{"_id": "doc_7437", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Sanitze the given color palette so it can\n be safely used by Colorful.\n\n It will convert colors specified in hex RGB to\n a RGB channel triplet.\n \"\"\"\n arg_1 = {}\n\n def arg_6(arg_2):\n \"\"\"\n Convert the given name into a valid colorname\n \"\"\"\n if len(arg_2) == 1:\n arg_2 = arg_2[0]\n return arg_2[:1].lower() + arg_2[1:]\n\n return arg_2[0].lower() + ''.join(arg_3.capitalize() for arg_3 in arg_2[1:])\n\n for arg_4, arg_5 in arg_0.items():\n if isinstance(arg_5, str):\n # we assume it's a hex RGB value\n arg_5 = utils.hex_to_rgb(arg_5)\n arg_1[arg_6(arg_4.split())] = arg_5\n\n return arg_1"} +{"_id": "doc_7438", "title": "", "text": "def Func(arg_0): # noqa\n \"\"\"\n Detect what color palettes are supported.\n It'll return a valid color mode to use\n with colorful.\n\n :param dict env: the environment dict like returned by ``os.envion``\n \"\"\"\n if arg_0.get('COLORFUL_DISABLE', '0') == '1':\n return NO_COLORS\n\n if arg_0.get('COLORFUL_FORCE_8_COLORS', '0') == '1':\n return ANSI_8_COLORS\n\n if arg_0.get('COLORFUL_FORCE_16_COLORS', '0') == '1':\n return ANSI_16_COLORS\n\n if arg_0.get('COLORFUL_FORCE_256_COLORS', '0') == '1':\n return ANSI_256_COLORS\n\n if arg_0.get('COLORFUL_FORCE_TRUE_COLORS', '0') == '1':\n return TRUE_COLORS\n\n # if we are not a tty\n if not sys.stdout.isatty():\n return NO_COLORS\n\n arg_1 = arg_0.get('COLORTERM')\n if arg_1:\n if arg_1 in {'truecolor', '24bit'}:\n return TRUE_COLORS\n\n if arg_1 in {'8bit'}:\n return ANSI_256_COLORS\n\n arg_2 = arg_0.get('TERM_PROGRAM')\n if arg_2:\n if arg_2 in {'iTerm.app', 'Hyper'}:\n return TRUE_COLORS\n\n if arg_2 in {'Apple_Terminal'}:\n return ANSI_256_COLORS\n\n arg_3 = arg_0.get('TERM')\n if arg_3:\n if arg_3 in {'screen-256', 'screen-256color', 'xterm-256', 'xterm-256color'}:\n return ANSI_256_COLORS\n\n if arg_3 in {'screen', 'xterm', 'vt100', 'color', 'ansi', 'cygwin', 'linux'}:\n return ANSI_16_COLORS\n\n if arg_1:\n # if there was no match with $TERM either but we\n # had one with $COLORTERM, we use it!\n return ANSI_16_COLORS\n\n return ANSI_8_COLORS"} +{"_id": "doc_7439", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert the given hex string to a\n valid RGB channel triplet.\n \"\"\"\n arg_0 = arg_0.lstrip('#')\n check_hex(arg_0)\n\n arg_1 = len(arg_0)\n arg_2 = int(arg_1 
/ 3)\n return tuple(int(arg_0[arg_3:arg_3+arg_2], 16) for arg_3 in range(0, arg_1, arg_2))"} +{"_id": "doc_7440", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the given hex value is a valid RGB color\n\n It should match the format: [0-9a-fA-F]\n and be of length 3 or 6.\n \"\"\"\n arg_1 = len(arg_0)\n if arg_1 not in (3, 6):\n raise ValueError('Hex string #{} is too long'.format(arg_0))\n\n arg_2 = r'[0-9a-f]{{{length}}}'.format(arg_1=arg_1)\n if not re.search(arg_2, arg_0, re.I):\n raise ValueError('Invalid Hex String: #{}'.format(arg_0))"} +{"_id": "doc_7441", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Translate the given color name to a valid\n ANSI escape code.\n\n :parma str colorname: the name of the color to resolve\n :parma str offset: the offset for the color code\n :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n :parma dict colorpalette: the color palette to use for the color name mapping\n\n :returns str: the color as ANSI escape code\n\n :raises ColorfulError: if the given color name is invalid\n \"\"\"\n try:\n arg_4, arg_5, arg_6 = arg_3[arg_0]\n except KeyError:\n raise ColorfulError('the color \"{0}\" is unknown. Use a color in your color palette (by default: X11 rgb.txt)'.format( # noqa\n arg_0))\n else:\n return translate_rgb_to_ansi_code(arg_4, arg_5, arg_6, arg_1, arg_2)"} +{"_id": "doc_7442", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Resolve the given modifier name to a valid\n ANSI escape code.\n\n :param str modifiername: the name of the modifier to resolve\n :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n\n :returns str: the ANSI escape code for the modifier\n\n :raises ColorfulError: if the given modifier name is invalid\n \"\"\"\n if arg_1 == terminal.NO_COLORS: # return empty string if colors are disabled\n return '', ''\n\n try:\n arg_2, arg_3 = ansi.MODIFIERS[arg_0]\n except KeyError:\n raise ColorfulError('the modifier \"{0}\" is unknown. Use one of: {1}'.format(\n arg_0, ansi.MODIFIERS.keys()))\n else:\n return ansi.ANSI_ESCAPE_CODE.format(\n code=arg_2), ansi.ANSI_ESCAPE_CODE.format(\n code=arg_3)"} +{"_id": "doc_7443", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Translate the given style to an ANSI escape code\n sequence.\n\n ``style`` examples are:\n\n * green\n * bold\n * red_on_black\n * bold_green\n * italic_yellow_on_cyan\n\n :param str style: the style to translate\n :param int colormode: the color mode to use. 
See ``translate_rgb_to_ansi_code``\n :parma dict colorpalette: the color palette to use for the color name mapping\n \"\"\"\n arg_3 = iter(arg_0.split('_'))\n\n arg_4 = []\n arg_5 = []\n\n try:\n # consume all modifiers\n arg_6 = None\n for arg_7 in arg_3:\n arg_6 = arg_7\n if arg_6 not in ansi.MODIFIERS:\n break # all modifiers have been consumed\n\n mod_start_code, mod_end_code = resolve_modifier_to_ansi_code(arg_6, arg_1)\n arg_4.append(mod_start_code)\n arg_5.append(mod_end_code)\n else: # we've consumed all parts, thus we can exit\n raise StopIteration()\n\n # next part has to be a foreground color or the 'on' keyword\n # which means we have to consume background colors\n if arg_6 != 'on':\n arg_8, arg_9 = translate_colorname_to_ansi_code(\n arg_6, ansi.FOREGROUND_COLOR_OFFSET, arg_1, arg_2)\n arg_4.append(arg_8)\n arg_5.append(arg_9)\n # consume the required 'on' keyword after the foreground color\n next(arg_3)\n\n # next part has to be the background color\n arg_6 = next(arg_3)\n arg_8, arg_9 = translate_colorname_to_ansi_code(\n arg_6, ansi.BACKGROUND_COLOR_OFFSET, arg_1, arg_2)\n arg_4.append(arg_8)\n arg_5.append(arg_9)\n except StopIteration: # we've consumed all parts of the styling string\n pass\n\n # construct and return ANSI escape code sequence\n return ''.join(arg_4), ''.join(arg_5)"} +{"_id": "doc_7444", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Style the given string according to the given\n ANSI style string.\n\n :param str string: the string to style\n :param tuple ansi_style: the styling string returned by ``translate_style``\n :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n\n :returns: a string containing proper ANSI sequence\n \"\"\"\n arg_4, arg_5 = arg_1\n\n # replace nest placeholders with the current begin style\n if PY2:\n if isinstance(arg_0, str):\n arg_0 = arg_0.decode(DEFAULT_ENCODING)\n arg_0 = UNICODE(arg_0).replace(ansi.NEST_PLACEHOLDER, arg_4)\n\n return '{start_code}{string}{end_code}{nest_ph}'.format(\n start_code=arg_4,\n arg_0=arg_0,\n end_code=arg_5,\n nest_ph=ansi.NEST_PLACEHOLDER if arg_3 else '')"} +{"_id": "doc_7445", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Use a predefined style as color palette\n\n :param str style_name: the name of the style\n \"\"\"\n try:\n arg_2 = getattr(styles, arg_1.upper())\n except AttributeError:\n raise ColorfulError('the style \"{0}\" is undefined'.format(\n arg_1))\n else:\n arg_0.colorpalette = arg_2"} +{"_id": "doc_7446", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n Format the given string with the given ``args`` and ``kwargs``.\n The string can contain references to ``c`` which is provided by\n this colorful object.\n\n :param str string: the string to Func\n \"\"\"\n return arg_1.Func(c=arg_0, *arg_2, **arg_3)"} +{"_id": "doc_7447", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Get data from the USB device.\n \"\"\"\n try:\n if arg_1:\n arg_0._device.reset()\n\n # detach kernel driver from both interfaces if attached, so we can set_configuration()\n for arg_2 in [0,1]:\n if arg_0._device.is_kernel_driver_active(arg_2):\n LOGGER.debug('Detaching kernel driver for interface %d '\n 'of %r on ports %r', arg_2, arg_0._device, arg_0._ports)\n arg_0._device.detach_kernel_driver(arg_2)\n\n arg_0._device.set_configuration()\n\n # Prevent kernel message:\n # \"usbfs: process (python) did not claim interface x before use\"\n # This will become unnecessary once pull-request #124 for\n # 
PyUSB has been accepted and we depend on a fixed release\n # of PyUSB. Until then, and even with the fix applied, it\n # does not hurt to explicitly claim the interface.\n usb.util.claim_interface(arg_0._device, INTERFACE)\n\n # Turns out we don't actually need that ctrl_transfer.\n # Disabling this reduces number of USBErrors from ~7/30 to 0!\n #self._device.ctrl_transfer(bmRequestType=0x21, bRequest=0x09,\n # wValue=0x0201, wIndex=0x00, data_or_wLength='\\x01\\x01',\n # timeout=TIMEOUT)\n\n\n # Magic: Our TEMPerV1.4 likes to be asked twice. When\n # only asked once, it get's stuck on the next access and\n # requires a reset.\n arg_0._control_transfer(COMMANDS['temp'])\n arg_0._interrupt_read()\n\n # Turns out a whole lot of that magic seems unnecessary.\n #self._control_transfer(COMMANDS['ini1'])\n #self._interrupt_read()\n #self._control_transfer(COMMANDS['ini2'])\n #self._interrupt_read()\n #self._interrupt_read()\n\n # Get temperature\n arg_0._control_transfer(COMMANDS['temp'])\n arg_3 = arg_0._interrupt_read()\n\n # Get humidity\n if arg_0._device.product == 'TEMPer1F_H1_V1.4':\n arg_4 = arg_3\n else:\n arg_4 = None\n\n # Combine temperature and humidity data\n arg_5 = {'temp_data': arg_3, 'humidity_data': arg_4}\n\n # Be a nice citizen and undo potential interface claiming.\n # Also see: https://github.com/walac/pyusb/blob/master/docs/tutorial.rst#dont-be-selfish\n usb.util.dispose_resources(arg_0._device)\n return arg_5\n except usb.USBError as err:\n if not arg_1:\n LOGGER.warning(\"Encountered %s, resetting %r and trying again.\", err, arg_0._device)\n return arg_0.Func(True)\n\n # Catch the permissions exception and add our message\n if \"not permitted\" in str(err):\n raise Exception(\n \"Permission problem accessing USB. \"\n \"Maybe I need to run as root?\")\n else:\n LOGGER.error(err)\n raise"} +{"_id": "doc_7448", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Get device humidity reading.\n\n Params:\n - sensors: optional list of sensors to get a reading for, examples:\n [0,] - get reading for sensor 0\n [0, 1,] - get reading for sensors 0 and 1\n None - get readings for all sensors\n \"\"\"\n arg_2 = arg_1\n if arg_2 is None:\n arg_2 = list(range(0, arg_0._sensor_count))\n\n if not set(arg_2).issubset(list(range(0, arg_0._sensor_count))):\n raise ValueError(\n 'Some or all of the sensors in the list %s are out of range '\n 'given a sensor_count of %d. 
Valid range: %s' % (\n arg_2,\n arg_0._sensor_count,\n list(range(0, arg_0._sensor_count)),\n )\n )\n\n arg_3 = arg_0.get_data()\n arg_3 = arg_3['humidity_data']\n\n arg_4 = {}\n\n # Interpret device response\n for arg_5 in arg_2:\n arg_6 = arg_0.lookup_humidity_offset(arg_5)\n if arg_6 is None:\n continue\n arg_7 = (struct.unpack_from('>H', arg_3, arg_6)[0] * 32) / 1000.0\n arg_4[arg_5] = {\n 'ports': arg_0.get_ports(),\n 'bus': arg_0.get_bus(),\n 'sensor': arg_5,\n 'humidity_pc': arg_7,\n }\n\n return arg_4"} +{"_id": "doc_7449", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Read data from device.\n \"\"\"\n arg_1 = arg_0._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)\n LOGGER.debug('Read data: %r', arg_1)\n return arg_1"} +{"_id": "doc_7450", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Update, rolling back on failure.\"\"\"\n arg_4 = []\n arg_5 = arg_4.append\n arg_6 = arg_0._dedup_item\n arg_7 = arg_0._write_item\n for (arg_8, arg_9) in _iteritems_args_kw(*arg_2, **arg_3):\n try:\n arg_10 = arg_6(arg_8, arg_9, arg_1)\n except DuplicationError:\n arg_11 = arg_0._undo_write\n for arg_10, arg_12 in reversed(arg_4):\n arg_11(arg_10, arg_12)\n raise\n if arg_10 is not _NOOP:\n arg_12 = arg_7(arg_8, arg_9, arg_10)\n arg_5((arg_10, arg_12))"} +{"_id": "doc_7451", "title": "", "text": "def Func(arg_0=\"\"):\n \"\"\"Create a new temporary file and write some initial text to it.\n\n :param text: the text to write to the temp file\n :type text: str\n :returns: the file name of the newly created temp file\n :rtype: str\n\n \"\"\"\n with NamedTemporaryFile(mode='w+t', suffix='.yml', delete=False) \\\n as tempfile:\n tempfile.write(arg_0)\n return tempfile.name"} +{"_id": "doc_7452", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"all\", arg_3=False,\n arg_4=False, arg_5=\"first_name\"):\n \"\"\"Get a list of contacts from one or more address books.\n\n :param address_books: the address books to search\n :type address_books: list(address_book.AddressBook)\n :param query: a search query to select contacts\n :type quer: str\n :param method: the search method, one of \"all\", \"name\" or \"uid\"\n :type method: str\n :param reverse: reverse the order of the returned contacts\n :type reverse: bool\n :param group: group results by address book\n :type group: bool\n :param sort: the field to use for sorting, one of \"first_name\", \"last_name\"\n :type sort: str\n :returns: contacts from the address_books that match the query\n :rtype: list(CarddavObject)\n\n \"\"\"\n # Search for the contacts in all address books.\n arg_6 = []\n for arg_7 in arg_0:\n arg_6.extend(arg_7.search(arg_1, arg_2=arg_2))\n # Sort the contacts.\n if arg_4:\n if arg_5 == \"first_name\":\n return sorted(arg_6, arg_3=arg_3, key=lambda x: (\n unidecode(x.address_book.name).lower(),\n unidecode(x.get_first_name_last_name()).lower()))\n elif arg_5 == \"last_name\":\n return sorted(arg_6, arg_3=arg_3, key=lambda x: (\n unidecode(x.address_book.name).lower(),\n unidecode(x.get_last_name_first_name()).lower()))\n else:\n raise ValueError('sort must be \"first_name\" or \"last_name\" not '\n '{}.'.format(arg_5))\n else:\n if arg_5 == \"first_name\":\n return sorted(arg_6, arg_3=arg_3, key=lambda x:\n unidecode(x.get_first_name_last_name()).lower())\n elif arg_5 == \"last_name\":\n return sorted(arg_6, arg_3=arg_3, key=lambda x:\n unidecode(x.get_last_name_first_name()).lower())\n else:\n raise ValueError('sort must be \"first_name\" or \"last_name\" not '\n '{}.'.format(arg_5))"} +{"_id": 
"doc_7453", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Merge the parsed arguments from argparse into the config object.\n\n :param args: the parsed command line arguments\n :type args: argparse.Namespace\n :param config: the parsed config file\n :type config: config.Config\n :returns: the merged config object\n :rtype: config.Config\n\n \"\"\"\n # display by name: first or last name\n if \"display\" in arg_0 and arg_0.display:\n arg_1.set_display_by_name(arg_0.display)\n # group by address book\n if \"group_by_addressbook\" in arg_0 and arg_0.group_by_addressbook:\n arg_1.set_group_by_addressbook(True)\n # reverse contact list\n if \"reverse\" in arg_0 and arg_0.reverse:\n arg_1.set_reverse(True)\n # sort criteria: first or last name\n if \"sort\" in arg_0 and arg_0.sort:\n arg_1.sort = arg_0.sort\n # preferred vcard version\n if \"vcard_version\" in arg_0 and arg_0.vcard_version:\n arg_1.set_preferred_vcard_version(arg_0.vcard_version)\n # search in source files\n if \"search_in_source_files\" in arg_0 and arg_0.search_in_source_files:\n arg_1.set_search_in_source_files(True)\n # skip unparsable vcards\n if \"skip_unparsable\" in arg_0 and arg_0.skip_unparsable:\n arg_1.set_skip_unparsable(True)\n # If the user could but did not specify address books on the command line\n # it means they want to use all address books in that place.\n if \"addressbook\" in arg_0 and not arg_0.addressbook:\n arg_0.addressbook = [abook.name for abook in arg_1.abooks]\n if \"target_addressbook\" in arg_0 and not arg_0.target_addressbook:\n arg_0.target_addressbook = [abook.name for abook in arg_1.abooks]"} +{"_id": "doc_7454", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Load all address books with the given names from the config.\n\n :param names: the address books to load\n :type names: list(str)\n :param config: the config instance to use when looking up address books\n :type config: config.Config\n :param search_queries: a mapping of address book names to search queries\n :type search_queries: dict\n :yields: the loaded address books\n :ytype: addressbook.AddressBook\n\n \"\"\"\n arg_3 = {str(book) for book in arg_1.abooks}\n if not arg_0:\n arg_0 = arg_3\n elif not arg_3.issuperset(arg_0):\n sys.exit('Error: The entered address books \"{}\" do not exist.\\n'\n 'Possible values are: {}'.format(\n '\", \"'.join(set(arg_0) - arg_3),\n ', '.join(arg_3)))\n # load address books which are defined in the configuration file\n for arg_4 in arg_0:\n arg_5 = arg_1.abook.get_abook(arg_4)\n arg_5.load(arg_2[arg_5.name],\n search_in_source_files=arg_1.search_in_source_files())\n yield arg_5"} +{"_id": "doc_7455", "title": "", "text": "def Func(arg_0):\n \"\"\"Prepare the search query string from the given command line args.\n\n Each address book can get a search query string to filter vcards befor\n loading them. 
Depending on the question if the address book is used for\n source or target searches different regexes have to be combined into one\n search string.\n\n :param args: the parsed command line\n :type args: argparse.Namespace\n :returns: a dict mapping abook names to their loading queries, if the query\n is None it means that all cards should be loaded\n :rtype: dict(str:str or None)\n\n \"\"\"\n # get all possible search queries for address book parsing\n arg_1 = []\n arg_2 = []\n if \"source_search_terms\" in arg_0 and arg_0.source_search_terms:\n arg_3 = \".*\".join(re.escape(x)\n for x in arg_0.source_search_terms)\n arg_1.append(arg_3)\n arg_0.source_search_terms = arg_3\n if \"search_terms\" in arg_0 and arg_0.search_terms:\n arg_3 = \".*\".join(re.escape(x) for x in arg_0.search_terms)\n arg_1.append(arg_3)\n arg_0.search_terms = arg_3\n if \"target_contact\" in arg_0 and arg_0.target_contact:\n arg_3 = re.escape(arg_0.target_contact)\n arg_2.append(arg_3)\n arg_0.target_contact = arg_3\n if \"uid\" in arg_0 and arg_0.uid:\n arg_1.append(arg_0.uid)\n if \"target_uid\" in arg_0 and arg_0.target_uid:\n arg_2.append(arg_0.target_uid)\n # create and return regexp, None means that no query is given and hence all\n # contacts should be searched.\n arg_1 = \"^.*(%s).*$\" % ')|('.join(arg_1) \\\n if arg_1 else None\n arg_2 = \"^.*(%s).*$\" % ')|('.join(arg_2) \\\n if arg_2 else None\n logging.debug('Created source query regex: %s', arg_1)\n logging.debug('Created target query regex: %s', arg_2)\n # Get all possible search queries for address book parsing, always\n # depending on the fact if the address book is used to find source or\n # target contacts or both.\n arg_7 = {abook.name: [] for abook in config.abook._abooks}\n for arg_8 in arg_7:\n if \"addressbook\" in arg_0 and arg_8 in arg_0.addressbook:\n arg_7[arg_8].append(arg_1)\n if \"target_addressbook\" in arg_0 and arg_8 in arg_0.target_addressbook:\n arg_7[arg_8].append(arg_2)\n # If None is included in the search queries of an address book it means\n # that either no source or target query was given and this address book\n # is affected by this. 
All contacts should be loaded from that address\n # book.\n if None in arg_7[arg_8]:\n arg_7[arg_8] = None\n else:\n arg_7[arg_8] = \"({})\".format(')|('.join(arg_7[arg_8]))\n logging.debug('Created query regex: %s', arg_7)\n return arg_7"} +{"_id": "doc_7456", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Print a phone application friendly contact table.\n\n :param search_terms: used as search term to filter the contacts before\n printing\n :type search_terms: str\n :param vcard_list: the vcards to search for matching entries which should\n be printed\n :type vcard_list: list of carddav_object.CarddavObject\n :param parsable: machine readable output: columns devided by tabulator (\\t)\n :type parsable: bool\n :returns: None\n :rtype: None\n\n \"\"\"\n arg_3 = []\n arg_4 = []\n for arg_5 in arg_1:\n for arg_6, arg_7 in sorted(arg_5.get_phone_numbers().items(),\n key=lambda k: k[0].lower()):\n for arg_8 in sorted(arg_7):\n if config.display_by_name() == \"first_name\":\n arg_9 = arg_5.get_first_name_last_name()\n else:\n arg_9 = arg_5.get_last_name_first_name()\n # create output lines\n arg_10 = \"\\t\".join([arg_9, arg_6, arg_8])\n arg_11 = \"\\t\".join([arg_8, arg_9, arg_6])\n if arg_2:\n # parsable option: start with phone number\n arg_12 = arg_11\n else:\n # else: start with name\n arg_12 = arg_10\n if re.search(arg_0,\n \"%s\\n%s\" % (arg_10, arg_11),\n re.IGNORECASE | re.DOTALL):\n arg_4.append(arg_12)\n elif len(re.sub(\"\\D\", \"\", arg_0)) >= 3:\n # The user likely searches for a phone number cause the\n # search string contains at least three digits. So we\n # remove all non-digit chars from the phone number field\n # and match against that.\n if re.search(re.sub(\"\\D\", \"\", arg_0),\n re.sub(\"\\D\", \"\", arg_8), re.IGNORECASE):\n arg_4.append(arg_12)\n # collect all phone numbers in a different list as fallback\n arg_3.append(arg_12)\n if arg_4:\n if arg_2:\n print('\\n'.join(arg_4))\n else:\n list_phone_numbers(arg_4)\n elif arg_3:\n if arg_2:\n print('\\n'.join(arg_3))\n else:\n list_phone_numbers(arg_3)\n else:\n if not arg_2:\n print(\"Found no phone numbers\")\n sys.exit(1)"} +{"_id": "doc_7457", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Print a user friendly contacts table.\n\n :param vcard_list: the vcards to print\n :type vcard_list: list of carddav_object.CarddavObject\n :param parsable: machine readable output: columns devided by tabulator (\\t)\n :type parsable: bool\n :returns: None\n :rtype: None\n\n \"\"\"\n if not arg_0:\n if not arg_1:\n print(\"Found no contacts\")\n sys.exit(1)\n elif arg_1:\n arg_2 = []\n for arg_3 in arg_0:\n if config.display_by_name() == \"first_name\":\n arg_4 = arg_3.get_first_name_last_name()\n else:\n arg_4 = arg_3.get_last_name_first_name()\n arg_2.append('\\t'.join([arg_3.get_uid(), arg_4,\n arg_3.address_book.name]))\n print('\\n'.join(arg_2))\n else:\n list_contacts(arg_0)"} +{"_id": "doc_7458", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Modify a contact in an external editor.\n\n :param selected_vcard: the contact to modify\n :type selected_vcard: carddav_object.CarddavObject\n :param input_from_stdin_or_file: new data from stdin (or a file) that\n should be incorperated into the contact, this should be a yaml\n formatted string\n :type input_from_stdin_or_file: str\n :param open_editor: whether to open the new contact in the edior after\n creation\n :type open_editor: bool\n :returns: None\n :rtype: None\n\n \"\"\"\n # show warning, if vcard version of selected contact is not 3.0 or 
4.0\n if arg_0.get_version() not in config.supported_vcard_versions:\n print(\"Warning:\\nThe selected contact is based on vcard version %s \"\n \"but khard only supports the creation and modification of vcards\"\n \" with version 3.0 and 4.0.\\nIf you proceed, the contact will be\"\n \" converted to vcard version %s but beware: This could corrupt \"\n \"the contact file or cause data loss.\"\n % (arg_0.get_version(),\n config.get_preferred_vcard_version()))\n while True:\n arg_3 = input(\"Do you want to proceed anyway (y/n)? \")\n if arg_3.lower() in [\"\", \"n\", \"q\"]:\n print(\"Canceled\")\n sys.exit(0)\n if arg_3.lower() == \"y\":\n break\n # if there is some data in stdin\n if arg_1:\n # create new contact from stdin\n try:\n arg_4 = \\\n CarddavObject.from_existing_contact_with_new_user_input(\n arg_0, arg_1,\n config.localize_dates())\n except ValueError as err:\n print(err)\n sys.exit(1)\n if arg_0 == arg_4:\n print(\"Nothing changed\\n\\n%s\" % arg_4.print_vcard())\n else:\n print(\"Modification\\n\\n%s\\n\" % arg_4.print_vcard())\n while True:\n arg_3 = input(\"Do you want to proceed (y/n)? \")\n if arg_3.lower() in [\"\", \"n\", \"q\"]:\n print(\"Canceled\")\n break\n if arg_3.lower() == \"y\":\n arg_4.write_to_file(overwrite=True)\n if arg_2:\n modify_existing_contact(arg_4)\n else:\n print(\"Done\")\n break\n else:\n modify_existing_contact(arg_0)"} +{"_id": "doc_7459", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove a contact from the addressbook.\n\n :param selected_vcard: the contact to delete\n :type selected_vcard: carddav_object.CarddavObject\n :param force: delete without confirmation\n :type force: bool\n :returns: None\n :rtype: None\n\n \"\"\"\n if not arg_1:\n while True:\n arg_2 = input(\n \"Deleting contact %s from address book %s. Are you sure? 
\"\n \"(y/n): \" % (arg_0, arg_0.address_book))\n if arg_2.lower() in [\"\", \"n\", \"q\"]:\n print(\"Canceled\")\n sys.exit(0)\n if arg_2.lower() == \"y\":\n break\n arg_0.delete_vcard_file()\n print(\"Contact %s deleted successfully\" % arg_0.get_full_name())"} +{"_id": "doc_7460", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Open the vcard file for a contact in an external editor.\n\n :param selected_vcard: the contact to edit\n :type selected_vcard: carddav_object.CarddavObject\n :param editor: the eitor command to use\n :type editor: str\n :returns: None\n :rtype: None\n\n \"\"\"\n arg_2 = subprocess.Popen([arg_1, arg_0.filename])\n arg_2.communicate()"} +{"_id": "doc_7461", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"Merge two contacts into one.\n\n :param vcard_list: the vcards from which to choose contacts for mergeing\n :type vcard_list: list of carddav_object.CarddavObject\n :param selected_address_books: the addressbooks to use to find the target\n contact\n :type selected_address_books: list(addressbook.AddressBook)\n :param search_terms: the search terms to find the target contact\n :type search_terms: str\n :param target_uid: the uid of the target contact or empty\n :type target_uid: str\n :returns: None\n :rtype: None\n\n \"\"\"\n # Check arguments.\n if arg_3 != \"\" and arg_2 != \"\":\n print(\"You can not specify a target uid and target search terms for a \"\n \"merge.\")\n sys.exit(1)\n # Find possible target contacts.\n if arg_3 != \"\":\n arg_4 = get_contacts(arg_1, arg_3,\n method=\"uid\")\n # We require that the uid given can uniquely identify a contact.\n if len(arg_4) != 1:\n if not arg_4:\n print(\"Found no contact for target uid %s\" % arg_3)\n else:\n print(\"Found multiple contacts for target uid %s\" % arg_3)\n for arg_5 in arg_4:\n print(\" %s: %s\" % (arg_5, arg_5.get_uid()))\n sys.exit(1)\n else:\n arg_4 = get_contact_list_by_user_selection(\n arg_1, arg_2, False)\n # get the source vcard, from which to merge\n arg_6 = choose_vcard_from_list(\"Select contact from which to merge\",\n arg_0)\n if arg_6 is None:\n print(\"Found no source contact for merging\")\n sys.exit(1)\n else:\n print(\"Merge from %s from address book %s\\n\\n\"\n % (arg_6, arg_6.address_book))\n # get the target vcard, into which to merge\n arg_7 = choose_vcard_from_list(\"Select contact into which to merge\",\n arg_4)\n if arg_7 is None:\n print(\"Found no target contact for merging\")\n sys.exit(1)\n else:\n print(\"Merge into %s from address book %s\\n\\n\"\n % (arg_7, arg_7.address_book))\n # merging\n if arg_6 == arg_7:\n print(\"The selected contacts are already identical\")\n else:\n merge_existing_contacts(arg_6, arg_7, True)"} +{"_id": "doc_7462", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find the name of the action for the supplied alias. If no action is\n asociated with the given alias, None is returned.\n\n :param alias: the alias to look up\n :type alias: str\n :rturns: the name of the corresponding action or None\n :rtype: str or NoneType\n\n \"\"\"\n for arg_2, arg_3 in arg_0.action_map.items():\n if arg_1 in arg_3:\n return arg_2\n return None"} +{"_id": "doc_7463", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Convert the named field to bool.\n\n The current value should be one of the strings \"yes\" or \"no\". It will\n be replaced with its boolean counterpart. 
If the field is not present\n in the config object, the default value is used.\n\n :param config: the config section where to set the option\n :type config: configobj.ConfigObj\n :param name: the name of the option to convert\n :type name: str\n :param default: the default value to use if the option was not\n previously set\n :type default: bool\n :returns: None\n\n \"\"\"\n if arg_1 not in arg_0:\n arg_0[arg_1] = arg_2\n elif arg_0[arg_1] == \"yes\":\n arg_0[arg_1] = True\n elif arg_0[arg_1] == \"no\":\n arg_0[arg_1] = False\n else:\n raise ValueError(\"Error in config file\\nInvalid value for %s \"\n \"parameter\\nPossible values: yes, no\" % arg_1)"} +{"_id": "doc_7464", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5):\n \"\"\"Use this if you want to create a new contact from user input.\"\"\"\n arg_6 = arg_0(arg_1, None, arg_3, arg_4,\n arg_5)\n arg_6._process_user_input(arg_2)\n return arg_6"} +{"_id": "doc_7465", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get some part of the \"N\" entry in the vCard as a list\n\n :param part: the name to get e.g. \"prefix\" or \"given\"\n :type part: str\n :returns: a list of entries for this name part\n :rtype: list(str)\n\n \"\"\"\n try:\n arg_2 = getattr(arg_0.vcard.n.value, arg_1)\n except AttributeError:\n return []\n else:\n # check if list only contains empty strings\n if not ''.join(arg_2):\n return []\n return arg_2 if isinstance(arg_2, list) else [arg_2]"} +{"_id": "doc_7466", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" categories variable must be a list \"\"\"\n arg_2 = arg_0.vcard.add('categories')\n arg_2.value = helpers.convert_to_vcard(\n \"category\", arg_1, ObjectType.list_with_strings)"} +{"_id": "doc_7467", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Parse type value of phone numbers, email and post addresses.\n\n :param types: list of type values\n :type types: list(str)\n :param value: the corresponding label, required for more verbose\n exceptions\n :type value: str\n :param supported_types: all allowed standard types\n :type supported_types: list(str)\n :returns: tuple of standard and custom types and pref integer\n :rtype: tuple(list(str), list(str), int)\n\n \"\"\"\n arg_3 = []\n arg_4 = []\n arg_5 = 0\n for arg_6 in arg_0:\n arg_6 = arg_6.strip()\n if arg_6:\n if arg_6.lower() in arg_2:\n arg_4.append(arg_6)\n elif arg_6.lower() == \"pref\":\n arg_5 += 1\n elif re.match(r\"^pref=\\d{1,2}$\", arg_6.lower()):\n arg_5 += int(arg_6.split(\"=\")[1])\n else:\n if arg_6.lower().startswith(\"x-\"):\n arg_3.append(arg_6[2:])\n arg_4.append(arg_6)\n else:\n arg_3.append(arg_6)\n arg_4.append(\"X-{}\".format(arg_6))\n return (arg_4, arg_3, arg_5)"} +{"_id": "doc_7468", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"converts list to string recursively so that nested lists are supported\n\n :param input: a list of strings and lists of strings (and so on recursive)\n :type input: list\n :param delimiter: the deimiter to use when joining the items\n :type delimiter: str\n :returns: the recursively joined list\n :rtype: str\n \"\"\"\n if isinstance(arg_0, list):\n return arg_1.join(\n Func(arg_2, arg_1) for arg_2 in arg_0)\n return arg_0"} +{"_id": "doc_7469", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert string to date object.\n\n :param input: the date string to parse\n :type input: str\n :returns: the parsed datetime object\n :rtype: datetime.datetime\n \"\"\"\n # try date formats --mmdd, --mm-dd, yyyymmdd, yyyy-mm-dd and datetime\n # formats 
yyyymmddThhmmss, yyyy-mm-ddThh:mm:ss, yyyymmddThhmmssZ,\n # yyyy-mm-ddThh:mm:ssZ.\n for arg_1 in (\"--%m%d\", \"--%m-%d\", \"%Y%m%d\", \"%Y-%m-%d\",\n \"%Y%m%dT%H%M%S\", \"%Y-%m-%dT%H:%M:%S\",\n \"%Y%m%dT%H%M%SZ\", \"%Y-%m-%dT%H:%M:%SZ\"):\n try:\n return datetime.strptime(arg_0, arg_1)\n except ValueError:\n pass\n # try datetime formats yyyymmddThhmmsstz and yyyy-mm-ddThh:mm:sstz where tz\n # may look like -06:00.\n for arg_1 in (\"%Y%m%dT%H%M%S%z\", \"%Y-%m-%dT%H:%M:%S%z\"):\n try:\n return datetime.strptime(''.join(arg_0.rsplit(\":\", 1)),\n arg_1)\n except ValueError:\n pass\n raise ValueError"} +{"_id": "doc_7470", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Calculate the minimum length of initial substrings of uid1 and uid2\n for them to be different.\n\n :param uid1: first uid to compare\n :type uid1: str\n :param uid2: second uid to compare\n :type uid2: str\n :returns: the length of the shortes unequal initial substrings\n :rtype: int\n \"\"\"\n arg_2 = 0\n for arg_3, arg_4 in zip(arg_0, arg_1):\n if arg_3 == arg_4:\n arg_2 += 1\n else:\n break\n return arg_2"} +{"_id": "doc_7471", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Search in all fields for contacts matching query.\n\n :param query: the query to search for\n :type query: str\n :yields: all found contacts\n :rtype: generator(carddav_object.CarddavObject)\n\n \"\"\"\n arg_2 = re.compile(arg_1, re.IGNORECASE | re.DOTALL)\n for arg_3 in arg_0.contacts.values():\n # search in all contact fields\n arg_4 = arg_3.print_vcard()\n if arg_2.search(arg_4) is not None:\n yield arg_3\n else:\n # find phone numbers with special chars like /\n arg_5 = re.sub(\"[^a-zA-Z0-9\\n]\", \"\",\n arg_4)\n if arg_2.search(arg_5) is not None \\\n and len(re.sub(\"\\D\", \"\", arg_1)) >= 3:\n yield arg_3"} +{"_id": "doc_7472", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Search in the name filed for contacts matching query.\n\n :param query: the query to search for\n :type query: str\n :yields: all found contacts\n :rtype: generator(carddav_object.CarddavObject)\n\n \"\"\"\n arg_2 = re.compile(arg_1, re.IGNORECASE | re.DOTALL)\n for arg_3 in arg_0.contacts.values():\n # only search in contact name\n if arg_2.search(arg_3.get_full_name()) is not None:\n yield arg_3"} +{"_id": "doc_7473", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Search for contacts with a matching uid.\n\n :param query: the query to search for\n :type query: str\n :yields: all found contacts\n :rtype: generator(carddav_object.CarddavObject)\n\n \"\"\"\n try:\n # First we treat the argument as a full UID and try to match it\n # exactly.\n yield arg_0.contacts[arg_1]\n except KeyError:\n # If that failed we look for all contacts whos UID start with the\n # given query.\n for arg_2 in arg_0.contacts:\n if arg_2.startswith(arg_1):\n yield arg_0.contacts[arg_2]"} +{"_id": "doc_7474", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"all\"):\n \"\"\"Search this address book for contacts matching the query.\n\n The method can be one of \"all\", \"name\" and \"uid\". 
The backend for this\n address book migth be load()ed if needed.\n\n :param query: the query to Func for\n :type query: str\n :param method: the type of fileds to use when seaching\n :type method: str\n :returns: all found contacts\n :rtype: list(carddav_object.CarddavObject)\n\n \"\"\"\n logging.debug('address book %s, Funcing with %s', arg_0.name, arg_1)\n if not arg_0._loaded:\n arg_0.load(arg_1)\n if arg_2 == \"all\":\n arg_3 = arg_0._Func_all\n elif arg_2 == \"name\":\n arg_3 = arg_0._Func_names\n elif arg_2 == \"uid\":\n arg_3 = arg_0._Func_uid\n else:\n raise ValueError('Only the Func methods \"all\", \"name\" and \"uid\" '\n 'are supported.')\n return list(arg_3(arg_1))"} +{"_id": "doc_7475", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Create a dictionary of shortend UIDs for all contacts.\n\n All arguments are only used if the address book is not yet initialized\n and will just be handed to self.load().\n\n :param query: see self.load()\n :type query: str\n :returns: the contacts mapped by the shortes unique prefix of their UID\n :rtype: dict(str: CarddavObject)\n \"\"\"\n if arg_0._short_uids is None:\n if not arg_0._loaded:\n arg_0.load(arg_1)\n if not arg_0.contacts:\n arg_0._short_uids = {}\n elif len(arg_0.contacts) == 1:\n arg_0._short_uids = {uid[0:1]: contact\n for uid, contact in arg_0.contacts.items()}\n else:\n arg_0._short_uids = {}\n arg_3 = sorted(arg_0.contacts)\n # Prepare for the loop; the first and last items are handled\n # seperatly.\n arg_4, arg_5 = arg_3[:2]\n arg_6 = arg_0._compare_uids(arg_4, arg_5)\n arg_0._short_uids[arg_4[:arg_6 + 1]] = arg_0.contacts[arg_4]\n for arg_7 in arg_3[2:]:\n # shift the items and the common prefix lenght one further\n arg_4, arg_5 = arg_5, arg_7\n arg_8, arg_6 = arg_6, arg_0._compare_uids(arg_4, arg_5)\n # compute the final prefix length for item1\n arg_9 = max(arg_8, arg_6)\n arg_0._short_uids[arg_4[:arg_9 + 1]] = arg_0.contacts[arg_4]\n # Save the last item.\n arg_0._short_uids[arg_5[:arg_6 + 1]] = arg_0.contacts[arg_5]\n return arg_0._short_uids"} +{"_id": "doc_7476", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get the shortend UID for the given UID.\n\n :param uid: the full UID to shorten\n :type uid: str\n :returns: the shortend uid or the empty string\n :rtype: str\n \"\"\"\n if arg_1:\n arg_2 = arg_0.Func_dict()\n for arg_3 in range(len(arg_1), 0, -1):\n if arg_2.get(arg_1[:arg_3]) is not None:\n return arg_1[:arg_3]\n return \"\""} +{"_id": "doc_7477", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=False):\n \"\"\"Load all vcard files in this address book from disk.\n\n If a search string is given only files which contents match that will\n be Funced.\n\n :param query: a regular expression to limit the results\n :type query: str\n :param search_in_source_files: apply search regexp directly on the .vcf files to speed up parsing (less accurate)\n :type search_in_source_files: bool\n :returns: the number of successfully Funced cards and the number of\n errors\n :rtype: int, int\n :throws: AddressBookParseError\n \"\"\"\n if arg_0._Funced:\n return\n logging.debug('Loading Vdir %s with query %s', arg_0.name, arg_1)\n arg_3 = 0\n for arg_4 in arg_0._find_vcard_files(\n search=arg_1, arg_2=arg_2):\n try:\n arg_5 = CarddavObject.from_file(arg_0, arg_4,\n arg_0._private_objects,\n arg_0._localize_dates)\n except (IOError, vobject.base.ParseError) as err:\n arg_6 = \"open\" if isinstance(err, IOError) else \"parse\"\n logging.debug(\"Error: Could not %s file %s\\n%s\", arg_6,\n arg_4, err)\n if 
arg_0._skip:\n arg_3 += 1\n else:\n # FIXME: This should throw an apropriate exception and the\n # sys.exit should be called somewhere closer to the command\n # line parsing.\n logging.error(\n \"The vcard file %s of address book %s could not be \"\n \"parsed\\nUse --debug for more information or \"\n \"--skip-unparsable to proceed\", arg_4, arg_0.name)\n sys.exit(2)\n else:\n arg_7 = arg_5.get_uid()\n if not arg_7:\n logging.warning(\"Card %s from address book %s has no UID \"\n \"and will not be availbale.\", arg_5,\n arg_0.name)\n elif arg_7 in arg_0.contacts:\n logging.warning(\n \"Card %s and %s from address book %s have the same \"\n \"UID. The former will not be availbale.\", arg_5,\n arg_0.contacts[arg_7], arg_0.name)\n else:\n arg_0.contacts[arg_7] = arg_5\n arg_0._Funced = True\n if arg_3:\n logging.warning(\n \"%d of %d vCard files of address book %s could not be parsed.\",\n arg_3, len(arg_0.contacts) + arg_3, arg_0)\n logging.debug('Loded %s contacts from address book %s.',\n len(arg_0.contacts), arg_0.name)"} +{"_id": "doc_7478", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Create the JSON for configuring arthur to collect data\n\n https://github.com/grimoirelab/arthur#adding-tasks\n Sample for git:\n\n {\n \"tasks\": [\n {\n \"task_id\": \"arthur.git\",\n \"backend\": \"git\",\n \"backend_args\": {\n \"gitpath\": \"/tmp/arthur_git/\",\n \"uri\": \"https://github.com/grimoirelab/arthur.git\"\n },\n \"category\": \"commit\",\n \"archive_args\": {\n \"archive_path\": '/tmp/test_archives',\n \"fetch_from_archive\": false,\n \"archive_after\": None\n },\n \"scheduler_args\": {\n \"delay\": 10\n }\n }\n ]\n }\n \"\"\"\n\n arg_2 = arg_0._compose_arthur_params(arg_0.backend_section, arg_1)\n if arg_0.backend_section == 'git':\n arg_2['gitpath'] = os.path.join(arg_0.REPOSITORY_DIR, arg_1)\n arg_2['tag'] = arg_0.backend_tag(arg_1)\n\n arg_3 = {\"tasks\": [{}]}\n # This is the perceval tag\n arg_3[\"tasks\"][0]['task_id'] = arg_0.backend_tag(arg_1)\n arg_3[\"tasks\"][0]['backend'] = arg_0.backend_section.split(\":\")[0]\n arg_3[\"tasks\"][0]['backend_args'] = arg_2\n arg_3[\"tasks\"][0]['category'] = arg_2['category']\n arg_3[\"tasks\"][0]['archive'] = {}\n arg_3[\"tasks\"][0]['scheduler'] = {\"delay\": arg_0.ARTHUR_TASK_DELAY}\n # from-date or offset param must be added\n arg_4 = arg_0._get_collection_url()\n arg_5 = arg_0.conf[arg_0.backend_section]['raw_index']\n # Get the last activity for the data source\n arg_6 = ElasticSearch(arg_4, arg_5)\n arg_7 = get_connector_from_name(arg_0.backend_section)\n\n arg_8 = arg_7[0] # Backend for the connector\n arg_9 = inspect.signature(arg_8.fetch)\n\n arg_10 = None\n arg_11 = {\"name\": \"tag\", \"value\": arg_2['tag']}\n if 'from_date' in arg_9.parameters:\n arg_10 = arg_6.get_last_item_field('metadata__updated_on', [arg_11])\n if arg_10:\n arg_3[\"tasks\"][0]['backend_args']['from_date'] = arg_10.isoformat()\n elif 'offset' in arg_9.parameters:\n arg_10 = arg_6.get_last_item_field('offset', [arg_11])\n if arg_10:\n arg_3[\"tasks\"][0]['backend_args']['offset'] = arg_10\n\n if arg_10:\n logging.info(\"Getting raw item with arthur since %s\", arg_10)\n\n return(arg_3)"} +{"_id": "doc_7479", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\" Return the GitHub SHA for a file in the repository \"\"\"\n\n arg_5 = None\n\n arg_6 = arg_1.get_conf()\n arg_7 = arg_6['sortinghat']['identities_api_token']\n arg_8 = {\"Authorization\": \"token \" + arg_7}\n\n arg_9 = arg_3 + \"/git/trees/\" + arg_4\n 
logger.debug(\"Gettting sha data from tree: %s\", arg_9)\n arg_10 = requests.get(arg_9, arg_8=arg_8)\n arg_10.raise_for_status()\n for arg_11 in arg_10.json()['tree']:\n if arg_11['path'] == arg_2:\n logger.debug(\"SHA found: %s, \", arg_11[\"sha\"])\n arg_5 = arg_11[\"sha\"]\n break\n\n return arg_5"} +{"_id": "doc_7480", "title": "", "text": "def Func(arg_0):\n \"\"\"Execute the merge identities phase\n\n :param config: a Mordred config object\n \"\"\"\n\n TaskProjects(arg_0).execute()\n arg_1 = TaskIdentitiesMerge(arg_0)\n arg_1.execute()\n logging.info(\"Merging identities finished!\")"} +{"_id": "doc_7481", "title": "", "text": "def Func(arg_0):\n \"\"\"Execute the panels phase\n\n :param config: a Mordred config object\n \"\"\"\n\n arg_1 = TaskPanels(arg_0)\n arg_1.execute()\n\n arg_1 = TaskPanelsMenu(arg_0)\n arg_1.execute()\n\n logging.info(\"Panels creation finished!\")"} +{"_id": "doc_7482", "title": "", "text": "def Func(arg_0):\n \"\"\"Config logging level output output\"\"\"\n\n if arg_0:\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\n logging.debug(\"Debug mode activated\")\n else:\n logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')"} +{"_id": "doc_7483", "title": "", "text": "def Func():\n \"\"\"Get params to execute the micro-mordred\"\"\"\n\n arg_0 = Func_parser()\n arg_1 = arg_0.parse_args()\n\n if not arg_1.raw and not arg_1.enrich and not arg_1.identities and not arg_1.panels:\n print(\"No tasks enabled\")\n sys.exit(1)\n\n return arg_1"} +{"_id": "doc_7484", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\"Upload a panel to Elasticsearch if it does not exist yet.\n\n If a list of data sources is specified, upload only those\n elements (visualizations, searches) that match that data source.\n\n :param panel_file: file name of panel (dashobard) to upload\n :param data_sources: list of data sources\n :param strict: only upload a dashboard if it is newer than the one already existing\n \"\"\"\n arg_4 = arg_0.conf['es_enrichment']['url']\n arg_5 = arg_0.conf['panels']['kibiter_url']\n\n arg_6 = set(['pipermail', 'hyperkitty', 'groupsio', 'nntp'])\n if arg_2 and any(arg_7 in arg_2 for arg_7 in arg_6):\n arg_2 = list(arg_2)\n arg_2.append('mbox')\n if arg_2 and ('supybot' in arg_2):\n arg_2 = list(arg_2)\n arg_2.append('irc')\n if arg_2 and 'google_hits' in arg_2:\n arg_2 = list(arg_2)\n arg_2.append('googlehits')\n if arg_2 and 'stackexchange' in arg_2:\n # stackexchange is called stackoverflow in panels\n arg_2 = list(arg_2)\n arg_2.append('stackoverflow')\n if arg_2 and 'phabricator' in arg_2:\n arg_2 = list(arg_2)\n arg_2.append('maniphest')\n\n try:\n import_dashboard(arg_4, arg_5, arg_1, arg_2=arg_2, arg_3=arg_3)\n except ValueError:\n logger.error(\"%s does not include release field. 
Not loading the panel.\", arg_1)\n except RuntimeError:\n logger.error(\"Can not load the panel %s\", arg_1)"} +{"_id": "doc_7485", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Upload to Kibiter the title for the dashboard.\n\n The title is shown on top of the dashboard menu, and is Usually\n the name of the project being dashboarded.\n This is done only for Kibiter 6.x.\n\n :param kibiter_major: major version of kibiter\n \"\"\"\n\n if arg_1 == \"6\":\n arg_2 = \".kibana/doc/projectname\"\n arg_3 = {\"projectname\": {\"name\": arg_0.project_name}}\n arg_4 = \".kibana/_mapping/doc\"\n arg_5 = {\"dynamic\": \"true\"}\n\n arg_6 = urijoin(arg_0.conf['es_enrichment']['url'], arg_2)\n arg_7 = urijoin(arg_0.conf['es_enrichment']['url'],\n arg_4)\n\n logger.debug(\"Adding mapping for dashboard title\")\n arg_8 = arg_0.grimoire_con.put(arg_7, arg_3=json.dumps(arg_5),\n headers=ES6_HEADER)\n try:\n arg_8.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.error(\"Couldn't create mapping for dashboard title.\")\n logger.error(arg_8.json())\n\n logger.debug(\"Uploading dashboard title\")\n arg_8 = arg_0.grimoire_con.post(arg_6, arg_3=json.dumps(arg_3),\n headers=ES6_HEADER)\n try:\n arg_8.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.error(\"Couldn't create dashboard title.\")\n logger.error(arg_8.json())"} +{"_id": "doc_7486", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create the menu definition to access the panels in a dashboard.\n\n :param menu: dashboard menu to upload\n :param kibiter_major: major version of kibiter\n \"\"\"\n logger.info(\"Adding dashboard menu\")\n if arg_2 == \"6\":\n arg_3 = \".kibana/doc/metadashboard\"\n arg_4 = \".kibana/_mapping/doc\"\n arg_5 = {\"dynamic\": \"true\"}\n arg_6 = {'metadashboard': arg_1}\n else:\n arg_3 = \".kibana/metadashboard/main\"\n arg_4 = \".kibana/_mapping/metadashboard\"\n arg_5 = {\"dynamic\": \"true\"}\n arg_6 = arg_1\n arg_7 = urijoin(arg_0.conf['es_enrichment']['url'],\n arg_3)\n\n arg_8 = urijoin(arg_0.conf['es_enrichment']['url'],\n arg_4)\n logger.debug(\"Adding mapping for metadashboard\")\n arg_9 = arg_0.grimoire_con.put(arg_8, data=json.dumps(arg_5),\n headers=ES6_HEADER)\n try:\n arg_9.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.error(\"Couldn't create mapping for Kibiter menu.\")\n arg_9 = arg_0.grimoire_con.post(arg_7, data=json.dumps(arg_6),\n headers=ES6_HEADER)\n try:\n arg_9.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.error(\"Couldn't create Kibiter menu.\")\n logger.error(arg_9.json())\n raise"} +{"_id": "doc_7487", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove existing menu for dashboard, if any.\n\n Usually, we remove the menu before creating a new one.\n\n :param kibiter_major: major version of kibiter\n \"\"\"\n logger.info(\"Removing old dashboard menu, if any\")\n if arg_1 == \"6\":\n arg_2 = \".kibana/doc/metadashboard\"\n else:\n arg_2 = \".kibana/metadashboard/main\"\n arg_3 = urijoin(arg_0.conf['es_enrichment']['url'], arg_2)\n arg_0.grimoire_con.delete(arg_3)"} +{"_id": "doc_7488", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Get the menu entries from the panel definition \"\"\"\n arg_2 = []\n for arg_3 in arg_0.panels_menu:\n if arg_3['source'] not in arg_0.data_sources:\n continue\n arg_4 = {\n 'name': arg_3['name'],\n 'title': arg_3['name'],\n 'description': \"\",\n 'type': \"menu\",\n 'dashboards': []\n }\n for arg_5 in arg_3['menu']:\n try:\n arg_6 = 
get_dashboard_name(arg_5['panel'])\n except FileNotFoundError:\n logging.error(\"Can't open dashboard file %s\", arg_5['panel'])\n continue\n # The name for the entry is in self.panels_menu\n arg_7 = {\n \"name\": arg_5['name'],\n \"title\": arg_5['name'],\n \"description\": \"\",\n \"type\": \"entry\",\n \"panel_id\": arg_6\n }\n arg_4['dashboards'].append(arg_7)\n arg_2.append(arg_4)\n\n return arg_2"} +{"_id": "doc_7489", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Order the dashboard menu\"\"\"\n\n # omenu = OrderedDict()\n arg_2 = []\n # Start with Overview\n arg_2.append(arg_0.menu_panels_common['Overview'])\n\n # Now the data _getsources\n arg_3 = arg_0.__get_menu_entries(arg_1)\n\n # Remove the kafka and community menus, they will be included at the end\n arg_4 = None\n arg_5 = None\n\n arg_6 = [pos for pos, menu in enumerate(arg_3) if menu['name'] == KAFKA_NAME]\n if arg_6:\n arg_4 = arg_3.pop(arg_6[0])\n\n arg_7 = [pos for pos, menu in enumerate(arg_3) if menu['name'] == COMMUNITY_NAME]\n if arg_7:\n arg_5 = arg_3.pop(arg_7[0])\n\n arg_3.sort(key=operator.itemgetter('name'))\n arg_2 += arg_3\n\n # If kafka and community are present add them before the Data Status and About\n if arg_4:\n arg_2.append(arg_4)\n\n if arg_5:\n arg_2.append(arg_5)\n\n # At the end Data Status, About\n arg_2.append(arg_0.menu_panels_common['Data Status'])\n arg_2.append(arg_0.menu_panels_common['About'])\n\n logger.debug(\"Menu for panels: %s\", json.dumps(arg_3, indent=4))\n return arg_2"} +{"_id": "doc_7490", "title": "", "text": "def Func(arg_0):\n \"\"\" Compose projects.json only for mbox, but using the mailing_lists lists\n\n change: 'https://dev.eclipse.org/mailman/listinfo/emft-dev'\n to: 'emfg-dev /home/bitergia/mboxes/emft-dev.mbox/emft-dev.mbox\n\n :param projects: projects.json\n :return: projects.json with mbox\n \"\"\"\n arg_1 = '/home/bitergia/mboxes'\n\n arg_2 = [project for project in arg_0 if 'mailing_lists' in arg_0[project]]\n for arg_3 in arg_2:\n arg_0[arg_3]['mbox'] = []\n for arg_4 in arg_0[arg_3]['mailing_lists']:\n if 'listinfo' in arg_4:\n arg_5 = arg_4.split('listinfo/')[1]\n elif 'mailing-list' in arg_4:\n arg_5 = arg_4.split('mailing-list/')[1]\n else:\n arg_5 = arg_4.split('@')[0]\n\n arg_6 = \"%s %s/%s.mbox/%s.mbox\" % (arg_5, arg_1, arg_5, arg_5)\n arg_0[arg_3]['mbox'].append(arg_6)\n\n return arg_0"} +{"_id": "doc_7491", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compose projects.json for git\n\n We need to replace '/c/' by '/gitroot/' for instance\n\n change: 'http://git.eclipse.org/c/xwt/org.eclipse.xwt.git'\n to: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with git\n \"\"\"\n for arg_2 in [project for project in arg_1 if len(arg_1[project]['source_repo']) > 0]:\n arg_3 = []\n for arg_4 in arg_1[arg_2]['source_repo']:\n if len(arg_4['url'].split()) > 1: # Error at upstream the project 'tools.corrosion'\n arg_5 = arg_4['url'].split()[1].replace('/c/', '/gitroot/')\n else:\n arg_5 = arg_4['url'].replace('/c/', '/gitroot/')\n\n if arg_5 not in arg_3:\n arg_3.append(arg_5)\n\n arg_0[arg_2]['git'] = arg_3\n\n return arg_0"} +{"_id": "doc_7492", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compose projects.json for mailing lists\n\n At upstream has two different key for mailing list: 'mailings_lists' and 'dev_list'\n The key 'mailing_lists' is an array with mailing lists\n The key 'dev_list' is a dict with only one mailing list\n\n :param 
projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with mailing_lists\n \"\"\"\n for arg_2 in [project for project in arg_1 if len(arg_1[project]['mailing_lists']) > 0]:\n if 'mailing_lists' not in arg_0[arg_2]:\n arg_0[arg_2]['mailing_lists'] = []\n\n arg_3 = [url['url'].replace('mailto:', '') for url in arg_1[arg_2]['mailing_lists'] if\n url['url'] not in arg_0[arg_2]['mailing_lists']]\n arg_0[arg_2]['mailing_lists'] += arg_3\n\n for arg_2 in [project for project in arg_1 if len(arg_1[project]['dev_list']) > 0]:\n if 'mailing_lists' not in arg_0[arg_2]:\n arg_0[arg_2]['mailing_lists'] = []\n\n arg_4 = arg_1[arg_2]['dev_list']['url'].replace('mailto:', '')\n arg_0[arg_2]['mailing_lists'].append(arg_4)\n\n return arg_0"} +{"_id": "doc_7493", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compose projects.json for github\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with github\n \"\"\"\n for arg_2 in [project for project in arg_1 if len(arg_1[project]['github_repos']) > 0]:\n if 'github' not in arg_0[arg_2]:\n arg_0[arg_2]['github'] = []\n\n arg_3 = [url['url'] for url in arg_1[arg_2]['github_repos'] if\n url['url'] not in arg_0[arg_2]['github']]\n arg_0[arg_2]['github'] += arg_3\n\n return arg_0"} +{"_id": "doc_7494", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compose the projects JSON file only with the projects name\n\n :param projects: projects.json\n :param data: eclipse JSON with the origin format\n :return: projects.json with titles\n \"\"\"\n for arg_2 in arg_1:\n arg_0[arg_2] = {\n 'meta': {\n 'title': arg_1[arg_2]['title']\n }\n }\n return arg_0"} +{"_id": "doc_7495", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compose projects.json with all data sources\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with all data sources\n \"\"\"\n arg_0 = compose_git(arg_0, arg_1)\n arg_0 = compose_mailing_lists(arg_0, arg_1)\n arg_0 = compose_bugzilla(arg_0, arg_1)\n arg_0 = compose_github(arg_0, arg_1)\n arg_0 = compose_gerrit(arg_0)\n arg_0 = compose_mbox(arg_0)\n\n return arg_0"} +{"_id": "doc_7496", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execute autorefresh for areas of code study if configured\"\"\"\n\n if 'studies' not in arg_0.conf[arg_0.backend_section] or \\\n 'enrich_areas_of_code:git' not in arg_0.conf[arg_0.backend_section]['studies']:\n logger.debug(\"Not doing autorefresh for studies, Areas of Code study is not active.\")\n return\n\n arg_2 = arg_0.conf['enrich_areas_of_code:git'].get('out_index', GitEnrich.GIT_AOC_ENRICHED)\n\n # if `out_index` exists but has no value, use default\n if not arg_2:\n arg_2 = GitEnrich.GIT_AOC_ENRICHED\n\n logger.debug(\"Autorefresh for Areas of Code study index: %s\", arg_2)\n\n arg_3 = Elasticsearch([arg_0.conf['es_enrichment']['url']], timeout=100,\n verify_certs=arg_0._get_enrich_backend().elastic.requests.verify)\n\n if not arg_3.indices.exists(index=arg_2):\n logger.debug(\"Not doing autorefresh, index doesn't exist for Areas of Code study\")\n return\n\n logger.debug(\"Doing autorefresh for Areas of Code study\")\n\n # Create a GitEnrich backend tweaked to work with AOC index\n arg_4 = GitEnrich(arg_0.db_sh, None, arg_1['projects']['projects_file'],\n arg_0.db_user, arg_0.db_password, arg_0.db_host)\n arg_4.mapping = None\n arg_4.roles = ['author']\n arg_7 = get_elastic(arg_0.conf['es_enrichment']['url'],\n arg_2, clean=False, backend=arg_4)\n arg_4.set_elastic(arg_7)\n\n 
arg_0.__autorefresh(arg_4, studies=True)"} +{"_id": "doc_7497", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Execute the studies configured for the current backend \"\"\"\n\n arg_2 = arg_0.config.get_conf()\n if 'studies' not in arg_2[arg_0.backend_section] or not \\\n arg_2[arg_0.backend_section]['studies']:\n logger.debug('No studies for %s' % arg_0.backend_section)\n return\n\n arg_3 = [arg_10 for arg_10 in arg_2[arg_0.backend_section]['studies'] if arg_10.strip() != \"\"]\n if not arg_3:\n logger.debug('No studies for %s' % arg_0.backend_section)\n return\n\n logger.debug(\"Executing studies for %s: %s\" % (arg_0.backend_section, arg_3))\n time.sleep(2) # Wait so enrichment has finished in ES\n arg_4 = arg_0._get_enrich_backend()\n arg_5 = arg_0._get_ocean_backend(arg_4)\n\n arg_6 = []\n arg_7 = arg_4.studies\n arg_8 = [arg_10.__name__ for arg_10 in arg_4.studies]\n\n # Time to check that configured studies are valid\n logger.debug(\"All studies in %s: %s\", arg_0.backend_section, arg_8)\n logger.debug(\"Configured studies %s\", arg_3)\n arg_9 = [arg_10.split(\":\")[0] for arg_10 in arg_3]\n if not set(arg_9).issubset(set(arg_8)):\n logger.error('Wrong studies names for %s: %s', arg_0.backend_section, arg_3)\n raise RuntimeError('Wrong studies names ', arg_0.backend_section, arg_3)\n\n for arg_10 in arg_4.studies:\n if arg_10.__name__ in arg_9:\n arg_6.append(arg_10)\n\n arg_4.studies = arg_6\n print(\"Executing for %s the studies %s\" % (arg_0.backend_section,\n [arg_10 for arg_10 in arg_3]))\n\n arg_11 = arg_0.__load_studies()\n\n do_studies(arg_5, arg_4, arg_11, arg_1=arg_1)\n # Return studies to its original value\n arg_4.studies = arg_7"} +{"_id": "doc_7498", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Retain the identities in SortingHat based on the `retention_time`\n value declared in the setup.cfg.\n\n :param retention_time: maximum number of minutes wrt the current date to retain the SortingHat data\n \"\"\"\n arg_2 = arg_0.conf['es_enrichment']['url']\n arg_3 = arg_0.db\n arg_4 = arg_0.get_backend(arg_0.backend_section)\n arg_5 = arg_0.config.get_active_data_sources()\n\n if arg_1 is None:\n logger.debug(\"[identities retention] Retention policy disabled, no identities will be deleted.\")\n return\n\n if arg_1 <= 0:\n logger.debug(\"[identities retention] Retention time must be greater than 0.\")\n return\n\n Func(arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_7499", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\" return list with the repositories for a backend_section \"\"\"\n arg_3 = []\n arg_4 = TaskProjects.get_projects()\n\n for arg_5 in arg_4:\n if arg_1 in arg_4[arg_5]:\n # if the projects.json doesn't contain the `unknown` project, add the repos in the bck section\n if arg_0.GLOBAL_PROJECT not in arg_4:\n arg_3 += arg_4[arg_5][arg_1]\n else:\n # if the projects.json contains the `unknown` project\n # in the case of the collection phase\n if arg_2:\n # if the current project is not `unknown`\n if arg_5 != arg_0.GLOBAL_PROJECT:\n # if the bck section is not in the `unknown` project, add the repos in the bck section\n if arg_1 not in arg_4[arg_0.GLOBAL_PROJECT]:\n arg_3 += arg_4[arg_5][arg_1]\n # if the backend section is in the `unknown` project,\n # add the repo in the bck section under `unknown`\n elif arg_1 in arg_4[arg_5] and arg_1 in arg_4[arg_0.GLOBAL_PROJECT]:\n arg_3 += arg_4[arg_0.GLOBAL_PROJECT][arg_1]\n # if the current project is `unknown`\n else:\n # if the backend section is only in the `unknown` project,\n # add 
the repo in the bck section under `unknown`\n arg_6 = [arg_4[arg_5] for arg_5 in arg_4 if arg_5 != arg_0.GLOBAL_PROJECT][0]\n if arg_1 not in arg_6:\n arg_3 += arg_4[arg_0.GLOBAL_PROJECT][arg_1]\n # in the case of the enrichment phase\n else:\n # if the current project is not `unknown`\n if arg_5 != arg_0.GLOBAL_PROJECT:\n # if the bck section is not in the `unknown` project, add the repos in the bck section\n if arg_1 not in arg_4[arg_0.GLOBAL_PROJECT]:\n arg_3 += arg_4[arg_5][arg_1]\n # if the backend section is in the `unknown` project, add the repos in the bck section\n elif arg_1 in arg_4[arg_5] and arg_1 in arg_4[arg_0.GLOBAL_PROJECT]:\n arg_3 += arg_4[arg_5][arg_1]\n # if the current project is `unknown`\n else:\n # if the backend section is only in the `unknown` project,\n # add the repo in the bck section under `unknown`\n arg_7 = [arg_4[prj] for prj in arg_4 if prj != arg_0.GLOBAL_PROJECT]\n arg_8 = list(set([section for prj in arg_7\n for section in list(prj.keys())]))\n if arg_1 not in arg_8:\n arg_3 += arg_4[arg_5][arg_1]\n\n logger.debug(\"List of repos for %s: %s (raw=%s)\", arg_1, arg_3, arg_2)\n\n # avoid duplicated repos\n arg_3 = list(set(arg_3))\n\n return arg_3"} +{"_id": "doc_7500", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Convert from eclipse projects format to grimoire projects json format \"\"\"\n\n arg_2 = {}\n\n # We need the global project for downloading the full Bugzilla and Gerrit\n arg_2['unknown'] = {\n \"gerrit\": [\"git.eclipse.org\"],\n \"bugzilla\": [\"https://bugs.eclipse.org/bugs/\"]\n }\n\n arg_2 = compose_title(arg_2, arg_1)\n arg_2 = compose_projects_json(arg_2, arg_1)\n\n return arg_2"} +{"_id": "doc_7501", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Change a param in the config \"\"\"\n if arg_1 not in arg_0.conf or arg_2 not in arg_0.conf[arg_1]:\n logger.error('Config section %s and param %s not exists', arg_1, arg_2)\n else:\n arg_0.conf[arg_1][arg_2] = arg_3"} +{"_id": "doc_7502", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get Elasticsearch version.\n\n Get the version of Elasticsearch. This is useful because\n Elasticsearch and Kibiter are paired (same major version for 5, 6).\n\n :param url: Elasticseearch url hosting Kibiter indices\n :returns: major version, as string\n \"\"\"\n\n try:\n arg_2 = arg_0.grimoire_con.get(arg_1)\n arg_2.raise_for_status()\n arg_3 = arg_2.json()['version']['number'].split(\".\")[0]\n except Exception:\n logger.error(\"Error retrieving Elasticsearch version: \" + arg_1)\n raise\n return arg_3"} +{"_id": "doc_7503", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=0, arg_4=True):\n \"\"\"\n Start a task manager per backend to complete the tasks.\n\n :param task_cls: list of tasks classes to be executed\n :param big_delay: seconds before global tasks are executed, should be days usually\n :param small_delay: seconds before backend tasks are executed, should be minutes\n :param wait_for_threads: boolean to set when threads are infinite or\n should be synchronized in a meeting point\n \"\"\"\n\n def _split_tasks(arg_1):\n \"\"\"\n we internally distinguish between tasks executed by backend\n and tasks executed with no specific backend. 
\"\"\"\n arg_5 = []\n arg_6 = []\n for arg_7 in arg_1:\n if arg_7.is_backend_task(arg_7):\n arg_5.append(arg_7)\n else:\n arg_6.append(arg_7)\n return arg_5, arg_6\n\n arg_8, arg_9 = _split_tasks(arg_1)\n logger.debug('backend_tasks = %s' % (arg_8))\n logger.debug('global_tasks = %s' % (arg_9))\n\n arg_10 = []\n\n # stopper won't be set unless wait_for_threads is True\n arg_11 = threading.Event()\n\n # launching threads for tasks by backend\n if len(arg_8) > 0:\n arg_12 = arg_0._get_repos_by_backend()\n for arg_13 in arg_12:\n # Start new Threads and add them to the threads list to complete\n arg_7 = TasksManager(arg_8, arg_13, arg_11, arg_0.config, arg_3)\n arg_10.append(arg_7)\n arg_7.start()\n\n # launch thread for global tasks\n if len(arg_9) > 0:\n # FIXME timer is applied to all global_tasks, does it make sense?\n # All tasks are executed in the same thread sequentially\n arg_14 = TasksManager(arg_9, \"Global tasks\", arg_11, arg_0.config, arg_2)\n arg_10.append(arg_14)\n arg_14.start()\n if arg_2 > 0:\n arg_15 = datetime.now() + timedelta(seconds=arg_2)\n arg_16 = arg_15.strftime('%a, %d %b %Y %H:%M:%S %Z')\n logger.info(\"%s will be executed on %s\" % (arg_9, arg_16))\n\n if arg_4:\n time.sleep(1) # Give enough time create and run all threads\n arg_11.set() # All threads must stop in the next iteration\n\n # Wait for all threads to complete\n for arg_7 in arg_10:\n arg_7.join()\n\n # Checking for exceptions in threads to log them\n arg_0.__check_queue_for_errors()\n\n logger.debug(\"[thread:main] All threads (and their tasks) are finished\")"} +{"_id": "doc_7504", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Tasks that should be done just one time\n \"\"\"\n\n if arg_0.conf['phases']['panels']:\n arg_1 = [TaskPanels, TaskPanelsMenu]\n arg_0.execute_tasks(arg_1)\n if arg_0.conf['phases']['identities']:\n arg_1 = [TaskInitSortingHat]\n arg_0.execute_tasks(arg_1)\n\n logger.info(\"Loading projects\")\n arg_1 = [TaskProjects]\n arg_0.execute_tasks(arg_1)\n logger.info(\"Done\")\n\n return"} +{"_id": "doc_7505", "title": "", "text": "def Func(arg_0):\n '''\n Validates the provided config to make sure all the required fields are \n there.\n '''\n # first ensure that all the required fields are there\n for arg_1, arg_2 in arg_0.params_map.items():\n if arg_2['required']:\n if arg_1 not in arg_0.config:\n raise ValueError(\"Invalid Configuration! Required parameter '%s' was not provided to Sultan.\")\n \n # second ensure that the fields that were pased were actually fields that\n # can be used\n for arg_1 in arg_0.config.keys():\n if arg_1 not in arg_0.params_map:\n raise ValueError(\"Invalid Configuration! 
The parameter '%s' provided is not used by Sultan!\" % arg_1)"} +{"_id": "doc_7506", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Customize the message Func based on the log level.\"\"\"\n if isinstance(arg_0.fmt, dict):\n arg_0._fmt = arg_0.fmt[arg_1.levelname]\n if sys.version_info > (3, 2):\n # Update self._style because we've changed self._fmt\n # (code based on stdlib's logging.Formatter.__init__())\n if arg_0.style not in logging._STYLES:\n raise ValueError('Style must be one of: %s' % ','.join(\n list(logging._STYLES.keys())))\n arg_0._style = logging._STYLES[arg_0.style][0](arg_0._fmt)\n\n if sys.version_info > (2, 7):\n arg_4 = super(LevelFormatter, arg_0).Func(arg_1)\n else:\n arg_4 = ColoredFormatter.Func(arg_0, arg_1)\n\n return arg_4"} +{"_id": "doc_7507", "title": "", "text": "def Func(arg_0):\n ''' Initialize the dictionary of architectures for assembling via keystone'''\n\n return {\n ARM32: (KS_ARCH_ARM, KS_MODE_ARM),\n ARM64: (KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN),\n ARM_TB: (KS_ARCH_ARM, KS_MODE_THUMB),\n HEXAGON: (KS_ARCH_HEXAGON, KS_MODE_BIG_ENDIAN),\n MIPS32: (KS_ARCH_MIPS, KS_MODE_MIPS32),\n MIPS64: (KS_ARCH_MIPS, KS_MODE_MIPS64),\n PPC32: (KS_ARCH_PPC, KS_MODE_PPC32),\n PPC64: (KS_ARCH_PPC, KS_MODE_PPC64),\n SPARC32: (KS_ARCH_SPARC, KS_MODE_SPARC32),\n SPARC64: (KS_ARCH_SPARC, KS_MODE_SPARC64),\n SYSTEMZ: (KS_ARCH_SYSTEMZ, KS_MODE_BIG_ENDIAN),\n X86_16: (KS_ARCH_X86, KS_MODE_16),\n X86_32: (KS_ARCH_X86, KS_MODE_32),\n X86_64: (KS_ARCH_X86, KS_MODE_64),\n }"} +{"_id": "doc_7508", "title": "", "text": "def Func(arg_0=arg_1.stderr):\n \"\"\"Sys.out replacer, by default with stderr.\n\n Use it like this:\n with Func_with(fileobj):\n print \"hello\" # writes to the file\n print \"done\" # prints to stdout\n\n Args:\n fileobj: a file object to replace stdout.\n\n Yields:\n The printer.\n \"\"\"\n arg_3 = _Printer(arg_0)\n\n arg_4 = arg_1.stdout\n arg_1.stdout = arg_3\n try:\n yield arg_3\n finally:\n arg_1.stdout = arg_4"} +{"_id": "doc_7509", "title": "", "text": "def Func(arg_0):\n \"\"\"Compact a list of integers into a comma-separated string of intervals.\n\n Args:\n value_list: A list of sortable integers such as a list of numbers\n\n Returns:\n A compact string representation, such as \"1-5,8,12-15\"\n \"\"\"\n\n if not arg_0:\n return ''\n\n arg_0.sort()\n\n # Start by simply building up a list of separate contiguous intervals\n arg_1 = []\n arg_2 = []\n for arg_3 in arg_0:\n if arg_2 and (arg_3 > arg_2[-1] + 1):\n arg_1.append((arg_2[0], arg_2[-1]))\n arg_2 = [arg_3]\n else:\n arg_2.append(arg_3)\n\n if arg_2:\n arg_1.append((arg_2[0], arg_2[-1]))\n\n # For each interval collapse it down to \"first, last\" or just \"first\" if\n # if first == last.\n return ','.join([\n '{}-{}'.format(arg_4[0], arg_4[1]) if arg_4[0] != arg_4[1] else str(arg_4[0])\n for arg_4 in arg_1\n ])"} +{"_id": "doc_7510", "title": "", "text": "def Func(arg_0):\n \"\"\"Get a storage client using the provided credentials or defaults.\"\"\"\n if arg_0 is None:\n arg_0 = oauth2client.client.GoogleCredentials.get_application_default(\n )\n return discovery.build('storage', 'v1', arg_0=arg_0)"} +{"_id": "doc_7511", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Load context from a text file in gcs.\n\n Args:\n gcs_file_path: The target file path; should have the 'gs://' prefix.\n credentials: Optional credential to be used to load the file from gcs.\n\n Returns:\n The content of the text file as a string.\n \"\"\"\n arg_2 = _get_storage_service(arg_1)\n\n arg_3, arg_4 = 
arg_0[len('gs://'):].split('/', 1)\n arg_5 = arg_2.objects().get_media(\n bucket=arg_3, object=arg_4)\n\n arg_6 = io.BytesIO()\n arg_7 = MediaIoBaseDownload(arg_6, arg_5, chunksize=1024 * 1024)\n arg_8 = False\n while not arg_8:\n arg_9, arg_8 = _downloader_next_chunk(arg_7)\n arg_10 = arg_6.getvalue()\n if not isinstance(arg_10, six.string_types):\n arg_10 = arg_10.decode()\n return six.StringIO(arg_10)"} +{"_id": "doc_7512", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Check whether the file exists, in GCS.\n\n Args:\n gcs_file_path: The target file path; should have the 'gs://' prefix.\n credentials: Optional credential to be used to load the file from gcs.\n\n Returns:\n True if the file's there.\n \"\"\"\n arg_2 = _get_storage_service(arg_1)\n\n arg_3, arg_4 = arg_0[len('gs://'):].split('/', 1)\n arg_5 = arg_2.objects().get(\n bucket=arg_3, object=arg_4, projection='noAcl')\n try:\n arg_5.execute()\n return True\n except errors.HttpError:\n return False"} +{"_id": "doc_7513", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"True iff an object exists matching the input GCS pattern.\n\n The GCS pattern must be a full object reference or a \"simple pattern\" that\n conforms to the dsub input and output parameter restrictions:\n\n * No support for **, ? wildcards or [] character ranges\n * Wildcards may only appear in the file name\n\n Args:\n file_pattern: eg. 'gs://foo/ba*'\n credentials: Optional credential to be used to load the file from gcs.\n\n Raises:\n ValueError: if file_pattern breaks the rules.\n\n Returns:\n True iff a file exists that matches that pattern.\n \"\"\"\n if '*' not in arg_0:\n return _file_exists_in_gcs(arg_0, arg_1)\n if not arg_0.startswith('gs://'):\n raise ValueError('file name must start with gs://')\n arg_2 = _get_storage_service(arg_1)\n arg_3, arg_4 = arg_0[len('gs://'):].split('/', 1)\n if '*' in arg_3:\n raise ValueError('Wildcards may not appear in the bucket name')\n # There is a '*' in prefix because we checked there's one in file_pattern\n # and there isn't one in bucket_name. Hence it must be in prefix.\n assert '*' in arg_4\n arg_5 = arg_4[:arg_4.index('*')]\n arg_6 = arg_2.objects().list(\n bucket=arg_3, arg_4=arg_5)\n arg_7 = arg_6.execute()\n if 'items' not in arg_7:\n return False\n arg_8 = [arg_9['name'] for arg_9 in arg_7['items']]\n return any(fnmatch.fnmatch(arg_9, arg_4) for arg_9 in arg_8)"} +{"_id": "doc_7514", "title": "", "text": "def Func(arg_0):\n \"\"\"True if each output contains at least one file or no output specified.\"\"\"\n # outputs are OutputFileParam (see param_util.py)\n\n # If outputs contain a pattern, then there is no way for `dsub` to verify\n # that *all* output is present. 
The best that `dsub` can do is to verify\n # that *some* output was created for each such parameter.\n for arg_1 in arg_0:\n if not arg_1.value:\n continue\n if arg_1.recursive:\n if not folder_exists(arg_1.value):\n return False\n else:\n if not simple_pattern_exists_in_gcs(arg_1.value):\n return False\n return True"} +{"_id": "doc_7515", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a dict object representing a pipeline input argument.\"\"\"\n\n # If the filename contains a wildcard, then the target Docker path must\n # be a directory in order to ensure consistency whether the source pattern\n # contains 1 or multiple files.\n #\n # In that case, we set the docker_path to explicitly have a trailing slash\n # (for the Pipelines API \"gsutil cp\" handling, and then override the\n # associated var_name environment variable in the generated Docker command.\n\n arg_3, arg_4 = os.path.split(arg_2)\n if '*' in arg_4:\n return arg_0._build_pipeline_file_param(arg_1, arg_3 + '/')\n else:\n return arg_0._build_pipeline_file_param(arg_1, arg_2)"} +{"_id": "doc_7516", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Return a multi-line string of the full pipeline docker command.\"\"\"\n\n # We upload the user script as an environment argument\n # and write it to SCRIPT_DIR (preserving its local file name).\n #\n # The docker_command:\n # * writes the script body to a file\n # * installs gcloud if there are recursive copies to do\n # * sets environment variables for inputs with wildcards\n # * sets environment variables for recursive input directories\n # * recursively copies input directories\n # * creates output directories\n # * sets environment variables for recursive output directories\n # * sets the DATA_ROOT environment variable to /mnt/data\n # * sets the working directory to ${DATA_ROOT}\n # * executes the user script\n # * recursively copies output directories\n arg_5 = [\n var for var in arg_2 if var.recursive and var.value\n ]\n arg_6 = [\n var for var in arg_3 if var.recursive and var.value\n ]\n\n arg_7 = ''\n if arg_5 or arg_6:\n arg_7 = INSTALL_CLOUD_SDK\n\n arg_8 = ''\n arg_9 = ''\n if arg_5:\n arg_8 = providers_util.build_recursive_localize_env(\n providers_util.DATA_MOUNT_POINT, arg_2)\n arg_9 = providers_util.build_recursive_localize_command(\n providers_util.DATA_MOUNT_POINT, arg_2, job_model.P_GCS)\n\n arg_10 = ''\n arg_11 = ''\n if arg_6:\n arg_10 = providers_util.build_recursive_gcs_delocalize_env(\n providers_util.DATA_MOUNT_POINT, arg_3)\n arg_11 = providers_util.build_recursive_delocalize_command(\n providers_util.DATA_MOUNT_POINT, arg_3, job_model.P_GCS)\n\n arg_12 = [\n var.docker_path if var.recursive else os.path.dirname(var.docker_path)\n for var in arg_3\n if var.value\n ]\n\n arg_13 = '\\n'.join([\n 'mkdir -p {0}/{1}'.format(providers_util.DATA_MOUNT_POINT, path)\n for path in arg_12\n ])\n\n arg_14 = [\n var for var in arg_2 if not var.recursive and var.docker_path and\n '*' in os.path.basename(var.docker_path)\n ]\n arg_15 = '\\n'.join([\n 'export {0}=\"{1}/{2}\"'.format(var.name, providers_util.DATA_MOUNT_POINT,\n var.docker_path)\n for var in arg_14\n ])\n\n arg_16 = '\\n'.join([\n 'export {0}=\"\"'.format(var.name)\n for var in arg_4 | arg_2 | arg_3\n if not var.value\n ])\n\n return DOCKER_COMMAND.format(\n mk_runtime_dirs=MK_RUNTIME_DIRS_COMMAND,\n script_path='%s/%s' % (providers_util.SCRIPT_DIR, arg_1),\n arg_7=arg_7,\n arg_15=arg_15,\n arg_8=arg_8,\n arg_9=arg_9,\n mk_output_dirs=arg_13,\n arg_10=arg_10,\n 
arg_16=arg_16,\n tmpdir=providers_util.TMP_DIR,\n working_dir=providers_util.WORKING_DIR,\n arg_11=arg_11)"} +{"_id": "doc_7517", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8,\n arg_9):\n \"\"\"Builds pipeline args for execution.\n\n Args:\n project: string name of project.\n script: Body of the script to execute.\n job_params: dictionary of values for labels, envs, inputs, and outputs\n for this job.\n task_params: dictionary of values for labels, envs, inputs, and outputs\n for this task.\n reserved_labels: dictionary of reserved labels (e.g. task-id,\n task-attempt)\n preemptible: use a preemptible VM for the job\n logging_uri: path for job logging output.\n scopes: list of scope.\n keep_alive: Seconds to keep VM alive on failure\n\n Returns:\n A nested dictionary with one entry under the key pipelineArgs containing\n the pipeline arguments.\n \"\"\"\n # For the Pipelines API, envs and file inputs are all \"inputs\".\n arg_10 = {}\n arg_10.update({SCRIPT_VARNAME: arg_2})\n arg_10.update({\n arg_11.name: arg_11.value\n for arg_11 in arg_3['envs'] | arg_4['envs']\n if arg_11.value\n })\n arg_10.update({\n arg_11.name: arg_11.uri\n for arg_11 in arg_3['inputs'] | arg_4['inputs']\n if not arg_11.recursive and arg_11.value\n })\n\n # Remove wildcard references for non-recursive output. When the pipelines\n # controller generates a delocalize call, it must point to a bare directory\n # for patterns. The output param OUTFILE=gs://bucket/path/*.bam should\n # delocalize with a call similar to:\n # gsutil cp /mnt/data/output/gs/bucket/path/*.bam gs://bucket/path/\n arg_12 = {}\n for arg_11 in arg_3['outputs'] | arg_4['outputs']:\n if arg_11.recursive or not arg_11.value:\n continue\n if '*' in arg_11.uri.basename:\n arg_12[arg_11.name] = arg_11.uri.path\n else:\n arg_12[arg_11.name] = arg_11.uri\n\n arg_14 = {}\n arg_14.update({\n arg_15.name: arg_15.value if arg_15.value else ''\n for arg_15 in (arg_5 | arg_3['labels']\n | arg_4['labels'])\n })\n\n # pyformat: disable\n arg_16 = {\n 'pipelineArgs': {\n 'projectId': arg_1,\n 'resources': {\n 'preemptible': arg_6,\n },\n 'inputs': arg_10,\n 'outputs': arg_12,\n 'labels': arg_14,\n 'serviceAccount': {\n 'email': 'default',\n 'scopes': arg_8,\n },\n # Pass the user-specified GCS destination for pipeline logging.\n 'logging': {\n 'gcsPath': arg_7\n },\n }\n }\n # pyformat: enable\n\n if arg_9:\n arg_16['pipelineArgs'][\n 'keep_vm_alive_on_failure_duration'] = '%ss' % arg_9\n\n return arg_16"} +{"_id": "doc_7518", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert the integer UTC time value into a local datetime.\"\"\"\n if arg_0 is None:\n return None\n\n # Convert localized datetime to a UTC integer\n arg_1 = dsub_util.replace_timezone(datetime.utcfromtimestamp(0), pytz.utc)\n return (arg_0 - arg_1).total_seconds()"} +{"_id": "doc_7519", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a Pipeline objects for the job.\"\"\"\n arg_2 = arg_1.job_metadata\n arg_3 = arg_1.job_params\n arg_4 = arg_1.job_resources\n arg_5 = arg_1.task_descriptors[0].task_metadata\n arg_6 = arg_1.task_descriptors[0].task_params\n arg_7 = arg_1.task_descriptors[0].task_resources\n\n arg_8 = arg_1.job_metadata['script']\n\n arg_9 = google_base.build_pipeline_labels(\n arg_2, arg_5, task_id_pattern='task-%d')\n\n # Build the ephemeralPipeline for this job.\n # The ephemeralPipeline definition changes for each job because file\n # parameters localCopy.path changes based on the remote_uri.\n arg_10 = 
_Pipelines.build_pipeline(\n project=arg_0._project,\n zones=arg_4.zones,\n min_cores=arg_4.min_cores,\n min_ram=arg_4.min_ram,\n disk_size=arg_4.disk_size,\n boot_disk_size=arg_4.boot_disk_size,\n preemptible=arg_4.preemptible,\n accelerator_type=arg_4.accelerator_type,\n accelerator_count=arg_4.accelerator_count,\n image=arg_4.image,\n script_name=arg_8.name,\n envs=arg_3['envs'] | arg_6['envs'],\n inputs=arg_3['inputs'] | arg_6['inputs'],\n outputs=arg_3['outputs'] | arg_6['outputs'],\n pipeline_name=arg_2['pipeline-name'])\n\n # Build the pipelineArgs for this job.\n arg_11 = arg_7.logging_path.uri\n arg_12 = arg_4.scopes or google_base.DEFAULT_SCOPES\n arg_10.update(\n _Pipelines.build_pipeline_args(arg_0._project, arg_8.value, arg_3,\n arg_6, arg_9,\n arg_4.preemptible, arg_11,\n arg_12, arg_4.keep_alive))\n\n return arg_10"} +{"_id": "doc_7520", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None,\n arg_6=None):\n \"\"\"Kills the operations associated with the specified job or job.task.\n\n Args:\n user_ids: List of user ids who \"own\" the job(s) to cancel.\n job_ids: List of job_ids to cancel.\n task_ids: List of task-ids to cancel.\n labels: List of LabelParam, each must match the job(s) to be canceled.\n create_time_min: a timezone-aware datetime value for the earliest create\n time of a task, inclusive.\n create_time_max: a timezone-aware datetime value for the most recent\n create time of a task, inclusive.\n\n Returns:\n A list of tasks canceled and a list of error messages.\n \"\"\"\n # Look up the job(s)\n arg_7 = list(\n arg_0.lookup_job_tasks(\n {'RUNNING'},\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6))\n\n print('Found %d tasks to delete.' % len(arg_7))\n\n return google_base.cancel(arg_0._service.new_batch_http_request,\n arg_0._service.operations().cancel, arg_7)"} +{"_id": "doc_7521", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the most relevant status string and last updated date string.\n\n This string is meant for display only.\n\n Returns:\n A printable status string and date string.\n \"\"\"\n arg_1 = arg_0._op['metadata']\n if not arg_0._op['done']:\n if 'events' in arg_1 and arg_1['events']:\n # Get the last event\n arg_2 = arg_1['events'][-1]\n\n arg_3 = arg_2['description']\n arg_4 = arg_2['startTime']\n else:\n arg_3 = 'Pending'\n arg_4 = arg_1['createTime']\n else:\n arg_4 = arg_1['endTime']\n\n if 'error' in arg_0._op:\n arg_3 = arg_0._op['error']['message']\n else:\n arg_3 = 'Success'\n\n return (arg_3, google_base.parse_rfc3339_utc_string(arg_4))"} +{"_id": "doc_7522", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create a task name from a job-id, task-id, and task-attempt.\n\n Task names are used internally by dsub as well as by the docker task runner.\n The name is formatted as \".[.task-attempt]\". Task names\n follow formatting conventions allowing them to be safely used as a docker\n name.\n\n Args:\n job_id: (str) the job ID.\n task_id: (str) the task ID.\n task_attempt: (int) the task attempt.\n\n Returns:\n a task name string.\n \"\"\"\n arg_3 = '%s.%s' % (arg_0, 'task' if arg_1 is None else arg_1)\n\n if arg_2 is not None:\n arg_3 += '.' 
+ str(arg_2)\n\n # Docker container names must match: [a-zA-Z0-9][a-zA-Z0-9_.-]\n # So 1) prefix it with \"dsub-\" and 2) change all invalid characters to \"-\".\n return 'dsub-{}'.format(_convert_suffix_to_docker_chars(arg_3))"} +{"_id": "doc_7523", "title": "", "text": "def Func(arg_0):\n \"\"\"Rewrite string so that all characters are valid in a docker name suffix.\"\"\"\n # Docker container names must match: [a-zA-Z0-9][a-zA-Z0-9_.-]\n arg_1 = string.ascii_letters + string.digits + '_.-'\n\n def label_char_transform(arg_2):\n if arg_2 in arg_1:\n return arg_2\n return '-'\n\n return ''.join(label_char_transform(arg_3) for arg_3 in arg_0)"} +{"_id": "doc_7524", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a tuple for sorting 'most recent first'.\"\"\"\n return (arg_0.get_field('create-time'), int(arg_0.get_field('task-id', 0)),\n int(arg_0.get_field('task-attempt', 0)))"} +{"_id": "doc_7525", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Determine if the provided time is within the range, inclusive.\"\"\"\n # The pipelines API stores operation create-time with second granularity.\n # We mimic this behavior in the local provider by truncating to seconds.\n arg_1 = arg_1.replace(microsecond=0)\n if arg_2:\n arg_2 = arg_2.replace(microsecond=0)\n else:\n arg_2 = dsub_util.replace_timezone(datetime.datetime.min, pytz.utc)\n if arg_3:\n arg_3 = arg_3.replace(microsecond=0)\n else:\n arg_3 = dsub_util.replace_timezone(datetime.datetime.max, pytz.utc)\n\n return arg_2 <= arg_1 <= arg_3"} +{"_id": "doc_7526", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Return a Task object with this task's info.\"\"\"\n\n # We need to be very careful about how we read and interpret the contents\n # of the task directory. The directory could be changing because a new\n # task is being created. The directory could be changing because a task\n # is ending.\n #\n # If the meta.yaml does not exist, the task does not yet exist.\n # If the meta.yaml exists, it means the task is scheduled. 
It does not mean\n # it is yet running.\n # If the task.pid file exists, it means that the runner.sh was started.\n\n arg_5 = arg_0._task_directory(arg_1, arg_3, arg_4)\n\n arg_6 = arg_0._read_task_metadata(arg_5)\n if not arg_6:\n return None\n\n # If we read up an old task, the user-id will not be in the job_descriptor.\n if not arg_6.job_metadata.get('user-id'):\n arg_6.job_metadata['user-id'] = arg_2\n\n # Get the pid of the runner\n arg_8 = -1\n try:\n with open(os.path.join(arg_5, 'task.pid'), 'r') as f:\n arg_8 = int(f.readline().strip())\n except (IOError, OSError):\n pass\n\n # Get the script contents\n arg_9 = None\n arg_10 = arg_6.job_metadata.get('script-name')\n if arg_10:\n arg_9 = arg_0._read_script(arg_5, arg_10)\n\n # Read the files written by the runner.sh.\n # For new tasks, these may not have been written yet.\n arg_11 = arg_0._get_end_time_from_task_dir(arg_5)\n arg_12 = arg_0._get_last_update_time_from_task_dir(arg_5)\n arg_13 = arg_0._get_events_from_task_dir(arg_5)\n arg_14 = arg_0._get_status_from_task_dir(arg_5)\n arg_15 = arg_0._get_log_detail_from_task_dir(arg_5)\n\n # If the status file is not yet written, then mark the task as pending\n if not arg_14:\n arg_14 = 'RUNNING'\n arg_15 = ['Pending']\n\n return LocalTask(\n task_status=arg_14,\n arg_13=arg_13,\n arg_15=arg_15,\n arg_6=arg_6,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_8=arg_8,\n arg_9=arg_9)"} +{"_id": "doc_7527", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns a command to delocalize logs.\n\n Args:\n logging_path: location of log files.\n user_project: name of the project to be billed for the request.\n\n Returns:\n eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12'\n \"\"\"\n\n # Get the logging prefix (everything up to \".log\")\n arg_3 = os.path.splitext(arg_1.uri)[0]\n\n # Set the provider-specific mkdir and file copy commands\n if arg_1.file_provider == job_model.P_LOCAL:\n arg_4 = 'mkdir -p \"%s\"\\n' % os.path.dirname(arg_3)\n arg_5 = 'cp'\n elif arg_1.file_provider == job_model.P_GCS:\n arg_4 = ''\n if arg_2:\n arg_5 = 'gsutil -u {} -mq cp'.format(arg_2)\n else:\n arg_5 = 'gsutil -mq cp'\n else:\n assert False\n\n # Construct the copy command\n arg_6 = textwrap.dedent(\"\"\"\\\n local cp_cmd=\"{cp_cmd}\"\n local prefix=\"{prefix}\"\n \"\"\").format(\n arg_5=arg_5, prefix=arg_3)\n\n # Build up the command\n arg_7 = textwrap.dedent(\"\"\"\\\n {mkdir_cmd}\n {copy_logs_cmd}\n \"\"\").format(\n arg_4=arg_4, arg_6=arg_6)\n\n return arg_7"} +{"_id": "doc_7528", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"The local dir for staging files for that particular task.\"\"\"\n arg_4 = 'task' if arg_2 is None else str(arg_2)\n if arg_3:\n arg_4 = '%s.%s' % (arg_4, arg_3)\n return arg_0._provider_root() + '/' + arg_1 + '/' + arg_4"} +{"_id": "doc_7529", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns a command that will stage recursive inputs.\"\"\"\n arg_3 = os.path.join(arg_1, _DATA_SUBDIR)\n arg_4 = [\n providers_util.build_recursive_localize_command(arg_3, arg_2,\n file_provider)\n for file_provider in _SUPPORTED_INPUT_PROVIDERS\n ]\n return '\\n'.join(arg_4)"} +{"_id": "doc_7530", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a directory or file path to be the target for \"gsutil cp\".\n\n If the filename contains a wildcard, then the target path must\n be a directory in order to ensure consistency whether the source pattern\n contains one or multiple files.\n\n\n Args:\n local_file_path: A full path terminating 
in a file or a file wildcard.\n\n Returns:\n The path to use as the \"gsutil cp\" target.\n \"\"\"\n\n arg_2, arg_3 = os.path.split(arg_1)\n if '*' in arg_3:\n return arg_2 + '/'\n else:\n return arg_1"} +{"_id": "doc_7531", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Returns a command that will stage inputs.\"\"\"\n arg_4 = []\n for arg_5 in arg_2:\n if arg_5.recursive or not arg_5.value:\n continue\n\n arg_6 = arg_5.uri\n arg_7 = arg_1 + '/' + _DATA_SUBDIR + '/' + arg_5.docker_path\n arg_8 = arg_0._get_input_target_path(arg_7)\n\n arg_4.append('mkdir -p \"%s\"' % os.path.dirname(arg_7))\n\n if arg_5.file_provider in [job_model.P_LOCAL, job_model.P_GCS]:\n # The semantics that we expect here are implemented consistently in\n # \"gsutil cp\", and are a bit different than \"cp\" when it comes to\n # wildcard handling, so use it for both local and GCS:\n #\n # - `cp path/* dest/` will error if \"path\" has subdirectories.\n # - `cp \"path/*\" \"dest/\"` will fail (it expects wildcard expansion\n # to come from shell).\n if arg_3:\n arg_9 = 'gsutil -u %s -mq cp \"%s\" \"%s\"' % (\n arg_3, arg_6, arg_8)\n else:\n arg_9 = 'gsutil -mq cp \"%s\" \"%s\"' % (arg_6,\n arg_8)\n arg_4.append(arg_9)\n\n return '\\n'.join(arg_4)"} +{"_id": "doc_7532", "title": "", "text": "def Func():\n \"\"\"Get the dsub version out of the _dsub_version.py source file.\n\n Setup.py should not import dsub version from dsub directly since ambiguity in\n import order could lead to an old version of dsub setting the version number.\n Parsing the file directly is simpler than using import tools (whose interface\n varies between python 2.7, 3.4, and 3.5).\n\n Returns:\n string of dsub version.\n\n Raises:\n ValueError: if the version is not found.\n \"\"\"\n arg_0 = os.path.join(os.path.dirname(__file__), 'dsub/_dsub_version.py')\n with open(arg_0, 'r') as versionfile:\n for arg_1 in versionfile:\n if arg_1.startswith('DSUB_VERSION ='):\n # Get the version then strip whitespace and quote characters.\n arg_2 = arg_1.partition('=')[2]\n return arg_2.strip().strip('\\'\"')\n raise ValueError('Could not find version.')"} +{"_id": "doc_7533", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"Return a dict with variables for the 'prepare' action.\"\"\"\n\n # Add the _SCRIPT_REPR with the repr(script) contents\n # Add the _META_YAML_REPR with the repr(meta) contents\n\n # Add variables for directories that need to be created, for example:\n # DIR_COUNT: 2\n # DIR_0: /mnt/data/input/gs/bucket/path1/\n # DIR_1: /mnt/data/output/gs/bucket/path2\n\n # List the directories in sorted order so that they are created in that\n # order. 
This is primarily to ensure that permissions are set as we create\n # each directory.\n # For example:\n # mkdir -m 777 -p /root/first/second\n # mkdir -m 777 -p /root/first\n # *may* not actually set 777 on /root/first\n\n arg_6 = sorted([\n var.docker_path if var.recursive else os.path.dirname(var.docker_path)\n for var in arg_3 | arg_4 | arg_5\n if var.value\n ])\n\n arg_7 = {\n _SCRIPT_VARNAME: repr(arg_1.value),\n _META_YAML_VARNAME: repr(arg_2.to_yaml()),\n 'DIR_COUNT': str(len(arg_6))\n }\n\n for arg_8, arg_9 in enumerate(arg_6):\n arg_7['DIR_{}'.format(arg_8)] = os.path.join(providers_util.DATA_MOUNT_POINT,\n arg_9)\n\n return arg_7"} +{"_id": "doc_7534", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a dict with variables for the 'localization' action.\"\"\"\n\n # Add variables for paths that need to be localized, for example:\n # INPUT_COUNT: 1\n # INPUT_0: MY_INPUT_FILE\n # INPUT_RECURSIVE_0: 0\n # INPUT_SRC_0: gs://mybucket/mypath/myfile\n # INPUT_DST_0: /mnt/data/inputs/mybucket/mypath/myfile\n\n arg_3 = [arg_6 for arg_6 in arg_1 if arg_6.value]\n arg_4 = {'INPUT_COUNT': str(len(arg_3))}\n\n for arg_5, arg_6 in enumerate(arg_3):\n arg_4['INPUT_{}'.format(arg_5)] = arg_6.name\n arg_4['INPUT_RECURSIVE_{}'.format(arg_5)] = str(int(arg_6.recursive))\n arg_4['INPUT_SRC_{}'.format(arg_5)] = arg_6.value\n\n # For wildcard paths, the destination must be a directory\n arg_8 = os.path.join(providers_util.DATA_MOUNT_POINT, arg_6.docker_path)\n arg_9, arg_10 = os.path.split(arg_8)\n if '*' in arg_10:\n arg_8 = '{}/'.format(arg_9)\n arg_4['INPUT_DST_{}'.format(arg_5)] = arg_8\n\n arg_4['USER_PROJECT'] = arg_2\n\n return arg_4"} +{"_id": "doc_7535", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a dict with variables for the 'delocalization' action.\"\"\"\n\n # Add variables for paths that need to be delocalized, for example:\n # OUTPUT_COUNT: 1\n # OUTPUT_0: MY_OUTPUT_FILE\n # OUTPUT_RECURSIVE_0: 0\n # OUTPUT_SRC_0: gs://mybucket/mypath/myfile\n # OUTPUT_DST_0: /mnt/data/outputs/mybucket/mypath/myfile\n\n arg_3 = [arg_6 for arg_6 in arg_1 if arg_6.value]\n arg_4 = {'OUTPUT_COUNT': str(len(arg_3))}\n\n for arg_5, arg_6 in enumerate(arg_3):\n arg_4['OUTPUT_{}'.format(arg_5)] = arg_6.name\n arg_4['OUTPUT_RECURSIVE_{}'.format(arg_5)] = str(int(arg_6.recursive))\n arg_4['OUTPUT_SRC_{}'.format(arg_5)] = os.path.join(\n providers_util.DATA_MOUNT_POINT, arg_6.docker_path)\n\n # For wildcard paths, the destination must be a directory\n if '*' in arg_6.uri.basename:\n arg_8 = arg_6.uri.path\n else:\n arg_8 = arg_6.uri\n arg_4['OUTPUT_DST_{}'.format(arg_5)] = arg_8\n\n arg_4['USER_PROJECT'] = arg_2\n\n return arg_4"} +{"_id": "doc_7536", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Returns a dictionary of for the user container environment.\"\"\"\n arg_1 = {env.name: env.value for env in arg_1}\n arg_1.update(providers_util.get_file_environment_variables(arg_2))\n arg_1.update(providers_util.get_file_environment_variables(arg_3))\n arg_1.update(providers_util.get_file_environment_variables(arg_4))\n return arg_1"} +{"_id": "doc_7537", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the status of this operation.\n\n Raises:\n ValueError: if the operation status cannot be determined.\n\n Returns:\n A printable status string (RUNNING, SUCCESS, CANCELED or FAILURE).\n \"\"\"\n if not google_v2_operations.is_done(arg_0._op):\n return 'RUNNING'\n if google_v2_operations.is_success(arg_0._op):\n return 'SUCCESS'\n if 
google_v2_operations.is_canceled(arg_0._op):\n return 'CANCELED'\n if google_v2_operations.is_failed(arg_0._op):\n return 'FAILURE'\n\n raise ValueError('Status for operation {} could not be determined'.format(\n arg_0._op['name']))"} +{"_id": "doc_7538", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the most relevant status string and failed action.\n\n This string is meant for display only.\n\n Returns:\n A printable status string and name of failed action (if any).\n \"\"\"\n arg_1 = None\n arg_2 = None\n if not google_v2_operations.is_done(arg_0._op):\n arg_3 = google_v2_operations.get_last_event(arg_0._op)\n if arg_3:\n arg_1 = arg_3['description']\n arg_4 = arg_3.get('details', {}).get('actionId')\n if arg_4:\n arg_2 = google_v2_operations.get_action_by_id(arg_0._op, arg_4)\n else:\n arg_1 = 'Pending'\n else:\n arg_5 = google_v2_operations.get_failed_events(arg_0._op)\n if arg_5:\n arg_6 = arg_5[-1]\n arg_1 = arg_6.get('details', {}).get('stderr')\n arg_4 = arg_6.get('details', {}).get('actionId')\n if arg_4:\n arg_2 = google_v2_operations.get_action_by_id(arg_0._op, arg_4)\n if not arg_1:\n arg_7 = google_v2_operations.get_error(arg_0._op)\n if arg_7:\n arg_1 = arg_7['message']\n else:\n arg_1 = 'Success'\n\n return arg_1, arg_2"} +{"_id": "doc_7539", "title": "", "text": "def Func(arg_0):\n \"\"\"Rounds ram up to the nearest multiple of _MEMORY_MULTIPLE.\"\"\"\n return int(GoogleV2CustomMachine._MEMORY_MULTIPLE * math.ceil(\n arg_0 / GoogleV2CustomMachine._MEMORY_MULTIPLE))"} +{"_id": "doc_7540", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns a custom machine type string.\"\"\"\n arg_1 = arg_1 or job_model.DEFAULT_MIN_CORES\n arg_2 = arg_2 or job_model.DEFAULT_MIN_RAM\n\n # First, min_ram is given in GB. Convert to MB.\n arg_2 *= GoogleV2CustomMachine._MB_PER_GB\n\n # Only machine types with 1 vCPU or an even number of vCPUs can be created.\n arg_3 = arg_0._validate_cores(arg_1)\n # The total memory of the instance must be a multiple of 256 MB.\n arg_4 = arg_0._validate_ram(arg_2)\n\n # Memory must be between 0.9 GB per vCPU, up to 6.5 GB per vCPU.\n arg_5 = arg_4 / arg_3\n\n if arg_5 < GoogleV2CustomMachine._MIN_MEMORY_PER_CPU:\n # If we're under the ratio, top up the memory.\n arg_6 = GoogleV2CustomMachine._MIN_MEMORY_PER_CPU * arg_3\n arg_4 = arg_0._validate_ram(arg_6)\n\n elif arg_5 > GoogleV2CustomMachine._MAX_MEMORY_PER_CPU:\n # If we're over the ratio, top up the CPU.\n arg_7 = math.ceil(\n arg_4 / GoogleV2CustomMachine._MAX_MEMORY_PER_CPU)\n arg_3 = arg_0._validate_cores(arg_7)\n\n else:\n # Ratio is within the restrictions - no adjustments needed.\n pass\n\n return 'custom-{}-{}'.format(int(arg_3), int(arg_4))"} +{"_id": "doc_7541", "title": "", "text": "def Func(arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None):\n \"\"\"Build a VirtualMachine object for a Pipeline request.\n\n Args:\n network (dict): Network details for the pipeline to run in.\n machine_type (str): GCE Machine Type string for the pipeline.\n preemptible (bool): Use a preemptible VM for the job.\n service_account (dict): Service account configuration for the VM.\n boot_disk_size_gb (int): Boot disk size in GB.\n disks (list[dict]): List of disks to mount.\n accelerators (list[dict]): List of accelerators to attach to the VM.\n labels (dict[string, string]): Labels for the VM.\n cpu_platform (str): The CPU platform to request.\n nvidia_driver_version (str): The NVIDIA driver version to use 
when attaching\n an NVIDIA GPU accelerator.\n\n Returns:\n An object representing a VirtualMachine.\n \"\"\"\n return {\n 'network': arg_0,\n 'machineType': arg_1,\n 'preemptible': arg_2,\n 'serviceAccount': arg_3,\n 'bootDiskSizeGb': arg_4,\n 'disks': arg_5,\n 'accelerators': arg_6,\n 'labels': arg_7,\n 'cpuPlatform': arg_8,\n 'nvidiaDriverVersion': arg_9,\n }"} +{"_id": "doc_7542", "title": "", "text": "def Func(arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None):\n \"\"\"Build an Action object for a Pipeline request.\n\n Args:\n name (str): An optional name for the container.\n image_uri (str): The URI to pull the container image from.\n commands (List[str]): commands and arguments to run inside the container.\n entrypoint (str): overrides the ENTRYPOINT specified in the container.\n environment (dict[str,str]): The environment to pass into the container.\n pid_namespace (str): The PID namespace to run the action inside.\n flags (str): Flags that control the execution of this action.\n port_mappings (dict[int, int]): A map of container to host port mappings for\n this container.\n mounts (List): A list of mounts to make available to the action.\n labels (dict[str]): Labels to associate with the action.\n\n Returns:\n An object representing an Action resource.\n \"\"\"\n\n return {\n 'name': arg_0,\n 'imageUri': arg_1,\n 'commands': arg_2,\n 'entrypoint': arg_3,\n 'environment': arg_4,\n 'pidNamespace': arg_5,\n 'flags': arg_6,\n 'portMappings': arg_7,\n 'mounts': arg_8,\n 'labels': arg_9,\n }"} +{"_id": "doc_7543", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns a provider for job submission requests.\"\"\"\n\n arg_2 = getattr(arg_0, 'provider', 'google')\n\n if arg_2 == 'google':\n return google.GoogleJobProvider(\n getattr(arg_0, 'verbose', False),\n getattr(arg_0, 'dry_run', False), arg_0.project)\n elif arg_2 == 'google-v2':\n return google_v2.GoogleV2JobProvider(\n getattr(arg_0, 'verbose', False), getattr(arg_0, 'dry_run', False),\n arg_0.project)\n elif arg_2 == 'local':\n return local.LocalJobProvider(arg_1)\n elif arg_2 == 'test-fails':\n return test_fails.FailsJobProvider()\n else:\n raise ValueError('Unknown provider: ' + arg_2)"} +{"_id": "doc_7544", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Add provider required arguments epilog message, parse, and validate.\"\"\"\n\n # Add the provider required arguments epilog message\n arg_3 = 'Provider-required arguments:\\n'\n for arg_4 in arg_1:\n arg_3 += ' %s: %s\\n' % (arg_4, arg_1[arg_4])\n arg_0.epilog = arg_3\n\n # Parse arguments\n arg_5 = arg_0.Func(arg_2)\n\n # For the selected provider, check the required arguments\n for arg_6 in arg_1[arg_5.provider]:\n if not arg_5.__getattribute__(arg_6):\n arg_0.error('argument --%s is required' % arg_6)\n\n return arg_5"} +{"_id": "doc_7545", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"A string with the arguments to point dstat to the same provider+project.\"\"\"\n arg_2 = get_provider_name(arg_0)\n\n arg_3 = []\n if arg_2 == 'google':\n arg_3.append('--project %s' % arg_1)\n elif arg_2 == 'google-v2':\n arg_3.append('--project %s' % arg_1)\n elif arg_2 == 'local':\n pass\n elif arg_2 == 'test-fails':\n pass\n else:\n # New providers should add their dstat required arguments here.\n assert False, 'Provider %s needs Func support' % arg_0\n\n arg_3.insert(0, '--provider %s' % arg_2)\n return ' '.join(arg_3)"} +{"_id": "doc_7546", "title": "", "text": "def 
Func(arg_0, arg_1, arg_2):\n \"\"\"Returns a URI with placeholders replaced by metadata values.\"\"\"\n\n arg_3 = {\n 'job-id': None,\n 'task-id': 'task',\n 'job-name': None,\n 'user-id': None,\n 'task-attempt': None\n }\n for arg_4 in arg_3:\n arg_3[arg_4] = arg_2.get(arg_4) or arg_1.get(arg_4) or arg_3[arg_4]\n\n return arg_0.format(**arg_3)"} +{"_id": "doc_7547", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Inserts task metadata into the logging URI.\n\n The core behavior is inspired by the Google Pipelines API:\n (1) If a the uri ends in \".log\", then that is the logging path.\n (2) Otherwise, the uri is treated as \"directory\" for logs and a filename\n needs to be automatically generated.\n\n For (1), if the job is a --tasks job, then the {task-id} is inserted\n before \".log\".\n\n For (2), the file name generated is {job-id}, or for --tasks jobs, it is\n {job-id}.{task-id}.\n\n In both cases .{task-attempt} is inserted before .log for --retries jobs.\n\n In addition, full task metadata substitution is supported. The URI\n may include substitution strings such as\n \"{job-id}\", \"{task-id}\", \"{job-name}\", \"{user-id}\", and \"{task-attempt}\".\n\n Args:\n uri: User-specified logging URI which may contain substitution fields.\n job_metadata: job-global metadata.\n task_metadata: tasks-specific metadata.\n\n Returns:\n The logging_uri formatted as described above.\n \"\"\"\n\n # If the user specifies any formatting (with curly braces), then use that\n # as the format string unchanged.\n arg_3 = str(arg_0)\n if '{' not in arg_3:\n if arg_0.endswith('.log'):\n # URI includes a filename. Trim the extension and just use the prefix.\n arg_3 = os.path.splitext(arg_0)[0]\n else:\n # URI is a path to a directory. The job-id becomes the filename prefix.\n arg_3 = os.path.join(arg_0, '{job-id}')\n\n # If this is a task job, add the task-id.\n if arg_2.get('task-id') is not None:\n arg_3 += '.{task-id}'\n\n # If this is a retryable task, add the task-attempt.\n if arg_2.get('task-attempt') is not None:\n arg_3 += '.{task-attempt}'\n\n arg_3 += '.log'\n\n return _format_task_uri(arg_3, arg_1, arg_2)"} +{"_id": "doc_7548", "title": "", "text": "def Func(arg_0):\n \"\"\"Validated google-v2 arguments.\"\"\"\n if (arg_0.zones and arg_0.regions) or (not arg_0.zones and not arg_0.regions):\n raise ValueError('Exactly one of --regions and --zones must be specified')\n\n if arg_0.machine_type and (arg_0.min_cores or arg_0.min_ram):\n raise ValueError(\n '--machine-type not supported together with --min-cores or --min-ram.')"} +{"_id": "doc_7549", "title": "", "text": "def Func(arg_0):\n \"\"\"Extract job-global resources requirements from input args.\n\n Args:\n args: parsed command-line arguments\n\n Returns:\n Resources object containing the requested resources for the job\n \"\"\"\n arg_1 = param_util.build_logging_param(\n arg_0.logging) if arg_0.logging else None\n arg_2 = param_util.timeout_in_seconds(arg_0.timeout)\n arg_3 = param_util.log_interval_in_seconds(arg_0.log_interval)\n\n return job_model.Resources(\n min_cores=arg_0.min_cores,\n min_ram=arg_0.min_ram,\n machine_type=arg_0.machine_type,\n disk_size=arg_0.disk_size,\n disk_type=arg_0.disk_type,\n boot_disk_size=arg_0.boot_disk_size,\n preemptible=arg_0.preemptible,\n image=arg_0.image,\n regions=arg_0.regions,\n zones=arg_0.zones,\n arg_1=arg_1,\n logging_path=None,\n service_account=arg_0.service_account,\n scopes=arg_0.scopes,\n keep_alive=arg_0.keep_alive,\n cpu_platform=arg_0.cpu_platform,\n 
network=arg_0.network,\n subnetwork=arg_0.subnetwork,\n use_private_address=arg_0.use_private_address,\n accelerator_type=arg_0.accelerator_type,\n accelerator_count=arg_0.accelerator_count,\n nvidia_driver_version=arg_0.nvidia_driver_version,\n arg_2=arg_2,\n arg_3=arg_3,\n ssh=arg_0.ssh)"} +{"_id": "doc_7550", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Print status info as we wait for those jobs.\n\n Blocks until either all of the listed jobs succeed,\n or one of them fails.\n\n Args:\n provider: job service provider\n job_ids: a set of job IDs (string) to wait for\n poll_interval: integer seconds to wait between iterations\n stop_on_failure: whether to stop waiting if one of the tasks fails.\n\n Returns:\n Empty list if there was no error,\n a list of error messages from the failed tasks otherwise.\n \"\"\"\n\n # Each time through the loop, the job_set is re-set to the jobs remaining to\n # check. Jobs are removed from the list when they complete.\n #\n # We exit the loop when:\n # * No jobs remain are running, OR\n # * stop_on_failure is TRUE AND at least one job returned an error\n\n # remove NO_JOB\n arg_4 = {arg_12 for arg_12 in arg_1 if arg_12 != dsub_util.NO_JOB}\n arg_5 = []\n while arg_4 and (not arg_5 or not arg_3):\n print('Waiting for: %s.' % (', '.join(arg_4)))\n\n # Poll until any remaining jobs have completed\n arg_6 = _wait_for_any_job(arg_0, arg_4, arg_2)\n\n # Calculate which jobs just completed\n arg_7 = arg_4.difference(arg_6)\n\n # Get all tasks for the newly completed jobs\n arg_8 = arg_0.lookup_job_tasks({'*'}, arg_1=arg_7)\n\n # We don't want to overwhelm the user with output when there are many\n # tasks per job. So we get a single \"dominant\" task for each of the\n # completed jobs (one that is representative of the job's fate).\n arg_9 = _dominant_task_for_jobs(arg_8)\n if len(arg_9) != len(arg_7):\n # print info about the jobs we couldn't find\n # (should only occur for \"--after\" where the job ID is a typo).\n arg_10 = dsub_util.tasks_to_job_ids(arg_9)\n arg_11 = arg_7.difference(arg_10)\n for arg_12 in arg_11:\n arg_13 = '%s: not found' % arg_12\n print_error(' %s' % arg_13)\n arg_5 += [arg_13]\n\n # Print the dominant task for the completed jobs\n for arg_14 in arg_9:\n arg_15 = arg_14.get_field('job-id')\n arg_16 = arg_14.get_field('task-status')\n print(' %s: %s' % (str(arg_15), str(arg_16)))\n if arg_16 in ['FAILURE', 'CANCELED']:\n arg_5 += [arg_0.get_tasks_completion_messages([arg_14])]\n\n arg_4 = arg_6\n\n return arg_5"} +{"_id": "doc_7551", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Wait for job and retry any tasks that fail.\n\n Stops retrying an individual task when: it succeeds, is canceled, or has been\n retried \"retries\" times.\n\n This function exits when there are no tasks running and there are no tasks\n eligible to be retried.\n\n Args:\n provider: job service provider\n job_id: a single job ID (string) to wait for\n poll_interval: integer seconds to wait between iterations\n retries: number of retries\n job_descriptor: job descriptor used to originally submit job\n\n Returns:\n Empty list if there was no error,\n a list containing an error message from a failed task otherwise.\n \"\"\"\n\n while True:\n arg_5 = arg_0.lookup_job_tasks({'*'}, job_ids=[arg_1])\n\n arg_6 = set()\n arg_7 = set()\n arg_8 = set()\n arg_9 = set()\n arg_10 = dict()\n\n # This is an arbitrary task that is either fully failed or canceled (with\n # preference for the former).\n arg_11 = None\n\n arg_12 = 
dict()\n for arg_13 in arg_5:\n arg_14 = job_model.numeric_task_id(arg_13.get_field('task-id'))\n arg_12[arg_14] = arg_13\n\n arg_15 = arg_13.get_field('task-status')\n if arg_15 == 'FAILURE':\n # Could compute this from task-attempt as well.\n arg_10[arg_14] = arg_10.get(arg_14, 0) + 1\n if arg_10[arg_14] > arg_3:\n arg_9.add(arg_14)\n arg_11 = arg_13\n elif arg_15 == 'CANCELED':\n arg_8.add(arg_14)\n if not arg_11:\n arg_11 = arg_13\n elif arg_15 == 'SUCCESS':\n arg_7.add(arg_14)\n elif arg_15 == 'RUNNING':\n arg_6.add(arg_14)\n\n arg_16 = (\n set(arg_10).difference(arg_9)\n .difference(arg_6).difference(arg_7)\n .difference(arg_8))\n\n # job completed.\n if not arg_16 and not arg_6:\n # If there are any fully failed tasks, return the completion message of an\n # arbitrary one.\n # If not, but there are canceled tasks, return the completion message of\n # an arbitrary one.\n if arg_11:\n return [arg_0.get_tasks_completion_messages([arg_11])]\n\n # Otherwise successful completion.\n return []\n\n for arg_14 in arg_16:\n arg_17 = '{}.{}'.format(arg_1, arg_14) if arg_14 else arg_1\n print(' {} (attempt {}) failed. Retrying.'.format(\n arg_17, arg_10[arg_14]))\n arg_18 = arg_12[arg_14].get_field('status-message')\n print(' Failure message: {}'.format(arg_18))\n\n _retry_task(arg_0, arg_4, arg_14,\n arg_10[arg_14] + 1)\n\n SLEEP_FUNCTION(arg_2)"} +{"_id": "doc_7552", "title": "", "text": "def Func(arg_0):\n \"\"\"A list with, for each job, its dominant task.\n\n The dominant task is the one that exemplifies its job's\n status. It is either:\n - the first (FAILURE or CANCELED) task, or if none\n - the first RUNNING task, or if none\n - the first SUCCESS task.\n\n Args:\n tasks: a list of tasks to consider\n\n Returns:\n A list with, for each job, its dominant task.\n \"\"\"\n\n arg_1 = _group_tasks_by_jobid(arg_0)\n\n arg_2 = []\n for arg_3 in arg_1.keys():\n arg_4 = sorted(arg_1[arg_3], key=_importance_of_task)\n arg_2.append(arg_4[0])\n return arg_2"} +{"_id": "doc_7553", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Waits until any of the listed jobs is not running.\n\n In particular, if any of the jobs sees one of its tasks fail,\n we count the whole job as failing (but do not terminate the remaining\n tasks ourselves).\n\n Args:\n provider: job service provider\n job_ids: a list of job IDs (string) to wait for\n poll_interval: integer seconds to wait between iterations\n\n Returns:\n A set of the jobIDs with still at least one running task.\n \"\"\"\n if not arg_1:\n return\n while True:\n arg_3 = arg_0.lookup_job_tasks({'*'}, arg_1=arg_1)\n arg_4 = set()\n arg_5 = set()\n for arg_6 in arg_3:\n arg_7 = arg_6.get_field('task-status')\n arg_8 = arg_6.get_field('job-id')\n if arg_7 in ['FAILURE', 'CANCELED']:\n arg_5.add(arg_8)\n if arg_7 == 'RUNNING':\n arg_4.add(arg_8)\n arg_9 = arg_4.difference(arg_5)\n if arg_5 or len(arg_9) != len(arg_1):\n return arg_9\n SLEEP_FUNCTION(arg_2)"} +{"_id": "doc_7554", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Validates that job and task argument names do not overlap.\"\"\"\n\n if not arg_1:\n return\n\n arg_2 = arg_1[0].task_params\n\n # The use case for specifying a label or env/input/output parameter on\n # the command-line and also including it in the --tasks file is not obvious.\n # Should the command-line override the --tasks file? 
Why?\n # Until this use is articulated, generate an error on overlapping names.\n\n # Check labels\n arg_3 = {label.name for label in arg_0['labels']}\n arg_4 = {label.name for label in arg_2['labels']}\n\n arg_5 = arg_3 & arg_4\n if arg_5:\n raise ValueError(\n 'Names for labels on the command-line and in the --tasks file must not '\n 'be repeated: {}'.format(','.join(arg_5)))\n\n # Check envs, inputs, and outputs, all of which must not overlap each other\n arg_3 = {\n item.name\n for item in arg_0['envs'] | arg_0['inputs']\n | arg_0['outputs']\n }\n arg_4 = {\n item.name\n for item in arg_2['envs'] | arg_2['inputs']\n | arg_2['outputs']\n }\n\n arg_5 = arg_3 & arg_4\n if arg_5:\n raise ValueError(\n 'Names for envs, inputs, and outputs on the command-line and in the '\n '--tasks file must not be repeated: {}'.format(','.join(arg_5)))"} +{"_id": "doc_7555", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Helper function to return an appropriate set of mount parameters.\"\"\"\n return set([arg_2 for arg_2 in arg_0 if isinstance(arg_2, arg_1)])"} +{"_id": "doc_7556", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"Convenience function simplifies construction of the logging uri.\"\"\"\n if not arg_0:\n return job_model.LoggingParam(None, None)\n arg_3 = not arg_0.endswith('.log')\n arg_4 = arg_1('')\n arg_5, arg_6, arg_7 = arg_4.parse_uri(arg_0, arg_3)\n if '*' in arg_6.basename:\n raise ValueError('Wildcards not allowed in logging URI: %s' % arg_6)\n return job_model.LoggingParam(arg_6, arg_7)"} +{"_id": "doc_7557", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"Split a string into a pair, which can have one empty value.\n\n Args:\n pair_string: The string to be split.\n separator: The separator to be used for splitting.\n nullable_idx: The location to be set to null if the separator is not in the\n input string. Should be either 0 or 1.\n\n Returns:\n A list containing the pair.\n\n Raises:\n IndexError: If nullable_idx is not 0 or 1.\n \"\"\"\n\n arg_3 = arg_0.split(arg_1, 1)\n if len(arg_3) == 1:\n if arg_2 == 0:\n return [None, arg_3[0]]\n elif arg_2 == 1:\n return [arg_3[0], None]\n else:\n raise IndexError('nullable_idx should be either 0 or 1.')\n else:\n return arg_3"} +{"_id": "doc_7558", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"Parses task parameters from a TSV.\n\n Args:\n tasks: Dict containing the path to a TSV file and task numbers to run\n variables, input, and output parameters as column headings. 
Subsequent\n lines specify parameter values, one row per job.\n retries: Number of retries allowed.\n input_file_param_util: Utility for producing InputFileParam objects.\n output_file_param_util: Utility for producing OutputFileParam objects.\n\n Returns:\n task_descriptors: an array of records, each containing the task-id,\n task-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of\n parameters for each task of the job.\n\n Raises:\n ValueError: If no job records were provided\n \"\"\"\n arg_4 = []\n\n arg_5 = arg_0['path']\n arg_6 = arg_0.get('min')\n arg_7 = arg_0.get('max')\n\n # Load the file and set up a Reader that tokenizes the fields\n arg_8 = dsub_util.load_file(arg_5)\n arg_9 = csv.reader(arg_8, delimiter='\\t')\n\n # Read the first line and extract the parameters\n arg_10 = six.advance_iterator(arg_9)\n arg_11 = parse_tasks_file_header(arg_10, arg_2,\n arg_3)\n\n # Build a list of records from the parsed input file\n for arg_12 in arg_9:\n # Tasks are numbered starting at 1 and since the first line of the TSV\n # file is a header, the first task appears on line 2.\n arg_13 = arg_9.line_num - 1\n if arg_6 and arg_13 < arg_6:\n continue\n if arg_7 and arg_13 > arg_7:\n continue\n\n if len(arg_12) != len(arg_11):\n dsub_util.print_error('Unexpected number of fields %s vs %s: line %s' %\n (len(arg_12), len(arg_11), arg_9.line_num))\n\n # Each row can contain \"envs\", \"inputs\", \"outputs\"\n arg_14 = set()\n arg_15 = set()\n arg_16 = set()\n arg_17 = set()\n\n for arg_18 in range(0, len(arg_11)):\n arg_19 = arg_11[arg_18]\n arg_20 = arg_19.name\n if isinstance(arg_19, job_model.EnvParam):\n arg_14.add(job_model.EnvParam(arg_20, arg_12[arg_18]))\n\n elif isinstance(arg_19, job_model.LabelParam):\n arg_17.add(job_model.LabelParam(arg_20, arg_12[arg_18]))\n\n elif isinstance(arg_19, job_model.InputFileParam):\n arg_15.add(\n arg_2.make_param(arg_20, arg_12[arg_18], arg_19.recursive))\n\n elif isinstance(arg_19, job_model.OutputFileParam):\n arg_16.add(\n arg_3.make_param(arg_20, arg_12[arg_18], arg_19.recursive))\n\n arg_4.append(\n job_model.TaskDescriptor({\n 'task-id': arg_13,\n 'task-attempt': 1 if arg_1 else None\n }, {\n 'labels': arg_17,\n 'envs': arg_14,\n 'inputs': arg_15,\n 'outputs': arg_16\n }, job_model.Resources()))\n\n # Ensure that there are jobs to execute (and not just a header)\n if not arg_4:\n raise ValueError('No tasks added from %s' % arg_5)\n\n return arg_4"} +{"_id": "doc_7559", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse flags of key=value pairs and return a list of argclass.\n\n For pair variables, we need to:\n * split the input into name=value pairs (value optional)\n * Create the EnvParam object\n\n Args:\n labels: list of 'key' or 'key=value' strings.\n argclass: Container class for args, must instantiate with argclass(k, v).\n\n Returns:\n list of argclass objects.\n \"\"\"\n arg_2 = set()\n for arg_3 in arg_0:\n arg_4, arg_5 = split_pair(arg_3, '=', nullable_idx=1)\n arg_2.add(arg_1(arg_4, arg_5))\n return arg_2"} +{"_id": "doc_7560", "title": "", "text": "def Func(arg_0, arg_1='smhdw'):\n \"\"\"Convert the timeout duration to seconds.\n\n The value must be of the form \"\" where supported\n units are s, m, h, d, w (seconds, minutes, hours, days, weeks).\n\n Args:\n interval: A \"\" string.\n valid_units: A list of supported units.\n\n Returns:\n A string of the form \"s\" or None if timeout is empty.\n \"\"\"\n if not arg_0:\n return None\n\n try:\n arg_2 = arg_0[-1]\n\n if arg_2 == 's' and 's' in arg_1:\n return 
str(float(arg_0[:-1])) + 's'\n elif arg_2 == 'm' and 'm' in arg_1:\n return str(float(arg_0[:-1]) * 60) + 's'\n elif arg_2 == 'h' and 'h' in arg_1:\n return str(float(arg_0[:-1]) * 60 * 60) + 's'\n elif arg_2 == 'd' and 'd' in arg_1:\n return str(float(arg_0[:-1]) * 60 * 60 * 24) + 's'\n elif arg_2 == 'w' and 'w' in arg_1:\n return str(float(arg_0[:-1]) * 60 * 60 * 24 * 7) + 's'\n else:\n raise ValueError(\n 'Unsupported units in interval string %s: %s' % (arg_0, arg_2))\n\n except (ValueError, OverflowError) as e:\n raise ValueError('Unable to parse interval string %s: %s' % (arg_0, e))"} +{"_id": "doc_7561", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Produce a default variable name if none is specified.\"\"\"\n if not arg_1:\n arg_1 = '%s%s' % (arg_0._auto_prefix, arg_0._auto_index)\n arg_0._auto_index += 1\n return arg_1"} +{"_id": "doc_7562", "title": "", "text": "def Func(arg_0):\n \"\"\"Find the file provider for a URI.\"\"\"\n arg_1 = {'gs': job_model.P_GCS, 'file': job_model.P_LOCAL}\n # URI scheme detector uses a range up to 30 since none of the IANA\n # registered schemes are longer than this.\n arg_2 = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', arg_0)\n if arg_2:\n arg_3 = arg_2.group(1).lower()\n else:\n # If no provider is specified in the URI, assume that the local\n # filesystem is being used. Availability and validity of the local\n # file/directory will be checked later.\n arg_3 = 'file'\n if arg_3 in arg_1:\n return arg_1[arg_3]\n else:\n raise ValueError('File prefix not supported: %s://' % arg_3)"} +{"_id": "doc_7563", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Do basic validation of the uri, return the path and filename.\"\"\"\n arg_2, arg_3 = os.path.split(arg_0)\n\n # dsub could support character ranges ([0-9]) with some more work, but for\n # now we assume that basic asterisk wildcards are sufficient. Reject any URI\n # that includes square brackets or question marks, since we know that\n # if they actually worked, it would be accidental.\n if '[' in arg_0 or ']' in arg_0:\n raise ValueError(\n 'Square bracket (character ranges) are not supported: %s' % arg_0)\n if '?' in arg_0:\n raise ValueError('Question mark wildcards are not supported: %s' % arg_0)\n\n # Only support file URIs and *filename* wildcards\n # Wildcards at the directory level or \"**\" syntax would require better\n # support from the Pipelines API *or* doing expansion here and\n # (potentially) producing a series of FileParams, instead of one.\n if '*' in arg_2:\n raise ValueError(\n 'Path wildcard (*) are only supported for files: %s' % arg_0)\n if '**' in arg_3:\n raise ValueError('Recursive wildcards (\"**\") not supported: %s' % arg_0)\n if arg_3 in ('..', '.'):\n raise ValueError('Path characters \"..\" and \".\" not supported '\n 'for file names: %s' % arg_0)\n\n # Do not allow non-recursive IO to reference directories.\n if not arg_1 and not arg_3:\n raise ValueError('Input or output values that are not recursive must '\n 'reference a filename or wildcard: %s' % arg_0)"} +{"_id": "doc_7564", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a valid docker_path from a Google Persistent Disk url.\"\"\"\n # The string replace is so we don't have colons and double slashes in the\n # mount path. 
The idea is the resulting mount path would look like:\n # /mnt/data/mount/http/www.googleapis.com/compute/v1/projects/...\n arg_2 = os.path.join(arg_0._relative_path,\n arg_1.replace('https://', 'https/', 1))\n return arg_2"} +{"_id": "doc_7565", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Return a MountParam given a GCS bucket, disk image or local path.\"\"\"\n if arg_2.startswith('https://www.googleapis.com/compute'):\n # Full Image URI should look something like:\n # https://www.googleapis.com/compute/v1/projects//global/images/\n # But don't validate further, should the form of a valid image URI\n # change (v1->v2, for example)\n arg_4 = arg_0._parse_image_uri(arg_2)\n return job_model.PersistentDiskMountParam(\n arg_1, arg_2, arg_4, arg_3, disk_type=None)\n elif arg_2.startswith('file://'):\n arg_5, arg_4 = arg_0._parse_local_mount_uri(arg_2)\n return job_model.LocalMountParam(arg_1, arg_2, arg_4, arg_5)\n elif arg_2.startswith('gs://'):\n arg_4 = arg_0._parse_gcs_uri(arg_2)\n return job_model.GCSMountParam(arg_1, arg_2, arg_4)\n else:\n raise ValueError(\n 'Mount parameter {} must begin with valid prefix.'.format(arg_2))"} +{"_id": "doc_7566", "title": "", "text": "def Func(arg_0):\n \"\"\"Turn the specified name and value into a valid Google label.\"\"\"\n\n # We want the results to be user-friendly, not just functional.\n # So we can't base-64 encode it.\n # * If upper-case: lower-case it\n # * If the char is not a standard letter or digit. make it a dash\n\n # March 2019 note: underscores are now allowed in labels.\n # However, removing the conversion of underscores to dashes here would\n # create inconsistencies between old jobs and new jobs.\n # With existing code, $USER \"jane_doe\" has a user-id label of \"jane-doe\".\n # If we remove the conversion, the user-id label for new jobs is \"jane_doe\".\n # This makes looking up old jobs more complicated.\n\n arg_1 = string.ascii_lowercase + string.digits + '-'\n\n def label_char_transform(arg_2):\n if arg_2 in arg_1:\n return arg_2\n if arg_2 in string.ascii_uppercase:\n return arg_2.lower()\n return '-'\n\n return ''.join(label_char_transform(arg_3) for arg_3 in arg_0)"} +{"_id": "doc_7567", "title": "", "text": "def Func(arg_0):\n \"\"\"For each task, ensure that each task param entry is not None.\"\"\"\n for arg_1 in arg_0:\n for arg_2 in [\n 'labels', 'envs', 'inputs', 'outputs', 'input-recursives',\n 'output-recursives'\n ]:\n if not arg_1.task_params.get(arg_2):\n arg_1.task_params[arg_2] = set()"} +{"_id": "doc_7568", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a new dict with any empty items removed.\n\n Note that this is not a deep check. 
If d contains a dictionary which\n itself contains empty items, those are never checked.\n\n This method exists to make to_serializable() functions cleaner.\n We could revisit this some day, but for now, the serialized objects are\n stripped of empty values to keep the output YAML more compact.\n\n Args:\n d: a dictionary\n required: list of required keys (for example, TaskDescriptors always emit\n the \"task-id\", even if None)\n\n Returns:\n A dictionary with empty items removed.\n \"\"\"\n\n arg_2 = {}\n for arg_3, arg_4 in arg_0.items():\n if arg_3 in arg_1:\n arg_2[arg_3] = arg_4\n elif isinstance(arg_4, int) or arg_4:\n # \"if v\" would suppress emitting int(0)\n arg_2[arg_3] = arg_4\n\n return arg_2"} +{"_id": "doc_7569", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts a task-id to the numeric task-id.\n\n Args:\n task_id: task-id in either task-n or n format\n\n Returns:\n n\n \"\"\"\n\n # This function exists to support the legacy \"task-id\" format in the \"google\"\n # provider. Google labels originally could not be numeric. When the google\n # provider is completely replaced by the google-v2 provider, this function can\n # go away.\n\n if arg_0 is not None:\n if arg_0.startswith('task-'):\n return int(arg_0[len('task-'):])\n else:\n return int(arg_0)"} +{"_id": "doc_7570", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Raise ValueError if the label is invalid.\"\"\"\n # Rules for labels are described in:\n # https://cloud.google.com/compute/docs/labeling-resources#restrictions\n\n # * Keys and values cannot be longer than 63 characters each.\n # * Keys and values can only contain lowercase letters, numeric characters,\n # underscores, and dashes.\n # * International characters are allowed.\n # * Label keys must start with a lowercase letter and international\n # characters are allowed.\n # * Label keys cannot be empty.\n arg_0._check_label_name(arg_1)\n arg_0._check_label_value(arg_2)\n\n # Ensure that reserved labels are not being used.\n if not arg_0._allow_reserved_keys and arg_1 in RESERVED_LABELS:\n raise ValueError('Label flag (%s=...) must not use reserved keys: %r' %\n (arg_1, list(RESERVED_LABELS)))"} +{"_id": "doc_7571", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Populate a JobDescriptor from the local provider's original meta.yaml.\n\n The local job provider had the first incarnation of a YAML file for each\n task. That idea was extended here in the JobDescriptor and the local\n provider adopted the JobDescriptor.to_yaml() call to write its meta.yaml.\n\n The JobDescriptor.from_yaml() detects if it receives a local provider's\n \"v0\" meta.yaml and calls this function.\n\n Args:\n job: an object produced from decoding meta.yaml.\n\n Returns:\n A JobDescriptor populated as best we can from the old meta.yaml.\n \"\"\"\n\n # The v0 meta.yaml only contained:\n # create-time, job-id, job-name, logging, task-id\n # labels, envs, inputs, outputs\n # It did NOT contain user-id.\n # dsub-version might be there as a label.\n\n arg_2 = {}\n for arg_3 in ['job-id', 'job-name', 'create-time']:\n arg_2[arg_3] = arg_1.get(arg_3)\n\n # Make sure that create-time string is turned into a datetime\n arg_2['create-time'] = dsub_util.replace_timezone(\n datetime.datetime.strptime(arg_1['create-time'], '%Y-%m-%d %H:%M:%S.%f'),\n tzlocal())\n\n # The v0 meta.yaml contained a \"logging\" field which was the task-specific\n # logging path. 
It did not include the actual \"--logging\" value the user\n # specified.\n arg_4 = Resources()\n\n # The v0 meta.yaml represented a single task.\n # It did not distinguish whether params were job params or task params.\n # We will treat them as either all job params or all task params, based on\n # whether the task-id is empty or an integer value.\n #\n # We also cannot distinguish whether inputs/outputs were recursive or not.\n # Just treat them all as non-recursive.\n arg_5 = {}\n\n # The dsub-version may be in the meta.yaml as a label. If so remove it\n # and set it as a top-level job metadata value.\n arg_6 = arg_1.get('labels', {})\n if 'dsub-version' in arg_6:\n arg_2['dsub-version'] = arg_6['dsub-version']\n del arg_6['dsub-version']\n arg_5['labels'] = arg_0._label_params_from_dict(arg_6)\n\n arg_5['envs'] = arg_0._env_params_from_dict(arg_1.get('envs', {}))\n arg_5['inputs'] = arg_0._input_file_params_from_dict(\n arg_1.get('inputs', {}), False)\n arg_5['outputs'] = arg_0._output_file_params_from_dict(\n arg_1.get('outputs', {}), False)\n\n if arg_1.get('task-id') is None:\n arg_7 = arg_5\n arg_8 = {'task-id': None}\n arg_9 = {}\n else:\n arg_7 = {}\n arg_8 = {'task-id': str(arg_1.get('task-id'))}\n arg_9 = arg_5\n\n arg_10 = Resources(logging_path=arg_1.get('logging'))\n\n arg_11 = [\n TaskDescriptor.get_complete_descriptor(arg_8, arg_9,\n arg_10)\n ]\n\n return JobDescriptor.get_complete_descriptor(\n arg_2, arg_7, arg_4, arg_11)"} +{"_id": "doc_7572", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Populate and return a JobDescriptor from a YAML string.\"\"\"\n try:\n arg_2 = yaml.full_load(arg_1)\n except AttributeError:\n # For installations that cannot update their PyYAML version\n arg_2 = yaml.load(arg_1)\n\n # If the YAML does not contain a top-level dsub version, then assume that\n # the string is coming from the local provider, reading an old version of\n # its meta.yaml.\n arg_3 = arg_2.get('dsub-version')\n if not arg_3:\n return arg_0._Func_v0(arg_2)\n\n arg_4 = {}\n for arg_5 in [\n 'job-id', 'job-name', 'task-ids', 'user-id', 'dsub-version',\n 'user-project', 'script-name'\n ]:\n if arg_2.get(arg_5) is not None:\n arg_4[arg_5] = arg_2.get(arg_5)\n\n # Make sure that create-time string is turned into a datetime\n arg_4['create-time'] = dsub_util.replace_timezone(\n arg_2.get('create-time'), pytz.utc)\n\n arg_6 = Resources(logging=arg_2.get('logging'))\n\n arg_7 = {}\n arg_7['labels'] = arg_0._label_params_from_dict(arg_2.get('labels', {}))\n arg_7['envs'] = arg_0._env_params_from_dict(arg_2.get('envs', {}))\n arg_7['inputs'] = arg_0._input_file_params_from_dict(\n arg_2.get('inputs', {}), False)\n arg_7['input-recursives'] = arg_0._input_file_params_from_dict(\n arg_2.get('input-recursives', {}), True)\n arg_7['outputs'] = arg_0._output_file_params_from_dict(\n arg_2.get('outputs', {}), False)\n arg_7['output-recursives'] = arg_0._output_file_params_from_dict(\n arg_2.get('output-recursives', {}), True)\n arg_7['mounts'] = arg_0._mount_params_from_dict(arg_2.get('mounts', {}))\n\n arg_8 = []\n for arg_9 in arg_2.get('tasks', []):\n arg_10 = {'task-id': arg_9.get('task-id')}\n\n # Old instances of the meta.yaml do not have a task create time.\n arg_11 = arg_9.get('create-time')\n if arg_11:\n arg_10['create-time'] = dsub_util.replace_timezone(\n arg_11, pytz.utc)\n\n if arg_9.get('task-attempt') is not None:\n arg_10['task-attempt'] = arg_9.get('task-attempt')\n\n arg_12 = {}\n arg_12['labels'] = arg_0._label_params_from_dict(\n arg_9.get('labels', {}))\n 
arg_12['envs'] = arg_0._env_params_from_dict(arg_9.get('envs', {}))\n arg_12['inputs'] = arg_0._input_file_params_from_dict(\n arg_9.get('inputs', {}), False)\n arg_12['input-recursives'] = arg_0._input_file_params_from_dict(\n arg_9.get('input-recursives', {}), True)\n arg_12['outputs'] = arg_0._output_file_params_from_dict(\n arg_9.get('outputs', {}), False)\n arg_12['output-recursives'] = arg_0._output_file_params_from_dict(\n arg_9.get('output-recursives', {}), True)\n\n arg_13 = Resources(logging_path=arg_9.get('logging-path'))\n\n arg_8.append(\n TaskDescriptor(arg_10, arg_12, arg_13))\n\n return JobDescriptor(arg_4, arg_7, arg_6,\n arg_8)"} +{"_id": "doc_7573", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the task_descriptor corresponding to task_id.\"\"\"\n\n # It is not guaranteed that the index will be task_id - 1 when --tasks is\n # used with a min/max range.\n for arg_2 in arg_0.task_descriptors:\n if arg_2.task_metadata.get('task-id') == arg_1:\n return arg_2\n return None"} +{"_id": "doc_7574", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a dictionary of environment variables for the user container.\"\"\"\n arg_1 = {}\n for arg_2 in arg_0:\n # We have no cases where the environment variable provided to user\n # scripts have a trailing slash, so be sure to always strip it.\n # The case that this is specifically handling is --input-recursive and\n # --output-recursive variables, which are directory values.\n arg_1[arg_2.name] = os.path.join(\n DATA_MOUNT_POINT, arg_2.docker_path.rstrip('/')) if arg_2.value else ''\n return arg_1"} +{"_id": "doc_7575", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Returns a dict combining the field for job and task params.\"\"\"\n return arg_0.get(arg_2, set()) | arg_1.get(arg_2, set())"} +{"_id": "doc_7576", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None):\n \"\"\"Kill jobs or job tasks.\n\n This function separates ddel logic from flag parsing and user output. 
Users\n of ddel who intend to access the data programmatically should use this.\n\n Args:\n provider: an instantiated dsub provider.\n user_ids: a set of user ids who \"own\" the job(s) to delete.\n job_ids: a set of job ids to delete.\n task_ids: a set of task ids to delete.\n labels: a set of LabelParam, each must match the job(s) to be cancelled.\n create_time_min: a timezone-aware datetime value for the earliest create\n time of a task, inclusive.\n create_time_max: a timezone-aware datetime value for the most recent create\n time of a task, inclusive.\n\n Returns:\n list of job ids which were deleted.\n \"\"\"\n # Delete the requested jobs\n arg_7, arg_8 = arg_0.delete_jobs(\n arg_1, arg_2, arg_3, arg_4, arg_5, arg_6)\n\n # Emit any errors canceling jobs\n for arg_9 in arg_8:\n print(arg_9)\n\n return arg_7"} +{"_id": "doc_7577", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the value for the specified action.\"\"\"\n arg_2 = get_actions(arg_0)\n for arg_3 in arg_2:\n if arg_3.get('name') == arg_1:\n return arg_3"} +{"_id": "doc_7578", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the environment for the operation.\"\"\"\n arg_2 = _get_action_by_name(arg_0, arg_1)\n if arg_2:\n return arg_2.get('environment')"} +{"_id": "doc_7579", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the image for the operation.\"\"\"\n arg_2 = _get_action_by_name(arg_0, arg_1)\n if arg_2:\n return arg_2.get('imageUri')"} +{"_id": "doc_7580", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return all events of a particular type.\"\"\"\n arg_2 = get_events(arg_0)\n if not arg_2:\n return None\n\n return [arg_3 for arg_3 in arg_2 if arg_3.get('details', {}).get('@type') == arg_1]"} +{"_id": "doc_7581", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=None,\n arg_10=0,\n arg_11=0,\n arg_12=False):\n \"\"\"Generate formatted jobs individually, in order of create-time.\n\n Args:\n provider: an instantiated dsub provider.\n statuses: a set of status strings that eligible jobs may match.\n user_ids: a set of user strings that eligible jobs may match.\n job_ids: a set of job-id strings eligible jobs may match.\n job_names: a set of job-name strings eligible jobs may match.\n task_ids: a set of task-id strings eligible tasks may match.\n task_attempts: a set of task-attempt strings eligible tasks may match.\n labels: set of LabelParam that all tasks must match.\n create_time_min: a timezone-aware datetime value for the earliest create\n time of a task, inclusive.\n create_time_max: a timezone-aware datetime value for the most recent create\n time of a task, inclusive.\n max_tasks: (int) maximum number of tasks to return per dstat job lookup.\n page_size: the page size to use for each query to the backend. 
May be\n ignored by some provider implementations.\n summary_output: (bool) summarize the job list.\n\n Yields:\n Individual task dictionaries with associated metadata\n \"\"\"\n arg_13 = arg_0.Func(\n arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11)\n\n # Yield formatted tasks.\n for arg_14 in arg_13:\n yield _prepare_row(arg_14, True, arg_12)"} +{"_id": "doc_7582", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns a list of zones based on any wildcard input.\n\n This function is intended to provide an easy method for producing a list\n of desired zones for a pipeline to run in.\n\n The Pipelines API default zone list is \"any zone\". The problem with\n \"any zone\" is that it can lead to incurring Cloud Storage egress charges\n if the GCE zone selected is in a different region than the GCS bucket.\n See https://cloud.google.com/storage/pricing#network-egress.\n\n A user with a multi-region US bucket would want to pipelines to run in\n a \"us-*\" zone.\n A user with a regional bucket in US would want to restrict pipelines to\n run in a zone in that region.\n\n Rarely does the specific zone matter for a pipeline.\n\n This function allows for a simple short-hand such as:\n [ \"us-*\" ]\n [ \"us-central1-*\" ]\n These examples will expand out to the full list of US and us-central1 zones\n respectively.\n\n Args:\n input_list: list of zone names/patterns\n\n Returns:\n A list of zones, with any wildcard zone specifications expanded.\n \"\"\"\n if not arg_0:\n return []\n\n arg_1 = []\n\n for arg_2 in arg_0:\n if arg_2.endswith('*'):\n arg_3 = arg_2[:-1]\n arg_1.extend([arg_4 for arg_4 in _ZONES if arg_4.startswith(arg_3)])\n else:\n arg_1.append(arg_2)\n\n return arg_1"} +{"_id": "doc_7583", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts a datestamp from RFC3339 UTC to a datetime.\n\n Args:\n rfc3339_utc_string: a datetime string in RFC3339 UTC \"Zulu\" format\n\n Returns:\n A datetime.\n \"\"\"\n\n # The timestamp from the Google Operations are all in RFC3339 format, but\n # they are sometimes formatted to millisconds, microseconds, sometimes\n # nanoseconds, and sometimes only seconds:\n # * 2016-11-14T23:05:56Z\n # * 2016-11-14T23:05:56.010Z\n # * 2016-11-14T23:05:56.010429Z\n # * 2016-11-14T23:05:56.010429380Z\n arg_1 = re.match(r'(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}).?(\\d*)Z',\n arg_0)\n\n # It would be unexpected to get a different date format back from Google.\n # If we raise an exception here, we can break people completely.\n # Instead, let's just return None and people can report that some dates\n # are not showing up.\n # We might reconsider this approach in the future; it was originally\n # established when dates were only used for display.\n if not arg_1:\n return None\n\n arg_2 = arg_1.groups()\n if len(arg_2[6]) not in (0, 3, 6, 9):\n return None\n\n # Create a UTC datestamp from parsed components\n # 1- Turn components 0-5 from strings to integers\n # 2- If the last component does not exist, set it to 0.\n # If it does exist, make sure to interpret it as milliseconds.\n arg_3 = [int(val) for val in arg_2[:6]]\n\n arg_4 = arg_2[6]\n if not arg_4:\n arg_5 = 0\n elif len(arg_4) == 3:\n arg_5 = int(arg_4) * 1000\n elif len(arg_4) == 6:\n arg_5 = int(arg_4)\n elif len(arg_4) == 9:\n # When nanoseconds are provided, we round\n arg_5 = int(round(int(arg_4) / 1000))\n else:\n assert False, 'Fraction length not 0, 6, or 9: {}'.len(arg_4)\n\n 
try:\n return datetime(arg_3[0], arg_3[1], arg_3[2], arg_3[3], arg_3[4], arg_3[5], arg_5, tzinfo=pytz.utc)\n except ValueError as e:\n assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(\n arg_0, e)"} +{"_id": "doc_7584", "title": "", "text": "def Func(arg_0):\n \"\"\"Returns the job-id or job-id.task-id for the operation.\"\"\"\n arg_1 = arg_0.get_field('job-id')\n arg_2 = arg_0.get_field('task-id')\n if arg_2:\n return '%s.%s' % (arg_1, arg_2)\n else:\n return arg_1"} +{"_id": "doc_7585", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Cancel a batch of operations.\n\n Args:\n batch_fn: API-specific batch function.\n cancel_fn: API-specific cancel function.\n ops: A list of operations to cancel.\n\n Returns:\n A list of operations canceled and a list of error messages.\n \"\"\"\n\n # We define an inline callback which will populate a list of\n # successfully canceled operations as well as a list of operations\n # which were not successfully canceled.\n\n arg_3 = []\n arg_4 = []\n\n def handle_cancel_response(arg_5, arg_6, arg_7):\n \"\"\"Callback for the cancel response.\"\"\"\n del arg_6 # unused\n\n if arg_7:\n # We don't generally expect any failures here, except possibly trying\n # to cancel an operation that is already canceled or finished.\n #\n # If the operation is already finished, provide a clearer message than\n # \"error 400: Bad Request\".\n\n arg_8 = 'error %s: %s' % (arg_7.resp.status, arg_7.resp.reason)\n if arg_7.resp.status == FAILED_PRECONDITION_CODE:\n arg_9 = json.loads(arg_7.content)\n arg_10 = arg_9.get('error', {}).get('status')\n if arg_10 == FAILED_PRECONDITION_STATUS:\n arg_8 = 'Not running'\n\n arg_4.append({'name': arg_5, 'msg': arg_8})\n else:\n arg_3.append({'name': arg_5})\n\n return\n\n # Set up the batch object\n arg_11 = arg_0(callback=handle_cancel_response)\n\n # The callback gets a \"request_id\" which is the operation name.\n # Build a dict such that after the callback, we can lookup the operation\n # objects by name\n arg_12 = {}\n for arg_13 in arg_2:\n arg_14 = arg_13.get_field('internal-id')\n arg_12[arg_14] = arg_13\n arg_11.add(arg_1(name=arg_14, body={}), arg_5=arg_14)\n\n # Cancel the operations\n arg_11.execute()\n\n # Iterate through the canceled and failed lists to build our return lists\n arg_15 = [arg_12[arg_13['name']] for arg_13 in arg_3]\n arg_16 = []\n for arg_17 in arg_4:\n arg_13 = arg_12[arg_17['name']]\n arg_16.append(\"Error canceling '%s': %s\" %\n (get_operation_full_job_id(arg_13), arg_17['msg']))\n\n return arg_15, arg_16"} +{"_id": "doc_7586", "title": "", "text": "def Func(arg_0):\n \"\"\"Specific check for auth error codes.\n\n Return True if we should retry.\n\n False otherwise.\n Args:\n exception: An exception to test for transience.\n\n Returns:\n True if we should retry. 
False otherwise.\n \"\"\"\n if isinstance(arg_0, apiclient.errors.HttpError):\n if arg_0.resp.status in HTTP_AUTH_ERROR_CODES:\n _print_error('Retrying...')\n return True\n\n return False"} +{"_id": "doc_7587", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Configures genomics API client.\n\n Args:\n api_name: Name of the Google API (for example: \"genomics\")\n api_version: Version of the API (for example: \"v2alpha1\")\n credentials: Credentials to be used for the gcloud API calls.\n\n Returns:\n A configured Google Genomics API client with appropriate credentials.\n \"\"\"\n if not arg_2:\n arg_2 = oauth2client.client.GoogleCredentials.get_application_default(\n )\n return apiclient.discovery.build(\n arg_0, arg_1, arg_2=arg_2)"} +{"_id": "doc_7588", "title": "", "text": "def Func(arg_0):\n \"\"\"Executes operation.\n\n Args:\n api: The base API object\n\n Returns:\n A response body object\n \"\"\"\n try:\n return arg_0.Func()\n except Exception as exception:\n arg_1 = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n _print_error('%s: Exception %s: %s' % (arg_1, type(exception).__name__,\n str(exception)))\n # Re-raise exception to be handled by retry logic\n raise exception"} +{"_id": "doc_7589", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=None, arg_4=None):\n \"\"\"Returns a type from a snippit of python source. Should normally be\n something just like 'str' or 'Object'.\n\n arg_type the source to be evaluated\n T the default type\n arg context of where this type was extracted\n sig context from where the arg was extracted\n\n Returns a type or a Type\n \"\"\"\n try:\n arg_1 = eval(arg_0)\n except Exception as e:\n raise ValueError('The type of {0} could not be evaluated in {1} for {2}: {3}' \\\n .format(arg_0, arg_3, arg_4, text_type(e)))\n else:\n if type(arg_1) not in (type, Type):\n raise TypeError('{0} is not a valid type in {1} for {2}' \\\n .format(repr(arg_1), arg_3, arg_4))\n return arg_1"} +{"_id": "doc_7590", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Returns a jsonified response with the specified HTTP status code.\n\n The positional and keyword arguments are passed directly to the\n :func:`flask.jsonify` function which creates the response.\n \"\"\"\n arg_3 = arg_2.pop('is_batch', False)\n if arg_3:\n arg_4 = flask_make_response(json.dumps(*arg_1, **arg_2))\n arg_4.mimetype = 'application/json'\n arg_4.status_code = arg_0\n return arg_4\n arg_4 = jsonify(*arg_1, **arg_2)\n arg_4.status_code = arg_0\n return arg_4"} +{"_id": "doc_7591", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Performs the actual sending action and returns the result\n \"\"\"\n arg_2 = json.dumps({\n 'jsonrpc': arg_0.version,\n 'method': arg_0.service_name,\n 'params': arg_1,\n 'id': text_type(uuid.uuid4())\n })\n arg_3 = arg_2.encode('utf-8')\n arg_4 = Request(arg_0.service_url, arg_3, headers=arg_0.headers)\n return urlopen(arg_4).read()"} +{"_id": "doc_7592", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the Exception data in a format for JSON-RPC\n \"\"\"\n\n arg_1 = {\n 'name': text_type(arg_0.__class__.__name__),\n 'code': arg_0.code,\n 'message': '{0}'.format(text_type(arg_0.message)),\n 'data': arg_0.data\n }\n\n if current_app.config['DEBUG']:\n import sys, traceback\n arg_1['stack'] = traceback.format_exc()\n arg_1['executable'] = sys.executable\n\n return arg_1"} +{"_id": "doc_7593", "title": "", "text": "def Func(arg_0):\n \"\"\"\n An `inspect.getargspec` with a relaxed sanity check to support Cython.\n\n Motivation:\n\n 
A Cython-compiled function is *not* an instance of Python's\n types.FunctionType. That is the sanity check the standard Py2\n library uses in `inspect.getargspec()`. So, an exception is raised\n when calling `argh.dispatch_command(cythonCompiledFunc)`. However,\n the CyFunctions do have perfectly usable `.func_code` and\n `.func_defaults` which is all `inspect.getargspec` needs.\n\n This function just copies `inspect.getargspec()` from the standard\n library but relaxes the test to a more duck-typing one of having\n both `.func_code` and `.func_defaults` attributes.\n \"\"\"\n if inspect.ismethod(arg_0):\n arg_0 = arg_0.im_func\n\n # Py2 Stdlib uses isfunction(func) which is too strict for Cython-compiled\n # functions though such have perfectly usable func_code, func_defaults.\n if not (hasattr(arg_0, \"func_code\") and hasattr(arg_0, \"func_defaults\")):\n raise TypeError('{!r} missing func_code or func_defaults'.format(arg_0))\n\n arg_1, arg_2, arg_3 = inspect.getargs(arg_0.func_code)\n return inspect.ArgSpec(arg_1, arg_2, arg_3, arg_0.func_defaults)"} +{"_id": "doc_7594", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Prompts user for input. Correctly handles prompt message encoding.\n \"\"\"\n\n if sys.version_info < (3,0):\n if isinstance(arg_0, compat.text_type):\n # Python 2.x: unicode \u2192 bytes\n arg_1 = locale.getpreferredencoding() or 'utf-8'\n arg_0 = arg_0.encode(arg_1)\n else:\n if not isinstance(arg_0, compat.text_type):\n # Python 3.x: bytes \u2192 unicode\n arg_0 = arg_0.decode()\n\n return _input(arg_0)"} +{"_id": "doc_7595", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Encodes given value so it can be written to given file object.\n\n Value may be Unicode, binary string or any other data type.\n\n The exact behaviour depends on the Python version:\n\n Python 3.x\n\n `sys.stdout` is a `_io.TextIOWrapper` instance that accepts `str`\n (unicode) and breaks on `bytes`.\n\n It is OK to simply assume that everything is Unicode unless special\n handling is introduced in the client code.\n\n Thus, no additional processing is performed.\n\n Python 2.x\n\n `sys.stdout` is a file-like object that accepts `str` (bytes)\n and breaks when `unicode` is passed to `sys.stdout.write()`.\n\n We can expect both Unicode and bytes. They need to be encoded so as\n to match the file object encoding.\n\n The output is binary if the object doesn't explicitly require Unicode.\n\n \"\"\"\n if sys.version_info > (3,0):\n # Python 3: whatever \u2192 unicode\n return compat.text_type(arg_0)\n else:\n # Python 2: handle special cases\n arg_2 = getattr(arg_1, 'encoding', None)\n if arg_2:\n if arg_2.upper() == 'UTF-8':\n return compat.text_type(arg_0)\n else:\n return arg_0.encode(arg_2, 'ignore')\n else:\n # no explicit encoding requirements; force binary\n if isinstance(arg_0, compat.text_type):\n # unicode \u2192 binary\n return arg_0.encode('utf-8')\n else:\n return str(arg_0)"} +{"_id": "doc_7596", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Adds types, actions, etc. 
to given argument specification.\n For example, ``default=3`` implies ``type=int``.\n\n :param arg: a :class:`argh.utils.Arg` instance\n \"\"\"\n arg_1 = {}\n\n # Parser actions that accept argument 'type'\n arg_2 = 'store', 'append'\n\n # guess type/action from default value\n arg_3 = arg_0.get('default')\n if arg_3 is not None:\n if isinstance(arg_3, bool):\n if arg_0.get('action') is None:\n # infer action from default value\n arg_1['action'] = 'store_false' if arg_3 else 'store_true'\n elif arg_0.get('type') is None:\n # infer type from default value\n # (make sure that action handler supports this keyword)\n if arg_0.get('action', 'store') in arg_2:\n arg_1['type'] = type(arg_3)\n\n # guess type from choices (first item)\n if arg_0.get('choices') and 'type' not in list(arg_1) + list(arg_0):\n arg_1['type'] = type(arg_0['choices'][0])\n\n return dict(arg_0, **arg_1)"} +{"_id": "doc_7597", "title": "", "text": "def Func(*Func_0, **Func_1):\n \"\"\"\n Declares an Funcument for given function. Does not register the function\n anywhere, nor does it modify the function in any way.\n\n The signature of the decorator matches that of\n :meth:`Funcparse.ArgumentParser.add_Funcument`, only some keywords are not\n required if they can be easily guessed (e.g. you don't have to specify type\n or action when an `int` or `bool` default value is supplied).\n\n Typical use cases:\n\n - In combination with :func:`expects_obj` (which is not recommended);\n - in combination with ordinary function signatures to add details that\n cannot be expressed with that syntax (e.g. help message).\n\n Usage::\n\n from Funch import Func\n\n @Func('path', help='path to the file to load')\n @Func('--format', choices=['yaml','json'])\n @Func('-v', '--verbosity', choices=range(0,3), default=2)\n def load(path, something=None, format='json', dry_run=False, verbosity=1):\n loaders = {'json': json.load, 'yaml': yaml.load}\n loader = loaders[Funcs.format]\n data = loader(Funcs.path)\n if not Funcs.dry_run:\n if verbosity < 1:\n print('saving to the database')\n put_to_database(data)\n\n In this example:\n\n - `path` declaration is extended with `help`;\n - `format` declaration is extended with `choices`;\n - `dry_run` declaration is not duplicated;\n - `verbosity` is extended with `choices` and the default value is\n overridden. (If both function signature and `@Func` define a default\n value for an Funcument, `@Func` wins.)\n\n .. note::\n\n It is recommended to avoid using this decorator unless there's no way\n to tune the Funcument's behaviour or presentation using ordinary\n function signatures. 
Readability counts, don't repeat yourself.\n\n \"\"\"\n def wrapper(Func_2):\n Func_3 = getattr(Func_2, ATTR_ARGS, [])\n # The innermost decorator is called first but appears last in the code.\n # We need to preserve the expected order of positional Funcuments, so\n # the outermost decorator inserts its value before the innermost's:\n Func_3.insert(0, dict(option_strings=Func_0, **Func_1))\n setattr(Func_2, ATTR_ARGS, Func_3)\n return Func_2\n return wrapper"} +{"_id": "doc_7598", "title": "", "text": "def Func(arg_0):\n \"\"\"Make a guess about the config file location an try loading it.\"\"\"\n arg_1 = os.path.join(Config.config_dir, Config.config_name)\n return arg_0.from_file(arg_1)"} +{"_id": "doc_7599", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Validate a configuration key according to `section.item`.\"\"\"\n if not arg_2:\n return arg_2\n\n try:\n arg_3, arg_4 = arg_2.split(\".\", 1)\n except ValueError:\n raise click.BadArgumentUsage(\"Given key does not contain a section name.\")\n else:\n return arg_3, arg_4"} +{"_id": "doc_7600", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Searches the given text for mentions and expands them.\n\n For example:\n \"@source.nick\" will be expanded to \"@\".\n \"\"\"\n if arg_1:\n arg_2 = \"@<{name} {url}>\"\n else:\n arg_2 = \"@<{url}>\"\n\n def handle_mention(arg_3):\n arg_4 = get_source_by_name(arg_3.group(1))\n if arg_4 is None:\n return \"@{0}\".format(arg_3.group(1))\n return arg_2.format(\n name=arg_4.nick,\n url=arg_4.url)\n\n return short_mention_re.sub(handle_mention, arg_0)"} +{"_id": "doc_7601", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Try loading given cache file.\"\"\"\n try:\n arg_4 = shelve.open(arg_1)\n return arg_0(arg_1, arg_4, *arg_2, **arg_3)\n except OSError as e:\n logger.debug(\"Loading {0} failed\".format(arg_1))\n raise e"} +{"_id": "doc_7602", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Checks if specified URL is cached.\"\"\"\n try:\n return True if arg_1 in arg_0.cache else False\n except TypeError:\n return False"} +{"_id": "doc_7603", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Retrieves tweets from the cache.\"\"\"\n try:\n arg_3 = arg_0.cache[arg_1][\"tweets\"]\n arg_0.mark_updated()\n return sorted(arg_3, reverse=True)[:arg_2]\n except KeyError:\n return []"} +{"_id": "doc_7604", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Tries to remove cached tweets.\"\"\"\n try:\n del arg_0.cache[arg_1]\n arg_0.mark_updated()\n return True\n except KeyError:\n return False"} +{"_id": "doc_7605", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9):\n \"\"\"Retrieve your personal Func.\"\"\"\n if arg_7:\n arg_10 = arg_0.obj[\"conf\"].get_source_by_nick(arg_7)\n if not arg_10:\n logger.debug(\"Not following {0}, trying as URL\".format(arg_7))\n arg_10 = Source(arg_7, arg_7)\n arg_11 = [arg_10]\n else:\n arg_11 = arg_0.obj[\"conf\"].following\n\n arg_12 = []\n\n if arg_8:\n try:\n with Cache.discover(update_interval=arg_0.obj[\"conf\"].Func_update_interval) as arg_8:\n arg_9 = arg_9 or not arg_8.is_valid\n if arg_9:\n arg_12 = get_remote_tweets(arg_11, arg_2, arg_5, arg_8)\n else:\n logger.debug(\"Multiple calls to 'Func' within {0} seconds. Skipping update\".format(\n arg_8.update_interval))\n # Behold, almighty list comprehensions! 
(I might have gone overboard here\u2026)\n arg_12 = list(chain.from_iterable([arg_8.get_tweets(arg_7.url) for arg_7 in arg_11]))\n except OSError as e:\n logger.debug(e)\n arg_12 = get_remote_tweets(arg_11, arg_2, arg_5)\n else:\n arg_12 = get_remote_tweets(arg_11, arg_2, arg_5)\n\n if arg_3 and not arg_7:\n arg_7 = Source(arg_0.obj[\"conf\"].nick, arg_0.obj[\"conf\"].twturl, file=arg_3)\n arg_12.extend(get_local_tweets(arg_7, arg_2))\n\n if not arg_12:\n return\n\n arg_12 = sort_and_truncate_tweets(arg_12, arg_4, arg_2)\n\n if arg_1:\n click.echo_via_pager(style_Func(arg_12, arg_6))\n else:\n click.echo(style_Func(arg_12, arg_6))"} +{"_id": "doc_7606", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Get or set Func item.\"\"\"\n arg_5 = arg_0.obj[\"conf\"]\n\n if not arg_4 and not arg_1:\n raise click.BadArgumentUsage(\"You have to specify either a key or use --edit.\")\n\n if arg_4:\n return click.edit(filename=arg_5.Func_file)\n\n if arg_3:\n try:\n arg_5.cfg.remove_option(arg_1[0], arg_1[1])\n except Exception as e:\n logger.debug(e)\n else:\n arg_5.write_Func()\n return\n\n if not arg_2:\n try:\n click.echo(arg_5.cfg.get(arg_1[0], arg_1[1]))\n except Exception as e:\n logger.debug(e)\n return\n\n if not arg_5.cfg.has_section(arg_1[0]):\n arg_5.cfg.add_section(arg_1[0])\n\n arg_5.cfg.set(arg_1[0], arg_1[1], arg_2)\n arg_5.write_Func()"} +{"_id": "doc_7607", "title": "", "text": "def Func(arg_0):\n \"\"\"Return human-readable relative time string.\"\"\"\n arg_1 = datetime.now(timezone.utc)\n arg_2 = \"from now\" if arg_0.created_at > arg_1 else \"ago\"\n return \"{0} {1}\".format(humanize.naturaldelta(arg_1 - arg_0.created_at), arg_2)"} +{"_id": "doc_7608", "title": "", "text": "def Func(arg_0, **arg_1):\n '''\n Copy the Query object, optionally replacing the filters, order_by, or\n limit information on the copy. 
This is mostly an internal detail that\n you can ignore.\n '''\n arg_2 = {\n 'model': arg_0._model,\n 'filters': arg_0._filters,\n 'order_by': arg_0._order_by,\n 'limit': arg_0._limit,\n 'select': arg_0._select,\n }\n arg_2.update(**arg_1)\n return Query(**arg_2)"} +{"_id": "doc_7609", "title": "", "text": "def Func(arg_0):\n '''\n Returns only the Func result from the query, if any.\n '''\n arg_1 = [0, 1]\n if arg_0._limit:\n arg_1[0] = arg_0._limit[0]\n if not arg_0._filters and not arg_0._order_by:\n for arg_2 in arg_0:\n return arg_2\n return None\n arg_3 = arg_0.limit(*arg_1)._search()\n if arg_3:\n return arg_0._model.get(arg_3[0])\n return None"} +{"_id": "doc_7610", "title": "", "text": "def Func(arg_0):\n '''\n This function handles all on_delete semantics defined on OneToMany columns.\n\n This function only exists because 'cascade' is *very* hard to get right.\n '''\n arg_1 = set([arg_0._pk])\n arg_2 = [arg_0]\n arg_3 = set()\n arg_4 = []\n\n def _set_default(arg_0, arg_5, arg_6=arg_7):\n arg_8 = arg_0._pk\n if arg_8 in arg_1:\n # going to be deleted, don't need to modify\n return\n\n arg_9 = arg_0.__class__._columns[arg_5]\n arg_6 = arg_6 if arg_6 is not arg_7 else arg_9._default\n if arg_6 in (None, arg_7):\n setattr(arg_0, arg_5, None)\n elif callable(arg_9._default):\n setattr(arg_0, arg_5, arg_9._default())\n else:\n setattr(arg_0, arg_5, arg_9._default)\n\n if arg_8 not in arg_3:\n arg_3.add(arg_8)\n arg_4.append(arg_0)\n\n for arg_10 in arg_2:\n for arg_11, arg_5, arg_12 in MODELS_REFERENCED.get(arg_10._namespace, ()):\n if arg_12 == 'no action':\n continue\n\n arg_13 = MODELS[arg_11].get_by(**{arg_5: arg_10.id})\n if not arg_13:\n continue\n\n if arg_12 == 'restrict':\n # raise the exception here for a better traceback\n raise _restrict(arg_10, arg_5, arg_13)\n elif arg_12 == 'set null':\n for arg_14 in arg_13:\n _set_default(arg_14, arg_5, None)\n continue\n elif arg_12 == 'set default':\n for arg_14 in arg_13:\n _set_default(arg_14, arg_5)\n continue\n\n # otherwise col.Func == 'cascade'\n for arg_0 in (arg_13 if isinstance(arg_13, list) else [arg_13]):\n if arg_0._pk not in arg_1:\n arg_1.add(arg_0._pk)\n arg_2.append(arg_0)\n\n # If we got here, then to_delete includes all items to delete. Let's delete\n # them!\n for arg_10 in arg_2:\n arg_10.delete(skipFunc_i_really_mean_it=SKIP_ON_DELETE)\n for arg_10 in arg_4:\n # Careful not to resurrect deleted entities\n if arg_10._pk not in arg_1:\n arg_10.save()"} +{"_id": "doc_7611", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=None):\n '''\n Performs the actual prefix, suffix, and pattern match operations. 
\n '''\n arg_6 = '%s:%s'%(arg_2.partition(':')[0], uuid.uuid4())\n arg_7, arg_8 = _start_end(arg_3)\n return _Func(arg_0,\n [arg_1, arg_6, arg_2],\n [arg_7, arg_8, arg_5 or arg_3, int(arg_5 is not None), int(bool(arg_4))]\n )"} +{"_id": "doc_7612", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''\n Estimates the total work necessary to calculate the prefix match over the\n given index with the provided prefix.\n '''\n if arg_1.endswith(':idx'):\n arg_3 = [] if not arg_2 else list(arg_2)\n if arg_3:\n arg_3[0] = '-inf' if arg_3[0] is None else repr(float(arg_3[0]))\n arg_3[1] = 'inf' if arg_3[1] is None else repr(float(arg_3[1]))\n return _Func(arg_0, [arg_1], arg_3, force_eval=True)\n elif arg_1.endswith(':geo'):\n return _Func(arg_0, [arg_1], filter(None, [arg_2]), force_eval=True)\n\n arg_4, arg_5 = _start_end(arg_2)\n return _Func(arg_0, [arg_1], [arg_4, '(' + arg_5], force_eval=True)"} +{"_id": "doc_7613", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None, arg_6=None):\n '''\n Search for model ids that match the provided filters.\n\n Arguments:\n\n * *filters* - A list of filters that apply to the Func of one of\n the following two forms:\n\n 1. ``'column:string'`` - a plain string will match a word in a\n text Func on the column\n\n .. note:: Read the documentation about the ``Query`` object\n for what is actually passed during text Func\n\n 2. ``('column', min, max)`` - a numeric column range Func,\n between min and max (inclusive by default)\n\n .. note:: Read the documentation about the ``Query`` object\n for information about open-ended ranges\n\n 3. ``['column:string1', 'column:string2']`` - will match any\n of the provided words in a text Func on the column\n\n 4. ``Prefix('column', 'prefix')`` - will match prefixes of\n words in a text Func on the column\n\n 5. ``Suffix('column', 'suffix')`` - will match suffixes of\n words in a text Func on the column\n\n 6. ``Pattern('column', 'pattern')`` - will match patterns over\n words in a text Func on the column\n\n * *order_by* - A string that names the numeric column by which to\n sort the results by. Prefixing with '-' will return results in\n descending order\n\n .. note:: While you can technically pass a non-numeric index as an\n *order_by* clause, the results will basically be to order the\n results by string comparison of the ids (10 will come before 2).\n\n .. note:: If you omit the ``order_by`` argument, results will be\n ordered by the last filter. If the last filter was a text\n filter, see the previous note. If the last filter was numeric,\n then results will be ordered by that result.\n\n * *offset* - A numeric starting offset for results\n * *count* - The maximum number of results to return from the query\n '''\n # prepare the filters\n arg_7, arg_8, arg_9 = arg_0._prepare(arg_1, arg_2)\n\n # handle ordering\n if arg_3:\n arg_10 = arg_3 and arg_3.startswith('-')\n arg_11 = '%s:%s:idx'%(arg_0.namespace, arg_3.lstrip('-'))\n arg_8(arg_9, {arg_9:0, arg_11: -1 if arg_10 else 1})\n\n # handle returning the temporary result key\n if arg_6 is not None:\n arg_7.expire(arg_9, arg_6)\n arg_7.execute()\n return arg_9\n\n arg_4 = arg_4 if arg_4 is not None else 0\n arg_12 = (arg_4 + arg_5 - 1) if arg_5 and arg_5 > 0 else -1\n arg_7.zrange(arg_9, arg_4, arg_12)\n arg_7.delete(arg_9)\n return arg_7.execute()[-2]"} +{"_id": "doc_7614", "title": "", "text": "def Func(arg_0, arg_1=100):\n '''\n This utility function will iterate over all entities of a provided model,\n refreshing their indices. 
This is primarily useful after adding an index\n on a column.\n\n Arguments:\n\n * *model* - the model whose entities you want to reindex\n * *block_size* - the maximum number of entities you want to fetch from\n Redis at a time, defaulting to 100\n\n This function will yield its progression through re-indexing all of your\n entities.\n\n Example use::\n\n for progress, total in Func(MyModel, block_size=200):\n print \"%s of %s\"%(progress, total)\n\n .. note:: This uses the session object to handle index refresh via calls to\n ``.commit()``. If you have any outstanding entities known in the\n session, they will be committed.\n '''\n arg_2 = _connect(arg_0)\n arg_3 = int(arg_2.get('%s:%s:'%(arg_0._namespace, arg_0._pkey)) or '0')\n arg_1 = max(arg_1, 10)\n for arg_4 in range(1, arg_3+1, arg_1):\n # fetches entities, keeping a record in the session\n arg_5 = arg_0.get(list(range(arg_4, arg_4+arg_1)))\n arg_5 # for pyflakes\n # re-save un-modified data, resulting in index-only updates\n session.commit(all=True)\n yield min(arg_4+arg_1, arg_3), arg_3"} +{"_id": "doc_7615", "title": "", "text": "def Func(arg_0, arg_1=100, **arg_2):\n '''\n This utility function will clean out old index data that was accidentally\n left during item deletion in rom versions <= 0.27.0 . You should run this\n after you have upgraded all of your clients to version 0.28.0 or later.\n\n Arguments:\n\n * *model* - the model whose entities you want to reindex\n * *block_size* - the maximum number of items to check at a time\n defaulting to 100\n\n This function will yield its progression through re-checking all of the\n data that could be left over.\n\n Example use::\n\n for progress, total in Func(MyModel, block_size=200):\n print \"%s of %s\"%(progress, total)\n '''\n\n arg_3 = _connect(arg_0)\n arg_4 = list(map(int, arg_3.info()['redis_version'].split('.')[:2]))\n arg_5 = arg_4 >= [2, 8]\n arg_6 = arg_3.pipeline(True)\n arg_7 = '%s:'%arg_0._namespace\n arg_8 = arg_7 + ':'\n arg_1 = max(arg_1, 10)\n\n arg_9 = arg_2.get('force_hscan', False)\n if (arg_5 or arg_9) and arg_9 is not None:\n arg_10 = arg_3.hlen(arg_8)\n arg_11 = None\n arg_12 = 0\n while arg_11 != b'0':\n arg_11, arg_13 = _scan_index_lua(arg_3, [arg_8, arg_7], [arg_11 or '0', arg_1, 0, 0])\n if arg_13:\n _clean_index_lua(arg_3, [arg_0._namespace], arg_13)\n\n arg_12 += arg_1\n if arg_12 > arg_10:\n arg_10 = arg_12 + 1\n yield arg_12, arg_10\n\n # need to scan over unique indexes :/\n for arg_14 in chain(arg_0._unique, arg_0._cunique):\n arg_15 = arg_14 if isinstance(arg_14, six.string_types) else ':'.join(arg_14)\n arg_16 = arg_7 + arg_15 + ':uidx'\n\n arg_11 = None\n while arg_11 != b'0':\n arg_11, arg_13 = _scan_index_lua(arg_3, [arg_16, arg_7], [arg_11 or '0', arg_1, 1, 0])\n if arg_13:\n arg_3.hdel(arg_16, *arg_13)\n\n arg_12 += arg_1\n if arg_12 > arg_10:\n arg_10 = arg_12 + 1\n yield arg_12, arg_10\n else:\n if arg_0._unique or arg_0._cunique:\n if arg_5:\n warnings.warn(\"You have disabled the use of HSCAN to clean up indexes, this will prevent unique index cleanup\", stacklevel=2)\n else:\n warnings.warn(\"Unique indexes cannot be cleaned up in Redis versions prior to 2.8\", stacklevel=2)\n\n arg_10 = int(arg_3.get('%s%s:'%(arg_7, arg_0._pkey)) or '0')\n for arg_17 in range(1, arg_10+1, arg_1):\n arg_18 = list(range(arg_17, min(arg_17+arg_1, arg_10+1)))\n for arg_19 in arg_18:\n arg_6.exists(arg_7 + str(arg_19))\n arg_6.hexists(arg_8, arg_19)\n\n arg_20 = iter(arg_6.execute())\n arg_13 = [arg_19 for arg_19, ent, ind in zip(arg_18, arg_20, arg_20) 
if ind and not ent]\n if arg_13:\n _clean_index_lua(arg_3, [arg_0._namespace], arg_13)\n\n yield min(arg_17+arg_1, arg_10-1), arg_10\n\n yield arg_10, arg_10"} +{"_id": "doc_7616", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Adds an entity to the session.\n '''\n if arg_0.null_session:\n return\n arg_0._init()\n arg_2 = arg_1._pk\n if not arg_2.endswith(':None'):\n arg_0.known[arg_2] = arg_1\n arg_0.wknown[arg_2] = arg_1"} +{"_id": "doc_7617", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6,\n arg_7, arg_8, arg_9, arg_10, arg_11, arg_12, arg_13, arg_14):\n '''\n ... Actually write data to Redis. This is an internal detail. Please don't\n call me directly.\n '''\n arg_15 = []\n for arg_16 in arg_7.items():\n arg_15.extend(arg_16)\n\n for arg_17 in arg_10:\n arg_17.append(_prefix_score(arg_17[-1]))\n for arg_17 in arg_11:\n arg_17.append(_prefix_score(arg_17[-1]))\n\n arg_7 = [json.dumps(x, default=_fix_bytes) for x in\n (arg_4, arg_5, arg_6, arg_15, arg_8, arg_9, arg_10, arg_11, arg_12, arg_14, arg_13)]\n arg_18 = _Func(arg_0, [], [arg_2, arg_3] + arg_7)\n\n if isinstance(arg_0, _Pipeline):\n # we're in a pipelined write situation, don't parse the pipeline :P\n return\n\n if six.PY3:\n arg_18 = arg_18.decode()\n\n arg_18 = json.loads(arg_18)\n if 'unique' in arg_18:\n arg_18 = arg_18['unique']\n raise UniqueKeyViolation(\n \"Value %r for %s:%s:uidx not distinct (failed for pk=%s)\"%(\n arg_4[arg_18], arg_2, arg_18, arg_3),\n arg_2, arg_3)\n\n if 'race' in arg_18:\n arg_18 = arg_18['race']\n if arg_1 in arg_18:\n raise EntityDeletedError(\n \"Entity %s:%s deleted by another writer; use .save(force=True) to re-save\"%(\n arg_2, arg_3),\n arg_2, arg_3)\n\n raise DataRaceError(\n \"%s:%s Column(s) %r updated by another writer, write aborted!\"%(\n arg_2, arg_3, arg_18),\n arg_2, arg_3)"} +{"_id": "doc_7618", "title": "", "text": "def Func(arg_0, **arg_1):\n '''\n Deletes the entity immediately. 
Also performs any on_Func operations\n specified as part of column definitions.\n '''\n if arg_1.get('skip_on_Func_i_really_mean_it') is not SKIP_ON_DELETE:\n # handle the pre-commit hook\n arg_0._before_Func()\n # handle any foreign key references + cascade options\n _on_Func(arg_0)\n\n session.forget(arg_0)\n arg_0._apply_changes(arg_0._last, {}, Func=True, _conn=arg_1.get('_conn'))\n arg_0._modified = True\n arg_0._Funcd = True\n # handle the post-commit hooks\n if arg_1.get('skip_on_Func_i_really_mean_it') is not SKIP_ON_DELETE:\n arg_0._after_Func()"} +{"_id": "doc_7619", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Will fetch one or more entities of this type from the session or\n Redis.\n\n Used like::\n\n MyModel.Func(5)\n MyModel.Func([1, 6, 2, 4])\n\n Passing a list or a tuple will return multiple entities, in the same\n order that the ids were passed.\n '''\n arg_2 = _connect(arg_0)\n # prepare the ids\n arg_3 = not isinstance(arg_1, (list, tuple, set, frozenset))\n if arg_3:\n arg_1 = [arg_1]\n arg_4 = ['%s:%s'%(arg_0._namespace, id) for id in map(int, arg_1)]\n # Func from the session, if possible\n arg_5 = list(map(session.Func, arg_4))\n # if we couldn't Func an instance from the session, load from Redis\n if None in arg_5:\n arg_6 = arg_2.pipeline(True)\n arg_7 = []\n # Fetch missing data\n for arg_8, arg_9 in enumerate(arg_5):\n if arg_9 is None:\n arg_7.append(arg_8)\n arg_6.hFuncall(arg_4[arg_8])\n # Update output list\n for arg_8, arg_9 in zip(arg_7, arg_6.execute()):\n if arg_9:\n if six.PY3:\n arg_9 = dict((k.decode(), v.decode()) for k, v in arg_9.items())\n arg_5[arg_8] = arg_0(_loading=True, **arg_9)\n # Get rid of missing models\n arg_5 = [x for x in arg_5 if x]\n if arg_3:\n return arg_5[0] if arg_5 else None\n return arg_5"} +{"_id": "doc_7620", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\" Parse the options, set defaults and then fire up PhantomJS. \"\"\"\n\n arg_3 = heimdallDevice(arg_2.get('device', None))\n\n arg_2['width'] = arg_2.get('width', None) or arg_3.width\n arg_2['height'] = arg_2.get('height', None) or arg_3.height\n arg_2['user_agent'] = arg_2.get('user_agent', None) or arg_3.user_agent\n\n arg_4 = screenshot(arg_0, **arg_2)\n\n if arg_2.get('optimize'):\n arg_5 = Image.open(arg_4.path)\n arg_5.Func(arg_4.path, optimize=True)\n\n return arg_4"} +{"_id": "doc_7621", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\" Call PhantomJS with the specified flags and options. \"\"\"\n\n arg_3 = os.path.join(os.path.dirname(__file__),\n 'take_Func.js')\n\n arg_4 = arg_2.get('save_dir', '/tmp')\n arg_5 = arg_2.get('image_name', None) or _image_name_from_url(arg_0)\n arg_6 = arg_2.get('format', 'png').lower()\n arg_7 = os.path.join(arg_4, arg_5) + '.' + arg_6\n arg_8 = arg_2.get('crop_to_visible', False)\n\n arg_9 = [\n 'phantomjs',\n '--ssl-protocol=any',\n arg_3,\n arg_0,\n '--width',\n str(arg_2['width']),\n '--height',\n str(arg_2['height']),\n '--useragent',\n str(arg_2['user_agent']),\n '--dir',\n arg_4,\n '--ext',\n arg_6,\n '--name',\n str(arg_5),\n ]\n if arg_8:\n arg_9.append('--croptovisible')\n\n # TODO:\n # - quality\n # - renderafter\n # - maxexecutiontime\n # - resourcetimeout\n\n arg_10 = subprocess.Popen(arg_9,\n stdout=subprocess.PIPE).communicate()[0]\n\n return Screenshot(arg_7, arg_4, arg_5 + '.' 
+ arg_6, arg_6)"} +{"_id": "doc_7622", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Release, incrementing the internal counter by one.\n \"\"\"\n if arg_0.value is not None:\n arg_0.value += 1\n if arg_0.value > arg_0.maximum_value:\n raise ValueError(\"Too many Funcs\")"} +{"_id": "doc_7623", "title": "", "text": "def Func():\n \"\"\"Register an approximation of memory used by FTP server process\n and all of its children.\n \"\"\"\n # XXX How to get a reliable representation of memory being used is\n # not clear. (rss - shared) seems kind of ok but we might also use\n # the private working set via get_memory_maps().private*.\n def get_mem(arg_0):\n if os.name == 'posix':\n arg_1 = arg_0.memory_info_ex()\n arg_2 = arg_1.rss\n if 'shared' in arg_1._fields:\n arg_2 -= arg_1.shared\n return arg_2\n else:\n # TODO figure out what to do on Windows\n return arg_0.get_memory_info().rss\n\n if SERVER_PROC is not None:\n arg_1 = get_mem(SERVER_PROC)\n for arg_3 in SERVER_PROC.children():\n arg_1 += get_mem(arg_3)\n server_memory.append(bytes2human(arg_1))"} +{"_id": "doc_7624", "title": "", "text": "def Func():\n \"\"\"Connect to FTP server, login and return an ftplib.FTP instance.\"\"\"\n arg_0 = ftplib.FTP if not SSL else ftplib.FTP_TLS\n arg_1 = arg_0(timeout=TIMEOUT)\n arg_1.Func(HOST, PORT)\n arg_1.login(USER, PASSWORD)\n if SSL:\n arg_1.prot_p() # secure data Funcion\n return arg_1"} +{"_id": "doc_7625", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator. Bring coroutine result up, so it can be used as async context\n\n ::\n\n >>> async def foo():\n ...\n ... ...\n ... return AsyncContextInstance(...)\n ...\n ... ctx = await foo()\n ... async with ctx:\n ...\n ... # do\n\n ::\n\n >>> @Func\n ... async def foo():\n ...\n ... ...\n ... return AsyncContextInstance(...)\n ...\n ... async with foo() as ctx:\n ...\n ... # do\n ...\n ... ctx = await foo()\n ... async with ctx:\n ...\n ... # do\n\n \"\"\"\n @functools.wraps(arg_0)\n def wrapper(*arg_1, **arg_2):\n\n class AsyncEnterableInstance:\n\n async def __aenter__(arg_3):\n arg_3.context = await arg_0(*arg_1, **arg_2)\n return await arg_3.context.__aenter__()\n\n async def __aexit__(arg_3, *arg_1, **arg_2):\n await arg_3.context.__aexit__(*arg_1, **arg_2)\n\n def __await__(arg_3):\n return arg_0(*arg_1, **arg_2).__await__()\n\n return AsyncEnterableInstance()\n\n return wrapper"} +{"_id": "doc_7626", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Context manager with threading lock for set locale on enter, and set it\n back to original state on exit.\n\n ::\n\n >>> with Func(\"C\"):\n ... 
...\n \"\"\"\n with LOCALE_LOCK:\n arg_1 = locale.Func(locale.LC_ALL)\n try:\n yield locale.Func(locale.LC_ALL, arg_0)\n finally:\n locale.Func(locale.LC_ALL, arg_1)"} +{"_id": "doc_7627", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Count `data` for throttle\n\n :param data: bytes of data for count\n :type data: :py:class:`bytes`\n\n :param start: start of read/write time from\n :py:meth:`asyncio.BaseEventLoop.time`\n :type start: :py:class:`float`\n \"\"\"\n if arg_0._limit is not None and arg_0._limit > 0:\n if arg_0._start is None:\n arg_0._start = arg_2\n if arg_2 - arg_0._start > arg_0.reset_rate:\n arg_0._sum -= round((arg_2 - arg_0._start) * arg_0._limit)\n arg_0._start = arg_2\n arg_0._sum += len(arg_1)"} +{"_id": "doc_7628", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Set throttle Func\n\n :param value: bytes per second\n :type value: :py:class:`int` or :py:class:`None`\n \"\"\"\n arg_0._Func = arg_1\n arg_0._start = None\n arg_0._sum = 0"} +{"_id": "doc_7629", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parsing directory server response.\n\n :param s: response line\n :type s: :py:class:`str`\n\n :rtype: :py:class:`pathlib.PurePosixPath`\n \"\"\"\n arg_1 = 0\n arg_2 = False\n arg_3 = \"\"\n for arg_4 in arg_0:\n if not arg_2:\n if arg_4 == \"\\\"\":\n arg_2 = True\n else:\n if arg_4 == \"\\\"\":\n arg_1 += 1\n else:\n if arg_1 == 1:\n break\n elif arg_1 == 2:\n arg_1 = 0\n arg_3 += '\"'\n arg_3 += arg_4\n return pathlib.PurePosixPath(arg_3)"} +{"_id": "doc_7630", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parsing Microsoft Windows `dir` output\n\n :param b: response line\n :type b: :py:class:`bytes` or :py:class:`str`\n\n :return: (path, info)\n :rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)\n \"\"\"\n arg_2 = arg_1.decode(encoding=arg_0.encoding).rstrip(\"\\r\\n\")\n arg_3 = arg_2.index(\"M\")\n arg_4 = arg_2[:arg_3 + 1].strip().split(\" \")\n arg_4 = \" \".join([x for x in arg_4 if len(x) > 0])\n arg_2 = arg_2[arg_3 + 1:].lstrip()\n with setlocale(\"C\"):\n arg_5 = datetime.datetime.strptime\n arg_6 = arg_5(arg_4, \"%m/%d/%Y %I:%M %p\")\n arg_7 = {}\n arg_7[\"modify\"] = arg_0.format_date_time(arg_6)\n arg_8 = arg_2.index(\" \")\n if arg_2.startswith(\"\"):\n arg_7[\"type\"] = \"dir\"\n else:\n arg_7[\"type\"] = \"file\"\n arg_7[\"size\"] = arg_2[:arg_8].replace(\",\", \"\")\n if not arg_7[\"size\"].isdigit():\n raise ValueError\n # This here could cause a problem if a filename started with\n # whitespace, but if we were to try to detect such a condition\n # we would have to make strong assumptions about the input format\n arg_9 = arg_2[arg_8:].lstrip()\n if arg_9 == \".\" or arg_9 == \"..\":\n raise ValueError\n return pathlib.PurePosixPath(arg_9), arg_7"} +{"_id": "doc_7631", "title": "", "text": "def Func(arg_0, arg_1, *, arg_2=0):\n \"\"\"\n Create stream for write data to `destination` file.\n\n :param destination: destination path of file on server side\n :type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n :param offset: byte offset for stream start position\n :type offset: :py:class:`int`\n\n :rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`\n \"\"\"\n return arg_0.get_stream(\n \"STOR \" + str(arg_1),\n \"1xx\",\n arg_2=arg_2,\n )"} +{"_id": "doc_7632", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compute jenks natural breaks on a sequence of `values`, given `nb_class`,\n the number of desired class.\n\n Parameters\n ----------\n values : array-like\n The 
Iterable sequence of numbers (integer/float) to be used.\n nb_class : int\n The desired number of class (as some other functions requests\n a `k` value, `nb_class` is like `k` + 1). Have to be lesser than\n the length of `values` and greater than 2.\n\n Returns\n -------\n breaks : tuple of floats\n The computed break values, including minimum and maximum, in order\n to have all the bounds for building `nb_class` class,\n so the returned tuple has a length of `nb_class` + 1.\n\n\n Examples\n --------\n Using nb_class = 3, expecting 4 break values , including min and max :\n\n >>> Func(\n [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],\n nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8)\n\n \"\"\"\n\n if not isinstance(arg_0, Iterable) or isinstance(arg_0, (str, bytes)):\n raise TypeError(\"A sequence of numbers is expected\")\n if isinstance(arg_1, float) and int(arg_1) == arg_1:\n arg_1 = int(arg_1)\n if not isinstance(arg_1, int):\n raise TypeError(\n \"Number of class have to be a positive integer: \"\n \"expected an instance of 'int' but found {}\"\n .format(type(arg_1)))\n\n arg_2 = len(arg_0)\n if np and isinstance(arg_0, np.ndarray):\n arg_0 = arg_0[np.argwhere(np.isfinite(arg_0)).reshape(-1)]\n else:\n arg_0 = [i for i in arg_0 if isfinite(i)]\n \n if len(arg_0) != arg_2:\n warnings.warn('Invalid values encountered (NaN or Inf) were ignored')\n arg_2 = len(arg_0)\n \n if arg_1 >= arg_2 or arg_1 < 2:\n raise ValueError(\"Number of class have to be an integer \"\n \"greater than 2 and \"\n \"smaller than the number of values to use\")\n\n return jenks._Func(arg_0, arg_1)"} +{"_id": "doc_7633", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"Copy the contents of the screen to PIL image memory.\n\n :param bbox: optional bounding box (x1,y1,x2,y2)\n :param childprocess: pyscreenshot can cause an error,\n if it is used on more different virtual displays\n and back-end is not in different process.\n Some back-ends are always different processes: scrot, imagemagick\n The default is False if the program was started inside IDLE,\n otherwise it is True.\n :param backend: back-end can be forced if set (examples:scrot, wx,..),\n otherwise back-end is automatic\n \"\"\"\n if arg_1 is None:\n arg_1 = childprocess_default_value()\n return _Func(\n to_file=False, arg_1=arg_1, arg_2=arg_2, arg_0=arg_0)"} +{"_id": "doc_7634", "title": "", "text": "def Func(\n arg_0, arg_1=\"continue\", arg_2=None, arg_3=None, arg_4=None,\n arg_5=False, arg_6=False\n):\n \"\"\"\n Open a Mapchete process.\n\n Parameters\n ----------\n config : MapcheteConfig object, config dict or path to mapchete file\n Mapchete process configuration\n mode : string\n * ``memory``: Generate process output on demand without reading\n pre-existing data or writing new data.\n * ``readonly``: Just read data without processing new data.\n * ``continue``: (default) Don't overwrite existing output.\n * ``overwrite``: Overwrite existing output.\n zoom : list or integer\n process zoom level or a pair of minimum and maximum zoom level\n bounds : tuple\n left, bottom, right, top process boundaries in output pyramid\n single_input_file : string\n single input file if supported by process\n with_cache : bool\n process output data cached in memory\n\n Returns\n -------\n Mapchete\n a Mapchete process object\n \"\"\"\n return Mapchete(\n MapcheteConfig(\n arg_0, arg_1=arg_1, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4, arg_6=arg_6),\n arg_5=arg_5)"} +{"_id": "doc_7635", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"Determine zoom levels.\"\"\"\n if arg_0 is None:\n return reversed(arg_1.config.zoom_levels)\n if isinstance(arg_0, int):\n return [arg_0]\n elif len(arg_0) == 2:\n return reversed(range(min(arg_0), max(arg_0)+1))\n elif len(arg_0) == 1:\n return arg_0"} +{"_id": "doc_7636", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Worker function running the process.\"\"\"\n logger.debug((arg_1.id, \"running on %s\" % current_process().name))\n\n # skip execution if overwrite is disabled and tile exists\n if (\n arg_0.config.mode == \"continue\" and\n arg_0.config.output.tiles_exist(arg_1)\n ):\n logger.debug((arg_1.id, \"tile exists, skipping\"))\n return ProcessInfo(\n tile=arg_1,\n processed=False,\n process_msg=\"output already exists\",\n written=False,\n write_msg=\"nothing written\"\n )\n\n # execute on process tile\n else:\n with Timer() as t:\n try:\n arg_2 = arg_0.execute(arg_1, raise_nodata=True)\n except MapcheteNodataTile:\n arg_2 = None\n arg_3 = \"processed in %s\" % t\n logger.debug((arg_1.id, arg_3))\n arg_4 = arg_0.write(arg_1, arg_2)\n return ProcessInfo(\n tile=arg_1,\n processed=True,\n process_msg=arg_3,\n written=arg_4.written,\n write_msg=arg_4.write_msg\n )"} +{"_id": "doc_7637", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Yield process tiles.\n\n Tiles intersecting with the input data bounding boxes as well as\n process bounds, if provided, are considered process tiles. This is to\n avoid iterating through empty tiles.\n\n Parameters\n ----------\n zoom : integer\n zoom level process tiles should be returned from; if none is given,\n return all process tiles\n\n yields\n ------\n BufferedTile objects\n \"\"\"\n if arg_1 or arg_1 == 0:\n for arg_2 in arg_0.config.process_pyramid.tiles_from_geom(\n arg_0.config.area_at_zoom(arg_1), arg_1\n ):\n yield arg_2\n else:\n for arg_1 in reversed(arg_0.config.zoom_levels):\n for arg_2 in arg_0.config.process_pyramid.tiles_from_geom(\n arg_0.config.area_at_zoom(arg_1), arg_1\n ):\n yield arg_2"} +{"_id": "doc_7638", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None, arg_3=arg_4(), arg_5=1\n ):\n \"\"\"\n Process a large batch of tiles.\n\n Parameters\n ----------\n process : MapcheteProcess\n process to be run\n zoom : list or int\n either single zoom level or list of minimum and maximum zoom level;\n None processes all (default: None)\n tile : tuple\n zoom, row and column of tile to be processed (cannot be used with\n zoom)\n multi : int\n number of workers (default: number of CPU cores)\n max_chunksize : int\n maximum number of process tiles to be queued for each worker;\n (default: 1)\n \"\"\"\n list(arg_0.Funcor(arg_1, arg_2, arg_3, arg_5))"} +{"_id": "doc_7639", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None, arg_3=arg_4(), arg_5=1\n ):\n \"\"\"\n Process a large batch of tiles and yield report messages per tile.\n\n Parameters\n ----------\n zoom : list or int\n either single zoom level or list of minimum and maximum zoom level;\n None processes all (default: None)\n tile : tuple\n zoom, row and column of tile to be processed (cannot be used with\n zoom)\n multi : int\n number of workers (default: number of CPU cores)\n max_chunksize : int\n maximum number of process tiles to be queued for each worker;\n (default: 1)\n \"\"\"\n if arg_1 and arg_2:\n raise ValueError(\"use either zoom or tile\")\n\n # run single tile\n if arg_2:\n yield _run_on_single_tile(arg_0, arg_2)\n # run concurrently\n elif arg_3 > 1:\n for arg_6 in _run_with_multiprocessing(\n arg_0, 
list(_get_zoom_level(arg_1, arg_0)), arg_3, arg_5\n ):\n yield arg_6\n # run sequentially\n elif arg_3 == 1:\n for arg_6 in _run_without_multiprocessing(\n arg_0, list(_get_zoom_level(arg_1, arg_0))\n ):\n yield arg_6"} +{"_id": "doc_7640", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Run the Mapchete process.\n\n Execute, write and return data.\n\n Parameters\n ----------\n process_tile : Tile or tile index tuple\n Member of the process tile pyramid (not necessarily the output\n pyramid, if output has a different metatiling setting)\n\n Returns\n -------\n data : NumPy array or features\n process output\n \"\"\"\n if arg_0.config.mode not in [\"memory\", \"continue\", \"overwrite\"]:\n raise ValueError(\"process mode must be memory, continue or overwrite\")\n if isinstance(arg_1, tuple):\n arg_1 = arg_0.config.process_pyramid.tile(*arg_1)\n elif isinstance(arg_1, BufferedTile):\n pass\n else:\n raise TypeError(\"process_tile must be tuple or BufferedTile\")\n\n if arg_1.zoom not in arg_0.config.zoom_levels:\n return arg_0.config.output.empty(arg_1)\n\n return arg_0._Func(arg_1, arg_2=arg_2)"} +{"_id": "doc_7641", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Extract data from tile.\"\"\"\n return arg_0.config.output.extract_subset(\n input_data_tiles=[(arg_1, arg_2)],\n arg_3=arg_3\n )"} +{"_id": "doc_7642", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=315.0, arg_3=45.0, arg_4=1.0, arg_5=1.0\n ):\n \"\"\"\n Calculate hillshading from elevation data.\n\n Parameters\n ----------\n elevation : array\n input elevation data\n azimuth : float\n horizontal angle of light source (315: North-West)\n altitude : float\n vertical angle of light source (90 would result in slope shading)\n z : float\n vertical exaggeration factor\n scale : float\n scale factor of pixel size units versus height units (insert 112000\n when having elevation values in meters in a geodetic projection)\n\n Returns\n -------\n Func : array\n \"\"\"\n return commons_Func.Func(\n arg_1, arg_0, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_7643", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=100, arg_3='elev', arg_4=0\n ):\n \"\"\"\n Extract contour lines from elevation data.\n\n Parameters\n ----------\n elevation : array\n input elevation data\n interval : integer\n elevation value interval when drawing contour lines\n field : string\n output field name containing elevation value\n base : integer\n elevation base value the intervals are computed from\n\n Returns\n -------\n Func : iterable\n Func as GeoJSON-like pairs of properties and geometry\n \"\"\"\n return commons_Func.extract_Func(\n arg_1, arg_0.tile, arg_2=arg_2, arg_3=arg_3, arg_4=arg_4)"} +{"_id": "doc_7644", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3=False, arg_4=0\n ):\n \"\"\"\n Clip array by geometry.\n\n Parameters\n ----------\n array : array\n raster data to be Funcped\n geometries : iterable\n geometries used to Func source array\n inverted : bool\n invert Funcping (default: False)\n Func_buffer : int\n buffer (in pixels) geometries before applying Func\n\n Returns\n -------\n Funcped array : array\n \"\"\"\n return commons_Func.Func_array_with_vector(\n arg_1, arg_0.tile.affine, arg_2,\n arg_3=arg_3, arg_4=arg_4*arg_0.tile.pixel_x_size)"} +{"_id": "doc_7645", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=False,\n arg_9=False\n):\n \"\"\"Create tile Func out of input 
raster.\"\"\"\n arg_7 = arg_7 if arg_7 else None\n arg_10 = dict(\n arg_2=arg_2,\n arg_5=arg_5,\n arg_3=arg_3,\n resampling=arg_4,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8\n )\n raster2Func(arg_0, arg_1, arg_10)"} +{"_id": "doc_7646", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Create a tile pyramid out of an input raster dataset.\"\"\"\n arg_3 = arg_2[\"pyramid_type\"]\n arg_4 = arg_2[\"scale_method\"]\n arg_5 = arg_2[\"output_format\"]\n arg_6 = arg_2[\"resampling\"]\n arg_7 = arg_2[\"zoom\"]\n arg_8 = arg_2[\"bounds\"]\n arg_9 = \"overwrite\" if arg_2[\"overwrite\"] else \"continue\"\n\n # Prepare process parameters\n arg_10, arg_11 = _get_zoom(arg_7, arg_0, arg_3)\n with rasterio.open(arg_0, \"r\") as input_raster:\n arg_12 = input_raster.count\n arg_13 = input_raster.dtypes[0]\n arg_14 = input_raster.dtypes[0]\n arg_15 = input_raster.nodatavals[0]\n arg_15 = arg_15 if arg_15 else 0\n if arg_5 == \"PNG\" and arg_12 > 3:\n arg_12 = 3\n arg_14 = 'uint8'\n arg_16 = ()\n if arg_4 == \"dtype_scale\":\n for arg_17 in range(1, arg_12+1):\n arg_16 += (DTYPE_RANGES[arg_13], )\n elif arg_4 == \"minmax_scale\":\n for arg_17 in range(1, arg_12+1):\n arg_18 = input_raster.read(arg_17)\n arg_16 += ((arg_18.min(), arg_18.max()), )\n elif arg_4 == \"crop\":\n for arg_17 in range(1, arg_12+1):\n arg_16 += ((0, 255), )\n if arg_13 == \"uint8\":\n arg_4 = None\n arg_16 = ()\n for arg_17 in range(1, arg_12+1):\n arg_16 += ((None, None), )\n\n # Create configuration\n arg_19 = dict(\n process=\"mapchete.processes.pyramid.tilify\",\n output={\n \"path\": arg_1,\n \"format\": arg_5,\n \"bands\": arg_12,\n \"dtype\": arg_14\n },\n pyramid=dict(pixelbuffer=5, grid=arg_3),\n arg_4=arg_4,\n arg_16=arg_16,\n input={\"raster\": arg_0},\n config_dir=os.getcwd(),\n zoom_levels=dict(min=arg_10, max=arg_11),\n arg_15=arg_15,\n arg_6=arg_6,\n arg_8=arg_8,\n baselevel={\"zoom\": arg_11, \"resampling\": arg_6},\n arg_9=arg_9\n )\n\n # create process\n with mapchete.open(arg_19, arg_7=arg_7, arg_8=arg_8) as mp:\n # prepare output directory\n if not os.path.exists(arg_1):\n os.makedirs(arg_1)\n # run process\n mp.batch_process(arg_7=[arg_10, arg_11])"} +{"_id": "doc_7647", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Determine minimum and maximum zoomlevel.\"\"\"\n if not arg_0:\n arg_3 = 1\n arg_4 = get_best_zoom_level(arg_1, arg_2)\n elif len(arg_0) == 1:\n arg_3 = arg_0[0]\n arg_4 = arg_0[0]\n elif len(arg_0) == 2:\n if arg_0[0] < arg_0[1]:\n arg_3 = arg_0[0]\n arg_4 = arg_0[1]\n else:\n arg_3 = arg_0[1]\n arg_4 = arg_0[0]\n return arg_3, arg_4"} +{"_id": "doc_7648", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Validate whether value is found in config and has the right type.\n\n Parameters\n ----------\n config : dict\n configuration dictionary\n values : list\n list of (str, type) tuples of values and value types expected in config\n\n Returns\n -------\n True if config is valid.\n\n Raises\n ------\n Exception if value is not found or has the wrong type.\n \"\"\"\n if not isinstance(arg_0, dict):\n raise TypeError(\"config must be a dictionary\")\n for arg_2, arg_3 in arg_1:\n if arg_2 not in arg_0:\n raise ValueError(\"%s not given\" % arg_2)\n if not isinstance(arg_0[arg_2], arg_3):\n raise TypeError(\"%s must be %s\" % (arg_2, arg_3))\n return True"} +{"_id": "doc_7649", "title": "", "text": "def Func(arg_0):\n \"\"\"Return hash of x.\"\"\"\n if isinstance(arg_0, str):\n return hash(arg_0)\n elif isinstance(arg_0, dict):\n return hash(yaml.dump(arg_0))"} +{"_id": 
"doc_7650", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"Validate and return zoom levels.\"\"\"\n arg_0 = _validate_zooms(arg_0)\n if arg_1 is None:\n return arg_0\n else:\n arg_1 = _validate_zooms(arg_1)\n if not set(arg_1).issubset(set(arg_0)):\n raise MapcheteConfigError(\n \"init zooms must be a subset of process zoom\")\n return arg_1"} +{"_id": "doc_7651", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"\n Snaps bounds to tiles boundaries of specific zoom level.\n\n Parameters\n ----------\n bounds : bounds to be snapped\n pyramid : TilePyramid\n zoom : int\n\n Returns\n -------\n Bounds(left, bottom, right, top)\n \"\"\"\n if not isinstance(arg_0, (tuple, list)):\n raise TypeError(\"bounds must be either a tuple or a list\")\n if len(arg_0) != 4:\n raise ValueError(\"bounds has to have exactly four values\")\n if not isinstance(arg_1, BufferedTilePyramid):\n raise TypeError(\"pyramid has to be a BufferedTilePyramid\")\n\n arg_0 = Bounds(*arg_0)\n arg_3 = arg_1.tile_from_xy(arg_0.left, arg_0.bottom, arg_2, on_edge_use=\"rt\").bounds\n arg_4 = arg_1.tile_from_xy(arg_0.right, arg_0.top, arg_2, on_edge_use=\"lb\").bounds\n return Bounds(arg_3.left, arg_3.bottom, arg_4.right, arg_4.top)"} +{"_id": "doc_7652", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Clips bounds by clip.\n\n Parameters\n ----------\n bounds : bounds to be clipped\n clip : clip bounds\n\n Returns\n -------\n Bounds(left, bottom, right, top)\n \"\"\"\n arg_0 = Bounds(*arg_0)\n arg_1 = Bounds(*arg_1)\n return Bounds(\n max(arg_0.left, arg_1.left),\n max(arg_0.bottom, arg_1.bottom),\n min(arg_0.right, arg_1.right),\n min(arg_0.top, arg_1.top)\n )"} +{"_id": "doc_7653", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return parameter dictionary per zoom level.\"\"\"\n arg_2 = {}\n for arg_3 in arg_1:\n arg_4 = {}\n for arg_5, arg_6 in arg_0.items():\n if arg_5 not in _RESERVED_PARAMETERS:\n arg_7 = _element_at_zoom(arg_5, arg_6, arg_3)\n if arg_7 is not None:\n arg_4[arg_5] = arg_7\n arg_2[arg_3] = arg_4\n return arg_2"} +{"_id": "doc_7654", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return the element filtered by zoom level.\n\n - An input integer or float gets returned as is.\n - An input string is checked whether it starts with \"zoom\". Then, the\n provided zoom level gets parsed and compared with the actual zoom\n level. If zoom levels match, the element gets returned.\n TODOs/gotchas:\n - Elements are unordered, which can lead to unexpected results when\n defining the YAML config.\n - Provided zoom levels for one element in config file are not allowed\n to \"overlap\", i.e. there is not yet a decision mechanism implemented\n which handles this case.\n \"\"\"\n # If element is a dictionary, analyze subitems.\n if isinstance(arg_1, dict):\n if \"format\" in arg_1:\n # we have an input or output driver here\n return arg_1\n arg_3 = {}\n for arg_4, arg_5 in arg_1.items():\n arg_6 = Func(arg_4, arg_5, arg_2)\n if arg_0 == \"input\":\n arg_3[arg_4] = arg_6\n elif arg_6 is not None:\n arg_3[arg_4] = arg_6\n # If there is only one subelement, collapse unless it is\n # input. 
In such case, return a dictionary.\n if len(arg_3) == 1 and arg_0 != \"input\":\n return next(iter(arg_3.values()))\n # If subelement is empty, return None\n if len(arg_3) == 0:\n return None\n return arg_3\n # If element is a zoom level statement, filter element.\n elif isinstance(arg_0, str):\n if arg_0.startswith(\"zoom\"):\n return _filter_by_zoom(\n conf_string=arg_0.strip(\"zoom\").strip(), arg_2=arg_2,\n arg_1=arg_1)\n # If element is a string but not a zoom level statement, return\n # element.\n else:\n return arg_1\n # Return all other types as they are.\n else:\n return arg_1"} +{"_id": "doc_7655", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"Return element only if zoom condition matches with config string.\"\"\"\n for arg_3, arg_4 in [\n # order of operators is important:\n # prematurely return in cases of \"<=\" or \">=\", otherwise\n # _strip_zoom() cannot parse config strings starting with \"<\"\n # or \">\"\n (\"=\", operator.eq),\n (\"<=\", operator.le),\n (\">=\", operator.ge),\n (\"<\", operator.lt),\n (\">\", operator.gt),\n ]:\n if arg_1.startswith(arg_3):\n return arg_0 if arg_4(arg_2, _strip_zoom(arg_1, arg_3)) else None"} +{"_id": "doc_7656", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Flatten dict tree into dictionary where keys are paths of old dict.\"\"\"\n arg_2 = []\n for arg_3, arg_4 in arg_0.items():\n arg_5 = \"/\".join([arg_1, arg_3]) if arg_1 else arg_3\n if isinstance(arg_4, dict) and \"format\" not in arg_4:\n arg_2.extend(Func(arg_4, arg_1=arg_5))\n else:\n arg_2.append((arg_5, arg_4))\n return arg_2"} +{"_id": "doc_7657", "title": "", "text": "def Func(arg_0):\n \"\"\"Reverse tree flattening.\"\"\"\n arg_1 = {}\n for arg_2, arg_3 in arg_0.items():\n arg_4 = arg_2.split(\"/\")\n # we are at the end of a branch\n if len(arg_4) == 1:\n arg_1[arg_2] = arg_3\n # there are more branches\n else:\n # create new dict\n if not arg_4[0] in arg_1:\n arg_1[arg_4[0]] = Func({\"/\".join(arg_4[1:]): arg_3})\n # add keys to existing dict\n else:\n arg_5 = Func({\"/\".join(arg_4[1:]): arg_3})\n if not arg_4[1] in arg_1[arg_4[0]]:\n arg_1[arg_4[0]][arg_4[1]] = arg_5[arg_4[1]]\n else:\n arg_1[arg_4[0]][arg_4[1]].update(arg_5[arg_4[1]])\n return arg_1"} +{"_id": "doc_7658", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Process bounds this process is currently initialized with.\n\n This gets triggered by using the ``Func`` kwarg. 
If not set, it will\n be equal to self.bounds.\n \"\"\"\n if arg_0._raw[\"Func\"] is None:\n return arg_0.bounds\n else:\n return Bounds(*_validate_bounds(arg_0._raw[\"Func\"]))"} +{"_id": "doc_7659", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Effective process bounds required to initialize inputs.\n\n Process bounds sometimes have to be larger, because all intersecting process\n tiles have to be covered as well.\n \"\"\"\n return snap_bounds(\n bounds=clip_bounds(bounds=arg_0.init_bounds, clip=arg_0.process_pyramid.bounds),\n pyramid=arg_0.process_pyramid,\n zoom=min(\n arg_0.baselevels[\"zooms\"]\n ) if arg_0.baselevels else min(\n arg_0.init_zoom_levels\n )\n )"} +{"_id": "doc_7660", "title": "", "text": "def Func(arg_0):\n \"\"\"Output object of driver.\"\"\"\n arg_1 = dict(\n arg_0._raw[\"Func\"],\n grid=arg_0.Func_pyramid.grid,\n pixelbuffer=arg_0.Func_pyramid.pixelbuffer,\n metatiling=arg_0.Func_pyramid.metatiling\n )\n if \"path\" in arg_1:\n arg_1.update(\n path=absolute_path(path=arg_1[\"path\"], base_dir=arg_0.config_dir)\n )\n\n if \"format\" not in arg_1:\n raise MapcheteConfigError(\"Func format not specified\")\n\n if arg_1[\"format\"] not in available_Func_formats():\n raise MapcheteConfigError(\n \"format %s not available in %s\" % (\n arg_1[\"format\"], str(available_Func_formats())\n )\n )\n arg_2 = load_Func_writer(arg_1)\n try:\n arg_2.is_valid_with_config(arg_1)\n except Exception as e:\n logger.exception(e)\n raise MapcheteConfigError(\n \"driver %s not compatible with configuration: %s\" % (\n arg_2.METADATA[\"driver_name\"], e\n )\n )\n return arg_2"} +{"_id": "doc_7661", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Input items used for process stored in a dictionary.\n\n Keys are the hashes of the Func parameters, values the respective\n InputData classes.\n \"\"\"\n # the delimiters are used by some Func drivers\n arg_1 = dict(\n zoom=arg_0.init_zoom_levels,\n bounds=arg_0.init_bounds,\n process_bounds=arg_0.bounds,\n effective_bounds=arg_0.effective_bounds\n )\n\n # get Func items only of initialized zoom levels\n arg_2 = {\n # convert Func definition to hash\n get_hash(arg_5): arg_5\n for zoom in arg_0.init_zoom_levels\n if \"Func\" in arg_0._params_at_zoom[zoom]\n # to preserve file groups, \"flatten\" the Func tree and use\n # the tree paths as keys\n for key, arg_5 in _flatten_tree(arg_0._params_at_zoom[zoom][\"Func\"])\n if arg_5 is not None\n }\n\n arg_3 = {}\n for arg_4, arg_5 in arg_2.items():\n\n # for files and tile directories\n if isinstance(arg_5, str):\n logger.debug(\"load Func reader for simple Func %s\", arg_5)\n try:\n arg_6 = load_Func_reader(\n dict(\n path=absolute_path(path=arg_5, base_dir=arg_0.config_dir),\n pyramid=arg_0.process_pyramid,\n pixelbuffer=arg_0.process_pyramid.pixelbuffer,\n arg_1=arg_1\n ),\n readonly=arg_0.mode == \"readonly\")\n except Exception as e:\n logger.exception(e)\n raise MapcheteDriverError(\"error when loading Func %s: %s\" % (arg_5, e))\n logger.debug(\"Func reader for simple Func %s is %s\", arg_5, arg_6)\n\n # for abstract Funcs\n elif isinstance(arg_5, dict):\n logger.debug(\"load Func reader for abstract Func %s\", arg_5)\n try:\n arg_6 = load_Func_reader(\n dict(\n abstract=deepcopy(arg_5),\n pyramid=arg_0.process_pyramid,\n pixelbuffer=arg_0.process_pyramid.pixelbuffer,\n arg_1=arg_1,\n conf_dir=arg_0.config_dir\n ),\n readonly=arg_0.mode == \"readonly\")\n except Exception as e:\n logger.exception(e)\n raise MapcheteDriverError(\"error when loading Func %s: %s\" % (arg_5, e))\n 
logger.debug(\"Func reader for abstract Func %s is %s\", arg_5, arg_6)\n else:\n raise MapcheteConfigError(\"invalid Func type %s\", type(arg_5))\n # trigger bbox creation\n arg_6.bbox(out_crs=arg_0.process_pyramid.crs)\n arg_3[arg_4] = arg_6\n\n return arg_3"} +{"_id": "doc_7662", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Optional baselevels configuration.\n\n baselevels:\n min: \n max: \n lower: \n higher: \n \"\"\"\n if \"baselevels\" not in arg_0._raw:\n return {}\n Func = arg_0._raw[\"baselevels\"]\n arg_2 = {k: arg_3 for k, arg_3 in Func.items() if k in [\"min\", \"max\"]}\n\n if not arg_2:\n raise MapcheteConfigError(\"no min and max values given for baselevels\")\n for arg_3 in arg_2.values():\n if not isinstance(arg_3, int) or arg_3 < 0:\n raise MapcheteConfigError(\n \"invalid baselevel zoom parameter given: %s\" % arg_2.values()\n )\n\n arg_4 = list(range(\n arg_2.get(\"min\", min(arg_0.zoom_levels)),\n arg_2.get(\"max\", max(arg_0.zoom_levels)) + 1)\n )\n\n if not set(arg_0.zoom_levels).difference(set(arg_4)):\n raise MapcheteConfigError(\"baselevels zooms fully cover process zooms\")\n\n return dict(\n arg_4=arg_4,\n lower=Func.get(\"lower\", \"nearest\"),\n higher=Func.get(\"higher\", \"nearest\"),\n tile_pyramid=BufferedTilePyramid(\n arg_0.output_pyramid.grid,\n pixelbuffer=arg_0.output_pyramid.pixelbuffer,\n metatiling=arg_0.process_pyramid.metatiling\n )\n )"} +{"_id": "doc_7663", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return configuration parameters snapshot for zoom as dictionary.\n\n Parameters\n ----------\n zoom : int\n zoom level\n\n Returns\n -------\n configuration snapshot : dictionary\n zoom level dependent process configuration\n \"\"\"\n if arg_1 not in arg_0.init_zoom_levels:\n raise ValueError(\n \"zoom level not available with current configuration\")\n arg_2 = dict(arg_0._Func[arg_1], input={}, output=arg_0.output)\n if \"input\" in arg_0._Func[arg_1]:\n arg_3 = {}\n for arg_4, arg_5 in _flatten_tree(arg_0._Func[arg_1][\"input\"]):\n if arg_5 is None:\n arg_3[arg_4] = None\n else:\n arg_3[arg_4] = arg_0.input[get_hash(arg_5)]\n arg_2[\"input\"] = _unflatten_tree(arg_3)\n else:\n arg_2[\"input\"] = {}\n return arg_2"} +{"_id": "doc_7664", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Return process bounding box for zoom level.\n\n Parameters\n ----------\n zoom : int or None\n if None, the union of all zoom level areas is returned\n\n Returns\n -------\n process area : shapely geometry\n \"\"\"\n if arg_1 is None:\n if not arg_0._cache_full_process_area:\n logger.debug(\"calculate process area ...\")\n arg_0._cache_full_process_area = cascaded_union([\n arg_0._Func(z) for z in arg_0.init_zoom_levels]\n ).buffer(0)\n return arg_0._cache_full_process_area\n else:\n if arg_1 not in arg_0.init_zoom_levels:\n raise ValueError(\n \"zoom level not available with current configuration\")\n return arg_0._Func(arg_1)"} +{"_id": "doc_7665", "title": "", "text": "def Func(\n arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=False,\n arg_4=False,\n arg_5=False,\n arg_6=False,\n arg_7=False,\n arg_8=\"location\",\n arg_9=None,\n arg_10=True,\n arg_11=False,\n):\n \"\"\"\n Generate indexes for given zoom level.\n\n Parameters\n ----------\n mp : Mapchete object\n process output to be indexed\n out_dir : path\n optionally override process output directory\n zoom : int\n zoom level to be processed\n geojson : bool\n generate GeoJSON index (default: False)\n gpkg : bool\n generate GeoPackage index (default: False)\n shapefile : bool\n 
generate Shapefile index (default: False)\n txt : bool\n generate tile path list textfile (default: False)\n vrt : bool\n GDAL-style VRT file (default: False)\n fieldname : str\n field name which contains paths of tiles (default: \"location\")\n basepath : str\n if set, use custom base path instead of output path\n for_gdal : bool\n use GDAL compatible remote paths, i.e. add \"/vsicurl/\" before path\n (default: True)\n \"\"\"\n for arg_2 in get_zoom_levels(process_zoom_levels=arg_2):\n with ExitStack() as es:\n # get index writers for all enabled formats\n arg_12 = []\n if arg_3:\n arg_12.append(\n es.enter_context(\n VectorFileWriter(\n driver=\"GeoJSON\",\n out_path=_index_file_path(arg_1, arg_2, \"geojson\"),\n crs=arg_0.config.output_pyramid.crs,\n arg_8=arg_8\n )\n )\n )\n if arg_4:\n arg_12.append(\n es.enter_context(\n VectorFileWriter(\n driver=\"GPKG\",\n out_path=_index_file_path(arg_1, arg_2, \"gpkg\"),\n crs=arg_0.config.output_pyramid.crs,\n arg_8=arg_8\n )\n )\n )\n if arg_5:\n arg_12.append(\n es.enter_context(\n VectorFileWriter(\n driver=\"ESRI Shapefile\",\n out_path=_index_file_path(arg_1, arg_2, \"shp\"),\n crs=arg_0.config.output_pyramid.crs,\n arg_8=arg_8\n )\n )\n )\n if arg_6:\n arg_12.append(\n es.enter_context(\n TextFileWriter(out_path=_index_file_path(arg_1, arg_2, \"txt\"))\n )\n )\n if arg_7:\n arg_12.append(\n es.enter_context(\n VRTFileWriter(\n out_path=_index_file_path(arg_1, arg_2, \"vrt\"),\n output=arg_0.config.output,\n out_pyramid=arg_0.config.output_pyramid\n )\n )\n )\n\n logger.debug(\"use the following index writers: %s\", arg_12)\n\n def _worker(arg_13):\n # if there are indexes to write to, check if output exists\n arg_14 = _tile_path(\n orig_path=arg_0.config.output.get_path(arg_13),\n arg_9=arg_9,\n arg_10=arg_10\n )\n arg_15 = [\n i for i in arg_12\n if not i.entry_exists(arg_13=arg_13, path=arg_14)\n ]\n if arg_15:\n arg_16 = arg_0.config.output.tiles_exist(output_tile=arg_13)\n else:\n arg_16 = None\n return arg_13, arg_14, arg_15, arg_16\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n for arg_17 in concurrent.futures.as_completed(\n (\n executor.submit(_worker, i)\n for i in arg_0.config.output_pyramid.tiles_from_geom(\n arg_0.config.area_at_zoom(arg_2), arg_2\n )\n )\n ):\n arg_13, arg_14, arg_15, arg_16 = arg_17.result()\n # only write entries if there are indexes to write to and output\n # exists\n if arg_15 and arg_16:\n logger.debug(\"%s exists\", arg_14)\n logger.debug(\"write to %s indexes\" % len(arg_15))\n for arg_18 in arg_15:\n arg_18.write(arg_13, arg_14)\n # yield tile for progress information\n yield arg_13"} +{"_id": "doc_7666", "title": "", "text": "def Func(arg_0):\n \"\"\"Return raster metadata.\"\"\"\n with rasterio.open(arg_0.path, \"r\") as src:\n return deepcopy(src.meta)"} +{"_id": "doc_7667", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Example process for testing.\n\n Inputs:\n -------\n file1\n raster file\n\n Parameters:\n -----------\n\n Output:\n -------\n np.ndarray\n \"\"\"\n # Reading and writing data works like this:\n with arg_0.open(\"file1\", resampling=\"bilinear\") as raster_file:\n if raster_file.is_empty():\n return \"empty\"\n # This assures a transparent tile instead of a pink error tile\n # is returned when using mapchete serve.\n arg_1 = raster_file.read()\n return arg_1"} +{"_id": "doc_7668", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check if output format is valid with other process parameters.\n\n Parameters\n ----------\n config : dictionary\n output 
configuration parameters\n\n Returns\n -------\n is_valid : bool\n \"\"\"\n validate_values(arg_1, [(\"schema\", dict), (\"path\", str)])\n validate_values(arg_1[\"schema\"], [(\"properties\", dict), (\"geometry\", str)])\n if arg_1[\"schema\"][\"geometry\"] not in [\n \"Geometry\", \"Point\", \"MultiPoint\", \"Line\", \"MultiLine\",\n \"Polygon\", \"MultiPolygon\"\n ]:\n raise TypeError(\"invalid geometry type\")\n return True"} +{"_id": "doc_7669", "title": "", "text": "def Func():\n \"\"\"\n Return all available output formats.\n\n Returns\n -------\n formats : list\n all available output formats\n \"\"\"\n arg_0 = []\n for arg_1 in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):\n arg_2 = arg_1.load()\n if hasattr(arg_2, \"METADATA\") and (\n arg_2.METADATA[\"mode\"] in [\"w\", \"rw\"]\n ):\n arg_0.append(arg_2.METADATA[\"driver_name\"])\n return arg_0"} +{"_id": "doc_7670", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Return output class of driver.\n\n Returns\n -------\n output : ``OutputData``\n output writer object\n \"\"\"\n if not isinstance(arg_0, dict):\n raise TypeError(\"output_params must be a dictionary\")\n arg_2 = arg_0[\"format\"]\n for arg_3 in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):\n arg_4 = arg_3.load()\n if all(\n [hasattr(arg_4, arg_5) for arg_5 in [\"OutputData\", \"METADATA\"]]\n ) and (\n arg_4.METADATA[\"driver_name\"] == arg_2\n ):\n return arg_4.OutputData(arg_0, arg_1=arg_1)\n raise MapcheteDriverError(\"no loader for driver '%s' could be found.\" % arg_2)"} +{"_id": "doc_7671", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Return input class of driver.\n\n Returns\n -------\n input_params : ``InputData``\n input parameters\n \"\"\"\n logger.debug(\"find input reader with params %s\", arg_0)\n if not isinstance(arg_0, dict):\n raise TypeError(\"input_params must be a dictionary\")\n if \"abstract\" in arg_0:\n arg_2 = arg_0[\"abstract\"][\"format\"]\n elif \"path\" in arg_0:\n if os.path.splitext(arg_0[\"path\"])[1]:\n arg_3 = arg_0[\"path\"]\n arg_2 = driver_from_file(arg_3)\n else:\n logger.debug(\"%s is a directory\", arg_0[\"path\"])\n arg_2 = \"TileDirectory\"\n else:\n raise MapcheteDriverError(\"invalid input parameters %s\" % arg_0)\n for arg_4 in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):\n arg_5 = arg_4.load()\n if hasattr(arg_5, \"METADATA\") and (\n arg_5.METADATA[\"driver_name\"] == arg_2\n ):\n return arg_4.load().InputData(arg_0, arg_1=arg_1)\n raise MapcheteDriverError(\"no loader for driver '%s' could be found.\" % arg_2)"} +{"_id": "doc_7672", "title": "", "text": "def Func(arg_0):\n \"\"\"Dump output JSON and verify parameters if output metadata exist.\"\"\"\n if \"path\" in arg_0:\n arg_1 = os.path.join(arg_0[\"path\"], \"metadata.json\")\n logger.debug(\"check for output %s\", arg_1)\n try:\n arg_2 = read_output_metadata(arg_1)\n logger.debug(\"%s exists\", arg_1)\n logger.debug(\"existing output parameters: %s\", pformat(arg_2))\n arg_3 = arg_2[\"pyramid\"]\n arg_4 = params_to_dump(arg_0)\n logger.debug(\"current output parameters: %s\", pformat(arg_4))\n arg_5 = BufferedTilePyramid(**arg_4[\"pyramid\"])\n if arg_3 != arg_5:\n raise MapcheteConfigError(\n \"pyramid definitions between existing and new output do not match: \"\n \"%s != %s\" % (arg_3, arg_5)\n )\n arg_6 = arg_2[\"driver\"][\"format\"]\n arg_7 = arg_4[\"driver\"][\"format\"]\n if arg_6 != arg_7:\n raise MapcheteConfigError(\n \"existing output format does not match new output format: \"\n \"%s != %s\" % (\n 
(arg_6, arg_7)\n )\n )\n except FileNotFoundError:\n logger.debug(\"%s does not exist\", arg_1)\n arg_8 = params_to_dump(arg_0)\n # dump output metadata\n write_json(arg_1, arg_8)\n else:\n logger.debug(\"no path parameter found\")"} +{"_id": "doc_7673", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Determine target file path.\n\n Parameters\n ----------\n tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\n Returns\n -------\n path : string\n \"\"\"\n return os.path.join(*[\n arg_0.path,\n str(arg_1.zoom),\n str(arg_1.row),\n str(arg_1.col) + arg_0.file_extension\n ])"} +{"_id": "doc_7674", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create directory and subdirectory if necessary.\n\n Parameters\n ----------\n tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n \"\"\"\n makedirs(os.path.dirname(arg_0.get_path(arg_1)))"} +{"_id": "doc_7675", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Check whether process output is allowed with output driver.\n\n Parameters\n ----------\n process_data : raw process output\n\n Returns\n -------\n True or False\n \"\"\"\n if arg_0.METADATA[\"data_type\"] == \"raster\":\n return (\n is_numpy_or_masked_array(arg_1) or\n is_numpy_or_masked_array_with_tags(arg_1)\n )\n elif arg_0.METADATA[\"data_type\"] == \"vector\":\n return is_feature_list(arg_1)"} +{"_id": "doc_7676", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return verified and cleaned output.\n\n Parameters\n ----------\n process_data : raw process output\n\n Returns\n -------\n NumPy array or list of features.\n \"\"\"\n if arg_0.METADATA[\"data_type\"] == \"raster\":\n if is_numpy_or_masked_array(arg_1):\n return arg_1\n elif is_numpy_or_masked_array_with_tags(arg_1):\n arg_2, arg_3 = arg_1\n return arg_0.Func(arg_2), arg_3\n elif arg_0.METADATA[\"data_type\"] == \"vector\":\n return list(arg_1)"} +{"_id": "doc_7677", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Extract subset from multiple tiles.\n\n input_data_tiles : list of (``Tile``, process data) tuples\n out_tile : ``Tile``\n\n Returns\n -------\n NumPy array or list of features.\n \"\"\"\n if arg_0.METADATA[\"data_type\"] == \"raster\":\n arg_3 = create_mosaic(arg_1)\n return extract_from_array(\n in_raster=prepare_array(\n arg_3.data,\n nodata=arg_0.nodata,\n dtype=arg_0.output_params[\"dtype\"]\n ),\n in_affine=arg_3.affine,\n arg_2=arg_2\n )\n elif arg_0.METADATA[\"data_type\"] == \"vector\":\n return [\n arg_4 for arg_4 in list(\n chain.from_iterable([arg_6 for arg_5, arg_6 in arg_1])\n )\n if shape(arg_4[\"geometry\"]).intersects(arg_2.bbox)\n ]"} +{"_id": "doc_7678", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1.0, arg_4=1.0):\n \"\"\"\n Calculate slope and aspect map.\n\n Return a pair of arrays 2 pixels smaller than the input elevation array.\n\n Slope is returned in radians, from 0 for sheer face to pi/2 for\n flat ground. 
Aspect is returned in radians, counterclockwise from -pi\n at north around to pi.\n\n Logic here is borrowed from hillshade.cpp:\n http://www.perrygeo.net/wordpress/?p=7\n\n Parameters\n ----------\n elevation : array\n input elevation data\n xres : float\n column width\n yres : float\n row height\n z : float\n vertical exaggeration factor\n scale : float\n scale factor of pixel size units versus height units (insert 112000\n when having elevation values in meters in a geodetic projection)\n\n Returns\n -------\n slope shade : array\n \"\"\"\n arg_3 = float(arg_3)\n arg_4 = float(arg_4)\n arg_5, arg_6 = arg_0.shape[0] - 2, arg_0.shape[1] - 2\n arg_7 = [\n arg_3 * arg_0[row:(row + arg_5), col:(col + arg_6)]\n for (row, col) in product(range(3), range(3))\n ]\n arg_8 = (\n (arg_7[0] + arg_7[3] + arg_7[3] + arg_7[6])\n - (arg_7[2] + arg_7[5] + arg_7[5] + arg_7[8])\n ) / (8.0 * arg_1 * arg_4)\n arg_9 = (\n (arg_7[6] + arg_7[7] + arg_7[7] + arg_7[8])\n - (arg_7[0] + arg_7[1] + arg_7[1] + arg_7[2])\n ) / (8.0 * arg_2 * arg_4)\n # in radians, from 0 to pi/2\n arg_10 = math.pi/2 - np.arctan(np.sqrt(arg_8*arg_8 + arg_9*arg_9))\n # in radians counterclockwise, from -pi at north back to pi\n arg_11 = np.arctan2(arg_8, arg_9)\n return arg_10, arg_11"} +{"_id": "doc_7679", "title": "", "text": "def Func(arg_0, arg_1, arg_2=315.0, arg_3=45.0, arg_4=1.0, arg_5=1.0):\n \"\"\"\n Return Funcd numpy array.\n\n Parameters\n ----------\n elevation : array\n input elevation data\n tile : Tile\n tile covering the array\n z : float\n vertical exaggeration factor\n scale : float\n scale factor of pixel size units versus height units (insert 112000\n when having elevation values in meters in a geodetic projection)\n \"\"\"\n arg_2 = float(arg_2)\n arg_3 = float(arg_3)\n arg_4 = float(arg_4)\n arg_5 = float(arg_5)\n arg_6 = arg_1.tile.pixel_x_size\n arg_7 = -arg_1.tile.pixel_y_size\n arg_8, arg_9 = calculate_slope_aspect(\n arg_0, arg_6, arg_7, arg_4=arg_4, arg_5=arg_5)\n arg_10 = math.pi / 180.0\n arg_11 = np.sin(arg_3 * arg_10) * np.sin(arg_8) \\\n + np.cos(arg_3 * arg_10) * np.cos(arg_8) \\\n * np.cos((arg_2 - 90.0) * arg_10 - arg_9)\n # shaded now has values between -1.0 and +1.0\n # stretch to 0 - 255 and invert\n arg_11 = (((arg_11+1.0)/2)*-255.0).astype(\"uint8\")\n # add one pixel padding using the edge values\n return ma.masked_array(\n data=np.pad(arg_11, 1, mode='edge'), mask=arg_0.mask\n )"} +{"_id": "doc_7680", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Return ``BufferedTile`` object of this ``BufferedTilePyramid``.\n\n Parameters\n ----------\n zoom : integer\n zoom level\n row : integer\n tile matrix row\n col : integer\n tile matrix column\n\n Returns\n -------\n buffered tile : ``BufferedTile``\n \"\"\"\n Func = arg_0.tile_pyramid.tile(arg_1, arg_2, arg_3)\n return BufferedTile(Func, pixelbuffer=arg_0.pixelbuffer)"} +{"_id": "doc_7681", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return all tiles intersecting with bounds.\n\n Bounds values will be cleaned if they cross the antimeridian or are\n outside of the Northern or Southern tile pyramid bounds.\n\n Parameters\n ----------\n bounds : tuple\n (left, bottom, right, top) bounding values in tile pyramid CRS\n zoom : integer\n zoom level\n\n Yields\n ------\n intersecting tiles : generator\n generates ``BufferedTiles``\n \"\"\"\n for arg_3 in arg_0.tiles_from_bbox(box(*arg_1), arg_2):\n yield arg_0.tile(*arg_3.id)"} +{"_id": "doc_7682", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n 
All metatiles intersecting with given bounding box.\n\n Parameters\n ----------\n geometry : ``shapely.geometry``\n zoom : integer\n zoom level\n\n Yields\n ------\n intersecting tiles : generator\n generates ``BufferedTiles``\n \"\"\"\n for arg_3 in arg_0.tile_pyramid.Func(arg_1, arg_2):\n yield arg_0.tile(*arg_3.id)"} +{"_id": "doc_7683", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return all tiles intersecting with input geometry.\n\n Parameters\n ----------\n geometry : ``shapely.geometry``\n zoom : integer\n zoom level\n\n Yields\n ------\n intersecting tiles : ``BufferedTile``\n \"\"\"\n for arg_3 in arg_0.tile_pyramid.Func(arg_1, arg_2):\n yield arg_0.tile(*arg_3.id)"} +{"_id": "doc_7684", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return all BufferedTiles Func with tile.\n\n Parameters\n ----------\n tile : ``BufferedTile``\n another tile\n \"\"\"\n return [\n arg_0.tile(*arg_2.id)\n for arg_2 in arg_0.tile_pyramid.Func(arg_1)\n ]"} +{"_id": "doc_7685", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return dictionary representation of pyramid parameters.\n \"\"\"\n return dict(\n grid=arg_0.grid.Func(),\n metatiling=arg_0.metatiling,\n tile_size=arg_0.tile_size,\n pixelbuffer=arg_0.pixelbuffer\n )"} +{"_id": "doc_7686", "title": "", "text": "def Func(arg_0, arg_1=8):\n \"\"\"\n Return tile neighbors.\n\n Tile neighbors are unique, i.e. in some edge cases, where both the left\n and right neighbor wrapped around the antimeridian is the same. Also,\n neighbors ouside the northern and southern TilePyramid boundaries are\n excluded, because they are invalid.\n\n -------------\n | 8 | 1 | 5 |\n -------------\n | 4 | x | 2 |\n -------------\n | 7 | 3 | 6 |\n -------------\n\n Parameters\n ----------\n connectedness : int\n [4 or 8] return four direct neighbors or all eight.\n\n Returns\n -------\n list of BufferedTiles\n \"\"\"\n return [\n BufferedTile(arg_2, arg_0.pixelbuffer)\n for arg_2 in arg_0._tile.Func(arg_1=arg_1)\n ]"} +{"_id": "doc_7687", "title": "", "text": "def Func(\n arg_0,\n arg_1=\"nearest\",\n arg_2=None,\n arg_3=None\n):\n \"\"\"\n Read, stretch and return raster data.\n\n Inputs:\n -------\n raster\n raster file\n\n Parameters:\n -----------\n resampling : str\n rasterio.Resampling method\n scale_method : str\n - dtype_scale: use dtype minimum and maximum values\n - minmax_scale: use dataset bands minimum and maximum values\n - crop: clip data to output dtype\n scales_minmax : tuple\n tuple of band specific scale values\n\n Output:\n -------\n np.ndarray\n \"\"\"\n with arg_0.open(\"raster\", arg_1=arg_1) as raster_file:\n\n # exit if input tile is empty\n if raster_file.is_empty():\n return \"empty\"\n\n # actually read data and iterate through bands\n arg_4 = ()\n arg_5 = ()\n arg_6 = raster_file.read()\n if arg_6.ndim == 2:\n arg_6 = ma.expand_dims(arg_6, axis=0)\n if not arg_2:\n arg_3 = [(i, i) for i in range(len(arg_6))]\n\n for arg_7, (arg_8, arg_9) in zip(arg_6, arg_3):\n if arg_2 in [\"dtype_scale\", \"minmax_scale\"]:\n arg_4 += (_stretch_array(arg_7, arg_8, arg_9), )\n elif arg_2 == \"crop\":\n arg_4 += (np.clip(arg_7, arg_8, arg_9), )\n else:\n arg_4 += (arg_7, )\n arg_5 += (arg_7.mask, )\n\n return ma.masked_array(np.stack(arg_4), np.stack(arg_5))"} +{"_id": "doc_7688", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Open process output as input for other process.\n\n Parameters\n ----------\n tile : ``Tile``\n process : ``MapcheteProcess``\n kwargs : keyword arguments\n \"\"\"\n return 
InputTile(arg_1, arg_2, arg_3.get(\"resampling\", None))"} +{"_id": "doc_7689", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=False,\n arg_6=False,\n arg_7=False,\n arg_8=None,\n arg_9=False,\n arg_10=None\n):\n \"\"\"\n Serve a Mapchete process.\n\n Creates the Mapchete host and Funcs both web page with OpenLayers and the\n WMTS simple REST endpoint.\n \"\"\"\n arg_11 = create_app(\n mapchete_files=[arg_0], arg_3=arg_3,\n arg_4=arg_4, single_input_file=arg_8,\n mode=_get_mode(arg_7, arg_6, arg_5), arg_9=arg_9\n )\n if os.environ.get(\"MAPCHETE_TEST\") == \"TRUE\":\n logger.debug(\"don't run flask app, MAPCHETE_TEST environment detected\")\n else:\n arg_11.run(\n threaded=True, arg_9=True, arg_1=arg_1, host='0.0.0.0',\n extra_files=[arg_0]\n )"} +{"_id": "doc_7690", "title": "", "text": "def Func(\n arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None\n):\n \"\"\"Extract a numpy array from a raster file.\"\"\"\n try:\n return _rasterio_read(\n arg_0=arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7\n )\n except Exception as e:\n logger.exception(\"error while reading file %s: %s\", arg_0, e)\n raise"} +{"_id": "doc_7691", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"\n Extract raster data window array.\n\n Parameters\n ----------\n in_raster : array or ReferencedRaster\n in_affine : ``Affine`` required if in_raster is an array\n out_tile : ``BufferedTile``\n\n Returns\n -------\n extracted array : array\n \"\"\"\n if isinstance(arg_0, ReferencedRaster):\n arg_1 = arg_0.affine\n arg_0 = arg_0.data\n\n # get range within array\n arg_3, arg_4, arg_5, arg_6 = bounds_to_ranges(\n out_bounds=arg_2.bounds, arg_1=arg_1, in_shape=arg_0.shape\n )\n # if output window is within input window\n if (\n arg_3 >= 0 and\n arg_5 >= 0 and\n arg_4 <= arg_0.shape[-2] and\n arg_6 <= arg_0.shape[-1]\n ):\n return arg_0[..., arg_3:arg_4, arg_5:arg_6]\n # raise error if output is not fully within input\n else:\n raise ValueError(\"extraction fails if output shape is not within input\")"} +{"_id": "doc_7692", "title": "", "text": "def Func(\n arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=\"nearest\",\n arg_5=0\n):\n \"\"\"\n Extract and resample from array to target tile.\n\n Parameters\n ----------\n in_raster : array\n in_affine : ``Affine``\n out_tile : ``BufferedTile``\n resampling : string\n one of rasterio's resampling methods (default: nearest)\n nodataval : integer or float\n raster nodata value (default: 0)\n\n Returns\n -------\n resampled array : array\n \"\"\"\n # TODO rename function\n if isinstance(arg_0, ma.MaskedArray):\n pass\n if isinstance(arg_0, np.ndarray):\n arg_0 = ma.MaskedArray(arg_0, mask=arg_0 == arg_5)\n elif isinstance(arg_0, ReferencedRaster):\n arg_1 = arg_0.affine\n arg_3 = arg_0.crs\n arg_0 = arg_0.data\n elif isinstance(arg_0, tuple):\n arg_0 = ma.MaskedArray(\n data=np.stack(arg_0),\n mask=np.stack([\n band.mask\n if isinstance(band, ma.masked_array)\n else np.where(band == arg_5, True, False)\n for band in arg_0\n ]),\n fill_value=arg_5\n )\n else:\n raise TypeError(\"wrong input data type: %s\" % type(arg_0))\n if arg_0.ndim == 2:\n arg_0 = ma.expand_dims(arg_0, axis=0)\n elif arg_0.ndim == 3:\n pass\n else:\n raise TypeError(\"input array must have 2 or 3 dimensions\")\n if arg_0.fill_value != arg_5:\n ma.set_fill_value(arg_0, arg_5)\n arg_6 = 
(arg_0.shape[0], ) + arg_2.shape\n arg_7 = np.empty(arg_6, arg_0.dtype)\n arg_0 = ma.masked_array(\n data=arg_0.filled(), mask=arg_0.mask, fill_value=arg_5\n )\n reproject(\n arg_0,\n arg_7,\n src_transform=arg_1,\n src_crs=arg_3 if arg_3 else arg_2.crs,\n dst_transform=arg_2.affine,\n dst_crs=arg_2.crs,\n arg_4=Resampling[arg_4]\n )\n return ma.MaskedArray(arg_7, mask=arg_7 == arg_5)"} +{"_id": "doc_7693", "title": "", "text": "def Func(arg_0):\n \"\"\"Determine if distance over antimeridian is shorter than normal distance.\"\"\"\n if arg_0[0][0].tile_pyramid.is_global:\n # get set of tile columns\n arg_1 = sorted(list(set([t[0].col for t in arg_0])))\n # if tile columns are an unbroken sequence, tiles are connected and are not\n # passing the Antimeridian\n if arg_1 == list(range(min(arg_1), max(arg_1) + 1)):\n return False\n else:\n # look at column gaps and try to determine the smallest distance\n def gen_groups(arg_2):\n \"\"\"Groups tile columns by sequence.\"\"\"\n arg_3 = arg_2[0]\n arg_4 = [arg_3]\n for arg_5 in arg_2[1:]:\n # item is next in expected sequence\n if arg_5 == arg_3 + 1:\n arg_4.append(arg_5)\n # gap occured, so yield existing group and create new one\n else:\n yield arg_4\n arg_4 = [arg_5]\n arg_3 = arg_5\n yield arg_4\n\n arg_6 = list(gen_groups(arg_1))\n # in case there is only one group, don't shift\n if len(arg_6) == 1:\n return False\n # distance between first column of first group and last column of last group\n arg_7 = arg_6[-1][-1] - arg_6[0][0]\n # distance between last column of first group and last column of first group\n # but crossing the antimeridian\n arg_8 = (\n arg_6[0][-1] + arg_0[0][0].tile_pyramid.matrix_width(arg_0[0][0].zoom)\n ) - arg_6[-1][0]\n # return whether distance over antimeridian is shorter\n return arg_8 < arg_7\n else:\n return False"} +{"_id": "doc_7694", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=0, arg_3=\"int16\"):\n \"\"\"\n Turn input data into a proper array for further usage.\n\n Outut array is always 3-dimensional with the given data type. 
If the output\n is masked, the fill_value corresponds to the given nodata value and the\n nodata value will be burned into the data array.\n\n Parameters\n ----------\n data : array or iterable\n array (masked or normal) or iterable containing arrays\n nodata : integer or float\n nodata value (default: 0) used if input is not a masked array and\n for output array\n masked : bool\n return a NumPy Array or a NumPy MaskedArray (default: True)\n dtype : string\n data type of output array (default: \"int16\")\n\n Returns\n -------\n array : array\n \"\"\"\n # input is iterable\n if isinstance(arg_0, (list, tuple)):\n return _prepare_iterable(arg_0, arg_1, arg_2, arg_3)\n\n # special case if a 2D single band is provided\n elif isinstance(arg_0, np.ndarray) and arg_0.ndim == 2:\n arg_0 = ma.expand_dims(arg_0, axis=0)\n\n # input is a masked array\n if isinstance(arg_0, ma.MaskedArray):\n return _prepare_masked(arg_0, arg_1, arg_2, arg_3)\n\n # input is a NumPy array\n elif isinstance(arg_0, np.ndarray):\n if arg_1:\n return ma.masked_values(arg_0.astype(arg_3, copy=False), arg_2, copy=False)\n else:\n return arg_0.astype(arg_3, copy=False)\n else:\n raise ValueError(\n \"data must be array, masked array or iterable containing arrays.\"\n )"} +{"_id": "doc_7695", "title": "", "text": "def Func(\n arg_0, arg_1=None, arg_2=None, arg_3=False, arg_4=True,\n arg_5=False\n):\n \"\"\"\n Reproject a geometry to target CRS.\n\n Also, clips geometry if it lies outside the destination CRS boundary.\n Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical\n Mercator) and 3035 (ETRS89 / ETRS-LAEA).\n\n Parameters\n ----------\n geometry : ``shapely.geometry``\n src_crs : ``rasterio.crs.CRS`` or EPSG code\n CRS of source data\n dst_crs : ``rasterio.crs.CRS`` or EPSG code\n target CRS\n error_on_clip : bool\n raises a ``RuntimeError`` if a geometry is outside of CRS bounds\n (default: False)\n validity_check : bool\n checks if reprojected geometry is valid and throws ``TopologicalError``\n if invalid (default: True)\n antimeridian_cutting : bool\n cut geometry at Antimeridian; can result in a multipart output geometry\n\n Returns\n -------\n geometry : ``shapely.geometry``\n \"\"\"\n arg_1 = _validated_crs(arg_1)\n arg_2 = _validated_crs(arg_2)\n\n def _reproject_geom(arg_0, arg_1, arg_2):\n if arg_0.is_empty:\n return arg_0\n else:\n arg_6 = to_shape(\n transform_geom(\n arg_1.to_dict(),\n arg_2.to_dict(),\n mapping(arg_0),\n arg_5=arg_5\n )\n )\n return _repair(arg_6) if arg_4 else arg_6\n\n # return repaired geometry if no reprojection needed\n if arg_1 == arg_2 or arg_0.is_empty:\n return _repair(arg_0)\n\n # geometry needs to be clipped to its CRS bounds\n elif (\n arg_2.is_epsg_code and # just in case for an CRS with EPSG code\n arg_2.get(\"init\") in CRS_BOUNDS and # if CRS has defined bounds\n arg_2.get(\"init\") != \"epsg:4326\" # and is not WGS84 (does not need clipping)\n ):\n arg_7 = CRS().from_epsg(4326)\n # get dst_crs boundaries\n arg_8 = box(*CRS_BOUNDS[arg_2.get(\"init\")])\n # reproject geometry to WGS84\n arg_9 = _reproject_geom(arg_0, arg_1, arg_7)\n # raise error if geometry has to be clipped\n if arg_3 and not arg_9.within(arg_8):\n raise RuntimeError(\"geometry outside target CRS bounds\")\n # clip geometry dst_crs boundaries and return\n return _reproject_geom(arg_8.intersection(arg_9), arg_7, arg_2)\n\n # return without clipping if destination CRS does not have defined bounds\n else:\n return _reproject_geom(arg_0, arg_1, arg_2)"} +{"_id": "doc_7696", "title": "", 
"text": "def Func(arg_0, arg_1):\n \"\"\"\n Segmentize Polygon outer ring by segmentize value.\n\n Just Polygon geometry type supported.\n\n Parameters\n ----------\n geometry : ``shapely.geometry``\n segmentize_value: float\n\n Returns\n -------\n geometry : ``shapely.geometry``\n \"\"\"\n if arg_0.geom_type != \"Polygon\":\n raise TypeError(\"segmentize geometry type must be Polygon\")\n\n return Polygon(\n LinearRing([\n arg_3\n # pick polygon linestrings\n for arg_2 in map(\n lambda x: LineString([x[0], x[1]]),\n zip(arg_0.exterior.coords[:-1], arg_0.exterior.coords[1:])\n )\n # interpolate additional points in between and don't forget end point\n for arg_3 in [\n arg_2.interpolate(arg_1 * arg_4).coords[0]\n for arg_4 in range(int(arg_2.length / arg_1))\n ] + [arg_2.coords[1]]\n ])\n )"} +{"_id": "doc_7697", "title": "", "text": "def Func(\n arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=None\n):\n \"\"\"\n Write features to GeoJSON file.\n\n Parameters\n ----------\n in_data : features\n out_schema : dictionary\n output schema for fiona\n out_tile : ``BufferedTile``\n tile used for output extent\n out_path : string\n output path for GeoJSON file\n \"\"\"\n # Delete existing file.\n try:\n os.remove(arg_3)\n except OSError:\n pass\n\n arg_5 = []\n for arg_6 in arg_0:\n try:\n # clip feature geometry to tile bounding box and append for writing\n # if clipped feature still\n for arg_7 in multipart_to_singleparts(\n clean_geometry_type(\n to_shape(arg_6[\"geometry\"]).intersection(arg_2.bbox),\n arg_1[\"geometry\"]\n )\n ):\n arg_5.append({\n \"geometry\": mapping(arg_7),\n \"properties\": arg_6[\"properties\"]\n })\n except Exception as e:\n logger.warning(\"failed to prepare geometry for writing: %s\", e)\n continue\n\n # write if there are output features\n if arg_5:\n\n try:\n if arg_3.startswith(\"s3://\"):\n # write data to remote file\n with VectorWindowMemoryFile(\n tile=arg_2,\n features=arg_5,\n schema=arg_1,\n driver=\"GeoJSON\"\n ) as memfile:\n logger.debug((arg_2.id, \"upload tile\", arg_3))\n arg_4.put_object(\n Key=\"/\".join(arg_3.split(\"/\")[3:]),\n Body=memfile\n )\n else:\n # write data to local file\n with fiona.open(\n arg_3, 'w', schema=arg_1, driver=\"GeoJSON\",\n crs=arg_2.crs.to_dict()\n ) as dst:\n logger.debug((arg_2.id, \"write tile\", arg_3))\n dst.writerecords(arg_5)\n except Exception as e:\n logger.error(\"error while writing file %s: %s\", arg_3, e)\n raise\n\n else:\n logger.debug((arg_2.id, \"nothing to write\", arg_3))"} +{"_id": "doc_7698", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Return geometry of a specific type if possible.\n\n Filters and splits up GeometryCollection into target types. This is\n necessary when after clipping and/or reprojecting the geometry types from\n source geometries change (i.e. 
a Polygon becomes a LineString or a\n LineString becomes Point) in some edge cases.\n\n Parameters\n ----------\n geometry : ``shapely.geometry``\n target_type : string\n target geometry type\n allow_multipart : bool\n allow multipart geometries (default: True)\n\n Returns\n -------\n cleaned geometry : ``shapely.geometry``\n returns None if input geometry type differs from target type\n\n Raises\n ------\n GeometryTypeError : if geometry type does not match target_type\n \"\"\"\n arg_3 = {\n \"Point\": MultiPoint,\n \"LineString\": MultiLineString,\n \"Polygon\": MultiPolygon,\n \"MultiPoint\": MultiPoint,\n \"MultiLineString\": MultiLineString,\n \"MultiPolygon\": MultiPolygon\n }\n\n if arg_1 not in arg_3.keys():\n raise TypeError(\"target type is not supported: %s\" % arg_1)\n\n if arg_0.geom_type == arg_1:\n return arg_0\n\n elif arg_2:\n arg_4 = arg_3[arg_1]\n if arg_0.geom_type == \"GeometryCollection\":\n return arg_4([\n Func(arg_5, arg_1, arg_2)\n for arg_5 in arg_0])\n elif any([\n isinstance(arg_0, arg_4),\n arg_3[arg_0.geom_type] == arg_4\n ]):\n return arg_0\n\n raise GeometryTypeError(\n \"geometry type does not match: %s, %s\" % (arg_0.geom_type, arg_1)\n )"} +{"_id": "doc_7699", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Yield single part geometries if geom is multipart, otherwise yield geom.\n\n Parameters:\n -----------\n geom : shapely geometry\n\n Returns:\n --------\n shapely single part geometries\n \"\"\"\n if isinstance(arg_0, base.BaseGeometry):\n if hasattr(arg_0, \"geoms\"):\n for arg_1 in arg_0:\n yield arg_1\n else:\n yield arg_0"} +{"_id": "doc_7700", "title": "", "text": "def Func(\n arg_0,\n arg_1=\"nearest\",\n arg_2=\"gdal\",\n arg_3=None,\n arg_4=8,\n arg_5=False,\n arg_6=0,\n **arg_7\n):\n \"\"\"\n Convert and optionally clip input raster data.\n\n Inputs:\n -------\n raster\n singleband or multiband data input\n clip (optional)\n vector data used to clip output\n\n Parameters\n ----------\n td_resampling : str (default: 'nearest')\n Resampling used when reading from TileDirectory.\n td_matching_method : str ('gdal' or 'min') (default: 'gdal')\n gdal: Uses GDAL's standard method. Here, the target resolution is\n calculated by averaging the extent's pixel sizes over both x and y\n axes. This approach returns a zoom level which may not have the\n best quality but will speed up reading significantly.\n min: Returns the zoom level which matches the minimum resolution of the\n extents four corner pixels. This approach returns the zoom level\n with the best possible quality but with low performance. If the\n tile extent is outside of the destination pyramid, a\n TopologicalError will be raised.\n td_matching_max_zoom : int (optional, default: None)\n If set, it will prevent reading from zoom levels above the maximum.\n td_matching_precision : int (default: 8)\n Round resolutions to n digits before comparing.\n td_fallback_to_higher_zoom : bool (default: False)\n In case no data is found at zoom level, try to read data from higher\n zoom levels. Enabling this setting can lead to many IO requests in\n areas with no data.\n clip_pixelbuffer : int\n Use pixelbuffer when clipping output by geometry. 
(default: 0)\n\n Output\n ------\n np.ndarray\n \"\"\"\n # read clip geometry\n if \"clip\" in arg_0.params[\"input\"]:\n arg_8 = arg_0.open(\"clip\").read()\n if not arg_8:\n logger.debug(\"no clip data over tile\")\n return \"empty\"\n else:\n arg_8 = []\n\n with arg_0.open(\n \"raster\",\n matching_method=arg_2,\n matching_max_zoom=arg_3,\n matching_precision=arg_4,\n fallback_to_higher_zoom=arg_5,\n resampling=arg_1\n ) as raster:\n arg_9 = raster.read()\n if raster.is_empty() or arg_9[0].mask.all():\n logger.debug(\"raster empty\")\n return \"empty\"\n\n if arg_8:\n # apply original nodata mask and clip\n arg_10 = arg_0.clip(\n np.where(arg_9[0].mask, arg_0.params[\"output\"].nodata, arg_9),\n arg_8,\n clip_buffer=arg_6,\n inverted=True\n )\n return np.where(arg_10.mask, arg_10, arg_0.params[\"output\"].nodata)\n else:\n return np.where(arg_9[0].mask, arg_0.params[\"output\"].nodata, arg_9)"} +{"_id": "doc_7701", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Determine the best base zoom level for a raster.\n\n \"Best\" means the maximum zoom level where no oversampling has to be done.\n\n Parameters\n ----------\n input_file : path to raster file\n tile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or``mercator``)\n\n Returns\n -------\n zoom : integer\n \"\"\"\n arg_2 = BufferedTilePyramid(arg_1)\n with rasterio.open(arg_0, \"r\") as src:\n arg_3, arg_4, arg_5, arg_6 = reproject_geometry(\n segmentize_geometry(\n box(\n src.bounds.left, src.bounds.bottom, src.bounds.right,\n src.bounds.top\n ),\n get_segmentize_value(arg_0, arg_2)\n ),\n src_crs=src.crs, dst_crs=arg_2.crs\n ).bounds\n arg_7 = arg_5 - arg_3\n arg_8 = arg_6 - arg_4\n arg_9 = float(src.width + src.height)\n arg_10 = (\n (arg_7 / float(src.width)) * (float(src.width) / arg_9) +\n (arg_8 / float(src.height)) * (float(src.height) / arg_9)\n )\n\n for arg_11 in range(0, 40):\n if arg_2.pixel_x_size(arg_11) <= arg_10:\n return arg_11-1"} +{"_id": "doc_7702", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Determine whether file path is remote or local.\n\n Parameters\n ----------\n path : path to file\n\n Returns\n -------\n is_remote : bool\n \"\"\"\n arg_2 = (\"http://\", \"https://\", \"/vsicurl/\")\n if arg_1:\n arg_2 += (\"s3://\", \"/vsis3/\")\n return arg_0.startswith(arg_2)"} +{"_id": "doc_7703", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if file exists either remote or local.\n\n Parameters:\n -----------\n path : path to file\n\n Returns:\n --------\n exists : bool\n \"\"\"\n if arg_0.startswith((\"http://\", \"https://\")):\n try:\n urlopen(arg_0).info()\n return True\n except HTTPError as e:\n if e.code == 404:\n return False\n else:\n raise\n elif arg_0.startswith(\"s3://\"):\n arg_1 = get_boto3_bucket(arg_0.split(\"/\")[2])\n arg_2 = \"/\".join(arg_0.split(\"/\")[3:])\n for arg_3 in arg_1.objects.filter(Prefix=arg_2):\n if arg_3.key == arg_2:\n return True\n else:\n return False\n else:\n logger.debug(\"%s exists: %s\", arg_0, os.path.exists(arg_0))\n return os.path.exists(arg_0)"} +{"_id": "doc_7704", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Return absolute path if path is local.\n\n Parameters:\n -----------\n path : path to file\n base_dir : base directory used for absolute path\n\n Returns:\n --------\n absolute path\n \"\"\"\n if path_is_remote(arg_0):\n return arg_0\n else:\n if os.path.isabs(arg_0):\n return arg_0\n else:\n if arg_1 is None or not os.path.isabs(arg_1):\n raise TypeError(\"base_dir must be an absolute path.\")\n 
return os.path.abspath(os.path.join(arg_1, arg_0))"} +{"_id": "doc_7705", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Return relative path if path is local.\n\n Parameters:\n -----------\n path : path to file\n base_dir : directory where path sould be relative to\n\n Returns:\n --------\n relative path\n \"\"\"\n if path_is_remote(arg_0) or not os.path.isabs(arg_0):\n return arg_0\n else:\n return os.path.relpath(arg_0, arg_1)"} +{"_id": "doc_7706", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write local or remote.\"\"\"\n logger.debug(\"write %s to %s\", arg_1, arg_0)\n if arg_0.startswith(\"s3://\"):\n arg_2 = get_boto3_bucket(arg_0.split(\"/\")[2])\n arg_3 = \"/\".join(arg_0.split(\"/\")[3:])\n logger.debug(\"upload %s\", arg_3)\n arg_2.put_object(\n Key=arg_3,\n Body=json.dumps(arg_1, sort_keys=True, indent=4)\n )\n else:\n makedirs(os.path.dirname(arg_0))\n with open(arg_0, 'w') as dst:\n json.dump(arg_1, dst, sort_keys=True, indent=4)"} +{"_id": "doc_7707", "title": "", "text": "def Func(arg_0):\n \"\"\"Read local or remote.\"\"\"\n if arg_0.startswith((\"http://\", \"https://\")):\n try:\n return json.loads(urlopen(arg_0).read().decode())\n except HTTPError:\n raise FileNotFoundError(\"%s not found\", arg_0)\n elif arg_0.startswith(\"s3://\"):\n arg_1 = get_boto3_bucket(arg_0.split(\"/\")[2])\n arg_2 = \"/\".join(arg_0.split(\"/\")[3:])\n for arg_3 in arg_1.objects.filter(Prefix=arg_2):\n if arg_3.key == arg_2:\n return json.loads(arg_3.get()['Body'].read().decode())\n raise FileNotFoundError(\"%s not found\", arg_0)\n else:\n try:\n with open(arg_0, \"r\") as src:\n return json.loads(src.read())\n except:\n raise FileNotFoundError(\"%s not found\", arg_0)"} +{"_id": "doc_7708", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Attach a reducer function to a given type in the dispatch table.\"\"\"\n if sys.version_info < (3,):\n # Python 2 pickler dispatching is not explicitly customizable.\n # Let us use a closure to workaround this limitation.\n def dispatcher(arg_0, arg_3):\n arg_4 = arg_2(arg_3)\n arg_0.save_reduce(arg_3=arg_3, *arg_4)\n arg_0.dispatch_table[arg_1] = dispatcher\n else:\n arg_0.dispatch_table[arg_1] = arg_2"} +{"_id": "doc_7709", "title": "", "text": "def Func():\n \"\"\"Return the number of CPUs the current process can use.\n\n The returned number of CPUs accounts for:\n * the number of CPUs in the system, as given by\n ``multiprocessing.Func``;\n * the CPU affinity settings of the current process\n (available with Python 3.4+ on some Unix systems);\n * CFS scheduler CPU bandwidth limit (available on Linux only, typically\n set by docker and similar container orchestration systems);\n * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.\n and is given as the minimum of these constraints.\n It is also always larger or equal to 1.\n \"\"\"\n import math\n\n try:\n arg_0 = mp.Func()\n except NotImplementedError:\n arg_0 = 1\n\n # Number of available CPUs given affinity settings\n arg_1 = arg_0\n if hasattr(os, 'sched_getaffinity'):\n try:\n arg_1 = len(os.sched_getaffinity(0))\n except NotImplementedError:\n pass\n\n # CFS scheduler CPU bandwidth limit\n # available in Linux since 2.6 kernel\n arg_2 = arg_0\n arg_3 = \"/sys/fs/cgroup/cpu/cpu.cfs_quota_us\"\n arg_4 = \"/sys/fs/cgroup/cpu/cpu.cfs_period_us\"\n if os.path.exists(arg_3) and os.path.exists(arg_4):\n with open(arg_3, 'r') as fh:\n arg_5 = int(fh.read())\n with open(arg_4, 'r') as fh:\n arg_6 = int(fh.read())\n\n if arg_5 > 0 and arg_6 > 0:\n # 
Make sure this quantity is an int as math.ceil returns a\n # float in python2.7. (See issue #165)\n arg_2 = int(math.ceil(arg_5 / arg_6))\n\n # User defined soft-limit passed as an loky specific environment variable.\n arg_7 = int(os.environ.get('LOKY_MAX_CPU_COUNT', arg_0))\n arg_8 = min(arg_0, arg_1, arg_2,\n arg_7)\n return max(arg_8, 1)"} +{"_id": "doc_7710", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6,\n arg_7):\n \"\"\"Evaluates calls from call_queue and places the results in result_queue.\n\n This worker is run in a separate process.\n\n Args:\n call_queue: A ctx.Queue of _CallItems that will be read and\n evaluated by the worker.\n result_queue: A ctx.Queue of _ResultItems that will written\n to by the worker.\n initializer: A callable initializer, or None\n initargs: A tuple of args for the initializer\n process_management_lock: A ctx.Lock avoiding worker timeout while some\n workers are being spawned.\n timeout: maximum time to wait for a new item in the call_queue. If that\n time is expired, the worker will shutdown.\n worker_exit_lock: Lock to avoid flagging the executor as broken on\n workers timeout.\n current_depth: Nested parallelism level, to avoid infinite spawning.\n \"\"\"\n if arg_2 is not None:\n try:\n arg_2(*arg_3)\n except BaseException:\n _base.LOGGER.critical('Exception in initializer:', exc_info=True)\n # The parent will notice that the process stopped and\n # mark the pool broken\n return\n\n # set the global _CURRENT_DEPTH mechanism to limit recursive call\n global arg_8\n arg_8 = arg_7\n arg_9 = None\n arg_10 = None\n arg_11 = os.getpid()\n\n mp.util.debug('Worker started with timeout=%s' % arg_5)\n while True:\n try:\n arg_12 = arg_0.get(block=True, arg_5=arg_5)\n if arg_12 is None:\n mp.util.info(\"Shutting down worker on sentinel\")\n except queue.Empty:\n mp.util.info(\"Shutting down worker after timeout %0.3fs\"\n % arg_5)\n if arg_4.acquire(block=False):\n arg_4.release()\n arg_12 = None\n else:\n mp.util.info(\"Could not acquire processes_management_lock\")\n continue\n except BaseException as e:\n arg_13 = traceback.format_exc()\n try:\n arg_1.put(_RemoteTraceback(arg_13))\n except BaseException:\n # If we cannot format correctly the exception, at least print\n # the traceback.\n print(arg_13)\n sys.exit(1)\n if arg_12 is None:\n # Notify queue management thread about clean worker shutdown\n arg_1.put(arg_11)\n with arg_6:\n return\n try:\n arg_14 = arg_12()\n except BaseException as e:\n arg_15 = _ExceptionWithTraceback(e)\n arg_1.put(_ResultItem(arg_12.work_id, exception=arg_15))\n else:\n _sendback_result(arg_1, arg_12.work_id, result=arg_14)\n del arg_14\n\n # Free the resource as soon as possible, to avoid holding onto\n # open files or shared memory that is not needed anymore\n del arg_12\n\n if _USE_PSUTIL:\n if arg_9 is None:\n # Make reference measurement after the first call\n arg_9 = _get_memory_usage(arg_11, force_gc=True)\n arg_10 = time()\n continue\n if time() - arg_10 > _MEMORY_LEAK_CHECK_DELAY:\n arg_16 = _get_memory_usage(arg_11)\n arg_10 = time()\n if arg_16 - arg_9 < _MAX_MEMORY_LEAK_SIZE:\n # Memory usage stays within bounds: everything is fine.\n continue\n\n # Check again memory usage; this time take the measurement\n # after a forced garbage collection to break any reference\n # cycles.\n arg_16 = _get_memory_usage(arg_11, force_gc=True)\n arg_10 = time()\n if arg_16 - arg_9 < _MAX_MEMORY_LEAK_SIZE:\n # The GC managed to free the memory: everything is fine.\n continue\n\n # The process is 
leaking memory: let the master process\n # know that we need to start a new worker.\n mp.util.info(\"Memory leak detected: shutting down worker\")\n arg_1.put(arg_11)\n with arg_6:\n return\n else:\n # if psutil is not installed, trigger gc.collect events\n # regularly to limit potential memory leaks due to reference cycles\n if ((arg_10 is None) or\n (time() - arg_10 >\n _MEMORY_LEAK_CHECK_DELAY)):\n gc.collect()\n arg_10 = time()"} +{"_id": "doc_7711", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3):\n \"\"\"Fills call_queue with _WorkItems from pending_work_items.\n\n This function never blocks.\n\n Args:\n pending_work_items: A dict mapping work ids to _WorkItems e.g.\n {5: <_WorkItem...>, 6: <_WorkItem...>, ...}\n work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids\n are consumed and the corresponding _WorkItems from\n pending_work_items are transformed into _CallItems and put in\n call_queue.\n call_queue: A ctx.Queue that will be filled with _CallItems\n derived from _WorkItems.\n \"\"\"\n while True:\n if arg_3.full():\n return\n try:\n arg_4 = arg_2.get(block=False)\n except queue.Empty:\n return\n else:\n arg_5 = arg_0[arg_4]\n\n if arg_5.future.set_running_or_notify_cancel():\n arg_1 += [arg_4]\n arg_3.put(_CallItem(arg_4,\n arg_5.fn,\n arg_5.args,\n arg_5.kwargs),\n block=True)\n else:\n del arg_0[arg_4]\n continue"} +{"_id": "doc_7712", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"Wrapper for non-picklable object to use cloudpickle to serialize them.\n\n Note that this wrapper tends to slow down the serialization process as it\n is done with cloudpickle which is typically slower compared to pickle. The\n proper way to solve serialization issues is to avoid defining functions and\n objects in the main scripts and to implement __reduce__ functions for\n complex classes.\n \"\"\"\n if not cloudpickle:\n raise ImportError(\"could not import cloudpickle. Please install \"\n \"cloudpickle to allow extended serialization. \"\n \"(`pip install cloudpickle`).\")\n\n # If obj is a class, create a CloudpickledClassWrapper which instantiates\n # the object internally and wrap it directly in a CloudpickledObjectWrapper\n if inspect.isclass(arg_0):\n class arg_7(CloudpickledObjectWrapper):\n def __init__(arg_2, *arg_3, **arg_4):\n arg_2._obj = arg_0(*arg_3, **arg_4)\n arg_2._keep_wrapper = arg_1\n\n arg_7.__name__ = arg_0.__name__\n return arg_7\n\n # If obj is an instance of a class, just wrap it in a regular\n # CloudpickledObjectWrapper\n return _Func(arg_0, arg_1=arg_1)"} +{"_id": "doc_7713", "title": "", "text": "def Func(arg_0):\n '''Return a wrapper for an fd.'''\n arg_1 = get_spawning_popen()\n if arg_1 is not None:\n return arg_1.Func(arg_1.duplicate_for_child(arg_0))\n elif HAVE_SEND_HANDLE and sys.version_info[:2] > (3, 3):\n from multiprocessing import resource_sharer\n return resource_sharer.Func(arg_0)\n else:\n raise TypeError(\n 'Cannot pickle connection object. 
This object can only be '\n 'passed when spawning a new process'\n )"} +{"_id": "doc_7714", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=10,\n arg_3=False, arg_4=\"auto\",\n arg_5=None, arg_6=None,\n arg_7=None, arg_8=()):\n \"\"\"Return the current ReusableExectutor instance.\n\n Start a new instance if it has not been started already or if the previous\n instance was left in a broken state.\n\n If the previous instance does not have the requested number of workers, the\n executor is dynamically resized to adjust the number of workers prior to\n returning.\n\n Reusing a singleton instance spares the overhead of starting new worker\n processes and importing common python packages each time.\n\n ``max_workers`` controls the maximum number of tasks that can be running in\n parallel in worker processes. By default this is set to the number of\n CPUs on the host.\n\n Setting ``timeout`` (in seconds) makes idle workers automatically shutdown\n so as to release system resources. New workers are respawn upon submission\n of new tasks so that ``max_workers`` are available to accept the newly\n submitted tasks. Setting ``timeout`` to around 100 times the time required\n to spawn new processes and import packages in them (on the order of 100ms)\n ensures that the overhead of spawning workers is negligible.\n\n Setting ``kill_workers=True`` makes it possible to forcibly interrupt\n previously spawned jobs to get a new instance of the reusable executor\n with new constructor argument values.\n\n The ``job_reducers`` and ``result_reducers`` are used to customize the\n pickling of tasks and results send to the executor.\n\n When provided, the ``initializer`` is run first in newly spawned\n processes with argument ``initargs``.\n \"\"\"\n with _executor_lock:\n global arg_13, arg_12\n arg_9 = arg_13\n\n if arg_0 is None:\n if arg_4 is True and arg_9 is not None:\n arg_0 = arg_9._max_workers\n else:\n arg_0 = cpu_count()\n elif arg_0 <= 0:\n raise ValueError(\n \"max_workers must be greater than 0, got {}.\"\n .format(arg_0))\n\n if isinstance(arg_1, STRING_TYPE):\n arg_1 = get_context(arg_1)\n if arg_1 is not None and arg_1.get_start_method() == \"fork\":\n raise ValueError(\"Cannot use reusable executor with the 'fork' \"\n \"context\")\n\n arg_10 = dict(arg_1=arg_1, arg_2=arg_2,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7, arg_8=arg_8)\n if arg_9 is None:\n mp.util.debug(\"Create a executor with max_workers={}.\"\n .format(arg_0))\n arg_11 = _get_next_executor_id()\n arg_12 = arg_10\n arg_13 = arg_9 = _ReusablePoolExecutor(\n _executor_lock, arg_0=arg_0,\n arg_11=arg_11, **arg_10)\n else:\n if arg_4 == 'auto':\n arg_4 = arg_10 == arg_12\n if (arg_9._flags.broken or arg_9._flags.shutdown\n or not arg_4):\n if arg_9._flags.broken:\n arg_14 = \"broken\"\n elif arg_9._flags.shutdown:\n arg_14 = \"shutdown\"\n else:\n arg_14 = \"arguments have changed\"\n mp.util.debug(\n \"Creating a new executor with max_workers={} as the \"\n \"previous instance cannot be reused ({}).\"\n .format(arg_0, arg_14))\n arg_9.shutdown(wait=True, arg_3=arg_3)\n arg_13 = arg_9 = arg_12 = None\n # Recursive call to build a new instance\n return Func(arg_0=arg_0,\n **arg_10)\n else:\n mp.util.debug(\"Reusing existing executor with max_workers={}.\"\n .format(arg_9._max_workers))\n arg_9._resize(arg_0)\n\n return arg_9"} +{"_id": "doc_7715", "title": "", "text": "def Func(arg_0):\n \"\"\"Wait for the cache to be empty before resizing the pool.\"\"\"\n # Issue a warning to the user about the bad effect of this 
usage.\n if len(arg_0._pending_work_items) > 0:\n warnings.warn(\"Trying to resize an executor with running jobs: \"\n \"waiting for jobs completion before resizing.\",\n UserWarning)\n mp.util.debug(\"Executor {} waiting for jobs completion before\"\n \" resizing\".format(arg_0.executor_id))\n # Wait for the completion of the jobs\n while len(arg_0._pending_work_items) > 0:\n time.sleep(1e-3)"} +{"_id": "doc_7716", "title": "", "text": "def Func(arg_0, arg_1=True):\n '''\n Return info about parent needed by child to unpickle process object\n '''\n _check_not_importing_main()\n arg_2 = dict(\n log_to_stderr=util._log_to_stderr,\n authkey=bytes(process.current_process().authkey),\n )\n\n if util._logger is not None:\n arg_2['log_level'] = util._logger.getEffectiveLevel()\n if len(util._logger.handlers) > 0:\n arg_3 = util._logger.handlers[0]\n arg_2['log_fmt'] = arg_3.formatter._fmt\n\n arg_4 = [p for p in sys.path]\n try:\n arg_5 = arg_4.index('')\n except ValueError:\n pass\n else:\n arg_4[arg_5] = process.ORIGINAL_DIR\n\n arg_2.update(\n arg_0=arg_0,\n arg_4=arg_4,\n sys_argv=sys.argv,\n orig_dir=process.ORIGINAL_DIR,\n dir=os.getcwd()\n )\n\n if sys.platform != \"win32\":\n # Pass the semaphore_tracker pid to avoid re-spawning it in every child\n from . import semaphore_tracker\n semaphore_tracker.ensure_running()\n arg_2['tracker_pid'] = semaphore_tracker._semaphore_tracker._pid\n\n # Figure out whether to initialise main in the subprocess as a module\n # or through direct execution (or to leave it alone entirely)\n if arg_1:\n arg_6 = sys.modules['__main__']\n try:\n arg_7 = getattr(arg_6.__spec__, \"name\", None)\n except BaseException:\n arg_7 = None\n if arg_7 is not None:\n arg_2['init_main_from_name'] = arg_7\n elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):\n arg_8 = getattr(arg_6, '__file__', None)\n if arg_8 is not None:\n if (not os.path.isabs(arg_8) and\n process.ORIGINAL_DIR is not None):\n arg_8 = os.path.join(process.ORIGINAL_DIR, arg_8)\n arg_2['init_main_from_path'] = os.path.normpath(arg_8)\n # Compat for python2.7\n arg_2['main_path'] = arg_2['init_main_from_path']\n\n return arg_2"} +{"_id": "doc_7717", "title": "", "text": "def Func(arg_0):\n '''\n Try to get current process ready to unpickle process object\n '''\n if 'name' in arg_0:\n arg_1.current_process().name = arg_0['name']\n\n if 'authkey' in arg_0:\n arg_1.current_process().authkey = arg_0['authkey']\n\n if 'log_to_stderr' in arg_0 and arg_0['log_to_stderr']:\n util.log_to_stderr()\n\n if 'log_level' in arg_0:\n util.get_logger().setLevel(arg_0['log_level'])\n\n if 'log_fmt' in arg_0:\n import logging\n util.get_logger().handlers[0].setFormatter(\n logging.Formatter(arg_0['log_fmt'])\n )\n\n if 'sys_path' in arg_0:\n arg_5.path = arg_0['sys_path']\n\n if 'sys_argv' in arg_0:\n arg_5.argv = arg_0['sys_argv']\n\n if 'dir' in arg_0:\n os.chdir(arg_0['dir'])\n\n if 'orig_dir' in arg_0:\n arg_1.ORIGINAL_DIR = arg_0['orig_dir']\n\n if 'tracker_pid' in arg_0:\n from . 
import arg_9\n arg_9._semaphore_tracker._pid = arg_0[\"tracker_pid\"]\n\n if 'init_main_from_name' in arg_0:\n _fixup_main_from_name(arg_0['init_main_from_name'])\n elif 'init_main_from_path' in arg_0:\n _fixup_main_from_path(arg_0['init_main_from_path'])"} +{"_id": "doc_7718", "title": "", "text": "def Func(arg_0): # pragma: no cover\n \"\"\"Close all the file descriptors except those in keep_fds.\"\"\"\n\n # Make sure to keep stdout and stderr open for logging purpose\n arg_0 = set(arg_0).union([1, 2])\n\n # We try to retrieve all the open fds\n try:\n arg_1 = set(int(fd) for fd in os.listdir('/proc/self/fd'))\n except FileNotFoundError:\n import resource\n arg_2 = resource.getrlimit(resource.RLIMIT_NOFILE)[0]\n arg_1 = set(fd for fd in range(3, arg_2))\n arg_1.add(0)\n\n for arg_3 in arg_1 - arg_0:\n try:\n os.close(arg_3)\n except OSError:\n pass"} +{"_id": "doc_7719", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a formated string with the exitcodes of terminated workers.\n\n If necessary, wait (up to .25s) for the system to correctly set the\n exitcode of one terminated worker.\n \"\"\"\n arg_1 = 5\n\n # Catch the exitcode of the terminated workers. There should at least be\n # one. If not, wait a bit for the system to correctly set the exitcode of\n # the terminated worker.\n arg_2 = [p.exitcode for p in list(arg_0.values())\n if p.exitcode is not None]\n while len(arg_2) == 0 and arg_1 > 0:\n arg_1 -= 1\n arg_2 = [p.exitcode for p in list(arg_0.values())\n if p.exitcode is not None]\n time.sleep(.05)\n\n return _format_exitcodes(arg_2)"} +{"_id": "doc_7720", "title": "", "text": "def Func(arg_0):\n \"\"\"Format a list of exit code with names of the signals if possible\"\"\"\n arg_1 = [\"{}({})\".format(_get_exitcode_name(e), e)\n for e in arg_0 if e is not None]\n return \"{\" + \", \".join(arg_1) + \"}\""} +{"_id": "doc_7721", "title": "", "text": "def Func(arg_0, arg_1=0):\n '''Run semaphore tracker.'''\n # protect the process from ^C and \"killall python\" etc\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n signal.signal(signal.SIGTERM, signal.SIG_IGN)\n\n if _HAVE_SIGMASK:\n signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)\n\n for arg_2 in (sys.stdin, sys.stdout):\n try:\n arg_2.close()\n except Exception:\n pass\n\n if arg_1: # pragma: no cover\n sys.stderr.write(\"Main semaphore tracker is running\\n\")\n sys.stderr.flush()\n\n arg_3 = set()\n try:\n # keep track of registered/unregistered semaphores\n with os.fdopen(arg_0, 'rb') as arg_2:\n for arg_4 in arg_2:\n try:\n arg_5, arg_6 = arg_4.strip().split(b':')\n if arg_5 == b'REGISTER':\n arg_6 = arg_6.decode('ascii')\n arg_3.add(arg_6)\n if arg_1: # pragma: no cover\n sys.stderr.write(\"[SemaphoreTracker] register {}\\n\"\n .format(arg_6))\n sys.stderr.flush()\n elif arg_5 == b'UNREGISTER':\n arg_6 = arg_6.decode('ascii')\n arg_3.remove(arg_6)\n if arg_1: # pragma: no cover\n sys.stderr.write(\"[SemaphoreTracker] unregister {}\"\n \": cache({})\\n\"\n .format(arg_6, len(arg_3)))\n sys.stderr.flush()\n elif arg_5 == b'PROBE':\n pass\n else:\n raise RuntimeError('unrecognized command %r' % arg_5)\n except BaseException:\n try:\n sys.excepthook(*sys.exc_info())\n except BaseException:\n pass\n finally:\n # all processes have terminated; cleanup any reFuncing semaphores\n if arg_3:\n try:\n warnings.warn('semaphore_tracker: There appear to be %d '\n 'leaked semaphores to clean up at shutdown' %\n len(arg_3))\n except Exception:\n pass\n for arg_6 in arg_3:\n # For some reason the process which created and 
registered this\n # semaphore has failed to unregister it. Presumably it has died.\n # We therefore unlink it.\n try:\n try:\n sem_unlink(arg_6)\n if arg_1: # pragma: no cover\n sys.stderr.write(\"[SemaphoreTracker] unlink {}\\n\"\n .format(arg_6))\n sys.stderr.flush()\n except Exception as e:\n warnings.warn('semaphore_tracker: %s: %r' % (arg_6, e))\n finally:\n pass\n\n if arg_1: # pragma: no cover\n sys.stderr.write(\"semaphore tracker shut down\\n\")\n sys.stderr.flush()"} +{"_id": "doc_7722", "title": "", "text": "def Func(arg_0):\n '''Make sure that semaphore tracker process is running.\n\n This can be run from any process. Usually a child process will use\n the semaphore created by its parent.'''\n with arg_0._lock:\n if arg_0._fd is not None:\n # semaphore tracker was launched before, is it still running?\n if arg_0._check_alive():\n # => still alive\n return\n # => dead, launch it again\n os.close(arg_0._fd)\n try:\n # Clean-up to avoid dangling processes.\n os.waitpid(arg_0._pid, 0)\n except OSError:\n # The process was terminated or is a child from an ancestor\n # of the current process.\n pass\n arg_0._fd = None\n arg_0._pid = None\n\n warnings.warn('semaphore_tracker: process died unexpectedly, '\n 'relaunching. Some semaphores might leak.')\n\n arg_3 = []\n try:\n arg_3.append(sys.stderr.fileno())\n except Exception:\n pass\n\n arg_4, arg_5 = os.pipe()\n arg_6 = 'from {} import main; main({}, {})'.format(\n main.__module__, arg_4, VERBOSE)\n try:\n arg_3.append(arg_4)\n # process will out live us, so no need to wait on pid\n arg_7 = spawn.get_executable()\n arg_8 = [arg_7] + util._args_from_interpreter_flags()\n # In python 3.3, there is a bug which put `-RRRRR..` instead of\n # `-R` in args. Replace it to get the correct flags.\n # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488\n if sys.version_info[:2] <= (3, 3):\n import re\n for arg_9 in range(1, len(arg_8)):\n arg_8[arg_9] = re.sub(\"-R+\", \"-R\", arg_8[arg_9])\n arg_8 += ['-c', arg_6]\n util.debug(\"launching Semaphore tracker: {}\".format(arg_8))\n # bpo-33613: Register a signal mask that will block the\n # signals. This signal mask will be inherited by the child\n # that is going to be spawned and will protect the child from a\n # race condition that can make the child die before it\n # registers signal handlers for SIGINT and SIGTERM. 
The mask is\n # unregistered after spawning the child.\n try:\n if _HAVE_SIGMASK:\n signal.pthread_sigmask(signal.SIG_BLOCK,\n _IGNORED_SIGNALS)\n arg_10 = spawnv_passfds(arg_7, arg_8, arg_3)\n finally:\n if _HAVE_SIGMASK:\n signal.pthread_sigmask(signal.SIG_UNBLOCK,\n _IGNORED_SIGNALS)\n except BaseException:\n os.close(arg_5)\n raise\n else:\n arg_0._fd = arg_5\n arg_0._pid = arg_10\n finally:\n os.close(arg_4)"} +{"_id": "doc_7723", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n 'A simple event processor that prints out events.'\n arg_4 = arg_0.debugger.intf[-1].output\n arg_5 = arg_1.f_lineno\n arg_6 = arg_0.core.canonic_filename(arg_1)\n arg_6 = arg_0.core.filename(arg_6)\n if not arg_4:\n print(\"%s - %s:%d\" % (arg_2, arg_6, arg_5))\n else:\n arg_4.write(\"%s - %s:%d\" % (arg_2, arg_6, arg_5))\n if arg_3 is not None:\n arg_4.writeline(', %s ' % repr(arg_3))\n else:\n arg_4.writeline('')\n pass\n pass\n return arg_0.Func"} +{"_id": "doc_7724", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Program counter.\"\"\"\n arg_2 = arg_0.core.filename(None)\n if arg_0.core.is_Funcning():\n arg_3 = arg_0.proc.curframe\n if arg_3:\n arg_4 = inspect.getlineno(arg_3)\n arg_5 = arg_3.f_lasti\n arg_0.msg(\"PC offset is %d.\" % arg_5)\n arg_5 = max(arg_5, 0)\n arg_6 = arg_3.f_code\n arg_7 = arg_6.co_code\n disassemble_bytes(arg_0.msg, arg_0.msg_nocr,\n arg_7, arg_5, arg_4, arg_4-1, arg_4+1,\n constants=arg_6.co_consts, cells=arg_6.co_cellvars,\n varnames=arg_6.co_varnames, freevars=arg_6.co_freevars,\n linestarts=dict(findlinestarts(arg_6)),\n end_offset=arg_5+10)\n pass\n pass\n else:\n if arg_2:\n arg_8 = \"Python program '%s'\" % arg_2\n arg_9 = \"is not currently Funcning. \"\n arg_0.msg(Mmisc.wrapped_lines(arg_8, arg_9,\n arg_0.settings['width']))\n else:\n arg_0.msg('No Python program is currently Funcning.')\n pass\n arg_0.msg(arg_0.core.execution_status)\n pass\n return False"} +{"_id": "doc_7725", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"Almost a copy of code.Func\n Closely emulate the Funcive Python interpreter.\n\n This is a backwards compatible interface to the InteractiveConsole\n class. When readfunc is not specified, it attempts to import the\n readline module to enable GNU readline if it is available.\n\n Arguments (all optional, all default to None):\n\n banner -- passed to InteractiveConsole.Func()\n readfunc -- if not None, replaces InteractiveConsole.raw_input()\n local -- passed to InteractiveInterpreter.__init__()\n\n \"\"\"\n arg_4 = code.InteractiveConsole(arg_2, filename='')\n arg_4.runcode = lambda code_obj: arg_5(arg_4, code_obj)\n setattr(arg_4, 'globals', arg_3)\n if arg_1 is not None:\n arg_4.raw_input = arg_1\n else:\n try:\n import readline\n except ImportError:\n pass\n arg_4.Func(arg_0)\n pass"} +{"_id": "doc_7726", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Split a command line's arguments in a shell-like manner returned\n as a list of lists. 
Use ';;' with white space to indicate separate\n commands.\n\n This is a modified version of the standard library's shlex.split()\n function, but with a default of posix=False for splitting, so that quotes\n in inputs are respected.\n \"\"\"\n\n arg_2 = [[]]\n if isinstance(arg_0, bytes):\n arg_0 = arg_0.decode(\"utf-8\")\n arg_3 = shlex.shlex(arg_0, arg_1=arg_1)\n\n arg_3.whitespace_split = True\n arg_5 = list(arg_3)\n for arg_6 in arg_5:\n if ';;' == arg_6:\n arg_2.append([])\n else:\n arg_2[-1].append(arg_6)\n pass\n pass\n return arg_2"} +{"_id": "doc_7727", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"Run each function in `hooks' with args\"\"\"\n for arg_3 in arg_1:\n if arg_3(arg_0, *arg_2): return True\n pass\n return False"} +{"_id": "doc_7728", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Eval arg and it is an integer return the value. Otherwise\n return None\"\"\"\n if arg_0.curframe:\n arg_2 = arg_0.curframe.f_globals\n arg_3 = arg_0.curframe.f_locals\n else:\n arg_2 = globals()\n arg_3 = locals()\n pass\n try:\n arg_4 = int(eval(arg_1, arg_2, arg_3))\n except (SyntaxError, NameError, ValueError, TypeError):\n return None\n return arg_4"} +{"_id": "doc_7729", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=1, arg_4=None,\n arg_5=None):\n \"\"\"If no argument use the default. If arg is a an integer between\n least min_value and at_most, use that. Otherwise report an error.\n If there's a stack frame use that in evaluation.\"\"\"\n\n if arg_1 is None: return arg_3\n arg_3 = arg_0.Func_noerr(arg_1)\n if arg_3 is None:\n if arg_4:\n arg_0.errmsg((\"Command '%s' expects an integer; \"\n + \"got: %s.\") % (arg_4, str(arg_1)))\n else:\n arg_0.errmsg('Expecting a positive integer, got: %s'\n % str(arg_1))\n pass\n return None\n pass\n if arg_3 < arg_2:\n if arg_4:\n arg_0.errmsg((\"Command '%s' expects an integer at least\" +\n ' %d; got: %d.')\n % (arg_4, arg_2, arg_3))\n else:\n arg_0.errmsg((\"Expecting a positive integer at least\" +\n ' %d; got: %d')\n % (arg_2, arg_3))\n pass\n return None\n elif arg_5 and arg_3 > arg_5:\n if arg_4:\n arg_0.errmsg((\"Command '%s' expects an integer at most\" +\n ' %d; got: %d.')\n % (arg_4, arg_5, arg_3))\n else:\n arg_0.errmsg((\"Expecting an integer at most %d; got: %d\")\n % (arg_5, arg_3))\n pass\n pass\n return arg_3"} +{"_id": "doc_7730", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find the next token in str string from start_pos, we return\n the token and the next blank position after the token or\n str.size if this is the last token. Tokens are delimited by\n white space.\"\"\"\n arg_2 = arg_0[arg_1:]\n arg_3 = re.search('\\S', arg_2)\n if arg_3:\n arg_4 = arg_3.start()\n else:\n arg_4 = 0\n pass\n arg_5 = arg_1 + arg_4\n arg_6 = re.search('\\s', arg_0[arg_5:])\n if arg_6:\n arg_7 = arg_5 + arg_6.start()\n else:\n arg_7 = len(arg_0)\n pass\n return [arg_7, arg_0[arg_5:arg_7+1].rstrip()]"} +{"_id": "doc_7731", "title": "", "text": "def Func(arg_0, arg_1=''):\n '''Script interface to read a command. 
`prompt' is a parameter for\n compatibilty and is ignored.'''\n arg_0.input_lineno += 1\n arg_2 = arg_0.readline()\n if arg_0.verbose:\n arg_3 = \"%s line %s\" % (arg_0.script_name, arg_0.input_lineno)\n arg_0.msg('+ %s: %s' % (arg_3, arg_2))\n pass\n # Do something with history?\n return arg_2"} +{"_id": "doc_7732", "title": "", "text": "def Func(arg_0):\n \"\"\" Closes both input and output \"\"\"\n arg_0.state = 'closing'\n if arg_0.input:\n arg_0.input.Func()\n pass\n if arg_0.output:\n arg_0.output.Func()\n pass\n arg_0.state = 'disconnnected'\n return"} +{"_id": "doc_7733", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=-1, arg_4=0,\n arg_5=-1, arg_6=None, arg_7=False,\n arg_8=(), arg_9=(), arg_10=(), arg_11=(),\n arg_12=(), arg_13={}, arg_14='light',\n arg_15=0, arg_16=None):\n \"\"\"Disassemble byte string of code. If end_line is negative\n it counts the number of statement linestarts to use.\"\"\"\n arg_17 = 10000\n if arg_6 is None:\n arg_6 = 10000\n elif arg_7:\n arg_6 += arg_5 -1\n pass\n\n arg_18 = findlabels(arg_2)\n\n arg_19 = lambda x: None\n if arg_5 > arg_4:\n arg_20 = arg_19\n arg_21 = arg_19\n else:\n arg_20 = arg_1\n arg_21 = arg_0\n\n for arg_22 in get_instructions_bytes(arg_2, opc, arg_8, arg_9,\n arg_10, arg_11, arg_13):\n arg_23 = arg_22.offset\n if arg_16 and arg_23 > arg_16:\n break\n\n if arg_22.starts_line:\n if arg_23:\n arg_21(\"\")\n\n arg_4 = arg_22.starts_line\n if (arg_5 and ((arg_5 > arg_4) or\n arg_15 and arg_15 > arg_23)) :\n arg_20 = arg_19\n arg_21 = arg_19\n else:\n arg_17 -= 1\n arg_20 = arg_1\n arg_21 = arg_0\n pass\n if ((arg_4 > arg_6) or\n (arg_16 and arg_23 > arg_16)):\n break\n arg_20(format_token(Mformat.LineNumber,\n \"%4d\" % arg_4,\n arg_14=arg_14))\n else:\n if arg_15 and arg_23 and arg_15 <= arg_23:\n arg_20 = arg_1\n arg_21 = arg_0\n pass\n arg_20(' ')\n\n if arg_23 == arg_3: arg_20(format_token(Mformat.Arrow, '-->',\n arg_14=arg_14))\n else: arg_20(' ')\n if arg_23 in arg_18: arg_20(format_token(Mformat.Arrow, '>>',\n arg_14=arg_14))\n else: arg_20(' ')\n arg_20(repr(arg_23).rjust(4))\n arg_20(' ')\n arg_20(format_token(Mformat.Opcode,\n arg_22.opname.ljust(20),\n arg_14=arg_14))\n arg_20(repr(arg_22.arg).ljust(10))\n arg_20(' ')\n # Show argval?\n arg_21(format_token(Mformat.Name,\n arg_22.argrepr.ljust(20),\n arg_14=arg_14))\n pass\n\n return arg_2, arg_23"} +{"_id": "doc_7734", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"Return a count of the number of frames\"\n arg_2 = -arg_1\n while arg_0:\n arg_2 += 1\n arg_0 = arg_0.f_back\n return arg_2"} +{"_id": "doc_7735", "title": "", "text": "def Func(arg_0):\n \"\"\"If f_back is looking at a call function, return\n the name for it. 
Otherwise return None\"\"\"\n arg_1 = arg_0.f_back\n if not arg_1: return None\n if 'CALL_FUNCTION' != Mbytecode.op_at_frame(arg_1): return None\n\n arg_2 = arg_1.f_code\n arg_3 = arg_2.co_code\n # labels = dis.findlabels(code)\n arg_4 = dict(dis.findlinestarts(arg_2))\n arg_5 = arg_1.f_lasti\n while arg_5 >= 0:\n if arg_5 in arg_4:\n arg_6 = arg_3[arg_5]\n arg_5 += 1\n arg_7 = arg_3[arg_5]\n # FIXME: put this code in xdis\n arg_8 = 0\n while True:\n if PYTHON_VERSION >= 3.6:\n if arg_6 == opc.EXTENDED_ARG:\n arg_8 += (arg_7 << 8)\n continue\n arg_7 = arg_3[arg_5] + arg_8\n # FIXME: Python 3.6.0a1 is 2, for 3.6.a3 we have 1\n else:\n if arg_6 == opc.EXTENDED_ARG:\n arg_8 += (arg_7 << 256)\n continue\n arg_7 = arg_3[arg_5] + arg_3[arg_5+1]*256 + arg_8\n break\n\n return arg_2.co_names[arg_7]\n arg_5 -= 1\n pass\n return None"} +{"_id": "doc_7736", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='plain', arg_3={}):\n \"Print count entries of the stack trace\"\n if arg_1 is None:\n arg_4=len(arg_0.stack)\n else:\n arg_4=min(len(arg_0.stack), arg_1)\n try:\n for arg_5 in range(arg_4):\n print_stack_entry(arg_0, arg_5, arg_2=arg_2, arg_3=arg_3)\n except KeyboardInterrupt:\n pass\n return"} +{"_id": "doc_7737", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find subcmd in self.subcmds\"\"\"\n for arg_2 in list(arg_0.subcmds.keys()):\n if arg_2.startswith(arg_1) \\\n and len(arg_1) >= \\\n arg_0.subcmds[arg_2].__class__.min_abbrev:\n return arg_0.subcmds[arg_2]\n pass\n return None"} +{"_id": "doc_7738", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Show short help for a subcommand.\"\"\"\n arg_4 = arg_0.lookup(arg_2)\n if arg_4:\n if arg_3:\n arg_5 = arg_4.name\n else:\n arg_5 = ''\n pass\n if hasattr(arg_4, 'Func'):\n if arg_5: arg_5 += ' -- '\n arg_0.cmd_obj.msg(arg_5 + arg_4.Func)\n pass\n pass\n else:\n arg_0.undefined_subcmd(\"help\", arg_2)\n pass\n return"} +{"_id": "doc_7739", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Add subcmd to the available subcommands for this object.\n It will have the supplied docstring, and subcmd_cb will be called\n when we want to run the command. min_len is the minimum length\n allowed to abbreviate the command. in_list indicates with the\n show command will be run when giving a list of all sub commands\n of this object. Some commands have long output like \"show commands\"\n so we might not want to show that.\n \"\"\"\n arg_2 = arg_1.name\n arg_0.subcmds[arg_2] = arg_1\n\n # We keep a list of subcommands to assist command completion\n arg_0.cmdlist.append(arg_2)"} +{"_id": "doc_7740", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Run subcmd_name with args using obj for the environent\"\"\"\n arg_3=arg_0.lookup(arg_1)\n if arg_3:\n arg_3['callback'](arg_2)\n else:\n arg_0.cmdproc.undefined_cmd(arg_3.__class__.name, arg_1)\n pass\n return"} +{"_id": "doc_7741", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=True,\n arg_3=1, arg_4=0):\n \"\"\"\nEnter the Funcger.\n\nParameters\n----------\n\nlevel : how many stack frames go back. Usually it will be\nthe default 0. But sometimes though there may be calls in setup to the Funcger\nthat you may want to skip.\n\nstep_ignore : how many line events to ignore after the\nFunc() call. 
0 means don't even wait for the Func() call to finish.\n\nparam dbg_opts : is an optional \"options\" dictionary that gets fed\ntrepan.Debugger(); `start_opts' are the optional \"options\"\ndictionary that gets fed to trepan.Debugger.core.start().\n\nUse like this:\n\n.. code-block:: python\n\n ... # Possibly some Python code\n import trepan.api # Needed only once\n ... # Possibly some more Python code\n trepan.api.Func() # You can wrap inside conditional logic too\n pass # Stop will be here.\n # Below is code you want to use the Funcger to do things.\n .... # more Python code\n # If you get to a place in the program where you aren't going\n # want to Func any more, but want to remove Funcger trace overhead:\n trepan.api.stop()\n\nParameter \"level\" specifies how many stack frames go back. Usually it will be\nthe default 0. But sometimes though there may be calls in setup to the Funcger\nthat you may want to skip.\n\nParameter \"step_ignore\" specifies how many line events to ignore after the\nFunc() call. 0 means don't even wait for the Func() call to finish.\n\nIn situations where you want an immediate stop in the \"Func\" call\nrather than the statement following it (\"pass\" above), add parameter\nstep_ignore=0 to Func() like this::\n\n import trepan.api # Needed only once\n # ... as before\n trepan.api.Func(step_ignore=0)\n # ... as before\n\nModule variable _Funcger_obj_ from module trepan.Funcger is used as\nthe Funcger instance variable; it can be subsequently used to change\nsettings or alter behavior. It should be of type Debugger (found in\nmodule trepan). If not, it will get changed to that type::\n\n $ python\n >>> from trepan.Funcger import Funcger_obj\n >>> type(Funcger_obj)\n \n >>> import trepan.api\n >>> trepan.api.Func()\n ...\n (Trepan) c\n >>> from trepan.Funcger import Funcger_obj\n >>> Funcger_obj\n \n >>>\n\nIf however you want your own separate Funcger instance, you can\ncreate it from the Funcger _class Debugger()_ from module\ntrepan.Funcger::\n\n $ python\n >>> from trepan.Funcger import Debugger\n >>> dbgr = Debugger() # Add options as desired\n >>> dbgr\n \n\n`dbg_opts' is an optional \"options\" dictionary that gets fed\ntrepan.Debugger(); `start_opts' are the optional \"options\"\ndictionary that gets fed to trepan.Debugger.core.start().\n\"\"\"\n if not isinstance(arg_5.Funcger_obj, arg_5.Trepan):\n arg_5.Funcger_obj = arg_5.Trepan(arg_0)\n arg_5.Funcger_obj.core.add_ignore(Func, stop)\n pass\n arg_7 = arg_5.Funcger_obj.core\n arg_8 = sys._getframe(0+arg_4)\n arg_7.set_next(arg_8)\n if arg_1 and 'startup-profile' in arg_1 and arg_1['startup-profile']:\n arg_9 = arg_1['startup-profile']\n from trepan import options\n options.add_startup_file(arg_9)\n for arg_10 in arg_9:\n arg_7.processor.queue_startfile(arg_10)\n\n if not arg_7.is_started():\n arg_7.start(arg_1)\n pass\n if arg_2:\n Funcger_on_post_mortem()\n pass\n if 0 == arg_3:\n arg_8 = sys._getframe(1+arg_4)\n arg_7.stop_reason = 'at a Func() call'\n arg_12 = arg_7.trace_hook_suspend\n arg_7.trace_hook_suspend = True\n arg_7.processor.event_processor(arg_8, 'line', None)\n arg_7.trace_hook_suspend = arg_12\n else:\n arg_7.step_ignore = arg_3-1\n pass\n return"} +{"_id": "doc_7742", "title": "", "text": "def Func(arg_0):\n \"\"\"Find the first frame that is a debugged frame. We do this\n Generally we want traceback information without polluting it with\n debugger frames. We can tell these because those are frames on the\n top which don't have f_trace set. 
So we'll look back from the top\n to find the fist frame where f_trace is set.\n \"\"\"\n arg_1 = arg_2 = arg_0\n while arg_2 is not None and arg_2.f_trace is None:\n arg_1 = arg_2\n arg_2 = arg_2.f_back\n pass\n if arg_1:\n arg_3 = arg_1.f_locals.get('tracer_func_frame')\n if arg_3 == arg_1:\n if arg_1.f_back:\n arg_1 = arg_1.f_back\n pass\n pass\n pass\n else:\n return arg_0\n return arg_1"} +{"_id": "doc_7743", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"If arg is an int, use that otherwise take default.\"\"\"\n if arg_1:\n try:\n # eval() is used so we will allow arithmetic expressions,\n # variables etc.\n arg_2 = int(eval(arg_1))\n except (SyntaxError, NameError, ValueError):\n if arg_3:\n arg_0(\"Command '%s' expects an integer; got: %s.\" %\n (arg_3, str(arg_1)))\n else:\n arg_0('Expecting an integer, got: %s.' % str(arg_1))\n pass\n raise ValueError\n return arg_2"} +{"_id": "doc_7744", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n \"\"\"Return True if arg is 'on' or 1 and False arg is 'off' or 0.\n Any other value is raises ValueError.\"\"\"\n if not arg_1:\n if arg_2 is None:\n if arg_3:\n arg_0(\"Expecting 'on', 1, 'off', or 0. Got nothing.\")\n pass\n raise ValueError\n return arg_2\n if arg_1 == '1' or arg_1 == 'on': return True\n if arg_1 == '0' or arg_1 =='off': return False\n\n if arg_3:\n arg_0(\"Expecting 'on', 1, 'off', or 0. Got: %s.\" % str(arg_1))\n raise ValueError"} +{"_id": "doc_7745", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"set a Boolean-valued debugger setting. 'obj' is a generally a\n subcommand that has 'name' and 'debugger.settings' attributes\"\"\"\n try:\n if 0 == len(arg_1): arg_1 = ['on']\n arg_0.debugger.settings[arg_0.name] = get_onoff(arg_0.errmsg, arg_1[0])\n except ValueError:\n pass\n return"} +{"_id": "doc_7746", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n \"\"\"set an Integer-valued debugger setting. 
'obj' is a generally a\n subcommand that has 'name' and 'debugger.settings' attributes\"\"\"\n if '' == arg_1.strip():\n arg_0.errmsg(\"You need to supply a number.\")\n return\n arg_0.debugger.settings[arg_0.name] = \\\n get_an_int(arg_0.errmsg, arg_1, arg_2, arg_3, arg_4)\n return arg_0.debugger.settings[arg_0.name]"} +{"_id": "doc_7747", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Generic subcommand showing a boolean-valued debugger setting.\n 'obj' is generally a subcommand that has 'name' and\n 'debugger.setting' attributes.\"\"\"\n arg_2 = show_onoff(arg_0.debugger.settings[arg_0.name])\n if not arg_1: arg_1 = arg_0.name\n return arg_0.msg(\"%s is %s.\" % (arg_1, arg_2))"} +{"_id": "doc_7748", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return True if we are looking at a def statement\"\"\"\n # Should really also check that operand of 'LOAD_CONST' is a code object\n return (arg_0 and _re_def.match(arg_0) and op_at_frame(arg_1)=='LOAD_CONST'\n and stmt_contains_opcode(arg_1.f_code, arg_1.f_lineno,\n 'MAKE_FUNCTION'))"} +{"_id": "doc_7749", "title": "", "text": "def Func():\n \"\"\"Get bacground from\n default values based on the TERM environment variable\n \"\"\"\n arg_0 = environ.get('TERM', None)\n if arg_0:\n if (arg_0.startswith('xterm',) or arg_0.startswith('eterm')\n or arg_0 == 'dtterm'):\n return False\n return True"} +{"_id": "doc_7750", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Pass as parameters R G B values in hex\n On return, variable is_dark_bg is set\n \"\"\"\n\n try:\n arg_3 = int(environ.get('TERMINAL_COLOR_MIDPOINT', None))\n except:\n pass\n if not arg_3:\n arg_4 = environ.get('TERM', None)\n # 117963 = (* .6 (+ 65535 65535 65535))\n # 382.5 = (* .6 (+ 65535 65535 65535))\n print(\"midpoint\", arg_3, 'vs', (16*5 + 16*arg_1 + 16*arg_2))\n arg_3 = 383 if arg_4 and arg_4 == 'xterm-256color' else 117963\n\n if ( (16*5 + 16*arg_1 + 16*arg_2) < arg_3 ):\n return True\n else:\n return False"} +{"_id": "doc_7751", "title": "", "text": "def Func(arg_0):\n '''return suitable frame Func to key display expressions off of.'''\n if not arg_0: return None\n arg_1 = arg_0.f_code\n return (arg_1.co_name, arg_1.co_filename, arg_1.co_firstlineno)"} +{"_id": "doc_7752", "title": "", "text": "def Func(arg_0, arg_1):\n '''display any items that are active'''\n if not arg_1: return\n arg_2 = []\n arg_3 = signature(arg_1)\n for Func in arg_0.list:\n if Func.signature == arg_3 and Func.enabled:\n arg_2.append(Func.to_s(arg_1))\n pass\n pass\n return arg_2"} +{"_id": "doc_7753", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Set breakpoint at current location, or a specified frame\"\"\"\n # ???\n if arg_0 is None:\n arg_0 = _frame().f_back\n\n arg_1 = RemoteCeleryTrepan()\n arg_1.say(BANNER.format(self=arg_1))\n # dbg.say(SESSION_STARTED.format(self=dbg))\n trepan.api.Func(dbg_opts=arg_1.dbg_opts)"} +{"_id": "doc_7754", "title": "", "text": "def Func(arg_0):\n \"\"\"Find the corresponding signal name for 'num'. Return None\n if 'num' is invalid.\"\"\"\n arg_1 = signal.__dict__\n arg_0 = abs(arg_0)\n for arg_2 in list(arg_1.keys()):\n if arg_2.startswith('SIG') and arg_1[arg_2] == arg_0:\n return arg_2\n pass\n # Something went wrong. Should have returned above\n return None"} +{"_id": "doc_7755", "title": "", "text": "def Func(arg_0):\n \"\"\"Find the corresponding signal number for 'name'. 
Return None\n if 'name' is invalid.\"\"\"\n arg_1 = arg_0.upper()\n if (arg_1.startswith('SIG') and hasattr(signal, arg_1)):\n return getattr(signal, arg_1)\n else:\n arg_1 = \"SIG\"+arg_1\n if hasattr(signal, arg_1):\n return getattr(signal, arg_1)\n return None\n return"} +{"_id": "doc_7756", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a signal name for a signal name or signal\n number. Return None is name_num is an int but not a valid signal\n number and False if name_num is a not number. If name_num is a\n signal name or signal number, the canonic if name is returned.\"\"\"\n arg_1 = lookup_signum(arg_0)\n if arg_1 is None:\n # Maybe signame is a number?\n try:\n arg_2 = int(arg_0)\n arg_3 = lookup_signame(arg_2)\n if arg_3 is None:\n return None\n except:\n return False\n return arg_3\n\n arg_3 = arg_0.upper()\n if not arg_3.startswith('SIG'): return 'SIG'+arg_3\n return arg_3"} +{"_id": "doc_7757", "title": "", "text": "def Func(arg_0):\n \"\"\"Check to see if any of the signal handlers we are interested in have\n changed or is not initially set. Change any that are not right. \"\"\"\n for arg_1 in list(arg_0.sigs.keys()):\n if not arg_0.check_and_adjust_sighandler(arg_1, arg_0.sigs):\n break\n pass\n return"} +{"_id": "doc_7758", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Print information about a signal\"\"\"\n if len(arg_1) == 0: return None\n arg_2 = arg_1[0]\n if arg_2 in ['handle', 'signal']:\n # This has come from dbgr's info command\n if len(arg_1) == 1:\n # Show all signal handlers\n arg_0.dbgr.core.processor.section(arg_0.header)\n for arg_2 in arg_0.siglist:\n arg_0.print_Func_entry(arg_2)\n return True\n else:\n arg_2 = arg_1[1]\n pass\n pass\n\n arg_2 = arg_0.is_name_or_number(arg_2)\n arg_0.dbgr.core.processor.section(arg_0.header)\n arg_0.print_Func_entry(arg_2)\n return True"} +{"_id": "doc_7759", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delegate the Funcs specified in 'arg' to another\n method.\n \"\"\"\n if not arg_1:\n arg_0.info_signal(['handle'])\n return True\n arg_2 = arg_1.split()\n arg_3 = arg_2[0]\n arg_3 = arg_0.is_name_or_number(arg_2[0])\n if not arg_3: return\n\n if len(arg_2) == 1:\n arg_0.info_signal([arg_3])\n return True\n # We can display information about 'fatal' signals, but not\n # change their Funcs.\n if arg_3 in fatal_signals:\n return None\n\n if arg_3 not in list(arg_0.sigs.keys()):\n if not arg_0.initialize_handler(arg_3): return None\n pass\n\n # multiple commands might be specified, i.e. 'nopass nostop'\n for arg_4 in arg_2[1:]:\n if arg_4.startswith('no'):\n arg_5 = False\n arg_4 = arg_4[2:]\n else:\n arg_5 = True\n if 'stop'.startswith(arg_4):\n arg_0.handle_stop(arg_3, arg_5)\n elif 'print'.startswith(arg_4) and len(arg_4) >= 2:\n arg_0.handle_print(arg_3, arg_5)\n elif 'pass'.startswith(arg_4):\n arg_0.handle_pass(arg_3, arg_5)\n elif 'ignore'.startswith(arg_4):\n arg_0.handle_ignore(arg_3, arg_5)\n elif 'stack'.startswith(arg_4):\n arg_0.handle_print_stack(arg_3, arg_5)\n else:\n arg_0.dbgr.intf[-1].errmsg('Invalid arguments')\n pass\n pass\n return arg_0.check_and_adjust_sighandler(arg_3, arg_0.sigs)"} +{"_id": "doc_7760", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a full pathname for filename if we can find one. path\n is a list of directories to prepend to filename. 
If no file is\n found we'll return None\"\"\"\n\n for arg_3 in arg_1:\n\n # Handle $cwd and $cdir\n if arg_3 =='$cwd': arg_3='.'\n elif arg_3 == '$cdir': arg_3 = arg_2\n\n arg_4 = osp.realpath(osp.join(arg_3, arg_0))\n if osp.isfile(arg_4):\n return arg_4\n return None"} +{"_id": "doc_7761", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Do a shell-like path lookup for py_script and return the results.\n If we can't find anything return py_script\"\"\"\n if arg_0.find(os.sep) != -1:\n # Don't search since this name has path separator components\n return arg_0\n if arg_1 is None:\n arg_1 = os.environ['PATH'].split(os.pathsep)\n for arg_2 in arg_1:\n arg_3 = osp.join(arg_2, arg_0)\n if osp.exists(arg_3):\n return arg_3\n # Failure\n return arg_0"} +{"_id": "doc_7762", "title": "", "text": "def Func(arg_0, Func):\n \"\"\" used to write to a debugger that is connected to this\n server; `str' written will have a newline added to it\n \"\"\"\n if hasattr(arg_0.output, 'writeline'):\n arg_0.output.writeline(Func)\n elif hasattr(arg_0.output, 'writelines'):\n arg_0.output.writelines(Func + \"\\n\")\n pass\n return"} +{"_id": "doc_7763", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Execution status of the program.\"\"\"\n arg_2 = arg_0.core.filename(None)\n if arg_0.core.is_Funcning():\n if arg_2:\n arg_3 = \"Python program '%s' is stopped\" % arg_2\n else:\n arg_3 = 'Program is stopped'\n pass\n if arg_0.proc.event:\n arg_4 = 'via a %s event.' % arg_0.proc.event\n else:\n arg_4 = '.'\n arg_0.msg(Mmisc.wrapped_lines(arg_3, arg_4,\n arg_0.settings['width']))\n if arg_0.proc.curframe:\n arg_0.msg(\"PC offset is %d.\" % arg_0.proc.curframe.f_lasti)\n\n if arg_0.proc.event == 'return':\n arg_5 = arg_0.proc.event_arg\n arg_3 = 'Return value is'\n arg_0.msg(Mmisc.wrapped_lines(arg_3, arg_0.proc._saferepr(arg_5),\n arg_0.settings['width']))\n pass\n elif arg_0.proc.event == 'exception':\n arg_6, arg_7, arg_8 = arg_0.proc.event_arg\n arg_0.msg('Exception type: %s' %\n arg_0.proc._saferepr(arg_6))\n if arg_7:\n arg_0.msg('Exception value: %s' %\n arg_0.proc._saferepr(arg_7))\n pass\n pass\n arg_0.msg('It stopped %s.' % arg_0.core.stop_reason)\n if arg_0.proc.event in ['signal', 'exception', 'c_exception']:\n arg_0.msg('Note: we are stopped *after* Funcning the '\n 'line shown.')\n pass\n else:\n if arg_2:\n arg_3 = \"Python program '%s'\" % arg_2\n arg_4 = \"is not currently Funcning. \"\n arg_0.msg(Mmisc.wrapped_lines(arg_3, arg_4,\n arg_0.settings['width']))\n else:\n arg_0.msg('No Python program is currently Funcning.')\n pass\n arg_0.msg(arg_0.core.execution_status)\n pass\n return False"} +{"_id": "doc_7764", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"List commands arranged in an aligned columns\"\"\"\n arg_1.sort()\n arg_2 = arg_0.debugger.settings['width']\n return columnize.columnize(arg_1, displaywidth=arg_2,\n lineprefix=' ')"} +{"_id": "doc_7765", "title": "", "text": "def Func(arg_0=None, arg_1=1, arg_2=None):\n \"\"\"Enter debugger read loop after your program has crashed.\n\n exc is a triple like you get back from sys.exc_info. If no exc\n parameter, is supplied, the values from sys.last_type,\n sys.last_value, sys.last_traceback are used. And if these don't\n exist either we'll assume that sys.exc_info() contains what we\n want and frameno is the index location of where we want to start.\n\n 'frameno' specifies how many frames to ignore in the traceback.\n The default is 1, that is, we don't need to show the immediate\n call into Func. 
If you have wrapper functions that call\n this one, you may want to increase frameno.\n \"\"\"\n\n if arg_2 is None:\n # Check for a global debugger object\n if arg_3.debugger_obj is None:\n arg_3.debugger_obj = arg_3.Trepan()\n pass\n arg_2 = arg_3.debugger_obj\n pass\n arg_5 = re.compile(\"^<.+>$\")\n\n if arg_0[0] is None:\n # frameno+1 because we are about to add one more level of call\n # in get_last_or_frame_exception\n arg_0 = get_last_or_frame_exception()\n if arg_0[0] is None:\n print(\"Can't find traceback for Func \"\n \"in sys.last_traceback or sys.exec_info()\")\n return\n pass\n arg_6, arg_7, arg_8 = arg_0\n arg_2.core.execution_status = ('Terminated with unhandled exception %s'\n % arg_6)\n\n # tb has least-recent traceback entry first. We want the most-recent\n # entry. Also we'll pick out a mainpyfile name if it hasn't previously\n # been set.\n if arg_8 is not None:\n while arg_8.tb_next is not None:\n arg_11 = arg_8.tb_frame.f_code.co_filename\n if (arg_2.mainpyfile and 0 == len(arg_2.mainpyfile)\n and not arg_5.match(arg_11)):\n arg_2.mainpyfile = arg_11\n pass\n arg_8 = arg_8.tb_next\n pass\n arg_2.core.processor.curframe = arg_8.tb_frame\n pass\n\n if 0 == len(arg_2.program_sys_argv):\n # Fake program (run command) args since we weren't called with any\n arg_2.program_sys_argv = list(arg_17.argv[1:])\n arg_2.program_sys_argv[:0] = [arg_2.mainpyfile]\n\n # if 0 == len(dbg._sys_argv):\n # # Fake script invocation (restart) args since we don't have any\n # dbg._sys_argv = list(dbg.program_sys_argv)\n # dbg._sys_argv[:0] = [__title__]\n\n try:\n # # FIXME: This can be called from except hook in which case we\n # # need this. Dunno why though.\n # try:\n # _pydb_trace.set_trace(t.tb_frame)\n # except:\n # pass\n\n # Possibly a bug in Python 2.5. Why f.f_lineno is\n # not always equal to t.tb_lineno, I don't know.\n arg_16 = arg_8.tb_frame\n if arg_16 and arg_16.f_lineno != arg_8.tb_lineno : arg_16 = arg_16.f_back\n arg_2.core.processor.event_processor(arg_16, 'exception', arg_0, 'Trepan3k:pm')\n except DebuggerRestart:\n while True:\n arg_17.argv = list(arg_2._program_sys_argv)\n arg_2.msg(\"Restarting %s with arguments:\\n\\t%s\"\n % (arg_2.filename(arg_2.mainpyfile),\n \" \".join(arg_2._program_sys_argv[1:])))\n try:\n arg_2.run_script(arg_2.mainpyfile)\n except DebuggerRestart:\n pass\n pass\n except DebuggerQuit:\n pass\n return"} +{"_id": "doc_7766", "title": "", "text": "def Func(arg_0):\n \"\"\" Closes both socket and server connection. \"\"\"\n arg_0.state = 'closing'\n if arg_0.inout:\n arg_0.inout.Func()\n pass\n arg_0.state = 'closing connection'\n if arg_0.conn:\n arg_0.conn.Func()\n arg_0.state = 'disconnected'\n return"} +{"_id": "doc_7767", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" This method the debugger uses to Func. In contrast to\n Funcline, no newline is added to the end to `str'. Also\n msg doesn't have to be a string.\n \"\"\"\n if arg_0.state != 'connected':\n arg_0.wait_for_connect()\n pass\n arg_2 = Mtcpfns.pack_msg(arg_1)\n while len(arg_2) > Mtcpfns.TCP_MAX_PACKET:\n arg_0.conn.send(arg_2[:Mtcpfns.TCP_MAX_PACKET])\n arg_2 = arg_2[Mtcpfns.TCP_MAX_PACKET:]\n return arg_0.conn.send(arg_2)"} +{"_id": "doc_7768", "title": "", "text": "def Func(arg_0, arg_1):\n '''Complete an arbitrary expression.'''\n if not arg_0.proc.curframe: return [None]\n # Collect globals and locals. 
It is usually not really sensible to also\n # complete builtins, and they clutter the namespace quite heavily, so we\n # leave them out.\n arg_2 = arg_0.proc.curframe.f_globals.copy()\n arg_2.update(arg_0.proc.curframe.f_locals)\n if '.' in arg_1:\n # Walk an attribute chain up to the last part, similar to what\n # rlcompleter does. This will bail if any of the parts are not\n # simple attribute access, which is what we want.\n arg_3 = arg_1.split('.')\n try:\n arg_4 = arg_2[arg_3[0]]\n for arg_5 in arg_3[1:-1]:\n arg_4 = getattr(arg_4, arg_5)\n except (KeyError, AttributeError):\n return []\n arg_6 = '.'.join(arg_3[:-1]) + '.'\n return [arg_6 + arg_7 for arg_7 in dir(arg_4) if\n arg_7.startswith(arg_3[-1])]\n else:\n # Complete a simple name.\n return Mcomplete.complete_token(arg_2.keys(), arg_1)"} +{"_id": "doc_7769", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Add `frame_or_fn' to the list of functions that are not to\n be debugged\"\"\"\n for arg_2 in arg_1:\n arg_3 = arg_0.ignore_filter.add_include(arg_2)\n pass\n return arg_3"} +{"_id": "doc_7770", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Turns `filename' into its canonic representation and returns this\n string. This allows a user to refer to a given file in one of several\n equivalent ways.\n\n Relative filenames need to be fully resolved, since the current working\n directory might change over the course of execution.\n\n If filename is enclosed in < ... >, then we assume it is\n one of the bogus internal Python names like which is seen\n for example when executing \"exec cmd\".\n \"\"\"\n\n if arg_1 == \"<\" + arg_1[1:-1] + \">\":\n return arg_1\n Func = arg_0.filename_cache.get(arg_1)\n if not Func:\n arg_3 = arg_1.split(os.sep)[0]\n if arg_3 == os.curdir or arg_3 == os.pardir:\n # We may have invoked the program from a directory\n # other than where the program resides. filename is\n # relative to where the program resides. So make sure\n # to use that.\n Func = os.path.abspath(os.path.join(arg_0.main_dirname,\n arg_1))\n else:\n Func = os.path.abspath(arg_1)\n pass\n if not os.path.isfile(Func):\n Func = Mclifns.search_file(arg_1, arg_0.search_path,\n arg_0.main_dirname)\n # FIXME: is this is right for utter failure?\n if not Func: Func = arg_1\n pass\n Func = os.path.realpath(os.path.normcase(Func))\n arg_0.filename_cache[arg_1] = Func\n return Func"} +{"_id": "doc_7771", "title": "", "text": "def Func(arg_0, Func=None):\n \"\"\"Return filename or the basename of that depending on the\n basename setting\"\"\"\n if Func is None:\n if arg_0.debugger.mainpyfile:\n Func = arg_0.debugger.mainpyfile\n else:\n return None\n if arg_0.debugger.settings['basename']:\n return(os.path.basename(Func))\n return Func"} +{"_id": "doc_7772", "title": "", "text": "def Func(arg_0):\n '''Return True if debugging is in progress.'''\n return (tracer.Func() and\n not arg_0.trace_hook_suspend\n and tracer.find_hook(arg_0.trace_dispatch))"} +{"_id": "doc_7773", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\" Does the magic to determine if we stop here and run a\n command processor or not. 
If so, return True and set\n self.stop_reason; if not, return False.\n\n Determining factors can be whether a breakpoint was\n encountered, whether we are stepping, next'ing, finish'ing,\n and, if so, whether there is an ignore counter.\n \"\"\"\n\n # Add an generic event filter here?\n # FIXME TODO: Check for\n # - thread switching (under set option)\n\n # Check for \"next\" and \"finish\" stopping via stop_level\n\n # Do we want a different line and if so,\n # do we have one?\n arg_4 = arg_1.f_lineno\n arg_5 = arg_1.f_code.co_filename\n if arg_0.different_line and arg_2 == 'line':\n if arg_0.last_lineno == arg_4 and arg_0.last_filename == arg_5:\n return False\n pass\n arg_0.last_lineno = arg_4\n arg_0.last_filename = arg_5\n\n if arg_0.stop_level is not None:\n if arg_1 != arg_0.last_frame:\n # Recompute stack_depth\n arg_0.last_level = Mstack.count_frames(arg_1)\n arg_0.last_frame = arg_1\n pass\n if arg_0.last_level > arg_0.stop_level:\n return False\n elif arg_0.last_level == arg_0.stop_level and \\\n arg_0.stop_on_finish and arg_2 in ['return', 'c_return']:\n arg_0.stop_level = None\n arg_0.stop_reason = \"in return for 'finish' command\"\n return True\n pass\n\n # Check for stepping\n if arg_0._is_step_next_stop(arg_2):\n arg_0.stop_reason = 'at a stepping statement'\n return True\n\n return False"} +{"_id": "doc_7774", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"Sets to stop on the next event that happens in frame 'frame'.\"\n arg_0.step_events = None # Consider all events\n arg_0.stop_level = Mstack.count_frames(arg_1)\n arg_0.last_level = arg_0.stop_level\n arg_0.last_frame = arg_1\n arg_0.stop_on_finish = False\n arg_0.step_ignore = arg_2\n return"} +{"_id": "doc_7775", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"A mini stack trace routine for threads.\"\"\"\n while arg_1:\n if (not arg_0.core.ignore_filter.is_included(arg_1)\n or arg_0.settings['dbg_trepan']):\n arg_2 = Mstack.format_stack_entry(arg_0, (arg_1, arg_1.f_lineno))\n arg_0.msg(\" \"*4 + arg_2)\n pass\n arg_1 = arg_1.f_back\n pass\n return"} +{"_id": "doc_7776", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get file information\"\"\"\n if len(arg_1) == 0:\n if not arg_0.proc.curframe:\n arg_0.errmsg(\"No frame - no default file.\")\n return False\n arg_2 = arg_0.proc.curframe.f_code.co_filename\n else:\n arg_2 = arg_1[0]\n pass\n\n arg_3 = arg_2 + ' is'\n arg_4 = arg_0.core.filename_cache\n if arg_2 in arg_4:\n arg_3 += \" cached in debugger\"\n if arg_4[arg_2] != arg_2:\n arg_3 += ' as:'\n arg_3 = Mmisc.wrapped_lines(arg_3, arg_4[arg_2] + '.',\n arg_0.settings['width'])\n else:\n arg_3 += '.'\n pass\n arg_0.msg(arg_3)\n else:\n arg_5 = [file for file in file_list() if\n file.endswith(arg_2)]\n if (len(arg_5) > 1):\n arg_0.msg(\"Multiple files found ending filename string:\")\n for arg_6 in arg_5:\n arg_0.msg(\"\\t%s\" % arg_6)\n pass\n elif len(arg_5) == 1:\n arg_7 = pyficache.unmap_file(arg_5[0])\n arg_3 += \" matched debugger cache file:\\n \" + arg_7\n arg_0.msg(arg_3)\n else:\n arg_0.msg(arg_3 + ' not cached in debugger.')\n pass\n arg_7 = arg_0.core.canonic(arg_2)\n arg_0.msg(Mmisc.wrapped_lines('Canonic name:', arg_7,\n arg_0.settings['width']))\n for arg_8 in (arg_7, arg_2):\n if arg_8 in sys.modules:\n for arg_9 in [k for k, v in list(sys.modules.items())\n if arg_8 == v]:\n arg_0.msg(\"module: %s\", arg_9)\n pass\n pass\n pass\n for arg_10 in arg_1[1:]:\n arg_11 = False\n if arg_10 in ['all', 'size']:\n if pyficache.size(arg_7):\n arg_0.msg(\"File has %d 
lines.\" %\n pyficache.size(arg_7))\n pass\n arg_11 = True\n pass\n if arg_10 in ['all', 'sha1']:\n arg_0.msg(\"SHA1 is %s.\" % pyficache.sha1(arg_7))\n arg_11 = True\n pass\n if arg_10 in ['all', 'brkpts']:\n arg_12 = pyficache.trace_line_numbers(arg_7)\n if arg_12:\n arg_0.section(\"Possible breakpoint line numbers:\")\n arg_13 = columnize.columnize(arg_12, ljust = False,\n arrange_vertical = False,\n lineprefix=' ')\n arg_0.msg(arg_13)\n pass\n arg_11 = True\n pass\n if not arg_11:\n arg_0.errmsg(\"Don't understand sub-option %s.\" % arg_10)\n pass\n pass\n return"} +{"_id": "doc_7777", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check whether we should break here because of `b.funcname`.\"\"\"\n if not arg_0.funcname:\n # Breakpoint was set via line number.\n if arg_0.line != arg_1.f_lineno:\n # Breakpoint was set at a line with a def statement and the function\n # defined is called: don't break.\n return False\n return True\n\n # Breakpoint set via function name.\n\n if arg_1.f_code.co_name != arg_0.funcname:\n # It's not a function call, but rather execution of def statement.\n return False\n\n # We are in the right frame.\n if not arg_0.func_first_executable_line:\n # The function is entered for the 1st time.\n arg_0.func_first_executable_line = arg_1.f_lineno\n\n if arg_0.func_first_executable_line != arg_1.f_lineno:\n # But we are not at the first line number: don't break.\n return False\n return True"} +{"_id": "doc_7778", "title": "", "text": "def Func(arg_0, arg_1):\n \" remove breakpoint `bp'\"\n arg_2 = arg_1.number\n arg_0.bpbynumber[arg_2] = None # No longer in list\n arg_4 = (arg_1.filename, arg_1.line)\n if arg_4 not in arg_0.bplist: return False\n arg_0.bplist[arg_4].remove(arg_1)\n if not arg_0.bplist[arg_4]:\n # No more breakpoints for this file:line combo\n del arg_0.bplist[arg_4]\n return True"} +{"_id": "doc_7779", "title": "", "text": "def Func(arg_0, arg_1):\n \"Remove a breakpoint given its breakpoint number.\"\n arg_2, arg_3, arg_4 = arg_0.get_breakpoint(arg_1)\n if not arg_2:\n return False, arg_3\n arg_0.delete_breakpoint(arg_4)\n return (True, '')"} +{"_id": "doc_7780", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"Enable or disable all breakpoints.\"\n arg_2 = [arg_5 for arg_5 in arg_0.bpbynumber if arg_5]\n arg_3 = []\n if arg_1:\n arg_4 = 'en'\n else:\n arg_4 = 'dis'\n pass\n if not arg_2:\n return \"No breakpoints to %sable\" % arg_4\n for arg_5 in arg_2:\n arg_5.enabled = arg_1\n arg_3.append(str(arg_5.number))\n pass\n return (\"Breakpoints %sabled: %s\" % (arg_4, \", \".join(arg_3)))"} +{"_id": "doc_7781", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"Enable or disable a breakpoint given its breakpoint number.\"\n arg_3, arg_4, arg_5 = arg_0.get_breakpoint(arg_1)\n if not arg_3:\n return arg_3, arg_4\n if arg_2:\n arg_6 = 'en'\n else:\n arg_6 = 'dis'\n pass\n if arg_5.enabled == arg_2:\n return (False, ('Breakpoint (%r) previously %sabled' %\n (str(arg_1), arg_6,)))\n arg_5.enabled = arg_2\n return (True, '')"} +{"_id": "doc_7782", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=None):\n \"\"\"Read a line of input. 
Prompt and use_raw exist to be\n compatible with other input routines and are ignored.\n EOFError will be raised on EOF.\n \"\"\"\n arg_3 = arg_0.input.Func()\n if not arg_3: raise EOFError\n return arg_3.rstrip(\"\\n\")"} +{"_id": "doc_7783", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Restore an original login session, checking the signed session\n \"\"\"\n arg_1 = arg_0.session.get(la_settings.USER_SESSION_FLAG)\n logout(arg_0)\n\n if not arg_1:\n return\n\n try:\n arg_2 = signer.unsign(\n arg_1, max_age=timedelta(days=la_settings.USER_SESSION_DAYS_TIMESTAMP).total_seconds()\n )\n arg_3 = get_user_model().objects.get(pk=arg_2)\n messages.info(\n arg_0,\n la_settings.MESSAGE_LOGIN_REVERT.format(username=arg_3.__dict__[username_field]),\n extra_tags=la_settings.MESSAGE_EXTRA_TAGS,\n )\n login_as(arg_3, arg_0, store_original_user=False)\n if la_settings.USER_SESSION_FLAG in arg_0.session:\n del arg_0.session[la_settings.USER_SESSION_FLAG]\n except SignatureExpired:\n pass"} +{"_id": "doc_7784", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False):\n \"\"\"\n Yield each document in a Luminoso project in turn. Requires a client whose\n URL points to a project.\n\n If expanded=True, it will include additional fields that Luminoso added in\n its analysis, such as 'terms' and 'vector'.\n\n Otherwise, it will contain only the fields necessary to reconstruct the\n document: 'title', 'text', and 'metadata'.\n\n Shows a progress bar if progress=True.\n \"\"\"\n # Get total number of docs from the project record\n arg_3 = arg_0.get()['document_count']\n arg_4 = None\n try:\n if arg_2:\n arg_4 = tqdm(desc='Downloading documents', total=arg_3)\n\n for arg_5 in range(0, arg_3, DOCS_PER_BATCH):\n arg_6 = arg_0.get('docs', arg_5=arg_5, limit=DOCS_PER_BATCH)\n arg_7 = arg_6['result']\n for arg_8 in arg_7:\n # Get the appropriate set of fields for each document\n if arg_1:\n for arg_9 in UNNECESSARY_FIELDS:\n arg_8.pop(arg_9, None)\n else:\n arg_8 = {arg_9: arg_8[arg_9] for arg_9 in CONCISE_FIELDS}\n\n if arg_2:\n arg_4.update()\n yield arg_8\n\n finally:\n if arg_2:\n arg_4.close()"} +{"_id": "doc_7785", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Handle arguments for the 'lumi-download' command.\n \"\"\"\n arg_1 = argparse.ArgumentParser(\n description=DESCRIPTION,\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n arg_1.add_argument(\n '-b',\n '--base-url',\n default=URL_BASE,\n help='API root url, default: %s' % URL_BASE,\n )\n arg_1.add_argument(\n '-e', '--expanded',\n help=\"Include Luminoso's analysis of each document, such as terms and\"\n ' document vectors',\n action='store_true',\n )\n arg_1.add_argument('-t', '--token', help='API authentication token')\n arg_1.add_argument(\n '-s',\n '--save-token',\n action='store_true',\n help='save --token for --base-url to ~/.luminoso/tokens.json',\n )\n arg_1.add_argument(\n 'project_id', help='The ID of the project in the Daylight API'\n )\n arg_1.add_argument(\n 'output_file', nargs='?', default=None,\n help='The JSON lines (.jsons) file to write to'\n )\n arg_2 = arg_1.parse_args(arg_0)\n if arg_2.save_token:\n if not arg_2.token:\n raise ValueError(\"error: no token provided\")\n LuminosoClient.save_token(arg_2.token,\n domain=urlparse(arg_2.base_url).netloc)\n\n arg_3 = LuminosoClient.connect(url=arg_2.base_url, token=arg_2.token)\n arg_4 = arg_3.client_for_path('projects/{}'.format(arg_2.project_id))\n download_docs(arg_4, arg_2.output_file, arg_2.expanded)"} +{"_id": "doc_7786", "title": "", "text": "def Func(arg_0, 
arg_1=None):\n \"\"\"\n Read a JSON or CSV file and convert it into a JSON stream, which will\n be saved in an anonymous temp file.\n \"\"\"\n arg_2 = tempfile.TemporaryFile()\n for arg_3 in open_json_or_csv_somehow(arg_0,\n arg_1=arg_1):\n arg_2.write(json.dumps(arg_3, ensure_ascii=False).encode('utf-8'))\n arg_2.write(b'\\n')\n arg_2.seek(0)\n return arg_2"} +{"_id": "doc_7787", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Deduce the format of a file, within reason.\n\n - If the filename ends with .csv or .txt, it's csv.\n - If the filename ends with .jsons, it's a JSON stream (conveniently the\n format we want to output).\n - If the filename ends with .json, it could be a legitimate JSON file, or\n it could be a JSON stream, following a nonstandard convention that many\n people including us are guilty of. In that case:\n - If the first line is a complete JSON document, and there is more in the\n file besides the first line, then it is a JSON stream.\n - Otherwise, it is probably really JSON.\n - If the filename does not end with .json, .jsons, or .csv, we have to guess\n whether it's still CSV or tab-separated values or something like that.\n If it's JSON, the first character would almost certainly have to be a\n bracket or a brace. If it isn't, assume it's CSV or similar.\n \"\"\"\n arg_2 = None\n if arg_0.endswith('.csv'):\n arg_2 = 'csv'\n elif arg_0.endswith('.jsons'):\n arg_2 = 'jsons'\n else:\n with open(arg_0) as opened:\n arg_3 = opened.readline()\n if arg_3[0] not in '{[' and not arg_0.endswith('.json'):\n arg_2 = 'csv'\n else:\n if (arg_3.count('{') == arg_3.count('}') and\n arg_3.count('[') == arg_3.count(']')):\n # This line contains a complete JSON document. This probably\n # means it's in linewise JSON ('.jsons') format, unless the\n # whole file is on one line.\n arg_4 = ' '\n while arg_4.isspace():\n arg_4 = opened.read()\n if arg_4 == '':\n arg_2 = 'json'\n break\n if arg_2 is None:\n arg_2 = 'jsons'\n else:\n arg_2 = 'json'\n\n if arg_2 == 'json':\n arg_5 = json.load(open(arg_0), encoding='utf-8')\n elif arg_2 == 'csv':\n arg_5 = open_csv_somehow(arg_0)\n else:\n arg_5 = stream_json_lines(arg_0)\n\n return _normalize_data(arg_5, arg_1=arg_1)"} +{"_id": "doc_7788", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n This function is meant to normalize data for upload to the Luminoso\n Analytics system. Currently it only normalizes dates.\n\n If date_format is not specified, or if there's no date in a particular doc,\n the the doc is yielded unchanged.\n \"\"\"\n for arg_2 in arg_0:\n if 'date' in arg_2 and arg_1 is not None:\n try:\n arg_2['date'] = _convert_date(arg_2['date'], arg_1)\n except ValueError:\n # ValueErrors cover the cases when date_format does not match\n # the actual format of the date, both for epoch and non-epoch\n # times.\n logger.exception('%s does not match the date format %s;'\n % (arg_2['date'], arg_1))\n yield arg_2"} +{"_id": "doc_7789", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert a date in a given format to epoch time. 
Mostly a wrapper for\n datetime's strptime.\n \"\"\"\n if arg_1 != 'epoch':\n return datetime.strptime(arg_0, arg_1).timestamp()\n else:\n return float(arg_0)"} +{"_id": "doc_7790", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Open a CSV file using Python 2's CSV module, working around the deficiency\n where it can't handle the null bytes of UTF-16.\n \"\"\"\n arg_1 = detect_file_encoding(arg_0)\n if arg_1.startswith('UTF-16'):\n arg_2 = transcode_to_utf8(arg_0, arg_1)\n arg_1 = 'UTF-8'\n else:\n arg_2 = open(arg_0, 'rU')\n arg_3 = arg_2.readline()\n arg_2.seek(0)\n\n if '\\t' in arg_3:\n # tab-separated\n arg_4 = csv.reader(arg_2, delimiter='\\t')\n else:\n arg_4 = csv.reader(arg_2, dialect='excel')\n\n arg_5 = arg_4.next()\n arg_5 = [cell.decode(arg_1).lower().strip() for cell in arg_5]\n arg_6 = lambda x: x.decode(arg_1, 'replace')\n return _read_csv(arg_4, arg_5, arg_6)"} +{"_id": "doc_7791", "title": "", "text": "def Func():\n \"\"\"\n Handle command line arguments to convert a file to a JSON stream as a\n script.\n \"\"\"\n logging.basicConfig(level=logging.INFO)\n import argparse\n arg_0 = argparse.ArgumentParser(\n description=\"Translate CSV or JSON input to a JSON stream, or verify \"\n \"something that is already a JSON stream.\"\n )\n arg_0.add_argument('input',\n help='A CSV, JSON, or JSON stream file to read.')\n arg_0.add_argument('output', nargs='?', default=None,\n help=\"The filename to output to. Recommended extension is .jsons. \"\n \"If omitted, use standard output.\")\n arg_1 = arg_0.parse_args()\n transcode(arg_1.input, arg_1.output)"} +{"_id": "doc_7792", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Returns an object that makes requests to the API, authenticated\n with a saved or specified long-lived token, at URLs beginning with\n `url`.\n\n If no URL is specified, or if the specified URL is a path such as\n '/projects' without a scheme and domain, the client will default to\n https://analytics.luminoso.com/api/v5/.\n\n If neither token nor token_file are specified, the client will look\n for a token in $HOME/.luminoso/tokens.json. The file should contain\n a single json dictionary of the format\n `{'root_url': 'token', 'root_url2': 'token2', ...}`.\n \"\"\"\n if arg_1 is None:\n arg_1 = '/'\n\n if arg_1.startswith('http'):\n arg_4 = get_root_url(arg_1)\n else:\n arg_1 = URL_BASE + '/' + arg_1.lstrip('/')\n arg_4 = URL_BASE\n\n if arg_3 is None:\n arg_2 = arg_2 or get_token_filename()\n try:\n with open(arg_2) as tf:\n arg_5 = json.load(tf)\n except FileNotFoundError:\n raise LuminosoAuthError('No token file at %s' % arg_2)\n try:\n arg_3 = arg_5[urlparse(arg_4).netloc]\n except KeyError:\n raise LuminosoAuthError('No token stored for %s' % arg_4)\n\n arg_6 = requests.session()\n arg_6.auth = _TokenAuth(arg_3)\n return arg_0(arg_6, arg_1)"} +{"_id": "doc_7793", "title": "", "text": "def Func(arg_0, arg_1='analytics.luminoso.com', arg_2=None):\n \"\"\"\n Take a long-lived API token and store it to a local file. Long-lived\n tokens can be retrieved through the UI. 
Optional arguments are the\n domain for which the token is valid and the file in which to store the\n token.\n \"\"\"\n arg_2 = arg_2 or get_token_filename()\n if os.path.exists(arg_2):\n arg_3 = json.load(open(arg_2))\n else:\n arg_3 = {}\n arg_3[arg_1] = arg_0\n arg_4, arg_5 = os.path.split(arg_2)\n if arg_4 and not os.path.exists(arg_4):\n os.makedirs(arg_4)\n with open(arg_2, 'w') as f:\n json.dump(arg_3, f)"} +{"_id": "doc_7794", "title": "", "text": "def Func(arg_0, arg_1='', **arg_2):\n \"\"\"\n Make a DELETE request to the given path, and return the JSON-decoded\n result.\n\n Keyword parameters will be converted to URL parameters.\n\n DELETE requests ask to Func the object represented by this URL.\n \"\"\"\n arg_2 = jsonify_parameters(arg_2)\n arg_3 = ensure_trailing_slash(arg_0.url + arg_1.lstrip('/'))\n return arg_0._json_request('Func', arg_3, arg_2=arg_2)"} +{"_id": "doc_7795", "title": "", "text": "def Func(arg_0, arg_1=5, arg_2=None):\n \"\"\"\n A convenience method designed to inform you when a project build has\n completed. It polls the API every `interval` seconds until there is\n not a build running. At that point, it returns the \"last_build_info\"\n field of the project record if the build succeeded, and raises a\n LuminosoError with the field as its message if the build failed.\n\n If a `path` is not specified, this method will assume that its URL is\n the URL for the project. Otherwise, it will use the specified path\n (which should be \"/projects//\").\n \"\"\"\n arg_2 = arg_2 or ''\n arg_3 = time.time()\n arg_4 = 0\n while True:\n arg_5 = arg_0.get(arg_2)['last_build_info']\n if not arg_5:\n raise ValueError('This project is not building!')\n if arg_5['stop_time']:\n if arg_5['success']:\n return arg_5\n else:\n raise LuminosoError(arg_5)\n arg_6 = time.time() - arg_3\n if arg_6 > arg_4:\n logger.info('Still waiting (%d seconds elapsed).', arg_4)\n arg_4 += 120\n time.sleep(arg_1)"} +{"_id": "doc_7796", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Get the \"root URL\" for a URL, as described in the LuminosoClient\n documentation.\n \"\"\"\n arg_2 = urlparse(arg_0)\n\n # Make sure it's a complete URL, not a relative one\n if not arg_2.scheme:\n raise ValueError('Please supply a full URL, beginning with http:// '\n 'or https:// .')\n\n # Issue a warning if the path didn't already start with /api/v4\n arg_3 = '%s://%s/api/v4' % (arg_2.scheme, arg_2.netloc)\n if arg_1 and not arg_2.path.startswith('/api/v4'):\n logger.warning('Using %s as the root url' % arg_3)\n return arg_3"} +{"_id": "doc_7797", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Obtain the user's long-lived API token and save it in a local file.\n If the user has no long-lived API token, one will be created.\n Returns the token that was saved.\n \"\"\"\n arg_2 = arg_0._json_request('get', arg_0.root_url + '/user/tokens/')\n arg_3 = [arg_5['type'] == 'long_lived' for arg_5 in arg_2]\n if any(arg_3):\n arg_4 = arg_2[arg_3.index(True)]\n else:\n # User doesn't have a long-lived token, so create one\n arg_4 = arg_0._json_request('post', arg_0.root_url + '/user/tokens/')\n arg_5 = arg_4['token']\n arg_1 = arg_1 or get_token_filename()\n if os.path.exists(arg_1):\n arg_6 = json.load(open(arg_1))\n else:\n arg_6 = {}\n arg_6[arg_7(arg_0.root_url).netloc] = arg_5\n arg_10, arg_11 = os.path.split(arg_1)\n if arg_10 and not os.path.exists(arg_10):\n os.makedirs(arg_10)\n with open(arg_1, 'w') as f:\n json.dump(arg_6, f)\n return arg_5"} +{"_id": "doc_7798", "title": "", "text": "def 
Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Make a request of the specified type and expect a JSON object in\n response.\n\n If the result has an 'error' value, raise a LuminosoAPIError with\n its contents. Otherwise, return the contents of the 'result' value.\n \"\"\"\n arg_4 = arg_0._request(arg_1, arg_2, **arg_3)\n try:\n arg_5 = arg_4.json()\n except ValueError:\n logger.error(\"Received response with no JSON: %s %s\" %\n (arg_4, arg_4.content))\n raise LuminosoError('Response body contained no JSON. '\n 'Perhaps you meant to use get_raw?')\n if arg_5.get('error'):\n raise LuminosoAPIError(arg_5.get('error'))\n return arg_5['result']"} +{"_id": "doc_7799", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the ID of an account you can use to access projects.\n \"\"\"\n arg_1 = arg_0.__class__(arg_0.session, arg_0.root_url)\n arg_2 = arg_1.get('/accounts/')\n if arg_2['default_account'] is not None:\n return arg_2['default_account']\n arg_3 = [a['account_id'] for a in arg_2['accounts']\n if a['account_id'] != 'public']\n if len(arg_3) == 0:\n raise ValueError(\"Can't determine your default URL. \"\n \"Please request a specific URL or ask \"\n \"Luminoso for support.\")\n return arg_3[0]"} +{"_id": "doc_7800", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the Func that the server sends for the API.\n \"\"\"\n arg_1 = arg_0.__class__(arg_0.session, arg_0.root_url)\n return arg_1.get_raw('/')"} +{"_id": "doc_7801", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=5):\n \"\"\"\n Wait for an asynchronous task to finish.\n\n Unlike the thin methods elsewhere on this object, this one is actually\n specific to how the Luminoso API works. This will poll an API\n endpoint to find out the status of the job numbered `job_id`,\n repeating every 5 seconds (by default) until the job is done. When\n the job is done, it will return an object representing the result of\n that job.\n\n In the Luminoso API, requests that may take a long time return a\n job ID instead of a result, so that your code can continue running\n in the meantime. When it needs the job to be done to proceed, it can\n use this method to wait.\n\n The base URL where it looks for that job is by default `jobs/id/`\n under the current URL, assuming that this LuminosoClient's URL\n represents a project. 
You can specify a different URL by changing\n `base_path`.\n\n If the job failed, will raise a LuminosoError with the job status\n as its message.\n \"\"\"\n if arg_2 is None:\n arg_2 = 'jobs/id'\n arg_4 = '%s%d' % (ensure_trailing_slash(arg_2), arg_1)\n arg_5 = time.time()\n arg_6 = 0\n while True:\n arg_7 = arg_0.get(arg_4)\n if arg_7['stop_time']:\n if arg_7['success']:\n return arg_7\n else:\n raise LuminosoError(arg_7)\n arg_8 = time.time() - arg_5\n if arg_8 > arg_6:\n logger.info('Still waiting (%d seconds elapsed).', arg_6)\n arg_6 += 120\n time.sleep(arg_3)"} +{"_id": "doc_7802", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Get the raw text of a response.\n\n This is only generally useful for specific URLs, such as documentation.\n \"\"\"\n arg_3 = ensure_trailing_slash(arg_0.url + arg_1.lstrip('/'))\n return arg_0._request('get', arg_3, arg_2=arg_2).text"} +{"_id": "doc_7803", "title": "", "text": "def Func(arg_0):\n \"\"\"Print a JSON list of JSON objects in CSV format.\"\"\"\n if type(arg_0) is not list:\n raise TypeError(\"output not able to be displayed as CSV.\")\n arg_1 = arg_0[0]\n arg_2 = csv.DictWriter(sys.stdout, fieldnames=sorted(arg_1.keys()))\n arg_2.writeheader()\n for arg_3 in arg_0:\n arg_2.writerow(arg_3)"} +{"_id": "doc_7804", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Read parameters from input file, -j, and -p arguments, in that order.\"\"\"\n arg_3 = {}\n try:\n if arg_0:\n arg_3.update(json.load(arg_0))\n if arg_1 is not None:\n arg_3.update(json.loads(arg_1))\n except ValueError as e:\n raise ValueError(\"input is not valid JSON: %s\" % e)\n try:\n arg_3.update({arg_4.split('=', 1)[0]: arg_4.split('=', 1)[1] for arg_4 in arg_2})\n except IndexError:\n raise ValueError(\"--param arguments must have key=value format\")\n return arg_3"} +{"_id": "doc_7805", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Limit a document to just the three fields we should upload.\n \"\"\"\n # Mutate a copy of the document to fill in missing fields\n arg_0 = dict(arg_0)\n if 'text' not in arg_0:\n raise ValueError(\"The document {!r} has no text field\".format(arg_0))\n return {\n 'text': arg_0['text'],\n 'metadata': arg_0.get('metadata', []),\n 'title': arg_0.get('title', '')\n }"} +{"_id": "doc_7806", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=False\n):\n \"\"\"\n Given an iterator of documents, upload them as a Luminoso project.\n \"\"\"\n arg_6 = 'Uploaded using lumi-upload at {}'.format(time.asctime())\n if arg_4 is not None:\n arg_7 = arg_0.post(\n 'projects',\n arg_3=arg_3,\n arg_2=arg_2,\n arg_6=arg_6,\n account_id=arg_4,\n )\n else:\n arg_7 = arg_0.post(\n 'projects', arg_3=arg_3, arg_2=arg_2, arg_6=arg_6\n )\n arg_8 = arg_7['project_id']\n arg_9 = arg_0.client_for_path('projects/' + arg_8)\n try:\n if arg_5:\n arg_10 = tqdm(desc='Uploading documents')\n else:\n arg_10 = None\n\n for arg_11 in _batches(arg_1, BATCH_SIZE):\n arg_12 = [_simplify_doc(doc) for doc in arg_11]\n arg_9.post('upload', arg_1=arg_12)\n if arg_5:\n arg_10.update(BATCH_SIZE)\n\n finally:\n if arg_5:\n arg_10.close()\n\n print('The server is building project {!r}.'.format(arg_8))\n arg_9.post('build')\n\n while True:\n time.sleep(10)\n arg_13 = arg_9.get()\n arg_14 = arg_13['last_build_info']\n if 'success' in arg_14:\n if not arg_14['success']:\n raise LuminosoServerError(arg_14['reason'])\n return arg_13"} +{"_id": "doc_7807", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Handle arguments for the 'lumi-upload' 
command.\n \"\"\"\n arg_1 = argparse.ArgumentParser(\n description=DESCRIPTION,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n arg_1.add_argument(\n '-b',\n '--base-url',\n default=URL_BASE,\n help='API root url, default: %s' % URL_BASE,\n )\n arg_1.add_argument(\n '-a',\n '--account-id',\n default=None,\n help='Account ID that should own the project, if not the default',\n )\n arg_1.add_argument(\n '-l',\n '--language',\n default='en',\n help='The language code for the language the text is in. Default: en',\n )\n arg_1.add_argument('-t', '--token', help=\"API authentication token\")\n arg_1.add_argument(\n '-s',\n '--save-token',\n action='store_true',\n help='save --token for --base-url to ~/.luminoso/tokens.json',\n )\n arg_1.add_argument(\n 'input_filename',\n help='The JSON-lines (.jsons) file of documents to upload',\n )\n arg_1.add_argument(\n 'project_name',\n nargs='?',\n default=None,\n help='What the project should be called',\n )\n arg_2 = arg_1.parse_args(arg_0)\n if arg_2.save_token:\n if not arg_2.token:\n raise ValueError(\"error: no token provided\")\n LuminosoClient.save_token(arg_2.token,\n domain=urlparse(arg_2.base_url).netloc)\n\n arg_3 = LuminosoClient.connect(url=arg_2.base_url, token=arg_2.token)\n\n arg_4 = arg_2.project_name\n if arg_4 is None:\n arg_4 = input('Enter a name for the project: ')\n if not arg_4:\n print('Aborting because no name was provided.')\n return\n\n arg_5 = upload_docs(\n arg_3,\n arg_2.input_filename,\n arg_2.language,\n arg_4,\n account=arg_2.account_id,\n progress=True,\n )\n print(\n 'Project {!r} created with {} documents'.format(\n arg_5['project_id'], arg_5['document_count']\n )\n )"} +{"_id": "doc_7808", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None,\n arg_5=None, arg_6=None,\n arg_7=False, arg_8=False, arg_9=None):\n \"\"\"\n Upload a file to Luminoso with the given account and project name.\n\n Given a file containing JSON, JSON stream, or CSV data, this verifies\n that we can successfully convert it to a JSON stream, then uploads that\n JSON stream.\n \"\"\"\n arg_10 = transcode_to_stream(arg_0, arg_9)\n upload_stream(stream_json_lines(arg_10),\n arg_1, arg_2, arg_3, arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6,\n arg_7=arg_7, arg_8=arg_8)"} +{"_id": "doc_7809", "title": "", "text": "def Func():\n \"\"\"\n Handle command line arguments, to upload a file to a Luminoso project\n as a script.\n \"\"\"\n import argparse\n arg_0 = argparse.ArgumentParser()\n arg_0.add_argument('filename')\n arg_0.add_argument('account')\n arg_0.add_argument('project_name')\n arg_0.add_argument(\n '--append',\n help=(\"If append flag is used, upload documents to existing project, \"\n \"rather than creating a new project.\"),\n action=\"store_true\"\n )\n arg_0.add_argument(\n '-s', '--stage',\n help=\"If stage flag is used, just upload docs, don't recalculate.\",\n action=\"store_true\"\n )\n arg_0.add_argument(\n '-a', '--api-url',\n help=\"Specify an alternate API url\",\n default=URL_BASE\n )\n arg_0.add_argument(\n '-l', '--language',\n help=(\"Two-letter language code to use when recalculating (e.g. 
'en' \"\n \"or 'ja')\")\n )\n arg_0.add_argument(\n '-u', '--username', default=None,\n help=\"username (defaults to your username on your computer)\"\n )\n arg_0.add_argument(\n '-p', '--password', default=None,\n help=\"password (you can leave this out and type it in later)\"\n )\n arg_0.add_argument(\n '-d', '--date-format', default='iso',\n help=(\"format string for parsing dates, following \"\n \"http://strftime.org/. Default is 'iso', which is \"\n \"'%%Y-%%m-%%dT%%H:%%M:%%S+00:00'. Other shortcuts are 'epoch' \"\n \"for epoch time or 'us-standard' for '%%m/%%d/%%y'\")\n )\n arg_1 = arg_0.parse_args()\n\n # Implement some human-understandable shortcuts for date_format\n arg_2 = arg_1.date_format.lower()\n if arg_2 == 'iso':\n arg_3 = '%Y-%m-%dT%H:%M:%S+00:00'\n elif arg_2 in ['unix', 'epoch']:\n arg_3 = 'epoch'\n elif arg_2 == 'us-standard':\n arg_3 = '%m/%d/%y'\n else:\n arg_3 = arg_1.date_format\n\n upload_file(arg_1.filename, arg_1.api_url, arg_1.account, arg_1.project_name,\n language=arg_1.language,\n username=arg_1.username, password=arg_1.password,\n append=arg_1.append, stage=arg_1.stage,\n arg_3=arg_3)"} +{"_id": "doc_7810", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=arg_4):\n \"\"\"\n Obtain a short-lived token using a username and password, and use that\n token to create an auth object.\n \"\"\"\n arg_5 = requests.session()\n arg_6 = arg_5.post(arg_3.rstrip('/') + '/user/login/',\n data={'username': arg_1,\n 'password': arg_2})\n if arg_6.status_code != 200:\n arg_7 = arg_6.text\n try:\n arg_7 = json.loads(arg_7)['error']\n except (KeyError, ValueError):\n pass\n raise LuminosoLoginError(arg_7)\n\n return arg_0(arg_6.json()['result']['token'])"} +{"_id": "doc_7811", "title": "", "text": "def Func(arg_0):\n \"\"\"Set http session.\"\"\"\n if arg_0._session is None:\n arg_0._session = requests.session()\n # adding fake user-agent header\n arg_0._session.headers.update({'User-agent': str(UserAgent().random)})\n return arg_0._post_Func_page()"} +{"_id": "doc_7812", "title": "", "text": "def Func(arg_0):\n \"\"\"Login to enedis.\"\"\"\n arg_1 = {\n 'IDToken1': arg_0.username,\n 'IDToken2': arg_0.password,\n 'SunQueryParamsString': base64.b64encode(b'realm=particuliers'),\n 'encoded': 'true',\n 'gx_charset': 'UTF-8'\n }\n\n try:\n arg_0._session.post(LOGIN_URL,\n arg_1=arg_1,\n allow_redirects=False,\n timeout=arg_0._timeout)\n except OSError:\n raise PyLinkyError(\"Can not submit login form\")\n if 'iPlanetDirectoryPro' not in arg_0._session.cookies:\n raise PyLinkyError(\"Login error: Please check your username/password.\")\n return True"} +{"_id": "doc_7813", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"Get data.\"\"\"\n\n arg_4 = {\n '_' + REQ_PART + '_dateDebut': arg_2,\n '_' + REQ_PART + '_dateFin': arg_3\n }\n\n arg_5 = {\n 'p_p_id': REQ_PART,\n 'p_p_lifecycle': 2,\n 'p_p_state': 'normal',\n 'p_p_mode': 'view',\n 'p_p_resource_id': arg_1,\n 'p_p_cacheability': 'cacheLevelPage',\n 'p_p_col_id': 'column-1',\n 'p_p_col_pos': 1,\n 'p_p_col_count': 3\n }\n\n try:\n arg_6 = arg_0._session.post(DATA_URL,\n arg_4=arg_4,\n arg_5=arg_5,\n allow_redirects=False,\n timeout=arg_0._timeout)\n\n if 300 <= arg_6.status_code < 400:\n arg_6 = arg_0._session.post(DATA_URL,\n arg_4=arg_4,\n arg_5=arg_5,\n allow_redirects=False,\n timeout=arg_0._timeout)\n except OSError as e:\n raise PyLinkyError(\"Could not access enedis.fr: \" + str(e))\n\n if arg_6.text is \"\":\n raise PyLinkyError(\"No data\")\n\n if 302 == arg_6.status_code and 
\"/messages/maintenance.html\" in arg_6.text:\n raise PyLinkyError(\"Site in maintenance\")\n\n try:\n arg_7 = arg_6.json()\n except (OSError, json.decoder.JSONDecodeError, simplejson.errors.JSONDecodeError) as e:\n raise PyLinkyError(\"Impossible to decode response: \" + str(e) + \"\\nResponse was: \" + str(arg_6.text))\n\n if arg_7.get('etat').get('valeur') == 'erreur':\n raise PyLinkyError(\"Enedis.fr answered with an error: \" + str(arg_7))\n\n return arg_7.get('graphe')"} +{"_id": "doc_7814", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the latest data from Enedis.\"\"\"\n\n for arg_1 in [HOURLY, DAILY, MONTHLY, YEARLY]:\n arg_0._data[arg_1] = arg_0.get_data_per_period(arg_1)"} +{"_id": "doc_7815", "title": "", "text": "def Func(arg_0):\n \"\"\" Load the view on first load \"\"\"\n if arg_0.__class__.view:\n return\n \n #: Load the View class from the dotted view name\n with enaml.imports():\n arg_1 = pydoc.locate(arg_0.page.view)\n assert arg_1, \"Failed to import View: {}\".format(arg_0.page.view)\n \n #: Set initial view properties\n arg_0.__class__.view = arg_1(\n site=arg_0.site,\n page=arg_0.page,\n request=arg_0.request,\n )"} +{"_id": "doc_7816", "title": "", "text": "def Func(arg_0):\n \"\"\" Load the view on first load could also load based on session, group, etc.. \n \"\"\"\n if arg_0.__class__.view:\n arg_0.view.handler = arg_0\n arg_0.view.request = arg_0.request\n return\n \n #: Load the View class from the dotted view name\n with enaml.imports():\n from views.index import View\n \n #: Set initial view properties\n arg_0.__class__.view = View(\n company=current_company,\n arg_3=arg_0.request,\n arg_2=arg_0,\n )"} +{"_id": "doc_7817", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n #: Render view for Func request, view is cached for websocket\n \"\"\" Execute the correct handler depending on what is connecting. 
\"\"\"\n if arg_0.is_websocket():\n return super(DemoHandler, arg_0).Func(*arg_1, **arg_2)\n else:\n #return tornado.web.RequestHandler.Func(self, *args, **kwargs)\n arg_0.write(arg_0.view.render())"} +{"_id": "doc_7818", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" When enaml.js sends a message \"\"\"\n #: Decode message\n arg_2 = tornado.escape.json_decode(arg_1)\n #print change\n #: Get the owner ID\n arg_3 = arg_2.get('ref')\n if not arg_3:\n return\n \n #: Get the server side representation of the node\n #: If found will return the View declaration node\n arg_4 = arg_0.view.xpath('//*[@ref=\"{}\"]'.format(arg_3), first=True)\n if arg_4 is None:\n return\n \n #: Handle the event\n if arg_2.get('type') and arg_2.get('name'):\n if arg_2['type'] == 'event':\n #: Trigger the event\n arg_5 = getattr(arg_4, arg_2['name'])\n arg_5()\n if arg_2['type'] == 'update':\n #: Trigger the update\n setattr(arg_4, arg_2['name'], arg_2['value'])"} +{"_id": "doc_7819", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\" When pages change, update the menus\"\"\"\n arg_2 = {}\n \n #: Get all links\n arg_3 = [p.link for p in arg_0.pages if p.link] + arg_0.links \n \n #: Put all links in the correct menu\n for arg_4 in arg_3:\n for arg_5 in arg_4.menus:\n if arg_5 not in arg_2:\n arg_2[arg_5] = []\n arg_2[arg_5].append(arg_4)\n \n #: Update the menus\n for arg_6,arg_5 in arg_2.items():\n arg_7 = '{}_menu'.format(arg_6)\n if hasattr(arg_0,arg_7):\n setattr(arg_0,arg_7,arg_5)"} +{"_id": "doc_7820", "title": "", "text": "def Func(arg_0):\n \"\"\" Generate the handlers for this site \"\"\"\n arg_1 = os.path.abspath(os.path.join(os.path.dirname(__file__),\"static\"))\n arg_2 = [\n (r\"/static/(.*)\", cyclone.web.StaticFileHandler, {\"path\": arg_1}),\n ]\n for arg_3 in arg_0.pages:\n arg_4 = arg_3.handler\n arg_4.site = arg_0\n arg_4.page = arg_3\n arg_2.append((arg_3.link.url,arg_4))\n return arg_2"} +{"_id": "doc_7821", "title": "", "text": "def Func(arg_0):\n \"\"\" Create the toolkit widget for the proxy object.\n\n This method is called during the top-down pass, just before the\n 'init_widget()' method is called. This method should create the\n toolkit widget and assign it to the 'widget' attribute.\n\n \"\"\"\n arg_0.widget = SubElement(arg_0.parent_widget(), arg_0.declaration.tag)"} +{"_id": "doc_7822", "title": "", "text": "def Func(arg_0):\n \"\"\" Initialize the state of the toolkit widget.\n\n This method is called during the top-down pass, just after the\n 'create_widget()' method is called. This method should init the\n state of the widget. 
The child widgets will not yet be created.\n\n \"\"\"\n arg_1 = arg_0.widget\n arg_2 = arg_0.declaration\n\n #: Save ref id\n arg_3 = arg_2.ref\n arg_4[arg_3] = atomref(arg_0)\n arg_1.set('ref', arg_3)\n\n if arg_2.text:\n arg_0.set_text(arg_2.text)\n if arg_2.tail:\n arg_0.set_tail(arg_2.tail)\n if arg_2.style:\n arg_0.set_style(arg_2.style)\n if arg_2.cls:\n arg_0.set_cls(arg_2.cls)\n if arg_2.attrs:\n arg_0.set_attrs(arg_2.attrs)\n if arg_2.id:\n arg_1.set('id', arg_2.id)\n if arg_2.draggable:\n arg_0.set_draggable(arg_2.draggable)\n\n # Set any attributes that may be defined\n for arg_5, arg_6 in arg_2.members().items():\n if not arg_6.metadata:\n continue\n arg_7 = arg_6.metadata\n\n # Exclude any attr tags\n if not (arg_7.get('d_member') and arg_7.get('d_final')):\n continue\n\n # Skip any items with attr=false\n elif not arg_7.get('attr', True):\n continue\n\n elif isinstance(arg_6, Event):\n continue\n arg_8 = getattr(arg_2, arg_5)\n if arg_8:\n arg_0.set_attribute(arg_5, arg_8)"} +{"_id": "doc_7823", "title": "", "text": "def Func(arg_0):\n \"\"\" A reimplemented destructor.\n\n This destructor will clear the reference to the toolkit widget\n and set its parent to None.\n\n \"\"\"\n arg_1 = arg_0.widget\n if arg_1 is not None:\n arg_2 = arg_1.getparent()\n if arg_2 is not None:\n arg_2.remove(arg_1)\n del arg_0.widget\n\n arg_3 = arg_0.declaration\n try:\n del CACHE[arg_3.ref]\n except KeyError:\n pass\n super(WebComponent, arg_0).Func()"} +{"_id": "doc_7824", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Handle the child added event from the declaration.\n\n This handler will insert the child toolkit widget in the correct.\n position. Subclasses which need more control should reimplement this\n method.\n\n \"\"\"\n super(WebComponent, arg_0).Func(arg_1)\n if arg_1.widget is not None:\n # Use insert to put in the correct spot\n for arg_2, arg_3 in enumerate(arg_0.children()):\n if arg_3 == arg_1:\n arg_0.widget.insert(arg_2, arg_1.widget)\n break"} +{"_id": "doc_7825", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Handle the child removed event from the declaration.\n\n This handler will unparent the child toolkit widget. Subclasses\n which need more control should reimplement this method.\n\n \"\"\"\n super(WebComponent, arg_0).Func(arg_1)\n if arg_1.widget is not None:\n for arg_2, arg_3 in enumerate(arg_0.children()):\n if arg_3 == arg_1:\n del arg_0.widget[arg_2]\n break"} +{"_id": "doc_7826", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Default handler for those not explicitly defined \"\"\"\n if arg_2 is True:\n arg_0.widget.set(arg_1, arg_1)\n elif arg_2 is False:\n del arg_0.widget.attrib[arg_1]\n else:\n arg_0.widget.set(arg_1, str(arg_2))"} +{"_id": "doc_7827", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Update the proxy widget when the Widget data\n changes.\n \"\"\"\n #: Try default handler\n if arg_1['type'] == 'update' and arg_0.proxy_is_active:\n arg_2 = getattr(arg_0.proxy, 'set_' + arg_1['name'], None)\n if arg_2 is not None:\n arg_2(arg_1['value'])\n else:\n arg_0.proxy.set_attribute(arg_1['name'], arg_1['value'])\n arg_0._notify_modified(arg_1)"} +{"_id": "doc_7828", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\" Find nodes matching the given Func query \"\"\"\n arg_3 = arg_0.proxy.find(arg_1, **arg_2)\n return [arg_4.declaration for arg_4 in arg_3]"} +{"_id": "doc_7829", "title": "", "text": "def Func(arg_0):\n \"\"\" Initialize the widget with the source. 
\"\"\"\n arg_1 = arg_0.declaration\n if arg_1.source:\n arg_0.set_source(arg_1.source)\n else:\n super(RawComponent, arg_0).Func()"} +{"_id": "doc_7830", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" A change handler for the 'objects' list of the Include.\n\n If the object is initialized objects which are removed will be\n unparented and objects which are added will be reparented. Old\n objects will be destroyed if the 'destroy_old' flag is True.\n\n \"\"\"\n if arg_0.is_initialized and arg_1['type'] == 'update':\n arg_2 = arg_1['oldvalue']\n for arg_3 in arg_0.children:\n arg_2.children.remove(arg_3)\n arg_3.set_parent(None)\n arg_0.refresh_items()"} +{"_id": "doc_7831", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" When the children of the block change. Update the referenced\n block.\n \n \"\"\"\n if not arg_0.is_initialized or arg_1['type'] != 'update':\n return\n\n arg_2 = arg_0.block\n arg_3 = arg_1['value']\n arg_4 = arg_1['oldvalue']\n for arg_5 in arg_4:\n if arg_5 not in arg_3 and not arg_5.is_destroyed:\n arg_5.destroy()\n else:\n arg_5.set_parent(None)\n \n if arg_2:\n # This block is inserting into another block\n arg_6 = None\n if arg_0.mode == 'replace':\n arg_2.children = []\n if arg_0.mode == 'prepend' and arg_2.children:\n arg_6 = arg_2.children[0]\n arg_2.insert_children(arg_6, arg_3)\n else:\n # This block is a placeholder\n arg_0.parent.insert_children(arg_0, arg_3)"} +{"_id": "doc_7832", "title": "", "text": "def Func(arg_0, arg_1='push'):\n \"\"\"\n Registers a function as a Func. Multiple Funcs can be registered for a given type, but the\n order in which they are invoke is unspecified.\n\n :param event_type: The event type this Func will be invoked for.\n \"\"\"\n\n def decorator(arg_2):\n arg_0._Funcs[arg_1].append(arg_2)\n return arg_2\n\n return decorator"} +{"_id": "doc_7833", "title": "", "text": "def Func(arg_0):\n \"\"\"Callback from Flask\"\"\"\n\n arg_1 = arg_0._get_digest()\n\n if arg_1 is not None:\n arg_2 = _get_header('X-Hub-Signature').split('=', 1)\n if not isinstance(arg_1, six.text_type):\n arg_1 = six.text_type(arg_1)\n\n if (len(arg_2) < 2 or arg_2[0] != 'sha1'\n or not hmac.compare_digest(arg_2[1], arg_1)):\n abort(400, 'Invalid signature')\n\n arg_3 = _get_header('X-Github-Event')\n arg_4 = request.get_json()\n\n if arg_4 is None:\n abort(400, 'Request body must contain json')\n\n arg_0._logger.info(\n '%s (%s)', _format_event(arg_3, arg_4), _get_header('X-Github-Delivery'))\n\n for arg_5 in arg_0._hooks.get(arg_3, []):\n arg_5(arg_4)\n\n return '', 204"} +{"_id": "doc_7834", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Remove common indentation from string.\n\n Unlike doctrim there is no special treatment of the first line.\n\n \"\"\"\n try:\n # Determine minimum indentation:\n arg_1 = min(len(arg_2) - len(arg_2.lstrip())\n for arg_2 in arg_0 if arg_2)\n except ValueError:\n return arg_0\n else:\n return [arg_2[arg_1:] for arg_2 in arg_0]"} +{"_id": "doc_7835", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Find all section names and return a list with their names.\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0:\n if is_heading(arg_2):\n arg_1.append(get_heading(arg_2))\n return arg_1"} +{"_id": "doc_7836", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"\n Generate table of contents for array of section names.\n \"\"\"\n if not arg_0:\n return []\n arg_2 = min(n for n,t in arg_0)\n arg_3 = []\n for arg_4,arg_5 in arg_0:\n if arg_1 and arg_4-arg_2+1 > arg_1:\n continue\n arg_6 = arg_5.lower()\n arg_6 = 
arg_6.replace('`', '')\n arg_6 = arg_6.replace(' ', '-')\n arg_6 = arg_6.replace('?', '')\n arg_3.append(\" \"*(arg_4-arg_2) + \"- [%s](#%s)\" % (arg_5, arg_6))\n return arg_3"} +{"_id": "doc_7837", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Print `msg` Func and exit with status `exit_code`\n \"\"\"\n sys.stderr.write(\"%s\\ntry 'mongotail --help' for more information\\n\" % arg_0)\n sys.stderr.flush()\n exit(arg_1)"} +{"_id": "doc_7838", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Gets a Item from the Menu by name. Note that the name is not\n case-sensitive but must be spelt correctly.\n\n :param string name: The name of the item.\n :raises StopIteration: Raises exception if no item is found.\n :return: An item object matching the search.\n :rtype: Item\n '''\n return next(arg_2 for arg_2 in arg_0.items if arg_2.name.lower() == arg_1.lower())"} +{"_id": "doc_7839", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Clear out the current session on the remote and setup a new one.\n\n :return: A response from having expired the current session.\n :rtype: requests.Response\n '''\n arg_2 = arg_0.__get('/Home/SessionExpire')\n arg_0.session = update_session_headers(arg_1)\n\n return arg_2"} +{"_id": "doc_7840", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Search for dominos pizza stores using a search term.\n\n :param string search: Search term.\n :return: A list of nearby stores matching the search term.\n :rtype: list\n '''\n arg_2 = {'SearchText': arg_1}\n arg_3 = arg_0.__get('/storefindermap/storesearch', arg_2=arg_2)\n\n return Stores(arg_3.json())"} +{"_id": "doc_7841", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.MEDIUM, arg_5=1):\n '''\n Add an item to the current basket.\n\n :param Item item: Item from menu.\n :param int variant: Item SKU id. Ignored if the item is a side.\n :param int quantity: The quantity of item to be added.\n :return: A response having added an item to the current basket.\n :rtype: requests.Response\n '''\n arg_6 = arg_1.type\n\n if arg_6 == 'Pizza':\n return arg_0.add_pizza_to_basket(arg_1, arg_2, arg_5)\n elif arg_6 == 'Side':\n return arg_0.add_side_to_basket(arg_1, arg_5)\n return None"} +{"_id": "doc_7842", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.MEDIUM, arg_5=1):\n '''\n Add a pizza to the current basket.\n\n :param Item item: Item from menu.\n :param int variant: Item SKU id. 
Some defaults are defined in the VARIANT enum.\n :param int quantity: The quantity of pizza to be added.\n :return: A response having added a pizza to the current basket.\n :rtype: requests.Response\n '''\n arg_6 = arg_1[arg_2]\n arg_7 = arg_6['ingredients'].update([36, 42])\n\n arg_8 = {\n 'stepId': 0,\n 'quantity': arg_5,\n 'sizeId': arg_2,\n 'productId': arg_1.item_id,\n 'ingredients': arg_7,\n 'productIdHalfTwo': 0,\n 'ingredientsHalfTwo': [],\n 'recipeReferrer': 0\n }\n\n return arg_0.__post('/Basket/AddPizza', json=arg_8)"} +{"_id": "doc_7843", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n '''\n Add a side to the current basket.\n\n :param Item item: Item from menu.\n :param int quantity: The quantity of side to be added.\n :return: A response having added a side to the current basket.\n :rtype: requests.Response\n '''\n arg_3 = arg_1[VARIANT.PERSONAL]\n\n arg_4 = {\n 'productSkuId': arg_3['productSkuId'],\n 'quantity': arg_2,\n 'ComplimentaryItems': []\n }\n\n return arg_0.__post('/Basket/AddProduct', json=arg_4)"} +{"_id": "doc_7844", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Remove an item from the current basket.\n\n :param int idx: Basket item id.\n :return: A response having removed an item from the current basket.\n :rtype: requests.Response\n '''\n arg_2 = {\n 'basketItemId': arg_1,\n 'wizardItemDelete': False\n }\n\n return arg_0.__post('/Basket/RemoveBasketItem', json=arg_2)"} +{"_id": "doc_7845", "title": "", "text": "def Func(arg_0, arg_1=arg_2.CASH_ON_DELIVERY):\n '''\n Select the payment method going to be used to make a purchase.\n\n :param int method: Payment method id.\n :return: A response having set the payment option.\n :rtype: requests.Response\n '''\n arg_4 = {'paymentMethod': arg_1}\n return arg_0.__post('/PaymentOptions/SetPaymentMethod', json=arg_4)"} +{"_id": "doc_7846", "title": "", "text": "def Func(arg_0):\n '''\n Proceed with payment using the payment method selected earlier.\n\n :return: A response having processes the payment.\n :rtype: requests.Response\n '''\n arg_1 = {\n '__RequestVerificationToken': arg_0.session.cookies,\n 'method': 'submit'\n }\n\n return arg_0.__post('/PaymentOptions/Proceed', json=arg_1)"} +{"_id": "doc_7847", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n '''\n Make a HTTP GET request to the Dominos UK API with the given parameters\n for the current session.\n\n :param string path: The API endpoint path.\n :params list kargs: A list of arguments.\n :return: A response from the Dominos UK API.\n :rtype: response.Response\n '''\n return arg_0.__call_api(arg_0.session.get, arg_1, **arg_2)"} +{"_id": "doc_7848", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n '''\n Make a HTTP POST request to the Dominos UK API with the given\n parameters for the current session.\n\n :param string path: The API endpoint path.\n :params list kargs: A list of arguments.\n :return: A response from the Dominos UK API.\n :rtype: response.Response\n '''\n return arg_0.__call_api(arg_0.session.post, arg_1, **arg_2)"} +{"_id": "doc_7849", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n '''\n Make a HTTP request to the Dominos UK API with the given parameters for\n the current session.\n\n :param verb func: HTTP method on the session.\n :param string path: The API endpoint path.\n :params list kargs: A list of arguments.\n :return: A response from the Dominos UK API.\n :rtype: response.Response\n '''\n arg_4 = arg_1(arg_0.__url(arg_2), **arg_3)\n\n if arg_4.status_code != 200:\n raise ApiError('{}: 
{}'.format(arg_4.status_code, arg_4))\n\n return arg_4"} +{"_id": "doc_7850", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Add an item to the end of the menu before the exit item\n\n :param MenuItem item: The item to be added\n \"\"\"\n arg_2 = arg_0.remove_exit()\n arg_1.menu = arg_0\n arg_0.items.append(arg_1)\n if arg_2:\n arg_0.add_exit()\n if arg_0.screen:\n arg_4, arg_5 = arg_0.screen.getmaxyx()\n if arg_4 < 6 + len(arg_0.items):\n arg_0.screen.resize(6 + len(arg_0.items), arg_5)\n arg_0.draw()"} +{"_id": "doc_7851", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Add the exit item if necessary. Used to make sure there aren't multiple exit items\n\n :return: True if item needed to be added, False otherwise\n :rtype: bool\n \"\"\"\n if arg_0.items:\n if arg_0.items[-1] is not arg_0.exit_item:\n arg_0.items.append(arg_0.exit_item)\n return True\n return False"} +{"_id": "doc_7852", "title": "", "text": "def Func(arg_0):\n \"\"\"\n ReFuncs the menu and refreshes the screen. Should be called whenever something changes that needs to be reFuncn.\n \"\"\"\n\n arg_0.screen.border(0)\n if arg_0.title is not None:\n arg_0.screen.addstr(2, 2, arg_0.title, curses.A_STANDOUT)\n if arg_0.subtitle is not None:\n arg_0.screen.addstr(4, 2, arg_0.subtitle, curses.A_BOLD)\n\n for arg_1, arg_2 in enumerate(arg_0.items):\n if arg_0.current_option == arg_1:\n arg_3 = arg_0.highlight\n else:\n arg_3 = arg_0.normal\n arg_0.screen.addstr(5 + arg_1, 4, arg_2.show(arg_1), arg_3)\n\n arg_4, arg_5 = CursesMenu.stdscr.getmaxyx()\n arg_6 = 0\n if 6 + len(arg_0.items) > arg_4:\n if arg_4 + arg_0.current_option < 6 + len(arg_0.items):\n arg_6 = arg_0.current_option\n else:\n arg_6 = 6 + len(arg_0.items) - arg_4\n\n arg_0.screen.refresh(arg_6, 0, 0, 0, arg_4 - 1, arg_5 - 1)"} +{"_id": "doc_7853", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Gets the next single character and decides what to do with it\n \"\"\"\n arg_1 = arg_0.get_input()\n\n arg_2 = ord(\"9\") if len(arg_0.items) >= 9 else ord(str(len(arg_0.items)))\n\n if ord('1') <= arg_1 <= arg_2:\n arg_0.go_to(arg_1 - ord('0') - 1)\n elif arg_1 == curses.KEY_DOWN:\n arg_0.go_down()\n elif arg_1 == curses.KEY_UP:\n arg_0.go_up()\n elif arg_1 == ord(\"\\n\"):\n arg_0.select()\n\n return arg_1"} +{"_id": "doc_7854", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Select the current item and run it\n \"\"\"\n arg_0.Funced_option = arg_0.current_option\n arg_0.Funced_item.set_up()\n arg_0.Funced_item.action()\n arg_0.Funced_item.clean_up()\n arg_0.returned_value = arg_0.Funced_item.get_return()\n arg_0.should_exit = arg_0.Funced_item.should_exit\n\n if not arg_0.should_exit:\n arg_0.draw()"} +{"_id": "doc_7855", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Take an old-style menuData dictionary and return a CursesMenu\n\n :param dict menu_data:\n :return: A new CursesMenu\n :rtype: CursesMenu\n \"\"\"\n arg_1 = arg_0['title']\n arg_2 = CursesMenu(arg_1)\n for arg_3 in arg_0[\"options\"]:\n arg_4 = arg_3[\"type\"]\n arg_5 = arg_3[\"title\"]\n if arg_4 == menuItem.COMMAND:\n arg_6 = arg_3[\"command\"]\n arg_2.append_item(CommandItem(arg_5, arg_6, arg_2))\n elif arg_4 == menuItem.FUNCTION:\n arg_7 = arg_3[\"function\"]\n arg_2.append_item(FunctionItem(arg_5, arg_7, arg_2))\n elif arg_4 == menuItem.EXITMENU:\n arg_2.append_item(ExitItem(arg_5, arg_2))\n elif arg_4 == menuItem.NUMBER:\n arg_2.append_item(SelectionItem(arg_5, arg_2))\n elif arg_4 == menuItem.MENU:\n arg_8 = Func(arg_3)\n arg_2.append_item(SubmenuItem(arg_5, arg_2, arg_8))\n\n return 
arg_2"} +{"_id": "doc_7856", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Compute the maximum temporal distance.\n\n Returns\n -------\n max_temporal_distance : float\n \"\"\"\n arg_1 = [block.distance_start for block in arg_0._profile_blocks if\n block.distance_start < float('inf')]\n arg_2 = [block.distance_end for block in arg_0._profile_blocks if\n block.distance_end < float('inf')]\n arg_3 = arg_1 + arg_2\n if len(arg_3) > 0:\n return max(arg_3)\n else:\n return None"} +{"_id": "doc_7857", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Temporal distance cumulative density function.\n\n Returns\n -------\n x_values: numpy.array\n values for the x-axis\n cdf: numpy.array\n cdf values\n \"\"\"\n arg_1 = set()\n for arg_2 in arg_0._profile_blocks:\n if arg_2.distance_start != float('inf'):\n arg_1.add(arg_2.distance_end)\n arg_1.add(arg_2.distance_start)\n\n arg_3 = numpy.array(sorted(list(arg_1)))\n arg_4 = arg_3[1:] - arg_3[:-1]\n arg_5 = numpy.zeros(len(arg_4))\n arg_6 = defaultdict(lambda: 0)\n\n for arg_2 in arg_0._profile_blocks:\n if arg_2.distance_start == arg_2.distance_end:\n arg_6[arg_2.distance_end] += arg_2.width()\n else:\n arg_7 = numpy.searchsorted(arg_3, arg_2.distance_end)\n arg_8 = numpy.searchsorted(arg_3, arg_2.distance_start)\n arg_5[arg_7:arg_8] += 1\n\n arg_9 = numpy.array([0] + list(numpy.cumsum(arg_4 * arg_5)))\n if not (numpy.isclose(\n [arg_9[-1]],\n [arg_0._end_time - arg_0._start_time - sum(arg_6.values())], atol=1E-4\n ).all()):\n print(arg_9[-1], arg_0._end_time - arg_0._start_time - sum(arg_6.values()))\n raise RuntimeError(\"Something went wrong with cdf computation!\")\n\n if len(arg_6) > 0:\n for arg_10 in arg_6.keys():\n if arg_10 == float('inf'):\n continue\n arg_11 = numpy.nonzero(arg_3 == arg_10)[0][0]\n arg_9 = numpy.insert(arg_9, arg_11, arg_9[arg_11])\n arg_3 = numpy.insert(arg_3, arg_11,\n arg_3[arg_11])\n # walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)\n arg_9[(arg_11 + 1):] = arg_9[(arg_11 + 1):] + arg_6[arg_10]\n\n arg_12 = arg_9 / (arg_9[-1] + arg_6[float('inf')])\n return arg_3, arg_12"} +{"_id": "doc_7858", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Remove dangling entries from the shapes directory.\n\n Parameters\n ----------\n db_conn: sqlite3.Connection\n connection to the GTFS object\n \"\"\"\n arg_0.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)\n arg_1 = \\\n \"SELECT trips.trip_I, shape_id, min(shape_break) as min_shape_break, max(shape_break) as max_shape_break FROM trips, stop_times WHERE trips.trip_I=stop_times.trip_I GROUP BY trips.trip_I\"\n arg_2= pandas.read_sql(arg_1, arg_0)\n\n arg_3 = []\n for arg_4 in arg_2.itertuples():\n arg_5, arg_6, arg_7 = arg_4.shape_id, arg_4.min_shape_break, arg_4.max_shape_break\n if arg_6 is None or arg_7 is None:\n arg_6 = float('-inf')\n arg_7 = float('-inf')\n arg_3.append( (arg_5, arg_6, arg_7) )\n arg_8 = \"DELETE FROM shapes WHERE shape_id=? 
AND (seq?)\"\n arg_0.executemany(arg_8, arg_3)\n Func_references(arg_0)"} +{"_id": "doc_7859", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3,\n arg_4, arg_5):\n \"\"\"\n Given a set of transit events and the static walk network,\n \"transform\" the static walking network into a set of \"pseudo-connections\".\n\n As a first approximation, we add pseudo-connections to depart after each arrival of a transit connection\n to it's arrival stop.\n\n Parameters\n ----------\n transit_connections: list[Connection]\n start_time_dep : int\n start time in unixtime seconds\n end_time_dep: int\n end time in unixtime seconds (no new connections will be scanned after this time)\n transfer_margin: int\n required extra margin required for transfers in seconds\n walk_speed: float\n walking speed between stops in meters / second\n walk_network: networkx.Graph\n each edge should have the walking distance as a data attribute (\"d_walk\") expressed in meters\n\n Returns\n -------\n pseudo_connections: set[Connection]\n \"\"\"\n # A pseudo-connection should be created after (each) arrival to a transit_connection's arrival stop.\n arg_6 = set() # use a set to ignore possible duplicates\n for arg_7 in arg_0:\n if arg_1 <= arg_7.departure_time <= arg_2:\n arg_8 = arg_7.departure_stop\n arg_9 = arg_7.departure_time - arg_3\n for arg_10, arg_11, arg_12 in arg_4.edges(nbunch=[arg_8], arg_12=True):\n arg_13 = arg_9 - arg_12['d_walk'] / float(arg_5)\n if arg_13 > arg_2 or arg_13 < arg_1:\n continue\n arg_14 = Connection(arg_11,\n arg_8,\n arg_13,\n arg_9,\n Connection.WALK_TRIP_ID,\n Connection.WALK_SEQ,\n is_walk=True)\n arg_6.add(arg_14)\n return arg_6"} +{"_id": "doc_7860", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the earliest visit time of the stop.\n \"\"\"\n if not arg_0.visit_events:\n return float('inf')\n else:\n return min(arg_0.visit_events, key=lambda event: event.arr_time_ut).arr_time_ut"} +{"_id": "doc_7861", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Whether the spreading stop can infect using this event.\n \"\"\"\n if arg_1.from_stop_I != arg_0.stop_I:\n return False\n\n if not arg_0.has_been_visited():\n return False\n else:\n arg_2 = arg_1.dep_time_ut-arg_0.get_min_visit_time()\n # if the gap between the earliest visit_time and current time is\n # smaller than the min. 
transfer time, the stop can pass the spreading\n # forward\n if (arg_2 >= arg_0.min_transfer_time) or (arg_1.trip_I == -1 and arg_2 >= 0):\n return True\n else:\n for arg_3 in arg_0.visit_events:\n # if no transfer, please hop-on\n if (arg_1.trip_I == arg_3.trip_I) and (arg_2 >= 0):\n return True\n return False"} +{"_id": "doc_7862", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create day_trips and day_stop_times views.\n\n day_trips: day_trips2 x trips = days x trips\n day_stop_times: day_trips2 x trips x stop_times = days x trips x stop_times\n \"\"\"\n arg_1.execute('DROP VIEW IF EXISTS main.day_trips')\n arg_1.execute('CREATE VIEW day_trips AS '\n 'SELECT day_trips2.*, trips.* '\n #'days.day_start_ut+trips.start_time_ds AS start_time_ut, '\n #'days.day_start_ut+trips.end_time_ds AS end_time_ut '\n 'FROM day_trips2 JOIN trips USING (trip_I);')\n arg_1.commit()\n\n arg_1.execute('DROP VIEW IF EXISTS main.day_stop_times')\n arg_1.execute('CREATE VIEW day_stop_times AS '\n 'SELECT day_trips2.*, trips.*, stop_times.*, '\n #'days.day_start_ut+trips.start_time_ds AS start_time_ut, '\n #'days.day_start_ut+trips.end_time_ds AS end_time_ut, '\n 'day_trips2.day_start_ut+stop_times.arr_time_ds AS arr_time_ut, '\n 'day_trips2.day_start_ut+stop_times.dep_time_ds AS dep_time_ut '\n 'FROM day_trips2 '\n 'JOIN trips USING (trip_I) '\n 'JOIN stop_times USING (trip_I)')\n arg_1.commit()"} +{"_id": "doc_7863", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Create a colourbar with limits of lwr and upr\"\"\"\n arg_2, arg_3 = matplotlib.colorbar.make_axes(matplotlib.pyplot.gca())\n arg_4 = matplotlib.colorbar.ColorbarBase(arg_2, arg_0=arg_0, arg_1=arg_1)\n return arg_4"} +{"_id": "doc_7864", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write temporal networks by route type to disk.\n\n Parameters\n ----------\n gtfs: gtfspy.GTFS\n extract_output_dir: str\n \"\"\"\n util.makedirs(arg_1)\n for arg_2 in route_types.TRANSIT_ROUTE_TYPES:\n arg_3 = temporal_network(arg_0, start_time_ut=None, end_time_ut=None, arg_2=arg_2)\n arg_4 = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[arg_2]\n arg_5 = os.path.join(arg_1, arg_4 + \".tnet\")\n arg_3.to_csv(arg_5, encoding='utf-8', index=False)"} +{"_id": "doc_7865", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Write out the database according to the GTFS format.\n\n Parameters\n ----------\n gtfs: gtfspy.GTFS\n output: str\n Path where to put the GTFS files\n if output ends with \".zip\" a ZIP-file is created instead.\n\n Returns\n -------\n None\n \"\"\"\n arg_1 = os.path.abspath(arg_1)\n arg_2 = \"tmp_\" + str(uuid.uuid1())\n if arg_1[-4:] == '.zip':\n arg_3 = True\n arg_4 = os.path.dirname(os.path.abspath(arg_1))\n if not os.path.exists(arg_4):\n raise IOError(arg_4 + \" does not exist, cannot write gtfs as a zip\")\n arg_5 = os.path.join(arg_4, str(arg_2))\n # zip_file_na,e = ../out_basedir + \".zip\n else:\n arg_3 = False\n arg_4 = arg_1\n arg_5 = os.path.join(arg_4 + \"_\" + str(arg_2))\n\n os.makedirs(arg_5, exist_ok=True)\n\n arg_6 = {\n \"agency\": _Func_agencies,\n \"calendar\": _Func_calendar,\n \"calendar_dates\": _Func_calendar_dates,\n # fare attributes and fare_rules omitted (seldomly used)\n \"feed_info\": _Func_feed_info,\n # \"frequencies\": not written, as they are incorporated into trips and routes,\n # Frequencies table is expanded into other tables on initial import. 
-> Thus frequencies.txt is not created\n \"routes\": _Func_routes,\n \"shapes\": _Func_shapes,\n \"stops\": _Func_stops,\n \"stop_times\": _Func_stop_times,\n \"transfers\": _Func_transfers,\n \"trips\": _Func_trips,\n }\n\n for arg_7, arg_8 in arg_6.items():\n arg_9 = os.path.join(arg_5, arg_7 + '.txt')\n print(arg_9)\n arg_8(arg_0, open(os.path.join(arg_5, arg_7 + '.txt'), 'w'))\n\n if arg_3:\n shutil.make_archive(arg_1[:-4], 'zip', arg_5)\n shutil.rmtree(arg_5)\n else:\n print(\"moving \" + str(arg_5) + \" to \" + arg_4)\n os.rename(arg_5, arg_4)"} +{"_id": "doc_7866", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Remove columns ending with I from a pandas.DataFrame\n\n Parameters\n ----------\n df: dataFrame\n\n Returns\n -------\n None\n \"\"\"\n arg_1 = list(filter(lambda el: el[-2:] == \"_I\", arg_0.columns))\n for arg_2 in arg_1:\n del arg_0[arg_2]"} +{"_id": "doc_7867", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None,\n arg_3=False, arg_4=False):\n \"\"\"Context manager for making files with possibility of failure.\n\n If you are creating a file, it is possible that the code will fail\n and leave a corrupt intermediate file. This is especially damaging\n if this is used as automatic input to another process. This context\n manager helps by creating a temporary filename, your code runs and\n creates that temporary file, and then if no exceptions are raised,\n the context manager will move the temporary file to the original\n filename you intended to open.\n\n Parameters\n ----------\n fname : str\n Target filename, this file will be created if all goes well\n fname_tmp : str\n If given, this is used as the temporary filename.\n tmpdir : str or bool\n If given, put temporary files in this directory. If `True`,\n then find a good tmpdir that is not on local filesystem.\n save_tmpfile : bool\n If true, the temporary file is not deleteted if an exception\n is raised.\n keepext : bool, default False\n If true, have tmpfile have same extension as final file.\n\n Returns (as context manager value)\n ----------------------------------\n fname_tmp: str\n Temporary filename to be used. Same as `fname_tmp`\n if given as an argument.\n\n Raises\n ------\n Re-raises any except occuring during the context block.\n \"\"\"\n # Do nothing if requesting sqlite memory DB.\n if arg_0 == ':memory:':\n yield arg_0\n return\n if arg_1 is None:\n # no tmpfile name given - compute some basic info\n arg_5 = os.path.basename(arg_0)\n arg_6, arg_7 = os.path.splitext(arg_5)\n arg_8 = this_dir = os.path.dirname(arg_0)\n # Remove filename extension, in case this matters for\n # automatic things itself.\n if not arg_4:\n arg_6 = arg_6 + arg_7\n arg_7 = ''\n if arg_2:\n # we should use a different temporary directory\n if arg_2 is True:\n # Find a directory ourself, searching some common\n # places.\n for arg_9 in possible_tmpdirs:\n if os.access(arg_9, os.F_OK):\n arg_8 = arg_9\n break\n # Make the actual tmpfile, with our chosen tmpdir, directory,\n # extension. Set it to not delete automatically, since on\n # success we will move it to elsewhere.\n arg_10 = tempfile.NamedTemporaryFile(\n prefix='tmp-' + arg_6 + '-', suffix=arg_7, dir=arg_8, delete=False)\n arg_1 = arg_10.name\n try:\n yield arg_1\n except Exception as e:\n if arg_3:\n print(\"Temporary file is '%s'\" % arg_1)\n else:\n os.unlink(arg_1)\n raise\n # Move the file back to the original location.\n try:\n os.rename(arg_1, arg_0)\n # We have to manually set permissions. 
tempfile does not use\n # umask, for obvious reasons.\n os.chmod(arg_0, 0o777 & ~current_umask)\n # 'Invalid cross-device link' - you can't rename files across\n # filesystems. So, we have to fallback to moving it. But, we\n # want to move it using tmpfiles also, so that the final file\n # appearing is atomic. We use... tmpfiles.\n except OSError as e:\n # New temporary file in same directory\n arg_11 = tempfile.NamedTemporaryFile(\n prefix='tmp-' + arg_6 + '-', suffix=arg_7, dir=this_dir, delete=False)\n # Copy contents over\n shutil.copy(arg_1, arg_11.name)\n # Rename new tmpfile, unlink old one on other filesystem.\n os.rename(arg_11.name, arg_0)\n os.chmod(arg_0, 0o666 & ~current_umask)\n os.unlink(arg_1)"} +{"_id": "doc_7868", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Utility function to print sqlite queries before executing.\n\n Use instead of cur.Func(). First argument is cursor.\n\n cur.Func(stmt)\n becomes\n util.Func(cur, stmt)\n \"\"\"\n arg_2 = arg_1[0]\n if len(arg_1) > 1:\n arg_2 = arg_2.replace('%', '%%').replace('?', '%r')\n print(arg_2 % (arg_1[1]))\n return arg_0.Func(*arg_1)"} +{"_id": "doc_7869", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create directories if they do not exist, otherwise do nothing.\n\n Return path for convenience\n \"\"\"\n if not os.path.isdir(arg_0):\n os.Func(arg_0)\n return arg_0"} +{"_id": "doc_7870", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Checks for rows that are not referenced in the the tables that should be linked\n\n stops <> stop_times using stop_I\n stop_times <> trips <> days, using trip_I\n trips <> routes, using route_I\n :return:\n \"\"\"\n for arg_1, arg_2 in zip(DANGLER_QUERIES, DANGLER_WARNINGS):\n arg_3 = arg_0.gtfs.execute_custom_query(arg_1).fetchone()[0]\n if arg_3 > 0:\n if arg_0.verbose:\n print(str(arg_3) + \" \" + arg_2)\n arg_0.warnings_container.add_warning(arg_2, arg_0.location, count=arg_3)"} +{"_id": "doc_7871", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"Print coordinates within a sequence.\n\n This is only used for debugging. Printed in a form that can be\n pasted into Python for visualization.\"\"\"\n arg_2 = [row['lat'] for row in arg_0]\n arg_3 = [row['lon'] for row in arg_0]\n print('COORDS'+'-' * 5)\n print(\"%slat, %slon = %r, %r\" % (arg_1, arg_1, arg_2, arg_3))\n print('-'*5)"} +{"_id": "doc_7872", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find corresponding shape points for a list of stops and create shape break points.\n\n Parameters\n ----------\n stops: stop-sequence (list)\n List of stop points\n shape: list of shape points\n shape-sequence of shape points\n\n Returns\n -------\n break_points: list[int]\n stops[i] corresponds to shape[break_points[i]]. This list can\n be used to partition the shape points into segments between\n one stop and the next.\n badness: float\n Lower indicates better fit to the shape. This is the sum of\n distances (in meters) between every each stop and its closest\n shape point. 
This is not needed in normal use, but in the\n cases where you must determine the best-fitting shape for a\n stop-sequence, use this.\n \"\"\"\n if not arg_1:\n return [], 0\n arg_2 = []\n arg_3 = 0\n arg_4 = 0\n arg_5 = 0\n arg_6 = float('inf')\n arg_7, arg_8 = None, None\n arg_9 = []\n for arg_10 in arg_0:\n arg_11, arg_12 = arg_10['lat'], arg_10['lon']\n arg_13 = float('inf')\n # print stop\n if arg_5 > 500 and arg_5 > 30 * len(arg_2):\n return [], arg_5\n for arg_14 in range(arg_3, len(arg_1)):\n d = wgs84_distance(arg_11, arg_12, arg_1[arg_14]['lat'], arg_1[arg_14]['lon'])\n if arg_7:\n arg_6 = wgs84_distance(arg_7, arg_8, arg_1[arg_14]['lat'], arg_1[arg_14]['lon'])\n # If we are getting closer to next stop, record this as\n # the best stop so far.continue\n if d < arg_13:\n arg_13 = d\n best_i = arg_14\n # print best_d, i, last_i, len(shape)\n arg_4 += d\n # We have to be very careful about our stop condition.\n # This is trial and error, basically.\n if (arg_6 < d) or (d > 500) or (arg_14 < best_i + 100):\n continue\n # We have decided our best stop, stop looking and continue\n # the outer loop.\n else:\n arg_5 += arg_13\n arg_2.append(best_i)\n arg_3 = best_i\n arg_7, arg_8 = arg_11, arg_12\n arg_9.append(arg_1[best_i])\n break\n else:\n # Executed if we did *not* break the inner loop\n arg_5 += arg_13\n arg_2.append(best_i)\n arg_3 = best_i\n arg_7, arg_8 = arg_11, arg_12\n arg_9.append(arg_1[best_i])\n pass\n # print \"Badness:\", badness\n # print_coords(stops, 'stop')\n # print_coords(shape, 'shape')\n # print_coords(break_shape_points, 'break')\n return arg_2, arg_5"} +{"_id": "doc_7873", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=''):\n \"\"\"Get all scheduled stops on a particular route_id.\n\n Given a route_id, return the trip-stop-list with\n latitude/longitudes. This is a bit more tricky than it seems,\n because we have to go from table route->trips->stop_times. This\n functions finds an arbitrary trip (in trip table) with this route ID\n and, and then returns all stop points for that trip.\n\n Parameters\n ----------\n cur : sqlite3.Cursor\n cursor to sqlite3 DB containing GTFS\n route_id : string or any\n route_id to get stop points of\n offset : int\n LIMIT offset if you don't want the first trip returned.\n tripid_glob : string\n If given, allows you to limit tripids which can be selected.\n Mainly useful in debugging.\n\n Returns\n -------\n stop-list\n List of stops in stop-seq format.\n \"\"\"\n arg_4 = ''\n if arg_3:\n arg_4 = \"AND trip_id GLOB '%s'\" % arg_3\n arg_0.execute('SELECT seq, lat, lon '\n 'FROM (select trip_I from route '\n ' LEFT JOIN trips USING (route_I) '\n ' WHERE route_id=? %s limit 1 offset ? 
) '\n 'JOIN stop_times USING (trip_I) '\n 'LEFT JOIN stop USING (stop_id) '\n 'ORDER BY seq' % arg_4, (arg_1, arg_2))\n arg_5 = [dict(seq=row[0], lat=row[1], lon=row[2]) for row in arg_0]\n return arg_5"} +{"_id": "doc_7874", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Interpolate passage times for shape points.\n\n Parameters\n ----------\n shape_distances: list\n list of cumulative distances along the shape\n shape_breaks: list\n list of shape_breaks\n stop_times: list\n list of stop_times\n\n Returns\n -------\n shape_times: list of ints (seconds) / numpy array\n interpolated shape passage times\n\n The values of stop times before the first shape-break are given the first\n stopping time, and the any shape points after the last break point are\n given the value of the last shape point.\n \"\"\"\n arg_3 = np.zeros(len(arg_0))\n arg_3[:arg_1[0]] = arg_2[0]\n for arg_4 in range(len(arg_1)-1):\n arg_5 = arg_1[arg_4]\n arg_6 = arg_2[arg_4]\n arg_7 = arg_1[arg_4+1]\n arg_8 = arg_2[arg_4+1]\n if arg_5 == arg_7:\n arg_3[arg_5] = arg_2[arg_4]\n else:\n arg_9 = arg_0[arg_5:arg_7+1]\n arg_10 = ((np.array(arg_9)-float(arg_9[0])) /\n float(arg_9[-1] - arg_9[0]))\n arg_11 = (1.-arg_10)*arg_6+arg_10*arg_8\n arg_3[arg_5:arg_7] = arg_11[:-1]\n # deal final ones separately:\n arg_3[arg_1[-1]:] = arg_2[-1]\n return list(arg_3)"} +{"_id": "doc_7875", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the earliest arrival time at the target, given a departure time.\n\n Parameters\n ----------\n dep_time : float, int\n time in unix seconds\n transfer_margin: float, int\n transfer margin in seconds\n\n Returns\n -------\n arrival_time : float\n Arrival time in the given time unit (seconds after unix epoch).\n \"\"\"\n arg_3 = arg_1 + arg_0._walk_to_target_duration\n arg_4 = arg_1 + arg_2\n for arg_5 in arg_0._labels:\n if arg_5.departure_time >= arg_4 and arg_5.arrival_time_target < arg_3:\n arg_3 = arg_5.arrival_time_target\n return float(arg_3)"} +{"_id": "doc_7876", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None):\n \"\"\"\n Get a stop-to-stop network describing a single mode of travel.\n\n Parameters\n ----------\n gtfs : gtfspy.GTFS\n route_type : int\n See gtfspy.route_types.TRANSIT_ROUTE_TYPES for the list of possible types.\n link_attributes: list[str], optional\n defaulting to use the following link attributes:\n \"n_vehicles\" : Number of vehicles passed\n \"duration_min\" : minimum travel time between stops\n \"duration_max\" : maximum travel time between stops\n \"duration_median\" : median travel time between stops\n \"duration_avg\" : average travel time between stops\n \"d\" : distance along straight line (wgs84_distance)\n \"distance_shape\" : minimum distance along shape\n \"capacity_estimate\" : approximate capacity passed through the stop\n \"route_I_counts\" : dict from route_I to counts\n start_time_ut: int\n start time of the time span (in unix time)\n end_time_ut: int\n end time of the time span (in unix time)\n\n Returns\n -------\n net: networkx.DiGraph\n A directed graph Directed graph\n \"\"\"\n if arg_2 is None:\n arg_2 = DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES\n assert(arg_1 in route_types.TRANSIT_ROUTE_TYPES)\n\n arg_5 = arg_0.get_stops_for_route_type(arg_1)\n arg_6 = networkx.DiGraph()\n _add_stops_to_net(arg_6, arg_5)\n\n arg_7 = arg_0.get_transit_events(arg_3=arg_3,\n arg_4=arg_4,\n arg_1=arg_1)\n if len(arg_6.nodes()) < 2:\n assert arg_7.shape[0] == 0\n\n # group events by links, and loop over them 
(i.e. each link):\n arg_8 = arg_7.groupby(['from_stop_I', 'to_stop_I'], sort=False)\n for arg_9, arg_10 in arg_8:\n arg_11, arg_12 = arg_9\n assert isinstance(arg_10, pd.DataFrame)\n # 'dep_time_ut' 'arr_time_ut' 'shape_id' 'route_type' 'trip_I' 'duration' 'from_seq' 'to_seq'\n if arg_2 is None:\n arg_6.add_edge(arg_11, arg_12)\n else:\n arg_13 = {}\n if \"duration_min\" in arg_2:\n arg_13['duration_min'] = float(arg_10['duration'].min())\n if \"duration_max\" in arg_2:\n arg_13['duration_max'] = float(arg_10['duration'].max())\n if \"duration_median\" in arg_2:\n arg_13['duration_median'] = float(arg_10['duration'].median())\n if \"duration_avg\" in arg_2:\n arg_13['duration_avg'] = float(arg_10['duration'].mean())\n # statistics on numbers of vehicles:\n if \"n_vehicles\" in arg_2:\n arg_13['n_vehicles'] = int(arg_10.shape[0])\n if \"capacity_estimate\" in arg_2:\n arg_13['capacity_estimate'] = route_types.ROUTE_TYPE_TO_APPROXIMATE_CAPACITY[arg_1] \\\n * int(arg_10.shape[0])\n if \"d\" in arg_2:\n arg_14 = arg_6.node[arg_11]['lat']\n arg_15 = arg_6.node[arg_11]['lon']\n arg_16 = arg_6.node[arg_12]['lat']\n arg_17 = arg_6.node[arg_12]['lon']\n arg_18 = wgs84_distance(arg_14, arg_15, arg_16, arg_17)\n arg_13['d'] = int(arg_18)\n if \"distance_shape\" in arg_2:\n assert \"shape_id\" in arg_10.columns.values\n arg_19 = None\n for arg_20, arg_21 in enumerate(arg_10[\"shape_id\"].values):\n if arg_21 is not None:\n arg_19 = arg_20\n break\n if arg_19 is None:\n arg_13[\"distance_shape\"] = None\n else:\n arg_22 = arg_10.iloc[arg_19]\n arg_18 = arg_0.get_shape_distance_between_stops(\n arg_22[\"trip_I\"],\n int(arg_22[\"from_seq\"]),\n int(arg_22[\"to_seq\"])\n )\n arg_13['distance_shape'] = arg_18\n if \"route_I_counts\" in arg_2:\n arg_13[\"route_I_counts\"] = arg_10.groupby(\"route_I\").size().to_dict()\n arg_6.add_edge(arg_11, arg_12, attr_dict=arg_13)\n return arg_6"} +{"_id": "doc_7877", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Compute stop-to-stop networks for all travel modes and combine them into a single network.\n The modes of transport are encoded to a single network.\n The network consists of multiple links corresponding to each travel mode.\n Walk mode is not included.\n\n Parameters\n ----------\n gtfs: gtfspy.GTFS\n\n Returns\n -------\n net: networkx.MultiDiGraph\n keys should be one of route_types.TRANSIT_ROUTE_TYPES (i.e. 
GTFS route_types)\n \"\"\"\n arg_3 = networkx.MultiDiGraph()\n for arg_4 in route_types.TRANSIT_ROUTE_TYPES:\n arg_5 = stop_to_stop_network_for_route_type(arg_0, arg_4,\n arg_1=arg_1, arg_2=arg_2)\n for arg_6, arg_7, arg_8 in arg_5.edges(arg_8=True):\n arg_8['route_type'] = arg_4\n arg_3.add_edges_from(arg_5.edges(arg_8=True))\n arg_3.add_nodes_from(arg_5.nodes(arg_8=True))\n return arg_3"} +{"_id": "doc_7878", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None):\n \"\"\"\n Compute the temporal network of the data, and return it as a pandas.DataFrame\n\n Parameters\n ----------\n gtfs : gtfspy.GTFS\n start_time_ut: int | None\n start time of the time span (in unix time)\n end_time_ut: int | None\n end time of the time span (in unix time)\n route_type: int | None\n Specifies which mode of public transport are included, or whether all modes should be included.\n The int should be one of the standard GTFS route_types:\n (see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )\n If route_type is not specified, all modes are included.\n\n Returns\n -------\n events_df: pandas.DataFrame\n Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I\n \"\"\"\n arg_4 = arg_0.get_transit_events(arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3)\n arg_4.drop('to_seq', 1, inplace=True)\n arg_4.drop('shape_id', 1, inplace=True)\n arg_4.drop('duration', 1, inplace=True)\n arg_4.drop('route_id', 1, inplace=True)\n arg_4.rename(\n columns={\n 'from_seq': \"seq\"\n },\n inplace=True\n )\n return arg_4"} +{"_id": "doc_7879", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get stop pairs through which transfers take place\n\n Returns\n -------\n transfer_stop_pairs: list\n \"\"\"\n arg_1 = []\n arg_2 = None\n arg_3 = None\n for arg_4 in arg_0.legs:\n if arg_4.trip_id is not None and arg_4.trip_id != arg_3 and arg_2 is not None:\n arg_5 = (arg_2, arg_4.departure_stop)\n arg_1.append(arg_5)\n arg_2 = arg_4.arrival_stop\n arg_3 = arg_4.trip_id\n return arg_1"} +{"_id": "doc_7880", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get name of the GTFS timezone\n\n Returns\n -------\n timezone_name : str\n name of the time zone, e.g. \"Europe/Helsinki\"\n \"\"\"\n arg_1 = arg_0.conn.execute('SELECT timezone FROM agencies LIMIT 1').fetchone()\n if arg_1 is None:\n raise ValueError(\"This database does not have a timezone defined.\")\n return arg_1[0]"} +{"_id": "doc_7881", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Get the shapes of all routes.\n\n Parameters\n ----------\n use_shapes : bool, optional\n by default True (i.e. 
use shapes as the name of the function indicates)\n if False (fall back to lats and longitudes)\n\n Returns\n -------\n routeShapes: list of dicts that should have the following keys\n name, type, agency, lats, lons\n with types\n list, list, str, list, list\n \"\"\"\n arg_2 = arg_0.conn.cursor()\n\n # all shape_id:s corresponding to a route_I:\n # query = \"SELECT DISTINCT name, shape_id, trips.route_I, route_type\n # FROM trips LEFT JOIN routes USING(route_I)\"\n # data1 = pd.read_sql_query(query, self.conn)\n # one (arbitrary) shape_id per route_I (\"one direction\") -> less than half of the routes\n arg_3 = \"SELECT routes.name as name, shape_id, route_I, trip_I, routes.type, \" \\\n \" agency_id, agencies.name as agency_name, max(end_time_ds-start_time_ds) as trip_duration \" \\\n \"FROM trips \" \\\n \"LEFT JOIN routes \" \\\n \"USING(route_I) \" \\\n \"LEFT JOIN agencies \" \\\n \"USING(agency_I) \" \\\n \"GROUP BY routes.route_I\"\n arg_4 = pd.read_sql_query(arg_3, arg_0.conn)\n\n arg_5 = []\n for arg_6, arg_7 in enumerate(arg_4.itertuples()):\n arg_8 = {\"name\": str(arg_7.name), \"type\": int(arg_7.type), \"route_I\": arg_7.route_I, \"agency\": str(arg_7.agency_id),\n \"agency_name\": str(arg_7.agency_name)}\n # this function should be made also non-shape friendly (at this point)\n if arg_1 and arg_7.shape_id:\n arg_9 = shapes.get_shape_points2(arg_2, arg_7.shape_id)\n arg_10 = arg_9['lats']\n arg_11 = arg_9['lons']\n else:\n arg_12 = arg_0.get_trip_stop_coordinates(arg_7.trip_I)\n arg_10 = list(arg_12['lat'])\n arg_11 = list(arg_12['lon'])\n arg_8['lats'] = [float(lat) for lat in arg_10]\n arg_8['lons'] = [float(lon) for lon in arg_11]\n arg_5.append(arg_8)\n return arg_5"} +{"_id": "doc_7882", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get closest stop to a given location.\n\n Parameters\n ----------\n lat: float\n latitude coordinate of the location\n lon: float\n longitude coordinate of the location\n\n Returns\n -------\n stop_I: int\n the index of the stop in the database\n \"\"\"\n arg_3 = arg_0.conn.cursor()\n arg_4 = float(\"inf\")\n arg_5 = None\n arg_6 = arg_3.execute(\"SELECT stop_I, lat, lon FROM stops\")\n for arg_7, arg_8, arg_9 in arg_6:\n arg_10 = wgs84_distance(arg_1, arg_2, arg_8, arg_9)\n if arg_10 < arg_4:\n arg_4 = arg_10\n arg_5 = arg_7\n return arg_5"} +{"_id": "doc_7883", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Check that a trip takes place during a day\n\n Parameters\n ----------\n trip_I : int\n index of the trip in the gtfs data base\n day_start_ut : int\n the starting time of the day in unix time (seconds)\n\n Returns\n -------\n takes_place: bool\n boolean value describing whether the trip takes place during\n the given day or not\n \"\"\"\n arg_3 = \"SELECT * FROM days WHERE trip_I=? AND day_start_ut=?\"\n arg_4 = (arg_1, arg_2)\n arg_5 = arg_0.conn.cursor()\n arg_6 = list(arg_5.execute(arg_3, arg_4))\n if len(arg_6) == 0:\n return False\n else:\n assert len(arg_6) == 1, 'On a day, a trip_I should be present at most once'\n return True"} +{"_id": "doc_7884", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Get all possible day start times between start_ut and end_ut\n Currently this function is used only by get_tripIs_within_range_by_dsut\n\n Parameters\n ----------\n start_ut : list\n start time in unix time\n end_ut : list\n end time in unix time\n max_time_overnight : list\n the maximum length of time that a trip can take place on\n during the next day (i.e. 
after midnight run times like 25:35)\n\n Returns\n -------\n day_start_times_ut : list\n list of ints (unix times in seconds) for returning all possible day\n start times\n start_times_ds : list\n list of ints (unix times in seconds) stating the valid start time in\n day seconds\n end_times_ds : list\n list of ints (unix times in seconds) stating the valid end times in\n day_seconds\n \"\"\"\n if arg_3 is None:\n # 7 hours:\n arg_3 = 7 * 60 * 60\n\n # sanity checks for the timezone parameter\n # assert timezone < 14\n # assert timezone > -14\n # tz_seconds = int(timezone*3600)\n assert arg_1 < arg_2\n arg_4 = arg_0.day_start_ut(arg_1)\n # start_day_ds = int(start_ut+tz_seconds) % seconds_in_a_day #??? needed?\n arg_5 = arg_1 - arg_4\n # assert (start_day_ut+tz_seconds) % seconds_in_a_day == 0\n arg_6 = arg_0.day_start_ut(arg_2)\n # end_day_ds = int(end_ut+tz_seconds) % seconds_in_a_day #??? needed?\n # end_day_ds = end_ut - end_day_ut\n # assert (end_day_ut+tz_seconds) % seconds_in_a_day == 0\n\n # If we are early enough in a day that we might have trips from\n # the previous day still running, decrement the start day.\n if arg_5 < arg_3:\n arg_4 = arg_0.increment_day_start_ut(arg_4, n_days=-1)\n\n # day_start_times_ut = range(start_day_ut, end_day_ut+seconds_in_a_day, seconds_in_a_day)\n\n # Create a list of all possible day start times. This is roughly\n # range(day_start_ut, day_end_ut+1day, 1day).\n arg_7 = [arg_4]\n while arg_7[-1] < arg_6:\n arg_7.append(arg_0.increment_day_start_ut(arg_7[-1]))\n\n arg_8 = []\n arg_9 = []\n # For every possible day start:\n for arg_10 in arg_7:\n # start day_seconds starts at either zero, or time - daystart\n arg_11 = max(0, arg_1 - arg_10)\n arg_8.append(arg_11)\n # end day_seconds is time-day_start\n arg_12 = arg_2 - arg_10\n arg_9.append(arg_12)\n # Return three tuples which can be zip:ped together.\n return arg_7, arg_8, arg_9"} +{"_id": "doc_7885", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get all Func data as a pandas DataFrame for all Funcs, or an individual Func'\n\n Parameters\n ----------\n Func_I : int\n Func index\n\n Returns\n -------\n Func: pandas.DataFrame\n \"\"\"\n return pd.read_sql_query(\"SELECT * FROM Funcs WHERE Func_I={Func_I}\".format(arg_1=arg_1), arg_0.conn)"} +{"_id": "doc_7886", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Obtain a list of events that take place during a time interval.\n Each event needs to be only partially overlap the given time interval.\n Does not include walking events.\n\n Parameters\n ----------\n start_time_ut : int\n start of the time interval in unix time (seconds)\n end_time_ut: int\n end of the time interval in unix time (seconds)\n route_type: int\n consider only events for this route_type\n\n Returns\n -------\n events: pandas.DataFrame\n with the following columns and types\n dep_time_ut: int\n arr_time_ut: int\n from_stop_I: int\n to_stop_I: int\n trip_I : int\n shape_id : int\n route_type : int\n\n See also\n --------\n Func_in_time_span : an older version of the same thing\n \"\"\"\n arg_4 = arg_0._get_day_trips_table_name()\n arg_5 = \"SELECT stop_I, seq, trip_I, route_I, routes.route_id AS route_id, routes.type AS route_type, \" \\\n \"shape_id, day_start_ut+dep_time_ds AS dep_time_ut, day_start_ut+arr_time_ds AS arr_time_ut \" \\\n \"FROM \" + arg_4 + \" \" \\\n \"JOIN trips USING(trip_I) \" \\\n \"JOIN routes USING(route_I) \" \\\n \"JOIN stop_times USING(trip_I)\"\n\n arg_6 = []\n if arg_2:\n arg_6.append(arg_4 + 
\".start_time_ut< {end_time_ut}\".format(arg_2=arg_2))\n arg_6.append(\"dep_time_ut <={end_time_ut}\".format(arg_2=arg_2))\n if arg_1:\n arg_6.append(arg_4 + \".end_time_ut > {start_time_ut}\".format(arg_1=arg_1))\n arg_6.append(\"arr_time_ut >={start_time_ut}\".format(arg_1=arg_1))\n if arg_3 is not None:\n assert arg_3 in ALL_ROUTE_TYPES\n arg_6.append(\"routes.type={route_type}\".format(arg_3=arg_3))\n if len(arg_6) > 0:\n arg_5 += \" WHERE \"\n for arg_7, arg_8 in enumerate(arg_6):\n if arg_7 is not 0:\n arg_5 += \" AND \"\n arg_5 += arg_8\n # ordering is required for later stages\n arg_5 += \" ORDER BY trip_I, day_start_ut+dep_time_ds;\"\n arg_9 = pd.read_sql_query(arg_5, arg_0.conn)\n # 'filter' results so that only real \"events\" are taken into account\n arg_10 = numpy.nonzero(\n (arg_9['trip_I'][:-1].values == arg_9['trip_I'][1:].values) *\n (arg_9['seq'][:-1].values < arg_9['seq'][1:].values)\n )[0]\n arg_11 = arg_10 + 1\n # these should have same trip_ids\n assert (arg_9['trip_I'][arg_10].values == arg_9['trip_I'][arg_11].values).all()\n arg_12 = arg_9['trip_I'][arg_10]\n arg_13 = arg_9['stop_I'][arg_10]\n arg_14 = arg_9['stop_I'][arg_11]\n arg_15 = arg_9['shape_id'][arg_10]\n arg_16 = arg_9['dep_time_ut'][arg_10]\n arg_17 = arg_9['arr_time_ut'][arg_11]\n arg_18 = arg_9['route_type'][arg_10]\n arg_19 = arg_9['route_id'][arg_10]\n arg_20 = arg_9['route_I'][arg_10]\n arg_21 = arg_17.values - arg_16.values\n assert (arg_21 >= 0).all()\n arg_22 = arg_9['seq'][arg_10]\n arg_23 = arg_9['seq'][arg_11]\n arg_24 = zip(arg_13, arg_14, arg_16, arg_17,\n arg_15, arg_18, arg_19, arg_12,\n arg_21, arg_22, arg_23, arg_20)\n arg_25 = [\"from_stop_I\", \"to_stop_I\", \"dep_time_ut\", \"arr_time_ut\",\n \"shape_id\", \"route_type\", \"route_id\", \"trip_I\",\n \"duration\", \"from_seq\", \"to_seq\", \"route_I\"]\n arg_26 = pd.DataFrame.from_records(arg_24, arg_25=arg_25)\n return arg_26"} +{"_id": "doc_7887", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the first and last day_start_ut\n\n Returns\n -------\n first_day_start_ut: int\n last_day_start_ut: int\n \"\"\"\n arg_1 = arg_0.conn.cursor()\n arg_2, arg_3 = \\\n arg_1.execute(\"SELECT min(day_start_ut), max(day_start_ut) FROM days;\").fetchone()\n return arg_2, arg_3"} +{"_id": "doc_7888", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None):\n \"\"\"\n Recover pre-computed travel_impedance between od-pairs from the database.\n\n Returns\n -------\n values: number | Pandas DataFrame\n \"\"\"\n arg_5 = []\n arg_6 = []\n arg_5.append(\"from_stop_I\")\n arg_5.append(\"to_stop_I\")\n if arg_2 is not None:\n arg_6.append(\"from_stop_I=\" + str(int(arg_2)))\n if arg_3 is not None:\n arg_6.append(\"to_stop_I=\" + str(int(arg_3)))\n arg_7 = \"\"\n if len(arg_6) > 0:\n arg_7 = \" WHERE \" + \" AND \".join(arg_6)\n if not arg_4:\n arg_5.extend([\"min\", \"mean\", \"median\", \"max\"])\n else:\n arg_5.append(arg_4)\n arg_8 = \",\".join(arg_5)\n if not arg_8:\n arg_8 = \"*\"\n arg_9 = \"SELECT \" + arg_8 + \" FROM \" + arg_1 + arg_7 + \";\"\n arg_10 = pd.read_sql(arg_9, arg_0.conn)\n return arg_10"} +{"_id": "doc_7889", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Update the profile with the new labels.\n Each new label should have the same departure_time.\n\n Parameters\n ----------\n new_labels: list[LabelTime]\n\n Returns\n -------\n added: bool\n whether new_pareto_tuple was added to the set of pareto-optimal tuples\n \"\"\"\n if arg_0._closed:\n raise RuntimeError(\"Profile is 
closed, no Funcs can be made\")\n try:\n arg_3 = next(iter(arg_1)).departure_time\n except StopIteration:\n arg_3 = arg_2\n arg_0._check_dep_time_is_valid(arg_3)\n\n for arg_4 in arg_1:\n assert (arg_4.departure_time == arg_3)\n arg_5 = arg_0.dep_times_to_index[arg_3]\n\n if arg_5 > 0:\n # Departure time is modified in order to not pass on labels which are not Pareto-optimal when departure time is ignored.\n arg_6 = [label.get_copy_with_specified_departure_time(arg_3) for label\n in arg_0._label_bags[arg_5 - 1]]\n else:\n arg_6 = list()\n arg_6 += arg_0._label_bags[arg_5]\n\n arg_7 = arg_0._get_label_to_target(arg_3)\n if arg_7:\n arg_1 = arg_1 + [arg_7]\n arg_8 = merge_pareto_frontiers(arg_1, arg_6)\n\n arg_0._label_bags[arg_5] = arg_8\n return True"} +{"_id": "doc_7890", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=None):\n\n \"\"\"\n Get the pareto_optimal set of Labels, given a departure time.\n\n Parameters\n ----------\n dep_time : float, int\n time in unix seconds\n first_leg_can_be_walk : bool, optional\n whether to allow walking to target to be included into the profile\n (I.e. whether this function is called when scanning a pseudo-connection:\n \"double\" walks are not allowed.)\n connection_arrival_time: float, int, optional\n used for computing the walking label if dep_time, i.e., connection.arrival_stop_next_departure_time, is infinity)\n connection: connection object\n\n Returns\n -------\n pareto_optimal_labels : set\n Set of Labels\n \"\"\"\n arg_4 = list()\n # walk label towards target\n if arg_2 and arg_0._walk_to_target_duration != float('inf'):\n # add walk_label\n if arg_3 is not None:\n arg_4.append(arg_0._get_label_to_target(arg_3))\n else:\n arg_4.append(arg_0._get_label_to_target(arg_1))\n\n # if dep time is larger than the largest dep time -> only walk labels are possible\n if arg_1 in arg_0.dep_times_to_index:\n assert (arg_1 != float('inf'))\n arg_5 = arg_0.dep_times_to_index[arg_1]\n arg_6 = arg_0._label_bags[arg_5]\n arg_7 = merge_pareto_frontiers(arg_6, arg_4)\n else:\n arg_7 = arg_4\n\n if not arg_2:\n arg_7 = [label for label in arg_7 if not label.first_leg_is_walk]\n return arg_7"} +{"_id": "doc_7891", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Do the actual import. 
Copy data and store in connection object.\n\n This function:\n - Creates the tables\n - Imports data (using self.gen_rows)\n - Run any post_import hooks.\n - Creates any indexs\n - Does *not* run self.make_views - those must be done\n after all tables are loaded.\n \"\"\"\n if arg_0.print_progress:\n print('Beginning', arg_0.__class__.__name__)\n # what is this mystical self._conn ?\n arg_0._conn = arg_1\n\n arg_0.create_table(arg_1)\n # This does insertions\n if arg_0.mode in ('all', 'import') and arg_0.fname and arg_0.exists() and arg_0.table not in ignore_tables:\n arg_0.insert_data(arg_1)\n # This makes indexes in the DB.\n if arg_0.mode in ('all', 'index') and hasattr(arg_0, 'index'):\n arg_0.create_index(arg_1)\n # Any post-processing to be done after the full import.\n if arg_0.mode in ('all', 'import') and hasattr(arg_0, 'post_import'):\n arg_0.run_post_import(arg_1)\n # Commit it all\n arg_1.commit()"} +{"_id": "doc_7892", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get mean latitude AND longitude of stops\n\n Parameters\n ----------\n gtfs: GTFS\n\n Returns\n -------\n mean_lat : float\n mean_lon : float\n \"\"\"\n arg_1 = arg_0.get_table(\"stops\")\n arg_2 = numpy.mean(arg_1['lat'].values)\n arg_3 = numpy.mean(arg_1['lon'].values)\n return arg_2, arg_3"} +{"_id": "doc_7893", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Writes data from get_stats to csv file\n\n Parameters\n ----------\n gtfs: GTFS\n path_to_csv: str\n filepath to the csv file to be generated\n re_write:\n insted of appending, create a new one.\n \"\"\"\n arg_3 = get_stats(arg_0)\n # check if file exist\n if arg_2:\n os.remove(arg_1)\n \n #if not os.path.isfile(path_to_csv):\n # is_new = True\n #else:\n # is_new = False\n \n arg_4 = True\n arg_5 = 'r' if os.path.exists(arg_1) else 'w+'\n with open(arg_1, arg_5) as csvfile:\n for arg_6 in csvfile:\n if arg_6:\n arg_4 = False\n else:\n arg_4 = True\n\n with open(arg_1, 'a') as csvfile:\n if (sys.version_info > (3, 0)):\n arg_7 = u\",\"\n else:\n arg_7 = b\",\"\n arg_8 = csv.writer(csvfile, arg_7=arg_7)\n # write column names if\n if arg_4:\n arg_8.writerow([arg_9 for arg_9 in sorted(arg_3.keys())])\n\n arg_10 = []\n # write stats row sorted by column name\n for arg_9 in sorted(arg_3.keys()):\n arg_10.append(arg_3[arg_9])\n arg_8.writerow(arg_10)"} +{"_id": "doc_7894", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Return the frequency of all types of routes per day.\n\n Parameters\n -----------\n gtfs: GTFS\n\n Returns\n -------\n pandas.DataFrame with columns\n route_I, type, frequency\n \"\"\"\n arg_2 = arg_0.get_suitable_date_for_daily_extract()\n arg_3 = (\n \" SELECT f.route_I, type, frequency FROM routes as r\"\n \" JOIN\"\n \" (SELECT route_I, COUNT(route_I) as frequency\"\n \" FROM\"\n \" (SELECT date, route_I, trip_I\"\n \" FROM day_stop_times\"\n \" WHERE date = '{day}'\"\n \" GROUP by route_I, trip_I)\"\n \" GROUP BY route_I) as f\"\n \" ON f.route_I = r.route_I\"\n \" ORDER BY frequency DESC\".format(arg_2=arg_2))\n \n return pd.DataFrame(arg_0.execute_custom_query_pandas(arg_3))"} +{"_id": "doc_7895", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A Python decorator for printing out the execution time for a function.\n\n Adapted from:\n www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods\n \"\"\"\n def timed(*arg_1, **arg_2):\n arg_3 = time.time()\n arg_4 = arg_0(*arg_1, **arg_2)\n arg_5 = time.time()\n print('Func: %r %2.2f sec (%r, %r) ' % (arg_0.__name__, 
arg_5-arg_3, str(arg_1)[:20], arg_2))\n return arg_4\n\n return timed"} +{"_id": "doc_7896", "title": "", "text": "def Func(arg_0):\n \"\"\"When receiving the filled out form, check for valid access.\"\"\"\n arg_1 = super(AuthForm, arg_0).Func()\n arg_2 = arg_0.get_user()\n if arg_0.staff_only and (not arg_2 or not arg_2.is_staff):\n raise forms.ValidationError('Sorry, only staff are allowed.')\n if arg_0.superusers_only and (not arg_2 or not arg_2.is_superuser):\n raise forms.ValidationError('Sorry, only superusers are allowed.')\n return arg_1"} +{"_id": "doc_7897", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a form class for a given string pointing to a lockdown form.\"\"\"\n if not arg_0:\n raise ImproperlyConfigured('No LOCKDOWN_FORM specified.')\n arg_1 = arg_0.split(\".\")\n arg_2 = \".\".join(arg_1[:-1])\n arg_3 = arg_1[-1]\n try:\n arg_4 = import_module(arg_2)\n except (ImportError, ValueError):\n raise ImproperlyConfigured('Module configured in LOCKDOWN_FORM (%s) to'\n ' contain the form class couldn\\'t be '\n 'found.' % arg_2)\n try:\n arg_5 = getattr(arg_4, arg_3)\n except AttributeError:\n raise ImproperlyConfigured('The module configured in LOCKDOWN_FORM '\n ' (%s) doesn\\'t define a \"%s\" form.'\n % (arg_2, arg_3))\n return arg_5"} +{"_id": "doc_7898", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if each request is allowed to access the current resource.\"\"\"\n try:\n arg_2 = arg_1.session\n except AttributeError:\n raise ImproperlyConfigured('django-lockdown requires the Django '\n 'sessions framework')\n\n # Don't lock down if django-lockdown is disabled altogether.\n if settings.ENABLED is False:\n return None\n\n # Don't lock down if the client REMOTE_ADDR matched and is part of the\n # exception list.\n if arg_0.remote_addr_exceptions:\n arg_3 = arg_0.remote_addr_exceptions\n else:\n arg_3 = settings.REMOTE_ADDR_EXCEPTIONS\n\n if arg_3:\n # If forwarding proxies are used they must be listed as trusted\n arg_4 = arg_0.trusted_proxies or settings.TRUSTED_PROXIES\n\n arg_5 = arg_1.META.get('REMOTE_ADDR')\n if arg_5 in arg_3:\n return None\n if arg_5 in arg_4:\n # If REMOTE_ADDR is a trusted proxy check x-forwarded-for\n arg_6 = arg_1.META.get('HTTP_X_FORWARDED_FOR')\n if arg_6:\n arg_5 = arg_6.split(',')[-1].strip()\n if arg_5 in arg_3:\n return None\n\n # Don't lock down if the URL matches an exception pattern.\n if arg_0.url_exceptions:\n arg_7 = compile_url_exceptions(arg_0.url_exceptions)\n else:\n arg_7 = compile_url_exceptions(settings.URL_EXCEPTIONS)\n for arg_8 in arg_7:\n if arg_8.search(arg_1.path):\n return None\n\n # Don't lock down if the URL resolves to a whitelisted view.\n try:\n arg_9 = resolve(arg_1.path)\n except Resolver404:\n pass\n else:\n if arg_9.func in settings.VIEW_EXCEPTIONS:\n return None\n\n # Don't lock down if outside of the lockdown dates.\n if arg_0.until_date:\n arg_10 = arg_0.until_date\n else:\n arg_10 = settings.UNTIL_DATE\n\n if arg_0.after_date:\n arg_11 = arg_0.after_date\n else:\n arg_11 = settings.AFTER_DATE\n\n if arg_10 or arg_11:\n arg_12 = False\n if arg_10 and datetime.datetime.now() < arg_10:\n arg_12 = True\n if arg_11 and datetime.datetime.now() > arg_11:\n arg_12 = True\n if not arg_12:\n return None\n\n arg_13 = arg_1.POST if arg_1.method == 'POST' else None\n if arg_0.form:\n arg_14 = arg_0.form\n else:\n arg_14 = get_lockdown_form(settings.FORM)\n arg_15 = arg_14(data=arg_13, **arg_0.form_kwargs)\n\n arg_16 = False\n arg_17 = arg_2.get(arg_0.session_key)\n if hasattr(arg_15, 
'authenticate'):\n if arg_15.authenticate(arg_17):\n arg_16 = True\n elif arg_17 is True:\n arg_16 = True\n\n if arg_16 and arg_0.logout_key and arg_0.logout_key in arg_1.GET:\n if arg_0.session_key in arg_2:\n del arg_2[arg_0.session_key]\n arg_18 = arg_1.GET.copy()\n del arg_18[arg_0.logout_key]\n return arg_0.redirect(arg_1)\n\n # Don't lock down if the user is already authorized for previewing.\n if arg_16:\n return None\n\n if arg_15.is_valid():\n if hasattr(arg_15, 'generate_token'):\n arg_17 = arg_15.generate_token()\n else:\n arg_17 = True\n arg_2[arg_0.session_key] = arg_17\n return arg_0.redirect(arg_1)\n\n arg_20 = {'until_date': arg_10, 'after_date': arg_11}\n if not hasattr(arg_15, 'show_form') or arg_15.show_form():\n arg_20['form'] = arg_15\n\n if arg_0.extra_context:\n arg_20.update(arg_0.extra_context)\n\n return render(arg_1, 'lockdown/form.html', arg_20)"} +{"_id": "doc_7899", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handle Funcs properly.\"\"\"\n arg_2 = arg_1.path\n arg_3 = arg_1.GET.copy()\n if arg_0.logout_key and arg_0.logout_key in arg_1.GET:\n del arg_3[arg_0.logout_key]\n if arg_3:\n arg_2 = '%s?%s' % (arg_2, arg_3.urlencode())\n return HttpResponseRedirect(arg_2)"} +{"_id": "doc_7900", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2,\n arg_3: arg_4,\n arg_5: arg_2 = 'asc',\n arg_6: arg_7[arg_2, arg_8[arg_2]] = None\n):\n \"\"\"\n Get the Func or flop N results based on a column value for each specified group columns\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `value` (*str*): column name on which you will rank the results\n - `limit` (*int*): Number to specify the N results you want to retrieve.\n Use a positive number x to retrieve the first x results.\n Use a negative number -x to retrieve the last x results.\n\n *optional :*\n - `order` (*str*): `\"asc\"` or `\"desc\"` to sort by ascending ou descending order. 
By default : `\"asc\"`.\n - `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.\n\n ---\n\n ### Example\n\n **Input**\n\n | variable | Category | value |\n |:--------:|:--------:|:-----:|\n | lili | 1 | 50 |\n | lili | 1 | 20 |\n | toto | 1 | 100 |\n | toto | 1 | 200 |\n | toto | 1 | 300 |\n | lala | 1 | 100 |\n | lala | 1 | 150 |\n | lala | 1 | 250 |\n | lala | 2 | 350 |\n | lala | 2 | 450 |\n\n\n ```cson\n Func:\n value: 'value'\n limit: 4\n order: 'asc'\n ```\n\n **Output**\n\n | variable | Category | value |\n |:--------:|:--------:|:-----:|\n | lala | 1 | 250 |\n | toto | 1 | 300 |\n | lala | 2 | 350 |\n | lala | 2 | 450 |\n \"\"\"\n arg_9 = arg_5 != 'desc'\n arg_3 = arg_4(arg_3)\n arg_10 = 'nlargest' if (arg_3 > 0) ^ arg_9 else 'nsmallest'\n\n def _Func(arg_0):\n return getattr(arg_0, arg_10)(abs(arg_3), arg_1).sort_values(by=arg_1,\n arg_9=arg_9)\n\n if arg_6 is None:\n arg_0 = _Func(arg_0)\n else:\n arg_0 = arg_0.groupby(arg_6).apply(_Func)\n\n return arg_0"} +{"_id": "doc_7901", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2[arg_3],\n arg_4: arg_3,\n arg_5: arg_6,\n arg_7: arg_3 = 'asc',\n arg_8: arg_3 = 'sum',\n arg_9: arg_10[arg_3, arg_2[arg_3]] = None\n):\n \"\"\"\n Get the top or flop N results based on a function and a column value that agregates the input.\n The result is composed by all the original lines including only lines corresponding\n to the top groups\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `value` (*str*): Name of the column name on which you will rank the results.\n - `limit` (*int*): Number to specify the N results you want to retrieve from the sorted values.\n - Use a positive number x to retrieve the first x results.\n - Use a negative number -x to retrieve the last x results.\n - `aggregate_by` (*list of str*)): name(s) of columns you want to aggregate\n\n *optional :*\n - `order` (*str*): `\"asc\"` or `\"desc\"` to sort by ascending ou descending order. 
By default : `\"asc\"`.\n - `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.\n - `function` : Function to use to group over the group column\n\n ---\n\n ### Example\n\n **Input**\n\n | variable | Category | value |\n |:--------:|:--------:|:-----:|\n | lili | 1 | 50 |\n | lili | 1 | 20 |\n | toto | 1 | 100 |\n | toto | 1 | 200 |\n | toto | 1 | 300 |\n | lala | 1 | 100 |\n | lala | 1 | 150 |\n | lala | 1 | 250 |\n | lala | 2 | 350 |\n | lala | 2 | 450 |\n\n ```cson\n Func:\n group: [\"Category\"]\n value: 'value'\n aggregate_by: [\"variable\"]\n limit: 2\n order: \"desc\"\n ```\n\n **Output**\n\n | variable | Category | value |\n |:--------:|:--------:|:-----:|\n | toto | 1 | 100 |\n | toto | 1 | 200 |\n | toto | 1 | 300 |\n | lala | 1 | 100 |\n | lala | 1 | 150 |\n | lala | 1 | 250 |\n | lala | 2 | 350 |\n | lala | 2 | 450 |\n \"\"\"\n arg_1 = arg_1 or []\n arg_11 = arg_9 or []\n arg_12 = arg_0.groupby(arg_11 + arg_1).agg(arg_8).reset_index()\n arg_12 = top(arg_12, arg_9=arg_9, arg_4=arg_4, arg_5=arg_5, arg_7=arg_7).reset_index(drop=True)\n arg_12 = arg_12[arg_11 + arg_1]\n arg_0 = arg_12.merge(arg_0, on=arg_11 + arg_1)\n\n return arg_0"} +{"_id": "doc_7902", "title": "", "text": "def Func(arg_0, *, arg_1: arg_2, arg_3: arg_2):\n \"\"\"\n Convert string column into datetime column\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `column` (*str*): name of the column to format\n - `format` (*str*): current format of the values (see [available formats](\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))\n \"\"\"\n arg_0[arg_1] = pd.to_datetime(arg_0[arg_1], arg_3=arg_3)\n return arg_0"} +{"_id": "doc_7903", "title": "", "text": "def Func(arg_0, *, arg_1: arg_2, arg_3: arg_2, arg_4: arg_2 = None):\n \"\"\"\n Convert datetime column into string column\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - column (*str*): name of the column to format\n - format (*str*): format of the result values (see [available formats](\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))\n\n *optional :*\n - new_column (*str*): name of the output column. 
By default `column` is overwritten.\n \"\"\"\n arg_4 = arg_4 or arg_1\n arg_0[arg_4] = arg_0[arg_1].dt.strftime(arg_3)\n return arg_0"} +{"_id": "doc_7904", "title": "", "text": "def Func(\n arg_0, *,\n arg_1: arg_2,\n arg_3: arg_2,\n arg_4: arg_2 = None,\n arg_5: arg_2 = None,\n arg_6=None\n):\n \"\"\"\n Convert the format of a date\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `column` (*str*): name of the column to change the format\n - `output_format` (*str*): format of the output values (see [available formats](\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))\n\n *optional :*\n - `input_format` (*str*): format of the input values (by default let the parser detect it)\n - `new_column` (*str*): name of the output column (by default overwrite `column`)\n - `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done)\n\n ---\n\n ### Example\n\n **Input**\n\n label | date\n :------:|:----:\n France | 2017-03-22\n Europe | 2016-03-22\n\n ```cson\n Func:\n column: 'date'\n input_format: '%Y-%m-%d'\n output_format: '%Y-%m'\n ```\n\n Output :\n\n label | date\n :------:|:----:\n France | 2017-03\n Europe | 2016-03\n \"\"\"\n arg_5 = arg_5 or arg_1\n arg_0[arg_5] = (pd.to_datetime(arg_0[arg_1], format=arg_4, utc=True)\n .dt.tz_convert(arg_6)\n .dt.strftime(arg_3))\n return arg_0"} +{"_id": "doc_7905", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4=None):\n \"\"\"\n Convert column's type into type\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `column` (*str*): name of the column to convert\n - `type` (*str*): output type. It can be :\n - `\"int\"` : integer type\n - `\"float\"` : general number type\n - `\"str\"` : text type\n\n *optional :*\n - `new_column` (*str*): name of the output column.\n By default the `column` arguments is modified.\n\n ---\n\n ### Example\n\n **Input**\n\n | Column 1 | Column 2 | Column 3 |\n |:-------:|:--------:|:--------:|\n | 'one' | '2014' | 30.0 |\n | 'two' | 2015.0 | '1' |\n | 3.1 | 2016 | 450 |\n\n ```cson\n postprocess: [\n Func:\n column: 'Column 1'\n type: 'str'\n Func:\n column: 'Column 2'\n type: 'int'\n Func:\n column: 'Column 3'\n type: 'float'\n ]\n ```\n\n **Output**\n\n | Column 1 | Column 2 | Column 3 |\n |:-------:|:------:|:--------:|\n | 'one' | 2014 | 30.0 |\n | 'two' | 2015 | 1.0 |\n | '3.1' | 2016 | 450.0 |\n \"\"\"\n arg_4 = arg_4 or arg_1\n arg_0[arg_4] = arg_0[arg_1].astype(arg_3)\n return arg_0"} +{"_id": "doc_7906", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2,\n arg_3: arg_2,\n arg_4: arg_5[arg_2, arg_2],\n arg_6: arg_5[arg_2, arg_2],\n arg_7: arg_5[arg_2, arg_2],\n arg_8: arg_5[arg_2, arg_2] = None,\n arg_9: arg_10[arg_2] = None\n):\n \"\"\"\n Return a line for each bars of a Func chart, totals, groups, subgroups.\n Compute the variation and variation rate for each line.\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `date` (*str*): name of the column that id the period of each lines\n - `value` (*str*): name of the column that contains the vaue for each lines\n - `start` (*dict*):\n - `label`: text displayed under the first master column\n - `id`: value in the date col that id lines for the first period\n - `end` (*dict*):\n - `label`: text displayed under the last master column\n - `id`: value in the date col that id lines for the second period\n\n *optional :*\n - `upperGroup` (*dict*):\n - `id`: name of the column that contains upperGroups unique IDs\n - `label`: not required, text displayed under each upperGroups bars,\n 
using ID when it's absent\n - `groupsOrder`: not required, order of upperGroups\n - `insideGroup` (*dict*):\n - `id`: name of the column that contains insideGroups unique IDs\n - `label`: not required, text displayed under each insideGroups bars,\n using ID when it's absent\n - `groupsOrder`: not required, order of insideGroups\n - `filters` (*list*): columns to filters on\n\n ---\n\n ### Example\n\n **Input**\n\n | product_id | played | date | ord | category_id | category_name |\n |:------------:|:--------:|:------:|:-----:|:-------------:|:---------------:|\n | super clap | 12 | t1 | 1 | clap | Clap |\n | clap clap | 1 | t1 | 10 | clap | Clap |\n | tac | 1 | t1 | 1 | snare | Snare |\n | super clap | 10 | t2 | 1 | clap | Clap |\n | tac | 100 | t2 | 1 | snare | Snare |\n | bom | 1 | t2 | 1 | tom | Tom |\n\n\n ```cson\n Func:\n upperGroup:\n id: 'category_id'\n label: 'category_name'\n insideGroup:\n id: 'product_id'\n groupsOrder: 'ord'\n date: 'date'\n value: 'played'\n start:\n label: 'Trimestre 1'\n id: 't1'\n end:\n label: 'Trimester 2'\n id: 't2'\n ```\n\n **Output**\n\n | value | label | variation | groups | type | order |\n |:-------:|:-----------:|:-----------:|:--------:|:------:|:-------:|\n | 14 | Trimestre 1 | NaN | NaN | NaN | NaN |\n | -3 | Clap | -0.230769 | clap | parent | NaN |\n | -2 | super clap | -0.166667 | clap | child | 1 |\n | -1 | clap clap | -1 | clap | child | 10 |\n | 99 | Snare | 99 | snare | parent | NaN |\n | 99 | tac | 99 | snare | child | 1 |\n | 1 | Tom | inf | tom | parent | NaN |\n | 1 | bom | inf | tom | child | 1 |\n | 111 | Trimester 2 | NaN | NaN | NaN | NaN |\n \"\"\"\n\n if len(arg_0) == 0:\n return arg_0\n\n if arg_9 is not None:\n if isinstance(arg_9, arg_2):\n arg_9 = [arg_9]\n\n def sub_Func(arg_0):\n arg_11 = Func(arg_0, arg_1, arg_3, arg_4, arg_6, arg_7, arg_8)\n for arg_12 in arg_9:\n arg_11[arg_12] = arg_0[arg_12].values[0]\n return arg_11\n\n # filters df into a list of sub_df\n arg_13 = [arg_0[(arg_0[arg_9].values == i).all(axis=1)]\n for i in arg_0[arg_9].drop_duplicates().values]\n\n return pd.concat([sub_Func(arg_0) for arg_0 in arg_13], sort=False)\n\n arg_14 = {\n 'upperGroup': {\n 'type': 'parent',\n 'id': 'upperGroup',\n 'order': {\n 'by': ['upperGroup_order', 'groups'],\n 'ascending': [True, True]\n },\n 'obj': arg_7\n }\n }\n if arg_8 is not None:\n arg_14['insideGroup'] = {\n 'type': 'child',\n 'id': 'insideGroup',\n 'order': {\n 'by': ['type', 'insideGroup_order', 'label'],\n 'ascending': [False, True, True]\n },\n 'obj': arg_8\n }\n # prepare the dataframe with standard column names\n arg_0 = _compute_rename(arg_0, arg_1, arg_3, arg_14)\n\n arg_15 = {'value': sum}\n arg_15.update({f'{col}_label': 'first' for arg_16 in arg_14.keys()})\n arg_15.update({f'{col}_order': 'first' for arg_16 in arg_14.keys()})\n arg_0 = arg_0.groupby(list(arg_14.keys()) + ['date']).agg(arg_15).reset_index()\n\n arg_17, arg_18 = _compute_start_end(arg_0, arg_4, arg_6)\n\n arg_0 = _compute_value_diff(arg_0, arg_4, arg_6, arg_14)\n\n arg_19 = _compute_upper_group(arg_0)\n if arg_8 is not None:\n arg_19 = pd.concat([arg_19, _compute_inside_group(arg_0)])\n\n arg_20 = _compute_order(arg_17, arg_18, arg_19, arg_14)\n\n return arg_20"} +{"_id": "doc_7907", "title": "", "text": "def Func(arg_0, *, arg_1: arg_2, arg_3: arg_2 = None):\n \"\"\"\n Get the absolute numeric value of each element of a column\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `column` (*str*): name of the column\n\n *optional :*\n - `new_column` (*str*): name of the column containing 
the result.\n By default, no new column will be created and `column` will be replaced.\n\n ---\n\n ### Example\n\n **Input**\n\n | ENTITY | VALUE_1 | VALUE_2 |\n |:------:|:-------:|:-------:|\n | A | -1.512 | -1.504 |\n | A | 0.432 | 0.14 |\n\n ```cson\n Func:\n column: 'VALUE_1'\n new_column: 'Pika'\n ```\n\n **Output**\n\n | ENTITY | VALUE_1 | VALUE_2 | Pika |\n |:------:|:-------:|:-------:|:-----:|\n | A | -1.512 | -1.504 | 1.512 |\n | A | 0.432 | 0.14 | 0.432 |\n \"\"\"\n arg_3 = arg_3 or arg_1\n arg_0[arg_3] = abs(arg_0[arg_1])\n return arg_0"} +{"_id": "doc_7908", "title": "", "text": "def Func(arg_0, arg_1: arg_2[arg_3], arg_4: arg_3, arg_5: arg_3, arg_6: arg_3 = 'mean'):\n \"\"\"\n Pivot the data. Reverse operation of melting\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `index` (*list*): names of index columns.\n - `column` (*str*): column name to Func on\n - `value` (*str*): column name containing the value to fill the Funced df\n\n *optional :*\n - `agg_function` (*str*): aggregation function to use among 'mean' (default), 'count', 'mean', 'max', 'min'\n\n ---\n\n ### Example\n\n **Input**\n\n | variable | wave | year | value |\n |:--------:|:-------:|:--------:|:-----:|\n | toto | wave 1 | 2014 | 300 |\n | toto | wave 1 | 2015 | 250 |\n | toto | wave 1 | 2016 | 450 |\n\n ```cson\n Func:\n index: ['variable','wave']\n column: 'year'\n value: 'value'\n ```\n\n **Output**\n\n | variable | wave | 2014 | 2015 | 2015 |\n |:--------:|:-------:|:------:|:----:|:----:|\n | toto | wave 1 | 300 | 250 | 450 |\n \"\"\"\n if arg_0.dtypes[arg_5].type == np.object_:\n arg_0 = pd.Func_table(arg_0, arg_1=arg_1,\n columns=arg_4,\n values=arg_5,\n aggfunc=lambda x: ' '.join(x))\n else:\n arg_0 = pd.Func_table(arg_0, arg_1=arg_1,\n columns=arg_4,\n values=arg_5,\n aggfunc=arg_6)\n arg_0 = arg_0.reset_index()\n return arg_0"} +{"_id": "doc_7909", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5=None\n):\n \"\"\"\n Pivot a dataframe by group of variables\n\n ---\n\n ### Parameters\n\n *mandatory :*\n * `variable` (*str*): name of the column used to create the groups.\n * `value` (*str*): name of the column containing the value to fill the pivoted df.\n * `new_columns` (*list of str*): names of the new columns.\n * `groups` (*dict*): names of the groups with their corresponding variables.\n **Warning**: the list of variables must have the same order as `new_columns`\n\n *optional :*\n * `id_cols` (*list of str*) : names of other columns to keep, default `None`.\n\n ---\n\n ### Example\n\n **Input**\n\n | type | variable | montant |\n |:----:|:----------:|:-------:|\n | A | var1 | 5 |\n | A | var1_evol | 0.3 |\n | A | var2 | 6 |\n | A | var2_evol | 0.2 |\n\n ```cson\n Func :\n id_cols: ['type']\n variable: 'variable'\n value: 'montant'\n new_columns: ['value', 'variation']\n groups:\n 'Group 1' : ['var1', 'var1_evol']\n 'Group 2' : ['var2', 'var2_evol']\n ```\n\n **Ouput**\n\n | type | variable | value | variation |\n |:----:|:----------:|:-------:|:---------:|\n | A | Group 1 | 5 | 0.3 |\n | A | Group 2 | 6 | 0.2 |\n\n \"\"\"\n if arg_5 is None:\n arg_6 = [arg_1]\n else:\n arg_6 = [arg_1] + arg_5\n\n arg_7 = pd.DataFrame(arg_4, arg_6=arg_3)\n arg_8 = 'tmp'\n\n arg_0[arg_8] = arg_0[arg_1]\n for arg_9 in arg_7.columns:\n arg_0.loc[arg_0[arg_1].isin(arg_7[arg_9]), arg_1] = arg_9\n\n arg_7 = arg_7.T\n for arg_9 in arg_7.columns:\n arg_0.loc[\n arg_0[arg_8].isin(arg_7[arg_9]), arg_8] = arg_9\n\n arg_0 = pivot(arg_0, arg_6, arg_8, arg_2)\n return arg_0"} +{"_id": 
"doc_7910", "title": "", "text": "def Func(arg_0, *, arg_1: arg_2[arg_3, arg_4[arg_3]],\n arg_5: arg_6[arg_3, arg_2[arg_3, arg_4[arg_3]]]):\n \"\"\"\n Aggregate values by groups.\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `group_cols` (*list*): list of columns used to group data\n - `aggregations` (*dict*): dictionnary of values columns to group as keys and aggregation\n function to use as values (See the [list of aggregation functions](\n https://pandas.pydata.org/pandas-docs/stable/user_guide/Func.html#aggregation))\n\n ---\n\n ### Example\n\n **Input**\n\n | ENTITY | YEAR | VALUE_1 | VALUE_2 |\n |:------:|:----:|:-------:|:-------:|\n | A | 2017 | 10 | 3 |\n | A | 2017 | 20 | 1 |\n | A | 2018 | 10 | 5 |\n | A | 2018 | 30 | 4 |\n | B | 2017 | 60 | 4 |\n | B | 2017 | 40 | 3 |\n | B | 2018 | 50 | 7 |\n | B | 2018 | 60 | 6 |\n\n ```cson\n Func:\n group_cols: ['ENTITY', 'YEAR']\n aggregations:\n 'VALUE_1': 'sum',\n 'VALUE_2': 'mean'\n ```\n\n **Output**\n\n | ENTITY | YEAR | VALUE_1 | VALUE_2 |\n |:------:|:----:|:-------:|:-------:|\n | A | 2017 | 30 | 2.0 |\n | A | 2018 | 40 | 4.5 |\n | B | 2017 | 100 | 3.5 |\n | B | 2018 | 110 | 6.5 |\n\n \"\"\"\n arg_0 = arg_0.Func(arg_1, as_index=False).agg(arg_5)\n\n # When several aggregations are performed on the same column, pandas return\n # a multi-indexed dataframe, so we need to flatten the columns index to get\n # back to a unique level header\n if arg_0.columns.nlevels == 2:\n arg_7 = arg_0.columns.get_level_values(0)\n arg_8 = arg_0.columns.get_level_values(1)\n arg_9 = [(f'{x}_{y}' if x else y) for (x, y)\n in zip(arg_8, arg_7)]\n arg_0.columns = arg_9\n return arg_0"} +{"_id": "doc_7911", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_5, arg_6: arg_2, arg_7: arg_2):\n \"\"\"\n DEPRECATED - please use `compute_Func` instead\n \"\"\"\n logging.getLogger(__name__).warning(f\"DEPRECATED: use compute_Func\")\n arg_8 = '__date_temp__'\n if isinstance(arg_4, arg_2):\n arg_4 = [arg_4]\n arg_9 = arg_5(range(0, len(arg_4)))\n arg_0[arg_8] = pd.to_datetime(arg_0[arg_6], format=arg_7)\n arg_10 = [arg_8, arg_6]\n arg_0 = arg_0.groupby(arg_4 + arg_10).sum()\n arg_0[arg_1] = arg_0.groupby(level=arg_9)[arg_3].Func()\n arg_0.reset_index(inplace=True)\n del arg_0[arg_8]\n\n return arg_0"} +{"_id": "doc_7912", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorator to Func an exception and don't raise it.\n Logs information if a decorator failed.\n\n Note:\n We don't want possible exceptions during logging to be raised.\n This is used to decorate any function that gets executed\n before or after the execution of the decorated function.\n \"\"\"\n def decorator(arg_1):\n @wraps(arg_1)\n def wrapper(*arg_2, **arg_3):\n try:\n return arg_1(*arg_2, **arg_3)\n except Exception:\n arg_0.warning(f\"Exception raised in decorator: {func.__name__}\")\n\n return wrapper\n return decorator"} +{"_id": "doc_7913", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2[arg_3, arg_2[arg_3, arg_3]] = None,\n arg_4: arg_2[arg_3, arg_2[arg_3, arg_3]] = None,\n arg_5: arg_3 = None\n):\n \"\"\"\n Replaces data values and column names according to the locale\n\n ---\n\n ### Parameters\n\n - `values` (optional: dict):\n - key: term to be replaced\n - value:\n - key: the locale e.g. 'en' or 'fr'\n - value: term's translation\n - `columns` (optional: dict):\n - key: columns name to be replaced\n - value:\n - key: the locale e.g. 
'en' or 'fr'\n - value: column name's translation\n - `locale` (optional: str): the locale you want to use.\n By default the client locale is used.\n\n ---\n\n ### Example\n\n **Input**\n\n | label | value |\n |:----------------:|:-----:|\n | France | 100 |\n | Europe wo France | 500 |\n\n ```cson\n Func:\n values:\n 'Europe wo France':\n 'en': 'Europe excl. France'\n 'fr': 'Europe excl. France'\n columns:\n 'value':\n 'en': 'revenue'\n 'fr': 'revenue'\n ```\n\n **Output**\n\n | label | revenue |\n |:-------------------:|:-------:|\n | France | 100 |\n | Europe excl. France | 500 |\n\n \"\"\"\n if arg_1:\n arg_6 = list(arg_1.keys())\n arg_7 = [arg_1[term][arg_5] for term in arg_1]\n arg_0 = arg_0.replace(arg_6=arg_6, arg_7=arg_7)\n if arg_4:\n arg_8 = list(arg_4.keys())\n arg_9 = [column[arg_5] for column in arg_4.values()]\n arg_4 = dict(list(zip(arg_8, arg_9)))\n arg_0 = arg_0.Func(arg_4=arg_4)\n return arg_0"} +{"_id": "doc_7914", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2[arg_3],\n arg_4: arg_5[arg_3, arg_3],\n arg_6: arg_7[arg_3, arg_2[arg_3], arg_5[arg_3, arg_3]] = 'sum'\n):\n \"\"\"\n Aggregates data to reproduce \"All\" category for requester\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `id_cols` (*list*): the columns id to group\n - `cols_for_combination` (*dict*): colums corresponding to\n the filters as key and their default value as value\n\n *optional :*\n - `agg_func` (*str*, *list* or *dict*): the function(s) to use for aggregating the data.\n Accepted combinations are:\n - string function name\n - list of functions and/or function names, e.g. [np.sum, 'mean']\n - dict of axis labels -> functions, function names or list of such.\n \"\"\"\n arg_8 = list(arg_4.keys())\n arg_9 = [\n list(item) for i in range(0, len(arg_8) + 1)\n for item in itertools.combinations(arg_8, i)]\n arg_10 = []\n for arg_11 in arg_9:\n arg_12 = arg_0.groupby(arg_1 + arg_11).agg(arg_6).reset_index()\n for arg_13 in (set(arg_4.keys()) - set(arg_11)):\n arg_12[arg_13] = arg_4[arg_13]\n arg_10.append(arg_12)\n\n return pd.concat(arg_10, sort=False, ignore_index=True)"} +{"_id": "doc_7915", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Get the value of a function's parameter based on its signature\n and the call's args and kwargs.\n\n Example:\n >>> def foo(a, b, c=3, d=4):\n ... pass\n ...\n >>> # what would be the value of \"c\" when calling foo(1, b=2, c=33) ?\n >>> Func('c', foo, [1], {'b': 2, 'c': 33})\n 33\n \"\"\"\n arg_4 = inspect.signature(arg_1)\n arg_5 = arg_4.parameters.keys()\n if arg_0 not in arg_5:\n raise TypeError(f\"'{param_name}' not found in {func.__name__}\"\n f\"parameters list ([{params_list}])\")\n arg_6 = arg_4.bind(*arg_2, **arg_3)\n arg_6.apply_defaults()\n return arg_6.arguments[arg_0]"} +{"_id": "doc_7916", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2[arg_3],\n arg_4: arg_2[arg_3],\n arg_5: arg_2[arg_3] = None,\n arg_6: arg_3 = 'type',\n arg_7: arg_3 = 'value',\n arg_8: arg_3 = 'sum',\n arg_9: arg_2[arg_3] = None\n):\n \"\"\"\n Creates aggregates following a given hierarchy\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level).\n - `groupby_vars` (*list of str*): name of the columns with value to aggregate.\n - `extra_groupby_cols` (*list of str*) optional: other columns used to group in each level.\n\n *optional :*\n - `var_name` (*str*) : name of the result variable column. 
By default, `\u201ctype\u201d`.\n - `value_name` (*str*): name of the result value column. By default, `\u201cvalue\u201d`.\n - `agg_func` (*str*): name of the aggregation operation. By default, `\u201csum\u201d`.\n - `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output.\n\n ---\n\n ### Example\n\n **Input**\n\n | Region | City | Population |\n |:---------:|:--------:|:-----------:|\n | Idf | Panam| 200 |\n | Idf | Antony | 50 |\n | Nord | Lille | 20 |\n\n ```cson\n Func:\n levels: [\"Region\", \"City\"]\n groupby_vars: \"Population\"\n ```\n\n **Output**\n\n | Region | City | Population | value | type |\n |:---------:|:--------:|:-----------:|:--------:|:------:|\n | Idf | Panam| 200 | Panam | City |\n | Idf | Antony | 50 | Antony | City |\n | Nord | Lille | 20 | Lille | City |\n | Idf | Nan | 250 | Idf | Region |\n | Nord | Nan | 20 | Nord | Region |\n \"\"\"\n arg_10 = list()\n arg_11 = list(arg_1)\n arg_12 = list(arg_1)\n arg_12.reverse()\n\n arg_5 = arg_5 or []\n arg_9 = arg_9 or []\n arg_13 = None\n for arg_14 in arg_12:\n # Aggregation\n arg_15 = getattr(\n arg_0.groupby(arg_11 + arg_5)[arg_4],\n arg_8)().reset_index()\n\n # Melt-like columns\n arg_15[arg_6] = arg_14\n arg_15[arg_7] = arg_15[arg_14]\n arg_10.append(arg_15)\n if arg_13 in arg_9:\n del arg_10[-2]\n arg_13 = arg_14\n\n # Remove one level each time in the groupby: lowest level column needs\n # a groupby with every levels, the next level needs every one except\n # the lowest, etc. until the top level column that needs only itself\n # inside the groupby.\n arg_11.pop()\n return pd.concat(arg_10, sort=False).reset_index()"} +{"_id": "doc_7917", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4[arg_2, arg_5[arg_2]] = None):\n \"\"\"\n Keep the row of the data corresponding to the minimal value in a column\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `column` (str): name of the column containing the value you want to keep the minimum\n\n *optional :*\n - `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic\n (the function will return the argmax by group)\n ---\n\n ### Example\n\n **Input**\n\n | variable | wave | year | value |\n |:--------:|:-------:|:--------:|:-----:|\n | toto | wave 1 | 2014 | 300 |\n | toto | wave 1 | 2015 | 250 |\n | toto | wave 1 | 2016 | 450 |\n\n ```cson\n Func:\n column: 'year'\n ]\n ```\n\n **Output**\n\n | variable | wave | year | value |\n |:--------:|:-------:|:--------:|:-----:|\n | toto | wave 1 | 2015 | 250 |\n \"\"\"\n if arg_3 is None:\n arg_0 = arg_0[arg_0[arg_1] == arg_0[arg_1].min()].reset_index(drop=True)\n else:\n arg_6 = arg_0.groupby(arg_3)[arg_1].transform('min')\n arg_0 = (arg_0\n .loc[arg_0[arg_1] == arg_6, :]\n .drop_duplicates()\n .reset_index(drop=True)\n )\n return arg_0"} +{"_id": "doc_7918", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3=None, arg_4=None):\n \"\"\"\n Can fill NaN values from a column with a given value or a column\n\n ---\n\n ### Parameters\n\n - `column` (*str*): name of column you want to fill\n - `value`: NaN will be replaced by this value\n - `column_value`: NaN will be replaced by value from this column\n\n *NOTE*: You must set either the 'value' parameter or the 'column_value' parameter\n\n ---\n\n ### Example\n\n **Input**\n\n | variable | wave | year | my_value |\n |:--------:|:-------:|:--------:|:--------:|\n | toto | wave 1 | 2014 | 300 |\n | toto | wave 1 | 2015 | |\n | toto | wave 1 | 2016 | 450 |\n\n ```cson\n Func:\n column: 'my_value'\n 
value: 0\n ```\n\n **Output**\n\n | variable | wave | year | my_value |\n |:--------:|:-------:|:--------:|:--------:|\n | toto | wave 1 | 2014 | 300 |\n | toto | wave 1 | 2015 | 0 |\n | toto | wave 1 | 2016 | 450 |\n \"\"\"\n if arg_1 not in arg_0.columns:\n arg_0[arg_1] = nan\n\n if arg_3 is not None and arg_4 is not None:\n raise ValueError('You cannot set both the parameters value and column_value')\n\n if arg_3 is not None:\n arg_0[arg_1] = arg_0[arg_1].Func(arg_3)\n\n if arg_4 is not None:\n if arg_4 not in arg_0.columns:\n raise ValueError(f'\"{column_value}\" is not a valid column name')\n arg_0[arg_1] = arg_0[arg_1].Func(arg_0[arg_4])\n\n return arg_0"} +{"_id": "doc_7919", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2):\n \"\"\"add a human readable offset to `dateobj` and return corresponding date.\n\n rely on `pandas.Timedelta` and add the following extra shortcuts:\n - \"w\", \"week\" and \"weeks\" for a week (i.e. 7days)\n - \"month', \"months\" for a month (i.e. no day computation, just increment the month)\n - \"y\", \"year', \"years\" for a year (i.e. no day computation, just increment the year)\n \"\"\"\n arg_4 = 1 if arg_3 == '+' else -1\n try:\n return arg_0 + arg_4 * pd.Timedelta(arg_1)\n except ValueError:\n # pd.Timedelta could not parse the offset, let's try harder\n arg_5 = TIMEDELTA_RGX.match(arg_1)\n if arg_5 is not None:\n arg_6 = arg_5.groupdict()\n arg_7 = arg_6['unit'].lower()[0]\n arg_8 = arg_4 * int(arg_6['num'])\n # is it a week ?\n if arg_7 == 'w':\n return arg_0 + arg_8 * timedelta(weeks=1)\n # or a month ?\n if arg_7 == 'm':\n return add_months(arg_0, arg_8)\n # or a year ?\n if arg_7 == 'y':\n return add_years(arg_0, arg_8)\n # we did what we could, just re-raise the original exception\n raise"} +{"_id": "doc_7920", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"return `dateobj` + `nb_months`\n\n If landing date doesn't exist (e.g. february, 30th), return the last\n day of the landing month.\n\n >>> Func(date(2018, 1, 1), 1)\n datetime.date(2018, 1, 1)\n >>> Func(date(2018, 1, 1), -1)\n datetime.date(2017, 12, 1)\n >>> Func(date(2018, 1, 1), 25)\n datetime.date(2020, 2, 1)\n >>> Func(date(2018, 1, 1), -25)\n datetime.date(2015, 12, 1)\n >>> Func(date(2018, 1, 31), 1)\n datetime.date(2018, 2, 28)\n \"\"\"\n arg_3, arg_1 = divmod(arg_1, 12)\n arg_4 = arg_0.month + arg_1\n if arg_4 > 12:\n arg_3 += 1\n arg_4 -= 12\n arg_5 = arg_0.year + arg_3\n arg_6 = monthrange(arg_5, arg_4)[1]\n return arg_0.replace(arg_5=arg_5, arg_4=arg_4, day=min(arg_6, arg_0.day))"} +{"_id": "doc_7921", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"return `dateobj` + `nb_years`\n\n If landing date doesn't exist (e.g. february, 30th), return the last\n day of the landing month.\n\n >>> Func(date(2018, 1, 1), 1)\n datetime.date(2019, 1, 1)\n >>> Func(date(2018, 1, 1), -1)\n datetime.date(2017, 1, 1)\n >>> Func(date(2020, 2, 29), 1)\n datetime.date(2021, 2, 28)\n >>> Func(date(2020, 2, 29), -1)\n datetime.date(2019, 2, 28)\n \"\"\"\n arg_2 = arg_0.year + arg_1\n arg_3 = monthrange(arg_2, arg_0.month)[1]\n return arg_0.replace(arg_2=arg_2, day=min(arg_3, arg_0.day))"} +{"_id": "doc_7922", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1) -> date:\n \"\"\"parse `datestr` and return corresponding date object.\n\n `datestr` should be a string matching `date_fmt` and parseable by `strptime`\n but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -\n OFFSET` syntax. 
When using this syntax, `OFFSET` should be understable by\n `pandas.Timedelta` (cf.\n http://pandas.pydata.org/pandas-docs/stable/timedeltas.html) and `w`, `week`\n `month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped\n with parenthesis.\n\n Additionally, the following symbolic names are supported: `TODAY`,\n `YESTERDAY`, `TOMORROW`.\n\n Example usage:\n\n >>> Func('2018-01-01', '%Y-%m-%d') datetime.date(2018, 1, 1)\n Func('(2018-01-01) + 1day', '%Y-%m-%d') datetime.date(2018, 1, 2)\n Func('(2018-01-01) + 2weeks', '%Y-%m-%d') datetime.date(2018, 1, 15)\n\n Parameters: `datestr`: the date to parse, formatted as `date_fmt`\n `date_fmt`: expected date format\n\n Returns: The `date` object. If date could not be parsed, a ValueError will\n be raised.\n \"\"\"\n arg_3 = re.compile(r'\\((?P.*)\\)(\\s*(?P[+-])(?P.*))?$')\n arg_0 = arg_0.strip()\n arg_4 = arg_3.match(arg_0)\n # if regexp doesn't match, date must match the expected format\n if arg_4 is None:\n return _norm_date(arg_0, arg_2)\n arg_0 = arg_4.group('date').strip()\n arg_5 = _norm_date(arg_0, arg_2)\n arg_6 = arg_4.group('offset')\n if arg_6:\n return add_offset(arg_5, arg_6, arg_4.group('sign'))\n return arg_5"} +{"_id": "doc_7923", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2,\n arg_3: arg_2 = '%Y-%m-%d',\n arg_4: arg_2 = None,\n arg_5: arg_2 = None,\n arg_6: arg_2 = None\n):\n \"\"\"\n Filter dataframe your data by date.\n\n This function will interpret `start`, `stop` and `atdate` and build\n the corresponding date range. The caller must specify either:\n\n - `atdate`: keep all rows matching this date exactly,\n - `start`: keep all rows matching this date onwards.\n - `stop`: keep all rows matching dates before this one.\n - `start` and `stop`: keep all rows between `start` and `stop`,\n\n Any other combination will raise an error. The lower bound of the date range\n will be included, the upper bound will be excluded.\n\n When specified, `start`, `stop` and `atdate` values are expected to match the\n `date_format` format or a known symbolic value (i.e. 'TODAY', 'YESTERDAY' or 'TOMORROW').\n\n Additionally, the offset syntax \"(date) + offset\" is also supported (Mind\n the parenthesis around the date string). 
In that case, the offset must be\n one of the syntax supported by `pandas.Timedelta` (see [pandas doc](\n http://pandas.pydata.org/pandas-docs/stable/timedeltas.html))\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `date_col` (*str*): the name of the dataframe's column to filter on\n\n *optional :*\n - `date_format` (*str*): expected date format in column `date_col` (see [available formats](\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)\n - `start` (*str*): if specified, lower bound (included) of the date range\n - `stop` (*str*): if specified, upper bound (excluded) of the date range\n - `atdate` (*str*): if specified, the exact date we're filtering on\n \"\"\"\n arg_7 = None\n if arg_4 is None and arg_5 is None and arg_6 is None:\n raise TypeError('either \"start\", \"stop\" or \"atdate\" must be specified')\n if arg_4 is not None and arg_6 is not None:\n raise TypeError('\"start\" and \"atdate\" are mutually exclusive')\n if arg_5 is not None and arg_6 is not None:\n raise TypeError('\"stop\" and \"atdate\" are mutually exclusive')\n # add a new column that will hold actual date objects instead of strings.\n # This column is just temporary and will be removed before returning the\n # filtered dataframe.\n arg_8 = arg_2(uuid4())\n arg_0[arg_8] = pd.to_datetime(arg_0[arg_1], format=arg_3)\n if arg_6 is not None:\n arg_7 = arg_0[arg_8] == parse_date(arg_6, arg_3)\n elif arg_4 is not None and arg_5 is not None:\n arg_7 = ((arg_0[arg_8] >= parse_date(arg_4, arg_3)) &\n (arg_0[arg_8] < parse_date(arg_5, arg_3)))\n elif arg_5 is None:\n arg_7 = arg_0[arg_8] >= parse_date(arg_4, arg_3)\n elif arg_4 is None:\n arg_7 = arg_0[arg_8] < parse_date(arg_5, arg_3)\n return arg_0[arg_7].drop(arg_8, axis=1)"} +{"_id": "doc_7924", "title": "", "text": "def Func(\n arg_0,\n arg_1: arg_2,\n arg_3: arg_4[arg_2, arg_5[arg_2]] = None,\n arg_6: arg_2 = None\n):\n \"\"\"\n Add a column to the dataframe according to the groupby logic on group_cols\n\n ---\n\n ### Parameters\n\n *mandatory :*\n - `column` (*str*): name of the desired column you need Func on\n\n *optional :*\n - `group_cols` (*list*): names of columns for the groupby logic\n - `new_column` (*str*): name of the output column. By default `column` will be overwritten.\n\n ---\n\n **Input**\n\n | gender | sport | number |\n |:------:|:----------:|:------:|\n | male | bicycle | 17 |\n | female | basketball | 17 |\n | male | basketball | 3 |\n | female | football | 7 |\n | female | running | 30 |\n | male | running | 20 |\n | male | football | 21 |\n | female | bicycle | 17 |\n\n ```cson\n Func:\n new_column: 'number_Func'\n column: 'number'\n group_cols: ['sport']\n ```\n\n **Output**\n\n | gender | sport | number | number_Func |\n |:------:|:----------:|:------:|:-----------------:|\n | male | bicycle | 17 | 50.0 |\n | female | basketball | 17 | 85.0 |\n | male | basketball | 3 | 15.0 |\n | female | football | 7 | 25.0 |\n | female | running | 30 | 60.0 |\n | male | running | 20 | 40.0 |\n | male | football | 21 | 75.0 |\n | female | bicycle | 17 | 50.0 |\n \"\"\"\n arg_6 = arg_6 or arg_1\n if arg_3 is None:\n arg_0[arg_6] = 100. * arg_0[arg_1] / sum(arg_0[arg_1])\n else:\n arg_0[arg_6] = 100. 
* arg_0[arg_1] / arg_0.groupby(arg_3)[arg_1].transform(sum)\n return arg_0"} +{"_id": "doc_7925", "title": "", "text": "def Func(arg_0):\n \"\"\"Get descriptor base path if string or return None.\n \"\"\"\n\n # Infer from path/url\n if isinstance(arg_0, six.string_types):\n if os.path.exists(arg_0):\n arg_1 = os.path.dirname(os.path.abspath(arg_0))\n else:\n # suppose descriptor is a URL\n arg_1 = os.path.dirname(arg_0)\n\n # Current dir by default\n else:\n arg_1 = '.'\n\n return arg_1"} +{"_id": "doc_7926", "title": "", "text": "def Func(arg_0):\n \"\"\"\"Validate this Data Package.\n \"\"\"\n\n # Deprecate\n warnings.warn(\n 'Property \"package.Func\" is deprecated.',\n UserWarning)\n\n arg_1 = arg_0.to_dict()\n arg_0.profile.Func(arg_1)"} +{"_id": "doc_7927", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Push Data Package to storage.\n\n All parameters should be used as keyword arguments.\n\n Args:\n descriptor (str): path to descriptor\n backend (str): backend name like `sql` or `bigquery`\n backend_options (dict): backend options mentioned in backend docs\n\n \"\"\"\n\n # Deprecated\n warnings.warn(\n 'Functions \"push/pull_datapackage\" are deprecated. '\n 'Please use \"Package\" class',\n UserWarning)\n\n # Init maps\n arg_3 = []\n arg_4 = []\n arg_5 = {}\n arg_6 = {}\n\n # Init model\n arg_7 = Package(arg_0)\n\n # Get storage\n arg_8 = import_module('jsontableschema.plugins.%s' % arg_1)\n arg_9 = arg_8.Storage(**arg_2)\n\n # Collect tables/schemas/data\n for arg_10 in arg_7.resources:\n if not arg_10.tabular:\n continue\n arg_11 = arg_10.descriptor.get('name', None)\n arg_12 = _convert_path(arg_10.descriptor['path'], arg_11)\n arg_13 = arg_10.descriptor['schema']\n arg_14 = arg_10.table.iter(keyed=True)\n # TODO: review\n def values(arg_13, arg_14):\n for arg_15 in arg_14:\n arg_16 = []\n for arg_17 in arg_13['fields']:\n arg_16.append(arg_15.get(arg_17['name'], None))\n yield tuple(arg_16)\n arg_3.append(arg_12)\n arg_4.append(arg_13)\n arg_5[arg_12] = values(arg_13, arg_14)\n if arg_11 is not None:\n arg_6[arg_11] = arg_12\n arg_4 = _convert_schemas(arg_6, arg_4)\n\n # Create tables\n for arg_12 in arg_3:\n if arg_12 in arg_9.buckets:\n arg_9.delete(arg_12)\n arg_9.create(arg_3, arg_4)\n\n # Write data to tables\n for arg_12 in arg_9.buckets:\n if arg_12 in arg_5:\n arg_9.write(arg_12, arg_5[arg_12])\n return arg_9"} +{"_id": "doc_7928", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"Pull Data Package from storage.\n\n All parameters should be used as keyword arguments.\n\n Args:\n descriptor (str): path where to store descriptor\n name (str): name of the pulled datapackage\n backend (str): backend name like `sql` or `bigquery`\n backend_options (dict): backend options mentioned in backend docs\n\n \"\"\"\n\n # Deprecated\n warnings.warn(\n 'Functions \"push/Func\" are deprecated. 
'\n 'Please use \"Package\" class',\n UserWarning)\n\n # Save datapackage name\n arg_4 = arg_1\n\n # Get storage\n arg_5 = import_module('jsontableschema.plugins.%s' % arg_2)\n arg_6 = arg_5.Storage(**arg_3)\n\n # Iterate over tables\n arg_7 = []\n for arg_8 in arg_6.buckets:\n\n # Prepare\n arg_9 = arg_6.describe(arg_8)\n arg_10 = os.path.dirname(arg_0)\n arg_11, arg_1 = _restore_path(arg_8)\n arg_12 = os.path.join(arg_10, arg_11)\n\n # Write data\n helpers.ensure_dir(arg_12)\n with io.open(arg_12, 'wb') as file:\n arg_13 = Schema(deepcopy(arg_9))\n arg_14 = arg_6.iter(arg_8)\n arg_15 = csv.writer(file, arg_19='utf-8')\n arg_15.writerow(arg_13.headers)\n for arg_16 in arg_14:\n arg_15.writerow(arg_16)\n\n # Add resource\n arg_17 = {'schema': arg_9, 'path': arg_11}\n if arg_1 is not None:\n arg_17['name'] = arg_1\n arg_7.append(arg_17)\n\n # Write descriptor\n arg_18 = 'w'\n arg_19 = 'utf-8'\n if six.PY2:\n arg_18 = 'wb'\n arg_19 = None\n arg_7 = _restore_resources(arg_7)\n helpers.ensure_dir(arg_0)\n with io.open(arg_0,\n arg_18=arg_18,\n arg_19=arg_19) as file:\n arg_0 = {\n 'name': arg_4,\n 'resources': arg_7,\n }\n json.dump(arg_0, file, indent=4)\n return arg_6"} +{"_id": "doc_7929", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Convert resource's path and name to storage's table name.\n\n Args:\n path (str): resource path\n name (str): resource name\n\n Returns:\n str: table name\n\n \"\"\"\n arg_2 = os.path.splitext(arg_0)[0]\n arg_2 = arg_2.replace(os.path.sep, '__')\n if arg_1 is not None:\n arg_2 = '___'.join([arg_2, arg_1])\n arg_2 = re.sub('[^0-9a-zA-Z_]+', '_', arg_2)\n arg_2 = arg_2.lower()\n return arg_2"} +{"_id": "doc_7930", "title": "", "text": "def Func(arg_0):\n \"\"\"Restore schemas from being compatible with storage schemas.\n\n Foreign keys related operations.\n\n Args:\n list: resources from storage\n\n Returns:\n list: restored resources\n\n \"\"\"\n arg_0 = deepcopy(arg_0)\n for arg_1 in arg_0:\n arg_2 = arg_1['schema']\n for arg_3 in arg_2.get('foreignKeys', []):\n arg_4, arg_5 = _restore_path(arg_3['reference']['resource'])\n arg_3['reference']['resource'] = arg_5\n return arg_0"} +{"_id": "doc_7931", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"It is possible for some of gdb's output to be read before it completely finished its response.\n In that case, a partial mi response was read, which cannot be parsed into structured data.\n We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's\n output if the output did not end in a newline.\n\n Args:\n raw_output: Contents of the gdb mi output\n buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to\n gdb's next output.\n\n Returns:\n (raw_output, buf)\n \"\"\"\n\n if arg_0:\n if arg_1:\n # concatenate buffer and new output\n arg_0 = b\"\".join([arg_1, arg_0])\n arg_1 = None\n\n if b\"\\n\" not in arg_0:\n # newline was not found, so assume output is incomplete and store in buffer\n arg_1 = arg_0\n arg_0 = None\n\n elif not arg_0.endswith(b\"\\n\"):\n # raw output doesn't end in a newline, so store everything after the last newline (if anything)\n # in the buffer, and parse everything before it\n arg_2 = arg_0.rindex(b\"\\n\") + 1\n arg_1 = arg_0[arg_2:]\n arg_0 = arg_0[:arg_2]\n\n return (arg_0, arg_1)"} +{"_id": "doc_7932", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=arg_3,\n arg_4=True,\n arg_5=True,\n ):\n \"\"\"Write to gdb process. 
Block while parsing responses from gdb for a maximum of timeout_sec.\n\n Args:\n mi_cmd_to_Func (str or list): String to Func to gdb. If list, it is joined by newlines.\n timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.\n raise_error_on_timeout (bool): If read_response is True, raise error if no response is received\n read_response (bool): Block and read response. If there is a separate thread running,\n this can be false, and the reading thread read the output.\n Returns:\n List of parsed gdb responses if read_response is True, otherwise []\n Raises:\n NoGdbProcessError if there is no gdb subprocess running\n TypeError if mi_cmd_to_Func is not valid\n \"\"\"\n arg_0.verify_valid_gdb_subprocess()\n if arg_2 < 0:\n arg_0.logger.warning(\"timeout_sec was negative, replacing with 0\")\n arg_2 = 0\n\n # Ensure proper type of the mi command\n if type(arg_1) in [str, unicode]:\n pass\n elif type(arg_1) == list:\n arg_1 = \"\\n\".join(arg_1)\n else:\n raise TypeError(\n \"The gdb mi command must a be str or list. Got \"\n + str(type(arg_1))\n )\n\n arg_0.logger.debug(\"writing: %s\", arg_1)\n\n if not arg_1.endswith(\"\\n\"):\n arg_6 = arg_1 + \"\\n\"\n else:\n arg_6 = arg_1\n\n if USING_WINDOWS:\n # select not implemented in windows for pipes\n # assume it's always ready\n arg_7 = [arg_0.stdin_fileno]\n else:\n arg_8, arg_7, arg_8 = select.select([], arg_0.Func_list, [], arg_2)\n for arg_9 in arg_7:\n if arg_9 == arg_0.stdin_fileno:\n # ready to Func\n arg_0.gdb_process.stdin.Func(arg_6.encode())\n # don't forget to flush for Python3, otherwise gdb won't realize there is data\n # to evaluate, and we won't get a response\n arg_0.gdb_process.stdin.flush()\n else:\n arg_0.logger.error(\"got unexpected fileno %d\" % arg_9)\n\n if arg_5 is True:\n return arg_0.get_gdb_response(\n arg_2=arg_2, arg_4=arg_4\n )\n\n else:\n return []"} +{"_id": "doc_7933", "title": "", "text": "def Func(\n arg_0, arg_1=arg_2, arg_3=True\n ):\n \"\"\"Get response from GDB, and block while doing so. If GDB does not have any response ready to be read\n by timeout_sec, an exception is raised.\n\n Args:\n timeout_sec (float): Maximum time to wait for reponse. Must be >= 0. Will return after\n raise_error_on_timeout (bool): Whether an exception should be raised if no response was found\n after timeout_sec\n\n Returns:\n List of parsed GDB responses, returned from gdbmiparser.parse_response, with the\n additional key 'stream' which is either 'stdout' or 'stderr'\n\n Raises:\n GdbTimeoutError if response is not received within timeout_sec\n ValueError if select returned unexpected file number\n NoGdbProcessError if there is no gdb subprocess running\n \"\"\"\n\n arg_0.verify_valid_gdb_subprocess()\n if arg_1 < 0:\n arg_0.logger.warning(\"timeout_sec was negative, replacing with 0\")\n arg_1 = 0\n\n if USING_WINDOWS:\n arg_4 = arg_0._get_responses_windows(arg_1)\n else:\n arg_4 = arg_0._get_responses_unix(arg_1)\n\n if not arg_4 and arg_3:\n raise GdbTimeoutError(\n \"Did not get response from gdb after %s seconds\" % arg_1\n )\n\n else:\n return arg_4"} +{"_id": "doc_7934", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get responses on windows. 
Assume no support for select and use a while loop.\"\"\"\n arg_2 = time.time() + arg_1\n arg_3 = []\n while True:\n try:\n arg_0.gdb_process.stdout.flush()\n if PYTHON3:\n arg_4 = arg_0.gdb_process.stdout.readline().replace(\n b\"\\r\", b\"\\n\"\n )\n else:\n arg_4 = arg_0.gdb_process.stdout.read().replace(b\"\\r\", b\"\\n\")\n arg_3 += arg_0._get_responses_list(arg_4, \"stdout\")\n except IOError:\n pass\n\n try:\n arg_0.gdb_process.stderr.flush()\n if PYTHON3:\n arg_4 = arg_0.gdb_process.stderr.readline().replace(\n b\"\\r\", b\"\\n\"\n )\n else:\n arg_4 = arg_0.gdb_process.stderr.read().replace(b\"\\r\", b\"\\n\")\n arg_3 += arg_0._get_responses_list(arg_4, \"stderr\")\n except IOError:\n pass\n\n if time.time() > arg_2:\n break\n\n return arg_3"} +{"_id": "doc_7935", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get responses on unix-like system. Use select to wait for output.\"\"\"\n arg_2 = time.time() + arg_1\n arg_3 = []\n while True:\n arg_4 = arg_2 - time.time()\n # I prefer to not pass a negative value to select\n if arg_4 <= 0:\n arg_4 = 0\n arg_5, arg_6, arg_6 = select.select(arg_0.read_list, [], [], arg_4)\n arg_7 = None # to avoid infinite loop if using Python 2\n try:\n for arg_8 in arg_5:\n # new data is ready to read\n if arg_8 == arg_0.stdout_fileno:\n arg_0.gdb_process.stdout.flush()\n arg_9 = arg_0.gdb_process.stdout.read()\n arg_10 = \"stdout\"\n\n elif arg_8 == arg_0.stderr_fileno:\n arg_0.gdb_process.stderr.flush()\n arg_9 = arg_0.gdb_process.stderr.read()\n arg_10 = \"stderr\"\n\n else:\n raise ValueError(\n \"Developer error. Got unexpected file number %d\" % arg_8\n )\n\n arg_7 = arg_0._get_responses_list(arg_9, arg_10)\n arg_3 += arg_7\n\n except IOError: # only occurs in python 2.7\n pass\n\n if arg_1 == 0: # just exit immediately\n break\n\n elif arg_7 and arg_0._allow_overwrite_timeout_times:\n # update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb\n arg_2 = min(\n time.time() + arg_0.time_to_check_for_additional_output_sec,\n arg_2,\n )\n\n elif time.time() > arg_2:\n break\n\n return arg_3"} +{"_id": "doc_7936", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Read count characters starting at self.index,\n and return those characters as a string\n \"\"\"\n arg_2 = arg_0.index + arg_1\n if arg_2 > arg_0.len:\n arg_3 = arg_0.raw_text[arg_0.index :] # return to the end, don't fail\n else:\n arg_3 = arg_0.raw_text[arg_0.index : arg_2]\n arg_0.index = arg_2\n\n return arg_3"} +{"_id": "doc_7937", "title": "", "text": "def Func(arg_0):\n \"\"\"Parse gdb mi text and turn it into a dictionary.\n\n See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records\n for details on types of gdb mi output.\n\n Args:\n gdb_mi_text (str): String output from gdb\n\n Returns:\n dict with the following keys:\n type (either 'notify', 'result', 'console', 'log', 'target', 'done'),\n message (str or None),\n payload (str, list, dict, or None)\n \"\"\"\n arg_1 = StringStream(arg_0, debug=_DEBUG)\n\n if _GDB_MI_NOTIFY_RE.match(arg_0):\n arg_2, arg_3, arg_4 = _get_notify_msg_and_payload(arg_0, arg_1)\n return {\n \"type\": \"notify\",\n \"message\": arg_3,\n \"payload\": arg_4,\n \"token\": arg_2,\n }\n\n elif _GDB_MI_RESULT_RE.match(arg_0):\n arg_2, arg_3, arg_4 = _get_result_msg_and_payload(arg_0, arg_1)\n return {\n \"type\": \"result\",\n \"message\": arg_3,\n \"payload\": arg_4,\n \"token\": arg_2,\n }\n\n elif _GDB_MI_CONSOLE_RE.match(arg_0):\n return {\n 
\"type\": \"console\",\n \"message\": None,\n \"payload\": _GDB_MI_CONSOLE_RE.match(arg_0).groups()[0],\n }\n\n elif _GDB_MI_LOG_RE.match(arg_0):\n return {\n \"type\": \"log\",\n \"message\": None,\n \"payload\": _GDB_MI_LOG_RE.match(arg_0).groups()[0],\n }\n\n elif _GDB_MI_TARGET_OUTPUT_RE.match(arg_0):\n return {\n \"type\": \"target\",\n \"message\": None,\n \"payload\": _GDB_MI_TARGET_OUTPUT_RE.match(arg_0).groups()[0],\n }\n\n elif response_is_finished(arg_0):\n return {\"type\": \"done\", \"message\": None, \"payload\": None}\n\n else:\n # This was not gdb mi output, so it must have just been printed by\n # the inferior program that's being debugged\n return {\"type\": \"output\", \"message\": None, \"payload\": arg_0}"} +{"_id": "doc_7938", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get notify message and payload dict\"\"\"\n arg_2 = arg_1.advance_past_chars([\"=\", \"*\"])\n arg_2 = int(arg_2) if arg_2 != \"\" else None\n logger.debug(\"%s\", fmt_green(\"parsing message\"))\n arg_3 = arg_1.advance_past_chars([\",\"])\n\n logger.debug(\"parsed message\")\n logger.debug(\"%s\", fmt_green(arg_3))\n\n arg_4 = _parse_dict(arg_1)\n return arg_2, arg_3.strip(), arg_4"} +{"_id": "doc_7939", "title": "", "text": "def Func(arg_0, arg_1, arg_2 = 0.01, arg_3= 1e-6, arg_4=0.95, arg_5=\"ADADELTA\",\n arg_6=0.0, arg_7 = 0.0001):\n \"\"\"\n Optimize by SGD, AdaGrad, or AdaDelta.\n \"\"\"\n\n arg_8, arg_8, arg_8, arg_9 = inspect.getargvalues(inspect.currentframe())\n logging.info(\"Func: %s\" % str(arg_9.items()))\n arg_10 = []\n\n if arg_5 == \"FINETUNING_ADAGRAD\":\n arg_5 = \"ADAGRAD\"\n arg_7 = 0\n\n arg_11 = 1 - arg_6\n\n arg_12 = [theano.shared(np.zeros_like(arg_17.get_value(borrow=True), dtype=FLOATX), name=\"gsum_%s\" % arg_17.name) if (arg_5 == 'ADADELTA' or arg_5 == 'ADAGRAD') else None for arg_17 in arg_0]\n arg_13 = [theano.shared(np.zeros_like(arg_17.get_value(borrow=True), dtype=FLOATX), name=\"xsum_%s\" % arg_17.name) if arg_5 == 'ADADELTA' else None for arg_17 in arg_0]\n\n # Fix for AdaGrad, init gsum to 1\n if arg_5 == 'ADAGRAD':\n for arg_14 in arg_12:\n arg_14.set_value(arg_14.get_value() ** 0)\n\n arg_15 = OrderedDict()\n # Updates\n for arg_16, arg_17, arg_14, arg_18 in zip(arg_1, arg_0, arg_12, arg_13):\n\n if arg_5 == 'ADADELTA':\n arg_15[arg_14] = arg_4 * arg_14 + (1. - arg_4) * (arg_16 **2)\n arg_19 = -T.sqrt((arg_18 + arg_3) / (arg_15[arg_14] + arg_3)) * arg_16\n arg_15[arg_18] =arg_4 * arg_18 + (1. 
- arg_4) * (arg_19 **2)\n arg_15[arg_17] = arg_17 * arg_11 + arg_19\n elif arg_5 == 'ADAGRAD':\n arg_15[arg_14] = arg_14 + (arg_16 **2) - arg_7 * arg_14\n arg_15[arg_17] = arg_17 * arg_11 - arg_2 * (arg_16 / (T.sqrt(arg_15[arg_14] + arg_3)))\n\n else:\n arg_15[arg_17] = arg_17 * arg_11 - arg_16 * arg_2\n # Add free parameters\n if arg_5 == 'ADADELTA':\n arg_10.extend(arg_12 + arg_13)\n elif arg_5 == 'ADAGRAD':\n arg_10.extend(arg_12)\n # Check dtype\n for arg_20 in arg_15:\n if arg_15[arg_20].dtype != FLOATX:\n arg_15[arg_20] = arg_15[arg_20].astype(FLOATX)\n return arg_15.items(), arg_10"} +{"_id": "doc_7940", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return updates in the training.\n \"\"\"\n arg_1 = arg_0.training_params()\n arg_2 = arg_0.get_gradients(arg_1)\n return arg_0.optimization_updates(arg_1, arg_2)"} +{"_id": "doc_7941", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get parameters to be optimized.\n \"\"\"\n arg_1 = arg_0.network.parameters\n # Freeze parameters\n if arg_0.config.fixed_parameters:\n logging.info(\"fixed parameters: %s\" % \", \".join(map(str, arg_0.config.fixed_parameters)))\n arg_1 = [p for p in arg_1 if p not in arg_0.config.fixed_parameters]\n return arg_1"} +{"_id": "doc_7942", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Compute first glimpse position using down-sampled image.\n \"\"\"\n arg_2 = theano.tensor.signal.downsample.max_pool_2d(arg_1, (4,4))\n arg_2 = arg_2.flatten()\n arg_3 = T.dot(arg_2, arg_0.W_f)\n if arg_0.disable_reinforce:\n arg_4 = arg_0.W_f\n if arg_0.random_glimpse:\n arg_3 = arg_0.srng.uniform((2,), low=-1.7, high=1.7)\n else:\n arg_5 = arg_0._sample_gaussian(arg_3, arg_0.cov)\n arg_6 = arg_0._multi_gaussian_pdf(disconnected_grad(arg_5), arg_3)\n arg_4 = T.grad(T.log(arg_6), arg_0.W_f)\n arg_3 = arg_5\n return arg_3, arg_4"} +{"_id": "doc_7943", "title": "", "text": "def Func(arg_0):\n \"\"\"\n All codes that create parameters should be put into 'setup' function.\n \"\"\"\n arg_0.output_dim = 10\n arg_0.encoder = Chain(arg_0.input_dim).stack(Dense(arg_0.internal_layer_size, 'tanh'))\n arg_0.decoder = Chain(arg_0.internal_layer_size).stack(Dense(arg_0.input_dim))\n arg_0.classifier = Chain(arg_0.internal_layer_size).stack(Dense(50, 'tanh'),\n Dense(arg_0.output_dim),\n Softmax())\n\n arg_0.register_inner_layers(arg_0.encoder, arg_0.decoder, arg_0.classifier)\n\n arg_0.target_input = T.ivector('target')\n arg_0.register_external_inputs(arg_0.target_input)"} +{"_id": "doc_7944", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Build the computation graph here.\n \"\"\"\n arg_2 = arg_0.encoder.Func(arg_1)\n\n arg_3 = arg_0.decoder.Func(arg_2)\n\n arg_4 = arg_0.classifier.Func(arg_2)\n\n arg_5 = AutoEncoderCost(arg_3, arg_1).get()\n\n arg_6 = CrossEntropyCost(arg_4, arg_0.target_input).get()\n\n arg_7 = 0.01 * arg_5 + arg_6\n\n arg_8 = ErrorRateCost(arg_4, arg_0.target_input).get()\n\n arg_0.register_monitors((\"err\", arg_8),\n (\"encoder_cost\", arg_5),\n (\"classify_cost\", arg_6))\n\n return arg_7"} +{"_id": "doc_7945", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process all data with given function.\n The scheme of function should be x,y -> x,y.\n \"\"\"\n if arg_0._train_set:\n arg_0._train_set = Func(arg_1, arg_0._train_set)\n if arg_0._valid_set:\n arg_0._valid_set = Func(arg_1, arg_0._valid_set)\n if arg_0._test_set:\n arg_0._test_set = Func(arg_1, arg_0._test_set)"} +{"_id": "doc_7946", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Make targets be one-hot vectors.\n 
\"\"\"\n if arg_0._train_set:\n arg_0._train_set = arg_0._vectorize_set(arg_0._train_set, arg_1)\n if arg_0._valid_set:\n arg_0._valid_set = arg_0._vectorize_set(arg_0._valid_set, arg_1)\n if arg_0._test_set:\n arg_0._test_set = arg_0._vectorize_set(arg_0._test_set, arg_1)"} +{"_id": "doc_7947", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Print dataset statistics.\n \"\"\"\n logging.info(\"%s train=%d valid=%d test=%d\" % (arg_0.__class__.__name__,\n len(list(arg_0._train_set)) if arg_0._train_set else 0,\n len(list(arg_0._valid_set)) if arg_0._valid_set else 0,\n len(list(arg_0._test_set)) if arg_0._test_set else 0))"} +{"_id": "doc_7948", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n '''We Func over mini-batches and evaluate periodically.'''\n arg_5 = 0\n while True:\n if not arg_5 % arg_0.config.test_frequency and arg_3:\n try:\n arg_0.test(arg_5, arg_3)\n except KeyboardInterrupt:\n logging.info('interrupted!')\n break\n\n if not arg_5 % arg_0.validation_frequency and arg_2:\n try:\n if not arg_0.evaluate(arg_5, arg_2):\n logging.info('patience elapsed, bailing out')\n break\n except KeyboardInterrupt:\n logging.info('interrupted!')\n break\n\n arg_6 = \"\"\n try:\n arg_6 = arg_0.Func_func(arg_1)\n except KeyboardInterrupt:\n logging.info('interrupted!')\n break\n if not arg_5 % arg_0.config.monitor_frequency:\n logging.info('monitor (iter=%i) %s', arg_5 + 1, arg_6)\n\n arg_5 += 1\n if hasattr(arg_0.network, \"iteration_callback\"):\n arg_0.network.iteration_callback()\n\n yield arg_6\n\n if arg_2:\n arg_0.set_params(arg_0.best_params)\n if arg_3:\n arg_0.test(0, arg_3)"} +{"_id": "doc_7949", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Sample outputs from LM.\n \"\"\"\n arg_3 = [[onehot(arg_0.input_dim, x) for x in arg_1]]\n for arg_4 in range(arg_2):\n arg_5 = arg_0.compute(arg_3)[0,-1].argmax()\n arg_1.append(arg_5)\n arg_3[0].append(onehot(arg_0.input_dim, arg_5))\n return arg_1"} +{"_id": "doc_7950", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Compute the alignment weights based on the previous state.\n \"\"\"\n\n arg_4 = T.dot(arg_1, arg_0.Wa)\n arg_5 = arg_2\n # For test time the UaH will be (time, output_dim)\n if arg_5.ndim == 2:\n arg_6 = arg_4[:, None, :] + arg_5[None, :, :]\n else:\n arg_6 = arg_4[:, None, :] + arg_5\n arg_7 = T.activate(arg_6, 'tanh')\n arg_8 = T.dot(arg_7, arg_0.Va) # ~ (batch, time)\n if arg_3:\n arg_3 = (1 - arg_3) * -99.00\n if arg_8.ndim == 3:\n arg_8 += arg_3[None, :]\n else:\n arg_8 += arg_3\n arg_9 = T.nnet.softmax(arg_8)\n return arg_9"} +{"_id": "doc_7951", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Pad sequences to given length in the left or right side.\n \"\"\"\n if arg_0._train_set:\n arg_0._train_set = pad_dataset(arg_0._train_set, arg_1, arg_2)\n if arg_0._valid_set:\n arg_0._valid_set = pad_dataset(arg_0._valid_set, arg_1, arg_2)\n if arg_0._test_set:\n arg_0._test_set = pad_dataset(arg_0._test_set, arg_1, arg_2)"} +{"_id": "doc_7952", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.9, arg_3=0.01):\n \"\"\"\n RMSPROP optimization core.\n \"\"\"\n for arg_4, arg_5 in zip(arg_0, arg_1):\n arg_6 = theano.shared(np.zeros_like(arg_4.get_value()), name=arg_4.name + '_rms')\n arg_7 = arg_2 * arg_6 + (1 - arg_2) * arg_5 * arg_5\n yield arg_6, arg_7\n yield arg_4, arg_4 - arg_3 * arg_5 / T.sqrt(arg_7 + 1e-8)"} +{"_id": "doc_7953", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Run the model with validation data and 
return costs.\n \"\"\"\n arg_2 = arg_0.compute(*arg_1)\n return arg_0._extract_costs(arg_2)"} +{"_id": "doc_7954", "title": "", "text": "def Func(arg_0):\n \"\"\"\n This function will be called after each iteration.\n \"\"\"\n arg_0._counter += 1\n if arg_0._counter % arg_0._freq == 0:\n arg_1 = 0.\n arg_2 = defaultdict(float)\n for arg_3 in arg_0._trainer.get_data(arg_0._data_split):\n arg_4 = arg_0.run(arg_3)\n if not isinstance(arg_4, dict):\n raise Exception(\"Monitor.run must return a dict.\")\n for arg_5, arg_6 in arg_4.items():\n arg_2[arg_5] += arg_6\n arg_1 += 1\n for arg_5 in arg_2:\n arg_2[arg_5] /= arg_1\n arg_7 = arg_0.compare(arg_2)\n arg_0._trainer.report(arg_2, arg_0._data_split, arg_7=arg_7)\n if arg_7:\n arg_0._trainer.save_checkpoint(arg_0._save_path)"} +{"_id": "doc_7955", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Create inner loop variables.\n \"\"\"\n from theano.tensor.var import TensorVariable\n from deepy.core.neural_var import NeuralVariable\n if not arg_0._loop_vars:\n arg_0._ordered_out_keys = arg_0._outputs.keys()\n arg_2 = arg_0._sequences.keys()\n arg_3 = [arg_10 for arg_10 in arg_0._ordered_out_keys if arg_0._outputs[arg_10]]\n arg_4 = arg_0._non_sequences.keys()\n arg_5, arg_0._scan_local_vars = get_dummy_args(\n sequences=[arg_0._sequences[arg_10].tensor for arg_10 in arg_2],\n outputs_info=[arg_0._outputs[arg_10].tensor for arg_10 in arg_0._ordered_out_keys],\n non_sequences=[arg_0._non_sequences[arg_10].tensor for arg_10 in arg_4],\n **arg_0._kwargs\n )\n arg_7 = dict(zip(arg_2 + arg_3 + arg_4, arg_5))\n arg_8 = arg_0._sequences.copy()\n arg_8.update(arg_0._outputs)\n arg_8.update(arg_0._non_sequences)\n arg_0._loop_vars = LoopVars()\n for arg_10, arg_11 in arg_7.items():\n arg_12 = NeuralVariable(arg_11, dim=arg_8[arg_10].dim())\n arg_0._loop_vars[arg_10] = arg_12"} +{"_id": "doc_7956", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Internal scan with dummy input variables.\n \"\"\"\n from neural_var import NeuralVariable\n if not arg_0._loop_vars:\n raise Exception(\"The loop is not initialized. 
To initialize the loop, use `with loop as vars`\")\n arg_2 = {}\n for arg_3, arg_4 in arg_1.items():\n if arg_4 is not None:\n arg_2[arg_0._dummy_nodes[arg_3].tensor] = arg_4.tensor\n arg_7 = {}\n for arg_3 in arg_0._outputs:\n if arg_3 not in arg_0._loop_vars:\n raise Exception(\"{} can not be found in loop vars.\".format(arg_3))\n arg_8 = theano.clone(arg_0._loop_vars[arg_3].tensor, arg_2)\n arg_7[arg_3] = NeuralVariable(arg_8, arg_0._loop_vars[arg_3].dim())\n return arg_7"} +{"_id": "doc_7957", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.9, arg_3=0.01):\n \"\"\"\n Momentum SGD optimization core.\n \"\"\"\n arg_4 = []\n arg_5 = []\n for arg_6, arg_7 in zip(arg_0, arg_1):\n arg_8 = arg_3 * arg_7\n arg_9 = theano.shared(np.zeros_like(arg_6.get_value()), name=arg_6.name + '_vel')\n arg_5.append((arg_9, arg_2 * arg_9 - arg_8))\n arg_5.append((arg_6, arg_6 + arg_9))\n arg_4.append(arg_9)\n return arg_5, arg_4"} +{"_id": "doc_7958", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0):\n \"\"\"\n Skip N batches in the training.\n \"\"\"\n logging.info(\"Func %d epochs and %d batches\" % (arg_2, arg_1))\n arg_0._Func_batches = arg_1\n arg_0._Func_epochs = arg_2"} +{"_id": "doc_7959", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Load parameters for the training.\n This method can load free parameters and resume the training progress.\n \"\"\"\n arg_0.network.Func(arg_1, arg_2=arg_2)\n arg_0.best_params = arg_0.copy_params()\n # Resume the progress\n if arg_0.network.train_logger.progress() > 0 or arg_0.network.train_logger.epoch() > 0:\n arg_0.skip(arg_0.network.train_logger.progress(), arg_0.network.train_logger.epoch() - 1)"} +{"_id": "doc_7960", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Train the model and return costs.\n \"\"\"\n arg_0._epoch = 0\n while True:\n if arg_0._skip_epochs > 0:\n logging.info(\"skipping one epoch ...\")\n arg_0._skip_epochs -= 1\n arg_0._epoch += 1\n yield None\n continue\n # Test\n if not arg_0._epoch % arg_0.config.test_frequency and arg_3:\n try:\n arg_0._run_test(arg_0._epoch, arg_3)\n except KeyboardInterrupt:\n logging.info('interrupted!')\n break\n # Validate\n if not arg_0._epoch % arg_0.validation_frequency and arg_2:\n try:\n\n if not arg_0._run_valid(arg_0._epoch, arg_2):\n logging.info('patience elapsed, bailing out')\n break\n except KeyboardInterrupt:\n logging.info('interrupted!')\n break\n # Train one step\n\n try:\n arg_6 = arg_0._run_Func(arg_0._epoch, arg_1, arg_4)\n except KeyboardInterrupt:\n logging.info('interrupted!')\n break\n # Check costs\n if np.isnan(arg_6[0][1]):\n logging.info(\"NaN detected in costs, rollback to last parameters\")\n arg_0.set_params(*arg_0.checkpoint)\n else:\n arg_0._epoch += 1\n arg_0.network.epoch_callback()\n\n yield dict(arg_6)\n\n if arg_2 and arg_0.config.get(\"save_best_parameters\", True):\n arg_0.set_params(*arg_0.best_params)\n if arg_3:\n arg_0._run_test(-1, arg_3)"} +{"_id": "doc_7961", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"\n Run one training iteration.\n \"\"\"\n arg_0.network.train_logger.record_epoch(arg_1 + 1)\n arg_4 = arg_0.train_step(arg_2, arg_3)\n if not arg_1 % arg_0.config.monitor_frequency:\n arg_0.report(dict(arg_4), \"train\", arg_1)\n arg_0.last_run_costs = arg_4\n return arg_4"} +{"_id": "doc_7962", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=None):\n \"\"\"\n Run one valid iteration, return true if to continue training.\n \"\"\"\n arg_5 = 
arg_0.valid_step(arg_2)\n # this is the same as: (J_i - J_f) / J_i > min improvement\n arg_6, arg_7 = arg_5[0]\n arg_8 = False\n if arg_0.best_cost - arg_7 > arg_0.best_cost * arg_0.min_improvement:\n # save the best cost and parameters\n arg_0.best_params = arg_0.copy_params()\n arg_8 = True\n if not arg_3:\n arg_0.best_cost = arg_7\n arg_0.best_epoch = arg_1\n arg_0.save_checkpoint(arg_4)\n\n arg_0.report(dict(arg_5), type=\"valid\", arg_1=0 if arg_3 else arg_1, arg_8=arg_8)\n arg_0.last_run_costs = arg_5\n return arg_1 - arg_0.best_epoch < arg_0.patience"} +{"_id": "doc_7963", "title": "", "text": "def Func(arg_0, arg_1=\"train\"):\n \"\"\"\n Get specified split of data.\n \"\"\"\n if arg_1 == 'train':\n return arg_0._current_train_set\n elif arg_1 == 'valid':\n return arg_0._current_valid_set\n elif arg_1 == 'test':\n return arg_0._current_test_set\n else:\n return None"} +{"_id": "doc_7964", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Report usage of training parameters.\n \"\"\"\n if arg_0.logger:\n arg_0.logger.info(\"accessed parameters:\")\n for arg_1 in arg_0.used_parameters:\n arg_0.logger.info(\" - %s %s\" % (arg_1, \"(undefined)\" if arg_1 in arg_0.undefined_parameters else \"\"))"} +{"_id": "doc_7965", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None):\n \"\"\"\n An alias of deepy.tensor.Func.\n \"\"\"\n from deepy.tensor import Func\n return Func(arg_1, arg_2=arg_2, arg_3=arg_3)"} +{"_id": "doc_7966", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\"train\"):\n \"\"\"\n Create vars given a dataset and set test values.\n Useful when dataset is already defined.\n \"\"\"\n from deepy.core.neural_var import NeuralVariable\n arg_3 = []\n if arg_2 == \"valid\":\n arg_4 = arg_1.valid_set()\n elif arg_2 == \"test\":\n arg_4 = arg_1.test_set()\n else:\n arg_4 = arg_1.train_set()\n arg_5 = list(arg_4)[0]\n for arg_6, arg_7 in enumerate(arg_5):\n if arg_7.dtype == \"int64\":\n arg_7 = arg_7.astype(\"int32\")\n if arg_7.dtype == \"float64\":\n arg_7 = arg_7.astype(env.FLOATX)\n arg_8 = {\n 0: \"scalar\",\n 1: \"vector\",\n 2: \"matrix\",\n 3: \"tensor3\",\n 4: \"tensor4\",\n 5: \"tensor5\",\n }\n arg_9 = arg_8[arg_7.ndim] if arg_7.ndim in arg_8 else arg_8[0]\n if arg_7.dtype.kind == \"i\":\n arg_9 = \"i\" + arg_9\n arg_10 = getattr(TT, arg_9)(\"input_{}_{}\".format(arg_6 + 1, arg_9))\n arg_11 = arg_7.shape[-1]\n arg_12 = NeuralVariable(arg_10, dim=arg_11)\n arg_12.set_test_value(arg_7)\n arg_3.append(arg_12)\n return arg_3"} +{"_id": "doc_7967", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Create a Func theano scalar value.\n \"\"\"\n if type(arg_1) == int:\n arg_3 = np.array(arg_1, dtype=\"int32\")\n elif type(arg_1) == float:\n arg_3 = np.array(arg_1, dtype=env.FLOATX)\n else:\n arg_3 = arg_1\n\n return theano.Func(arg_3, arg_2=arg_2)"} +{"_id": "doc_7968", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Stack encoding layers, this must be done before stacking decoding layers.\n \"\"\"\n arg_0.stack(*arg_1)\n arg_0.encoding_layes.extend(arg_1)"} +{"_id": "doc_7969", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Stack decoding layers.\n \"\"\"\n arg_0.stack(*arg_1)\n arg_0.decoding_layers.extend(arg_1)"} +{"_id": "doc_7970", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Encode given input.\n \"\"\"\n if not arg_0.encoding_network:\n arg_0.encoding_network = NeuralNetwork(arg_0.input_dim, arg_0.input_tensor)\n arg_0.encoding_network.input_variables = arg_0.input_variables\n for arg_4 in 
arg_0.encoding_layes:\n arg_0.encoding_network.stack_layer(arg_4, no_setup=True)\n return arg_0.encoding_network.compute(*arg_1)"} +{"_id": "doc_7971", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Register the layer so that it's param will be trained.\n But the output of the layer will not be stacked.\n \"\"\"\n if type(arg_1) == Block:\n arg_1.fix()\n arg_0.parameter_count += arg_1.parameter_count\n arg_0.parameters.extend(arg_1.parameters)\n arg_0.free_parameters.extend(arg_1.free_parameters)\n arg_0.training_monitors.extend(arg_1.training_monitors)\n arg_0.testing_monitors.extend(arg_1.testing_monitors)\n arg_0.updates.extend(arg_1.updates)\n arg_0.training_updates.extend(arg_1.training_updates)\n arg_0.input_variables.extend(arg_1.external_inputs)\n arg_0.target_variables.extend(arg_1.external_targets)\n\n arg_0.training_callbacks.extend(arg_1.training_callbacks)\n arg_0.testing_callbacks.extend(arg_1.testing_callbacks)\n arg_0.epoch_callbacks.extend(arg_1.epoch_callbacks)"} +{"_id": "doc_7972", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Monitoring the outputs of each layer.\n Useful for troubleshooting convergence problems.\n \"\"\"\n for arg_1, arg_2 in zip(arg_0.layers, arg_0._hidden_outputs):\n arg_0.training_monitors.append(('mean(%s)' % (arg_1.name), abs(arg_2).mean()))"} +{"_id": "doc_7973", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return all parameters.\n \"\"\"\n arg_1 = []\n arg_1.extend(arg_0.parameters)\n arg_1.extend(arg_0.free_parameters)\n\n return arg_1"} +{"_id": "doc_7974", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Set up variables.\n \"\"\"\n if arg_0.input_tensor:\n if type(arg_0.input_tensor) == int:\n arg_1 = dim_to_var(arg_0.input_tensor, name=\"x\")\n else:\n arg_1 = arg_0.input_tensor\n else:\n arg_1 = T.matrix('x')\n arg_0.input_variables.append(arg_1)\n arg_0._output = arg_1\n arg_0._test_output = arg_1"} +{"_id": "doc_7975", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Save parameters to file.\n \"\"\"\n save_logger.info(arg_1)\n arg_3 = arg_0.all_parameters\n arg_4 = [p.get_value().copy() for p in arg_3]\n if arg_2:\n arg_5 = Thread(target=save_network_params, args=(arg_4, arg_1))\n arg_5.start()\n else:\n save_network_params(arg_4, arg_1)\n arg_0.train_logger.save(arg_1)"} +{"_id": "doc_7976", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Load parameters from file.\n \"\"\"\n if not os.path.exists(arg_1): return;\n logging.info(\"loading parameters from %s\" % arg_1)\n # Decide which parameters to load\n if arg_2:\n arg_3 = arg_0.parameters\n else:\n arg_3 = arg_0.all_parameters\n # Load parameters\n if arg_1.endswith(\".gz\"):\n arg_4 = gzip.open if arg_1.lower().endswith('.gz') else open\n arg_5 = arg_4(arg_1, 'rb')\n arg_6 = pickle.load(arg_5)\n arg_5.close()\n # Write parameters\n for arg_7, arg_8 in zip(arg_3, arg_6):\n logging.info('%s: setting value %s', arg_7.name, arg_8.shape)\n arg_7.set_value(arg_8)\n elif arg_1.endswith(\".npz\"):\n arg_9 = np.load(arg_1)\n # Write parameters\n for arg_7, arg_10 in zip(arg_3, range(len(arg_9.keys()))):\n arg_8 = arg_9['arr_%d' % arg_10]\n logging.info('%s: setting value %s', arg_7.name, arg_8.shape)\n arg_7.set_value(arg_8)\n else:\n raise Exception(\"File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'\" % arg_1)\n\n arg_0.train_logger.load(arg_1)"} +{"_id": "doc_7977", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Print network statistics.\n \"\"\"\n logging.info(\"network inputs: %s\", \" 
\".join(map(str, arg_0.input_variables)))\n logging.info(\"network targets: %s\", \" \".join(map(str, arg_0.target_variables)))\n logging.info(\"network parameters: %s\", \" \".join(map(str, arg_0.all_parameters)))\n logging.info(\"parameter count: %d\", arg_0.parameter_count)"} +{"_id": "doc_7978", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Register parameters.\n \"\"\"\n for arg_2 in arg_1:\n arg_0.parameter_count += np.prod(arg_2.get_value().shape)\n arg_0.parameters.extend(arg_1)"} +{"_id": "doc_7979", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Register updates that will only be executed in training phase.\n \"\"\"\n for arg_2, arg_3 in arg_1:\n if arg_2 not in arg_0._registered_training_updates:\n arg_0.training_updates.append((arg_2, arg_3))\n arg_0._registered_training_updates.add(arg_2)"} +{"_id": "doc_7980", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Register monitors they should be tuple of name and Theano variable.\n \"\"\"\n for arg_2, arg_3 in arg_1:\n if arg_2 not in arg_0._registered_monitors:\n arg_3 *= 1.0 # Avoid CudaNdarray\n arg_0.training_monitors.append((arg_2, arg_3))\n arg_0.testing_monitors.append((arg_2, arg_3))\n arg_0._registered_monitors.add(arg_2)"} +{"_id": "doc_7981", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the L2 norm of multiple tensors.\n This function is taken from blocks.\n \"\"\"\n # Another way for doing this, I don't know which one is fast\n # return T.sqrt(sum(T.sum(t ** 2) for t in tensors))\n arg_1 = [T.as_tensor_variable(t).flatten() for t in arg_0]\n arg_1 = [(t if t.ndim > 0 else t.dimshuffle('x'))\n for t in arg_1]\n arg_2 = T.join(0, *arg_1)\n return T.sqrt(T.sqr(arg_2).sum())"} +{"_id": "doc_7982", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n dumps one element to file_obj, a file opened in write mode\n \"\"\"\n arg_2 = dumps(arg_0)\n arg_1.write(arg_2)\n # record separator is a blank line\n # (since pickled_elt_str might contain its own newlines)\n arg_1.write('\\n\\n')"} +{"_id": "doc_7983", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n from deepy.core import graph\n \"\"\"\n Load parameters to the block.\n \"\"\"\n from deepy.core.comp_graph import ComputationalGraph\n arg_3 = graph.compile(blocks=[arg_0])\n arg_3.Func(arg_1, arg_2=arg_2)"} +{"_id": "doc_7984", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4='GET', arg_5=None,\n arg_6=None, arg_7='', arg_8=None, arg_9='', arg_10='',\n arg_11='', arg_12=''\n ):\n \"\"\"\n Creates |oauth2| request elements.\n \"\"\"\n\n arg_6 = arg_6 or {}\n arg_5 = arg_5 or {}\n\n arg_13 = arg_2.consumer_key or ''\n arg_14 = arg_2.consumer_secret or ''\n arg_15 = arg_2.token or ''\n arg_16 = arg_2.refresh_token or arg_2.token or ''\n\n # Separate url base and query parameters.\n arg_3, arg_17 = arg_0._split_url(arg_3)\n\n # Add params extracted from URL.\n arg_5.update(dict(arg_17))\n\n if arg_1 == arg_0.USER_AUTHORIZATION_REQUEST_TYPE:\n # User authorization request.\n # TODO: Raise error for specific message for each missing argument.\n if arg_13 and arg_9 and (\n arg_11 or not arg_0.supports_csrf_protection):\n arg_5['client_id'] = arg_13\n arg_5['redirect_uri'] = arg_9\n arg_5['scope'] = arg_10\n if arg_0.supports_user_state:\n arg_5['state'] = base64.urlsafe_b64encode(\n json.dumps(\n {\"csrf\": arg_11, \"user_state\": arg_12}\n ).encode('utf-8')\n )\n else:\n arg_5['state'] = arg_11\n arg_5['response_type'] = 'code'\n\n # Add authorization header\n 
arg_6.update(arg_0._authorization_header(arg_2))\n else:\n raise OAuth2Error(\n 'Credentials with valid consumer_key and arguments '\n 'redirect_uri, scope and state are required to create '\n 'OAuth 2.0 user authorization request elements!')\n\n elif arg_1 == arg_0.ACCESS_TOKEN_REQUEST_TYPE:\n # Access token request.\n if arg_13 and arg_14:\n arg_5['code'] = arg_15\n arg_5['client_id'] = arg_13\n arg_5['client_secret'] = arg_14\n arg_5['redirect_uri'] = arg_9\n arg_5['grant_type'] = 'authorization_code'\n\n # TODO: Check whether all providers accept it\n arg_6.update(arg_0._authorization_header(arg_2))\n else:\n raise OAuth2Error(\n 'Credentials with valid token, consumer_key, '\n 'consumer_secret and argument redirect_uri are required '\n 'to create OAuth 2.0 access token request elements!')\n\n elif arg_1 == arg_0.REFRESH_TOKEN_REQUEST_TYPE:\n # Refresh access token request.\n if arg_16 and arg_13 and arg_14:\n arg_5['refresh_token'] = arg_16\n arg_5['client_id'] = arg_13\n arg_5['client_secret'] = arg_14\n arg_5['grant_type'] = 'refresh_token'\n else:\n raise OAuth2Error(\n 'Credentials with valid refresh_token, consumer_key, '\n 'consumer_secret are required to create OAuth 2.0 '\n 'refresh token request elements!')\n\n elif arg_1 == arg_0.PROTECTED_RESOURCE_REQUEST_TYPE:\n # Protected resource request.\n\n # Add Authorization header. See:\n # http://tools.ietf.org/html/rfc6749#section-7.1\n if arg_2.token_type == arg_0.BEARER:\n # http://tools.ietf.org/html/rfc6750#section-2.1\n arg_6.update(\n {'Authorization': 'Bearer {0}'.format(arg_2.token)})\n\n elif arg_15:\n arg_5['access_token'] = arg_15\n else:\n raise OAuth2Error(\n 'Credentials with valid token are required to create '\n 'OAuth 2.0 protected resources request elements!')\n\n arg_18 = core.RequestElements(\n arg_3, arg_4, arg_5, arg_6, arg_7)\n\n return arg_0._x_request_elements_filter(\n arg_1, arg_18, arg_2)"} +{"_id": "doc_7985", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n We need to override this method to fix Facebooks naming deviation.\n \"\"\"\n\n # Facebook returns \"expires\" instead of \"expires_in\".\n arg_0.expire_in = arg_1.get('expires')\n\n if arg_1.get('token_type') == 'bearer':\n # TODO: cls is not available here, hardcode for now.\n arg_0.token_type = 'Bearer'\n\n return arg_0"} +{"_id": "doc_7986", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"\n Google doesn't accept client ID and secret to be at the same time in\n request parameters and in the basic authorization header in the access\n token request.\n \"\"\"\n if arg_1 is arg_0.ACCESS_TOKEN_REQUEST_TYPE:\n arg_4 = arg_2[2]\n del arg_4['client_id']\n del arg_4['client_secret']\n return arg_2"} +{"_id": "doc_7987", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Login handler, must accept both GET and POST to be able to use OpenID.\n \"\"\"\n\n # We need response object for the WerkzeugAdapter.\n arg_1 = make_response()\n\n # Log the user in, pass it the adapter and the provider name.\n arg_2 = authomatic.Func(\n WerkzeugAdapter(\n request,\n arg_1),\n arg_0)\n\n # If there is no LoginResult object, the Func procedure is still pending.\n if arg_2:\n if arg_2.user:\n # We need to update the user to get more info.\n arg_2.user.update()\n\n # The rest happens inside the template.\n return render_template('Func.html', arg_2=arg_2)\n\n # Don't forget to return the response.\n return arg_1"} +{"_id": "doc_7988", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Replaces all values that are single-item iterables with the 
value of its\n index 0.\n\n :param dict dict_:\n Dictionary to normalize.\n\n :returns:\n Normalized dictionary.\n\n \"\"\"\n\n return dict([(arg_1, arg_2[0] if not isinstance(arg_2, str) and len(arg_2) == 1 else arg_2)\n for arg_1, arg_2 in list(arg_0.items())])"} +{"_id": "doc_7989", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts list of tuples to dictionary with duplicate keys converted to\n lists.\n\n :param list items:\n List of tuples.\n\n :returns:\n :class:`dict`\n\n \"\"\"\n\n arg_1 = collections.defaultdict(list)\n\n for arg_2, arg_3 in arg_0:\n arg_1[arg_2].append(arg_3)\n\n return normalize_dict(dict(arg_1))"} +{"_id": "doc_7990", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parses response body from JSON, XML or query string.\n\n :param body:\n string\n\n :returns:\n :class:`dict`, :class:`list` if input is JSON or query string,\n :class:`xml.etree.ElementTree.Element` if XML.\n\n \"\"\"\n try:\n # Try JSON first.\n return json.loads(arg_0)\n except (OverflowError, TypeError, ValueError):\n pass\n\n try:\n # Then XML.\n return ElementTree.fromstring(arg_0)\n except (ElementTree.ParseError, TypeError, ValueError):\n pass\n\n # Finally query string.\n return dict(parse.parse_qsl(arg_0))"} +{"_id": "doc_7991", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a provider class.\n\n :param class_name: :class:`string` or\n :class:`authomatic.providers.BaseProvider` subclass.\n\n \"\"\"\n\n if isinstance(arg_0, str):\n # prepare path for authomatic.providers package\n arg_1 = '.'.join([__package__, 'providers', arg_0])\n\n # try to import class by string from providers module or by fully\n # qualified path\n return import_string(arg_0, True) or import_string(arg_1)\n else:\n return arg_0"} +{"_id": "doc_7992", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Creates the value for ``Set-Cookie`` HTTP header.\n\n :param bool delete:\n If ``True`` the cookie value will be ``deleted`` and the\n Expires value will be ``Thu, 01-Jan-1970 00:00:01 GMT``.\n\n \"\"\"\n arg_2 = 'deleted' if arg_1 else arg_0._serialize(arg_0.data)\n arg_3 = parse.urlsplit(arg_0.adapter.url)\n arg_4 = arg_3.netloc.split(':')[0]\n\n # Work-around for issue #11, failure of WebKit-based browsers to accept\n # cookies set as part of a redirect response in some circumstances.\n if '.' not in arg_4:\n arg_5 = '{name}={value}; Path={path}; HttpOnly{secure}{expires}'\n else:\n arg_5 = ('{name}={value}; Domain={domain}; Path={path}; '\n 'HttpOnly{secure}{expires}')\n\n return arg_5.format(\n name=arg_0.name,\n arg_2=arg_2,\n arg_4=arg_4,\n path=arg_3.path,\n secure='; Secure' if arg_0.secure else '',\n expires='; Expires=Thu, 01-Jan-1970 00:00:01 GMT' if arg_1 else ''\n )"} +{"_id": "doc_7993", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Creates signature for the session.\n \"\"\"\n arg_2 = hmac.new(six.b(arg_0.secret), digestmod=hashlib.sha1)\n arg_2.update(six.b('|'.join(arg_1)))\n return arg_2.hexdigest()"} +{"_id": "doc_7994", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Converts the value to a signed string with timestamp.\n\n :param value:\n Object to be serialized.\n\n :returns:\n Serialized value.\n\n \"\"\"\n\n # data = copy.deepcopy(value)\n arg_2 = arg_1\n\n # 1. Serialize\n arg_3 = pickle.dumps(arg_2).decode('latin-1')\n\n # 2. Encode\n # Percent encoding produces smaller result then urlsafe base64.\n arg_4 = parse.quote(arg_3, '')\n\n # 3. 
Concatenate\n arg_5 = str(int(time.time()))\n arg_6 = arg_0._signature(arg_0.name, arg_4, arg_5)\n arg_7 = '|'.join([arg_4, arg_5, arg_6])\n\n return arg_7"} +{"_id": "doc_7995", "title": "", "text": "def Func(arg_0):\n \"\"\"\n ``True`` if credentials are Func, ``False`` if expired.\n \"\"\"\n\n if arg_0.expiration_time:\n return arg_0.expiration_time > int(time.time())\n else:\n return True"} +{"_id": "doc_7996", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns ``True`` if credentials expire sooner than specified.\n\n :param int seconds:\n Number of seconds.\n\n :returns:\n ``True`` if credentials expire sooner than specified,\n else ``False``.\n\n \"\"\"\n\n if arg_0.expiration_time:\n return arg_0.expiration_time < int(time.time()) + int(arg_1)\n else:\n return False"} +{"_id": "doc_7997", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts the credentials to a percent encoded string to be stored for\n later use.\n\n :returns:\n :class:`string`\n\n \"\"\"\n\n if arg_0.provider_id is None:\n raise ConfigError(\n 'To Func credentials you need to specify a '\n 'unique integer under the \"id\" key in the config '\n 'for each provider!')\n\n # Get the provider type specific items.\n arg_1 = arg_0.provider_type_class().to_tuple(arg_0)\n\n # Provider ID and provider type ID are always the first two items.\n arg_2 = (arg_0.provider_id, arg_0.provider_type_id) + arg_1\n\n # Make sure that all items are strings.\n arg_3 = [str(i) for i in arg_2]\n\n # Concatenate by newline.\n arg_4 = '\\n'.join(arg_3)\n\n # Percent encode.\n return parse.quote(arg_4, '')"} +{"_id": "doc_7998", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return true if string is binary data.\n \"\"\"\n\n arg_1 = (bytearray([7, 8, 9, 10, 12, 13, 27]) +\n bytearray(range(0x20, 0x100)))\n return bool(arg_0.translate(None, arg_1))"} +{"_id": "doc_7999", "title": "", "text": "def Func(arg_0):\n \"\"\"\n The whole response content.\n \"\"\"\n\n if not arg_0._content:\n Func = arg_0.httplib_response.read()\n if arg_0.is_binary_string(Func):\n arg_0._content = Func\n else:\n arg_0._content = Func.decode('utf-8')\n return arg_0._content"} +{"_id": "doc_8000", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None,\n arg_6='', arg_7='GET', arg_8='', arg_9=''\n ):\n \"\"\"\n Creates |oauth1| request elements.\n \"\"\"\n\n arg_4 = arg_4 or {}\n arg_5 = arg_5 or {}\n\n arg_10 = arg_2.consumer_key or ''\n arg_11 = arg_2.consumer_secret or ''\n arg_12 = arg_2.token or ''\n arg_13 = arg_2.token_secret or ''\n\n # separate url base and query parameters\n arg_3, arg_14 = arg_0._split_url(arg_3)\n\n # add extracted params to future params\n arg_4.update(dict(arg_14))\n\n if arg_1 == arg_0.USER_AUTHORIZATION_REQUEST_TYPE:\n # no need for signature\n if arg_12:\n arg_4['oauth_token'] = arg_12\n else:\n raise OAuth1Error(\n 'Credentials with valid token are required to create '\n 'User Authorization URL!')\n else:\n # signature needed\n if arg_1 == arg_0.REQUEST_TOKEN_REQUEST_TYPE:\n # Request Token URL\n if arg_10 and arg_11 and arg_9:\n arg_4['oauth_consumer_key'] = arg_10\n arg_4['oauth_callback'] = arg_9\n else:\n raise OAuth1Error(\n 'Credentials with valid consumer_key, consumer_secret '\n 'and callback are required to create Request Token '\n 'URL!')\n\n elif arg_1 == arg_0.ACCESS_TOKEN_REQUEST_TYPE:\n # Access Token URL\n if arg_10 and arg_11 and arg_12 and arg_8:\n arg_4['oauth_token'] = arg_12\n arg_4['oauth_consumer_key'] = arg_10\n arg_4['oauth_verifier'] = arg_8\n else:\n raise 
OAuth1Error(\n 'Credentials with valid consumer_key, '\n 'consumer_secret, token and argument verifier'\n ' are required to create Access Token URL!')\n\n elif arg_1 == arg_0.PROTECTED_RESOURCE_REQUEST_TYPE:\n # Protected Resources URL\n if arg_10 and arg_11 and arg_12 and arg_13:\n arg_4['oauth_token'] = arg_12\n arg_4['oauth_consumer_key'] = arg_10\n else:\n raise OAuth1Error(\n 'Credentials with valid consumer_key, ' +\n 'consumer_secret, token and token_secret are required '\n 'to create Protected Resources URL!')\n\n # Sign request.\n # http://oauth.net/core/1.0a/#anchor13\n\n # Prepare parameters for signature base string\n # http://oauth.net/core/1.0a/#rfc.section.9.1\n arg_4['oauth_signature_method'] = arg_0._signature_generator.method\n arg_4['oauth_timestamp'] = str(int(time.time()))\n arg_4['oauth_nonce'] = arg_0.csrf_generator(str(uuid.uuid4()))\n arg_4['oauth_version'] = '1.0'\n\n # add signature to params\n arg_4['oauth_signature'] = arg_0._signature_generator.create_signature( # noqa\n arg_7, arg_3, arg_4, arg_11, arg_13)\n\n arg_15 = core.RequestElements(\n arg_3, arg_7, arg_4, arg_5, arg_6)\n\n return arg_0._x_request_elements_filter(\n arg_1, arg_15, arg_2)"} +{"_id": "doc_8001", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Decorator for Flask view functions.\n \"\"\"\n\n def decorator(arg_3):\n @wraps(arg_3)\n def decorated(*arg_4, **arg_5):\n arg_0.response = make_response()\n arg_7 = WerkzeugAdapter(request, arg_0.response)\n arg_2.setdefault('session', session)\n arg_2.setdefault('session_saver', arg_0.session_saver)\n arg_0.result = super(FlaskAuthomatic, arg_0).Func(\n arg_7,\n *arg_1,\n **arg_2)\n return arg_3(*arg_4, **arg_5)\n return decorated\n return decorator"} +{"_id": "doc_8002", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Launches the OpenID authentication procedure.\n \"\"\"\n\n if arg_0.params.get(arg_0.identifier_param):\n # =================================================================\n # Phase 1 before redirect.\n # =================================================================\n arg_0._log(\n logging.INFO,\n u'Starting OpenID authentication procedure.')\n\n arg_1 = users.create_Func_url(\n dest_url=arg_0.url, federated_identity=arg_0.identifier)\n\n arg_0._log(logging.INFO, u'Redirecting user to {0}.'.format(arg_1))\n\n arg_0.redirect(arg_1)\n else:\n # =================================================================\n # Phase 2 after redirect.\n # =================================================================\n\n arg_0._log(\n logging.INFO,\n u'Continuing OpenID authentication procedure after redirect.')\n\n arg_2 = users.get_current_user()\n\n if arg_2:\n arg_0._log(logging.INFO, u'Authentication successful.')\n arg_0._log(logging.INFO, u'Creating user.')\n arg_0.user = core.User(arg_0,\n id=arg_2.federated_identity(),\n email=arg_2.email(),\n gae_user=arg_2)\n\n # =============================================================\n # We're done\n # =============================================================\n else:\n raise FailureError(\n 'Unable to authenticate identifier \"{0}\"!'.format(\n arg_0.identifier))"} +{"_id": "doc_8003", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Logs a message with pre-formatted prefix.\n\n :param int level:\n Logging level as specified in the\n `login module `_ of\n Python standard library.\n\n :param str msg:\n The actual message.\n\n \"\"\"\n\n arg_4 = getattr(arg_0, 'Funcger', None) or authomatic.core.Funcger\n arg_4.log(\n arg_1, ': '.join(\n 
('authomatic', arg_0.__name__, arg_2)), **arg_3)"} +{"_id": "doc_8004", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Splits given url to url base and params converted to list of tuples.\n \"\"\"\n\n arg_1 = parse.urlsplit(arg_0)\n arg_2 = parse.urlunsplit((arg_1.scheme, arg_1.netloc, arg_1.path, 0, 0))\n arg_3 = parse.parse_qsl(arg_1.query, True)\n\n return arg_2, arg_3"} +{"_id": "doc_8005", "title": "", "text": "def Func(arg_0):\n \"\"\"Deletes this worker's subscription.\"\"\"\n if arg_0.subscription:\n logger.info(\"Deleting worker subscription...\")\n arg_0.subscriber_client.delete_subscription(arg_0.subscription)"} +{"_id": "doc_8006", "title": "", "text": "def Func(arg_0):\n \"\"\"Workers all share the same subscription so that tasks are\n distributed across all workers.\"\"\"\n arg_1 = arg_0._get_topic_path()\n arg_2 = '{}-{}-shared'.format(\n PUBSUB_OBJECT_PREFIX, arg_0.name)\n arg_3 = arg_0.subscriber_client.subscription_path(\n arg_0.project, arg_2)\n\n try:\n arg_0.subscriber_client.get_subscription(arg_3)\n except google.cloud.exceptions.NotFound:\n logger.info(\"Creating shared subscription {}\".format(\n arg_2))\n try:\n arg_0.subscriber_client.create_subscription(\n arg_3, topic=arg_1)\n except google.cloud.exceptions.Conflict:\n # Another worker created the subscription before us, ignore.\n pass\n\n return arg_3"} +{"_id": "doc_8007", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Enqueues a function for the task queue to execute.\"\"\"\n arg_4 = Task(uuid4().hex, arg_1, arg_2, arg_3)\n arg_0.storage.put_task(arg_4)\n return arg_0.Func_task(arg_4)"} +{"_id": "doc_8008", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Standalone PSQ worker.\n\n The queue argument must be the full importable path to a psq.Queue\n instance.\n\n Example usage:\n\n psqworker config.q\n\n psqworker --path /opt/app queues.fast\n\n \"\"\"\n setup_logging()\n\n if arg_1:\n with open(os.path.expanduser(arg_1), \"w\") as f:\n f.write(str(os.getpid()))\n\n if not arg_0:\n arg_0 = os.getcwd()\n\n sys.path.insert(0, arg_0)\n\n arg_2 = import_queue(arg_2)\n\n import psq\n\n arg_3 = psq.Worker(arg_2=arg_2)\n\n arg_3.listen()"} +{"_id": "doc_8009", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Gets the Func of the task.\n\n Arguments:\n timeout: Maximum seconds to wait for a Func before raising a\n TimeoutError. If set to None, this will wait forever. If the\n queue doesn't store Funcs and timeout is None, this call will\n never return.\n \"\"\"\n arg_2 = time.time()\n while True:\n arg_3 = arg_0.get_task()\n if not arg_3 or arg_3.status not in (FINISHED, FAILED):\n if not arg_1:\n continue\n elif time.time() - arg_2 < arg_1:\n continue\n else:\n raise TimeoutError()\n\n if arg_3.status == FAILED:\n raise arg_3.Func\n\n return arg_3.Func"} +{"_id": "doc_8010", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n This function is the decorator which is used to wrap a Sanic route with.\n In the simplest case, simply use the default parameters to allow all\n origins in what is the most permissive configuration. 
If this method\n modifies state or performs authentication which may be brute-forced, you\n should add some degree of protection, such as Cross Site Forgery\n Request protection.\n\n :param origins:\n The origin, or list of origins to allow requests from.\n The origin(s) may be regular expressions, case-sensitive strings,\n or else an asterisk\n\n Default : '*'\n :type origins: list, string or regex\n\n :param methods:\n The method or list of methods which the allowed origins are allowed to\n access for non-simple requests.\n\n Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]\n :type methods: list or string\n\n :param expose_headers:\n The header or list which are safe to expose to the API of a CORS API\n specification.\n\n Default : None\n :type expose_headers: list or string\n\n :param allow_headers:\n The header or list of header field names which can be used when this\n resource is accessed by allowed origins. The header(s) may be regular\n expressions, case-sensitive strings, or else an asterisk.\n\n Default : '*', allow all headers\n :type allow_headers: list, string or regex\n\n :param supports_credentials:\n Allows users to make authenticated requests. If true, injects the\n `Access-Control-Allow-Credentials` header in responses. This allows\n cookies and credentials to be submitted across domains.\n\n :note: This option cannot be used in conjuction with a '*' origin\n\n Default : False\n :type supports_credentials: bool\n\n :param max_age:\n The maximum time for which this CORS request maybe cached. This value\n is set as the `Access-Control-Max-Age` header.\n\n Default : None\n :type max_age: timedelta, integer, string or None\n\n :param send_wildcard: If True, and the origins parameter is `*`, a wildcard\n `Access-Control-Allow-Origin` header is sent, rather than the\n request's `Origin` header.\n\n Default : False\n :type send_wildcard: bool\n\n :param vary_header:\n If True, the header Vary: Origin will be returned as per the W3\n implementation guidelines.\n\n Setting this header when the `Access-Control-Allow-Origin` is\n dynamically generated (e.g. when there is more than one allowed\n origin, and an Origin than '*' is returned) informs CDNs and other\n caches that the CORS headers are dynamic, and cannot be cached.\n\n If False, the Vary header will never be injected or altered.\n\n Default : True\n :type vary_header: bool\n\n :param automatic_options:\n Only applies to the `Func` decorator. 
If True, Sanic-CORS will\n override Sanic's default OPTIONS handling to return CORS headers for\n OPTIONS requests.\n\n Default : True\n :type automatic_options: bool\n\n \"\"\"\n arg_3 = arg_2\n arg_4 = cors.decorate(arg_0, *arg_1, run_middleware=False, with_context=False, **arg_2)\n\n def wrapped_decorator(arg_5):\n arg_6 = SanicPluginsFramework(arg_0) # get the singleton from the app\n try:\n arg_7 = arg_6.register_plugin(cors, skip_reg=True)\n except ValueError as e:\n # this is normal, if this plugin has been registered previously\n assert e.args and len(e.args) > 1\n arg_7 = e.args[1]\n arg_8 = cors.get_context_from_spf(arg_6)\n arg_9 = arg_8.log\n arg_9(logging.DEBUG, \"Enabled {:s} for Func using options: {}\".format(str(arg_5), str(arg_3)))\n return arg_4(arg_5)\n\n return wrapped_decorator"} +{"_id": "doc_8011", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Performs the actual evaluation of Sanic-CORS options and actually\n modifies the response object.\n\n This function is used both in the decorator and the after_request\n callback\n :param sanic.request.Request req:\n\n \"\"\"\n try:\n arg_4 = arg_2.request[id(arg_0)]\n except AttributeError:\n LOG.debug(\"Cannot find the request context. Is request already finished?\")\n return arg_1\n # If CORS has already been evaluated via the decorator, skip\n arg_5 = arg_4.get(SANIC_CORS_EVALUATED, False)\n if arg_5:\n LOG.debug('CORS have been already evaluated, skipping')\n return arg_1\n\n # `resp` can be None in the case of using Websockets\n # however this case should have been handled in the `extension` and `decorator` methods\n # before getting here. This is a final failsafe check to prevent crashing\n if arg_1 is None:\n return None\n\n if arg_1.headers is None:\n arg_1.headers = CIMultiDict()\n\n arg_7 = get_cors_headers(arg_3, arg_0.headers, arg_0.method)\n\n LOG.debug('Settings CORS headers: %s', str(arg_7))\n\n # dict .extend() does not work on CIDict so\n # iterate over them and add them individually.\n try:\n arg_1.headers.extend(arg_7)\n except Exception as e1:\n for arg_8, arg_9 in arg_7.items():\n try:\n arg_1.headers.add(arg_8, arg_9)\n except Exception as e2:\n arg_1.headers[arg_8] = arg_9\n return arg_1"} +{"_id": "doc_8012", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Wraps scalars or string types as a list, or returns the iterable instance.\n \"\"\"\n if isinstance(arg_0, str):\n return [arg_0]\n elif not isinstance(arg_0, collections.abc.Iterable):\n return [arg_0]\n else:\n return arg_0"} +{"_id": "doc_8013", "title": "", "text": "def Func(arg_0, arg_1, *, arg_2=1e-09, arg_3=0.0):\n \"\"\"\n Python 3.4 does not have math.Func, so we need to steal it and add it here.\n \"\"\"\n try:\n return math.Func(arg_0, arg_1, arg_2=arg_2, arg_3=arg_3)\n except AttributeError:\n # Running on older version of python, fall back to hand-rolled implementation\n if (arg_2 < 0.0) or (arg_3 < 0.0):\n raise ValueError(\"Tolerances must be non-negative, but are rel_tol: {} and abs_tol: {}\".format(arg_2, arg_3))\n if math.isnan(arg_0) or math.isnan(arg_1):\n return False # NaNs are never close to anything, even other NaNs\n if (arg_0 == arg_1):\n return True\n if math.isinf(arg_0) or math.isinf(arg_1):\n return False # Infinity is only close to itself, and we already handled that case\n arg_4 = abs(arg_0 - arg_1)\n return (arg_4 <= arg_2 * abs(arg_1)) or (arg_4 <= arg_2 * abs(arg_0)) or (arg_4 <= arg_3)"} +{"_id": "doc_8014", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Deprecator decorator.\n 
\"\"\"\n @functools.wraps(arg_0)\n def new_func(*arg_1, **arg_2):\n warnings.warn(\"Call to Func function {}.\".format(arg_0.__name__), category=DeprecationWarning, stacklevel=2)\n return arg_0(*arg_1, **arg_2)\n\n return new_func"} +{"_id": "doc_8015", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Attempts to Func a bytestring into an audiosegment.\n\n :param bstr: The bytestring serialized via an audiosegment's serialize() method.\n :returns: An AudioSegment object Funcd from `bstr`.\n \"\"\"\n arg_1 = pickle.loads(arg_0)\n arg_2 = pickle.loads(arg_1['seg'])\n return AudioSegment(arg_2, arg_1['name'])"} +{"_id": "doc_8016", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns an AudioSegment created from the given numpy array.\n\n The numpy array must have shape = (num_samples, num_channels).\n\n :param nparr: The numpy array to create an AudioSegment from.\n :returns: An AudioSegment created from the given array.\n \"\"\"\n # interleave the audio across all channels and collapse\n if arg_0.dtype.itemsize not in (1, 2, 4):\n raise ValueError(\"Numpy Array must contain 8, 16, or 32 bit values.\")\n if len(arg_0.shape) == 1:\n arg_2 = [arg_0]\n elif len(arg_0.shape) == 2:\n arg_2 = [arg_0[i,:] for i in range(arg_0.shape[0])]\n else:\n raise ValueError(\"Numpy Array must be one or two dimensional. Shape must be: (num_samples, num_channels).\")\n arg_3 = np.vstack(arg_2).reshape((-1,), order='F')\n arg_4 = pydub.AudioSegment(arg_3.tobytes(),\n frame_rate=arg_1,\n sample_width=arg_3.dtype.itemsize,\n channels=len(arg_3.shape)\n )\n return AudioSegment(arg_4, \"\")"} +{"_id": "doc_8017", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Executes a Sox command in a platform-independent manner.\n\n `cmd` must be a format string that includes {inputfile} and {outputfile}.\n \"\"\"\n arg_3 = platform.system().lower() == \"windows\"\n\n # On Windows, a temporary file cannot be shared outside the process that creates it\n # so we need to create a \"permanent\" file that we will use and delete afterwards\n def _get_random_tmp_file():\n if arg_3:\n arg_4 = \"\".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))\n arg_5 = arg_0.name + \"_\" + arg_4\n arg_6 = collections.namedtuple(\"WinTempFile\", \"name\")\n arg_5 = arg_6(arg_5)\n else:\n arg_5 = tempfile.NamedTemporaryFile()\n return arg_5\n\n # Get a temp file to put our data and a temp file to store the result\n arg_5 = _get_random_tmp_file()\n arg_7 = _get_random_tmp_file()\n\n # Store our data in the temp file\n arg_0.export(arg_5.name, format=\"WAV\")\n\n # Write the command to sox\n arg_8 = stderr = subprocess.PIPE if arg_2 else subprocess.DEVNULL\n arg_9 = arg_1.format(inputfile=arg_5.name, outputfile=arg_7.name)\n arg_10 = subprocess.call(arg_9.split(' '), arg_8=arg_8, stderr=stderr)\n assert arg_10 == 0, \"Sox did not work as intended, or perhaps you don't have Sox installed?\"\n\n # Create a new AudioSegment from the other temp file (where Sox put the result)\n arg_11 = AudioSegment(pydub.AudioSegment.from_wav(arg_7.name), arg_0.name)\n\n # Clean up the temp files\n if arg_3:\n os.remove(arg_5.name)\n os.remove(arg_7.name)\n else:\n arg_5.close()\n arg_7.close()\n\n return arg_11"} +{"_id": "doc_8018", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=1, arg_3=False):\n \"\"\"\n Returns a copy of this AudioSegment, but whose silence has been removed.\n\n .. note:: This method requires that you have the program 'sox' installed.\n\n .. 
warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single\n function call, the IO may add up for large numbers of AudioSegment objects.\n\n :param duration_s: The number of seconds of \"silence\" that must be present in a row to\n be stripped.\n :param threshold_percentage: Silence is defined as any samples whose absolute value is below\n `threshold_percentage * max(abs(samples in this segment))`.\n :param console_output: If True, will pipe all sox output to the console.\n :returns: A copy of this AudioSegment, but whose silence has been removed.\n \"\"\"\n arg_4 = \"sox {inputfile} -t wav {outputfile} silence -l 1 0.1 \"\\\n + str(arg_2) + \"% -1 \" + str(float(arg_1)) + \" \" + str(arg_2) + \"%\"\n try:\n arg_5 = arg_0._execute_sox_cmd(arg_4)\n except pydub.exceptions.CouldntDecodeError:\n warnings.warn(\"After silence filtering, the resultant WAV file is corrupted, and so its data cannot be retrieved. Perhaps try a smaller threshold value.\", stacklevel=2)\n # Return a copy of us\n arg_5 = AudioSegment(arg_0.seg, arg_0.name)\n return arg_5"} +{"_id": "doc_8019", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=False):\n \"\"\"\n Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins\n and the values.\n\n If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample\n of the AudioSegment.\n\n If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start\n to the end of the segment.\n\n .. code-block:: python\n\n # Example for plotting the FFT using this function\n import matplotlib.pyplot as plt\n import numpy as np\n\n seg = audiosegment.from_file(\"furelise.wav\")\n # Just take the first 3 seconds\n hist_bins, hist_vals = seg[1:3000].Func()\n hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)\n plt.plot(hist_bins / 1000, hist_vals_real_normed)\n plt.xlabel(\"kHz\")\n plt.ylabel(\"dB\")\n plt.show()\n\n .. image:: images/Func.png\n\n :param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`.\n :param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`.\n :param start_sample: The zero-based index of the first sample to include in the slice.\n If this is specified, you cannot specify `start_s`.\n :param num_samples: The number of samples to include in the slice. 
If this is specified, you cannot\n specify `duration_s`.\n :param zero_pad: If True and the combination of start and duration result in running off the end of\n the AudioSegment, the end is zero padded to prevent this.\n :returns: np.ndarray of frequencies in Hz, np.ndarray of amount of each frequency\n :raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and\n `num_samples` are specified.\n \"\"\"\n if arg_1 is not None and arg_3 is not None:\n raise ValueError(\"Only one of start_s and start_sample can be specified.\")\n if arg_2 is not None and arg_4 is not None:\n raise ValueError(\"Only one of duration_s and num_samples can be specified.\")\n if arg_1 is None and arg_3 is None:\n arg_3 = 0\n if arg_2 is None and arg_4 is None:\n arg_4 = len(arg_0.get_array_of_samples()) - int(arg_3)\n\n if arg_2 is not None:\n arg_4 = int(round(arg_2 * arg_0.frame_rate))\n if arg_1 is not None:\n arg_3 = int(round(arg_1 * arg_0.frame_rate))\n\n arg_6 = arg_3 + arg_4 # end_sample is excluded\n if arg_6 > len(arg_0.get_array_of_samples()) and not arg_5:\n raise ValueError(\"The combination of start and duration will run off the end of the AudioSegment object.\")\n elif arg_6 > len(arg_0.get_array_of_samples()) and arg_5:\n arg_7 = np.array(arg_0.get_array_of_samples())\n arg_8 = np.zeros(arg_6 - len(arg_7))\n arg_7 = np.append(arg_7, arg_8)\n else:\n arg_7 = np.array(arg_0.get_array_of_samples())\n\n arg_9 = np.array(arg_7[arg_3:arg_6])\n arg_10 = np.Func.Func(arg_9)[range(int(round(arg_4/2)) + 1)]\n arg_11 = arg_0.frame_rate / arg_4\n arg_12 = np.arange(0, int(round(arg_4/2)) + 1, 1.0) * arg_11\n return arg_12, arg_10"} +{"_id": "doc_8020", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Yields self's data in chunks of frame_duration_ms.\n\n This function adapted from pywebrtc's example [https://github.com/wiseman/py-webrtcvad/blob/master/example.py].\n\n :param frame_duration_ms: The length of each frame in ms.\n :param zero_pad: Whether or not to zero pad the end of the AudioSegment object to get all\n the audio data out as frames. If not, there may be a part at the end\n of the Segment that is cut off (the part will be <= `frame_duration_ms` in length).\n :returns: A Frame object with properties 'bytes (the data)', 'timestamp (start time)', and 'duration'.\n \"\"\"\n arg_3 = collections.namedtuple(\"Frame\", \"bytes timestamp duration\")\n\n # (samples/sec) * (seconds in a frame) * (bytes/sample)\n arg_4 = int(arg_0.frame_rate * (arg_1 / 1000) * arg_0.sample_width)\n arg_5 = 0 # where we are so far in self's data (in bytes)\n arg_6 = 0.0 # where we are so far in self (in seconds)\n # (bytes/frame) * (sample/bytes) * (sec/samples)\n arg_7 = (arg_4 / arg_0.frame_rate) / arg_0.sample_width\n while arg_5 + arg_4 < len(arg_0.raw_data):\n yield arg_3(arg_0.raw_data[arg_5:arg_5 + arg_4], arg_6, arg_7)\n arg_6 += arg_7\n arg_5 += arg_4\n\n if arg_2:\n arg_8 = arg_0.raw_data[arg_5:]\n arg_9 = bytes(arg_4 - len(arg_8))\n yield arg_3(arg_8 + arg_9, arg_6, arg_7)"} +{"_id": "doc_8021", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Normalize the values in the AudioSegment so that its `spl` property\n gives `db`.\n\n .. note:: This method is currently broken - it returns an AudioSegment whose\n values are much smaller than reasonable, yet which yield an SPL value\n that equals the given `db`. 
Such an AudioSegment will not be serializable\n as a WAV file, which will also break any method that relies on SOX.\n I may remove this method in the future, since the SPL of an AudioSegment is\n pretty questionable to begin with.\n\n :param db: The decibels to normalize average to.\n :returns: A new AudioSegment object whose values are changed so that their\n average is `db`.\n :raises: ValueError if there are no samples in this AudioSegment.\n \"\"\"\n arg_2 = arg_0.to_numpy_array().copy()\n if len(arg_2) == 0:\n raise ValueError(\"Cannot normalize the SPL of an empty AudioSegment\")\n\n def rms(arg_3):\n return np.sqrt(np.mean(np.square(arg_3)))\n\n # Figure out what RMS we would like\n arg_4 = P_REF_PCM * ((10 ** (arg_1/20.0)) - 1E-9)\n\n # Use successive approximation to solve\n ## Keep trying different multiplication factors until we get close enough or run out of time\n arg_5 = 50\n arg_6 = 0.0\n arg_7 = 0\n arg_8 = 0.1\n arg_9 = 0.0\n arg_10 = arg_4\n while (arg_7 < arg_5) and not util.isclose(arg_6, arg_4, abs_tol=0.1):\n arg_6 = rms(arg_2 * arg_8)\n if arg_6 < arg_4:\n arg_9 = arg_8\n else:\n arg_10 = arg_8\n arg_8 = 0.5 * (arg_9 + arg_10)\n arg_7 += 1\n\n arg_11 = {1: np.int8, 2: np.int16, 4: np.int32}\n arg_12 = arg_11[arg_0.sample_width]\n arg_13 = from_numpy_array(np.array(arg_2 * arg_8, arg_12=arg_12), arg_0.frame_rate)\n return arg_13"} +{"_id": "doc_8022", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reduces others into this one by concatenating all the others onto this one and\n returning the result. Does not modify self, instead, makes a copy and returns that.\n\n :param others: The other AudioSegment objects to append to this one.\n :returns: The concatenated result.\n \"\"\"\n arg_2 = AudioSegment(arg_0.seg, arg_0.name)\n arg_3 = [arg_0.seg._data]\n arg_4 = [o.seg._data for o in arg_1]\n arg_2.seg._data = b''.join(arg_3 + arg_4)\n\n return arg_2"} +{"_id": "doc_8023", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the ID corresponding to the offset which occurs first after the given onset_front_id.\n By `first` I mean the front which contains the offset which is closest to the latest point\n in the onset front. 
By `after`, I mean that the offset must contain only offsets which\n occur after the latest onset in the onset front.\n\n If there is no appropriate offset front, the id returned is -1.\n \"\"\"\n # get the onset idxs for this front\n arg_3 = _get_front_idxs_from_id(arg_1, arg_0)\n\n # get the sample idxs for this front\n arg_4 = [s for _f, s in arg_3]\n\n # get the latest onset in this onset front\n arg_5 = max(arg_4)\n\n arg_6 = _get_offset_front_id_after_onset_sample_idx(arg_5, arg_2)\n\n return int(arg_6)"} +{"_id": "doc_8024", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Gets an onset_front and an offset_front such that they both occupy at least some of the same\n frequency channels, then returns the portion of each that overlaps with the other.\n \"\"\"\n # Get the onset front of interest\n arg_4 = _get_front_idxs_from_id(arg_0, arg_2)\n\n # Get the offset front of interest\n arg_5 = _get_front_idxs_from_id(arg_1, arg_3)\n\n # Keep trying consecutive portions of this onset front until we find a consecutive portion\n # that overlaps with part of the offset front\n arg_6 = [c for c in _get_consecutive_portions_of_front(arg_4)]\n for arg_7 in arg_6:\n # Only get the segment of this front that overlaps in frequencies with the onset front of interest\n arg_8 = [f for f, _ in arg_7]\n arg_9 = [(f, s) for f, s in arg_5 if f in arg_8]\n\n # Only get as much of this overlapping portion as is actually consecutive\n for arg_10 in _get_consecutive_portions_of_front(arg_9):\n if arg_10:\n # Just return the first one we get - if we get any it means we found a portion of overlap\n return arg_7, arg_10\n return [], []"} +{"_id": "doc_8025", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by segmenting between\n `onset_front_id` and `offset_front_id`, as found in `onset_fronts` and `offset_fronts`, respectively.\n\n This function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts that are of\n less than 3 channels wide are removed.\n\n This function also returns a boolean value indicating whether the onset channel went to completion.\n\n Specifically, segments by doing the following:\n\n - Going across frequencies in the onset_front,\n - add the segment mask ID (the onset front ID) to all samples between the onset_front and the offset_front,\n if the offset_front is in that frequency.\n\n Possible scenarios:\n\n Fronts line up completely:\n\n ::\n\n | | S S S\n | | => S S S\n | | S S S\n | | S S S\n\n Onset front starts before offset front:\n\n ::\n\n | |\n | | S S S\n | | => S S S\n | | S S S\n\n Onset front ends after offset front:\n\n ::\n\n | | S S S\n | | => S S S\n | | S S S\n | |\n\n Onset front starts before and ends after offset front:\n\n ::\n\n | |\n | | => S S S\n | | S S S\n | |\n\n The above three options in reverse:\n\n ::\n\n | |S S| |\n |S S| |S S| |S S|\n |S S| |S S| |S S|\n |S S| | |\n\n There is one last scenario:\n\n ::\n\n | |\n \\ /\n \\ /\n / \\\n | |\n\n Where the offset and onset fronts cross one another. 
If this happens, we simply\n reverse the indices and accept:\n\n ::\n\n |sss|\n \\sss/\n \\s/\n /s\\\n |sss|\n\n The other option would be to destroy the offset front from the crossover point on, and\n then search for a new offset front for the rest of the onset front.\n \"\"\"\n # Get the portions of the onset and offset fronts that overlap and are consecutive\n arg_5, arg_6 = _get_consecutive_and_overlapping_fronts(arg_1, arg_2, arg_3, arg_4)\n arg_7 = _get_front_idxs_from_id(arg_1, arg_3)\n arg_8 = _get_front_idxs_from_id(arg_2, arg_4)\n arg_9 = \"Onset front {} and offset front {} result in consecutive overlapping portions of (on) {} and (off) {}, one of which is empty\".format(\n arg_7, arg_8, arg_5, arg_6\n )\n assert arg_5, arg_9\n assert arg_6, arg_9\n arg_7 = arg_5\n arg_8 = arg_6\n\n # Figure out which frequencies will go in the segment\n arg_10, arg_11 = arg_7[0]\n arg_12, arg_13 = arg_7[-1]\n arg_14, arg_15 = arg_8[0]\n arg_16, arg_17 = arg_8[-1]\n arg_18 = max(arg_10, arg_14)\n arg_19 = min(arg_12, arg_16)\n\n # Update all the masks with the segment\n for arg_20, arg_21 in enumerate(arg_0[arg_18:arg_19 + 1, :], start=arg_18):\n assert arg_20 >= arg_18, \"Frequency index is {}, but we should have started at {}\".format(arg_20, arg_18)\n assert (arg_20 - arg_18) < len(arg_7), \"Frequency index {} minus starting frequency {} is too large for nfrequencies {} in onset front {}\".format(\n arg_20, arg_18, len(arg_7), arg_7\n )\n assert (arg_20 - arg_18) < len(arg_8), \"Frequency index {} minus starting frequency {} is too large for nfrequencies {} in offset front {}\".format(\n arg_20, arg_18, len(arg_8), arg_8\n )\n arg_22, arg_23 = arg_7[arg_20 - arg_18]\n arg_22, arg_24 = arg_8[arg_20 - arg_18]\n if arg_23 > arg_24:\n arg_24, arg_23 = arg_23, arg_24\n assert arg_24 >= arg_23\n arg_0[arg_20, arg_23:arg_24 + 1] = arg_3\n arg_1[arg_20, (arg_23 + 1):(arg_24 + 1)] = 0\n arg_2[arg_20, (arg_23 + 1):(arg_24 + 1)] = 0\n arg_25 = (arg_20 - arg_18) + 1\n\n # Update the other masks to delete fronts that have been used\n arg_26 = np.arange(arg_18, arg_19 + 1, 1, dtype=np.int64)\n arg_27 = np.array([s for arg_22, s in arg_7])\n arg_28 = arg_27[:arg_25]\n arg_29 = np.array([s for arg_22, s in arg_8])\n arg_30 = arg_29[:arg_25]\n\n ## Remove the offset front from where we started to where we ended\n arg_2[arg_26[:arg_25], arg_30] = 0\n\n ## Remove the onset front from where we started to where we ended\n arg_1[arg_26[:arg_25], arg_28] = 0\n\n # Determine if we matched the entire onset front by checking if there is any more of this onset front in onset_fronts\n arg_31 = arg_3 not in np.unique(arg_1)\n\n return arg_31"} +{"_id": "doc_8026", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Removes all points in the fronts that overlap with the segmentation mask.\n \"\"\"\n arg_2, arg_3 = np.where((arg_0 != arg_1) & (arg_0 != 0) & (arg_1 != 0))\n arg_1[arg_2, arg_3] = 0"} +{"_id": "doc_8027", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Removes all fronts from `fronts` which are strictly smaller than\n `size` consecutive frequencies in length.\n \"\"\"\n arg_2 = np.unique(arg_0)\n for arg_3 in arg_2:\n if arg_3 == 0 or arg_3 == -1:\n continue\n arg_4 = _get_front_idxs_from_id(arg_0, arg_3)\n if len(arg_4) < arg_1:\n arg_5 = ([f for f, _ in arg_4], [s for _, s in arg_4])\n arg_0[arg_5] = 0"} +{"_id": "doc_8028", "title": "", "text": "def Func(arg_0, arg_1=0.1, arg_2=3):\n \"\"\"\n For each onset front, for each frequency in that front, break the onset front if the signals\n 
between this frequency's onset and the next frequency's onset are not similar enough.\n\n Specifically:\n If we have the following two frequency channels, and the two O's are part of the same onset front,\n\n ::\n\n [ . O . . . . . . . . . . ]\n [ . . . . O . . . . . . . ]\n\n We compare the signals x and y:\n\n ::\n\n [ . x x x x . . . . . . . ]\n [ . y y y y . . . . . . . ]\n\n And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset\n front between these two channels.\n\n Once this is done, remove any onset fronts that are less than 3 channels wide.\n \"\"\"\n assert arg_2 > 0, \"Number of samples of overlap must be greater than zero\"\n arg_3 = {}\n for arg_4 in _get_front_ids_one_at_a_time(arg_0):\n arg_5 = _get_front_idxs_from_id(arg_0, arg_4)\n for arg_6, (arg_7, arg_8) in enumerate(arg_5):\n if arg_6 < len(arg_5) - 1:\n # Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1\n arg_9, arg_10 = arg_5[arg_6 + 1]\n arg_11 = min(arg_8, arg_10)\n arg_12 = max(arg_8, arg_10)\n arg_13 = arg_0[arg_7, arg_11:arg_12]\n arg_14 = arg_0[arg_9, arg_11:arg_12]\n assert len(arg_14) == len(arg_13)\n\n if len(arg_14) > arg_2:\n # If these two signals are not sufficiently close in form, this front should be broken up\n arg_15 = signal.correlate(arg_13, arg_14, mode='same')\n assert len(arg_15) > 0\n arg_15 = arg_15 / max(arg_15 + 1E-9)\n arg_16 = np.sum(arg_15) / len(arg_15)\n # TODO: the above stuff probably needs to be figured out\n if arg_16 < arg_1:\n if arg_4 in arg_3:\n arg_3[arg_4].append((arg_7, arg_8))\n else:\n arg_3[arg_4] = []\n\n # Now update the fronts matrix by breaking up any fronts at the points we just identified\n # and assign the newly created fronts new IDs\n arg_17 = sorted(np.unique(arg_0))\n arg_18 = arg_17[-1] + 1\n for arg_19 in arg_3.keys():\n for arg_7, arg_8 in arg_3[arg_19]:\n arg_20, arg_21 = np.where(arg_0 == arg_19)\n arg_22 = [fidx for fidx in arg_20 if fidx > arg_7]\n arg_23 = len(arg_21) - len(arg_22)\n arg_24 = (arg_22, arg_21[arg_23:])\n arg_0[arg_24] = arg_18\n arg_18 += 1\n\n _remove_fronts_that_are_too_small(arg_0, 3)"} +{"_id": "doc_8029", "title": "", "text": "def Func(arg_0, arg_1=0.025):\n \"\"\"\n Returns a list of segmentation masks each of the same dimension as the input one,\n but where they each have exactly one segment in them and all other samples in them\n are zeroed.\n\n Only bothers to return segments that are larger in total area than `threshold * mask.size`.\n \"\"\"\n try:\n arg_2 = multiprocessing.cpu_count()\n except NotImplementedError:\n arg_2 = 2\n\n with multiprocessing.Pool(processes=arg_2) as pool:\n arg_3 = [id for id in np.unique(arg_0) if id != 0]\n arg_4 = [arg_1 * arg_0.size for _ in range(len(arg_3))]\n arg_5 = [arg_0 for _ in range(len(arg_3))]\n arg_6 = pool.starmap(Func_task, zip(arg_3, arg_4, arg_5))\n return [arg_7 for arg_7 in arg_6 if arg_7 is not None]"} +{"_id": "doc_8030", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"\n Worker for the ASA algorithm's multiprocessing step.\n \"\"\"\n # Convert each mask to (1 or 0) rather than (ID or 0)\n for arg_6 in arg_1:\n arg_6 = np.where(arg_6 > 0, 1, 0)\n\n # Multiply the masks against STFTs\n arg_1 = [arg_6 * arg_2 for arg_6 in arg_1]\n\n arg_7 = []\n arg_8 = {1: np.int8, 2: np.int16, 4: np.int32}\n arg_9 = arg_8[arg_3]\n for arg_10 in arg_1:\n arg_11, arg_12 = signal.istft(arg_10, arg_4, nperseg=arg_5)\n arg_12 = arg_12.astype(arg_9)\n arg_7.append(arg_12)\n\n for arg_10 in arg_7:\n 
arg_0.put(arg_10)\n arg_0.put(\"DONE\")"} +{"_id": "doc_8031", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=5):\n \"\"\"\n Does a lowpass filter over the given data.\n\n :param data: The data (numpy array) to be filtered.\n :param cutoff: The high cutoff in Hz.\n :param fs: The sample rate in Hz of the data.\n :param order: The order of the filter. The higher the order, the tighter the roll-off.\n :returns: Filtered data (numpy array).\n \"\"\"\n arg_4 = 0.5 * arg_2\n arg_5 = arg_1 / arg_4\n arg_6, arg_7 = signal.butter(arg_3, arg_5, btype='low', analog=False)\n arg_8 = signal.lfilter(arg_6, arg_7, arg_0)\n return arg_8"} +{"_id": "doc_8032", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Launch a Process, return his pid\n \"\"\"\n if arg_0 is not None:\n arg_2 = [\"python\", arg_0]\n if arg_1 is not None:\n arg_2 += arg_1\n return subprocess.Popen(arg_2)\n return False"} +{"_id": "doc_8033", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Update the list of the running process and return the list\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0:\n if arg_2.poll() is None and check_pid(arg_2.pid):\n publisher.debug(str(arg_2.pid) + ' is alive')\n arg_1.append(arg_2)\n else:\n try:\n publisher.debug(str(arg_2.pid) + ' is gone')\n os.kill(arg_2.pid, signal.SIGKILL)\n except:\n # the process is just already gone\n pass\n return arg_1"} +{"_id": "doc_8034", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Give an IP, maybe a date, get the ASN.\n This is the fastest command.\n\n :param ip: IP address to search for\n :param announce_date: Date of the announcement\n\n :rtype: String, ASN.\n\n \"\"\"\n arg_3, arg_2, arg_4 = arg_0.run(arg_1, arg_2)\n return next((arg_5 for arg_5 in arg_3 if arg_5 is not None), None), arg_2"} +{"_id": "doc_8035", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get the full Func of an IP. It takes time.\n\n :param ip: IP address to search for\n :param days_limit: Max amount of days to query. (None means no limit)\n\n :rtype: list. For each day in the database: day, asn, block\n \"\"\"\n arg_3 = sorted(arg_0.routing_db.smembers('imported_dates'), reverse=True)\n if arg_2 is not None:\n arg_3 = arg_3[:arg_2]\n return [arg_0.date_asn_block(arg_1, arg_4) for arg_4 in arg_3]"} +{"_id": "doc_8036", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Get the full history of an IP, aggregate the result instead of\n returning one line per day.\n\n :param ip: IP address to search for\n :param days_limit: Max amount of days to query. (None means no limit)\n\n :rtype: list. 
For each change: FirstDay, LastDay, ASN, Block\n \"\"\"\n arg_3 = None\n arg_4 = None\n arg_5 = None\n arg_6 = None\n for arg_7 in arg_0.history(arg_1, arg_2):\n if arg_7 is None:\n continue\n arg_8, arg_9, arg_10 = arg_7\n if arg_3 is None:\n arg_4 = arg_8\n arg_3 = arg_8\n arg_5 = arg_9\n arg_6 = arg_10\n elif arg_5 == arg_9 and arg_6 == arg_10:\n arg_3 = arg_8\n else:\n yield arg_3, arg_4, arg_5, arg_6\n arg_4 = arg_8\n arg_3 = arg_8\n arg_5 = arg_9\n arg_6 = arg_10\n if arg_3 is not None:\n yield arg_3, arg_4, arg_5, arg_6"} +{"_id": "doc_8037", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Inconditianilly download the URL in a temporary directory.\n When finished, the file is moved in the real directory.\n Like this an other process will not attempt to extract an inclomplete file.\n \"\"\"\n arg_2 = os.path.join(c.raw_data, c.bview_dir, 'tmp', arg_1)\n arg_3 = os.path.join(c.raw_data, c.bview_dir, arg_1)\n try:\n arg_4 = urlopen(arg_0)\n except:\n return False\n if arg_4.getcode() != 200:\n publisher.warning('{} unavailable, code: {}'.format(arg_0, arg_4.getcode()))\n return False\n try:\n with open(arg_2, 'w') as outfile:\n outfile.write(arg_4.read())\n os.rename(arg_2, arg_3)\n except:\n os.remove(arg_2)\n return False\n return True"} +{"_id": "doc_8038", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Verify that the file has not already been downloaded.\n \"\"\"\n arg_1 = os.path.join(c.bview_dir, arg_0)\n arg_2 = os.path.join(c.bview_dir, 'old', arg_0)\n if not os.path.exists(arg_1) and not os.path.exists(arg_2):\n return False\n return True"} +{"_id": "doc_8039", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Separates the outcome feature from the data and creates the onehot vector for each row.\n \"\"\"\n arg_3 = np.matrix([row[:arg_1] + row[arg_1+1:] for row in arg_0])\n arg_4 = np.asarray([row[arg_1] for row in arg_0], dtype=np.uint8)\n arg_5 = (np.arange(arg_2) == arg_4[:, None]).astype(np.float32)\n\n return arg_3, arg_5"} +{"_id": "doc_8040", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Used to check whether the two edge lists have the same edges \n when elements are neither hashable nor sortable.\n \"\"\"\n arg_2 = list(arg_1)\n for arg_3 in arg_0:\n try:\n arg_2.remove(arg_3)\n except ValueError:\n return False\n return not arg_2"} +{"_id": "doc_8041", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.05):\n \"\"\"\n Given a list of audit files, rank them using the `measurer` and\n return the features that never deviate more than `similarity_bound`\n across repairs.\n \"\"\"\n\n def _partition_groups(arg_3):\n arg_4 = []\n for arg_5, arg_6 in arg_3:\n arg_7 = False\n\n # Check to see if the feature belongs in a group with any other features.\n for arg_8, arg_9 in enumerate(arg_4):\n arg_10, arg_11 = arg_9\n if abs(arg_10 - arg_6) < arg_2:\n arg_4[arg_8][1].append( (arg_5, arg_6) )\n\n # Recalculate the representative mean.\n arg_4[arg_8][0] = sum([s for arg_12, s in arg_11])/len(arg_11)\n arg_7 = True\n break\n\n # If this feature did not much with the current groups, create another group.\n if not arg_7:\n arg_4.append( [arg_6, [(arg_5,arg_6)]] )\n\n # Return just the features.\n return [[arg_5 for arg_5, arg_6 in arg_9] for arg_12, arg_9 in arg_4]\n\n\n arg_13 = {}\n arg_14 = []\n for arg_15 in arg_0:\n with open(arg_15) as audit_file:\n arg_16 = audit_file.readline()[:-1] # Remove the trailing endline.\n arg_5 = arg_16[arg_16.index(\":\")+1:]\n arg_14.append(arg_5)\n\n arg_17 = load_audit_confusion_matrices(arg_15)\n for 
arg_18, arg_19 in arg_17:\n arg_6 = arg_1(arg_19)\n if arg_18 not in arg_13:\n arg_13[arg_18] = {}\n arg_13[arg_18][arg_5] = arg_6\n\n # Sort by repair level increasing repair level.\n arg_20 = sorted(arg_13.keys())\n\n arg_4 = [arg_14]\n while arg_20:\n arg_21 = arg_20.pop()\n arg_22 = []\n for arg_9 in arg_4:\n arg_23 = [(f, arg_13[arg_21][f]) for f in arg_9]\n arg_24 = _partition_groups(arg_23)\n arg_22.extend(arg_24)\n arg_4 = arg_22\n\n return arg_4"} +{"_id": "doc_8042", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Loads a confusion matrix in a two-level dictionary format.\n\n For example, the confusion matrix of a 75%-accurate model\n that predicted 15 values (and mis-classified 5) may look like:\n {\"A\": {\"A\":10, \"B\": 5}, \"B\": {\"B\":5}}\n\n Note that raw boolean values are translated into strings, such that\n a value that was the boolean True will be returned as the string \"True\".\n \"\"\"\n\n with open(arg_0) as audit_file:\n audit_file.next() # Skip the first line.\n\n # Extract the confusion matrices and repair levels from the audit file.\n arg_1 = []\n for arg_2 in audit_file:\n arg_3 = \":\"\n arg_4 = arg_2.index(arg_3)\n\n arg_5 = arg_2.index(',')\n arg_6 = float(arg_2[arg_4+2:arg_5])\n arg_7 = arg_2[arg_5+2:-2]\n arg_8 = json.loads( arg_7.replace(\"'\",\"\\\"\") )\n arg_1.append( (arg_6, arg_8) )\n\n # Sort the repair levels in case they are out of order for whatever reason.\n arg_1.sort(key = lambda pair: pair[0])\n return arg_1"} +{"_id": "doc_8043", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Separates the outcome feature from the data.\n \"\"\"\n arg_3 = np.matrix([row[:arg_1] + row[arg_1+1:] for row in arg_0])\n arg_4 = np.asarray([row[arg_1] for row in arg_0], dtype=np.uint8)\n\n return arg_3, arg_4"} +{"_id": "doc_8044", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Renders a Page object as a Twitter Bootstrap styled pagination bar.\n Compatible with Bootstrap 3.x and 4.x only.\n\n Example::\n\n {% Func page_obj range=10 %}\n\n\n Named Parameters::\n\n range - The size of the pagination bar (ie, if set to 10 then, at most,\n 10 page numbers will display at any given time) Defaults to\n None, which shows all pages.\n\n\n size - Accepts \"small\", and \"large\". Defaults to\n None which is the standard size.\n\n show_prev_next - Accepts \"true\" or \"false\". Determines whether or not\n to show the previous and next page links. Defaults to\n \"true\"\n\n\n show_first_last - Accepts \"true\" or \"false\". Determines whether or not\n to show the first and last page links. Defaults to\n \"false\"\n\n previous_label - The text to display for the previous page link.\n Defaults to \"←\"\n\n next_label - The text to display for the next page link. Defaults to\n \"→\"\n\n first_label - The text to display for the first page link. Defaults to\n \"«\"\n\n last_label - The text to display for the last page link. Defaults to\n \"»\"\n\n url_view_name - The named URL to use. Defaults to None. If None, then the\n default template simply appends the url parameter as a\n relative URL link, eg: 1\n\n url_param_name - The name of the parameter to use in the URL. If\n url_view_name is set to None, this string is used as the\n parameter name in the relative URL path. 
If a URL\n name is specified, this string is used as the\n parameter name passed into the reverse() method for\n the URL.\n\n url_extra_args - This is used only in conjunction with url_view_name.\n When referencing a URL, additional arguments may be\n passed in as a list.\n\n url_extra_kwargs - This is used only in conjunction with url_view_name.\n When referencing a URL, additional named arguments\n may be passed in as a dictionary.\n\n url_get_params - The other get parameters to pass, only the page\n number will be overwritten. Use this to preserve\n filters.\n\n url_anchor - The anchor to use in URLs. Defaults to None.\n\n extra_pagination_classes - A space separated list of CSS class names\n that will be added to the top level
      \n HTML element. In particular, this can be\n utilized in Bootstrap 4 installations to\n add the appropriate alignment classes from\n Flexbox utilities, eg: justify-content-center\n \"\"\"\n arg_2 = arg_1.split_contents()\n if len(arg_2) < 2:\n raise TemplateSyntaxError(\"'%s' takes at least one argument\"\n \" (Page object reference)\" % arg_2[0])\n arg_3 = arg_0.compile_filter(arg_2[1])\n arg_4 = {}\n arg_2 = arg_2[2:]\n\n arg_5 = re.compile(r'(\\w+)=(.+)')\n\n if len(arg_2):\n for arg_6 in arg_2:\n arg_7 = arg_5.match(arg_6)\n if not arg_7:\n raise TemplateSyntaxError(\"Malformed arguments to bootstrap_pagination paginate tag\")\n arg_8, arg_9 = arg_7.groups()\n arg_4[arg_8] = arg_0.compile_filter(arg_9)\n\n return BootstrapPaginationNode(arg_3, arg_4)"} +{"_id": "doc_8045", "title": "", "text": "def Func(arg_0):\n \"\"\" Checks for alternative index-url in pip.conf \"\"\"\n\n if 'VIRTUAL_ENV' in os.environ:\n arg_0.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.conf'))\n arg_0.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.ini'))\n\n if site_config_files:\n arg_0.pip_config_locations.extend(site_config_files)\n\n arg_1 = None\n arg_2 = None\n\n if 'PIP_INDEX_URL' in os.environ and os.environ['PIP_INDEX_URL']:\n # environ variable takes priority\n arg_1 = os.environ['PIP_INDEX_URL']\n arg_2 = 'PIP_INDEX_URL environment variable'\n else:\n for arg_3 in arg_0.pip_config_locations:\n if arg_3.startswith('~'):\n arg_3 = os.path.expanduser(arg_3)\n\n if os.path.isfile(arg_3):\n arg_4 = ConfigParser()\n arg_4.read([arg_3])\n try:\n arg_1 = arg_4.get('global', 'index-url')\n arg_2 = arg_3\n break # stop on first detected, because config locations have a priority\n except (NoOptionError, NoSectionError): # pragma: nocover\n pass\n\n if arg_1:\n arg_0.PYPI_API_URL = arg_0._prepare_api_url(arg_1)\n print(Color('Setting API url to {{autoyellow}}{}{{/autoyellow}} as found in {{autoyellow}}{}{{/autoyellow}}'\n '. 
Use --default-index-url to use pypi default index'.format(arg_0.PYPI_API_URL, arg_2)))"} +{"_id": "doc_8046", "title": "", "text": "def Func(\n arg_0, arg_1,\n arg_2, arg_3, arg_4):\n \"\"\"\n For each package and target check if it is a regression.\n\n This is the case if the main repo contains a package version which is\n higher then in any of the other repos or if any of the other repos does not\n contain that package at all.\n\n :return: a dict indexed by package names containing\n dicts indexed by targets containing a boolean flag\n \"\"\"\n arg_5 = {}\n for arg_6 in arg_0.values():\n arg_7 = arg_6.pkg_name\n arg_8 = arg_6.debian_pkg_name\n\n arg_5[arg_7] = {}\n for arg_9 in arg_1:\n arg_5[arg_7][arg_9] = False\n arg_10 = \\\n arg_4.get(arg_9, {}).get(arg_8, None)\n if arg_10 is not None:\n arg_11 = LooseVersion(arg_10)\n for arg_12 in [arg_2, arg_3]:\n arg_13 = \\\n arg_12.get(arg_9, {}).get(arg_8, None)\n if not arg_13 or arg_11 > LooseVersion(arg_13):\n arg_5[arg_7][arg_9] = True\n return arg_5"} +{"_id": "doc_8047", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Remove trailing junk from the version number.\n\n >>> strip_version_suffix('')\n ''\n >>> strip_version_suffix('None')\n 'None'\n >>> strip_version_suffix('1.2.3-4trusty-20140131-1359-+0000')\n '1.2.3-4'\n >>> strip_version_suffix('1.2.3-foo')\n '1.2.3'\n \"\"\"\n global version_regex\n if not arg_0:\n return arg_0\n arg_1 = version_regex.search(arg_0)\n return arg_1.group(0) if arg_1 else arg_0"} +{"_id": "doc_8048", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n For each package check if the version in one repo is equal for all targets.\n\n The version could be different in different repos though.\n\n :return: a dict indexed by package names containing a boolean flag\n \"\"\"\n arg_3 = {}\n for arg_4 in arg_0.values():\n arg_5 = arg_4.pkg_name\n arg_6 = arg_4.debian_pkg_name\n\n arg_7 = []\n for arg_8 in arg_2:\n arg_7.append(set([]))\n for arg_9 in arg_1:\n arg_10 = _strip_version_suffix(\n arg_8.get(arg_9, {}).get(arg_6, None))\n arg_7[-1].add(arg_10)\n arg_3[arg_5] = max([len(v) for v in arg_7]) == 1\n return arg_3"} +{"_id": "doc_8049", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Get the number of packages per target and repository.\n\n :return: a dict indexed by targets containing\n a list of integer values (one for each repo)\n \"\"\"\n arg_3 = {}\n for arg_4 in arg_1:\n arg_3[arg_4] = [0] * len(arg_2)\n for arg_5 in arg_0.values():\n arg_6 = arg_5.debian_pkg_name\n\n for arg_4 in arg_1:\n for arg_7, arg_8 in enumerate(arg_2):\n arg_9 = arg_8.get(arg_4, {}).get(arg_6, None)\n if arg_9:\n arg_3[arg_4][arg_7] += 1\n return arg_3"} +{"_id": "doc_8050", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Get the Jenkins job urls for each target.\n\n The placeholder {pkg} needs to be replaced with the ROS package name.\n\n :return: a dict indexed by targets containing a string\n \"\"\"\n arg_4 = {}\n for arg_5 in arg_3:\n arg_6 = get_release_view_name(\n arg_0, arg_2,\n arg_5.os_name, arg_5.os_code_name, arg_5.arch)\n arg_7 = arg_1 + '/view/%s/job/%s__{pkg}__' % \\\n (arg_6, arg_6)\n if arg_5.arch == 'source':\n arg_4[arg_5] = arg_7 + '%s_%s__source' % \\\n (arg_5.os_name, arg_5.os_code_name)\n else:\n arg_4[arg_5] = arg_7 + '%s_%s_%s__binary' % \\\n (arg_5.os_name, arg_5.os_code_name, arg_5.arch)\n return arg_4"} +{"_id": "doc_8051", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2,\n arg_3=None, arg_4=False):\n \"\"\"Configure all Jenkins CI 
jobs.\"\"\"\n arg_5 = get_config_index(arg_0)\n arg_6 = get_ci_build_files(arg_5, arg_1)\n arg_7 = arg_6[arg_2]\n\n arg_8 = get_index(arg_5.rosdistro_index_url)\n\n # get targets\n arg_9 = []\n for arg_10 in arg_7.targets.keys():\n for arg_11 in arg_7.targets[arg_10].keys():\n for arg_12 in arg_7.targets[arg_10][arg_11]:\n arg_9.append((arg_10, arg_11, arg_12))\n print('The build file contains the following targets:')\n for arg_10, arg_11, arg_12 in arg_9:\n print(' -', arg_10, arg_11, arg_12)\n\n arg_13 = get_distribution_file(arg_8, arg_1, arg_7)\n if not arg_13:\n print('No distribution file matches the build file')\n return\n\n arg_14 = get_ci_view_name(arg_1)\n\n # all further configuration will be handled by either the Jenkins API\n # or by a generated groovy script\n from ros_buildfarm.jenkins import connect\n arg_15 = connect(arg_5.jenkins_url) if arg_3 is None else False\n\n arg_16 = {}\n arg_17 = {\n arg_14: configure_ci_view(\n arg_15, arg_14, arg_4=arg_4)\n }\n if not arg_15:\n arg_16.update(arg_17)\n arg_18 = {\n 'dry_run': arg_4,\n 'expected_num_views': len(arg_16),\n }\n\n arg_19 = []\n arg_20 = OrderedDict()\n\n arg_21 = False\n\n for arg_10, arg_11, arg_12 in arg_9:\n try:\n arg_22, arg_23 = configure_ci_job(\n arg_0, arg_1, arg_2,\n arg_10, arg_11, arg_12,\n arg_5=arg_5, arg_7=arg_7,\n arg_8=arg_8, arg_13=arg_13,\n arg_15=arg_15, arg_17=arg_17,\n arg_21=arg_21,\n arg_3=arg_3,\n arg_4=arg_4,\n trigger_timer=arg_7.jenkins_job_schedule)\n arg_19.append(arg_22)\n if arg_3 is not None:\n print(\"Configuration for job '%s'\" % arg_22)\n arg_20[arg_22] = arg_23\n except JobValidationError as e:\n print(e.message, file=sys.stderr)\n\n arg_18['expected_num_jobs'] = len(arg_20)\n arg_18['job_prefixes_and_names'] = {}\n\n if arg_3 is not None:\n print(\n \"Writing groovy script '%s' to reconfigure %d jobs\" %\n (arg_3, len(arg_20)))\n arg_24 = expand_template(\n 'snippet/reconfigure_jobs.groovy.em', arg_18)\n write_groovy_script_and_configs(\n arg_3, arg_24, arg_20, arg_16)"} +{"_id": "doc_8052", "title": "", "text": "def Func(arg_0=1.0):\n \"\"\"Resolve all streams on the network.\n\n This function returns all currently available streams from any outlet on \n the network. The network is usually the subnet specified at the local \n router, but may also include a group of machines visible to each other via \n multicast packets (given that the network supports it), or list of \n hostnames. These details may optionally be customized by the experimenter \n in a configuration file (see Network Connectivity in the LSL wiki). \n \n Keyword arguments:\n wait_time -- The waiting time for the operation, in seconds, to search for \n streams. Warning: If this is too short (<0.5s) only a subset \n (or none) of the outlets that are present on the network may \n be returned. (default 1.0)\n \n Returns a list of StreamInfo objects (with empty desc field), any of which \n can subsequently be used to open an inlet. 
The full description can be\n retrieved from the inlet.\n\n \"\"\"\n # noinspection PyCallingNonCallable\n arg_1 = (c_void_p*1024)()\n arg_2 = lib.lsl_resolve_all(byref(arg_1), 1024, c_double(arg_0))\n return [StreamInfo(handle=arg_1[arg_3]) for arg_3 in range(arg_2)]"} +{"_id": "doc_8053", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=arg_4):\n \"\"\"Resolve all streams with a specific value for a given property.\n\n If the goal is to resolve a specific stream, this method is preferred over \n resolving all streams and then selecting the desired one.\n \n Keyword arguments:\n prop -- The StreamInfo property that should have a specific value (e.g., \n \"name\", \"type\", \"source_id\", or \"desc/manufaturer\").\n value -- The string value that the property should have (e.g., \"EEG\" as \n the type property).\n minimum -- Return at least this many streams. (default 1)\n timeout -- Optionally a timeout of the operation, in seconds. If the \n timeout expires, less than the desired number of streams \n (possibly none) will be returned. (default FOREVER)\n \n Returns a list of matching StreamInfo objects (with empty desc field), any \n of which can subsequently be used to open an inlet.\n \n Example: results = resolve_Stream_byprop(\"type\",\"EEG\")\n\n \"\"\"\n # noinspection PyCallingNonCallable\n arg_5 = (c_void_p*1024)()\n arg_6 = lib.lsl_Func(byref(arg_5), 1024,\n c_char_p(str.encode(arg_0)),\n c_char_p(str.encode(arg_1)),\n arg_2,\n c_double(arg_3))\n return [StreamInfo(handle=arg_5[arg_7]) for arg_7 in range(arg_6)]"} +{"_id": "doc_8054", "title": "", "text": "def Func(arg_0, arg_1=1, arg_2=arg_3):\n \"\"\"Resolve all streams that match a given predicate.\n\n Advanced query that allows to impose more conditions on the retrieved \n streams; the given string is an XPath 1.0 predicate for the \n node (omitting the surrounding []'s), see also\n http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.\n \n Keyword arguments:\n predicate -- The predicate string, e.g. \"name='BioSemi'\" or \n \"type='EEG' and starts-with(name,'BioSemi') and \n count(description/desc/channels/channel)=32\"\n minimum -- Return at least this many streams. (default 1)\n timeout -- Optionally a timeout of the operation, in seconds. If the \n timeout expires, less than the desired number of streams \n (possibly none) will be returned. (default FOREVER)\n \n Returns a list of matching StreamInfo objects (with empty desc field), any \n of which can subsequently be used to open an inlet.\n\n \"\"\"\n # noinspection PyCallingNonCallable\n arg_4 = (c_void_p*1024)()\n arg_5 = lib.lsl_Func(byref(arg_4), 1024,\n c_char_p(str.encode(arg_0)),\n arg_1,\n c_double(arg_2))\n return [StreamInfo(handle=arg_4[arg_6]) for arg_6 in range(arg_5)]"} +{"_id": "doc_8055", "title": "", "text": "def Func(arg_0):\n \"\"\"Error handler function. 
Translates an error code into an exception.\"\"\"\n if type(arg_0) is c_int:\n arg_0 = arg_0.value\n if arg_0 == 0:\n pass # no error\n elif arg_0 == -1:\n raise TimeoutError(\"the operation failed due to a timeout.\")\n elif arg_0 == -2:\n raise LostError(\"the stream has been lost.\")\n elif arg_0 == -3:\n raise InvalidArgumentError(\"an argument was incorrectly specified.\")\n elif arg_0 == -4:\n raise InternalError(\"an internal error has occurred.\")\n elif arg_0 < 0: \n raise RuntimeError(\"an unknown error has occurred.\")"} +{"_id": "doc_8056", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a Func with a specified name.\"\"\"\n return XMLElement(lib.lsl_Func(arg_0.e, str.encode(arg_1)))"} +{"_id": "doc_8057", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Get the previous sibling in the children list of the parent node.\n\n If a name is provided, the previous sibling with the given name is\n returned.\n\n \"\"\"\n if arg_1 is None:\n return XMLElement(lib.lsl_Func(arg_0.e))\n else:\n return XMLElement(lib.lsl_Func_n(arg_0.e,\n str.encode(arg_1)))"} +{"_id": "doc_8058", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the element's name. Returns False if the node is empty.\"\"\"\n return bool(lib.lsl_Func(arg_0.e, str.encode(arg_1)))"} +{"_id": "doc_8059", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Set the element's value. Returns False if the node is empty.\"\"\"\n return bool(lib.lsl_Func(arg_0.e, str.encode(arg_1)))"} +{"_id": "doc_8060", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Append a copy of the specified element as a child.\"\"\"\n return XMLElement(lib.lsl_Func(arg_0.e, arg_1.e))"} +{"_id": "doc_8061", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove a given child element, specified by name or as element.\"\"\"\n if type(arg_1) is XMLElement:\n lib.lsl_Func(arg_0.e, arg_1.e)\n else:\n lib.lsl_Func_n(arg_0.e, arg_1)"} +{"_id": "doc_8062", "title": "", "text": "def Func(arg_0):\n \"\"\"Obtain the set of currently present streams on the network.\n\n Returns a list of matching StreamInfo objects (with empty desc\n field), any of which can subsequently be used to open an inlet.\n\n \"\"\"\n # noinspection PyCallingNonCallable\n arg_1 = (c_void_p*1024)()\n arg_2 = lib.lsl_resolver_Func(arg_0.obj, byref(arg_1), 1024)\n return [StreamInfo(handle=arg_1[arg_3]) for arg_3 in range(arg_2)]"} +{"_id": "doc_8063", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"See all token associated with a given token.\n PAIR lilas\"\"\"\n arg_1 = list(preprocess_query(arg_1))[0]\n arg_2 = Func_key(arg_1)\n arg_3 = [t.decode() for t in DB.smembers(arg_2)]\n arg_3.sort()\n print(white(arg_3))\n print(magenta('(Total: {})'.format(len(arg_3))))"} +{"_id": "doc_8064", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Shows autocomplete results for a given token.\"\"\"\n arg_1 = list(preprocess_query(arg_1))[0]\n arg_2 = [k.decode() for k in DB.smembers(edge_ngram_key(arg_1))]\n print(white(arg_2))\n print(magenta('({} elements)'.format(len(arg_2))))"} +{"_id": "doc_8065", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute fuzzy extensions of word.\n FUZZY lilas\"\"\"\n arg_1 = list(preprocess_query(arg_1))[0]\n print(white(make_fuzzy(arg_1)))"} +{"_id": "doc_8066", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute fuzzy extensions of word that exist in index.\n FUZZYINDEX lilas\"\"\"\n arg_1 = list(preprocess_query(arg_1))[0]\n arg_2 = Token(arg_1)\n arg_3 = make_fuzzy(arg_2)\n arg_3 = [(n, 
DB.zcard(dbkeys.token_key(n))) for n in arg_3]\n arg_3.sort(key=lambda n: n[1], reverse=True)\n for arg_2, arg_4 in arg_3:\n if arg_4 == 0:\n break\n print(white(arg_2), blue(arg_4))"} +{"_id": "doc_8067", "title": "", "text": "def Func(arg_0):\n \"\"\"Try to extract the bigger group of interlinked tokens.\n\n Should generally be used at last in the collectors chain.\n \"\"\"\n if not arg_0.bucket_dry:\n return # No need.\n arg_1 = set(arg_0.meaningful + arg_0.common)\n for arg_2 in _extract_manytomany_relations(arg_1):\n arg_0.add_to_bucket([t.db_key for t in arg_2])\n if arg_0.bucket_overflow:\n break\n else:\n arg_0.debug('No relation extrapolated.')"} +{"_id": "doc_8068", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Display this help message.\"\"\"\n if arg_1:\n arg_2 = getattr(arg_0, 'do_' + arg_1).__doc__\n print(cyan(arg_2.replace(' ' * 8, '')))\n else:\n print(magenta('Available commands:'))\n print(magenta('Type \"HELP \" to get more info.'))\n arg_3 = arg_0.get_names()\n arg_3.sort()\n for arg_4 in arg_3:\n if arg_4[:3] != 'do_':\n continue\n arg_2 = getattr(arg_0, arg_4).__doc__\n arg_2 = arg_2.split('\\n')[0]\n print('{} {}'.format(yellow(arg_4[3:]),\n cyan(arg_2.replace(' ' * 8, ' ')\n .replace('\\n', ''))))"} +{"_id": "doc_8069", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"Print some useful infos from Redis DB.\"\"\"\n arg_2 = DB.info()\n arg_3 = [\n 'keyspace_misses', 'keyspace_hits', 'used_memory_human',\n 'total_commands_processed', 'total_connections_received',\n 'connected_clients']\n for arg_4 in arg_3:\n print('{}: {}'.format(white(arg_4), blue(arg_2[arg_4])))\n arg_5 = int(DB.config_get('databases')['databases'])\n for arg_6 in range(arg_5 - 1):\n arg_7 = 'db{}'.format(arg_6)\n if arg_7 in arg_2:\n arg_8 = white('nb keys (db {})'.format(arg_6))\n print('{}: {}'.format(arg_8, blue(arg_2[arg_7]['keys'])))"} +{"_id": "doc_8070", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Print raw content of a DB key.\n DBKEY g|u09tyzfe\"\"\"\n arg_2 = DB.type(arg_1).decode()\n if arg_2 == 'set':\n arg_3 = DB.smembers(arg_1)\n elif arg_2 == 'string':\n arg_3 = DB.get(arg_1)\n else:\n arg_3 = 'Unsupported type {}'.format(arg_2)\n print('type:', magenta(arg_2))\n print('value:', white(arg_3))"} +{"_id": "doc_8071", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute a geohash from latitude and longitude.\n GEOHASH 48.1234 2.9876\"\"\"\n try:\n arg_2, arg_3 = map(float, arg_1.split())\n except ValueError:\n print(red('Invalid lat and lon {}'.format(arg_1)))\n else:\n print(white(geohash.encode(arg_2, arg_3, config.GEOHASH_PRECISION)))"} +{"_id": "doc_8072", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get index details for a document by its id.\n INDEX 772210180J\"\"\"\n arg_2 = doc_by_id(arg_1)\n if not arg_2:\n return arg_0.error('id \"{}\" not found'.format(arg_1))\n for arg_3 in config.FIELDS:\n arg_4 = arg_3['key']\n if arg_4 in arg_2:\n arg_0._print_field_index_details(arg_2[arg_4], arg_1)"} +{"_id": "doc_8073", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return document linked to word with higher score.\n BESTSCORE lilas\"\"\"\n arg_2 = keys.token_key(indexed_string(arg_1)[0])\n for arg_3, arg_4 in DB.zrevrange(arg_2, 0, 20, withscores=True):\n arg_5 = Result(arg_3)\n print(white(arg_5), blue(arg_4), green(arg_5._id))"} +{"_id": "doc_8074", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Print the distance score between two strings. 
Use |\u00a0as separator.\n STRDISTANCE rue des lilas|porte des lilas\"\"\"\n arg_1 = arg_1.split('|')\n if not len(arg_1) == 2:\n print(red('Malformed string. Use | between the two strings.'))\n return\n arg_2, arg_3 = arg_1\n print(white(compare_str(arg_2, arg_3)))"} +{"_id": "doc_8075", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Just Funcs the request using its Func method and returns its response. \"\"\"\n arg_0.Func(arg_1=arg_1)\n return arg_0.response"} +{"_id": "doc_8076", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=None, arg_3=1, arg_4=None):\n \"\"\"Concurrently converts a list of Requests to Responses.\n\n :param requests: a collection of Request objects.\n :param stream: If False, the content will not be downloaded immediately.\n :param size: Specifies the number of workers to run at a time. If 1, no parallel processing.\n :param exception_handler: Callback function, called when exception occured. Params: Request, Exception\n \"\"\"\n\n arg_2 = arg_2 if arg_2 else Pool(arg_3)\n arg_0 = list(arg_0)\n\n arg_0 = arg_2.Func(send, arg_0)\n\n arg_5 = []\n for arg_6 in arg_0:\n if arg_6.response is not None:\n arg_5.append(arg_6.response)\n elif arg_4 and hasattr(arg_6, 'exception'):\n arg_5.append(arg_4(arg_6, arg_6.exception))\n else:\n arg_5.append(None)\n\n if not arg_2:\n arg_2.close()\n\n return arg_5"} +{"_id": "doc_8077", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Removes PEM-encoding from a public key, private key or certificate. If the\n private key is encrypted, the password will be used to decrypt it.\n\n :param data:\n A byte string of the PEM-encoded data\n\n :param password:\n A byte string of the encryption password, or None\n\n :return:\n A 3-element tuple in the format: (key_type, algorithm, der_bytes). The\n key_type will be a unicode string of \"public key\", \"private key\" or\n \"certificate\". 
The algorithm will be a unicode string of \"rsa\", \"dsa\"\n or \"ec\".\n \"\"\"\n\n arg_2, arg_3, arg_4 = pem.unarmor(arg_0)\n\n arg_5 = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n arg_6 = re.match(arg_5, arg_2)\n if not arg_6:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n arg_7 = arg_6.group(1)\n\n arg_0 = arg_0.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if arg_7 in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n arg_8 = arg_6.group(2).lower()\n return ('private key', arg_8, Func_openssl_private(arg_3, arg_4, arg_1))\n\n arg_9 = arg_7.lower()\n arg_8 = None\n if arg_9 == 'encrypted private key':\n arg_9 = 'private key'\n elif arg_9 == 'rsa public key':\n arg_9 = 'public key'\n arg_8 = 'rsa'\n\n return (arg_9, arg_8, arg_4)"} +{"_id": "doc_8078", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Decrypts encrypted ASN.1 data\n\n :param encryption_algorithm_info:\n An instance of asn1crypto.pkcs5.Pkcs5EncryptionAlgorithm\n\n :param encrypted_content:\n A byte string of the encrypted content\n\n :param password:\n A byte string of the encrypted content's password\n\n :return:\n A byte string of the decrypted plaintext\n \"\"\"\n\n arg_3 = crypto_funcs[arg_0.encryption_cipher]\n\n # Modern, PKCS#5 PBES2-based encryption\n if arg_0.kdf == 'pbkdf2':\n\n if arg_0.encryption_cipher == 'rc5':\n raise ValueError(pretty_message(\n '''\n PBES2 encryption scheme utilizing RC5 encryption is not supported\n '''\n ))\n\n arg_4 = pbkdf2(\n arg_0.kdf_hmac,\n arg_2,\n arg_0.kdf_salt,\n arg_0.kdf_iterations,\n arg_0.key_length\n )\n arg_5 = arg_0.encryption_iv\n\n arg_6 = arg_3(arg_4, arg_1, arg_5)\n\n elif arg_0.kdf == 'pbkdf1':\n arg_7 = pbkdf1(\n arg_0.kdf_hmac,\n arg_2,\n arg_0.kdf_salt,\n arg_0.kdf_iterations,\n arg_0.key_length + 8\n )\n arg_4 = arg_7[0:8]\n arg_5 = arg_7[8:16]\n\n arg_6 = arg_3(arg_4, arg_1, arg_5)\n\n elif arg_0.kdf == 'pkcs12_kdf':\n arg_4 = pkcs12_kdf(\n arg_0.kdf_hmac,\n arg_2,\n arg_0.kdf_salt,\n arg_0.kdf_iterations,\n arg_0.key_length,\n 1 # ID 1 is for generating a key\n )\n\n # Since RC4 is a stream cipher, we don't use an IV\n if arg_0.encryption_cipher == 'rc4':\n arg_6 = arg_3(arg_4, arg_1)\n\n else:\n arg_5 = pkcs12_kdf(\n arg_0.kdf_hmac,\n arg_2,\n arg_0.kdf_salt,\n arg_0.kdf_iterations,\n arg_0.encryption_block_size,\n 2 # ID 2 is for generating an IV\n )\n arg_6 = arg_3(arg_4, arg_1, arg_5)\n\n return arg_6"} +{"_id": "doc_8079", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates an EVP_CIPHER pointer object and determines the buffer size\n necessary for the parameter specified.\n\n :param evp_cipher_ctx:\n An EVP_CIPHER_CTX pointer\n\n :param cipher:\n A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n :param key:\n The key byte string\n\n :param data:\n The plaintext or ciphertext as a byte string\n\n :param padding:\n If padding is to be used\n\n :return:\n A 2-element tuple with the first element being an EVP_CIPHER pointer\n and the second being an integer that is the required buffer size\n \"\"\"\n\n arg_2 = {\n 'aes128': libcrypto.EVP_aes_128_cbc,\n 'aes192': libcrypto.EVP_aes_192_cbc,\n 'aes256': libcrypto.EVP_aes_256_cbc,\n 'rc2': libcrypto.EVP_rc2_cbc,\n 'rc4': libcrypto.EVP_rc4,\n 'des': 
libcrypto.EVP_des_cbc,\n 'tripledes_2key': libcrypto.EVP_des_ede_cbc,\n 'tripledes_3key': libcrypto.EVP_des_ede3_cbc,\n }[arg_0]()\n\n if arg_0 == 'rc4':\n arg_3 = len(arg_1)\n else:\n arg_4 = {\n 'aes128': 16,\n 'aes192': 16,\n 'aes256': 16,\n 'rc2': 8,\n 'des': 8,\n 'tripledes_2key': 8,\n 'tripledes_3key': 8,\n }[arg_0]\n arg_3 = arg_4 * int(math.ceil(len(arg_1) / arg_4))\n\n return (arg_2, arg_3)"} +{"_id": "doc_8080", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Takes a CryptoAPI RSA private key blob and converts it into the ASN.1\n structures for the public and private keys\n\n :param bit_size:\n The integer bit size of the key\n\n :param blob_struct:\n An instance of the advapi32.RSAPUBKEY struct\n\n :param blob:\n A byte string of the binary data after the header\n\n :return:\n A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n asn1crypto.keys.PrivateKeyInfo)\n \"\"\"\n\n arg_3 = arg_0 // 8\n arg_4 = arg_0 // 16\n\n arg_5 = arg_3\n arg_6 = arg_5 + arg_4\n arg_7 = arg_6 + arg_4\n arg_8 = arg_7 + arg_4\n arg_9 = arg_8 + arg_4\n arg_10 = arg_9 + arg_4\n\n arg_11 = arg_1.rsapubkey.pubexp\n arg_12 = int_from_bytes(arg_2[0:arg_5][::-1])\n arg_13 = int_from_bytes(arg_2[arg_5:arg_6][::-1])\n arg_14 = int_from_bytes(arg_2[arg_6:arg_7][::-1])\n arg_15 = int_from_bytes(arg_2[arg_7:arg_8][::-1])\n arg_16 = int_from_bytes(arg_2[arg_8:arg_9][::-1])\n arg_17 = int_from_bytes(arg_2[arg_9:arg_10][::-1])\n arg_18 = int_from_bytes(arg_2[arg_10:arg_10 + arg_3][::-1])\n\n arg_19 = keys.PublicKeyInfo({\n 'algorithm': keys.PublicKeyAlgorithm({\n 'algorithm': 'rsa',\n }),\n 'public_key': keys.RSAPublicKey({\n 'modulus': arg_12,\n 'public_exponent': arg_11,\n }),\n })\n\n arg_20 = keys.RSAPrivateKey({\n 'version': 'two-prime',\n 'modulus': arg_12,\n 'public_exponent': arg_11,\n 'private_exponent': arg_18,\n 'prime1': arg_13,\n 'prime2': arg_14,\n 'exponent1': arg_15,\n 'exponent2': arg_16,\n 'coefficient': arg_17,\n })\n\n arg_21 = keys.PrivateKeyInfo({\n 'version': 0,\n 'private_key_algorithm': keys.PrivateKeyAlgorithm({\n 'algorithm': 'rsa',\n }),\n 'private_key': arg_20,\n })\n\n return (arg_19, arg_21)"} +{"_id": "doc_8081", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Takes a CryptoAPI DSS private key blob and converts it into the ASN.1\n structures for the public and private keys\n\n :param bit_size:\n The integer bit size of the key\n\n :param public_blob:\n A byte string of the binary data after the public key header\n\n :param private_blob:\n A byte string of the binary data after the private key header\n\n :return:\n A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n asn1crypto.keys.PrivateKeyInfo)\n \"\"\"\n\n arg_3 = 20\n arg_4 = arg_0 // 8\n\n arg_5 = arg_4\n arg_6 = arg_5 + arg_3\n arg_7 = arg_6 + arg_4\n arg_8 = arg_7\n\n arg_9 = int_from_bytes(arg_2[0:arg_5][::-1])\n arg_10 = int_from_bytes(arg_2[arg_5:arg_6][::-1])\n arg_11 = int_from_bytes(arg_2[arg_6:arg_7][::-1])\n arg_12 = int_from_bytes(arg_2[arg_7:arg_7 + arg_3][::-1])\n arg_13 = int_from_bytes(arg_1[arg_8:arg_8 + arg_4][::-1])\n\n arg_14 = keys.PublicKeyInfo({\n 'algorithm': keys.PublicKeyAlgorithm({\n 'algorithm': 'dsa',\n 'parameters': keys.DSAParams({\n 'p': arg_9,\n 'q': arg_10,\n 'g': arg_11,\n })\n }),\n 'public_key': core.Integer(arg_13),\n })\n\n arg_15 = keys.PrivateKeyInfo({\n 'version': 0,\n 'private_key_algorithm': keys.PrivateKeyAlgorithm({\n 'algorithm': 'dsa',\n 'parameters': keys.DSAParams({\n 'p': arg_9,\n 'q': arg_10,\n 'g': arg_11,\n })\n }),\n 'private_key': 
core.Integer(arg_12),\n })\n\n return (arg_14, arg_15)"} +{"_id": "doc_8082", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Generates a DSA signature\n\n :param private_key:\n The PrivateKey to generate the signature with\n\n :param data:\n A byte string of the data the signature is for\n\n :param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the signature\n \"\"\"\n\n if arg_0.algorithm != 'dsa':\n raise ValueError('The key specified is not a DSA private key')\n\n return _sign(arg_0, arg_1, arg_2)"} +{"_id": "doc_8083", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Generates an ECDSA signature\n\n :param private_key:\n The PrivateKey to generate the signature with\n\n :param data:\n A byte string of the data the signature is for\n\n :param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the signature\n \"\"\"\n\n if arg_0.algorithm != 'ec':\n raise ValueError('The key specified is not an EC private key')\n\n return _sign(arg_0, arg_1, arg_2)"} +{"_id": "doc_8084", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Generates an RSA, DSA or ECDSA signature via CryptoAPI\n\n :param private_key:\n The PrivateKey to generate the signature with\n\n :param data:\n A byte string of the data the signature is for\n\n :param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n :param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the signature\n \"\"\"\n\n arg_4 = arg_0.algorithm\n\n if arg_4 == 'rsa' and arg_2 == 'raw':\n arg_5 = add_pkcs1v15_signature_padding(arg_0.byte_size, arg_1)\n return raw_rsa_private_crypt(arg_0, arg_5)\n\n if arg_4 == 'rsa' and arg_3:\n arg_6 = {\n 'sha1': 20,\n 'sha224': 28,\n 'sha256': 32,\n 'sha384': 48,\n 'sha512': 64\n }.get(arg_2, 0)\n arg_5 = add_pss_padding(arg_2, arg_6, arg_0.bit_size, arg_1)\n return raw_rsa_private_crypt(arg_0, arg_5)\n\n if arg_0.algorithm == 'dsa' and arg_2 == 'md5':\n raise ValueError(pretty_message(\n '''\n Windows does not support md5 signatures with DSA keys\n '''\n ))\n\n arg_7 = None\n\n try:\n arg_8 = {\n 'md5': Advapi32Const.CALG_MD5,\n 'sha1': Advapi32Const.CALG_SHA1,\n 'sha256': Advapi32Const.CALG_SHA_256,\n 'sha384': Advapi32Const.CALG_SHA_384,\n 'sha512': Advapi32Const.CALG_SHA_512,\n }[arg_2]\n\n arg_9 = new(advapi32, 'HCRYPTHASH *')\n arg_10 = advapi32.CryptCreateHash(\n arg_0.context_handle,\n arg_8,\n null(),\n 0,\n arg_9\n )\n handle_error(arg_10)\n\n arg_7 = unwrap(arg_9)\n\n arg_10 = advapi32.CryptHashData(arg_7, arg_1, len(arg_1), 0)\n handle_error(arg_10)\n\n arg_11 = new(advapi32, 'DWORD *')\n arg_10 = advapi32.CryptSignHashW(\n arg_7,\n Advapi32Const.AT_SIGNATURE,\n null(),\n 0,\n null(),\n arg_11\n 
)\n handle_error(arg_10)\n\n arg_12 = deref(arg_11)\n arg_13 = buffer_from_bytes(arg_12)\n\n arg_10 = advapi32.CryptSignHashW(\n arg_7,\n Advapi32Const.AT_SIGNATURE,\n null(),\n 0,\n arg_13,\n arg_11\n )\n handle_error(arg_10)\n\n arg_14 = bytes_from_buffer(arg_13, deref(arg_11))\n\n # CryptoAPI outputs the signature in little endian byte order, so we\n # must swap it for compatibility with other systems\n arg_14 = arg_14[::-1]\n\n if arg_4 == 'dsa':\n # Switch the two integers because the reversal just before switched\n # then\n arg_15 = len(arg_14) // 2\n arg_14 = arg_14[arg_15:] + arg_14[:arg_15]\n # Windows doesn't use the ASN.1 Sequence for DSA signatures,\n # so we have to convert it here for the verification to work\n arg_14 = algos.DSASignature.from_p1363(arg_14).dump()\n\n return arg_14\n\n finally:\n if arg_7:\n advapi32.CryptDestroyHash(arg_7)"} +{"_id": "doc_8085", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Generates an RSA, DSA or ECDSA signature via CNG\n\n :param private_key:\n The PrivateKey to generate the signature with\n\n :param data:\n A byte string of the data the signature is for\n\n :param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n :param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the signature\n \"\"\"\n\n if arg_2 == 'raw':\n arg_4 = arg_1\n else:\n arg_5 = {\n 'md5': BcryptConst.BCRYPT_MD5_ALGORITHM,\n 'sha1': BcryptConst.BCRYPT_SHA1_ALGORITHM,\n 'sha256': BcryptConst.BCRYPT_SHA256_ALGORITHM,\n 'sha384': BcryptConst.BCRYPT_SHA384_ALGORITHM,\n 'sha512': BcryptConst.BCRYPT_SHA512_ALGORITHM\n }[arg_2]\n\n arg_4 = getattr(hashlib, arg_2)(arg_1).digest()\n\n arg_6 = null()\n arg_7 = 0\n\n if arg_0.algorithm == 'rsa':\n if arg_3:\n arg_8 = {\n 'md5': 16,\n 'sha1': 20,\n 'sha256': 32,\n 'sha384': 48,\n 'sha512': 64\n }[arg_2]\n\n arg_7 = BcryptConst.BCRYPT_PAD_PSS\n arg_9 = struct(bcrypt, 'BCRYPT_PSS_PADDING_INFO')\n arg_10 = unwrap(arg_9)\n # This has to be assigned to a variable to prevent cffi from gc'ing it\n arg_11 = buffer_from_unicode(arg_5)\n arg_10.pszAlgId = cast(bcrypt, 'wchar_t *', arg_11)\n arg_10.cbSalt = arg_8\n else:\n arg_7 = BcryptConst.BCRYPT_PAD_PKCS1\n arg_9 = struct(bcrypt, 'BCRYPT_PKCS1_PADDING_INFO')\n arg_10 = unwrap(arg_9)\n # This has to be assigned to a variable to prevent cffi from gc'ing it\n if arg_2 == 'raw':\n arg_10.pszAlgId = null()\n else:\n arg_11 = buffer_from_unicode(arg_5)\n arg_10.pszAlgId = cast(bcrypt, 'wchar_t *', arg_11)\n arg_6 = cast(bcrypt, 'void *', arg_9)\n\n if arg_0.algorithm == 'dsa' and arg_0.bit_size > 1024 and arg_2 in set(['md5', 'sha1']):\n raise ValueError(pretty_message(\n '''\n Windows does not support sha1 signatures with DSA keys based on\n sha224, sha256 or sha512\n '''\n ))\n\n arg_14 = new(bcrypt, 'DWORD *')\n arg_15 = bcrypt.BCryptSignHash(\n arg_0.key_handle,\n arg_6,\n arg_4,\n len(arg_4),\n null(),\n 0,\n arg_14,\n arg_7\n )\n handle_error(arg_15)\n\n arg_16 = deref(arg_14)\n arg_17 = buffer_from_bytes(arg_16)\n\n if arg_0.algorithm == 'rsa':\n arg_6 = cast(bcrypt, 'void *', arg_9)\n\n arg_15 = bcrypt.BCryptSignHash(\n arg_0.key_handle,\n arg_6,\n arg_4,\n len(arg_4),\n arg_17,\n arg_16,\n arg_14,\n arg_7\n )\n handle_error(arg_15)\n arg_18 = 
bytes_from_buffer(arg_17, deref(arg_14))\n\n if arg_0.algorithm != 'rsa':\n # Windows doesn't use the ASN.1 Sequence for DSA/ECDSA signatures,\n # so we have to convert it here for the verification to work\n arg_18 = algos.DSASignature.from_p1363(arg_18).dump()\n\n return arg_18"} +{"_id": "doc_8086", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Encrypts a value using an RSA public key via CNG\n\n :param certificate_or_public_key:\n A Certificate or PublicKey instance to encrypt with\n\n :param data:\n A byte string of the data to encrypt\n\n :param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the ciphertext\n \"\"\"\n\n arg_3 = BcryptConst.BCRYPT_PAD_PKCS1\n if arg_2 is True:\n arg_3 = BcryptConst.BCRYPT_PAD_OAEP\n\n arg_4 = struct(bcrypt, 'BCRYPT_OAEP_PADDING_INFO')\n arg_5 = unwrap(arg_4)\n # This has to be assigned to a variable to prevent cffi from gc'ing it\n arg_6 = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)\n arg_5.pszAlgId = cast(bcrypt, 'wchar_t *', arg_6)\n arg_5.pbLabel = null()\n arg_5.cbLabel = 0\n arg_10 = cast(bcrypt, 'void *', arg_4)\n else:\n arg_10 = null()\n\n arg_11 = new(bcrypt, 'ULONG *')\n arg_12 = bcrypt.BCryptEncrypt(\n arg_0.key_handle,\n arg_1,\n len(arg_1),\n arg_10,\n null(),\n 0,\n null(),\n 0,\n arg_11,\n arg_3\n )\n handle_error(arg_12)\n\n arg_13 = deref(arg_11)\n arg_14 = buffer_from_bytes(arg_13)\n\n arg_12 = bcrypt.BCryptEncrypt(\n arg_0.key_handle,\n arg_1,\n len(arg_1),\n arg_10,\n null(),\n 0,\n arg_14,\n arg_13,\n arg_11,\n arg_3\n )\n handle_error(arg_12)\n\n return bytes_from_buffer(arg_14, deref(arg_11))"} +{"_id": "doc_8087", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Encrypts a value using an RSA private key via CryptoAPI\n\n :param private_key:\n A PrivateKey instance to decrypt with\n\n :param ciphertext:\n A byte string of the data to decrypt\n\n :param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the plaintext\n \"\"\"\n\n arg_3 = 0\n if arg_2:\n arg_3 = Advapi32Const.CRYPT_OAEP\n\n arg_1 = arg_1[::-1]\n\n arg_4 = buffer_from_bytes(arg_1)\n arg_5 = new(advapi32, 'DWORD *', len(arg_1))\n arg_6 = advapi32.CryptDecrypt(\n arg_0.ex_key_handle,\n null(),\n True,\n arg_3,\n arg_4,\n arg_5\n )\n handle_error(arg_6)\n\n return bytes_from_buffer(arg_4, deref(arg_5))"} +{"_id": "doc_8088", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Blocks until the socket is ready to be read from, or the timeout is hit\n\n :param timeout:\n A float - the period of time to wait for data to be read. None for\n no time limit.\n\n :return:\n A boolean - if data is ready to be read. 
Will only be False if\n timeout is not None.\n \"\"\"\n\n # If we have buffered data, we consider a read possible\n if len(arg_0._decrypted_bytes) > 0:\n return True\n\n arg_2, arg_3, arg_3 = select.select([arg_0._socket], [], [], arg_1)\n return len(arg_2) > 0"} +{"_id": "doc_8089", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reads exactly the specified number of bytes from the socket\n\n :param num_bytes:\n An integer - the exact number of bytes to read\n\n :return:\n A byte string of the data that was read\n \"\"\"\n\n arg_2 = b''\n arg_3 = arg_1\n while arg_3 > 0:\n arg_2 += arg_0.read(arg_3)\n arg_3 = arg_1 - len(arg_2)\n\n return arg_2"} +{"_id": "doc_8090", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Reads data from the socket and writes it to the memory bio\n used by libssl to decrypt the data. Returns the unencrypted\n data for the purpose of debugging handshakes.\n\n :return:\n A byte string of ciphertext from the socket. Used for\n debugging the handshake only.\n \"\"\"\n\n arg_1 = arg_0._raw_bytes\n try:\n arg_1 += arg_0._socket.recv(8192)\n except (socket_.error):\n pass\n arg_2 = arg_1\n arg_3 = libssl.BIO_write(arg_0._rbio, arg_1, len(arg_1))\n arg_0._raw_bytes = arg_1[arg_3:]\n return arg_2"} +{"_id": "doc_8091", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Takes ciphertext from the memory bio and writes it to the\n socket.\n\n :return:\n A byte string of ciphertext going to the socket. Used\n for debugging the handshake only.\n \"\"\"\n\n arg_1 = libssl.BIO_ctrl_pending(arg_0._wbio)\n if arg_1 == 0:\n return b''\n arg_2 = min(arg_0._buffer_size, arg_1)\n arg_3 = libssl.BIO_read(arg_0._wbio, arg_0._bio_write_buffer, arg_2)\n arg_4 = bytes_from_buffer(arg_0._bio_write_buffer, arg_3)\n arg_5 = arg_4\n while len(arg_4):\n arg_6 = False\n try:\n arg_7 = arg_0._socket.send(arg_4)\n except (socket_.error) as e:\n # Handle ECONNRESET and EPIPE\n if e.errno == 104 or e.errno == 32:\n arg_6 = True\n else:\n raise\n\n if arg_6:\n raise_disconnection()\n arg_4 = arg_4[arg_7:]\n if len(arg_4):\n arg_0.select_write()\n return arg_5"} +{"_id": "doc_8092", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Encrypts plaintext via CNG\n\n :param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n :param key:\n The encryption key - a byte string 5-16 bytes long\n\n :param data:\n The plaintext - a byte string\n\n :param iv:\n The initialization vector - a byte string - unused for RC4\n\n :param padding:\n Boolean, if padding should be used - unused for RC4\n\n :raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n :return:\n A byte string of the ciphertext\n \"\"\"\n\n arg_5 = None\n\n try:\n arg_5 = _bcrypt_create_key_handle(arg_0, arg_1)\n\n if arg_3 is None:\n arg_6 = 0\n else:\n arg_6 = len(arg_3)\n\n arg_7 = 0\n if arg_4 is True:\n arg_7 = BcryptConst.BCRYPT_BLOCK_PADDING\n\n arg_8 = new(bcrypt, 'ULONG *')\n arg_9 = bcrypt.BCryptEncrypt(\n arg_5,\n arg_2,\n len(arg_2),\n null(),\n null(),\n 0,\n null(),\n 0,\n arg_8,\n arg_7\n )\n handle_error(arg_9)\n\n arg_10 = deref(arg_8)\n arg_11 = buffer_from_bytes(arg_10)\n arg_12 = buffer_from_bytes(arg_3) if arg_3 else null()\n\n arg_9 = bcrypt.BCryptEncrypt(\n arg_5,\n arg_2,\n len(arg_2),\n null(),\n arg_12,\n arg_6,\n arg_11,\n arg_10,\n arg_8,\n arg_7\n )\n handle_error(arg_9)\n\n 
return bytes_from_buffer(arg_11, deref(arg_8))\n\n finally:\n if arg_5:\n bcrypt.BCryptDestroyKey(arg_5)"} +{"_id": "doc_8093", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Checks if an error occured, and if so throws an OSError containing the\n last OpenSSL error message\n\n :param result:\n An integer result code - 1 or greater indicates success\n\n :param exception_class:\n The exception class to use for the exception if an error occurred\n\n :raises:\n OSError - when an OpenSSL error occurs\n \"\"\"\n\n if arg_0 > 0:\n return\n\n if arg_1 is None:\n arg_1 = OSError\n\n arg_2 = libcrypto.ERR_get_error()\n arg_3 = buffer_from_bytes(120)\n libcrypto.ERR_error_string(arg_2, arg_3)\n\n # Since we are dealing with a string, it is NULL terminated\n arg_4 = byte_string_from_buffer(arg_3)\n\n raise arg_1(_try_decode(arg_4))"} +{"_id": "doc_8094", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return the certificate and a hash of it\n\n :param cert_pointer:\n A SecCertificateRef\n\n :return:\n A 2-element tuple:\n - [0]: A byte string of the SHA1 hash of the cert\n - [1]: A byte string of the DER-encoded contents of the cert\n \"\"\"\n\n arg_1 = None\n\n try:\n arg_1 = Security.SecCertificateCopyData(arg_0)\n arg_2 = CFHelpers.cf_data_to_bytes(arg_1)\n arg_3 = hashlib.sha1(arg_2).digest()\n\n return (arg_2, arg_3)\n\n finally:\n if arg_1 is not None:\n CoreFoundation.CFRelease(arg_1)"} +{"_id": "doc_8095", "title": "", "text": "def Func():\n \"\"\"\n Extracts the last OS error message into a python unicode string\n\n :return:\n A unicode string error message\n \"\"\"\n\n arg_0 = errno()\n\n try:\n arg_1 = os.strerror(arg_0)\n except (ValueError):\n return str_cls(arg_0)\n\n if isinstance(arg_1, str_cls):\n return arg_1\n\n return _try_decode(arg_1)"} +{"_id": "doc_8096", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts a CFDictionary object into a python dictionary\n\n :param dictionary:\n The CFDictionary to convert\n\n :return:\n A python dict\n \"\"\"\n\n arg_1 = CoreFoundation.CFDictionaryGetCount(arg_0)\n\n arg_2 = (CFTypeRef * arg_1)()\n arg_3 = (CFTypeRef * arg_1)()\n CoreFoundation.CFDictionaryGetKeysAndValues(\n arg_0,\n _cast_pointer_p(arg_2),\n _cast_pointer_p(arg_3)\n )\n\n arg_4 = {}\n for arg_5 in range(0, arg_1):\n arg_4[arg_6.native(arg_2[arg_5])] = arg_6.native(arg_3[arg_5])\n\n return arg_4"} +{"_id": "doc_8097", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Extracts the function signature and description of a Python function\n\n :param docstring:\n A unicode string of the docstring for the function\n\n :param def_lineno:\n An integer line number that function was defined on\n\n :param code_lines:\n A list of unicode string lines from the source file the function was\n defined in\n\n :param prefix:\n A prefix to prepend to all output lines\n\n :return:\n A 2-element tuple:\n\n - [0] A unicode string of the function signature with a docstring of\n parameter info\n - [1] A markdown snippet of the function description\n \"\"\"\n\n arg_4 = arg_1 - 1\n arg_5 = arg_2[arg_4]\n arg_5 = arg_5.rstrip()\n while not arg_5.endswith(':'):\n arg_4 += 1\n arg_5 += '\\n' + arg_2[arg_4].rstrip()\n\n arg_5 = textwrap.dedent(arg_5).rstrip(':')\n arg_5 = arg_5.replace('\\n', '\\n' + arg_3)\n\n arg_6 = ''\n arg_7 = False\n\n arg_8 = ''\n\n for arg_9 in arg_0.splitlines():\n if arg_9 and arg_9[0] == ':':\n arg_7 = True\n if not arg_7:\n if arg_6:\n arg_6 += '\\n'\n arg_6 += arg_9\n else:\n if arg_8:\n arg_8 += '\\n'\n arg_8 += arg_9\n\n arg_6 = 
arg_6.strip()\n arg_10 = ''\n if arg_6:\n arg_10 = \"%s%s\" % (arg_3, arg_6.replace('\\n', '\\n' + arg_3))\n arg_10 = re.sub('\\n>(\\\\s+)\\n', '\\n>\\n', arg_10)\n\n arg_8 = arg_8.strip()\n if arg_8:\n arg_5 += (':\\n%s \"\"\"\\n%s ' % (arg_3, arg_3))\n arg_5 += arg_8.replace('\\n', '\\n%s ' % arg_3)\n arg_5 += ('\\n%s \"\"\"' % arg_3)\n arg_5 = re.sub('\\n>(\\\\s+)\\n', '\\n>\\n', arg_5)\n\n for arg_11, arg_12 in definition_replacements.items():\n arg_5 = arg_5.replace(arg_11, arg_12)\n\n return (arg_5, arg_10)"} +{"_id": "doc_8098", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Walks through a CommonMark AST to find section headers that delineate\n content that should be updated by this script\n\n :param md_ast:\n The AST of the markdown document\n\n :param sections:\n A dict to store the start and end lines of a section. The key will be\n a two-element tuple of the section type (\"class\", \"function\",\n \"method\" or \"attribute\") and identifier. The values are a two-element\n tuple of the start and end line number in the markdown document of the\n section.\n\n :param last:\n A dict containing information about the last section header seen.\n Includes the keys \"type_name\", \"identifier\", \"start_line\".\n\n :param last_class:\n A unicode string of the name of the last class found - used when\n processing methods and attributes.\n\n :param total_lines:\n An integer of the total number of lines in the markdown document -\n used to work around a bug in the API of the Python port of CommonMark\n \"\"\"\n\n def child_walker(arg_5):\n for arg_6, arg_7 in arg_5.walker():\n if arg_6 == arg_5:\n continue\n yield arg_6, arg_7\n\n for arg_6, arg_7 in child_walker(arg_0):\n if arg_6.t == 'heading':\n arg_8 = arg_6.sourcepos[0][0]\n\n if arg_6.level == 2:\n if arg_2:\n arg_1[(arg_2['type_name'], arg_2['identifier'])] = (arg_2['start_line'], arg_8 - 1)\n arg_2.clear()\n\n if arg_6.level in set([3, 5]):\n arg_9 = []\n for arg_10, arg_11 in child_walker(arg_6):\n arg_9.append(arg_10)\n if len(arg_9) != 2:\n continue\n arg_12 = arg_9[0]\n arg_13 = arg_9[1]\n if arg_12.t != 'code':\n continue\n if arg_13.t != 'text':\n continue\n\n arg_14 = arg_13.literal.strip()\n arg_15 = arg_12.literal.strip().replace('()', '').lstrip('.')\n\n if arg_2:\n arg_1[(arg_2['type_name'], arg_2['identifier'])] = (arg_2['start_line'], arg_8 - 1)\n arg_2.clear()\n\n if arg_14 == 'function':\n if arg_6.level != 3:\n continue\n\n if arg_14 == 'class':\n if arg_6.level != 3:\n continue\n arg_3.append(arg_15)\n\n if arg_14 in set(['method', 'attribute']):\n if arg_6.level != 5:\n continue\n arg_15 = arg_3[-1] + '.' + arg_15\n\n arg_2.update({\n 'type_name': arg_14,\n 'identifier': arg_15,\n 'start_line': arg_8,\n })\n\n elif arg_6.t == 'block_quote':\n find_sections(arg_6, arg_1, arg_2, arg_3)\n\n if arg_2:\n arg_1[(arg_2['type_name'], arg_2['identifier'])] = (arg_2['start_line'], arg_4)"} +{"_id": "doc_8099", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n A callback used to walk the Python AST looking for classes, functions,\n methods and attributes. Generates chunks of markdown markup to replace\n the existing content.\n\n :param node:\n An _ast module node object\n\n :param code_lines:\n A list of unicode strings - the source lines of the Python file\n\n :param sections:\n A dict of markdown document sections that need to be updated. 
The key\n will be a two-element tuple of the section type (\"class\", \"function\",\n \"method\" or \"attribute\") and identifier. The values are a two-element\n tuple of the start and end line number in the markdown document of the\n section.\n\n :param md_chunks:\n A dict with keys from the sections param and the values being a unicode\n string containing a chunk of markdown markup.\n \"\"\"\n\n if isinstance(arg_0, _ast.FunctionDef):\n arg_4 = ('function', arg_0.name)\n if arg_4 not in arg_2:\n return\n\n arg_5 = ast.get_docstring(arg_0)\n arg_6 = arg_0.lineno + len(arg_0.decorator_list)\n\n arg_7, arg_8 = _get_func_info(arg_5, arg_6, arg_1, '> ')\n\n arg_9 = textwrap.dedent(\"\"\"\n ### `%s()` function\n\n > ```python\n > %s\n > ```\n >\n %s\n \"\"\").strip() % (\n arg_0.name,\n arg_7,\n arg_8\n ) + \"\\n\"\n\n arg_3[arg_4] = arg_9.replace('>\\n\\n', '')\n\n elif isinstance(arg_0, _ast.ClassDef):\n if ('class', arg_0.name) not in arg_2:\n return\n\n for arg_10 in arg_0.body:\n if isinstance(arg_10, _ast.FunctionDef):\n arg_11 = arg_0.name + '.' + arg_10.name\n\n arg_12 = ('method', arg_11)\n arg_13 = arg_12 in arg_2\n\n arg_14 = ('attribute', arg_11)\n arg_15 = arg_14 in arg_2\n\n arg_16 = arg_10.name == '__init__'\n\n if not arg_16 and not arg_15 and not arg_13:\n continue\n\n arg_5 = ast.get_docstring(arg_10)\n arg_6 = arg_10.lineno + len(arg_10.decorator_list)\n\n if not arg_5:\n continue\n\n if arg_13 or arg_16:\n arg_7, arg_8 = _get_func_info(arg_5, arg_6, arg_1, '> > ')\n\n if arg_16:\n arg_4 = ('class', arg_0.name)\n\n arg_17 = ast.get_docstring(arg_0) or ''\n arg_18 = textwrap.dedent(arg_17).strip()\n if arg_18:\n arg_19 = \"> %s\\n>\" % (arg_18.replace(\"\\n\", \"\\n> \"))\n else:\n arg_19 = ''\n\n arg_9 = textwrap.dedent(\"\"\"\n ### `%s()` class\n\n %s\n > ##### constructor\n >\n > > ```python\n > > %s\n > > ```\n > >\n %s\n \"\"\").strip() % (\n arg_0.name,\n arg_19,\n arg_7,\n arg_8\n )\n\n arg_9 = arg_9.replace('\\n\\n\\n', '\\n\\n')\n\n else:\n arg_4 = arg_12\n\n arg_9 = textwrap.dedent(\"\"\"\n >\n > ##### `.%s()` method\n >\n > > ```python\n > > %s\n > > ```\n > >\n %s\n \"\"\").strip() % (\n arg_10.name,\n arg_7,\n arg_8\n )\n\n if arg_9[-5:] == '\\n> >\\n':\n arg_9 = arg_9[0:-5]\n\n else:\n arg_4 = arg_14\n\n arg_20 = textwrap.dedent(arg_5).strip()\n arg_8 = \"> > %s\" % (arg_20.replace(\"\\n\", \"\\n> > \"))\n\n arg_9 = textwrap.dedent(\"\"\"\n >\n > ##### `.%s` attribute\n >\n %s\n \"\"\").strip() % (\n arg_10.name,\n arg_8\n )\n\n arg_3[arg_4] = re.sub('[ \\\\t]+\\n', '\\n', arg_9.rstrip())\n\n elif isinstance(arg_0, _ast.If):\n for arg_21 in arg_0.body:\n Func(arg_21, arg_1, arg_2, arg_3)\n for arg_21 in arg_0.orelse:\n Func(arg_21, arg_1, arg_2, arg_3)"} +{"_id": "doc_8100", "title": "", "text": "def Func():\n \"\"\"\n Tries to find a CA certs bundle in common locations\n\n :raises:\n OSError - when no valid CA certs bundle was found on the filesystem\n\n :return:\n The full filesystem path to a CA certs bundle file\n \"\"\"\n\n arg_0 = None\n\n # Common CA cert paths\n arg_1 = [\n '/usr/lib/ssl/certs/ca-certificates.crt',\n '/etc/ssl/certs/ca-certificates.crt',\n '/etc/ssl/certs/ca-bundle.crt',\n '/etc/pki/tls/certs/ca-bundle.crt',\n '/etc/ssl/ca-bundle.pem',\n '/usr/local/share/certs/ca-root-nss.crt',\n '/etc/ssl/cert.pem'\n ]\n\n # First try SSL_CERT_FILE\n if 'SSL_CERT_FILE' in os.environ:\n arg_1.insert(0, os.environ['SSL_CERT_FILE'])\n\n for arg_2 in arg_1:\n if os.path.exists(arg_2) and os.path.getsize(arg_2) > 0:\n arg_0 = arg_2\n break\n\n if not 
arg_0:\n raise OSError(pretty_message(\n '''\n Unable to find a CA certs bundle in common locations - try\n setting the SSL_CERT_FILE environmental variable\n '''\n ))\n\n return arg_0"} +{"_id": "doc_8101", "title": "", "text": "def Func(arg_0=None, arg_1=False):\n \"\"\"\n Extracts trusted CA certs from the system CA cert bundle\n\n :param cert_callback:\n A callback that is called once for each certificate in the trust store.\n It should accept two parameters: an asn1crypto.x509.Certificate object,\n and a reason. The reason will be None if the certificate is being\n exported, otherwise it will be a unicode string of the reason it won't.\n\n :param callback_only_on_failure:\n A boolean - if the callback should only be called when a certificate is\n not exported.\n\n :return:\n A list of 3-element tuples:\n - 0: a byte string of a DER-encoded certificate\n - 1: a set of unicode strings that are OIDs of purposes to trust the\n certificate for\n - 2: a set of unicode strings that are OIDs of purposes to reject the\n certificate for\n \"\"\"\n\n arg_2 = '2.5.29.37.0'\n arg_3 = system_path()\n\n arg_4 = []\n with open(arg_3, 'rb') as f:\n for arg_5, arg_6, arg_7 in unarmor(f.read(), multiple=True):\n # Without more info, a certificate is trusted for all purposes\n if arg_5 == 'CERTIFICATE':\n if arg_0:\n arg_0(Certificate.load(arg_7), None)\n arg_4.append((arg_7, set(), set()))\n\n # The OpenSSL TRUSTED CERTIFICATE construct adds OIDs for trusted\n # and rejected purposes, so we extract that info.\n elif arg_5 == 'TRUSTED CERTIFICATE':\n arg_8, arg_9 = TrustedCertificate.load(arg_7)\n arg_10 = False\n arg_11 = set()\n arg_12 = set()\n for arg_13 in arg_9['trust']:\n if arg_13.dotted == arg_2:\n arg_11 = set([arg_13.dotted])\n break\n arg_11.add(arg_13.dotted)\n for arg_13 in arg_9['reject']:\n if arg_13.dotted == arg_2:\n arg_10 = True\n break\n arg_12.add(arg_13.dotted)\n if arg_10:\n if arg_0:\n arg_0(arg_8, 'explicitly distrusted')\n continue\n if arg_0 and not arg_1:\n arg_0(arg_8, None)\n arg_4.append((arg_8.dump(), arg_11, arg_12))\n\n return arg_4"} +{"_id": "doc_8102", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Parse the TLS handshake from the client to the server to extract information\n including the cipher suite selected, if compression is enabled, the\n session id and if a new or reused session ticket exists.\n\n :param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n :param client_handshake_bytes:\n A byte string of the handshake data sent to the server\n\n :return:\n A dict with the following keys:\n - \"protocol\": unicode string\n - \"cipher_suite\": unicode string\n - \"compression\": boolean\n - \"session_id\": \"new\", \"reused\" or None\n - \"session_ticket: \"new\", \"reused\" or None\n \"\"\"\n\n arg_2 = None\n arg_3 = None\n arg_4 = False\n arg_5 = None\n arg_6 = None\n\n arg_7 = None\n arg_8 = None\n\n for arg_9, arg_10, arg_11 in parse_tls_records(arg_0):\n if arg_9 != b'\\x16':\n continue\n for arg_12, arg_13 in parse_handshake_messages(arg_11):\n # Ensure we are working with a ServerHello message\n if arg_12 != b'\\x02':\n continue\n arg_2 = {\n b'\\x03\\x00': \"SSLv3\",\n b'\\x03\\x01': \"TLSv1\",\n b'\\x03\\x02': \"TLSv1.1\",\n b'\\x03\\x03': \"TLSv1.2\",\n b'\\x03\\x04': \"TLSv1.3\",\n }[arg_13[0:2]]\n\n arg_14 = int_from_bytes(arg_13[34:35])\n if arg_14 > 0:\n arg_7 = arg_13[35:35 + arg_14]\n\n arg_15 = 35 + arg_14\n arg_16 = arg_13[arg_15:arg_15 + 2]\n arg_3 = CIPHER_SUITE_MAP[arg_16]\n\n arg_17 = arg_15 
+ 2\n arg_4 = arg_13[arg_17:arg_17 + 1] != b'\\x00'\n\n arg_18 = arg_17 + 1\n arg_19 = arg_13[arg_18:]\n for arg_20, arg_21 in _parse_hello_extensions(arg_19):\n if arg_20 == 35:\n arg_6 = \"new\"\n break\n break\n\n for arg_9, arg_10, arg_11 in parse_tls_records(arg_1):\n if arg_9 != b'\\x16':\n continue\n for arg_12, arg_13 in parse_handshake_messages(arg_11):\n # Ensure we are working with a ClientHello message\n if arg_12 != b'\\x01':\n continue\n\n arg_14 = int_from_bytes(arg_13[34:35])\n if arg_14 > 0:\n arg_8 = arg_13[35:35 + arg_14]\n\n arg_15 = 35 + arg_14\n arg_22 = int_from_bytes(arg_13[arg_15:arg_15 + 2])\n\n arg_17 = arg_15 + 2 + arg_22\n arg_23 = int_from_bytes(arg_13[arg_17:arg_17 + 1])\n\n # On subsequent requests, the session ticket will only be seen\n # in the ClientHello message\n if arg_7 is None and arg_6 is None:\n arg_18 = arg_17 + 1 + arg_23\n arg_19 = arg_13[arg_18:]\n for arg_20, arg_21 in _parse_hello_extensions(arg_19):\n if arg_20 == 35:\n arg_6 = \"reused\"\n break\n break\n\n if arg_7 is not None:\n if arg_8 is None:\n arg_5 = \"new\"\n else:\n if arg_8 != arg_7:\n arg_5 = \"new\"\n else:\n arg_5 = \"reused\"\n\n return {\n \"protocol\": arg_2,\n \"cipher_suite\": arg_3,\n \"compression\": arg_4,\n \"session_id\": arg_5,\n \"session_ticket\": arg_6,\n }"} +{"_id": "doc_8103", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a generator returning tuples of information about each record\n in a byte string of data from a TLS client or server. Stops as soon as it\n find a ChangeCipherSpec message since all data from then on is encrypted.\n\n :param data:\n A byte string of TLS records\n\n :return:\n A generator that yields 3-element tuples:\n [0] Byte string of record type\n [1] Byte string of protocol version\n [2] Byte string of record data\n \"\"\"\n\n arg_1 = 0\n arg_2 = len(arg_0)\n while arg_1 < arg_2:\n # Don't try to parse any more once the ChangeCipherSpec is found\n if arg_0[arg_1:arg_1 + 1] == b'\\x14':\n break\n arg_3 = int_from_bytes(arg_0[arg_1 + 3:arg_1 + 5])\n yield (\n arg_0[arg_1:arg_1 + 1],\n arg_0[arg_1 + 1:arg_1 + 3],\n arg_0[arg_1 + 5:arg_1 + 5 + arg_3]\n )\n arg_1 += 5 + arg_3"} +{"_id": "doc_8104", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a generator returning tuples of information about each message in\n a byte string of data from a TLS handshake record\n\n :param data:\n A byte string of a TLS handshake record data\n\n :return:\n A generator that yields 2-element tuples:\n [0] Byte string of message type\n [1] Byte string of message data\n \"\"\"\n\n arg_1 = 0\n arg_2 = len(arg_0)\n while arg_1 < arg_2:\n arg_3 = int_from_bytes(arg_0[arg_1 + 1:arg_1 + 4])\n yield (\n arg_0[arg_1:arg_1 + 1],\n arg_0[arg_1 + 4:arg_1 + 4 + arg_3]\n )\n arg_1 += 4 + arg_3"} +{"_id": "doc_8105", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Creates a generator returning tuples of information about each extension\n from a byte string of extension data contained in a ServerHello ores\n ClientHello message\n\n :param data:\n A byte string of a extension data from a TLS ServerHello or ClientHello\n message\n\n :return:\n A generator that yields 2-element tuples:\n [0] Byte string of extension type\n [1] Byte string of extension data\n \"\"\"\n\n if arg_0 == b'':\n return\n\n arg_1 = int_from_bytes(arg_0[0:2])\n arg_2 = 2\n arg_3 = 2 + arg_1\n\n arg_4 = arg_2\n while arg_4 < arg_3:\n arg_5 = int_from_bytes(arg_0[arg_4:arg_4 + 2])\n arg_6 = int_from_bytes(arg_0[arg_4 + 2:arg_4 + 4])\n yield (\n arg_5,\n arg_0[arg_4 + 4:arg_4 + 4 + 
arg_6]\n )\n arg_4 += 4 + arg_6"} +{"_id": "doc_8106", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Raises a TLSVerificationError due to a hostname mismatch\n\n :param certificate:\n An asn1crypto.x509.Certificate object\n\n :raises:\n TLSVerificationError\n \"\"\"\n\n arg_2 = re.match('^\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$', arg_1) or arg_1.find(':') != -1\n if arg_2:\n arg_3 = 'IP address %s' % arg_1\n else:\n arg_3 = 'domain name %s' % arg_1\n arg_4 = 'Server certificate verification failed - %s does not match' % arg_3\n arg_5 = ', '.join(arg_0.valid_ips)\n arg_6 = ', '.join(arg_0.valid_domains)\n if arg_6:\n arg_4 += ' valid domains: %s' % arg_6\n if arg_6 and arg_5:\n arg_4 += ' or'\n if arg_5:\n arg_4 += ' valid IP addresses: %s' % arg_5\n raise TLSVerificationError(arg_4, arg_0)"} +{"_id": "doc_8107", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Raises a TLSVerificationError due to certificate being expired, or not yet\n being valid\n\n :param certificate:\n An asn1crypto.x509.Certificate object\n\n :raises:\n TLSVerificationError\n \"\"\"\n\n arg_1 = arg_0['tbs_certificate']['validity']\n arg_2 = arg_1['not_after'].native\n arg_3 = arg_1['not_before'].native\n\n arg_4 = datetime.now(timezone.utc)\n\n if arg_3 > arg_4:\n arg_5 = arg_3.strftime('%Y-%m-%d %H:%M:%SZ')\n arg_6 = 'Server certificate verification failed - certificate not valid until %s' % arg_5\n elif arg_2 < arg_4:\n arg_7 = arg_2.strftime('%Y-%m-%d %H:%M:%SZ')\n arg_6 = 'Server certificate verification failed - certificate expired %s' % arg_7\n\n raise TLSVerificationError(arg_6, arg_0)"} +{"_id": "doc_8108", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Looks at the server handshake bytes to try and detect a different protocol\n\n :param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n :return:\n None, or a unicode string of \"ftp\", \"http\", \"imap\", \"pop3\", \"smtp\"\n \"\"\"\n\n if arg_0[0:5] == b'HTTP/':\n return 'HTTP'\n\n if arg_0[0:4] == b'220 ':\n if re.match(b'^[^\\r\\n]*ftp', arg_0, re.I):\n return 'FTP'\n else:\n return 'SMTP'\n\n if arg_0[0:4] == b'220-':\n return 'FTP'\n\n if arg_0[0:4] == b'+OK ':\n return 'POP3'\n\n if arg_0[0:4] == b'* OK' or arg_0[0:9] == b'* PREAUTH':\n return 'IMAP'\n\n return None"} +{"_id": "doc_8109", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Reads everything available from the socket - used for debugging when there\n is a protocol error\n\n :param socket:\n The socket to read from\n\n :return:\n A byte string of the remaining data\n \"\"\"\n\n arg_1 = b''\n arg_2 = arg_0.gettimeout()\n try:\n arg_0.settimeout(0.0)\n arg_1 += arg_0.recv(8192)\n except (socket_.error):\n pass\n finally:\n arg_0.settimeout(arg_2)\n return arg_1"} +{"_id": "doc_8110", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Takes a set of unicode string OIDs and converts vendor-specific OIDs into\n generics OIDs from RFCs.\n\n - 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.1 (server_auth)\n - 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.2 (client_auth)\n - 1.2.840.113635.100.1.8 (apple_smime) -> 1.3.6.1.5.5.7.3.4 (email_protection)\n - 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.13 (eap_over_ppp)\n - 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.14 (eap_over_lan)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.5 (ipsec_end_system)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.6 (ipsec_tunnel)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.7 
(ipsec_user)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.17 (ipsec_ike)\n - 1.2.840.113635.100.1.16 (apple_code_signing) -> 1.3.6.1.5.5.7.3.3 (code_signing)\n - 1.2.840.113635.100.1.20 (apple_time_stamping) -> 1.3.6.1.5.5.7.3.8 (time_stamping)\n - 1.3.6.1.4.1.311.10.3.2 (microsoft_time_stamp_signing) -> 1.3.6.1.5.5.7.3.8 (time_stamping)\n\n :param oids:\n A set of unicode strings\n\n :return:\n The original set of OIDs with any mapped OIDs added\n \"\"\"\n\n arg_1 = set()\n for arg_2 in arg_0:\n if arg_2 in _oid_map:\n arg_1 |= _oid_map[arg_2]\n return arg_0 | arg_1"} +{"_id": "doc_8111", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks to see if a cache file needs to be refreshed\n\n :param ca_path:\n A unicode string of the path to the cache file\n\n :param cache_length:\n An integer representing the number of hours the cache is valid for\n\n :return:\n A boolean - True if the cache needs to be updated, False if the file\n is up-to-date\n \"\"\"\n\n arg_2 = os.path.exists(arg_0)\n if not arg_2:\n return True\n\n arg_3 = os.stat(arg_0)\n\n if arg_3.st_mtime < time.time() - arg_1 * 60 * 60:\n return True\n\n if arg_3.st_size == 0:\n return True\n\n return False"} +{"_id": "doc_8112", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=None):\n \"\"\"\n Gets value of bits between selected range from memory\n\n :param start: bit address of start of bit of bits\n :param end: bit address of first bit behind bits\n :return: instance of BitsVal (derived from SimBits type) which contains\n copy of selected bits\n \"\"\"\n arg_5 = 0\n arg_6 = Bits(arg_3 - arg_2, None).fromPy(None)\n\n while arg_2 != arg_3:\n assert arg_2 < arg_3, (arg_2, arg_3)\n\n arg_7 = arg_2 // arg_1\n\n arg_8 = arg_0[arg_7]\n if arg_4 is not None:\n arg_8 = arg_8._reinterpret_cast(arg_4)\n\n arg_9 = (arg_7 + 1) * arg_1\n arg_10 = min(arg_3, arg_9) - arg_2\n arg_11 = arg_2 % arg_1\n\n arg_12 = selectBitRange(arg_8.val, arg_11, arg_10)\n arg_13 = selectBitRange(arg_8.vldMask, arg_11, arg_10)\n arg_14 = arg_8.updateTime\n\n arg_15 = mask(arg_10)\n arg_6.val |= (arg_12 & arg_15) << arg_5\n arg_6.vldMask |= (arg_13 & arg_15) << arg_5\n arg_6.updateMask = max(arg_6.updateTime, arg_14)\n\n arg_5 += arg_10\n arg_2 += arg_10\n\n return arg_6"} +{"_id": "doc_8113", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Cast HArray signal or value to signal or value of type Bits\n \"\"\"\n arg_3 = int(arg_0.size)\n arg_4 = arg_0.elmType.bit_length()\n arg_5 = arg_2.bit_length()\n if arg_3 * arg_4 != arg_5:\n raise TypeConversionErr(\n \"Size of types is different\", arg_3 * arg_4, arg_5)\n\n arg_6 = Bits(arg_4)\n arg_7 = [p._reinterpret_cast(arg_6) for p in arg_1]\n\n return Concat(*reversed(arg_7))._reinterpret_cast(arg_2)"} +{"_id": "doc_8114", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Hdl convertible in operator, check if any of items\n in \"iterable\" equals \"sigOrVal\"\n \"\"\"\n arg_2 = None\n for arg_3 in arg_1:\n arg_3 = toHVal(arg_3)\n if arg_2 is None:\n arg_2 = arg_0._eq(arg_3)\n else:\n arg_2 = arg_2 | arg_0._eq(arg_3)\n\n assert arg_2 is not None, \"Parameter iterable is empty\"\n return arg_2"} +{"_id": "doc_8115", "title": "", "text": "def Func(arg_0, arg_1) -> RtlSignalBase:\n \"Logical shift left\"\n arg_2 = arg_0._dtype.bit_length()\n return arg_0[(arg_2 - arg_1):]._concat(vec(0, arg_1))"} +{"_id": "doc_8116", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns no of bits required to store x-1\n for example x=8 returns 3\n \"\"\"\n\n if not 
isinstance(arg_0, (int, float)):\n arg_0 = int(arg_0)\n\n if arg_0 == 0 or arg_0 == 1:\n arg_1 = 1\n else:\n arg_1 = math.ceil(math.log2(arg_0))\n\n return hInt(arg_1)"} +{"_id": "doc_8117", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"c-like case of switch statement\"\n assert arg_0.parentStm is None\n arg_1 = toHVal(arg_1, arg_0.switchOn._dtype)\n\n assert isinstance(arg_1, Value), arg_1\n assert arg_1._isFullVld(), \"Cmp with invalid value\"\n assert arg_1 not in arg_0._case_value_index, (\n \"Switch statement already has case for value \", arg_1)\n\n arg_0.rank += 1\n arg_3 = []\n arg_0._case_value_index[arg_1] = len(arg_0.cases)\n arg_0.cases.append((arg_1, arg_3))\n\n arg_5 = arg_0.switchOn._eq(arg_1)\n arg_0._inputs.append(arg_5)\n arg_5.endpoints.append(arg_0)\n\n arg_0._register_stements(arg_2, arg_3)\n\n return arg_0"} +{"_id": "doc_8118", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"c-like default of switch statement\n \"\"\"\n assert arg_0.parentStm is None\n arg_0.rank += 1\n arg_0.default = []\n arg_0._register_stements(arg_1, arg_0.default)\n return arg_0"} +{"_id": "doc_8119", "title": "", "text": "def Func(arg_0, arg_1: arg_2[arg_3, arg_4],\n arg_5: arg_6[arg_7]):\n \"\"\"\n Register signals from interfaces for Interface or Unit instances\n \"\"\"\n if hasattr(arg_1, \"_interfaces\") and arg_1._interfaces:\n arg_8 = arg_1._name\n arg_9 = arg_0.vcdWriter if arg_5 is None else arg_5\n\n arg_10 = arg_9.varScope(arg_8)\n arg_0._obj2scope[arg_1] = arg_10\n\n with arg_10:\n # register all subinterfaces\n for arg_12 in arg_1._interfaces:\n arg_0.Func(arg_12, arg_10)\n\n if isinstance(arg_1, (arg_4, SimModel)):\n # register interfaces from all subunits\n for arg_13 in arg_1._units:\n arg_0.Func(arg_13, arg_10)\n\n return arg_10\n else:\n arg_14 = arg_1._dtype\n if isinstance(arg_14, arg_0.supported_type_classes):\n arg_15, arg_16, arg_17 = vcdTypeInfoForHType(arg_14)\n try:\n arg_5.addVar(arg_1, getSignalName(arg_1),\n arg_15, arg_16, arg_17)\n except VarAlreadyRegistered:\n pass"} +{"_id": "doc_8120", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n This method is called for every value change of any signal.\n \"\"\"\n try:\n arg_0.vcdWriter.Func(arg_1, arg_2, arg_3)\n except KeyError:\n # not every signal has to be registered\n pass"} +{"_id": "doc_8121", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Serialize Func instance\n\n :param scope: name scope to prevent name collisions\n \"\"\"\n arg_3 = arg_1.statements\n arg_4 = arg_2.withIndent()\n arg_5 = [arg_0.asHdl(s, arg_4) for s in arg_3]\n arg_1.name = arg_2.scope.checkedName(arg_1.name, arg_1)\n\n return arg_0.methodTmpl.render(\n indent=getIndent(arg_2.indent),\n arg_6=arg_1.name,\n statements=arg_5\n )"} +{"_id": "doc_8122", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Walk all interfaces on unit and instantiate agent for every interface.\n\n :return: all monitor/driver functions which should be added to simulation\n as processes\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0._interfaces:\n if not arg_2._isExtern:\n continue\n\n arg_2._initSimAgent()\n assert arg_2._ag is not None, arg_2\n arg_3 = [arg_2._ag, ]\n\n if arg_2._direction == INTF_DIRECTION.MASTER:\n arg_4 = list(map(lambda a: a.getMonitors(), arg_3))\n elif arg_2._direction == INTF_DIRECTION.SLAVE:\n arg_4 = list(map(lambda a: a.getDrivers(), arg_3))\n else:\n raise NotImplementedError(\"intf._direction %r for %r\" % (\n arg_2._direction, arg_2))\n\n for arg_5 in arg_4:\n arg_1.extend(arg_5)\n\n 
return arg_1"} +{"_id": "doc_8123", "title": "", "text": "def Func(arg_0):\n \"\"\"\n If interface has associated clk return it otherwise\n try to find clk on parent recursively\n \"\"\"\n arg_1 = arg_0._associatedClk\n\n if arg_1 is not None:\n return arg_1\n\n arg_2 = arg_0._parent\n assert arg_2 is not None\n\n if isinstance(arg_2, UnitBase):\n return getClk(arg_2)\n else:\n return arg_2.Func()"} +{"_id": "doc_8124", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n same like itertools.groupby\n\n :note: This function does not needs initial sorting like itertools.groupby\n\n :attention: Order of pairs is not deterministic.\n \"\"\"\n arg_2 = {}\n for arg_3 in arg_0:\n arg_4 = arg_1(arg_3)\n try:\n arg_5 = arg_2[arg_4]\n except KeyError:\n arg_5 = []\n arg_2[arg_4] = arg_5\n arg_5.append(arg_3)\n\n yield from arg_2.items()"} +{"_id": "doc_8125", "title": "", "text": "def Func(arg_0, arg_1=arg_2):\n \"\"\"\n Flatten nested lists, tuples, generators and maps\n\n :param level: maximum depth of Funcing\n \"\"\"\n if arg_1 >= 0 and isinstance(arg_0, (list, tuple, GeneratorType,\n map, zip)):\n arg_1 -= 1\n for arg_3 in arg_0:\n yield from Func(arg_3, arg_1=arg_1)\n else:\n yield arg_0"} +{"_id": "doc_8126", "title": "", "text": "def Func(arg_0):\n \"\"\"\n If signal is not driving anything remove it\n \"\"\"\n\n arg_1 = set()\n arg_2 = arg_0.signals\n\n while arg_2:\n arg_3 = set()\n\n for arg_4 in arg_2:\n if not arg_4.endpoints:\n try:\n if arg_4._interface is not None:\n # skip interfaces before we want to check them,\n # they should not be optimized out from design\n continue\n except AttributeError:\n pass\n\n for arg_5 in arg_4.drivers:\n # drivers of this signal are useless rm them\n if isinstance(arg_5, Operator):\n arg_6 = arg_5.operands\n if arg_5.result is arg_4:\n arg_5.result = None\n else:\n arg_6 = arg_5._inputs\n arg_0.statements.discard(arg_5)\n\n for arg_8 in arg_6:\n if not isinstance(arg_8, Value):\n try:\n arg_8.endpoints.remove(arg_5)\n except KeyError:\n # this operator has 2x+ same operand\n continue\n\n arg_3.add(arg_8)\n\n arg_1.add(arg_4)\n\n if arg_1:\n for arg_4 in arg_1:\n if arg_4.ctx == arg_0:\n arg_0.signals.remove(arg_4)\n arg_3.discard(arg_4)\n arg_1 = set()\n arg_2 = arg_3"} +{"_id": "doc_8127", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1):\n \"\"\"\n Try merge procB into procA\n\n :raise IncompatibleStructure: if merge is not possible\n :attention: procA is now result if merge has succeed\n :return: procA which is now result of merge\n \"\"\"\n if (checkIfIsTooSimple(arg_0) or\n checkIfIsTooSimple(arg_2) or\n areSetsIntersets(arg_0.outputs, arg_2.sensitivityList) or\n areSetsIntersets(arg_2.outputs, arg_0.sensitivityList) or\n not HdlStatement._is_mergable_statement_list(arg_0.statements, arg_2.statements)):\n raise IncompatibleStructure()\n\n arg_0.statements = HdlStatement._merge_statement_lists(\n arg_0.statements, arg_2.statements)\n\n arg_0.outputs.extend(arg_2.outputs)\n arg_0.inputs.extend(arg_2.inputs)\n arg_0.sensitivityList.extend(arg_2.sensitivityList)\n\n return arg_0"} +{"_id": "doc_8128", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n on writeReqRecieved in monitor mode\n \"\"\"\n arg_0.requests.append((WRITE, arg_2, arg_3))"} +{"_id": "doc_8129", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3=None,\n arg_4: arg_5=arg_6,\n arg_7=arg_8(), arg_9: arg_3=None):\n \"\"\"\n Convert unit to RTL using specified serializer\n\n :param unitOrCls: unit instance or class, which should be converted\n 
:param name: name override of top unit (if is None name is derived\n form class name)\n :param serializer: serializer which should be used for to RTL conversion\n :param targetPlatform: metainformatins about target platform, distributed\n on every unit under _targetPlatform attribute\n before Unit._impl() is called\n :param saveTo: directory where files should be stored\n If None RTL is returned as string.\n :raturn: if saveTo returns RTL string else returns list of file names\n which were created\n \"\"\"\n if not isinstance(arg_0, arg_1):\n arg_10 = arg_0()\n else:\n arg_10 = arg_0\n\n arg_10._loadDeclarations()\n if arg_2 is not None:\n assert isinstance(arg_2, arg_3)\n arg_10._name = arg_2\n\n arg_12 = arg_4.getBaseNameScope()\n arg_13 = {}\n\n # unitCls : unitobj\n arg_14 = {}\n\n # (unitCls, paramsValues) : unitObj\n # where paramsValues are dict name:value\n arg_15 = {}\n\n arg_16 = True\n\n arg_17 = arg_9 is not None\n if arg_17:\n os.makedirs(arg_9, exist_ok=True)\n arg_18 = UniqList()\n else:\n arg_19 = []\n\n for arg_20 in arg_10._Func(arg_7):\n arg_16 = arg_4.serializationDecision(\n arg_20,\n arg_14,\n arg_15)\n if arg_16:\n if isinstance(arg_20, Entity):\n arg_21 = arg_12.fork(1)\n arg_21.setLevel(2)\n arg_22 = arg_4.getBaseContext()\n arg_22.scope = arg_21\n arg_13[arg_20] = arg_22\n arg_22.currentUnit = arg_20.origin\n\n arg_25 = arg_4.Entity(arg_20, arg_22)\n if arg_17:\n arg_26 = arg_20.name + arg_4.fileExtension\n arg_27 = 'w'\n\n elif isinstance(arg_20, Architecture):\n try:\n arg_22 = arg_13[arg_20.entity]\n except KeyError:\n raise SerializerException(\n \"Entity should be serialized\"\n \" before architecture of %s\"\n % (arg_20.getEntityName()))\n\n arg_25 = arg_4.Architecture(arg_20, arg_22)\n if arg_17:\n arg_26 = arg_20.getEntityName() + arg_4.fileExtension\n arg_27 = 'a'\n else:\n if hasattr(arg_20, \"_hdlSources\"):\n for arg_28 in arg_20._hdlSources:\n if isinstance(arg_28, arg_3):\n shutil.copy2(arg_28, arg_9)\n arg_18.append(arg_28)\n continue\n else:\n arg_25 = arg_4.asHdl(arg_20)\n\n if arg_25:\n if arg_17:\n arg_29 = os.path.join(arg_9, arg_26)\n arg_18.append(arg_29)\n with open(arg_29, arg_27) as f:\n if arg_27 == 'a':\n f.write(\"\\n\")\n\n f.write(\n arg_4.formatter(arg_25)\n )\n else:\n arg_19.append(arg_25)\n\n elif not arg_17:\n try:\n arg_2 = '\"%s\"' % arg_20.name\n except AttributeError:\n arg_2 = \"\"\n arg_19.append(arg_4.comment(\n \"Object of class %s, %s was not serialized as specified\" % (\n arg_20.__class__.__name__, arg_2)))\n\n if arg_17:\n return arg_18\n else:\n return arg_4.formatter(\n \"\\n\".join(arg_19)\n )"} +{"_id": "doc_8130", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3, arg_4=None, arg_5=None, arg_6=None):\n \"\"\"\n Create new Funcnal in this context\n\n :param clk: clk Funcnal, if specified Funcnal is synthesized\n as SyncSignal\n :param syncRst: synchronous reset Funcnal\n \"\"\"\n if isinstance(arg_6, RtlSignal):\n assert arg_6._const, \\\n \"Initial value of register has to be constant\"\n arg_7 = arg_6._auto_cast(arg_2)\n elif isinstance(arg_6, Value):\n arg_7 = arg_6._auto_cast(arg_2)\n elif isinstance(arg_6, InterfaceBase):\n arg_7 = arg_6._Func\n else:\n arg_7 = arg_2.fromPy(arg_6)\n\n if arg_4 is not None:\n arg_8 = RtlSyncSignal(arg_0, arg_1, arg_2, arg_7)\n if arg_5 is not None and arg_6 is None:\n raise SigLvlConfErr(\n \"Probably forgotten default value on sync Funcnal %s\", arg_1)\n if arg_5 is not None:\n arg_9 = If(arg_5._isOn(),\n RtlSignal.__call__(arg_8, arg_7)\n ).Else(\n 
RtlSignal.__call__(arg_8, arg_8.next)\n )\n else:\n arg_9 = [RtlSignal.__call__(arg_8, arg_8.next)]\n\n If(arg_4._onRisingEdge(),\n arg_9\n )\n else:\n if arg_5:\n raise SigLvlConfErr(\n \"Signal %s has reset but has no clk\" % arg_1)\n arg_8 = RtlSignal(arg_0, arg_1, arg_2, arg_6=arg_7)\n\n arg_0.Funcnals.add(arg_8)\n\n return arg_8"} +{"_id": "doc_8131", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get maximum _instId from all assigments in statement\n \"\"\"\n arg_1 = 0\n if isinstance(arg_0, Assignment):\n return arg_0._instId\n elif isinstance(arg_0, WaitStm):\n return arg_1\n else:\n for arg_2 in arg_0._iter_stms():\n arg_1 = max(arg_1, Func(arg_2))\n return arg_1"} +{"_id": "doc_8132", "title": "", "text": "def Func(arg_0):\n \"\"\"\n get max statement id,\n used for sorting of processes in architecture\n \"\"\"\n arg_1 = 0\n for arg_2 in arg_0.statements:\n arg_1 = max(arg_1, getMaxStmIdForStm(arg_2))\n return arg_1"} +{"_id": "doc_8133", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"write data to interface\"\"\"\n arg_1.write(arg_2, arg_0.intf.data)"} +{"_id": "doc_8134", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Note that this interface will be master\n\n :return: self\n \"\"\"\n assert not hasattr(arg_0, \"_interfaces\") or not arg_0._interfaces, \\\n \"Too late to change direction of interface\"\n arg_0._direction = DIRECTION.asIntfDirection(DIRECTION.opposite(arg_0.FuncasterDir))\n\n return arg_0"} +{"_id": "doc_8135", "title": "", "text": "def Func(arg_0):\n \"\"\"\n load declaratoins from _declr method\n This function is called first for parent and then for children\n \"\"\"\n if not hasattr(arg_0, \"_interfaces\"):\n arg_0._interfaces = []\n arg_0._setAttrListener = arg_0._declrCollector\n arg_0._declr()\n arg_0._setAttrListener = None\n\n for arg_3 in arg_0._interfaces:\n arg_3._isExtern = arg_0._isExtern\n arg_3.Func()\n\n for arg_5 in arg_0._params:\n arg_5.setReadOnly()\n \n if arg_0._isExtern:\n # direction from inside of unit (reverset compared to outside direction)\n if arg_0._direction == INTF_DIRECTION.UNKNOWN:\n arg_0._direction = INTF_DIRECTION.MASTER\n arg_0._setDirectionsLikeIn(arg_0._direction)"} +{"_id": "doc_8136", "title": "", "text": "def Func(arg_0, arg_1, arg_2='', arg_3=None):\n \"\"\"\n generate _sig for each interface which has no subinterface\n if already has _sig return it instead\n\n :param context: instance of RtlNetlist where signals should be created\n :param prefix: name prefix for created signals\n :param typeTransform: optional function (type) returns modified type\n for signal\n \"\"\"\n arg_4 = []\n if arg_0._interfaces:\n for arg_5 in arg_0._interfaces:\n arg_4.extend(\n arg_5.Func(arg_1, arg_2,\n arg_3=arg_3))\n else:\n if hasattr(arg_0, '_sig'):\n arg_4 = [arg_0._sig]\n else:\n arg_6 = arg_0._dtype\n if arg_3 is not None:\n arg_6 = arg_3(arg_6)\n\n arg_7 = arg_1.sig(arg_2 + arg_0._getPhysicalName(), arg_6)\n arg_7._interface = arg_0\n arg_0._sig = arg_7\n\n if hasattr(arg_0, '_boundedEntityPort'):\n arg_0._boundedEntityPort.connectSig(arg_0._sig)\n arg_4 = [arg_7]\n\n return arg_4"} +{"_id": "doc_8137", "title": "", "text": "def Func(arg_0):\n \"\"\"Get name in HDL \"\"\"\n if hasattr(arg_0, \"_boundedEntityPort\"):\n return arg_0._boundedEntityPort.name\n else:\n return arg_0._getFullName().replace('.', arg_0._NAME_SEPARATOR)"} +{"_id": "doc_8138", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Load all operands and process them by self._FuncFn\"\"\"\n def getVal(arg_3):\n while not 
isinstance(arg_3, Value):\n arg_3 = arg_3._val\n\n return arg_3\n\n arg_4 = list(map(getVal, arg_1.operands))\n\n if isEventDependentOp(arg_1.operator):\n arg_4.append(arg_2.now)\n elif arg_1.operator == AllOps.IntToBits:\n arg_4.append(arg_1.result._dtype)\n\n return arg_0._FuncFn(*arg_4)"} +{"_id": "doc_8139", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Cast signed-unsigned, to int or bool\n \"\"\"\n if isinstance(arg_1, Value):\n return Func__val(arg_0, arg_1, arg_2)\n elif isinstance(arg_2, HBool):\n if arg_0.bit_length() == 1:\n arg_3 = 0 if arg_1._dtype.negated else 1\n return arg_1._eq(arg_0.getValueCls().fromPy(arg_3, arg_0))\n elif isinstance(arg_2, Bits):\n if arg_0.bit_length() == arg_2.bit_length():\n return arg_1._convSign(arg_2.signed)\n elif arg_2 == INT:\n return Operator.withRes(AllOps.BitsToInt, [arg_1], arg_2)\n\n return default_auto_cast_fn(arg_0, arg_1, arg_2)"} +{"_id": "doc_8140", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reinterpret signal of type Bits to signal of type HStruct\n \"\"\"\n arg_2 = arg_1.fromPy(None)\n arg_3 = 0\n for arg_4 in arg_1.fields:\n arg_5 = arg_4.dtype\n arg_6 = arg_5.bit_length()\n if arg_4.name is not None:\n arg_7 = arg_0[(arg_6 + arg_3):arg_3]\n arg_7 = arg_7._reinterpret_cast(arg_5)\n setattr(arg_2, arg_4.name, arg_7)\n\n arg_3 += arg_6\n\n return arg_2"} +{"_id": "doc_8141", "title": "", "text": "def Func(arg_0, arg_1: 'TransTmpl', arg_2: arg_3):\n \"\"\"\n Group transaction parts splited on words to words\n\n :param transaction: TransTmpl instance which parts\n should be grupped into words\n :return: generator of tuples (wordIndex, list of transaction parts\n in this word)\n \"\"\"\n arg_4 = None\n arg_5 = []\n arg_6 = arg_0.wordWidth\n for arg_7 in arg_0.splitOnWords(arg_1, arg_2):\n arg_8 = arg_7.startOfPart // arg_6\n if arg_4 is None:\n arg_4 = arg_8\n arg_5.append(arg_7)\n elif arg_8 > arg_4:\n yield (arg_4, arg_5)\n arg_4 = arg_8\n arg_5 = [arg_7, ]\n else:\n arg_5.append(arg_7)\n\n if arg_5:\n yield (arg_4, arg_5)"} +{"_id": "doc_8142", "title": "", "text": "def Func(arg_0, arg_1=\"\", arg_2=0, arg_3=arg_4.stdout):\n \"\"\"\n Pretty print interface\n \"\"\"\n try:\n arg_6 = arg_0._sig\n except AttributeError:\n arg_6 = \"\"\n if arg_6 is not \"\":\n arg_6 = \" \" + repr(arg_6)\n\n arg_3.write(\"\".join([getIndent(arg_2), arg_1, repr(arg_0._getFullName()),\n arg_6]))\n arg_3.write(\"\\n\")\n \n if isinstance(arg_0, HObjList):\n for arg_7, arg_8 in enumerate(arg_0):\n # interfaces have already name of this array and index in it's name\n Func(arg_8, arg_1=arg_1, arg_2=arg_2 + 1, arg_3=arg_3)\n else:\n for arg_7 in arg_0._interfaces:\n Func(arg_7, arg_2=arg_2 + 1, arg_3=arg_3)"} +{"_id": "doc_8143", "title": "", "text": "def Func(arg_0: 'TransTmpl',\n arg_1: arg_2,\n arg_3: arg_4[arg_2, arg_5]=arg_6,\n arg_7: arg_4[arg_2, arg_5]=arg_6,\n arg_8: arg_9=False,\n arg_10: arg_9=False) -> Generator[\n 'FrameTmpl', None, None]:\n \"\"\"\n Convert transaction template into FrameTmpls\n\n :param transaction: transaction template used which are FrameTmpls\n created from\n :param wordWidth: width of data signal in target interface\n where frames will be used\n :param maxFrameLen: maximum length of frame in bits,\n if exceeded another frame will be created\n :param maxPaddingWords: maximum of continual padding words in frame,\n if exceed frame is split and words are cut of\n :attention: if maxPaddingWords 0\n assert arg_7 >= 0\n if arg_7 < arg_6:\n assert arg_8 or arg_10, \\\n \"Padding has to be cut off 
somewhere\"\n\n arg_14 = TransTmplWordIterator(arg_1)\n arg_15 = 0\n arg_16 = arg_3\n arg_17 = []\n for arg_18, arg_19 in arg_14.groupByWordIndex(arg_0, 0):\n if arg_18 * arg_1 >= arg_16:\n # now in first+ word behind the frame\n # cut off padding at end of frame\n arg_20 = arg_18 - arg_15\n if arg_10 and arg_20 > arg_7:\n # cut off padding and align end of frame to word\n arg_21 = (arg_15 + 1) * arg_1\n else:\n arg_21 = arg_18 * arg_1\n\n yield FrameTmpl(arg_0,\n arg_1,\n arg_13,\n arg_21,\n arg_17)\n\n # prepare for start of new frame\n arg_17 = []\n arg_11 = True\n arg_12 = False\n # start on new word\n arg_13 = arg_21\n arg_16 = arg_13 + arg_3\n arg_15 = arg_18\n\n # check if padding at potential end of frame can be cut off\n if (not arg_11\n and arg_10\n and arg_18 - arg_15 > 1):\n # there is too much continual padding,\n # cut it out and start new frame\n arg_21 = (arg_15 + 1) * arg_1\n yield FrameTmpl(arg_0,\n arg_1,\n arg_13,\n arg_21,\n arg_17)\n\n # prepare for start of new frame\n arg_17 = []\n arg_11 = True\n arg_12 = False\n # start on new word\n arg_13 = arg_21\n arg_16 = arg_13 + arg_3\n arg_15 = arg_18 - 1\n\n if arg_11:\n arg_12 = True\n arg_11 = False\n # cut off padding at start of frame\n arg_20 = arg_18 - arg_15\n if arg_8 and arg_20 > arg_7:\n arg_13 += arg_20 * arg_1\n\n arg_16 = arg_13 + arg_3\n\n # resolve end of this part\n arg_17.extend(arg_19)\n arg_15 = arg_18\n\n # reminder in \"parts\" after last iteration\n arg_16 = arg_0.bitAddrEnd\n arg_22 = not (arg_10 or arg_8)\n if arg_12 or (arg_22\n and arg_16 != arg_13):\n # cut off padding at end of frame\n arg_23 = (arg_15 + 1) * arg_1\n if arg_16 < arg_23:\n arg_16 = arg_23\n else:\n arg_20 = arg_14.fullWordCnt(arg_23, arg_16)\n if arg_10 and arg_20 > arg_7:\n arg_16 -= arg_20 * arg_1\n # align end of frame to word\n arg_16 = min(arg_13 +\n arg_3, arg_16)\n\n yield FrameTmpl(arg_0,\n arg_1,\n arg_13,\n arg_16,\n arg_17)\n arg_17 = []\n arg_13 = arg_16\n\n # final padding on the end\n while arg_22 and arg_13 < arg_0.bitAddrEnd:\n arg_16 = min(arg_13 +\n arg_3, arg_0.bitAddrEnd)\n\n yield FrameTmpl(arg_0,\n arg_1,\n arg_13,\n arg_16,\n [])\n\n arg_13 = arg_16"} +{"_id": "doc_8144", "title": "", "text": "def Func(arg_0, arg_1: arg_2=False):\n \"\"\"\n Walk enumerated words in this frame\n\n :attention: not all indexes has to be present, only words\n with items will be generated when not showPadding\n :param showPadding: padding TransParts are also present\n :return: generator of tuples (wordIndex, list of TransParts\n in this word)\n \"\"\"\n arg_3 = 0\n arg_4 = arg_0.startBitAddr\n arg_5 = []\n for arg_6 in arg_0.parts:\n arg_7 = arg_6.startOfPart\n if arg_1 and arg_7 != arg_4:\n # insert padding\n while arg_7 != arg_4:\n assert arg_7 >= arg_4, (arg_7, arg_4)\n arg_8 = ceil(\n (arg_4 + 1) / arg_0.wordWidth) * arg_0.wordWidth\n arg_9 = min(arg_8, arg_7)\n arg_10 = TransPart(arg_0, None, arg_4, arg_9, 0)\n arg_5.append(arg_10)\n\n if arg_9 >= arg_8:\n yield (arg_3, arg_5)\n arg_3 += 1\n arg_5 = []\n\n arg_4 = arg_9\n\n if arg_0._wordIndx(arg_4) != arg_0._wordIndx(arg_6.startOfPart):\n yield (arg_3, arg_5)\n\n arg_3 += 1\n arg_5 = []\n arg_4 = arg_6.endOfPart\n\n arg_5.append(arg_6)\n arg_4 = arg_6.endOfPart\n if arg_4 % arg_0.wordWidth == 0:\n yield (arg_3, arg_5)\n\n arg_3 += 1\n arg_5 = []\n\n if arg_1 and (arg_5\n or arg_4 != arg_0.endBitAddr\n or arg_4 % arg_0.wordWidth != 0):\n # align end to end of last word\n arg_7 = ceil(arg_0.endBitAddr / arg_0.wordWidth) * arg_0.wordWidth\n while arg_7 != arg_4:\n assert 
arg_7 >= arg_4, (arg_7, arg_4)\n arg_8 = ((arg_4 // arg_0.wordWidth) + 1) * arg_0.wordWidth\n arg_9 = min(arg_8, arg_7)\n arg_10 = TransPart(arg_0, None, arg_4, arg_9, 0)\n arg_10.parent = arg_0\n arg_5.append(arg_10)\n\n if arg_9 >= arg_8:\n yield (arg_3, arg_5)\n arg_3 += 1\n arg_5 = []\n\n arg_4 = arg_9\n\n if arg_5:\n # in the case end of frame is not aligned to end of word\n yield (arg_3, arg_5)"} +{"_id": "doc_8145", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Pack data into list of BitsVal of specified dataWidth\n\n :param data: dict of values for struct fields {fieldName: value}\n\n :return: list of BitsVal which are representing values of words\n \"\"\"\n arg_2 = simBitsT(arg_0.wordWidth, None)\n arg_3 = arg_0._fieldToTPart\n if arg_3 is None:\n arg_3 = arg_0._fieldToTPart = arg_0.fieldToDataDict(\n arg_0.origin.dtype,\n arg_1,\n {})\n\n for arg_4, arg_5 in arg_0.walkWords(showPadding=True):\n arg_6 = 0\n arg_7 = 0\n for arg_8 in arg_5:\n arg_9, arg_10 = arg_8.getBusWordBitRange()\n arg_11, arg_12 = arg_8.getFieldBitRange()\n if not arg_8.isPadding:\n arg_13 = arg_3.get(arg_8.tmpl.origin, None)\n else:\n arg_13 = None\n\n if arg_13 is None:\n arg_14 = 0\n arg_15 = 0\n else:\n arg_14 = selectBitRange(arg_13, arg_12, arg_11 - arg_12)\n arg_15 = mask(arg_9 - arg_10) << arg_10\n\n arg_7 = setBitRange(arg_7, arg_10, arg_9 - arg_10, arg_14)\n arg_6 = setBitRange(arg_7, arg_10, arg_9 - arg_10, arg_15)\n\n yield arg_2.getValueCls()(arg_7, arg_2,\n arg_6, -1)"} +{"_id": "doc_8146", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Clean informations about enclosure for outputs and sensitivity\n of this statement\n \"\"\"\n arg_0._enclosed_for = None\n arg_0._sensitivity = None\n for arg_3 in arg_0._iter_stms():\n arg_3.Func()"} +{"_id": "doc_8147", "title": "", "text": "def Func(arg_0,\n arg_1: arg_2[arg_3],\n arg_4: arg_5, arg_6: arg_7)\\\n -> None:\n \"\"\"\n Discover sensitivity for list of signals\n\n \"\"\"\n arg_8 = arg_5()\n for arg_9 in arg_1:\n arg_9._walk_sensitivity(arg_8, arg_4, arg_6)\n if arg_6.contains_ev_dependency:\n break\n\n # if event dependent sensitivity found do not add other sensitivity\n if not arg_6.contains_ev_dependency:\n arg_6.extend(arg_8)"} +{"_id": "doc_8148", "title": "", "text": "def Func(arg_0):\n \"\"\"\n get RtlNetlist context from signals\n \"\"\"\n for arg_1 in chain(arg_0._inputs, arg_0._outputs):\n if arg_1.ctx:\n return arg_1.ctx\n else:\n # Param instances does not have context\n continue\n raise HwtSyntaxError(\n \"Statement does not have any signal in any context\", arg_0)"} +{"_id": "doc_8149", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2,\n arg_4: arg_5[\"HdlStatement\"]) -> None:\n \"\"\"\n Update signal IO after reuce atempt\n\n :param self_reduced: if True this object was reduced\n :param io_changed: if True IO of this object may changed\n and has to be updated\n :param result_statements: list of statements which are result\n of reduce operation on this statement\n \"\"\"\n\n arg_6 = arg_0.parentStm\n if arg_1:\n arg_7 = arg_6 is None\n # update signal drivers/endpoints\n if arg_7:\n # disconnect self from signals\n arg_8 = arg_0._get_rtl_context()\n arg_8.statements.remove(arg_0)\n arg_8.statements.update(arg_4)\n\n for arg_9 in arg_0._inputs:\n arg_9.endpoints.discard(arg_0)\n for arg_10 in arg_0._outputs:\n arg_10.drivers.remove(arg_0)\n\n for arg_11 in arg_4:\n arg_11.parentStm = arg_6\n if arg_6 is None:\n # conect signals to child statements\n for arg_12 in arg_11._inputs:\n 
arg_12.endpoints.append(arg_11)\n for arg_13 in arg_11._outputs:\n arg_13.drivers.append(arg_11)\n else:\n # parent has to update it's inputs/outputs\n if arg_3:\n arg_0._inputs = UniqList()\n arg_0._outputs = UniqList()\n arg_0._collect_io()"} +{"_id": "doc_8150", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n After merging statements update IO, sensitivity and context\n\n :attention: rank is not updated\n \"\"\"\n arg_0._inputs.extend(arg_1._inputs)\n arg_0._outputs.extend(arg_1._outputs)\n\n if arg_0._sensitivity is not None:\n arg_0._sensitivity.extend(arg_1._sensitivity)\n else:\n assert arg_1._sensitivity is None\n\n if arg_0._enclosed_for is not None:\n arg_0._enclosed_for.update(arg_1._enclosed_for)\n else:\n assert arg_1._enclosed_for is None\n\n arg_2 = arg_1.parentStm is None\n if arg_2:\n arg_1._get_rtl_context().statements.remove(arg_1)\n for arg_3 in arg_1._inputs:\n arg_3.endpoints.discard(arg_1)\n arg_3.endpoints.append(arg_0)\n\n for arg_3 in arg_1._outputs:\n arg_3.drivers.discard(arg_1)\n arg_3.drivers.append(arg_0)"} +{"_id": "doc_8151", "title": "", "text": "def Func(arg_0: arg_1[\"HdlStatement\"])\\\n -> Tuple[arg_1[\"HdlStatement\"], int]:\n \"\"\"\n Merge statements in list to remove duplicated if-then-else trees\n\n :return: tuple (list of merged statements, rank decrease due merging)\n :note: rank decrease is sum of ranks of reduced statements\n :attention: statement list has to me mergable\n \"\"\"\n arg_2 = {}\n for arg_3, arg_4 in enumerate(arg_0):\n arg_2[arg_4] = arg_3\n\n arg_5 = []\n arg_6 = 0\n\n for arg_7, arg_8 in groupedby(arg_0, lambda s: s.rank):\n if arg_7 == 0:\n arg_5.extend(arg_8)\n else:\n if len(arg_8) == 1:\n arg_5.extend(arg_8)\n continue\n\n # try to merge statements if they are same condition tree\n for arg_9, arg_10 in enumerate(arg_8):\n if arg_10 is None:\n continue\n\n for arg_11, arg_12 in enumerate(islice(arg_8, arg_9 + 1, None)):\n if arg_12 is None:\n continue\n\n if arg_10._is_mergable(arg_12):\n arg_6 += arg_12.rank\n arg_10._merge_with_other_stm(arg_12)\n arg_8[arg_9 + 1 + arg_11] = None\n arg_5.append(arg_10)\n else:\n arg_5.append(arg_10)\n arg_5.append(arg_12)\n\n arg_5.sort(key=lambda arg_4: arg_2[arg_4])\n return arg_5, arg_6"} +{"_id": "doc_8152", "title": "", "text": "def Func(arg_0: arg_1[\"HdlStatement\"]):\n \"\"\"\n Simplify statements in the list\n \"\"\"\n arg_2 = False\n arg_3 = []\n\n for arg_4 in arg_0:\n arg_5, arg_6 = arg_4._try_reduce()\n arg_3.extend(arg_5)\n arg_2 |= arg_6\n\n arg_3, arg_7 = HdlStatement._merge_statements(\n arg_3)\n\n return arg_3, arg_7, arg_2"} +{"_id": "doc_8153", "title": "", "text": "def Func(arg_0):\n \"\"\"\n After parrent statement become event dependent\n propagate event dependency flag to child statements\n \"\"\"\n if not arg_0._is_completly_event_dependent:\n arg_0._is_completly_event_dependent = True\n for arg_2 in arg_0._iter_stms():\n arg_2.Func()"} +{"_id": "doc_8154", "title": "", "text": "def Func(arg_0, arg_1: arg_2[\"HdlStatement\"],\n arg_3: arg_2[\"HdlStatement\"]):\n \"\"\"\n Append statements to this container under conditions specified\n by condSet\n \"\"\"\n for arg_4 in flatten(arg_1):\n assert arg_4.parentStm is None, arg_4\n arg_4._set_parent_stm(arg_0)\n arg_3.append(arg_4)"} +{"_id": "doc_8155", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Disconnect this statement from signals and delete it from RtlNetlist context\n\n :attention: signal endpoints/drivers will be altered\n that means they can not be used for iteration\n \"\"\"\n arg_1 = 
arg_0._get_rtl_context()\n for arg_2 in arg_0._inputs:\n arg_2.endpoints.discard(arg_0)\n\n for arg_3 in arg_0._outputs:\n arg_3.drivers.remove(arg_0)\n\n arg_1.statements.remove(arg_0)"} +{"_id": "doc_8156", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3, arg_4=None, arg_5=None, arg_6=None):\n \"\"\"\n Create register in this unit\n\n :param defVal: default value of this register,\n if this value is specified reset of this component is used\n (unit has to have single interface of class Rst or Rst_n)\n :param clk: optional clok signal specification\n :param rst: optional reset signal specification\n :note: rst/rst_n resolution is done from signal type,\n if it is negated type it is rst_n\n :note: if clk or rst is not specifid default signal\n from parent unit will be used\n \"\"\"\n if arg_5 is None:\n arg_5 = getClk(arg_0)\n\n if arg_4 is None:\n # if no value is specified reset is not required\n arg_6 = None\n else:\n arg_6 = getRst(arg_0)._sig\n\n if isinstance(arg_2, HStruct):\n if arg_4 is not None:\n raise NotImplementedError()\n arg_7 = arg_2.fromPy(None)\n for arg_8 in arg_2.fields:\n if arg_8.name is not None:\n arg_9 = arg_0.Func(\"%s_%s\" % (arg_1, arg_8.name), arg_8.dtype)\n setattr(arg_7, arg_8.name, arg_9)\n\n return arg_7\n\n return arg_0._ctx.sig(arg_1,\n arg_2=arg_2,\n arg_5=arg_5._sig,\n syncRst=arg_6,\n arg_4=arg_4)"} +{"_id": "doc_8157", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3, arg_4=None):\n \"\"\"\n Create signal in this unit\n \"\"\"\n if isinstance(arg_2, HStruct):\n if arg_4 is not None:\n raise NotImplementedError()\n arg_5 = arg_2.fromPy(None)\n for arg_6 in arg_2.fields:\n if arg_6.name is not None:\n arg_7 = arg_0.Func(\"%s_%s\" % (arg_1, arg_6.name), arg_6.dtype)\n setattr(arg_5, arg_6.name, arg_7)\n\n return arg_5\n\n return arg_0._ctx.sig(arg_1, arg_2=arg_2, arg_4=arg_4)"} +{"_id": "doc_8158", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Walk all simple values in HStruct or HArray\n \"\"\"\n arg_2 = arg_0._dtype\n if isinstance(arg_2, Bits):\n yield arg_0\n elif isinstance(arg_2, HUnion):\n yield from Func(arg_0._val, arg_1=arg_1)\n elif isinstance(arg_2, HStruct):\n for arg_3 in arg_2.fields:\n arg_4 = arg_3.name is None\n if not arg_4 or not arg_1:\n if arg_4:\n arg_5 = arg_3.dtype.fromPy(None)\n else:\n arg_5 = getattr(arg_0, arg_3.name)\n\n yield from Func(arg_5)\n\n elif isinstance(arg_2, HArray):\n for arg_6 in arg_0:\n yield from Func(arg_6)\n else:\n raise NotImplementedError(arg_2)"} +{"_id": "doc_8159", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert signum, no bit manipulation just data are represented\n differently\n\n :param signed: if True value will be signed,\n if False value will be unsigned,\n if None value will be vector without any sign specification\n \"\"\"\n if isinstance(arg_0, Value):\n return arg_0.Func__val(arg_1)\n else:\n if arg_0._dtype.signed == arg_1:\n return arg_0\n arg_2 = copy(arg_0._dtype)\n arg_2.signed = arg_1\n if arg_1 is None:\n arg_3 = AllOps.BitsAsVec\n elif arg_1:\n arg_3 = AllOps.BitsAsSigned\n else:\n arg_3 = AllOps.BitsAsUnsigned\n\n return Operator.withRes(arg_3, [arg_0], arg_2)"} +{"_id": "doc_8160", "title": "", "text": "def Func(arg_0: arg_1, *arg_2):\n \"\"\"\n register Func for process\n \"\"\"\n for arg_3 in arg_2:\n if isinstance(arg_3, tuple):\n arg_4, arg_3 = arg_3\n if arg_4 == SENSITIVITY.ANY:\n arg_3.simSensProcs.add(arg_0)\n elif arg_4 == SENSITIVITY.RISING:\n arg_3.simRisingSensProcs.add(arg_0)\n elif arg_4 == SENSITIVITY.FALLING:\n 
arg_3.simFallingSensProcs.add(arg_0)\n else:\n raise AssertionError(arg_4)\n else:\n arg_3.simSensProcs.add(arg_0)"} +{"_id": "doc_8161", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Evaluate list of values as condition\n \"\"\"\n arg_2 = True\n arg_3 = True\n for arg_4 in arg_1:\n arg_5 = bool(arg_4.val)\n arg_6 = arg_4.vldMask == 1\n if arg_6:\n if not arg_5:\n return False, True\n else:\n return False, False\n\n arg_2 = arg_2 and arg_5\n arg_3 = arg_3 and arg_6\n\n return arg_2, arg_3"} +{"_id": "doc_8162", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Connect ports of simulation models by name\n \"\"\"\n if arg_4 == DIRECTION.OUT:\n arg_5 = getattr(arg_1, arg_2)\n arg_6 = getattr(arg_0, arg_3)\n setattr(arg_1, arg_2, arg_6)\n else:\n arg_5 = getattr(arg_1, arg_3)\n arg_6 = getattr(arg_0, arg_2)\n setattr(arg_1, arg_3, arg_6)\n\n arg_1._ctx.signals.remove(arg_5)"} +{"_id": "doc_8163", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3):\n \"\"\"\n Create value updater for simulation\n\n :param nextVal: instance of Value which will be asssiggned to signal\n :param invalidate: flag which tells if value has been compromised\n and if it should be invaidated\n :return: function(value) -> tuple(valueHasChangedFlag, nextVal)\n \"\"\"\n\n def updater(arg_4):\n arg_5 = arg_0.clone()\n if arg_2:\n arg_5.vldMask = 0\n return (valueHasChanged(arg_4, arg_5), arg_5)\n return updater"} +{"_id": "doc_8164", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_3[arg_1],\n arg_4: arg_5):\n \"\"\"\n Create value updater for simulation for value of array type\n\n :param nextVal: instance of Value which will be asssiggned to signal\n :param indexes: tuple on indexes where value should be updated\n in target array\n\n :return: function(value) -> tuple(valueHasChangedFlag, nextVal)\n \"\"\"\n def updater(arg_6):\n if len(arg_2) > 1:\n raise NotImplementedError(\"[TODO] implement for more indexes\")\n\n arg_7 = arg_0.clone()\n if arg_4:\n arg_7.vldMask = 0\n\n arg_9 = arg_2[0]\n arg_10 = valueHasChanged(arg_6._getitem__val(arg_9), arg_7)\n arg_6._setitem__val(arg_9, arg_7)\n return (arg_10, arg_6)\n\n return updater"} +{"_id": "doc_8165", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Func value of this param\n \"\"\"\n assert not arg_0.__isReadOnly, \\\n (\"This parameter(%s) was locked\"\n \" and now it can not be changed\" % arg_0.name)\n assert arg_0.replacedWith is None, \\\n (\"This param was replaced with new one and this \"\n \"should not exists\")\n\n arg_1 = toHVal(arg_1)\n arg_0.defVal = arg_1\n arg_0._val = arg_1.staticEval()\n arg_0._dtype = arg_0._val._dtype"} +{"_id": "doc_8166", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Resolve ports of discovered memories\n \"\"\"\n arg_1 = 0\n arg_2 = arg_0.resources\n for arg_3, arg_4 in arg_0.memories.items():\n arg_5, arg_6, arg_7 = 0, 0, 0\n arg_8, arg_9, arg_10 = 0, 0, 0\n arg_11, arg_12 = 0, 0\n\n for arg_13, (arg_14, arg_15, arg_16, arg_17) in arg_4.items():\n if arg_14:\n arg_1 += arg_14 * arg_3._dtype.elmType.bit_length()\n\n # resolve port count for this addr signal\n arg_18 = min(arg_14, arg_15)\n arg_14 -= arg_18\n arg_15 -= arg_18\n\n arg_19 = min(arg_16, arg_17)\n arg_16 -= arg_19\n arg_17 -= arg_19\n\n arg_20 = min(arg_14, arg_17)\n arg_14 -= arg_20\n arg_17 -= arg_20\n\n arg_21 = min(arg_16, arg_15)\n arg_16 -= arg_21\n arg_15 -= arg_21\n\n # update port counts for mem\n arg_5 += arg_18\n arg_6 += arg_14\n arg_7 += arg_15\n arg_8 += arg_19\n arg_9 += arg_16\n arg_10 += 
arg_17\n\n arg_11 += arg_20\n arg_12 += arg_21\n arg_22 = ResourceRAM(arg_3._dtype.elmType.bit_length(),\n int(arg_3._dtype.size),\n arg_5, arg_6, arg_7,\n arg_11,\n arg_8, arg_9, arg_10,\n arg_12)\n arg_2[arg_22] = arg_2.get(arg_22, 0) + 1\n\n arg_0.memories.clear()\n\n # remove register on read ports which will be merged into ram\n if arg_1:\n arg_23 = arg_2[arg_24]\n arg_23 -= arg_1\n if arg_23:\n arg_2[arg_24] = arg_23\n else:\n del arg_2[arg_24]"} +{"_id": "doc_8167", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Construct value of this type.\n Delegated on value class for this type\n \"\"\"\n return arg_0.getValueCls().Func(arg_1, arg_0, arg_2=arg_2)"} +{"_id": "doc_8168", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Cast value or signal of this type to another type of same size.\n\n :param sigOrVal: instance of signal or value to cast\n :param toType: instance of HdlType to cast into\n \"\"\"\n try:\n return arg_0.auto_cast(arg_1, arg_2)\n except TypeConversionErr:\n pass\n\n try:\n arg_3 = arg_0._Func_fn\n except AttributeError:\n arg_3 = arg_0.get_Func_fn()\n arg_0._Func_fn = arg_3\n\n return arg_3(arg_0, arg_1, arg_2)"} +{"_id": "doc_8169", "title": "", "text": "def Func(arg_0, arg_1=arg_2.OUT, arg_4=None):\n \"\"\"\n Concatenate all signals to one big signal, recursively\n\n :param masterDirEqTo: only signals with this direction are packed\n :param exclude: sequence of signals/interfaces to exclude\n \"\"\"\n if not arg_0._interfaces:\n if arg_0._masterDir == arg_1:\n return arg_0._sig\n return None\n\n arg_5 = None\n for arg_6 in arg_0._interfaces:\n if arg_4 is not None and arg_6 in arg_4:\n continue\n\n if arg_6._interfaces:\n if arg_6._masterDir == arg_2.IN:\n arg_7 = arg_2.opposite(arg_1)\n else:\n arg_7 = arg_1\n arg_8 = arg_6._pack(arg_7, arg_4=arg_4)\n else:\n if arg_6._masterDir == arg_1:\n arg_8 = arg_6._sig\n else:\n arg_8 = None\n\n if arg_8 is not None:\n if arg_5 is None:\n arg_5 = arg_8\n else:\n arg_5 = arg_5._concat(arg_8)\n\n return arg_5"} +{"_id": "doc_8170", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return sig and val reduced by & operator or None\n if it is not possible to statically reduce expression\n \"\"\"\n arg_2 = arg_0._dtype.all_mask()\n if arg_1._isFullVld():\n arg_3 = arg_1.val\n if arg_3 == arg_2:\n return arg_0\n elif arg_3 == 0:\n return arg_1"} +{"_id": "doc_8171", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Return sig and val reduced by ^ operator or None\n if it is not possible to statically reduce expression\n \"\"\"\n arg_2 = arg_0._dtype.all_mask()\n if not arg_1.vldMask:\n return arg_1\n\n if arg_1._isFullVld():\n arg_3 = arg_1.val\n if arg_3 == arg_2:\n return ~arg_0\n elif arg_3 == 0:\n return arg_0"} +{"_id": "doc_8172", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get root of name space\n \"\"\"\n arg_1 = NameScope(False)\n arg_1.setLevel(1)\n arg_1[0].update(arg_0._keywords_dict)\n return arg_1"} +{"_id": "doc_8173", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"\n Decide if this unit should be serialized or not eventually fix name\n to fit same already serialized unit\n\n :param obj: object to serialize\n :param serializedClasses: dict {unitCls : unitobj}\n :param serializedConfiguredUnits: (unitCls, paramsValues) : unitObj\n where paramsValues are named tuple name:value\n \"\"\"\n arg_4 = isinstance(arg_1, Entity)\n arg_5 = isinstance(arg_1, Architecture)\n if arg_4:\n arg_6 = arg_1.origin\n elif arg_5:\n arg_6 = 
arg_1.entity.origin\n else:\n return True\n\n assert isinstance(arg_6, Unit)\n arg_7 = arg_6._serializeDecision\n if arg_7 is None:\n return True\n else:\n arg_8 = arg_2.get(arg_6.__class__, None)\n arg_9, arg_10 = arg_7(arg_6, arg_1, arg_4, arg_8)\n arg_2[arg_6.__class__] = arg_10\n return arg_9"} +{"_id": "doc_8174", "title": "", "text": "def Func(arg_0, arg_1: Func, arg_3: arg_4, arg_5=False):\n \"\"\"\n Serialize HdlType instance\n \"\"\"\n if isinstance(arg_1, Bits):\n arg_6 = arg_0.HdlType_bits\n elif isinstance(arg_1, HEnum):\n arg_6 = arg_0.HdlType_enum\n elif isinstance(arg_1, HArray):\n arg_6 = arg_0.HdlType_array\n elif isinstance(arg_1, Integer):\n arg_6 = arg_0.HdlType_int\n elif isinstance(arg_1, HBool):\n arg_6 = arg_0.HdlType_bool\n else:\n raise NotImplementedError(\"type declaration is not implemented\"\n \" for type %s\"\n % (arg_1.name))\n\n return arg_6(arg_1, arg_3, arg_5=arg_5)"} +{"_id": "doc_8175", "title": "", "text": "def Func(arg_0, arg_1: Func, arg_3: arg_4):\n \"\"\"\n Srialize IfContainer instance\n \"\"\"\n arg_5 = arg_3.withIndent()\n\n def asHdl(arg_6):\n return [arg_0.asHdl(arg_7, arg_5) for arg_7 in arg_6]\n\n try:\n arg_8 = arg_0.condAsHdl(arg_1.cond, True, arg_3)\n except UnsupportedEventOpErr as e:\n arg_8 = None\n\n if arg_8 is None:\n assert not arg_1.elIfs\n assert not arg_1.ifFalse\n arg_9 = [arg_0.asHdl(arg_7, arg_3) for arg_7 in arg_1.ifTrue]\n return \"\\n\".join(arg_9)\n\n arg_10 = []\n arg_11 = arg_1.ifTrue\n arg_12 = arg_1.ifFalse\n if arg_12 is None:\n arg_12 = []\n\n for arg_13, arg_6 in arg_1.elIfs:\n try:\n arg_10.append((arg_0.condAsHdl(arg_13, True, arg_3), asHdl(arg_6)))\n except UnsupportedEventOpErr as e:\n if len(arg_1.elIfs) == 1 and not arg_12:\n # register expression is in valid format and this\n # is just register with asynchronous reset or etc...\n arg_12 = arg_6\n else:\n raise e\n\n return arg_0.ifTmpl.render(\n indent=getIndent(arg_3.indent),\n arg_8=arg_8,\n arg_11=asHdl(arg_11),\n arg_10=arg_10,\n arg_12=asHdl(arg_12))"} +{"_id": "doc_8176", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get constant name for value\n name of constant is reused if same value was used before\n \"\"\"\n try:\n return arg_0._cache[arg_1]\n except KeyError:\n if isinstance(arg_1.val, int):\n arg_2 = \"const_%d_\" % arg_1.val\n else:\n arg_2 = \"const_\"\n\n arg_3 = arg_0.nameCheckFn(arg_2, arg_1)\n arg_0._cache[arg_1] = arg_3\n return arg_3"} +{"_id": "doc_8177", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Cut off statements which are driver of specified signal\n \"\"\"\n if arg_0.dst is arg_1:\n arg_0.parentStm = None\n return arg_0\n else:\n return None"} +{"_id": "doc_8178", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4) -> arg_4:\n \"\"\"\n Parse HArray type to this transaction template instance\n\n :return: address of it's end\n \"\"\"\n arg_0.itemCnt = evalParam(arg_1.size).val\n arg_0.children = TransTmpl(\n arg_1.elmType, 0, parent=arg_0, origin=arg_0.origin)\n return arg_3 + arg_0.itemCnt * arg_0.children.bitAddrEnd"} +{"_id": "doc_8179", "title": "", "text": "def Func(arg_0) -> int:\n \"\"\"\n Only for transactions derived from HArray\n\n :return: width of item in original array\n \"\"\"\n if not isinstance(arg_0.dtype, HArray):\n raise TypeError()\n return (arg_0.bitAddrEnd - arg_0.bitAddr) // arg_0.itemCnt"} +{"_id": "doc_8180", "title": "", "text": "def Func(arg_0, arg_1: arg_2=0,\n arg_3=arg_4,\n arg_5: arg_6 =arg_7()\n ) -> Generator[\n Union[Tuple[Tuple[arg_2, arg_2], 
'TransTmpl'], 'OneOfTransaction'],\n None, None]:\n \"\"\"\n Walk fields in instance of TransTmpl\n\n :param offset: optional offset for all children in this TransTmpl\n :param shouldEnterFn: function (transTmpl) which returns True\n when field should be split on it's children\n :param shouldEnterFn: function(transTmpl) which should return\n (shouldEnter, shouldUse) where shouldEnter is flag that means\n iterator should look inside of this actual object\n and shouldUse flag means that this field should be used\n (=generator should yield it)\n :return: generator of tuples ((startBitAddress, endBitAddress),\n TransTmpl instance)\n \"\"\"\n\n arg_8 = arg_0.dtype\n arg_9 = arg_0.bitAddr + arg_1\n arg_10 = arg_0.bitAddrEnd + arg_1\n\n arg_11, arg_12 = arg_3(arg_0)\n if arg_12:\n yield ((arg_9, arg_10), arg_0)\n\n if arg_11:\n if isinstance(arg_8, Bits):\n pass\n elif isinstance(arg_8, HStruct):\n for arg_13 in arg_0.children:\n with arg_5(arg_13.origin.name):\n yield from arg_13.Func(\n arg_1,\n arg_3,\n arg_5)\n elif isinstance(arg_8, HArray):\n arg_14 = (arg_0.bitAddrEnd - arg_0.bitAddr) // arg_0.itemCnt\n for arg_15 in range(arg_0.itemCnt):\n with arg_5(arg_15):\n yield from arg_0.children.Func(\n arg_9 + arg_15 * arg_14,\n arg_3,\n arg_5)\n elif isinstance(arg_8, HUnion):\n yield OneOfTransaction(arg_0, arg_1, arg_3,\n arg_0.children)\n elif isinstance(arg_8, HStream):\n assert len(arg_0.children) == 1\n yield StreamTransaction(arg_0, arg_1, arg_3,\n arg_0.children[0])\n else:\n raise TypeError(arg_8)"} +{"_id": "doc_8181", "title": "", "text": "def Func(arg_0, arg_1: \"IfContainer\") -> None:\n \"\"\"\n Merge other statement to this statement\n \"\"\"\n arg_2 = arg_0._merge_statement_lists\n arg_3 = []\n for (arg_4, arg_5), (arg_6, arg_7) in zip(arg_0.cases, arg_1.cases):\n arg_3.append((arg_4, arg_2(arg_5, arg_7)))\n\n arg_0.cases = arg_3\n\n if arg_0.default is not None:\n arg_0.default = arg_2(arg_0.default, arg_1.default)\n\n arg_0._on_merge(arg_1)"} +{"_id": "doc_8182", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Cached indent getter function\n \"\"\"\n try:\n return arg_2[arg_0]\n except KeyError:\n arg_1 = \"\".join([_indent for _ in range(arg_0)])\n arg_2[arg_0] = arg_1\n return arg_1"} +{"_id": "doc_8183", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Check if not redefining property on obj\n \"\"\"\n if getattr(arg_0, arg_1, None) is not None:\n raise IntfLvlConfErr(\"%r already has property %s old:%s new:%s\" % \n (arg_0, arg_1, repr(getattr(arg_0, arg_1)), arg_2))"} +{"_id": "doc_8184", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Register interface object on interface level object\n \"\"\"\n nameAvailabilityCheck(arg_0, arg_1, arg_2)\n assert arg_2._parent is None\n arg_2._parent = arg_0\n arg_2._name = arg_1\n arg_2._ctx = arg_0._ctx\n\n if arg_3:\n arg_0._private_interfaces.append(arg_2)\n arg_2._isExtern = False\n else:\n arg_0._interfaces.append(arg_2)\n arg_2._isExtern = True"} +{"_id": "doc_8185", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Register array of items on interface level object\n \"\"\"\n arg_2._parent = arg_0\n arg_2._name = arg_1\n for arg_5, arg_6 in enumerate(arg_2):\n setattr(arg_0, \"%s_%d\" % (arg_1, arg_5), arg_6)"} +{"_id": "doc_8186", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a first driver if signal has only one driver.\n \"\"\"\n # [TODO] no driver exception\n arg_1 = len(arg_0.drivers)\n if not arg_1:\n raise NoDriverErr(arg_0)\n elif arg_1 != 1:\n raise 
MultipleDriversErr(arg_0)\n\n return arg_0.drivers[0]"} +{"_id": "doc_8187", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Recursively statistically evaluate result of this operator\n \"\"\"\n for arg_1 in arg_0.operands:\n arg_1.Func()\n arg_0.result._val = arg_0.evalFn()"} +{"_id": "doc_8188", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=[]):\n \"\"\"\n Create operator with result signal\n\n :ivar resT: data type of result signal\n :ivar outputs: iterable of singnals which are outputs\n from this operator\n \"\"\"\n arg_4 = Operator(arg_0, arg_1)\n arg_5 = RtlSignal(getCtxFromOps(arg_1), None, arg_2)\n arg_5._const = arr_all(arg_4.operands, isConst)\n arg_5.drivers.append(arg_4)\n arg_5.origin = arg_4\n arg_4.result = arg_5\n arg_4.registerSignals(arg_3)\n if arg_5._const:\n arg_5.staticEval()\n return arg_5"} +{"_id": "doc_8189", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Try connect src to interface of specified name on unit.\n Ignore if interface is not present or if it already has driver.\n \"\"\"\n try:\n arg_3 = getattr(arg_1, arg_2)\n except AttributeError:\n return\n if not arg_3._sig.drivers:\n connect(arg_0, arg_3)"} +{"_id": "doc_8190", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Propagate \"clk\" clock signal to all subcomponents\n \"\"\"\n arg_1 = arg_0.clk\n for arg_2 in arg_0._units:\n _tryConnect(arg_1, arg_2, 'clk')"} +{"_id": "doc_8191", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Propagate \"clk\" clock and negative reset \"rst_n\" signal\n to all subcomponents\n \"\"\"\n arg_1 = arg_0.clk\n arg_2 = arg_0.rst_n\n\n for arg_3 in arg_0._units:\n _tryConnect(arg_1, arg_3, 'clk')\n _tryConnect(arg_2, arg_3, 'rst_n')\n _tryConnect(~arg_2, arg_3, 'rst')"} +{"_id": "doc_8192", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Propagate reset \"rst\" signal\n to all subcomponents\n \"\"\"\n arg_1 = arg_0.rst\n\n for arg_2 in arg_0._units:\n _tryConnect(~arg_1, arg_2, 'rst_n')\n _tryConnect(arg_1, arg_2, 'rst')"} +{"_id": "doc_8193", "title": "", "text": "def Func(arg_0: arg_1[arg_2, arg_3], arg_4: arg_5=1,\n arg_6: arg_7=True, arg_8: arg_7=False):\n \"\"\"\n Iterate over bits in vector\n\n :param sigOrVal: signal or value to iterate over\n :param bitsInOne: number of bits in one part\n :param skipPadding: if true padding is skipped in dense types\n \"\"\"\n arg_9 = BitWalker(arg_0, arg_6, arg_8)\n for arg_10 in range(ceil(arg_0._dtype.bit_length() / arg_4)):\n yield arg_9.get(arg_4)\n\n arg_9.assertIsOnEnd()"} +{"_id": "doc_8194", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Always decide not to serialize obj\n\n :param priv: private data for this function first unit of this class\n :return: tuple (do serialize this object, next priv)\n \"\"\"\n if arg_2:\n # prepare entity which will not be serialized\n prepareEntity(arg_1, arg_0.__class__.__name__, arg_3)\n\n if arg_3 is None:\n arg_3 = arg_0\n\n return False, arg_3"} +{"_id": "doc_8195", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Decide to serialize only first obj of it's class\n\n :param priv: private data for this function\n (first object with class == obj.__class__)\n\n :return: tuple (do serialize this object, next priv)\n where priv is private data for this function\n (first object with class == obj.__class__)\n \"\"\"\n arg_4 = arg_0.__class__.__name__\n\n if arg_2:\n arg_1.name = arg_4\n\n if arg_3 is None:\n arg_3 = arg_0\n elif arg_2:\n # prepare entity which will not be serialized\n prepareEntity(arg_1, arg_4, 
arg_0)\n\n arg_6 = arg_3 is arg_0\n\n return arg_6, arg_3"} +{"_id": "doc_8196", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Decide to serialize only objs with uniq parameters and class\n\n :param priv: private data for this function\n ({frozen_params: obj})\n\n :return: tuple (do serialize this object, next priv)\n \"\"\"\n\n arg_4 = paramsToValTuple(arg_0)\n\n if arg_3 is None:\n arg_3 = {}\n\n if arg_2:\n try:\n arg_5 = arg_3[arg_4]\n except KeyError:\n arg_3[arg_4] = arg_0\n return True, arg_3\n\n prepareEntity(arg_1, arg_5._entity.name, arg_5)\n return False, arg_3\n\n return arg_3[arg_4] is arg_0, arg_3"} +{"_id": "doc_8197", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Delegate Func on items\n\n :note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector.Func`\n \"\"\"\n for arg_3 in arg_0:\n arg_3.Func(*arg_1, **arg_2)"} +{"_id": "doc_8198", "title": "", "text": "def Func(arg_0, arg_1=arg_2(), arg_3=None):\n \"\"\"\n Create a simulation model for unit\n\n :param unit: interface level unit which you wont prepare for simulation\n :param targetPlatform: target platform for this synthes\n :param dumpModelIn: folder to where put sim model files\n (otherwise sim model will be constructed only in memory)\n \"\"\"\n arg_4 = toRtl(arg_0,\n arg_1=arg_1,\n saveTo=arg_3,\n serializer=SimModelSerializer)\n if arg_3 is not None:\n arg_5 = os.path.join(os.getcwd(), arg_3)\n arg_6 = arg_5 in sys.path\n if not arg_6:\n sys.path.insert(0, arg_5)\n if arg_0._name in sys.modules:\n del sys.modules[arg_0._name]\n arg_7 = importlib.import_module(arg_0._name)\n\n if not arg_6:\n sys.path.remove(arg_5)\n else:\n arg_7 = ModuleType('simModule')\n # python supports only ~100 opened brackets\n # it exceded it throws MemoryError: s_push: parser stack overflow\n exec(arg_4, arg_7.__dict__)\n\n return arg_7.__dict__[arg_0._name]"} +{"_id": "doc_8199", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reconnect model signals to unit to run simulation with simulation model\n but use original unit interfaces for communication\n\n :param synthesisedUnitOrIntf: interface where should be signals\n replaced from signals from modelCls\n :param modelCls: simulation model form where signals\n for synthesisedUnitOrIntf should be taken\n \"\"\"\n arg_2 = arg_0\n arg_3 = arg_2._interfaces\n\n\n if arg_3:\n for arg_4 in arg_3:\n # proxies are destroyed on original interfaces and only proxies on\n # array items will remain\n Func(arg_4, arg_1)\n else:\n # reconnect signal from model\n arg_5 = arg_0\n arg_5._sigInside = getattr(arg_1, arg_5._sigInside.name)"} +{"_id": "doc_8200", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.stdout,\n arg_5=100 * arg_6.ns):\n \"\"\"\n Syntax sugar\n If outputFile is string try to open it as file\n\n :return: hdl simulator object\n \"\"\"\n assert isinstance(arg_0, SimModel), \\\n \"Class of SimModel is required (got %r)\" % (arg_0)\n if isinstance(arg_2, str):\n arg_8 = os.path.dirname(arg_2)\n if arg_8:\n os.makedirs(arg_8, exist_ok=True)\n with open(arg_2, 'w') as f:\n return _Func(arg_0, arg_1,\n f, arg_5)\n else:\n return _Func(arg_0, arg_1,\n arg_2, arg_5)"} +{"_id": "doc_8201", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process for injecting of this callback loop into simulator\n \"\"\"\n yield from arg_0.onTWriteCallback(arg_1)\n arg_0.intf.t._sigInside.registerWriteCallback(\n arg_0.onTWriteCallback,\n arg_0.getEnable)\n arg_0.intf.o._sigInside.registerWriteCallback(\n arg_0.onTWriteCallback,\n 
arg_0.getEnable)"} +{"_id": "doc_8202", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Connect internal signal to port item,\n this connection is used by simulator and only output port items\n will be connected\n \"\"\"\n if arg_0.direction == DIRECTION.OUT:\n if arg_0.src is not None:\n raise HwtSyntaxError(\n \"Port %s is already associated with %s\"\n % (arg_0.name, str(arg_0.src)))\n arg_0.src = arg_1\n\n elif arg_0.direction == DIRECTION.IN:\n if arg_0.dst is not None:\n raise HwtSyntaxError(\n \"Port %s is already associated with %s\"\n % (arg_0.name, str(arg_0.dst)))\n arg_0.dst = arg_1\n\n else:\n raise NotImplementedError(arg_0.direction)"} +{"_id": "doc_8203", "title": "", "text": "def Func(arg_0):\n \"\"\"\n connet signal from internal side of of this component to this port\n \"\"\"\n arg_1 = arg_0.direction\n if arg_1 == DIRECTION.OUT:\n arg_0.src.endpoints.append(arg_0)\n elif arg_1 == DIRECTION.IN or arg_1 == DIRECTION.INOUT:\n arg_0.dst.drivers.append(arg_0)\n else:\n raise NotImplementedError(arg_1)"} +{"_id": "doc_8204", "title": "", "text": "def Func(arg_0):\n \"\"\"\n return signal inside unit which has this port\n \"\"\"\n arg_1 = arg_0.direction\n if arg_1 == DIRECTION.IN:\n return arg_0.dst\n elif arg_1 == DIRECTION.OUT:\n return arg_0.src\n else:\n raise NotImplementedError(arg_1)"} +{"_id": "doc_8205", "title": "", "text": "def Func(arg_0, arg_1, arg_2) -> None:\n \"\"\"\n Schedule process on actual time with specified priority\n \"\"\"\n arg_0._events.push(arg_0.now, arg_2, arg_1)"} +{"_id": "doc_8206", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3) -> None:\n \"\"\"\n Add hdl process to execution queue\n\n :param trigger: instance of SimSignal\n :param proc: python generator function representing HDL process\n \"\"\"\n # first process in time has to plan executing of apply values on the\n # end of this time\n if not arg_0._applyValPlaned:\n # (apply on end of this time to minimalize process reevaluation)\n arg_0._scheduleApplyValues()\n\n if isEvDependentOn(arg_1, arg_3):\n if arg_0.now == 0:\n return # pass event dependent on startup\n arg_0._seqProcsToRun.append(arg_3)\n else:\n arg_0._combProcsToRun.append(arg_3)"} +{"_id": "doc_8207", "title": "", "text": "def Func(arg_0) -> Event:\n \"\"\"\n Schedule combUpdateDoneEv event to let agents know that current\n delta step is ending and values from combinational logic are stable\n \"\"\"\n assert not arg_0._combUpdateDonePlaned, arg_0.now\n arg_1 = Event(arg_0)\n arg_1.process_to_wake.append(arg_0.__deleteCombUpdateDoneEv())\n arg_0._add_process(arg_1, PRIORITY_AGENTS_UPDATE_DONE)\n arg_0._combUpdateDonePlaned = True\n arg_0.combUpdateDoneEv = arg_1\n return arg_1"} +{"_id": "doc_8208", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"\n Apply stashed values to signals\n \"\"\"\n assert not arg_0._applyValPlaned, arg_0.now\n arg_0._add_process(arg_0._applyValues(), PRIORITY_APPLY_COMB)\n arg_0._applyValPlaned = True\n\n if arg_0._runSeqProcessesPlaned:\n # if runSeqProcesses is already scheduled\n return\n\n assert not arg_0._seqProcsToRun and not arg_0._runSeqProcessesPlaned, arg_0.now\n arg_0._add_process(arg_0._runSeqProcesses(), PRIORITY_APPLY_SEQ)\n arg_0._runSeqProcessesPlaned = True"} +{"_id": "doc_8209", "title": "", "text": "def Func(arg_0, arg_1: arg_2)\\\n -> Tuple[Callable[[Value], bool], bool]:\n \"\"\"\n This functions resolves write conflicts for signal\n\n :param actionSet: set of actions made by process\n \"\"\"\n\n arg_3 = False\n arg_4 = len(arg_1)\n if arg_4 == 3:\n # 
update for item in array\n arg_5, arg_6, arg_7 = arg_1\n return (mkArrayUpdater(arg_5, arg_6, arg_3), arg_7)\n else:\n # update for simple signal\n arg_5, arg_7 = arg_1\n return (mkUpdater(arg_5, arg_3), arg_7)"} +{"_id": "doc_8210", "title": "", "text": "def Func(arg_0) -> None:\n \"\"\"\n Delta step for combinational processes\n \"\"\"\n for arg_1 in arg_0._combProcsToRun:\n arg_2 = arg_0._outputContainers[arg_1]\n arg_1(arg_0, arg_2)\n for arg_3, arg_4 in arg_2._all_signals:\n arg_5 = getattr(arg_2, arg_3)\n if arg_5 is not None:\n arg_6 = arg_0._conflictResolveStrategy(arg_5)\n # prepare update\n arg_7, arg_8 = arg_6\n arg_0._valuesToApply.append(\n (arg_4, arg_7, arg_8, arg_1))\n setattr(arg_2, arg_3, None)\n # else value is latched\n\n arg_0._combProcsToRun = UniqList()"} +{"_id": "doc_8211", "title": "", "text": "def Func(arg_0, arg_1) -> Value:\n \"\"\"\n Read value from signal or interface\n \"\"\"\n try:\n arg_2 = arg_1._val\n except AttributeError:\n arg_2 = arg_1._sigInside._val\n\n return arg_2.clone()"} +{"_id": "doc_8212", "title": "", "text": "def Func(arg_0, arg_1, arg_2: arg_3)-> None:\n \"\"\"\n Write value to signal or interface.\n \"\"\"\n # get target RtlSignal\n try:\n arg_4 = arg_2.simSensProcs\n except AttributeError:\n arg_2 = arg_2._sigInside\n arg_4 = arg_2.simSensProcs\n\n # type cast of input value\n arg_5 = arg_2._dtype\n\n if isinstance(arg_1, Value):\n arg_6 = arg_1.clone()\n arg_6 = arg_6._auto_cast(arg_5)\n else:\n arg_6 = arg_5.fromPy(arg_1)\n\n # can not update value in signal directly due singnal proxies\n arg_2.simUpdateVal(arg_0, lambda curentV: (\n valueHasChanged(curentV, arg_6), arg_6))\n\n if not arg_0._applyValPlaned:\n if not (arg_4 or\n arg_2.simRisingSensProcs or\n arg_2.simFallingSensProcs):\n # signal value was changed but there are no sensitive processes\n # to it because of this _applyValues is never planed\n # and should be\n arg_0._scheduleApplyValues()\n elif (arg_2._FuncCallbacks or\n arg_2._FuncCallbacksToEn):\n # signal Func did not caused any change on any other signal\n # but there are still simulation agets waiting on\n # updateComplete event\n arg_0._scheduleApplyValues()"} +{"_id": "doc_8213", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert all ternary operators to IfContainers\"\"\"\n arg_1 = []\n\n for arg_2 in arg_0:\n if isinstance(arg_2, Assignment):\n try:\n if not isinstance(arg_2.src, RtlSignalBase):\n raise DoesNotContainsTernary()\n arg_3 = arg_2.src.singleDriver()\n if not isinstance(arg_3, Operator) or arg_3.operator != AllOps.TERNARY:\n raise DoesNotContainsTernary()\n else:\n arg_4 = arg_3.operands\n arg_5 = IfContainer(arg_4[0],\n [Assignment(arg_4[1], arg_2.dst)],\n [Assignment(arg_4[2], arg_2.dst)]\n )\n arg_1.append(arg_5)\n continue\n\n except (MultipleDriversErr, DoesNotContainsTernary):\n pass\n except NoDriverErr:\n assert (hasattr(arg_2.src, \"_interface\")\n and arg_2.src._interface is not None)\\\n or arg_2.src.defVal.vldMask, arg_2.src\n\n arg_1.append(arg_2)\n return arg_1"} +{"_id": "doc_8214", "title": "", "text": "def Func(arg_0):\n \"\"\" Create a new Func under this service. \"\"\"\n arg_1 = Version()\n arg_1.conn = arg_0.conn\n\n arg_1.attrs = {\n # Parent params\n 'service_id': arg_0.attrs['id'],\n }\n\n arg_1.save()\n\n return arg_1"} +{"_id": "doc_8215", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Create a new VCL under this version. 
\"\"\"\n Func = VCL()\n Func.conn = arg_0.conn\n\n Func.attrs = {\n # Parent params\n 'service_id': arg_0.attrs['service_id'],\n 'version': arg_0.attrs['number'],\n\n # New instance params\n 'name': arg_1,\n 'content': arg_2,\n }\n\n Func.save()\n\n return Func"} +{"_id": "doc_8216", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts the column to a dictionary representation accepted\n by the Citrination server.\n\n :return: Dictionary with basic options, plus any column type specific\n options held under the \"options\" key\n :rtype: dict\n \"\"\"\n return {\n \"type\": arg_0.type,\n \"name\": arg_0.name,\n \"group_by_key\": arg_0.group_by_key,\n \"role\": arg_0.role,\n \"units\": arg_0.units,\n \"options\": arg_0.build_options()\n }"} +{"_id": "doc_8217", "title": "", "text": "def Func(arg_0, arg_1, arg_2='ignore', arg_3=False):\n \"\"\"\n Add a descriptor column.\n\n :param descriptor: A Descriptor instance (e.g., RealDescriptor, InorganicDescriptor, etc.)\n :param role: Specify a role (input, output, latentVariable, or ignore)\n :param group_by_key: Whether or not to group by this key during cross validation\n \"\"\"\n\n arg_1.validate()\n\n if arg_1.key in arg_0.configuration[\"roles\"]:\n raise ValueError(\"Cannot add a descriptor with the same name twice\")\n\n arg_0.configuration['descriptors'].append(arg_1.as_dict())\n arg_0.configuration[\"roles\"][arg_1.key] = arg_2\n\n if arg_3:\n arg_0.configuration[\"group_by\"].append(arg_1.key)"} +{"_id": "doc_8218", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks to see that the query will not exceed the max query depth\n\n :param returning_query: The PIF system or Dataset query to execute.\n :type returning_query: :class:`PifSystemReturningQuery` or :class: `DatasetReturningQuery`\n \"\"\"\n\n arg_2 = arg_1.from_index or 0\n arg_3 = arg_1.size or 0\n\n if arg_2 < 0:\n raise CitrinationClientError(\n \"start_index cannot be negative. Please enter a value greater than or equal to zero\")\n if arg_3 < 0:\n raise CitrinationClientError(\"Size cannot be negative. Please enter a value greater than or equal to zero\")\n if arg_2 + arg_3 > MAX_QUERY_DEPTH:\n raise CitrinationClientError(\n \"Citrination does not support pagination past the {0}th result. Please reduce either the from_index and/or size such that their sum is below {0}\".format(\n MAX_QUERY_DEPTH))"} +{"_id": "doc_8219", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Run each in a list of PIF queries against Citrination.\n\n :param multi_query: :class:`MultiQuery` object to execute.\n :return: :class:`PifMultiSearchResult` object with the results of the query.\n \"\"\"\n arg_2 = \"Error while making PIF multi search request\"\n arg_3 = arg_0._get_success_json(\n arg_0._post(routes.Func, data=json.dumps(arg_1, cls=QueryEncoder),\n arg_2=arg_2))\n\n return PifMultiSearchResult(**keys_to_snake_case(arg_3['results']))"} +{"_id": "doc_8220", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n Updates an existing data view from the search template and ml template given\n\n :param id: Identifier for the data view. 
This returned from the create method.\n :param configuration: Information to construct the data view from (eg descriptors, datasets etc)\n :param name: Name of the data view\n :param description: Description for the data view\n \"\"\"\n\n arg_5 = {\n \"configuration\":\n arg_2,\n \"name\":\n arg_3,\n \"description\":\n arg_4\n }\n\n arg_6 = \"Dataview creation failed\"\n\n arg_0._patch_json(\n 'v1/data_views/' + arg_1, arg_5, arg_6=arg_6)"} +{"_id": "doc_8221", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets basic information about a view\n\n :param data_view_id: Identifier of the data view\n :return: Metadata about the view as JSON\n \"\"\"\n\n arg_2 = \"Dataview Func failed\"\n return arg_0._Func_success_json(arg_0._Func(\n 'v1/data_views/' + arg_1, None, arg_2=arg_2))['data']['data_view']"} +{"_id": "doc_8222", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates an ml configuration from dataset_ids and extract_as_keys\n\n :param dataset_ids: Array of dataset identifiers to make search template from\n :return: An identifier used to request the status of the builder job (get_ml_configuration_status)\n \"\"\"\n arg_2 = arg_0.search_template_client.get_available_columns(arg_1)\n\n # Create a search template from dataset ids\n arg_3 = arg_0.search_template_client.create(arg_1, arg_2)\n return arg_0.create_ml_configuration(arg_3, arg_2, arg_1)"} +{"_id": "doc_8223", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Utility function to turn the result object from the configuration builder endpoint into something that\n can be used directly as a configuration.\n\n :param result_blob: Nested dicts representing the possible descriptors\n :param dataset_ids: Array of dataset identifiers to make search template from\n :return: An object suitable to be used as a parameter to data view create\n \"\"\"\n\n arg_3 = DataViewBuilder()\n arg_3.dataset_ids(arg_2)\n for arg_4, (arg_5, arg_6) in enumerate(arg_1['descriptors'].items()):\n try:\n arg_7 = arg_0.__snake_case(arg_6[0])\n print(json.dumps(arg_7))\n arg_7['descriptor_key'] = arg_5\n arg_3.add_raw_descriptor(arg_7)\n except IndexError:\n pass\n\n for arg_4, (arg_5, arg_6) in enumerate(arg_1['types'].items()):\n arg_3.set_role(arg_5, arg_6.lower())\n\n return arg_3.build()"} +{"_id": "doc_8224", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n After invoking the create_ml_configuration async method, you can use this method to\n check on the status of the builder job.\n\n :param job_id: The identifier returned from create_ml_configuration\n :return: Job status\n \"\"\"\n\n arg_2 = \"Get status on ml configuration failed\"\n arg_3 = arg_0._get_success_json(arg_0._get(\n 'v1/descriptors/builders/simple/default/' + arg_1 + '/status', None, arg_2=arg_2))[\n 'data']\n return arg_3"} +{"_id": "doc_8225", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Get the t-SNE projection, including responses and tags.\n\n :param data_view_id: The ID of the data view to retrieve TSNE from\n :type data_view_id: int\n :return: The TSNE analysis\n :rtype: :class:`Tsne`\n \"\"\"\n arg_2 = arg_0._data_analysis(arg_1)\n arg_3 = arg_2['projections']\n Func = Tsne()\n for arg_5, arg_6 in arg_3.items():\n arg_7 = Projection(\n xs=arg_6['x'],\n ys=arg_6['y'],\n responses=arg_6['label'],\n tags=arg_6['inputs'],\n uids=arg_6['uid']\n )\n Func.add_projection(arg_5, arg_7)\n\n return Func"} +{"_id": "doc_8226", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='scalar', arg_4=True):\n \"\"\"\n Submits an async 
prediction request.\n\n :param data_view_id: The id returned from create\n :param candidates: Array of candidates\n :param prediction_source: 'scalar' or 'scalar_from_distribution'\n :param use_prior: True to use prior prediction, otherwise False\n :return: Predict request Id (used to check status)\n \"\"\"\n\n arg_5 = {\n \"prediction_source\":\n arg_3,\n \"use_prior\":\n arg_4,\n \"candidates\":\n arg_2\n }\n\n arg_6 = \"Configuration creation failed\"\n arg_7 = 'v1/data_views/' + str(arg_1) + '/predict/submit'\n return arg_0._get_success_json(\n arg_0._post_json(arg_7, arg_5, arg_6=arg_6)\n )['data']['uid']"} +{"_id": "doc_8227", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns a string indicating the status of the prediction job\n\n :param view_id: The data view id returned from data view create\n :param predict_request_id: The id returned from predict\n :return: Status data, also includes results if state is finished\n \"\"\"\n\n arg_3 = \"Get status on predict failed\"\n\n arg_4 = arg_0._get_success_json(arg_0._get(\n 'v1/data_views/' + str(arg_1) + '/predict/' + str(arg_2) + '/status',\n None, arg_3=arg_3))\n\n arg_5 = arg_4[\"data\"]\n # result.update({\"message\": bare_response[\"message\"]})\n\n return arg_5"} +{"_id": "doc_8228", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=[], arg_6=\"Default\"):\n \"\"\"\n Submits a new experimental design run.\n\n :param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n :type data_view_id: str\n :param num_candidates: The number of candidates to return\n :type num_candidates: int\n :param target: An :class:``Target`` instance representing\n the design run optimization target\n :type target: :class:``Target``\n :param constraints: An array of design constraints (instances of\n objects which extend :class:``BaseConstraint``)\n :type constraints: list of :class:``BaseConstraint``\n :param sampler: The name of the sampler to use during the design run:\n either \"Default\" or \"This view\"\n :type sampler: str\n :return: A :class:`DesignRun` instance containing the UID of the\n new run\n \"\"\"\n if arg_3 > 30:\n raise CitrinationClientError(\"Parameter effort must be less than 30 to trigger a design run\")\n\n if arg_4 is not None:\n arg_4 = arg_4.to_dict()\n\n arg_7 = [c.to_dict() for c in arg_5]\n\n arg_8 = {\n \"num_candidates\": arg_2,\n \"target\": arg_4,\n \"effort\": arg_3,\n \"constraints\": arg_7,\n \"sampler\": arg_6\n }\n\n arg_9 = routes.submit_data_view_design(arg_1)\n\n arg_10 = arg_0._post_json(arg_9, arg_8).json()\n\n return DesignRun(arg_10[\"data\"][\"design_run\"][\"uid\"])"} +{"_id": "doc_8229", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieves a summary of information for a given data view\n - view id\n - name\n - description\n - columns\n\n :param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n :type data_view_id: str\n \"\"\"\n\n arg_2 = routes.Func(arg_1)\n\n arg_3 = arg_0._get(arg_2).json()\n\n arg_4 = arg_3[\"data\"][\"data_view\"]\n\n arg_5 = []\n for arg_6 in arg_4[\"datasets\"]:\n arg_5.append(Dataset(\n name=arg_6[\"name\"],\n id=arg_6[\"id\"],\n description=arg_6[\"description\"]\n ))\n\n arg_7 = []\n for arg_8 in arg_4[\"columns\"]:\n arg_7.append(ColumnFactory.from_dict(arg_8))\n\n return DataView(\n view_id=arg_1,\n name=arg_4[\"name\"],\n description=arg_4[\"description\"],\n datasets=arg_5,\n columns=arg_7,\n )"} +{"_id": "doc_8230", "title": "", "text": "def 
Func(arg_0):\n \"\"\"\n Given a filepath, loads the file as a dictionary from YAML\n\n :param path: The path to a YAML file\n \"\"\"\n with open(arg_0, \"r\") as f:\n arg_1 = f.read()\n arg_2 = yaml.load(arg_1)\n return arg_2"} +{"_id": "doc_8231", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Extracts credentials from the yaml formatted credential filepath\n passed in. Uses the default profile if the CITRINATION_PROFILE env var\n is not set, otherwise looks for a profile with that name in the credentials file.\n\n :param filepath: The path of the credentials file\n \"\"\"\n try:\n arg_1 = load_file_as_yaml(arg_0)\n except Exception:\n arg_1 = {}\n\n arg_2 = os.environ.get(citr_env_vars.CITRINATION_PROFILE)\n if arg_2 is None or len(arg_2) == 0:\n arg_2 = DEFAULT_CITRINATION_PROFILE\n arg_3 = None\n arg_4 = None\n try:\n arg_5 = arg_1[arg_2]\n arg_3 = arg_5[CREDENTIALS_API_KEY_KEY]\n arg_4 = arg_5[CREDENTIALS_SITE_KEY]\n except KeyError:\n pass\n\n return (arg_3, arg_4)"} +{"_id": "doc_8232", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"\n Given an API key, a site url and a credentials file path, runs through a prioritized list of credential sources to find credentials.\n\n Specifically, this method ranks credential priority as follows:\n 1. Those passed in as the first two parameters to this method\n 2. Those found in the environment as variables\n 3. Those found in the credentials file at the profile specified\n by the profile environment variable\n 4. Those found in the default stanza in the credentials file\n\n :param api_key: A Citrination API Key or None\n :param site: A Citrination site URL or None\n :param cred_file: The path to a credentials file\n \"\"\"\n arg_4, arg_5 = get_credentials_from_file(arg_2)\n if arg_0 is None:\n arg_0 = os.environ.get(citr_env_vars.CITRINATION_API_KEY)\n if arg_0 is None or len(arg_0) == 0:\n arg_0 = arg_4\n\n if arg_1 is None:\n arg_1 = os.environ.get(citr_env_vars.CITRINATION_SITE)\n if arg_1 is None or len(arg_1) == 0:\n arg_1 = arg_5\n if arg_1 is None:\n arg_1 = \"https://citrination.com\"\n\n return arg_0, arg_1"} +{"_id": "doc_8233", "title": "", "text": "def Func(arg_0, arg_1, arg_2=\".\", arg_3=False):\n \"\"\"\n Returns the number of files matching a pattern in a dataset.\n\n :param dataset_id: The ID of the dataset to search for files.\n :type dataset_id: int\n :param glob: A pattern which will be matched against files in the dataset.\n :type glob: str\n :param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.\n :type is_dir: bool\n :return: The number of matching files\n :rtype: int\n \"\"\"\n arg_4 = arg_0.list_files(arg_1, arg_2, arg_3)\n return len(arg_4)"} +{"_id": "doc_8234", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3 = None):\n \"\"\"\n Retrieves a PIF from a given dataset.\n\n :param dataset_id: The id of the dataset to retrieve PIF from\n :type dataset_id: int\n :param uid: The uid of the PIF to retrieve\n :type uid: str\n :param dataset_version: The dataset version to look for the PIF in. 
If nothing is supplied, the latest dataset version will be searched\n :type dataset_version: int\n :return: A :class:`Pif` object\n :rtype: :class:`Pif`\n \"\"\"\n arg_4 = \"An error occurred retrieving PIF {}\".format(arg_2)\n if arg_3 == None:\n arg_5 = arg_0._get(routes.pif_dataset_uid(arg_1, arg_2), arg_4=arg_4)\n else:\n arg_5 = arg_0._get(routes.pif_dataset_version_uid(arg_1, arg_2, arg_3), arg_4=arg_4)\n\n return pif.loads(arg_5.content.decode(\"utf-8\"))"} +{"_id": "doc_8235", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieves the set of columns from the combination of dataset ids given\n\n :param dataset_ids: The id of the dataset to retrieve columns from\n :type dataset_ids: list of int\n :return: A list of column names from the dataset ids given.\n :rtype: list of str\n \"\"\"\n if not isinstance(arg_1, list):\n arg_1 = [arg_1]\n\n arg_2 = {\n \"dataset_ids\":\n arg_1\n }\n\n arg_3 = \"Failed to get available columns in dataset(s) {}\".format(arg_1)\n\n return arg_0._get_success_json(arg_0._post_json(\n 'v1/datasets/get-available-columns', arg_2, arg_3=arg_3))['data']"} +{"_id": "doc_8236", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Generates a default search templates from the available columns in the dataset ids given.\n\n :param dataset_ids: The id of the dataset to retrieve files from\n :type dataset_ids: list of int\n :return: A search template based on the columns in the datasets given\n \"\"\"\n\n arg_2 = {\n \"dataset_ids\":\n arg_1\n }\n\n arg_3 = \"Failed to generate a search template from columns in dataset(s) {}\".format(arg_1)\n\n return arg_0._get_success_json(arg_0._post_json(\n 'v1/search_templates/builders/from-dataset-ids', arg_2, arg_3=arg_3))['data']"} +{"_id": "doc_8237", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Returns a new search template, but the new template has only the extract_as_keys given.\n\n :param extract_as_keys: List of extract as keys to keep\n :param search_template: The search template to prune\n :return: New search template with pruned columns\n \"\"\"\n\n arg_3 = {\n \"extract_as_keys\":\n arg_1,\n \"search_template\":\n arg_2\n }\n\n arg_4 = \"Failed to prune a search template\"\n\n return arg_0._get_success_json(arg_0._post_json(\n 'v1/search_templates/prune-to-extract-as', arg_3, arg_4=arg_4))['data']"} +{"_id": "doc_8238", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Make a copy of a dictionary with all keys converted to camel case. 
This is just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary.\n\n :param obj: Dictionary to convert keys to camel case.\n :return: Dictionary with the input values and all keys in camel case\n \"\"\"\n return dict((to_camel_case(arg_2), arg_3) for (arg_2, arg_3) in arg_1.items())"} +{"_id": "doc_8239", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Runs the template against the validation endpoint, returns a message indicating status of the templte\n\n :param ml_template: Template to Func\n :return: OK or error message if validation failed\n \"\"\"\n\n arg_2 = {\n \"ml_template\":\n arg_1\n }\n\n arg_3 = \"ML template validation invoke failed\"\n\n arg_4 = arg_0._get_success_json(arg_0._post_json(\n 'ml_templates/Func', arg_2, arg_3=arg_3))['data']\n if arg_4['valid']:\n return 'OK'\n return arg_4['reason']"} +{"_id": "doc_8240", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute the hamming distance between two hashes\"\"\"\n if len(arg_0) != len(arg_1):\n raise ValueError('Hamming distance requires two strings of equal length')\n\n return sum(map(lambda x: 0 if x[0] == x[1] else 1, zip(arg_0, arg_1)))"} +{"_id": "doc_8241", "title": "", "text": "def Func(arg_0, arg_1=8):\n \"\"\" Compute the average hash of the given image. \"\"\"\n with open(arg_0, 'rb') as f:\n # Open the image, resize it and convert it to black & white.\n arg_2 = Image.open(f).resize((arg_1, arg_1), Image.ANTIALIAS).convert('L')\n arg_3 = list(arg_2.getdata())\n\n arg_4 = sum(arg_3) / len(arg_3)\n\n # Compute the hash based on each pixels value compared to the average.\n arg_5 = \"\".join(map(lambda pixel: '1' if pixel > arg_4 else '0', arg_3))\n arg_6 = \"0{hashlength}x\".format(hashlength=arg_1 ** 2 // 4)\n return int(arg_5, 2).__format__(arg_6)"} +{"_id": "doc_8242", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Set up the Vizio media player platform.\"\"\"\n arg_4 = arg_1.get(CONF_HOST)\n arg_5 = arg_1.get(CONF_ACCESS_TOKEN)\n arg_6 = arg_1.get(CONF_NAME)\n arg_7 = arg_1.get(CONF_VOLUME_STEP)\n arg_8 = arg_1.get(CONF_DEVICE_CLASS)\n arg_9 = VizioDevice(arg_4, arg_5, arg_6, arg_7, arg_8)\n if arg_9.validate_setup() is False:\n _LOGGER.error(\"Failed to set up Vizio platform, \"\n \"please check if host and API key are correct\")\n return\n elif (arg_5 is None or arg_5 == \"\") and arg_8 == \"tv\":\n _LOGGER.error(\"Failed to set up Vizio platform, \"\n \"if device_class is 'tv' then an auth_token needs \"\n \"to be provided, otherwise if device_class is \"\n \"'soundbar' then add the right device_class to config\")\n return\n\n if arg_1.get(CONF_SUPPRESS_WARNING):\n from requests.packages import urllib3\n _LOGGER.warning(\"InsecureRequestWarning is disabled \"\n \"because of Vizio platform configuration\")\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n arg_2([arg_9], True)"} +{"_id": "doc_8243", "title": "", "text": "def Func(arg_0):\n \"\"\"Retrieve latest state of the device.\"\"\"\n arg_1 = arg_0._device.get_power_state()\n\n if arg_1:\n arg_0._state = STATE_ON\n\n arg_3 = arg_0._device.get_current_volume()\n if arg_3 is not None:\n arg_0._volume_level = float(arg_3) / arg_0._max_volume\n\n arg_5 = arg_0._device.get_current_input()\n if arg_5 is not None:\n arg_0._current_input = arg_5.meta_name\n\n arg_7 = arg_0._device.get_inputs()\n if arg_7 is not None:\n arg_0._available_inputs = [arg_5.name for arg_5 in arg_7]\n\n else:\n if arg_1 is None:\n arg_0._state = None\n else:\n arg_0._state = 
STATE_OFF\n\n arg_0._volume_level = None\n arg_0._current_input = None\n arg_0._available_inputs = None"} +{"_id": "doc_8244", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Mute the volume.\"\"\"\n if arg_1:\n arg_0._device.mute_on()\n else:\n arg_0._device.mute_off()"} +{"_id": "doc_8245", "title": "", "text": "def Func(arg_0):\n \"\"\"Increasing volume of the device.\"\"\"\n arg_0._volume_level += arg_0._volume_step / arg_0._max_volume\n arg_0._device.vol_up(num=arg_0._volume_step)"} +{"_id": "doc_8246", "title": "", "text": "def Func(arg_0):\n \"\"\"Decreasing volume of the device.\"\"\"\n arg_0._volume_level -= arg_0._volume_step / arg_0._max_volume\n arg_0._device.vol_down(num=arg_0._volume_step)"} +{"_id": "doc_8247", "title": "", "text": "def Func(arg_0):\n '''Restores the starting position.'''\n arg_0.piece_bb = [\n BB_VOID, # NONE\n BB_RANK_C | BB_RANK_G, # PAWN\n BB_A1 | BB_I1 | BB_A9 | BB_I9, # LANCE\n BB_A2 | BB_A8 | BB_I2 | BB_I8, # KNIGHT\n BB_A3 | BB_A7 | BB_I3 | BB_I7, # SILVER\n BB_A4 | BB_A6 | BB_I4 | BB_I6, # GOLD\n BB_B2 | BB_H8, # BISHOP\n BB_B8 | BB_H2, # ROOK\n BB_A5 | BB_I5, # KING\n BB_VOID, # PROM_PAWN\n BB_VOID, # PROM_LANCE\n BB_VOID, # PROM_KNIGHT\n BB_VOID, # PROM_SILVER\n BB_VOID, # PROM_BISHOP\n BB_VOID, # PROM_ROOK\n ]\n\n arg_0.pieces_in_hand = [collections.Counter(), collections.Counter()]\n\n arg_0.occupied = Occupied(BB_RANK_G | BB_H2 | BB_H8 | BB_RANK_I, BB_RANK_A | BB_B2 | BB_B8 | BB_RANK_C)\n\n arg_0.king_squares = [I5, A5]\n arg_0.pieces = [NONE for arg_6 in SQUARES]\n\n for arg_6 in SQUARES:\n arg_7 = BB_SQUARES[arg_6]\n for arg_8 in PIECE_TYPES:\n if arg_7 & arg_0.piece_bb[arg_8]:\n arg_0.pieces[arg_6] = arg_8\n\n arg_0.turn = BLACK\n arg_0.move_number = 1\n arg_0.captured_piece_stack = collections.deque()\n arg_0.move_stack = collections.deque()\n arg_0.incremental_zobrist_hash = arg_0.board_zobrist_hash(DEFAULT_RANDOM_ARRAY)\n arg_0.transpositions = collections.Counter((arg_0.zobrist_hash(), ))"} +{"_id": "doc_8248", "title": "", "text": "def Func(arg_0, arg_1):\n '''Gets the piece at the given square.'''\n arg_2 = BB_SQUARES[arg_1]\n arg_3 = int(bool(arg_0.occupied[WHITE] & arg_2))\n\n arg_4 = arg_0.piece_type_at(arg_1)\n if arg_4:\n return Piece(arg_4, arg_3)"} +{"_id": "doc_8249", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''Removes a piece from the given square if present.'''\n arg_3 = arg_0.piece_type_at(arg_1)\n\n if arg_3 == NONE:\n return\n\n if arg_2:\n arg_0.add_piece_into_hand(arg_3, arg_0.turn)\n\n arg_4 = BB_SQUARES[arg_1]\n\n arg_0.piece_bb[arg_3] ^= arg_4\n\n arg_5 = int(bool(arg_0.occupied[WHITE] & arg_4))\n\n arg_0.pieces[arg_1] = NONE\n arg_0.occupied.ixor(arg_4, arg_5, arg_1)\n\n # Update incremental zobrist hash.\n if arg_5 == BLACK:\n arg_7 = (arg_3 - 1) * 2\n else:\n arg_7 = (arg_3 - 1) * 2 + 1\n arg_0.incremental_zobrist_hash ^= DEFAULT_RANDOM_ARRAY[81 * arg_7 + 9 * rank_index(arg_1) + file_index(arg_1)]"} +{"_id": "doc_8250", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=False):\n '''Sets a piece at the given square. 
An existing piece is replaced.'''\n if arg_3:\n arg_0.remove_piece_from_hand(arg_2.piece_type, arg_0.turn)\n\n arg_0.remove_piece_at(arg_1, arg_4)\n\n arg_0.pieces[arg_1] = arg_2.piece_type\n\n arg_6 = BB_SQUARES[arg_1]\n\n arg_7 = arg_2.piece_type\n\n arg_0.piece_bb[arg_7] |= arg_6\n\n if arg_7 == KING:\n arg_0.king_squares[arg_2.color] = arg_1\n\n arg_0.occupied.ixor(arg_6, arg_2.color, arg_1)\n\n # Update incremental zorbist hash.\n if arg_2.color == BLACK:\n arg_10 = (arg_2.piece_type - 1) * 2\n else:\n arg_10 = (arg_2.piece_type - 1) * 2 + 1\n arg_0.incremental_zobrist_hash ^= DEFAULT_RANDOM_ARRAY[81 * arg_10 + 9 * rank_index(arg_1) + file_index(arg_1)]"} +{"_id": "doc_8251", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Checks if the given move would move would leave the king in check or\n put it into check.\n '''\n\n arg_0.push(arg_1)\n arg_2 = arg_0.was_suicide()\n arg_3 = arg_0.was_check_by_dropping_pawn(arg_1)\n arg_0.pop()\n return arg_2 or arg_3"} +{"_id": "doc_8252", "title": "", "text": "def Func(arg_0):\n '''\n Checks if the king of the other side is attacked. Such a position is not\n valid and could only be reached by an illegal move.\n '''\n return arg_0.is_attacked_by(arg_0.turn, arg_0.king_squares[arg_0.turn ^ 1])"} +{"_id": "doc_8253", "title": "", "text": "def Func(arg_0):\n '''Checks if the current position is a checkmate.'''\n if not arg_0.is_check():\n return False\n\n try:\n next(arg_0.generate_legal_moves().__iter__())\n return False\n except StopIteration:\n return True"} +{"_id": "doc_8254", "title": "", "text": "def Func(arg_0):\n '''\n a game is ended if a position occurs for the fourth time\n on consecutive alternating moves.\n '''\n arg_1 = arg_0.zobrist_hash()\n\n # A minimum amount of moves must have been played and the position\n # in question must have appeared at least four times.\n if arg_0.transpositions[arg_1] < 4:\n return False\n\n return True"} +{"_id": "doc_8255", "title": "", "text": "def Func(arg_0):\n '''\n Restores the previous position and returns the last move from the stack.\n '''\n arg_1 = arg_0.move_stack.Func()\n\n # Update transposition table.\n arg_0.transpositions.subtract((arg_0.zobrist_hash(), ))\n\n # Decrement move number.\n arg_0.move_number -= 1\n\n # Restore state.\n arg_2 = arg_0.captured_piece_stack.Func()\n arg_3 = arg_0.turn\n\n # On a null move simply swap the turn.\n if not arg_1:\n arg_0.turn ^= 1\n return arg_1\n\n # Restore the source square.\n arg_4 = arg_0.piece_type_at(arg_1.to_square)\n if arg_1.promotion:\n arg_4 = PIECE_PROMOTED.index(arg_4)\n\n if arg_1.from_square is None:\n arg_0.add_piece_into_hand(arg_4, arg_0.turn ^ 1)\n else:\n arg_0.set_piece_at(arg_1.from_square, Piece(arg_4, arg_0.turn ^ 1))\n\n # Restore target square.\n if arg_2:\n arg_0.remove_piece_from_hand(arg_2, arg_3 ^ 1)\n arg_0.set_piece_at(arg_1.to_square, Piece(arg_2, arg_3))\n else:\n arg_0.remove_piece_at(arg_1.to_square)\n\n # Swap turn.\n arg_0.turn ^= 1\n\n return arg_1"} +{"_id": "doc_8256", "title": "", "text": "def Func(arg_0):\n '''\n Gets an SFEN representation of the current position.\n '''\n Func = []\n arg_2 = 0\n\n # Position part.\n for arg_3 in SQUARES:\n arg_4 = arg_0.piece_at(arg_3)\n\n if not arg_4:\n arg_2 += 1\n else:\n if arg_2:\n Func.append(str(arg_2))\n arg_2 = 0\n Func.append(arg_4.symbol())\n\n if BB_SQUARES[arg_3] & BB_FILE_1:\n if arg_2:\n Func.append(str(arg_2))\n arg_2 = 0\n\n if arg_3 != I1:\n Func.append('/')\n\n Func.append(' ')\n\n # Side to move.\n if arg_0.turn == WHITE:\n Func.append('w')\n 
else:\n Func.append('b')\n\n Func.append(' ')\n\n # Pieces in hand\n arg_5 = 0\n for arg_6 in COLORS:\n arg_7 = arg_0.pieces_in_hand[arg_6]\n arg_5 += len(arg_7)\n for arg_8 in sorted(arg_7.keys(), reverse=True):\n if arg_7[arg_8] >= 1:\n if arg_7[arg_8] > 1:\n Func.append(str(arg_7[arg_8]))\n arg_4 = Piece(arg_8, arg_6)\n Func.append(arg_4.symbol())\n if arg_5 == 0:\n Func.append('-')\n\n Func.append(' ')\n\n # Move count\n Func.append(str(arg_0.move_number))\n\n return ''.join(Func)"} +{"_id": "doc_8257", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Parses a move in standard coordinate notation, makes the move and puts\n it on the the move stack.\n Raises `ValueError` if neither legal nor a null move.\n Returns the move.\n '''\n arg_2 = Move.from_usi(arg_1)\n arg_0.push(arg_2)\n return arg_2"} +{"_id": "doc_8258", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''\n Returns a Zobrist hash of the current position.\n '''\n # Hash in the board setup.\n Func = arg_0.board_zobrist_hash(arg_1)\n\n if arg_1 is None:\n arg_1 = DEFAULT_RANDOM_ARRAY\n\n if arg_0.turn == WHITE:\n Func ^= arg_1[2268]\n\n # pieces in hand pattern is\n # 19 * 5 * 5 * 5 * 5 * 3 * 3 = 106875 < pow(2, 17)\n # just checking black side is okay in normal state\n arg_3 = (\n arg_0.pieces_in_hand[BLACK][ROOK] * 35625 +\n arg_0.pieces_in_hand[BLACK][BISHOP] * 11875 +\n arg_0.pieces_in_hand[BLACK][GOLD] * 2375 +\n arg_0.pieces_in_hand[BLACK][SILVER] * 475 +\n arg_0.pieces_in_hand[BLACK][KNIGHT] * 95 +\n arg_0.pieces_in_hand[BLACK][LANCE] * 19 +\n arg_0.pieces_in_hand[BLACK][PAWN])\n arg_4 = bit_scan(arg_3)\n while arg_4 != -1 and arg_4 is not None:\n Func ^= arg_1[2269 + arg_4]\n arg_4 = bit_scan(arg_3, arg_4 + 1)\n\n return Func"} +{"_id": "doc_8259", "title": "", "text": "def Func(arg_0):\n '''\n Gets the Func `p`, `l`, `n`, etc.\n '''\n if arg_0.color == BLACK:\n return PIECE_SYMBOLS[arg_0.piece_type].upper()\n else:\n return PIECE_SYMBOLS[arg_0.piece_type]"} +{"_id": "doc_8260", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Creates a piece instance from a piece symbol.\n Raises `ValueError` if the symbol is invalid.\n '''\n if arg_1.lower() == arg_1:\n return arg_0(PIECE_SYMBOLS.index(arg_1), WHITE)\n else:\n return arg_0(PIECE_SYMBOLS.index(arg_1.lower()), BLACK)"} +{"_id": "doc_8261", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Parses an USI string.\n Raises `ValueError` if the USI string is invalid.\n '''\n if arg_1 == '0000':\n return arg_0.null()\n elif len(arg_1) == 4:\n if arg_1[1] == '*':\n arg_2 = Piece.from_symbol(arg_1[0])\n return arg_0(None, SQUARE_NAMES.index(arg_1[2:4]), False, arg_2.piece_type)\n else:\n return arg_0(SQUARE_NAMES.index(arg_1[0:2]), SQUARE_NAMES.index(arg_1[2:4]))\n elif len(arg_1) == 5 and arg_1[4] == '+':\n return arg_0(SQUARE_NAMES.index(arg_1[0:2]), SQUARE_NAMES.index(arg_1[2:4]), True)\n else:\n raise ValueError('expected usi string to be of length 4 or 5')"} +{"_id": "doc_8262", "title": "", "text": "def Func(arg_0):\n '''Accept a string and parse it into many commits.\n Parse and yield each commit-dictionary.\n This function is a generator.\n '''\n arg_1 = RE_COMMIT.finditer(arg_0)\n for arg_2 in arg_1:\n arg_3 = arg_2.groups()[0]\n arg_4 = RE_COMMIT.match(arg_3).groupdict()\n arg_5 = parse_commit(arg_4)\n yield arg_5"} +{"_id": "doc_8263", "title": "", "text": "def Func(arg_0):\n '''Accept a parsed single commit. 
Some of the named groups\n require further processing, so parse those groups.\n Return a dictionary representing the completely parsed\n commit.\n '''\n arg_1 = {}\n arg_1['commit'] = arg_0['commit']\n arg_1['tree'] = arg_0['tree']\n arg_2 = arg_0['parents']\n arg_1['parents'] = [\n parse_parent_line(parentline)\n for parentline in\n arg_2.splitlines()\n ]\n arg_1['author'] = parse_author_line(arg_0['author'])\n arg_1['committer'] = Functer_line(arg_0['committer'])\n arg_3 = [\n parse_message_line(msgline)\n for msgline in\n arg_0['message'].split(\"\\n\")\n ]\n arg_1['message'] = \"\\n\".join(\n msgline\n for msgline in\n arg_3\n if msgline is not None\n )\n arg_1['changes'] = [\n parse_numstat_line(numstat)\n for numstat in\n arg_0['numstats'].splitlines()\n ]\n return arg_1"} +{"_id": "doc_8264", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Adds a organization-course link to the system\n \"\"\"\n _validate_course_key(arg_1)\n _validate_organization_data(arg_0)\n data.create_organization_course(\n organization=arg_0,\n arg_1=arg_1\n )"} +{"_id": "doc_8265", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Course key object validation\n \"\"\"\n if arg_0 is None:\n return False\n try:\n CourseKey.from_string(text_type(arg_0))\n except (InvalidKeyError, UnicodeDecodeError):\n return False\n return True"} +{"_id": "doc_8266", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Inactivates an activated organization as well as any active relationships\n \"\"\"\n [Func_course_relationship(arg_1) for arg_1\n in internal.OrganizationCourse.objects.filter(organization_id=arg_0.id, active=True)]\n\n [_inactivate_record(arg_1) for arg_1\n in internal.Organization.objects.filter(id=arg_0.id, active=True)]"} +{"_id": "doc_8267", "title": "", "text": "def Func(arg_0): # pylint: disable=invalid-name\n \"\"\"\n Activates an inactive organization-course relationship\n \"\"\"\n # If the relationship doesn't exist or the organization isn't active we'll want to raise an error\n arg_0 = internal.OrganizationCourse.objects.get(\n id=arg_0.id,\n active=False,\n organization__active=True\n )\n _activate_record(arg_0)"} +{"_id": "doc_8268", "title": "", "text": "def Func(arg_0): # pylint: disable=invalid-name\n \"\"\"\n Inactivates an active organization-course relationship\n \"\"\"\n arg_0 = internal.OrganizationCourse.objects.get(\n id=arg_0.id,\n active=True\n )\n _inactivate_record(arg_0)"} +{"_id": "doc_8269", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the set of courses currently linked to the specified organization\n \"\"\"\n arg_1 = serializers.deserialize_organization(arg_0)\n arg_2 = internal.OrganizationCourse.objects.filter(\n arg_0=arg_1,\n active=True\n ).select_related('organization')\n return [serializers.serialize_organization_with_course(arg_0) for arg_0 in arg_2]"} +{"_id": "doc_8270", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the organizations linked to the specified course\n \"\"\"\n arg_1 = internal.OrganizationCourse.objects.filter(\n course_id=text_type(arg_0),\n active=True\n ).select_related('organization')\n return [serializers.serialize_organization_with_course(arg_2) for arg_2 in arg_1]"} +{"_id": "doc_8271", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Organization dict-to-object serialization\n \"\"\"\n return models.Organization(\n id=arg_0.get('id'),\n name=arg_0.get('name', ''),\n short_name=arg_0.get('short_name', ''),\n description=arg_0.get('description', ''),\n logo=arg_0.get('logo', '')\n )"} +{"_id": "doc_8272", "title": 
"", "text": "def Func(arg_0: arg_1, arg_2: arg_3[arg_4]):\n \"\"\"Load's config then runs Django's execute_from_command_line\"\"\"\n with load_config_from_cli(arg_0, arg_2) as args:\n from django.core.management import execute_from_command_line\n execute_from_command_line(args)"} +{"_id": "doc_8273", "title": "", "text": "def Func(arg_0: arg_1.ArgumentParser, arg_3: arg_4):\n \"\"\"Adds argument for config to existing argparser\"\"\"\n arg_5 = \"Config file.\"\n if arg_3.file_env_var:\n arg_5 += (\" Can also be configured via the \"\n \"environment variable: {}\".format(arg_3.file_env_var))\n if arg_3.default_files:\n arg_5 += (\" Defaults to the first file that exists from \"\n \"[{}].\".format(', '.join(arg_3.default_files)))\n arg_0.add_argument('-C', '--config', metavar='FILE', arg_5=arg_5)"} +{"_id": "doc_8274", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = None):\n \"\"\"Find config file and set values\"\"\"\n if arg_1:\n arg_0.config_file = _find_file(arg_1)\n else:\n if arg_0.file_env_var and arg_0.file_env_var in os.environ:\n arg_0.config_file = _find_file(os.environ[arg_0.file_env_var])\n if not arg_0.config_file:\n for arg_1 in arg_0.default_files:\n arg_0.config_file = _find_file(arg_1, require=False)\n if arg_0.config_file:\n break\n if arg_0.config_file:\n arg_4 = _Func_config(arg_0.config_file)\n log.info(\"Loading config from %s\", arg_0.config_file)\n else:\n arg_4 = {}\n log.info(\"No config file specified. \"\n \"Loading with environment variables.\")\n arg_0.set_values(arg_4)"} +{"_id": "doc_8275", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Dumps initial config in YAML\n \"\"\"\n import ruamel.yaml\n arg_2 = ruamel.yaml.YAML()\n arg_3 = StringIO()\n arg_2.dump(arg_0.get_initial(**arg_1), stream=arg_3)\n arg_3.seek(0)\n arg_4 = arg_2.load(arg_3)\n if arg_0.__doc__:\n arg_4.yaml_set_start_comment(\n '\\n' + arg_0.__doc__ + '\\n\\n')\n for arg_5 in arg_4.keys():\n if arg_0._values[arg_5].help:\n arg_4.yaml_set_comment_before_after_key(\n arg_5, before='\\n' + arg_0._values[arg_5].help)\n arg_3 = StringIO()\n arg_2.dump(arg_4, arg_3)\n arg_3.seek(0)\n return arg_3.read()"} +{"_id": "doc_8276", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Documents values in markdown\n \"\"\"\n arg_1 = []\n if arg_0.__doc__:\n arg_1.extend(['# {}'.format(arg_0.__doc__), ''])\n for arg_2, arg_3 in arg_0._values.items():\n arg_1.append('* **{}** '.format(arg_2))\n if arg_3.required:\n arg_1[-1] = arg_1[-1] + '_REQUIRED_ '\n if arg_3.help:\n arg_1.append(' {} '.format(arg_3.help))\n arg_1.append(' type: `{}` '.format(arg_3.cast_as.__name__))\n if arg_3.default is not None:\n arg_1.append(' default: `{}` '.format(arg_3.default))\n return '\\n'.join(arg_1)"} +{"_id": "doc_8277", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"converts string to type requested by `Func_as`\"\"\"\n try:\n return getattr(arg_0, 'Func_as_{}'.format(\n arg_0.Func_as.__name__.lower()))(arg_1)\n except AttributeError:\n return arg_0.Func_as(arg_1)"} +{"_id": "doc_8278", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n loop through all the images and find the ones\n that have the best bytez to even make them a candidate\n \"\"\"\n arg_2 = 0\n arg_3 = 15728640\n arg_4 = []\n for arg_5 in arg_1:\n if arg_2 > 30:\n return arg_4\n arg_6 = arg_0.parser.getAttribute(arg_5, attr='src')\n arg_6 = arg_0.build_image_path(arg_6)\n arg_6 = arg_0.add_schema_if_none(arg_6)\n arg_7 = arg_0.get_local_image(arg_6)\n if arg_7:\n arg_8 = arg_7.bytes\n if (arg_8 == 0 or arg_8 > 
arg_0.images_min_bytes) and arg_8 < arg_3:\n arg_4.append(arg_5)\n else:\n arg_1.remove(arg_5)\n arg_2 += 1\n return arg_4 if len(arg_4) > 0 else None"} +{"_id": "doc_8279", "title": "", "text": "def Func(arg_0):\n \"\"\"\\\n checks to see if we were able to\n find open link_src on this page\n \"\"\"\n arg_1 = arg_0.article.raw_doc\n arg_2 = arg_0.parser.getElementsByTag(arg_1, tag='link', attr='rel', value='image_src')\n for arg_3 in arg_2:\n arg_4 = arg_0.parser.getAttribute(arg_3, attr='href')\n if arg_4:\n return arg_0.get_image(arg_4, extraction_type='linktag')\n return None"} +{"_id": "doc_8280", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n returns the bytes of the image file on disk\n \"\"\"\n return ImageUtils.store_image(arg_0.fetcher, arg_0.article.link_hash, arg_1, arg_0.config)"} +{"_id": "doc_8281", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create a video object from a video embed\n \"\"\"\n arg_2 = Video()\n arg_2._embed_code = arg_0.get_embed_code(arg_1)\n arg_2._embed_type = arg_0.get_embed_type(arg_1)\n arg_2._width = arg_0.get_width(arg_1)\n arg_2._height = arg_0.get_height(arg_1)\n arg_2._src = arg_0.get_src(arg_1)\n arg_2._provider = arg_0.get_provider(arg_2.src)\n return arg_2"} +{"_id": "doc_8282", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n adds any siblings that may have a decent score to this node\n \"\"\"\n if arg_1.tag == 'p' and arg_0.parser.getText(arg_1):\n arg_3 = arg_1\n if arg_3.tail:\n arg_3 = deepcopy(arg_3)\n arg_3.tail = ''\n return [arg_3]\n else:\n arg_5 = arg_0.parser.getElementsByTag(arg_1, tag='p')\n if arg_5 is None:\n return None\n\n arg_6 = list()\n for arg_7 in arg_5:\n arg_8 = arg_0.parser.getText(arg_7)\n if arg_8: # no len(text) > 0\n arg_9 = arg_0.stopwords_class(language=arg_0.get_language()).get_stopword_count(arg_8)\n arg_10 = arg_9.get_stopword_count()\n arg_11 = float(.30)\n arg_12 = arg_0.is_highlink_density(arg_7)\n arg_13 = float(arg_2 * arg_11)\n if arg_13 < arg_10 and not arg_12:\n arg_14 = arg_0.parser.createElement(tag='p', arg_8=arg_8, arg_4=None)\n arg_6.append(arg_14)\n return arg_6"} +{"_id": "doc_8283", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\\\n returns a list of nodes we want to search\n on like paragraphs and tables\n \"\"\"\n Func = []\n\n for arg_3 in arg_1:\n for arg_4 in ['p', 'pre', 'td']:\n arg_5 = arg_0.parser.getElementsByTag(arg_3, arg_4=arg_4)\n Func += arg_5\n return Func"} +{"_id": "doc_8284", "title": "", "text": "def Func(arg_0):\n \"\"\"\\\n remove any divs that looks like non-content,\n clusters of links, or paras with no gusto\n \"\"\"\n arg_1 = ['p']\n if arg_0.config.parse_lists:\n arg_1.extend(['ul', 'ol'])\n if arg_0.config.parse_headers:\n arg_1.extend(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])\n\n arg_2 = arg_0.article.top_node\n arg_3 = arg_0.add_siblings(arg_2)\n for arg_4 in arg_0.parser.getChildren(arg_3):\n arg_5 = arg_0.parser.getTag(arg_4)\n if arg_5 not in arg_1:\n if (arg_0.is_highlink_density(arg_4) or arg_0.is_table_and_no_para_exist(arg_4) or\n not arg_0.is_nodescore_threshold_met(arg_3, arg_4)):\n arg_0.parser.remove(arg_4)\n return arg_3"} +{"_id": "doc_8285", "title": "", "text": "def Func(arg_0):\n \"\"\"\\\n Fetch the article title and analyze it\n \"\"\"\n arg_1 = ''\n\n # rely on opengraph in case we have the data\n if \"title\" in list(arg_0.article.opengraph.keys()):\n return arg_0.clean_title(arg_0.article.opengraph['title'])\n elif arg_0.article.schema and \"headline\" in arg_0.article.schema:\n return 
arg_0.clean_title(arg_0.article.schema['headline'])\n\n # try to fetch the meta headline\n arg_2 = arg_0.parser.getElementsByTag(arg_0.article.doc,\n tag=\"meta\",\n attr=\"name\",\n value=\"headline\")\n if arg_2 is not None and len(arg_2) > 0:\n arg_1 = arg_0.parser.getAttribute(arg_2[0], 'content')\n return arg_0.clean_title(arg_1)\n\n # otherwise use the title meta\n arg_3 = arg_0.parser.getElementsByTag(arg_0.article.doc, tag='title')\n if arg_3 is not None and len(arg_3) > 0:\n arg_1 = arg_0.parser.getText(arg_3[0])\n return arg_0.clean_title(arg_1)\n\n return arg_1"} +{"_id": "doc_8286", "title": "", "text": "def Func(arg_0):\n \"\"\"\n if the article has meta canonical link set in the url\n \"\"\"\n if arg_0.article.final_url:\n arg_1 = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'}\n arg_2 = arg_0.parser.getElementsByTag(arg_0.article.doc, **arg_1)\n if arg_2 is not None and len(arg_2) > 0:\n arg_3 = arg_0.parser.getAttribute(arg_2[0], 'href')\n if arg_3:\n arg_3 = arg_3.strip()\n arg_4 = urlparse(arg_3)\n if not arg_4.hostname:\n arg_5 = urlparse(arg_0.article.final_url)\n arg_6 = '%s://%s' % (arg_5.scheme, arg_5.hostname)\n arg_3 = urljoin(arg_6, arg_3)\n return arg_3\n return arg_0.article.final_url"} +{"_id": "doc_8287", "title": "", "text": "def Func(arg_0):\n ''' Close the network connection and perform any other required cleanup\n\n Note:\n Auto Funcd when using goose as a context manager or when garbage collected '''\n if arg_0.fetcher is not None:\n arg_0.shutdown_network()\n arg_0.finalizer.atexit = False"} +{"_id": "doc_8288", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n ''' Extract the most likely article content from the html page\n\n Args:\n url (str): URL to pull and parse\n raw_html (str): String representation of the HTML page\n Returns:\n Article: Representation of the article contents \\\n including other parsed and Funced metadata '''\n arg_3 = CrawlCandidate(arg_0.config, arg_1, arg_2)\n return arg_0.__crawl(arg_3)"} +{"_id": "doc_8289", "title": "", "text": "def Func(arg_0, arg_1='utf-8', arg_2=False, arg_3='strict'):\n \"\"\"\n Returns a unicode object representing 's'. Treats bytestrings using the\n 'encoding' codec.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n \"\"\"\n # if isinstance(s, Promise):\n # # The input is the result of a gettext_lazy() call.\n # return s\n return force_unicode(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8290", "title": "", "text": "def Func(arg_0, arg_1='utf-8', arg_2=False, arg_3='strict'):\n \"\"\"\n Returns a bytestring version of 's', encoded as specified in 'encoding'.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n \"\"\"\n if arg_2 and isinstance(arg_0, (type(None), int)):\n return arg_0\n # if isinstance(s, Promise):\n # return unicode(s).encode(encoding, errors)\n if isinstance(arg_0, str):\n try:\n return arg_0.encode(arg_1, arg_3)\n except UnicodeEncodeError:\n return arg_0.encode('utf-8', arg_3)\n elif not isinstance(arg_0, bytes):\n try:\n return str(arg_0).encode(arg_1, arg_3)\n except UnicodeEncodeError:\n if isinstance(arg_0, Exception):\n # An Exception subclass containing non-ASCII data that doesn't\n # know how to print itself properly. 
We shouldn't raise a\n # further exception.\n return ' '.join([Func(arg_4, arg_1, arg_2,\n arg_3) for arg_4 in arg_0])\n return str(arg_0).encode(arg_1, arg_3)\n else:\n return arg_0"} +{"_id": "doc_8291", "title": "", "text": "def Func(arg_0):\n \"\"\"Add URLs needed to handle image uploads.\"\"\"\n arg_1 = patterns(\n '',\n url(r'^upload/$', arg_0.admin_site.admin_view(arg_0.handle_upload), name='quill-file-upload'),\n )\n return arg_1 + super(QuillAdmin, arg_0).Func()"} +{"_id": "doc_8292", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Handle file uploads from WYSIWYG.\"\"\"\n if arg_1.method != 'POST':\n raise Http404\n\n if arg_1.is_ajax():\n try:\n arg_2 = arg_1.GET['quillUploadFile']\n arg_3 = arg_1\n arg_4 = True\n except KeyError:\n return HttpResponseBadRequest(\"Invalid file upload.\")\n else:\n if len(arg_1.FILES) != 1:\n return HttpResponseBadRequest(\"Can only upload 1 file at a time.\")\n try:\n arg_3 = arg_1.FILES['quillUploadFile']\n arg_2 = arg_3.name\n arg_4 = False\n except KeyError:\n return HttpResponseBadRequest('Missing image `quillUploadFile`.')\n\n arg_5 = save_file(arg_3, arg_2, arg_4, default_storage)\n arg_6 = {}\n arg_6['url'] = arg_5\n\n # Response content type needs to be text/html here or else\n # IE will try to download the file.\n return HttpResponse(json.dumps(arg_6), content_type=\"text/html; charset=utf-8\")"} +{"_id": "doc_8293", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3={}):\n \"\"\"Render the Quill WYSIWYG.\"\"\"\n if arg_2 is None:\n arg_2 = ''\n arg_4 = arg_0.build_attrs(arg_3, arg_1=arg_1)\n arg_5 = apps.get_app_config('quill')\n arg_6 = getattr(arg_5, arg_0.config)\n\n return mark_safe(Func_to_string(arg_6['template'], {\n 'final_attrs': flatatt(arg_4),\n 'value': arg_2,\n 'id': arg_4['id'],\n 'config': arg_0.config,\n }))"} +{"_id": "doc_8294", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Get the form for field.\"\"\"\n arg_2 = {\n 'form_class': RichTextFormField,\n 'config': arg_0.config,\n }\n arg_2.update(arg_1)\n return super(RichTextField, arg_0).Func(**arg_2)"} +{"_id": "doc_8295", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Resize an image for metadata tags, and return an absolute URL to it.\n \"\"\"\n arg_2 = arg_1.get_rendition(filter='original')\n return arg_0.build_absolute_uri(arg_2.url)"} +{"_id": "doc_8296", "title": "", "text": "def Func(arg_0):\n \"\"\"Check if ``mdrun`` finished successfully.\n\n Analyses the output from ``mdrun`` in *logfile*. Right now we are\n simply looking for the line \"Finished mdrun on node\" in the last 1kb of\n the file. 
(The file must be seeakable.)\n\n :Arguments:\n *logfile* : filename\n Logfile produced by ``mdrun``.\n\n :Returns: ``True`` if all ok, ``False`` if not finished, and\n ``None`` if the *logfile* cannot be opened\n \"\"\"\n if not os.path.exists(arg_0):\n return None\n with open(arg_0, 'rb') as log:\n log.seek(-1024, 2)\n for arg_1 in log:\n arg_1 = arg_1.decode('ASCII')\n if arg_1.startswith(\"Finished mdrun on\"):\n return True\n return False"} +{"_id": "doc_8297", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Launch local smpd.\"\"\"\n arg_2 = ['smpd', '-s']\n logger.info(\"Starting smpd: \"+\" \".join(arg_2))\n arg_3 = subprocess.call(arg_2)\n return arg_3"} +{"_id": "doc_8298", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Find files from a continuation run\"\"\"\n if arg_1.startswith('.'):\n arg_1 = arg_1[1:]\n arg_2 = glob.glob(arg_0+'.'+arg_1) + glob.glob(arg_0+'.part[0-9][0-9][0-9][0-9].'+arg_1)\n arg_2.sort() # at least some rough sorting...\n return arg_2"} +{"_id": "doc_8299", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"Run ``gromacs.grompp`` and return the total charge of the system.\n\n :Arguments:\n The arguments are the ones one would pass to :func:`gromacs.grompp`.\n :Returns:\n The total charge as reported\n\n Some things to keep in mind:\n\n * The stdout output of grompp is only shown when an error occurs. For\n debugging, look at the log file or screen output and try running the\n normal :func:`gromacs.grompp` command and analyze the output if the\n debugging messages are not sufficient.\n\n * Check that ``qtot`` is correct. Because the function is based on pattern\n matching of the informative output of :program:`grompp` it can break when\n the output format changes. This version recognizes lines like ::\n\n ' System has non-zero total charge: -4.000001e+00'\n\n using the regular expression\n :regexp:`System has non-zero total charge: *(?P[-+]?\\d*\\.\\d+([eE][-+]\\d+)?)`.\n\n \"\"\"\n arg_2 = re.compile('System has non-zero total charge: *(?P[-+]?\\d*\\.\\d+([eE][-+]\\d+)?)')\n # make sure to capture ALL output\n arg_1['stdout'] = False\n arg_1['stderr'] = False\n arg_3, arg_4, arg_5 = grompp_warnonly(*arg_0, **arg_1)\n arg_6 = \"\\n\".join([x for x in [arg_4, arg_5] if x is not None])\n if arg_3 != 0:\n # error occured and we want to see the whole output for debugging\n arg_7 = \"Func() failed. See warning and screen output for clues.\"\n logger.error(arg_7)\n import sys\n sys.stderr.write(\"=========== grompp (stdout/stderr) ============\\n\")\n sys.stderr.write(arg_6)\n sys.stderr.write(\"===============================================\\n\")\n sys.stderr.flush()\n raise GromacsError(arg_3, arg_7)\n arg_8 = 0\n for arg_9 in arg_6.split('\\n'):\n arg_10 = arg_2.search(arg_9)\n if arg_10:\n arg_8 = float(arg_10.group('qtot'))\n break\n logger.info(\"system total charge qtot = {qtot!r}\".format(**vars()))\n return arg_8"} +{"_id": "doc_8300", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"Create a processed topology.\n\n The processed (or portable) topology file does not contain any\n ``#include`` statements and hence can be easily copied around. 
It\n also makes it possible to re-grompp without having any special itp\n files available.\n\n :Arguments:\n *topol*\n topology file\n *struct*\n coordinat (structure) file\n\n :Keywords:\n *processed*\n name of the new topology file; if not set then it is named like\n *topol* but with ``pp_`` prepended\n *includes*\n path or list of paths of directories in which itp files are\n searched for\n *grompp_kwargs**\n other options for :program:`grompp` such as ``maxwarn=2`` can\n also be supplied\n\n :Returns: full path to the processed topology\n \"\"\"\n arg_3, arg_4 = os.path.split(arg_0)\n arg_5 = arg_2.pop('processed', os.path.join(arg_3, 'pp_'+arg_4))\n arg_6, arg_7 = filter_grompp_options(**arg_2)\n arg_7 = add_mdp_includes(arg_0, arg_7)\n with tempfile.NamedTemporaryFile(suffix='.mdp') as mdp:\n mdp.write('; empty mdp file\\ninclude = {include!s}\\n'.format(**arg_7))\n mdp.flush()\n arg_6['p'] = arg_0\n arg_6['pp'] = arg_5\n arg_6['f'] = mdp.name\n arg_6['c'] = arg_1\n arg_6['v'] = False\n try:\n gromacs.grompp(**arg_6)\n finally:\n utilities.unlink_gmx('topol.tpr', 'mdout.mdp')\n return utilities.realpath(arg_5)"} +{"_id": "doc_8301", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Primitive text file stream editor.\n\n This function can be used to edit free-form text files such as the\n topology file. By default it does an **in-place edit** of\n *filename*. If *newname* is supplied then the edited\n file is written to *newname*.\n\n :Arguments:\n *filename*\n input text file\n *substitutions*\n substitution commands (see below for format)\n *newname*\n output filename; if ``None`` then *filename* is changed in\n place [``None``]\n\n *substitutions* is a list of triplets; the first two elements are regular\n expression strings, the last is the substitution value. It mimics\n ``sed`` search and replace. The rules for *substitutions*:\n\n .. productionlist::\n substitutions: \"[\" search_replace_tuple, ... \"]\"\n search_replace_tuple: \"(\" line_match_RE \",\" search_RE \",\" replacement \")\"\n line_match_RE: regular expression that selects the line (uses match)\n search_RE: regular expression that is searched in the line\n replacement: replacement string for search_RE\n\n Running :func:`Func` does pretty much what a simple ::\n\n sed /line_match_RE/s/search_RE/replacement/\n\n with repeated substitution commands does.\n\n Special replacement values:\n - ``None``: the rule is ignored\n - ``False``: the line is deleted (even if other rules match)\n\n .. note::\n\n * No sanity checks are performed and the substitutions must be supplied\n exactly as shown.\n * All substitutions are applied to a line; thus the order of the substitution\n commands may matter when one substitution generates a match for a subsequent rule.\n * If replacement is set to ``None`` then the whole expression is ignored and\n whatever is in the template is used. 
To unset values you must provide an\n empty string or similar.\n * Delete a matching line if replacement=``False``.\n \"\"\"\n if arg_2 is None:\n arg_2 = arg_0\n\n # No sanity checks (figure out later how to give decent diagnostics).\n # Filter out any rules that have None in replacement.\n arg_3 = [{'lRE': re.compile(str(lRE)),\n 'sRE': re.compile(str(sRE)),\n 'repl': repl}\n for lRE,sRE,repl in arg_1 if repl is not None]\n\n with tempfile.TemporaryFile() as target:\n with open(arg_0, 'rb') as src:\n logger.info(\"editing txt = {0!r} ({1:d} substitutions)\".format(arg_0, len(arg_1)))\n for arg_4 in src:\n arg_4 = arg_4.decode(\"utf-8\")\n arg_5 = True\n for arg_6 in arg_3:\n arg_7 = arg_6['lRE'].match(arg_4)\n if arg_7: # apply substitution to this line?\n logger.debug('match: '+arg_4.rstrip())\n if arg_6['repl'] is False: # special rule: delete line\n arg_5 = False\n else: # standard replacement\n arg_4 = arg_6['sRE'].sub(str(arg_6['repl']), arg_4)\n logger.debug('replaced: '+arg_4.rstrip())\n if arg_5:\n target.write(arg_4.encode('utf-8'))\n else:\n logger.debug(\"Deleting line %r\", arg_4)\n\n target.seek(0)\n with open(arg_2, 'wb') as final:\n shutil.copyfileobj(target, final)\n logger.info(\"edited txt = {newname!r}\".format(**vars()))"}
{"_id": "doc_8302", "title": "", "text": "def Func(arg_0):\n \"\"\"Delete all frames.\"\"\"\n for arg_1 in glob.glob(arg_0.frameglob):\n os.unlink(arg_1)"}
{"_id": "doc_8303", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns resid in the Gromacs index by transforming with offset.\"\"\"\n try:\n Func = int(arg_0.offset[arg_1])\n except (TypeError, IndexError):\n Func = arg_1 + arg_0.offset\n except KeyError:\n raise KeyError(\"offset must be a dict that contains the gmx resid for {0:d}\".format(arg_1))\n return Func"}
{"_id": "doc_8304", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3='|', arg_4=False):\n \"\"\"Combine individual groups into a single one and write output.\n\n :Keywords:\n name_all : string\n Name of the Funcd group, ``None`` generates a name. [``None``]\n out_ndx : filename\n Name of the output file that will contain the individual groups\n and the Funcd group. If ``None`` then default from the class\n constructor is used. [``None``]\n operation : character\n Logical operation that is used to generate the Funcd group from\n the individual groups: \"|\" (OR) or \"&\" (AND); if set to ``False``\n then no Funcd group is created and only the individual groups\n are written. [\"|\"]\n defaultgroups : bool\n ``True``: append everything to the default groups produced by\n :program:`make_ndx` (or rather, the groups provided in the ndx file on\n initialization --- if this was ``None`` then these are truly default groups);\n ``False``: only use the generated groups\n\n :Returns:\n ``(Funcdgroup_name, output_ndx)``, a tuple showing the\n actual group name and the name of the file; useful when all names are autogenerated.\n\n .. Warning:: The order of the atom numbers in the Funcd group is\n *not* guaranteed to be the same as the selections on input because\n ``make_ndx`` sorts them ascending. Thus you should be careful when\n using these index files for calculations of angles and dihedrals.\n Use :class:`gromacs.formats.NDX` in these cases.\n\n .. 
SeeAlso:: :meth:`IndexBuilder.write`.\n \"\"\"\n if not arg_3 in ('|', '&', False):\n raise ValueError(\"Illegal operation {0!r}, only '|' (OR) and '&' (AND) or False allowed.\".format(\n arg_3))\n if arg_1 is None and arg_3:\n arg_1 = arg_0.name_all or arg_3.join(arg_0.indexfiles)\n if arg_2 is None:\n arg_2 = arg_0.output\n\n if arg_4:\n # make a default file (using the original ndx where provided!!)\n arg_5, arg_6 = tempfile.mkstemp(suffix='.ndx', prefix='default__')\n try:\n arg_0.make_ndx(o=arg_6, input=['q'])\n except:\n utilities.unlink_gmx(arg_6)\n raise\n arg_7 = [arg_6]\n else:\n arg_7 = []\n\n arg_7.extend(arg_0.indexfiles.values())\n\n if arg_3:\n # Func multiple selections and name them\n try:\n arg_5, arg_8 = tempfile.mkstemp(suffix='.ndx', prefix='Funcd__')\n # Func all selections by loading ALL temporary index files\n arg_3 = ' '+arg_3.strip()+' '\n arg_9 = [arg_3.join(['\"{0!s}\"'.format(gname) for gname in arg_0.indexfiles]),\n '', 'q']\n arg_10,arg_11,arg_12 = arg_0.make_ndx(n=arg_7, o=arg_8, input=arg_9)\n if arg_0._is_empty_group(arg_11):\n warnings.warn(\"No atoms found for {cmd!r}\".format(**vars()),\n category=BadParameterWarning)\n\n # second pass for naming, sigh (or: use NDX ?)\n arg_13 = parse_ndxlist(arg_11)\n arg_14 = arg_13[-1]\n # name this group\n arg_15 = [\"name {0:d} {1!s}\".format(arg_14['nr'], arg_1), 'q']\n arg_10,arg_11,arg_12 = arg_0.make_ndx(n=arg_8, o=arg_2, input=arg_15)\n # For debugging, look at out and err or set stdout=True, stderr=True\n # TODO: check out if at least 1 atom selected\n ##print \"DEBUG: Func()\"\n ##print out\n finally:\n utilities.unlink_gmx(arg_8)\n if arg_4:\n utilities.unlink_gmx(arg_6)\n else:\n # just write individual groups in one file (name_all --> None)\n arg_10,arg_11,arg_12 = arg_0.make_ndx(n=arg_7, o=arg_2, input=['','q'])\n\n return arg_1, arg_2"} +{"_id": "doc_8305", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"ConFuncenate input index files.\n\n Generate a new index file that contains the default Gromacs index\n groups (if a structure file was defined) and all index groups from the\n input index files.\n\n :Arguments:\n out_ndx : filename\n Name of the output index file; if ``None`` then use the default\n provided to the constructore. 
[``None``].\n \"\"\"\n if arg_1 is None:\n arg_1 = arg_0.output\n arg_0.make_ndx(o=arg_1, input=['q'])\n return arg_1"} +{"_id": "doc_8306", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Process ``make_ndx`` command and return name and temp index file.\"\"\"\n\n arg_0._command_counter += 1\n if arg_2 is None:\n arg_2 = \"CMD{0:03d}\".format(arg_0._command_counter)\n\n # Need to build it with two make_ndx calls because I cannot reliably\n # name the new group without knowing its number.\n try:\n arg_3, arg_4 = tempfile.mkstemp(suffix='.ndx', prefix='tmp_'+arg_2+'__')\n arg_5 = [arg_1, '', 'q'] # empty command '' necessary to get list\n # This sometimes fails with 'OSError: Broken Pipe' --- hard to debug\n arg_6,arg_7,arg_8 = arg_0.make_ndx(o=arg_4, input=arg_5)\n arg_0.check_output(arg_7, \"No atoms found for selection {command!r}.\".format(**vars()), arg_8=arg_8)\n # For debugging, look at out and err or set stdout=True, stderr=True\n # TODO: check ' 0 r_300_&_ALA_&_O : 1 atoms' has at least 1 atom\n ##print \"DEBUG: Func()\"\n ##print out\n arg_9 = parse_ndxlist(arg_7)\n arg_10 = arg_9[-1]\n # reduce and name this group\n arg_3, arg_11 = tempfile.mkstemp(suffix='.ndx', prefix=arg_2+'__')\n arg_12 = [\"keep {0:d}\".format(arg_10['nr']),\n \"name 0 {0!s}\".format(arg_2), 'q']\n arg_6,arg_7,arg_8 = arg_0.make_ndx(n=arg_4, o=arg_11, input=arg_12)\n finally:\n utilities.unlink_gmx(arg_4)\n\n return arg_2, arg_11"} +{"_id": "doc_8307", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Process a range selection.\n\n (\"S234\", \"A300\", \"CA\") --> selected all CA in this range\n (\"S234\", \"A300\") --> selected all atoms in this range\n\n .. Note:: Ignores residue type, only cares about the resid (but still required)\n \"\"\"\n\n try:\n arg_3, arg_4, arg_5 = arg_1\n except ValueError:\n try:\n arg_3, arg_4 = arg_1\n arg_5 = '*'\n except:\n logger.error(\"%r is not a valid range selection\", arg_1)\n raise\n if arg_2 is None:\n arg_2 = \"{first!s}-{last!s}_{gmx_atomname!s}\".format(**vars())\n\n arg_6 = arg_0._translate_residue(arg_3, default_atomname=arg_5)\n arg_7 = arg_0._translate_residue(arg_4, default_atomname=arg_5)\n\n arg_8 = 'r {0:d} - {1:d} & & a {2!s}'.format(arg_6['resid'], arg_7['resid'], arg_5)\n arg_9 = ['keep 0', 'del 0',\n arg_8,\n 'name 0 {name!s}'.format(**vars()),\n 'q']\n arg_10, arg_11 = tempfile.mkstemp(suffix='.ndx', prefix=arg_2+'__')\n arg_12,arg_13,arg_14 = arg_0.make_ndx(n=arg_0.ndx, o=arg_11, input=arg_9)\n arg_0.check_output(arg_13, \"No atoms found for \"\n \"%(selection)r --> %(_selection)r\" % vars())\n # For debugging, look at out and err or set stdout=True, stderr=True\n ##print \"DEBUG: _process_residue()\"\n ##print out\n\n return arg_2, arg_11"} +{"_id": "doc_8308", "title": "", "text": "def Func(arg_0, arg_1, arg_2='CA'):\n \"\"\"Translate selection for a single res to make_ndx syntax.\"\"\"\n arg_3 = arg_0.RESIDUE.match(arg_1)\n if not arg_3:\n arg_4 = \"Selection {selection!r} is not valid.\".format(**vars())\n logger.error(arg_4)\n raise ValueError(arg_4)\n\n arg_5 = arg_0.gmx_resid(int(arg_3.group('resid'))) # magic offset correction\n arg_6 = arg_3.group('aa')\n if len(arg_6) == 1:\n arg_7 = utilities.convert_aa_code(arg_6) # only works for AA\n else:\n arg_7 = arg_6 # use 3-letter for any resname\n\n arg_8 = arg_3.group('atom')\n if arg_8 is None:\n arg_8 = arg_2\n\n return {'resname':arg_7, 'resid':arg_5, 'atomname':arg_8}"} +{"_id": "doc_8309", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, 
arg_3=None):\n \"\"\"Simple tests to flag problems with a ``make_ndx`` run.\"\"\"\n if arg_2 is None:\n arg_2 = \"\"\n else:\n arg_2 = '\\n' + arg_2\n def format(arg_4, arg_5=60):\n arg_6 = \"====[ GromacsError (diagnostic output) ]\".ljust(arg_5,\"=\")\n return arg_6 + '\\n' + str(arg_4) + arg_6\n\n arg_7 = True\n if arg_0._is_empty_group(arg_1):\n warnings.warn(\"Selection produced empty group.{message!s}\".format(**vars()), category=GromacsValueWarning)\n arg_7 = False\n if arg_0._has_syntax_error(arg_1):\n arg_7 = False\n arg_8 = format(arg_1)\n raise GromacsError(\"make_ndx encountered a Syntax Error, \"\n \"%(message)s\\noutput:\\n%(out_formatted)s\" % vars())\n if arg_1.strip() == \"\":\n arg_7 = False\n arg_8 = format(arg_3)\n raise GromacsError(\"make_ndx produced no output, \"\n \"%(message)s\\nerror output:\\n%(out_formatted)s\" % vars())\n return arg_7"} +{"_id": "doc_8310", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Write compact xtc that is fitted to the tpr reference structure.\n\n See :func:`gromacs.cbook.trj_fitandcenter` for details and\n description of *kwargs* (including *input*, *input1*, *n* and\n *n1* for how to supply custom index groups). The most important ones are listed\n here but in most cases the defaults should work.\n\n :Keywords:\n *s*\n Input structure (typically the default tpr file but can be set to\n some other file with a different conformation for fitting)\n *n*\n Alternative index file.\n *o*\n Name of the output trajectory.\n *xy* : Boolean\n If ``True`` then only fit in xy-plane (useful for a membrane normal\n to z). The default is ``False``.\n *force*\n - ``True``: overwrite existing trajectories\n - ``False``: throw a IOError exception\n - ``None``: skip existing and log a warning [default]\n\n :Returns:\n dictionary with keys *tpr*, *xtc*, which are the names of the\n the new files\n \"\"\"\n arg_1.setdefault('s', arg_0.tpr)\n arg_1.setdefault('n', arg_0.ndx)\n arg_1['f'] = arg_0.xtc\n arg_1.setdefault('o', arg_0.outfile(arg_0.infix_filename(None, arg_0.xtc, '_centfit', 'xtc')))\n arg_2 = arg_1.pop('force', arg_0.force)\n\n logger.info(\"Centering and fitting trajectory {f!r}...\".format(**arg_1))\n with utilities.in_dir(arg_0.dirname):\n if not arg_0.check_file_exists(arg_1['o'], resolve=\"indicate\", arg_2=arg_2):\n trj_fitandcenter(**arg_1)\n logger.info(\"Centered and fit trajectory: {o!r}.\".format(**arg_1))\n return {'tpr': arg_0.rp(arg_1['s']), 'xtc': arg_0.rp(arg_1['o'])}"} +{"_id": "doc_8311", "title": "", "text": "def Func(arg_0, arg_1=False, **arg_2):\n \"\"\"Write xtc that is Functed to the tpr reference structure.\n\n Runs :class:`gromacs.tools.trjconv` with appropriate arguments\n for Functing. The most important *kwargs* are listed\n here but in most cases the defaults should work.\n\n Note that the default settings do *not* include centering or\n periodic boundary treatment as this often does not work well\n with Functing. It is better to do this as a separate step (see\n :meth:`center_Func` or :func:`gromacs.cbook.trj_Funcandcenter`)\n\n :Keywords:\n *s*\n Input structure (typically the default tpr file but can be set to\n some other file with a different conformation for Functing)\n *n*\n Alternative index file.\n *o*\n Name of the output trajectory. A default name is created.\n If e.g. 
*dt* = 100 is one of the *kwargs* then the default name includes\n \"_dt100ps\".\n *xy* : boolean\n If ``True`` then only do a rot+trans Func in the xy plane\n (good for membrane simulations); default is ``False``.\n *force*\n ``True``: overwrite existing trajectories\n ``False``: throw an IOError exception\n ``None``: skip existing and log a warning [default]\n *Funcgroup*\n index group to Func on [\"backbone\"]\n\n .. Note:: If keyword *input* is supplied then it will override\n *Funcgroup*; *input* = ``[Funcgroup, outgroup]``\n *kwargs*\n kwargs are passed to :func:`~gromacs.cbook.trj_xyFuncted`\n\n :Returns:\n dictionary with keys *tpr*, *xtc*, which are the names of the\n new files\n \"\"\"\n arg_2.setdefault('s', arg_0.tpr)\n arg_2.setdefault('n', arg_0.ndx)\n arg_2['f'] = arg_0.xtc\n arg_3 = arg_2.pop('force', arg_0.force)\n if arg_1:\n arg_4 = 'rotxy+transxy'\n arg_2.pop('Func', None)\n arg_5 = '_Funcxy'\n else:\n arg_4 = arg_2.pop('Func', 'rot+trans') # user can use 'progressive', too\n arg_5 = '_Func'\n\n arg_6 = arg_2.get('dt')\n if arg_6:\n arg_5 += '_dt{0:d}ps'.format(int(arg_6)) # dt in ps\n\n arg_2.setdefault('o', arg_0.outfile(arg_0.infix_filename(None, arg_0.xtc, arg_5, 'xtc')))\n arg_7 = arg_2.pop('Funcgroup', 'backbone')\n arg_2.setdefault('input', [arg_7, \"system\"])\n\n if arg_2.get('center', False):\n logger.warn(\"Transformer.Func(): center=%(center)r used: centering should not be combined with Functing.\", arg_2)\n if len(arg_2['input']) != 3:\n logger.error(\"If you insist on centering you must provide three groups in the 'input' kwarg: (center, Func, output)\")\n raise ValueError(\"Insufficient index groups for centering,Functing,output\")\n\n logger.info(\"Fitting trajectory %r with xy=%r...\", arg_2['f'], arg_1)\n logger.info(\"Fitting on index group %(Funcgroup)r\", vars())\n with utilities.in_dir(arg_0.dirname):\n if arg_0.check_file_exists(arg_2['o'], resolve=\"indicate\", arg_3=arg_3):\n logger.warn(\"File %r exists; force regenerating it with force=True.\", arg_2['o'])\n else:\n gromacs.trjconv(Func=arg_4, **arg_2)\n logger.info(\"Fitted trajectory (Funcmode=%s): %r.\", arg_4, arg_2['o'])\n return {'tpr': arg_0.rp(arg_2['s']), 'xtc': arg_0.rp(arg_2['o'])}"}
{"_id": "doc_8312", "title": "", "text": "def Func(arg_0, arg_1='gromacs.log'):\n \"\"\"Create a top level logger.\n\n - The file logger logs everything (including DEBUG).\n - The console logger only logs INFO and above.\n\n Logging to a file and the console.\n \n See http://docs.python.org/library/logging.html?#logging-to-multiple-destinations\n \n The top level logger of the library is named 'gromacs'. Note that\n we are configuring this logger with console output. If the root\n logger also does this then we will get two output lines to the\n console. 
We'll live with this because this is a simple\n convenience library...\n \"\"\"\n\n arg_2 = logging.getLogger(arg_0)\n\n arg_2.setLevel(logging.DEBUG)\n\n arg_1 = logging.FileHandler(arg_1)\n arg_3 = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n arg_1.setFormatter(arg_3)\n arg_2.addHandler(arg_1)\n\n # define a Handler which writes INFO messages or higher to the sys.stderr\n arg_4 = logging.StreamHandler()\n arg_4.setLevel(logging.INFO)\n # set a format which is simpler for console use\n arg_5 = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n arg_4.setFormatter(arg_5)\n\n arg_2.addHandler(arg_4)\n\n return arg_2"} +{"_id": "doc_8313", "title": "", "text": "def Func():\n \"\"\" Get tool names from all configured groups.\n\n :return: list of tool names\n \"\"\"\n arg_0 = []\n for arg_1 in cfg.get('Gromacs', 'groups').split():\n arg_0.extend(cfg.get('Gromacs', arg_1).split())\n return arg_0"} +{"_id": "doc_8314", "title": "", "text": "def Func(arg_0):\n \"\"\"Dict of variables that we make available as globals in the module.\n\n Can be used as ::\n\n globals().update(GMXConfigParser.configuration) # update configdir, templatesdir ...\n \"\"\"\n Func = {\n 'configfilename': arg_0.filename,\n 'logfilename': arg_0.getpath('Logging', 'logfilename'),\n 'loglevel_console': arg_0.getLogLevel('Logging', 'loglevel_console'),\n 'loglevel_file': arg_0.getLogLevel('Logging', 'loglevel_file'),\n 'configdir': arg_0.getpath('DEFAULT', 'configdir'),\n 'qscriptdir': arg_0.getpath('DEFAULT', 'qscriptdir'),\n 'templatesdir': arg_0.getpath('DEFAULT', 'templatesdir'),\n }\n Func['path'] = [os.path.curdir,\n Func['qscriptdir'],\n Func['templatesdir']]\n return Func"} +{"_id": "doc_8315", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the textual representation of logging level 'option' or the number.\n\n Note that option is always interpreted as an UPPERCASE string\n and hence integer log levels will not be recognized.\n\n .. 
SeeAlso: :mod:`logging` and :func:`logging.getLevelName`\n \"\"\"\n return logging.getLevelName(arg_0.get(arg_1, arg_2).upper())"} +{"_id": "doc_8316", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Use .collection as extension unless provided\"\"\"\n arg_2, arg_3 = os.path.splitext(arg_1)\n if not arg_3:\n arg_3 = \".collection\"\n return arg_2 + arg_3"} +{"_id": "doc_8317", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Scale dihedral angles\"\"\"\n\n if arg_3 is None:\n arg_3 = []\n arg_4 = []\n for arg_5 in arg_0.dihedrals:\n arg_6 = arg_5.atom1.get_atomtype(), arg_5.atom2.get_atomtype(), arg_5.atom3.get_atomtype(), arg_5.atom4.get_atomtype()\n arg_6 = [a.replace(\"_\", \"\").replace(\"=\",\"\") for a in arg_6]\n\n # special-case: this is a [ dihedral ] override in molecule block, continue and don't match\n if arg_5.gromacs['param'] != []:\n for arg_7 in arg_5.gromacs['param']:\n arg_7['kch'] *= arg_2\n arg_4.append(arg_5)\n continue\n\n for arg_8 in range(32):\n if (arg_8%2==0 ):\n arg_9=arg_6[0]; arg_10=arg_6[1]; arg_11=arg_6[2]; arg_12=arg_6[3]\n else:\n arg_9=arg_6[3]; arg_10=arg_6[2]; arg_11=arg_6[1]; arg_12=arg_6[0]\n\n if((arg_8//2)%2==1): arg_9=\"X\";\n if((arg_8//4)%2==1): arg_10=\"X\";\n if((arg_8//8)%2==1): arg_11=\"X\";\n if((arg_8//16)%2==1): arg_12=\"X\";\n arg_13 = \"{0}-{1}-{2}-{3}-{4}\".format(arg_9, arg_10, arg_11, arg_12, arg_5.gromacs['func'])\n if (arg_13 in arg_1):\n for arg_14, arg_15 in enumerate(arg_1[arg_13]):\n arg_16 = copy.deepcopy(arg_5)\n arg_17 = copy.deepcopy(arg_15.gromacs['param'])\n # Only check the first dihedral in a list\n if not arg_1[arg_13][0].line in arg_3:\n for arg_7 in arg_17: arg_7['kchi'] *= arg_2\n arg_16.gromacs['param'] = arg_17\n #if key == \"CT3-C-NH1-CT1-9\": print i, dt, key\n if arg_14 == 0:\n arg_16.comment = \"; banned lines {0} found={1}\\n\".format(\" \".join(\n map(str, arg_3)), 1 if arg_15.line in arg_3 else 0)\n arg_16.comment += \"; parameters for types {}-{}-{}-{}-9 at LINE({})\\n\".format(\n arg_16.atom1.atomtype, arg_16.atom2.atomtype, arg_16.atom3.atomtype,\n arg_16.atom4.atomtype, arg_15.line).replace(\"_\",\"\")\n arg_20 = \"{}-{}-{}-{}-9\".format(arg_16.atom1.atomtype, arg_16.atom2.atomtype,\n arg_16.atom3.atomtype, arg_16.atom4.atomtype).replace(\"_\",\"\")\n #if name == \"CL-CTL2-CTL2-HAL2-9\": print dihedrals[key], key\n arg_4.append(arg_16)\n break\n\n\n arg_0.dihedrals = arg_4\n #assert(len(mol.dihedrals) == new_dihedrals)\n return arg_0"} +{"_id": "doc_8318", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n \"\"\"Scale improper dihedrals\"\"\"\n if arg_3 is None:\n arg_3 = []\n arg_4 = []\n for arg_5 in arg_0.impropers:\n arg_6 = (arg_5.atom1.get_atomtype(), arg_5.atom2.get_atomtype(),\n arg_5.atom3.get_atomtype(), arg_5.atom4.get_atomtype())\n arg_6 = [a.replace(\"_\", \"\").replace(\"=\", \"\") for a in arg_6]\n\n # special-case: this is a [ dihedral ] override in molecule block, continue and don't match\n if arg_5.gromacs['param'] != []:\n for arg_7 in arg_5.gromacs['param']:\n arg_7['kpsi'] *= arg_2\n arg_4.append(arg_5)\n continue\n\n for arg_8 in range(32):\n if (arg_8%2==0):\n arg_9=arg_6[0]; arg_10=arg_6[1]; arg_11=arg_6[2]; arg_12=arg_6[3];\n else:\n arg_9=arg_6[3]; arg_10=arg_6[2]; arg_11=arg_6[1]; arg_12=arg_6[0];\n if((arg_8//2)%2==1): arg_9=\"X\";\n if((arg_8//4)%2==1): arg_10=\"X\";\n if((arg_8//8)%2==1): arg_11=\"X\";\n if((arg_8//16)%2==1): arg_12=\"X\";\n arg_13 = \"{0}-{1}-{2}-{3}-{4}\".format(arg_9, arg_10, arg_11, arg_12, 
arg_5.gromacs['func'])\n if (arg_13 in arg_1):\n for arg_14, arg_15 in enumerate(arg_1[arg_13]):\n arg_16 = copy.deepcopy(arg_5)\n arg_17 = copy.deepcopy(arg_15.gromacs['param'])\n # Only check the first dihedral in a list\n if not arg_1[arg_13][0].line in arg_3:\n for arg_7 in arg_17: arg_7['kpsi'] *= arg_2\n arg_16.gromacs['param'] = arg_17\n if arg_14 == 0:\n arg_16.comment = \"; banned lines {0} found={1}\\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\\n\".format(\n \" \".join(map(str, arg_3)),\n 1 if arg_15.line in arg_3 else 0,\n arg_15.atype1, arg_15.atype2, arg_15.atype3, arg_15.atype4, arg_15.line)\n arg_4.append(arg_16)\n break\n #assert(len(mol.impropers) == new_impropers)\n arg_0.impropers = arg_4\n return arg_0"} +{"_id": "doc_8319", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert string x to the most useful type, i.e. int, float or unicode string.\n\n If x is a quoted string (single or double quotes) then the quotes\n are stripped and the enclosed string returned.\n\n .. Note::\n\n Strings will be returned as Unicode strings (using :func:`to_unicode`).\n\n .. versionchanged:: 0.7.0\n removed `encoding keyword argument\n \"\"\"\n arg_0 = to_unicode(arg_0) # make unicode as soon as possible\n try:\n arg_0 = arg_0.strip()\n except AttributeError:\n pass\n arg_1 = re.match(r\"\"\"['\"](?P.*)[\"']$\"\"\", arg_0)\n if arg_1 is None:\n # not a quoted string, try different types\n for arg_2 in int, float, to_unicode: # try them in increasing order of lenience\n try:\n return arg_2(arg_0)\n except ValueError:\n pass\n else:\n # quoted string\n arg_0 = to_unicode(arg_1.group('value'))\n return arg_0"} +{"_id": "doc_8320", "title": "", "text": "def Func(arg_0):\n \"\"\"Return view of the recarray with all int32 cast to int64.\"\"\"\n # build new dtype and replace i4 --> i8\n def promote_i4(arg_1):\n if arg_1[1:] == 'i4':\n arg_1 = arg_1[0]+'i8'\n return arg_1\n\n arg_2 = [(name, promote_i4(arg_1)) for name,arg_1 in arg_0.dtype.descr]\n return arg_0.astype(arg_2)"} +{"_id": "doc_8321", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parse Funcour specification\"\"\"\n arg_2 = arg_0.COLOUR.search(arg_1)\n if not arg_2:\n arg_0.logger.fatal(\"Cannot parse Funcour specification %r.\", arg_1)\n raise ParseError(\"XPM reader: Cannot parse Funcour specification {0!r}.\".format(arg_1))\n arg_3 = arg_2.group('value')\n arg_4 = arg_2.group('symbol')\n arg_0.logger.debug(\"%s: %s %s\\n\", arg_1.strip(), arg_4, arg_3)\n return arg_4, arg_3"} +{"_id": "doc_8322", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Transform arguments and return them as a list suitable for Popen.\"\"\"\n arg_3 = []\n for arg_4,arg_5 in arg_2.items():\n if not arg_4.startswith('-'):\n # heuristic for turning key=val pairs into options\n # (fails for commands such as 'find' -- then just use args)\n if len(arg_4) == 1:\n arg_4 = '-' + arg_4 # POSIX style\n else:\n arg_4 = '--' + arg_4 # GNU option\n if arg_5 is True:\n arg_3.append(arg_4)\n continue\n elif arg_5 is False:\n raise ValueError('A False value is ambiguous for option {0!r}'.format(arg_4))\n\n if arg_4[:2] == '--':\n arg_3.append(arg_4 + '=' + str(arg_5)) # GNU option\n else:\n arg_3.extend((arg_4, str(arg_5))) # POSIX style\n return arg_3 + list(arg_1)"} +{"_id": "doc_8323", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"Print Func; same as using ``?`` in ``ipython``. 
long=True also gives call signature.\"\"\"\n print(\"\\ncommand: {0!s}\\n\\n\".format(arg_0.command_name))\n print(arg_0.__doc__)\n if arg_1:\n print(\"\\ncall method: command():\\n\")\n print(arg_0.__call__.__doc__)"} +{"_id": "doc_8324", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Add switches as 'options' with value True to the options dict.\"\"\"\n arg_3 = {arg: True for arg in arg_1} # switches are kwargs with value True\n arg_3.update(arg_2)\n return arg_3"} +{"_id": "doc_8325", "title": "", "text": "def Func(arg_0):\n \"\"\"Extract standard gromacs doc\n\n Extract by running the program and chopping the header to keep from\n 'DESCRIPTION' onwards.\n \"\"\"\n if arg_0._doc_cache is not None:\n return arg_0._doc_cache\n\n try:\n logging.disable(logging.CRITICAL)\n arg_1, arg_2, arg_3 = arg_0.run('h', stdout=PIPE, stderr=PIPE, use_input=False)\n except:\n logging.critical(\"Invoking command {0} failed when determining its doc string. Proceed with caution\".format(arg_0.command_name))\n arg_0._doc_cache = \"(No Gromacs documentation available)\"\n return arg_0._doc_cache\n finally:\n # ALWAYS restore logging...\n logging.disable(logging.NOTSET)\n\n # The header is on STDOUT and is ignored. The docs are read from STDERR in GMX 4.\n arg_5 = re.match(arg_0.doc_pattern, arg_3, re.DOTALL)\n\n if arg_5 is None:\n # In GMX 5, the opposite is true (Grrr)\n arg_5 = re.match(arg_0.doc_pattern, arg_2, re.DOTALL)\n if arg_5 is None:\n arg_0._doc_cache = \"(No Gromacs documentation available)\"\n return arg_0._doc_cache\n\n arg_0._doc_cache = arg_5.group('DOCS')\n return arg_0._doc_cache"} +{"_id": "doc_8326", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert input to a numerical type if possible.\n\n 1. A non-string object is returned as it is\n 2. 
Try conversion to int, float, str.\n \"\"\"\n if type(arg_0) is not str:\n return arg_0\n for arg_1 in int, float, str: # try them in increasing order of lenience\n try:\n arg_0 = [arg_1(i) for i in arg_0.split()]\n if len(arg_0) == 1:\n return arg_0[0]\n else:\n return numpy.array(arg_0)\n except (ValueError, AttributeError):\n pass\n raise ValueError(\"Failed to Func {0!r}\".format(arg_0))"} +{"_id": "doc_8327", "title": "", "text": "def Func(arg_0=None):\n \"\"\"Remove legend for axes or gca.\n\n See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html\n \"\"\"\n from pylab import gca, draw\n if arg_0 is None:\n arg_0 = gca()\n arg_0.legend_ = None\n draw()"} +{"_id": "doc_8328", "title": "", "text": "def Func(arg_0, arg_1, arg_2='exception', arg_3=None):\n \"\"\"If a file exists then continue with the action specified in ``resolve``.\n\n ``resolve`` must be one of\n\n \"ignore\"\n always return ``False``\n \"indicate\"\n return ``True`` if it exists\n \"warn\"\n indicate and issue a :exc:`UserWarning`\n \"exception\"\n raise :exc:`IOError` if it exists\n\n Alternatively, set *force* for the following behaviour (which\n ignores *resolve*):\n\n ``True``\n same as *resolve* = \"ignore\" (will allow overwriting of files)\n ``False``\n same as *resolve* = \"exception\" (will prevent overwriting of files)\n ``None``\n ignored, do whatever *resolve* says\n \"\"\"\n def _warn(arg_4):\n arg_5 = \"File {0!r} already exists.\".format(arg_4)\n logger.warn(arg_5)\n warnings.warn(arg_5)\n return True\n def _raise(arg_4):\n arg_5 = \"File {0!r} already exists.\".format(arg_4)\n logger.error(arg_5)\n raise IOError(errno.EEXIST, arg_4, arg_5)\n arg_6 = {'ignore': lambda arg_4: False, # file exists, but we pretend that it doesn't\n 'indicate': lambda arg_4: True, # yes, file exists\n 'warn': _warn,\n 'warning': _warn,\n 'exception': _raise,\n 'raise': _raise,\n }\n\n if arg_3 is True:\n arg_2 = 'ignore'\n elif arg_3 is False:\n arg_2 = 'exception'\n\n if not os.path.isfile(arg_1):\n return False\n else:\n return arg_6[arg_2](arg_1)"} +{"_id": "doc_8329", "title": "", "text": "def Func():\n \"\"\" Load Gromacs 4.x tools automatically using some heuristic.\n\n Tries to load tools (1) in configured tool groups (2) and fails back to\n automatic detection from ``GMXBIN`` (3) then to a prefilled list.\n\n Also load any extra tool configured in ``~/.gromacswrapper.cfg``\n\n :return: dict mapping tool names to GromacsCommand classes\n \"\"\"\n logger.debug(\"Loading v4 tools...\")\n\n arg_0 = config.get_tool_names()\n\n if len(arg_0) == 0 and 'GMXBIN' in os.environ:\n arg_0 = find_executables(os.environ['GMXBIN'])\n\n if len(arg_0) == 0 or len(arg_0) > len(V4TOOLS) * 4:\n arg_0 = list(V4TOOLS)\n\n arg_0.extend(config.get_extra_tool_names())\n\n arg_1 = {}\n for arg_2 in arg_0:\n arg_3 = make_valid_identifier(arg_2)\n arg_1[arg_3] = tool_factory(arg_3, arg_2, None)\n\n if not arg_1:\n arg_4 = \"Failed to load v4 tools\"\n logger.debug(arg_4)\n raise GromacsToolLoadingError(arg_4)\n logger.debug(\"Loaded {0} v4 tools successfully!\".format(len(arg_1)))\n return arg_1"} +{"_id": "doc_8330", "title": "", "text": "def Func(arg_0, arg_1=arg_2.pi, arg_4=None):\n \"\"\"Create a array which masks jumps >= threshold.\n\n Extra points are inserted between two subsequent values whose\n absolute difference differs by more than threshold (default is\n pi).\n\n Other can be a secondary array which is also masked according to\n *a*.\n\n Returns (*a_masked*, *other_masked*) (where *other_masked* can be\n 
``None``)\n \"\"\"\n assert len(arg_0.shape) == 1, \"Only 1D arrays supported\"\n\n if arg_4 is not None and arg_0.shape != arg_4.shape:\n raise ValueError(\"arrays must be of identical shape\")\n\n # jump occurs after the index in break\n arg_5 = arg_2.where(arg_2.abs(arg_2.diff(arg_0)) >= arg_1)[0]\n # insert a blank after\n arg_5 += 1\n\n # is this needed?? -- no, but leave it here as a reminder\n #f2 = numpy.diff(a, 2)\n #up = (f2[breaks - 1] >= 0) # >0: up, <0: down\n # sort into up and down breaks:\n #breaks_up = breaks[up]\n #breaks_down = breaks[~up]\n\n # new array b including insertions for all the breaks\n arg_6 = len(arg_5)\n arg_7 = arg_2.empty((len(arg_0) + arg_6))\n # calculate new indices for breaks in b, taking previous insertions into account\n arg_8 = arg_5 + arg_2.arange(arg_6)\n arg_9 = arg_2.zeros_like(arg_7, dtype=arg_2.bool)\n arg_9[arg_8] = True\n arg_7[~arg_9] = arg_0\n arg_7[arg_9] = arg_2.NAN\n\n if arg_4 is not None:\n arg_10 = arg_2.empty_like(arg_7)\n arg_10[~arg_9] = arg_4\n arg_10[arg_9] = arg_2.NAN\n arg_11 = arg_2.ma.array(arg_10, arg_9=arg_9)\n else:\n arg_11 = None\n\n return arg_2.ma.array(arg_7, arg_9=arg_9), arg_11"} +{"_id": "doc_8331", "title": "", "text": "def Func(arg_0, arg_1=100, **arg_2):\n \"\"\"Correlation \"time\" of data.\n\n The 0-th column of the data is interpreted as a time and the\n decay of the data is computed from the autocorrelation\n function (using FFT).\n\n .. SeeAlso:: :func:`numkit.timeseries.tcorrel`\n \"\"\"\n arg_3 = arg_0.array[0,::arg_1]\n arg_4 = gromacs.collections.Collection([numkit.timeseries.tcorrel(arg_3, Y, arg_1=1, **arg_2) for Y in arg_0.array[1:,::arg_1]])\n return arg_4"} +{"_id": "doc_8332", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Set and change the parameters for calculations with correlation functions.\n\n The parameters persist until explicitly changed.\n\n :Keywords:\n *nstep*\n only process every *nstep* data point to speed up the FFT; if\n left empty a default is chosen that produces roughly 25,000 data\n points (or whatever is set in *ncorrel*)\n *ncorrel*\n If no *nstep* is supplied, aim at using *ncorrel* data points for\n the FFT; sets :attr:`XVG.ncorrel` [25000]\n *force*\n force recalculating correlation data even if cached values are\n available\n *kwargs*\n see :func:`numkit.timeseries.tcorrel` for other options\n\n .. SeeAlso: :attr:`XVG.error` for details and references.\n \"\"\"\n arg_0.ncorrel = arg_1.pop('ncorrel', arg_0.ncorrel) or 25000\n arg_3 = arg_1.pop('nstep', None)\n if arg_3 is None:\n # good step size leads to ~25,000 data points\n arg_3 = len(arg_0.array[0])/float(arg_0.ncorrel)\n arg_3 = int(numpy.ceil(arg_3)) # catch small data sets\n arg_1['nstep'] = arg_3\n arg_0.__correlkwargs.update(arg_1) # only contains legal kw for numkit.timeseries.tcorrel or force\n return arg_0.__correlkwargs"} +{"_id": "doc_8333", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Read and cache the file as a numpy array.\n\n Store every *stride* line of data; if ``None`` then the class default is used.\n\n The array is returned with column-first indexing, i.e. for a data file with\n columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... 
.\n \"\"\"\n if arg_1 is None:\n arg_1 = arg_0.stride\n arg_0.corrupted_lineno = []\n arg_3 = 0 # count rows of data\n # cannot use numpy.loadtxt() because xvg can have two types of 'comment' lines\n with utilities.openany(arg_0.real_filename) as xvg:\n arg_4 = []\n arg_5 = None\n for arg_6,arg_7 in enumerate(xvg):\n arg_7 = arg_7.strip()\n if len(arg_7) == 0:\n continue\n if \"label\" in arg_7 and \"xaxis\" in arg_7:\n arg_0.xaxis = arg_7.split('\"')[-2]\n if \"label\" in arg_7 and \"yaxis\" in arg_7:\n arg_0.yaxis = arg_7.split('\"')[-2]\n if arg_7.startswith(\"@ legend\"):\n if not \"legend\" in arg_0.metadata: arg_0.metadata[\"legend\"] = []\n arg_0.metadata[\"legend\"].append(arg_7.split(\"legend \")[-1])\n if arg_7.startswith(\"@ s\") and \"subtitle\" not in arg_7:\n arg_11 = arg_7.split(\"legend \")[-1].replace('\"','').strip()\n arg_0.names.append(arg_11)\n if arg_7.startswith(('#', '@')) :\n continue\n if arg_7.startswith('&'):\n raise NotImplementedError('{0!s}: Multi-data not supported, only simple NXY format.'.format(arg_0.real_filename))\n # Func line as floats\n try:\n arg_12 = [float(el) for el in arg_7.split()]\n except:\n if arg_0.permissive:\n arg_0.logger.warn(\"%s: SKIPPING unparsable line %d: %r\",\n arg_0.real_filename, arg_6+1, arg_7)\n arg_0.corrupted_lineno.append(arg_6+1)\n continue\n arg_0.logger.error(\"%s: Cannot Func line %d: %r\",\n arg_0.real_filename, arg_6+1, arg_7)\n raise\n # check for same number of columns as in previous step\n if arg_5 is not None and len(arg_12) != arg_5:\n if arg_0.permissive:\n arg_0.logger.warn(\"%s: SKIPPING line %d with wrong number of columns: %r\",\n arg_0.real_filename, arg_6+1, arg_7)\n arg_0.corrupted_lineno.append(arg_6+1)\n continue\n arg_13 = \"{0!s}: Wrong number of columns in line {1:d}: {2!r}\".format(arg_0.real_filename, arg_6+1, arg_7)\n arg_0.logger.error(arg_13)\n raise IOError(errno.ENODATA, arg_13, arg_0.real_filename)\n # finally: a good line\n if arg_3 % arg_1 == 0:\n arg_5 = len(arg_12)\n arg_4.append(arg_12)\n arg_3 += 1\n try:\n arg_0.__array = numpy.array(arg_4).transpose() # cache result\n except:\n arg_0.logger.error(\"%s: Failed reading XVG file, possibly data corrupted. \"\n \"Check the last line of the file...\", arg_0.real_filename)\n raise\n finally:\n del arg_4"} +{"_id": "doc_8334", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Plot xvg file data.\n\n The first column of the data is always taken as the abscissa\n X. Additional columns are Functed as ordinates Y1, Y2, ...\n\n In the special case that there is only a single column then this column\n is Functed against the index, i.e. (N, Y).\n\n :Keywords:\n *columns* : list\n Select the columns of the data to be Functed; the list\n is used as a numpy.array extended slice. The default is\n to use all columns. Columns are selected *after* a transform.\n *transform* : function\n function ``transform(array) -> array`` which transforms\n the original array; must return a 2D numpy array of\n shape [X, Y1, Y2, ...] where X, Y1, ... are column\n vectors. By default the transformation is the\n identity [``lambda x: x``].\n *maxpoints* : int\n limit the total number of data points; matFunclib has issues processing\n png files with >100,000 points and pdfs take forever to display. Set to\n ``None`` if really all data should be displayed. At the moment we simply\n decimate the data at regular intervals. 
[10000]\n *method*\n method to decimate the data to *maxpoints*, see :meth:`XVG.decimate`\n for details\n *color*\n single color (used for all Funcs); sequence of colors\n (will be repeated as necessary); or a matFunclib\n colormap (e.g. \"jet\", see :mod:`matFunclib.cm`). The\n default is to use the :attr:`XVG.default_color_cycle`.\n *ax*\n Func into given axes or create new one if ``None`` [``None``]\n *kwargs*\n All other keyword arguments are passed on to :func:`matFunclib.pyFunc.Func`.\n\n :Returns:\n *ax*\n axes instance\n \"\"\"\n arg_2 = arg_1.pop('columns', Ellipsis) # slice for everything\n arg_3 = arg_1.pop('maxpoints', arg_0.maxpoints_default)\n arg_4 = arg_1.pop('transform', lambda x: x) # default is identity transformation\n arg_5 = arg_1.pop('method', \"mean\")\n arg_6 = arg_1.pop('ax', None)\n\n if arg_2 is Ellipsis or arg_2 is None:\n arg_2 = numpy.arange(arg_0.array.shape[0])\n if len(arg_2) == 0:\n raise MissingDataError(\"Func() needs at least one column of data\")\n\n if len(arg_0.array.shape) == 1 or arg_0.array.shape[0] == 1:\n # special case: Func against index; Func would do this automatically but\n # we'll just produce our own xdata and pretend that this was X all along\n arg_7 = numpy.ravel(arg_0.array)\n arg_8 = numpy.arange(len(arg_7))\n arg_7 = numpy.vstack((arg_8, arg_7))\n arg_2 = [0] + [c+1 for c in arg_2]\n else:\n arg_7 = arg_0.array\n\n arg_9 = arg_1.pop('color', arg_0.default_color_cycle)\n try:\n arg_10 = matFunclib.cm.get_cmap(arg_9)\n arg_11 = arg_10(matFunclib.colors.Normalize()(numpy.arange(len(arg_2[1:]), dtype=float)))\n except TypeError:\n arg_11 = cycle(utilities.asiterable(arg_9))\n\n if arg_6 is None:\n arg_6 = plt.gca()\n\n # (decimate/smooth o slice o transform)(array)\n arg_7 = arg_0.decimate(arg_5, numpy.asarray(arg_4(arg_7))[arg_2], arg_3=arg_3)\n\n # now deal with infs, nans etc AFTER all transformations (needed for Functing across inf/nan)\n arg_12 = numpy.ma.MaskedArray(arg_7, mask=numpy.logical_not(numpy.isfinite(arg_7)))\n\n # finally Func (each column separately to catch empty sets)\n for arg_13, arg_9 in zip(range(1,len(arg_2)), arg_11):\n if len(arg_12[arg_13]) == 0:\n warnings.warn(\"No data to Func for column {column:d}\".format(**vars()), category=MissingDataWarning)\n arg_1['color'] = arg_9\n arg_6.Func(arg_12[0], arg_12[arg_13], **arg_1) # Func all other columns in parallel\n return arg_6"} +{"_id": "doc_8335", "title": "", "text": "def Func(arg_0=arg_1.path.curdir, arg_4=None):\n \"\"\"Find vdwradii.dat and add special entries for lipids.\n\n See :data:`gromacs.setup.vdw_lipid_resnames` for lipid\n resnames. Add more if necessary.\n \"\"\"\n arg_5 = arg_1.path.join(arg_0, \"vdwradii.dat\")\n\n if arg_4 is not None:\n arg_6 = arg_1.path.join(arg_4, 'vdwradii.dat') # canonical name\n if not arg_1.path.exists(arg_6):\n arg_7 = 'No VDW database file found in {filename!r}.'.format(**vars())\n logger.exception(arg_7)\n raise OSError(arg_7, errno.ENOENT)\n else:\n try:\n arg_6 = arg_1.path.join(arg_1.environ['GMXLIB'], 'vdwradii.dat')\n except KeyError:\n try:\n arg_6 = arg_1.path.join(arg_1.environ['GMXDATA'], 'top', 'vdwradii.dat')\n except KeyError:\n arg_7 = \"Cannot find vdwradii.dat. 
Set GMXLIB (point to 'top') or GMXDATA ('share/gromacs').\"\n logger.exception(arg_7)\n raise OSError(arg_7, errno.ENOENT)\n if not arg_1.path.exists(arg_6):\n arg_7 = \"Cannot find {filename!r}; something is wrong with the Gromacs installation.\".format(**vars())\n logger.exception(arg_7, errno.ENOENT)\n raise OSError(arg_7)\n\n # make sure to catch 3 and 4 letter resnames\n arg_8 = vdw_lipid_resnames + list({x[:3] for x in vdw_lipid_resnames})\n # TODO: should do a tempfile...\n with open(arg_5, 'w') as outfile:\n # write lipid stuff before general\n outfile.write('; Special larger vdw radii for solvating lipid membranes\\n')\n for arg_9 in arg_8:\n for arg_10,arg_11 in vdw_lipid_atom_radii.items():\n outfile.write('{resname:4!s} {atom:<5!s} {radius:5.3f}\\n'.format(**vars()))\n with open(arg_6, 'r') as infile:\n for arg_12 in infile:\n outfile.write(arg_12)\n logger.debug('Created lipid vdW radii file {vdwradii_dat!r}.'.format(**vars()))\n return realpath(arg_5)"} +{"_id": "doc_8336", "title": "", "text": "def Func(arg_0='top/protein.pdb', arg_1='top/system.top',\n arg_2=0.9, arg_3='dodecahedron',\n arg_4=0, arg_5='NA', arg_6='CL',\n arg_7='tip4p', arg_8='SOL', arg_9=False,\n arg_10 = 'main.ndx', arg_11 = '\"Protein\"',\n arg_12='Func',\n **arg_13):\n \"\"\"Put protein into box, add water, add counter-ions.\n\n Currently this really only supports solutes in water. If you need\n to embedd a protein in a membrane then you will require more\n sophisticated approaches.\n\n However, you *can* supply a protein already inserted in a\n bilayer. In this case you will probably want to set *distance* =\n ``None`` and also enable *with_membrane* = ``True`` (using extra\n big vdw radii for typical lipids).\n\n .. Note:: The defaults are suitable for solvating a globular\n protein in a fairly tight (increase *distance*!) dodecahedral\n box.\n\n :Arguments:\n *struct* : filename\n pdb or gro input structure\n *top* : filename\n Gromacs topology\n *distance* : float\n When solvating with water, make the box big enough so that\n at least *distance* nm water are between the solute *struct*\n and the box boundary.\n Set *boxtype* to ``None`` in order to use a box size in the input\n file (gro or pdb).\n *boxtype* or *bt*: string\n Any of the box types supported by :class:`~gromacs.tools.Editconf`\n (triclinic, cubic, dodecahedron, octahedron). Set the box dimensions\n either with *distance* or the *box* and *angle* keywords.\n\n If set to ``None`` it will ignore *distance* and use the box\n inside the *struct* file.\n\n *bt* overrides the value of *boxtype*.\n *box*\n List of three box lengths [A,B,C] that are used by :class:`~gromacs.tools.Editconf`\n in combination with *boxtype* (``bt`` in :program:`editconf`) and *angles*.\n Setting *box* overrides *distance*.\n *angles*\n List of three angles (only necessary for triclinic boxes).\n *concentration* : float\n Concentration of the free ions in mol/l. Note that counter\n ions are added in excess of this concentration.\n *cation* and *anion* : string\n Molecule names of the ions. This depends on the chosen force field.\n *water* : string\n Name of the water model; one of \"spc\", \"spce\", \"tip3p\",\n \"tip4p\". This should be appropriate for the chosen force\n field. 
If an alternative solvent is required, simply supply the path to a box\n with solvent molecules (used by :func:`~gromacs.genbox`'s *cs* argument)\n and also supply the molecule name via *solvent_name*.\n *solvent_name*\n Name of the molecules that make up the solvent (as set in the itp/top).\n Typically needs to be changed when using non-standard/non-water solvents.\n [\"SOL\"]\n *with_membrane* : bool\n ``True``: use special ``vdwradii.dat`` with 0.1 nm-increased radii on\n lipids. Default is ``False``.\n *ndx* : filename\n How to name the index file that is produced by this function.\n *mainselection* : string\n A string that is fed to :class:`~gromacs.tools.Make_ndx` and\n which should select the solute.\n *dirname* : directory name\n Name of the directory in which all files for the solvation stage are stored.\n *includes*\n List of additional directories to add to the mdp include path\n *kwargs*\n Additional arguments are passed on to\n :class:`~gromacs.tools.Editconf` or are interpreted as parameters to be\n changed in the mdp file.\n\n \"\"\"\n arg_14 = Func_sol(arg_0=arg_0, arg_1=arg_1,\n arg_2=arg_2, arg_3=arg_3,\n arg_7=arg_7, arg_8=arg_8, \n arg_9=arg_9,\n arg_12=arg_12, **arg_13)\n \n arg_15 = Func_ion(arg_0=arg_14['struct'], arg_1=arg_1,\n arg_4=arg_4, arg_5=arg_5, arg_6=arg_6,\n arg_8=arg_8, arg_10=arg_10,\n arg_11=arg_11, arg_12=arg_12,\n **arg_13)\n return arg_15"} +{"_id": "doc_8337", "title": "", "text": "def Func(**arg_0):\n \"\"\"Run multiple energy minimizations one after each other.\n\n :Keywords:\n *integrators*\n list of integrators (from 'l-bfgs', 'cg', 'steep')\n [['bfgs', 'steep']]\n *nsteps*\n list of maximum number of steps; one for each integrator in\n in the *integrators* list [[100,1000]]\n *kwargs*\n mostly passed to :func:`gromacs.setup.energy_minimize`\n\n :Returns: dictionary with paths to final structure ('struct') and\n other files\n\n :Example:\n Conduct three minimizations:\n 1. low memory Broyden-Goldfarb-Fletcher-Shannon (BFGS) for 30 steps\n 2. steepest descent for 200 steps\n 3. finish with BFGS for another 30 steps\n We also do a multi-processor minimization when possible (i.e. for steep\n (and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class\n for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see\n :mod:`gromacs.run` for details)::\n\n import gromacs.run\n gromacs.setup.Func(struct='solvate/ionized.gro',\n mdrunner=gromacs.run.MDrunnerOpenMP64,\n integrators=['l-bfgs', 'steep', 'l-bfgs'],\n nsteps=[50,200, 50])\n\n .. 
Note:: You might have to prepare the mdp file carefully because at the\n moment one can only modify the *nsteps* parameter on a\n per-minimizer basis.\n \"\"\"\n\n arg_1 = arg_0.pop('mdrunner', None)\n arg_2 = arg_0.pop('integrators', ['l-bfgs', 'steep'])\n arg_0.pop('integrator', None) # clean input; we set intgerator from integrators\n arg_3 = arg_0.pop('nsteps', [100, 1000])\n\n arg_4 = ['em{0:03d}_{1!s}.pdb'.format(arg_6, arg_7) for arg_6,arg_7 in enumerate(arg_2)]\n arg_4[-1] = arg_0.pop('output', 'em.pdb')\n\n arg_5 = {'struct': arg_0.pop('struct', None)} # fake output from energy_minimize()\n\n for arg_6, arg_7 in enumerate(arg_2):\n arg_8 = arg_5['struct']\n logger.info(\"[em %d] energy minimize with %s for maximum %d steps\", arg_6, arg_7, arg_3[arg_6])\n arg_0.update({'struct':arg_8, 'output':arg_4[arg_6],\n 'integrator':arg_7, 'nsteps': arg_3[arg_6]})\n if not arg_7 == 'l-bfgs':\n arg_0['mdrunner'] = arg_1\n else:\n arg_0['mdrunner'] = None\n logger.warning(\"[em %d] Not using mdrunner for L-BFGS because it cannot \"\n \"do parallel runs.\", arg_6)\n\n arg_5 = energy_minimize(**arg_0)\n\n return arg_5"} +{"_id": "doc_8338", "title": "", "text": "def Func(arg_0='MD_POSRES', **arg_1):\n \"\"\"Set up MD with position restraints.\n\n Additional itp files should be in the same directory as the top file.\n\n Many of the keyword arguments below already have sensible values. Note that\n setting *mainselection* = ``None`` will disable many of the automated\n choices and is often recommended when using your own mdp file.\n\n :Keywords:\n *dirname*\n set up under directory dirname [MD_POSRES]\n *struct*\n input structure (gro, pdb, ...) [em/em.pdb]\n *top*\n topology file [top/system.top]\n *mdp*\n mdp file (or use the template) [templates/md.mdp]\n *ndx*\n index file (supply when using a custom mdp)\n *includes*\n additional directories to search for itp files\n *mainselection*\n :program:`make_ndx` selection to select main group [\"Protein\"]\n (If ``None`` then no canonical index file is generated and\n it is the user's responsibility to set *tc_grps*,\n *tau_t*, and *ref_t* as keyword arguments, or provide the mdp template\n with all parameter pre-set in *mdp* and probably also your own *ndx*\n index file.)\n *deffnm*\n default filename for Gromacs run [md]\n *runtime*\n total length of the simulation in ps [1000]\n *dt*\n integration time step in ps [0.002]\n *qscript*\n script to submit to the queuing system; by default\n uses the template :data:`gromacs.config.qscript_template`, which can\n be manually set to another template from :data:`gromacs.config.templates`;\n can also be a list of template names.\n *qname*\n name to be used for the job in the queuing system [PR_GMX]\n *mdrun_opts*\n option flags for the :program:`mdrun` command in the queuing system\n scripts such as \"-stepout 100\". [\"\"]\n *kwargs*\n remaining key/value pairs that should be changed in the template mdp\n file, eg ``nstxtcout=250, nstfout=250`` or command line options for\n ``grompp` such as ``maxwarn=1``.\n\n In particular one can also set **define** and activate\n whichever position restraints have been coded into the itp\n and top file. For instance one could have\n\n *define* = \"-DPOSRES_MainChain -DPOSRES_LIGAND\"\n\n if these preprocessor constructs exist. 
Note that there\n **must not be any space between \"-D\" and the value.**\n\n By default *define* is set to \"-DPOSRES\".\n\n :Returns: a dict that can be fed into :func:`gromacs.setup.MD`\n (but check, just in case, especially if you want to\n change the ``define`` parameter in the mdp file)\n\n .. Note:: The output frequency is drastically reduced for position\n restraint runs by default. Set the corresponding ``nst*``\n variables if you require more output. The `pressure coupling`_\n option *refcoord_scaling* is set to \"com\" by default (but can\n be changed via *kwargs*) and the pressure coupling\n algorithm itself is set to *Pcoupl* = \"Berendsen\" to\n run a stable simulation.\n\n .. _`pressure coupling`: http://manual.gromacs.org/online/mdp_opt.html#pc\n \"\"\"\n\n logger.info(\"[{dirname!s}] Setting up MD with position restraints...\".format(**vars()))\n arg_1.setdefault('struct', 'em/em.pdb')\n arg_1.setdefault('qname', 'PR_GMX')\n arg_1.setdefault('define', '-DPOSRES')\n # reduce size of output files\n arg_1.setdefault('nstxout', '50000') # trr pos\n arg_1.setdefault('nstvout', '50000') # trr veloc\n arg_1.setdefault('nstfout', '0') # trr forces\n arg_1.setdefault('nstlog', '500') # log file\n arg_1.setdefault('nstenergy', '2500') # edr energy\n arg_1.setdefault('nstxtcout', '5000') # xtc pos\n # try to get good pressure equilibration\n arg_1.setdefault('refcoord_scaling', 'com')\n arg_1.setdefault('Pcoupl', \"Berendsen\")\n\n arg_2 = _setup_MD(arg_0, **arg_1)\n\n # clean up output kwargs\n arg_2.pop('define', None) # but make sure that -DPOSRES does not stay...\n arg_2.pop('refcoord_scaling', None)\n arg_2.pop('Pcoupl', None)\n return arg_2"} +{"_id": "doc_8339", "title": "", "text": "def Func(arg_0='Func', **arg_1):\n \"\"\"Set up equilibrium Func.\n\n Additional itp files should be in the same directory as the top file.\n\n Many of the keyword arguments below already have sensible values. Note that\n setting *mainselection* = ``None`` will disable many of the automated\n choices and is often recommended when using your own mdp file.\n\n :Keywords:\n *dirname*\n set up under directory dirname [Func]\n *struct*\n input structure (gro, pdb, ...) [Func_POSRES/md_posres.pdb]\n *top*\n topology file [top/system.top]\n *mdp*\n mdp file (or use the template) [templates/md.mdp]\n *ndx*\n index file (supply when using a custom mdp)\n *includes*\n additional directories to search for itp files\n *mainselection*\n ``make_ndx`` selection to select main group [\"Protein\"]\n (If ``None`` then no canonical index file is generated and\n it is the user's responsibility to set *tc_grps*,\n *tau_t*, and *ref_t* as keyword arguments, or provide the mdp template\n with all parameter pre-set in *mdp* and probably also your own *ndx*\n index file.)\n *deffnm*\n default filename for Gromacs run [md]\n *runtime*\n total length of the simulation in ps [1000]\n *dt*\n integration time step in ps [0.002]\n *qscript*\n script to submit to the queuing system; by default\n uses the template :data:`gromacs.config.qscript_template`, which can\n be manually set to another template from :data:`gromacs.config.templates`;\n can also be a list of template names.\n *qname*\n name to be used for the job in the queuing system [Func_GMX]\n *mdrun_opts*\n option flags for the :program:`mdrun` command in the queuing system\n scripts such as \"-stepout 100 -dgdl\". [\"\"]\n *kwargs*\n remaining key/value pairs that should be changed in the template mdp\n file, e.g. 
``nstxtcout=250, nstfout=250`` or command line options for\n :program`grompp` such as ``maxwarn=1``.\n\n :Returns: a dict that can be fed into :func:`gromacs.setup.Func`\n (but check, just in case, especially if you want to\n change the *define* parameter in the mdp file)\n \"\"\"\n\n logger.info(\"[{dirname!s}] Setting up Func...\".format(**vars()))\n arg_1.setdefault('struct', 'Func_POSRES/md.gro')\n arg_1.setdefault('qname', 'Func_GMX')\n return _setup_Func(arg_0, **arg_1)"} +{"_id": "doc_8340", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2='md', arg_3='MD', arg_4=None,\n arg_5=None, arg_6=1.0, arg_7=None, arg_8=None,\n arg_9=None, **arg_10):\n \"\"\"Write scripts for queuing systems.\n\n\n This sets up queuing system run scripts with a simple search and replace in\n templates. See :func:`gromacs.cbook.edit_txt` for details. Shell scripts\n are made executable.\n\n :Arguments:\n *templates*\n Template file or list of template files. The \"files\" can also be names\n or symbolic names for templates in the templates directory. See\n :mod:`gromacs.config` for details and rules for writing templates.\n *prefix*\n Prefix for the final run script filename; by default the filename will be\n the same as the template. [None]\n *dirname*\n Directory in which to place the submit scripts. [.]\n *deffnm*\n Default filename prefix for :program:`mdrun` ``-deffnm`` [md]\n *jobname*\n Name of the job in the queuing system. [MD]\n *budget*\n Which budget to book the runtime on [None]\n *startdir*\n Explicit path on the remote system (for run scripts that need to `cd`\n into this directory at the beginning of execution) [None]\n *mdrun_opts*\n String of additional options for :program:`mdrun`.\n *walltime*\n Maximum runtime of the job in hours. [1]\n *npme*\n number of PME nodes\n *jobarray_string*\n Multi-line string that is spliced in for job array functionality\n (see :func:`gromacs.qsub.generate_submit_array`; do not use manually)\n *kwargs*\n all other kwargs are ignored\n\n :Returns: list of generated run scripts\n \"\"\"\n if not arg_3[0].isalpha():\n arg_3 = 'MD_'+arg_3\n arg_11 = \"To make the jobname legal it must start with a letter: changed to {0!r}\".format(arg_3)\n logger.warn(arg_11)\n warnings.warn(arg_11, category=AutoCorrectionWarning)\n if arg_1 is None:\n arg_1 = \"\"\n if arg_5 is not None:\n arg_5 = '\"'+str(arg_5)+'\"' # TODO: could test if quotes already present\n\n arg_12 = arg_10.pop('dirname', os.path.curdir)\n\n arg_13 = Timedelta(hours=arg_6)\n arg_6 = arg_13.strftime(\"%h:%M:%S\")\n arg_14 = arg_13.ashours\n\n def write_script(arg_15):\n arg_16 = os.path.join(arg_12, arg_1 + os.path.basename(arg_15))\n logger.info(\"Setting up queuing system script {submitscript!r}...\".format(**vars()))\n # These substitution rules are documented for the user in the module doc string\n arg_17 = detect_queuing_system(arg_15)\n if arg_17 is not None and (arg_17.name == 'Slurm'):\n cbook.edit_txt(arg_15,\n [('^ *DEFFNM=','(?<==)(.*)', arg_2),\n ('^#.*(-J)', '((?<=-J\\s))\\s*\\w+', arg_3),\n ('^#.*(-A|account_no)', '((?<=-A\\s)|(?<=account_no\\s))\\s*\\w+', arg_4),\n ('^#.*(-t)', '(?<=-t\\s)(\\d+:\\d+:\\d+)', arg_6),\n ('^ *WALL_HOURS=', '(?<==)(.*)', arg_14),\n ('^ *STARTDIR=', '(?<==)(.*)', arg_8),\n ('^ *NPME=', '(?<==)(.*)', arg_9),\n ('^ *MDRUN_OPTS=', '(?<==)(\"\")', arg_5), # only replace literal \"\"\n ('^# JOB_ARRAY_PLACEHOLDER', '^.*$', arg_7),\n ],\n newname=arg_16)\n arg_18 = os.path.splitext(arg_16)[1]\n else:\n cbook.edit_txt(arg_15,\n [('^ *DEFFNM=','(?<==)(.*)', 
arg_2),\n ('^#.*(-N|job_name)', '((?<=-N\\s)|(?<=job_name\\s))\\s*\\w+', arg_3),\n ('^#.*(-A|account_no)', '((?<=-A\\s)|(?<=account_no\\s))\\s*\\w+', arg_4),\n ('^#.*(-l walltime|wall_clock_limit)', '(?<==)(\\d+:\\d+:\\d+)', arg_6),\n ('^ *WALL_HOURS=', '(?<==)(.*)', arg_14),\n ('^ *STARTDIR=', '(?<==)(.*)', arg_8),\n ('^ *NPME=', '(?<==)(.*)', arg_9),\n ('^ *MDRUN_OPTS=', '(?<==)(\"\")', arg_5), # only replace literal \"\"\n ('^# JOB_ARRAY_PLACEHOLDER', '^.*$', arg_7),\n ],\n newname=arg_16)\n arg_18 = os.path.splitext(arg_16)[1]\n if arg_18 in ('.sh', '.csh', '.bash'):\n os.chmod(arg_16, 0o755)\n return arg_16\n\n return [write_script(arg_15) for arg_15 in config.get_templates(arg_0)]"} +{"_id": "doc_8341", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Primitive queuing system detection; only looks at suffix at the moment.\"\"\"\n arg_2 = os.path.splitext(arg_1)[1].lower()\n if arg_2.startswith('.'):\n arg_2 = arg_2[1:]\n return arg_0.suffix == arg_2"} +{"_id": "doc_8342", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns all dates from first to last included.\"\"\"\n return [arg_0 + timedelta(days=arg_2)\n for arg_2 in range(1 + (arg_1 - arg_0).days)]"} +{"_id": "doc_8343", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fill missing rates of a currency.\n\n This is done by linear interpolation of the two closest available rates.\n\n :param str currency: The currency to fill missing rates for.\n \"\"\"\n arg_2 = arg_0._rates[arg_1]\n\n # tmp will store the closest rates forward and backward\n arg_3 = defaultdict(lambda: [None, None])\n\n for arg_4 in sorted(arg_2):\n arg_5 = arg_2[arg_4]\n if arg_5 is not None:\n arg_6 = arg_5\n arg_7 = 0\n else:\n arg_7 += 1\n arg_3[arg_4][0] = arg_6, arg_7\n\n for arg_4 in sorted(arg_2, reverse=True):\n arg_5 = arg_2[arg_4]\n if arg_5 is not None:\n arg_6 = arg_5\n arg_7 = 0\n else:\n arg_7 += 1\n arg_3[arg_4][1] = arg_6, arg_7\n\n for arg_4 in sorted(arg_3):\n (arg_8, arg_9), (arg_10, arg_11) = arg_3[arg_4]\n arg_2[arg_4] = (arg_8 * arg_11 + arg_10 * arg_9) / (arg_9 + arg_11)\n if arg_0.verbose:\n print(('{0}: filling {1} missing rate using {2} ({3}d old) and '\n '{4} ({5}d later)').format(arg_1, arg_4, arg_8, arg_9, arg_10, arg_11))"} +{"_id": "doc_8344", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3='EUR', arg_4=None):\n \"\"\"Convert amount from a currency to another one.\n\n :param float amount: The amount of `currency` to Func.\n :param str currency: The currency to Func from.\n :param str new_currency: The currency to Func to.\n :param datetime.date date: Use the conversion rate of this date. 
If this\n is not given, the most recent rate is used.\n\n :return: The value of `amount` in `new_currency`.\n :rtype: float\n\n >>> from datetime import date\n >>> c = CurrencyConverter()\n >>> c.Func(100, 'EUR', 'USD', date=date(2014, 3, 28))\n 137.5...\n >>> c.Func(100, 'USD', date=date(2014, 3, 28))\n 72.67...\n >>> c.Func(100, 'BGN', date=date(2010, 11, 21))\n Traceback (most recent call last):\n RateNotFoundError: BGN has no rate for 2010-11-21\n \"\"\"\n for arg_5 in arg_2, arg_3:\n if arg_5 not in arg_0.currencies:\n raise ValueError('{0} is not a supported currency'.format(arg_5))\n\n if arg_4 is None:\n arg_4 = arg_0.bounds[arg_2].last_date\n else:\n try:\n arg_4 = arg_4.date() # fallback if input was a datetime object\n except AttributeError:\n pass\n\n arg_6 = arg_0._get_rate(arg_2, arg_4)\n arg_7 = arg_0._get_rate(arg_3, arg_4)\n\n return float(arg_1) / arg_6 * arg_7"} +{"_id": "doc_8345", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=2):\n \"\"\"Animate given frame for set number of iterations.\n\n Parameters\n ----------\n frames : list\n Frames for animating\n interval : float\n Interval between two frames\n name : str\n Name of animation\n iterations : int, optional\n Number of loops for animations\n \"\"\"\n for arg_4 in range(arg_3):\n for arg_5 in arg_0:\n arg_5 = get_coded_text(arg_5)\n arg_6 = \"\\r{0} {1}\".format(arg_5, arg_2)\n sys.stdout.write(arg_6)\n sys.stdout.write(CLEAR_LINE)\n sys.stdout.flush()\n time.sleep(0.001 * arg_1)"} +{"_id": "doc_8346", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the total number of unmasked regular pixels in a masks.\"\"\"\n\n arg_1 = 0\n\n for arg_2 in range(arg_0.shape[0]):\n for arg_3 in range(arg_0.shape[1]):\n if not arg_0[arg_2, arg_3]:\n arg_1 += 1\n\n return arg_1"} +{"_id": "doc_8347", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=(0.0, 0.0)):\n \"\"\"Compute an annular masks from an input inner and outer masks radius and regular shape.\"\"\"\n\n arg_5 = np.full(arg_0, True)\n\n arg_6 = mask_centres_from_shape_pixel_scale_and_centre(arg_0=arg_5.shape, arg_1=arg_1, arg_4=arg_4)\n\n for arg_7 in range(arg_5.shape[0]):\n for arg_8 in range(arg_5.shape[1]):\n\n arg_9 = (arg_7 - arg_6[0]) * arg_1\n arg_10 = (arg_8 - arg_6[1]) * arg_1\n\n arg_11 = np.sqrt(arg_10 ** 2 + arg_9 ** 2)\n\n if arg_3 >= arg_11 >= arg_2:\n arg_5[arg_7, arg_8] = False\n\n return arg_5"} +{"_id": "doc_8348", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute a blurring masks from an input masks and psf shape.\n\n The blurring masks corresponds to all pixels which are outside of the masks but will have a fraction of their \\\n light blur into the masked region due to PSF convolution.\"\"\"\n\n arg_2 = np.full(arg_0.shape, True)\n\n for arg_3 in range(arg_0.shape[0]):\n for arg_4 in range(arg_0.shape[1]):\n if not arg_0[arg_3, arg_4]:\n for arg_5 in range((-arg_1[0] + 1) // 2, (arg_1[0] + 1) // 2):\n for arg_6 in range((-arg_1[1] + 1) // 2, (arg_1[1] + 1) // 2):\n if 0 <= arg_4 + arg_6 <= arg_0.shape[1] - 1 and 0 <= arg_3 + arg_5 <= arg_0.shape[0] - 1:\n if arg_0[arg_3 + arg_5, arg_4 + arg_6]:\n arg_2[arg_3 + arg_5, arg_4 + arg_6] = False\n else:\n raise exc.MaskException(\n \"setup_blurring_mask extends beyond the sub_grid_size of the masks - pad the \"\n \"datas array before masking\")\n\n return arg_2"} +{"_id": "doc_8349", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute a 1D array listing all edge pixel indexes in the masks. 
An edge pixel is a pixel which is not fully \\\n surrounded by False mask values i.e. it is on an edge.\"\"\"\n\n arg_1 = total_Func(arg_0)\n\n arg_2 = np.zeros(arg_1)\n arg_3 = 0\n arg_4 = 0\n\n for arg_5 in range(arg_0.shape[0]):\n for arg_6 in range(arg_0.shape[1]):\n if not arg_0[arg_5, arg_6]:\n if arg_0[arg_5 + 1, arg_6] or arg_0[arg_5 - 1, arg_6] or arg_0[arg_5, arg_6 + 1] or arg_0[arg_5, arg_6 - 1] or \\\n arg_0[arg_5 + 1, arg_6 + 1] or arg_0[arg_5 + 1, arg_6 - 1] or arg_0[arg_5 - 1, arg_6 + 1] or arg_0[arg_5 - 1, arg_6 - 1]:\n arg_2[arg_3] = arg_4\n arg_3 += 1\n\n arg_4 += 1\n\n return arg_2"}
+{"_id": "doc_8350", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Output the figure, either as an image on the screen or to the hard-disk as a .png or .fits file.\n\n Parameters\n -----------\n array : ndarray\n The 2D array of image to be output, required for outputting the image as a fits file.\n as_subplot : bool\n Whether the figure is part of a subplot, in which case the figure is not output so that the entire subplot can \\\n be output instead using the *output_subplot_array* function.\n output_path : str\n The path on the hard-disk where the figure is output.\n output_filename : str\n The filename of the figure that is output.\n output_format : str\n The format the figure is output in:\n 'show' - display on computer screen.\n 'png' - output to hard-disk as a png.\n 'fits' - output to hard-disk as a fits file.\n \"\"\"\n if not arg_1:\n\n if arg_4 == 'show':\n plt.show()\n elif arg_4 == 'png':\n plt.savefig(arg_2 + arg_3 + '.png', bbox_inches='tight')\n elif arg_4 == 'fits':\n array_util.numpy_array_2d_to_fits(array_2d=arg_0, file_path=arg_2 + arg_3 + '.fits',\n overwrite=True)"}
+{"_id": "doc_8351", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Output a figure which consists of a set of subplots, either as an image on the screen or to the hard-disk as a \\\n .png file.\n\n Parameters\n -----------\n output_path : str\n The path on the hard-disk where the figure is output.\n output_filename : str\n The filename of the figure that is output.\n output_format : str\n The format the figure is output in:\n 'show' - display on computer screen.\n 'png' - output to hard-disk as a png.\n \"\"\"\n if arg_2 == 'show':\n plt.show()\n elif arg_2 == 'png':\n plt.savefig(arg_0 + arg_1 + '.png', bbox_inches='tight')\n elif arg_2 == 'fits':\n raise exc.PlottingException('You cannot output a subplot with format .fits')"}
+{"_id": "doc_8352", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate an image psf shape tag, to customize phase names based on the size of the image PSF that the original PSF \\\n is trimmed to for faster run times.\n\n This changes the phase name 'phase_name' as follows:\n\n image_psf_shape = 1 -> phase_name\n image_psf_shape = 2 -> phase_name_image_psf_shape_2\n \"\"\"\n if arg_0 is None:\n return ''\n else:\n arg_1 = str(arg_0[0])\n arg_2 = str(arg_0[1])\n return ('_image_psf_' + arg_1 + 'x' + arg_2)"}
+{"_id": "doc_8353", "title": "", "text": "def Func(arg_0):\n \"\"\"Generate an inversion psf shape tag, to customize phase names based on the size of the inversion PSF that the \\\n original PSF is trimmed to for faster run times.\n\n This changes the phase name 'phase_name' as follows:\n\n inversion_psf_shape = 1 -> phase_name\n inversion_psf_shape = 2 -> phase_name_inversion_psf_shape_2\n \"\"\"\n if arg_0 is None:\n return ''\n
else:\n arg_1 = str(arg_0[0])\n arg_2 = str(arg_0[1])\n return ('_inv_psf_' + arg_1 + 'x' + arg_2)"} +{"_id": "doc_8354", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"This function determines whether the tracer should compute the deflections at the next plane.\n\n This is True if there is another plane after this plane, else it is False..\n\n Parameters\n -----------\n plane_index : int\n The index of the plane we are deciding if we should compute its deflections.\n total_planes : int\n The total number of planes.\"\"\"\n\n if arg_0 < arg_1 - 1:\n return True\n elif arg_0 == arg_1 - 1:\n return False\n else:\n raise exc.RayTracingException('A galaxy was not correctly allocated its previous / next redshifts')"} +{"_id": "doc_8355", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Given a plane and scaling factor, compute a set of scaled deflections.\n\n Parameters\n -----------\n plane : plane.Plane\n The plane whose deflection stack is scaled.\n scaling_factor : float\n The factor the deflection angles are scaled by, which is typically the scaling factor between redshifts for \\\n multi-plane lensing.\n \"\"\"\n\n def scale(arg_2):\n return np.multiply(arg_1, arg_2)\n\n if arg_0.deflection_stack is not None:\n return arg_0.deflection_stack.apply_function(scale)\n else:\n return None"} +{"_id": "doc_8356", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"From the pixel-neighbors, setup the regularization matrix using the constant regularization scheme.\n\n Parameters\n ----------\n coefficients : tuple\n The regularization coefficients which controls the degree of smoothing of the inversion reconstruction.\n pixel_neighbors : ndarray\n An array of length (total_pixels) which provides the index of all neighbors of every pixel in \\\n the Voronoi grid (entries of -1 correspond to no neighbor).\n pixel_neighbors_size : ndarrayy\n An array of length (total_pixels) which gives the number of neighbors of every pixel in the \\\n Voronoi grid.\n \"\"\"\n\n arg_3 = len(arg_1)\n\n arg_4 = np.zeros(shape=(arg_3, arg_3))\n\n arg_5 = arg_0[0] ** 2.0\n\n for arg_6 in range(arg_3):\n arg_4[arg_6, arg_6] += 1e-8\n for arg_7 in range(arg_2[arg_6]):\n arg_8 = arg_1[arg_6, arg_7]\n arg_4[arg_6, arg_6] += arg_5\n arg_4[arg_6, arg_8] -= arg_5\n\n return arg_4"} +{"_id": "doc_8357", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Setup the colorbar of the figure, specifically its ticksize and the size is appears relative to the figure.\n\n Parameters\n -----------\n cb_ticksize : int\n The size of the tick labels on the colorbar.\n cb_fraction : float\n The fraction of the figure that the colorbar takes up, which resizes the colorbar relative to the figure.\n cb_pad : float\n Pads the color bar in the figure, which resizes the colorbar relative to the figure.\n cb_tick_values : [float]\n Manually specified values of where the colorbar tick labels appear on the colorbar.\n cb_tick_labels : [float]\n Manually specified labels of the color bar tick labels, which appear where specified by cb_tick_values.\n \"\"\"\n\n if arg_3 is None and arg_4 is None:\n arg_5 = plt.colorbar(fraction=arg_1, pad=arg_2)\n elif arg_3 is not None and arg_4 is not None:\n arg_5 = plt.colorbar(fraction=arg_1, pad=arg_2, ticks=arg_3)\n arg_5.ax.set_yticklabels(arg_4)\n else:\n raise exc.PlottingException('Only 1 entry of cb_tick_values or cb_tick_labels was input. 
You must either supply '\n 'both the values and labels, or neither.')\n\n arg_5.ax.tick_params(labelsize=arg_0)"}
+{"_id": "doc_8358", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Plot the mask of the array on the figure.\n\n Parameters\n -----------\n mask : ndarray of data.array.mask.Mask\n The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.\n units : str\n The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n kpc_per_arcsec : float or None\n The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n pointsize : int\n The size of the points plotted to show the mask.\n \"\"\"\n\n if arg_0 is not None:\n\n plt.gca()\n arg_5 = arg_0.masked_grid_index_to_pixel[arg_0.edge_pixels] + 0.5\n if arg_4 is not None:\n arg_5 -= arg_4\n arg_6 = arg_0.grid_pixels_to_grid_arcsec(grid_pixels=arg_5)\n arg_7 = convert_grid_units(array=arg_0, grid_arcsec=arg_6, arg_1=arg_1,\n arg_2=arg_2)\n\n plt.scatter(y=arg_7[:,0], x=arg_7[:,1], s=arg_3, c='k')"}
+{"_id": "doc_8359", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"Plot the borders of the mask or the array on the figure.\n\n Parameters\n -----------\n mask : ndarray of data.array.mask.Mask\n The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.\n should_Func : bool\n If a mask is supplied, its border pixels (e.g. the exterior edge) are plotted if this is *True*.\n units : str\n The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n kpc_per_arcsec : float or None\n The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n border_pointsize : int\n The size of the points plotted to show the borders.\n \"\"\"\n if arg_1 and arg_0 is not None:\n\n plt.gca()\n arg_6 = arg_0.masked_grid_index_to_pixel[arg_0.border_pixels]\n\n if arg_5 is not None:\n arg_6 -= arg_5\n\n arg_7 = arg_0.grid_pixels_to_grid_arcsec(grid_pixels=arg_6)\n arg_8 = convert_grid_units(array=arg_0, grid_arcsec=arg_7, arg_2=arg_2,\n arg_3=arg_3)\n\n plt.scatter(y=arg_8[:,0], x=arg_8[:,1], s=arg_4, c='y')"}
+{"_id": "doc_8360", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5):\n \"\"\"Plot a grid of points over the array of data on the figure.\n\n Parameters\n -----------\n grid_arcsec : ndarray or data.array.grids.RegularGrid\n A grid of (y,x) coordinates in arc-seconds which may be plotted over the array.\n array : data.array.scaled_array.ScaledArray\n The 2D array of data which is plotted.\n units : str\n The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n kpc_per_arcsec : float or None\n The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n grid_pointsize : int\n The size of the points plotted to show the grid.\n \"\"\"\n if arg_0 is not None:\n\n if arg_5 is not None:\n arg_0 -= arg_5\n\n arg_6 = convert_grid_units(arg_0=arg_0, arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3)\n\n plt.scatter(y=np.asarray(arg_6[:, 0]), x=np.asarray(arg_6[:, 1]), s=arg_4, c='k')"}
+{"_id": "doc_8361", "title": "", "text": "def Func(arg_0):\n \"\"\"The mapping matrix is a matrix representing the mapping between every unmasked pixel of a grid and \\\n the pixels of a pixelization.
Non-zero entries signify a mapping, whereas zeros signify no mapping.\n\n For example, if the regular grid has 5 pixels and the pixelization 3 pixels, with the following mappings:\n\n regular pixel 0 -> pixelization pixel 0\n regular pixel 1 -> pixelization pixel 0\n regular pixel 2 -> pixelization pixel 1\n regular pixel 3 -> pixelization pixel 1\n regular pixel 4 -> pixelization pixel 2\n\n The mapping matrix (which is of dimensions regular_pixels x pixelization_pixels) would appear as follows:\n\n [1, 0, 0] [0->0]\n [1, 0, 0] [1->0]\n [0, 1, 0] [2->1]\n [0, 1, 0] [3->1]\n [0, 0, 1] [4->2]\n\n The mapping matrix is in fact built using the sub-grid of the grid-stack, whereby each regular-pixel is \\\n divided into a regular grid of sub-pixels which are all paired to pixels in the pixelization. The entires \\\n in the mapping matrix now become fractional values dependent on the sub-grid size. For example, for a 2x2 \\\n sub-grid in each pixel (which means the fraction value is 1.0/(2.0^2) = 0.25, if we have the following mappings:\n\n regular pixel 0 -> sub pixel 0 -> pixelization pixel 0\n regular pixel 0 -> sub pixel 1 -> pixelization pixel 1\n regular pixel 0 -> sub pixel 2 -> pixelization pixel 1\n regular pixel 0 -> sub pixel 3 -> pixelization pixel 1\n regular pixel 1 -> sub pixel 0 -> pixelization pixel 1\n regular pixel 1 -> sub pixel 1 -> pixelization pixel 1\n regular pixel 1 -> sub pixel 2 -> pixelization pixel 1\n regular pixel 1 -> sub pixel 3 -> pixelization pixel 1\n regular pixel 2 -> sub pixel 0 -> pixelization pixel 2\n regular pixel 2 -> sub pixel 1 -> pixelization pixel 2\n regular pixel 2 -> sub pixel 2 -> pixelization pixel 3\n regular pixel 2 -> sub pixel 3 -> pixelization pixel 3\n\n The mapping matrix (which is still of dimensions regular_pixels x source_pixels) would appear as follows:\n\n [0.25, 0.75, 0.0, 0.0] [1 sub-pixel maps to pixel 0, 3 map to pixel 1]\n [ 0.0, 1.0, 0.0, 0.0] [All sub-pixels map to pixel 1]\n [ 0.0, 0.0, 0.5, 0.5] [2 sub-pixels map to pixel 2, 2 map to pixel 3]\n \"\"\"\n return mapper_util.Func_from_sub_to_pix(sub_to_pix=arg_0.sub_to_pix, pixels=arg_0.pixels,\n regular_pixels=arg_0.grid_stack.regular.shape[0],\n sub_to_regular=arg_0.grid_stack.sub.sub_to_regular,\n sub_grid_fraction=arg_0.grid_stack.sub.sub_grid_fraction)"} +{"_id": "doc_8362", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the mappings between a pixelization's pixels and the unmasked regular-grid pixels. These mappings \\\n are determined after the regular-grid is used to determine the pixelization.\n\n The pixelization's pixels map to different number of regular-grid pixels, thus a list of lists is used to \\\n represent these mappings\"\"\"\n Func = [[] for _ in range(arg_0.pixels)]\n\n for arg_2, arg_3 in enumerate(arg_0.regular_to_pix):\n\n Func[arg_3].append(arg_2)\n\n return Func"} +{"_id": "doc_8363", "title": "", "text": "def Func(arg_0):\n \"\"\"The 1D index mappings between the regular pixels and Voronoi pixelization pixels.\"\"\"\n return mapper_util.voronoi_Func_from_grids_and_geometry(regular_grid=arg_0.grid_stack.regular,\n regular_to_nearest_pix=arg_0.grid_stack.pix.regular_to_nearest_pix,\n pixel_centres=arg_0.geometry.pixel_centres, pixel_neighbors=arg_0.geometry.pixel_neighbors,\n pixel_neighbors_size=arg_0.geometry.pixel_neighbors_size).astype('int')"} +{"_id": "doc_8364", "title": "", "text": "def Func(arg_0):\n \"\"\" The 1D index mappings between the sub pixels and Voronoi pixelization pixels. 
\"\"\"\n return mapper_util.voronoi_Func_from_grids_and_geometry(sub_grid=arg_0.grid_stack.sub,\n regular_to_nearest_pix=arg_0.grid_stack.pix.regular_to_nearest_pix,\n sub_to_regular=arg_0.grid_stack.sub.sub_to_regular, pixel_centres=arg_0.geometry.pixel_centres,\n pixel_neighbors=arg_0.geometry.pixel_neighbors,\n pixel_neighbors_size=arg_0.geometry.pixel_neighbors_size).astype('int')"} +{"_id": "doc_8365", "title": "", "text": "def Func(arg_0, arg_1, arg_2=-1):\n \"\"\"\n Generate a two-dimensional poisson noise_maps-mappers from an image.\n\n Values are computed from a Poisson distribution using the image's input values in units of counts.\n\n Parameters\n ----------\n image : ndarray\n The 2D image, whose values in counts are used to draw Poisson noise_maps values.\n exposure_time_map : Union(ndarray, int)\n 2D array of the exposure time in each pixel used to convert to / from counts and electrons per second.\n seed : int\n The seed of the random number generator, used for the random noise_maps maps.\n\n Returns\n -------\n poisson_noise_map: ndarray\n An array describing simulated poisson noise_maps\n \"\"\"\n setup_random_seed(arg_2)\n arg_3 = np.multiply(arg_0, arg_1)\n return arg_0 - np.divide(np.random.poisson(arg_3, arg_0.shape), arg_1)"} +{"_id": "doc_8366", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3,\n arg_4):\n \"\"\"Factory for loading the background noise-map from a .fits file.\n\n This factory also includes a number of routines for converting the background noise-map from from other units (e.g. \\\n a weight map).\n\n Parameters\n ----------\n background_noise_map_path : str\n The path to the background_noise_map .fits file containing the background noise-map \\\n (e.g. '/path/to/background_noise_map.fits')\n background_noise_map_hdu : int\n The hdu the background_noise_map is contained in the .fits file specified by *background_noise_map_path*.\n pixel_scale : float\n The size of each pixel in arc seconds.\n convert_background_noise_map_from_weight_map : bool\n If True, the bacground noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \\\n *NoiseMap.from_weight_map).\n convert_background_noise_map_from_inverse_noise_map : bool\n If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a \\\n noise-map (see *NoiseMap.from_inverse_noise_map).\n \"\"\"\n arg_5 = sum([arg_3,\n arg_4])\n\n if arg_5 == 0 and arg_0 is not None:\n return NoiseMap.from_fits_with_pixel_scale(file_path=arg_0, hdu=arg_1,\n arg_2=arg_2)\n elif arg_3 and arg_0 is not None:\n arg_6 = Array.from_fits(file_path=arg_0, hdu=arg_1)\n return NoiseMap.from_weight_map(arg_6=arg_6, arg_2=arg_2)\n elif arg_4 and arg_0 is not None:\n arg_7 = Array.from_fits(file_path=arg_0, hdu=arg_1)\n return NoiseMap.from_inverse_noise_map(arg_7=arg_7, arg_2=arg_2)\n else:\n return None"} +{"_id": "doc_8367", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Factory for loading the psf from a .fits file.\n\n Parameters\n ----------\n psf_path : str\n The path to the psf .fits file containing the psf (e.g. 
'/path/to/psf.fits')\n psf_hdu : int\n The hdu the psf is contained in the .fits file specified by *psf_path*.\n pixel_scale : float\n The size of each pixel in arc seconds.\n renormalize : bool\n If True, the PSF is renoralized such that all elements sum to 1.0.\n \"\"\"\n if arg_3:\n return PSF.from_fits_renormalized(file_path=arg_0, hdu=arg_1, arg_2=arg_2)\n if not arg_3:\n return PSF.from_fits_with_scale(file_path=arg_0, hdu=arg_1, arg_2=arg_2)"} +{"_id": "doc_8368", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6):\n \"\"\"Factory for loading the exposure time map from a .fits file.\n\n This factory also includes a number of routines for computing the exposure-time map from other unblurred_image_1d \\\n (e.g. the background noise-map).\n\n Parameters\n ----------\n exposure_time_map_path : str\n The path to the exposure_time_map .fits file containing the exposure time map \\\n (e.g. '/path/to/exposure_time_map.fits')\n exposure_time_map_hdu : int\n The hdu the exposure_time_map is contained in the .fits file specified by *exposure_time_map_path*.\n pixel_scale : float\n The size of each pixel in arc seconds.\n shape : (int, int)\n The shape of the image, required if a single value is used to calculate the exposure time map.\n exposure_time : float\n The exposure-time used to compute the expsure-time map if only a single value is used.\n exposure_time_map_from_inverse_noise_map : bool\n If True, the exposure-time map is computed from the background noise_map map \\\n (see *ExposureTimeMap.from_background_noise_map*)\n inverse_noise_map : ndarray\n The background noise-map, which the Poisson noise-map can be calculated using.\n \"\"\"\n arg_7 = sum([arg_5])\n\n if arg_4 is not None and arg_0 is not None:\n raise exc.DataException(\n 'You have supplied both a exposure_time_map_path to an exposure time map and an exposure time. Only'\n 'one quantity should be supplied.')\n\n if arg_7 == 0:\n\n if arg_4 is not None and arg_0 is None:\n return ExposureTimeMap.single_value(value=arg_4, arg_2=arg_2, arg_3=arg_3)\n elif arg_4 is None and arg_0 is not None:\n return ExposureTimeMap.from_fits_with_pixel_scale(file_path=arg_0,\n hdu=arg_1, arg_2=arg_2)\n\n else:\n\n if arg_5:\n return ExposureTimeMap.from_exposure_time_and_inverse_noise_map(arg_2=arg_2,\n arg_4=arg_4,\n arg_6=arg_6)"} +{"_id": "doc_8369", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Factory for loading the background sky from a .fits file.\n\n Parameters\n ----------\n background_sky_map_path : str\n The path to the background_sky_map .fits file containing the background sky map \\\n (e.g. 
'/path/to/background_sky_map.fits').\n background_sky_map_hdu : int\n The hdu the background_sky_map is contained in the .fits file specified by *background_sky_map_path*.\n pixel_scale : float\n The size of each pixel in arc seconds.\n \"\"\"\n if arg_0 is not None:\n return ScaledSquarePixelArray.from_fits_with_pixel_scale(file_path=arg_0,\n hdu=arg_1, arg_2=arg_2)\n else:\n return None"} +{"_id": "doc_8370", "title": "", "text": "def Func(arg_0):\n \"\"\"The estimated absolute_signal-to-noise_maps mappers of the image.\"\"\"\n return np.divide(np.abs(arg_0.image), arg_0.noise_map)"} +{"_id": "doc_8371", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=(0.0, 0.0), arg_5=1.0, arg_6=0.0):\n \"\"\"Simulate the PSF as an elliptical Gaussian profile.\"\"\"\n from autolens.model.profiles.light_profiles import EllipticalGaussian\n arg_7 = EllipticalGaussian(arg_4=arg_4, arg_5=arg_5, arg_6=arg_6, intensity=1.0, arg_3=arg_3)\n arg_8 = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=np.full(arg_1, False),\n pixel_scales=(\n arg_2, arg_2))\n arg_9 = arg_7.intensities_from_grid(grid=arg_8)\n arg_10 = mapping_util.map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d=arg_9,\n arg_1=arg_1)\n return PSF(array=arg_10, arg_2=arg_2, renormalize=True)"} +{"_id": "doc_8372", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Loads a PSF from fits and renormalizes it\n\n Parameters\n ----------\n pixel_scale\n file_path: String\n The path to the file containing the PSF\n hdu : int\n The HDU the PSF is stored in the .fits file.\n\n Returns\n -------\n psf: PSF\n A renormalized PSF instance\n \"\"\"\n arg_4 = PSF.from_fits_with_scale(arg_1, arg_2, arg_3)\n arg_4[:, :] = np.divide(arg_4, np.sum(arg_4))\n return arg_4"} +{"_id": "doc_8373", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Loads the PSF from a .fits file.\n\n Parameters\n ----------\n pixel_scale\n file_path: String\n The path to the file containing the PSF\n hdu : int\n The HDU the PSF is stored in the .fits file.\n \"\"\"\n return arg_0(array=array_util.numpy_array_2d_from_fits(arg_1, arg_2), arg_3=arg_3)"} +{"_id": "doc_8374", "title": "", "text": "def Func(arg_0):\n \"\"\"Renormalize the PSF such that its data_vector values sum to unity.\"\"\"\n return PSF(array=arg_0, pixel_scale=arg_0.pixel_scale, renormalize=True)"} +{"_id": "doc_8375", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convolve an array with this PSF\n\n Parameters\n ----------\n image : ndarray\n An array representing the image the PSF is Funcd with.\n\n Returns\n -------\n Funcd_image : ndarray\n An array representing the image after convolution.\n\n Raises\n ------\n KernelException if either PSF psf dimension is odd\n \"\"\"\n if arg_0.shape[0] % 2 == 0 or arg_0.shape[1] % 2 == 0:\n raise exc.KernelException(\"PSF Kernel must be odd\")\n\n return scipy.signal.Func2d(arg_1, arg_0, mode='same')"} +{"_id": "doc_8376", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the Voronoi grid of the pixelization, using the pixel centers.\n\n Parameters\n ----------\n pixel_centers : ndarray\n The (y,x) centre of every Voronoi pixel.\n \"\"\"\n return scipy.spatial.Voronoi(np.asarray([arg_0[:, 1], arg_0[:, 0]]).T,\n qhull_options='Qbb Qc Qx Qm')"} +{"_id": "doc_8377", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute the neighbors of every Voronoi pixel as an ndarray of the pixel index's each pixel shares a \\\n vertex with.\n\n The ridge points of the Voronoi 
grid are used to derive this.\n\n Parameters\n ----------\n ridge_points : scipy.spatial.Voronoi.ridge_points\n Each Voronoi-ridge (two indexes representing a pixel mapping_matrix).\n \"\"\"\n return pixelization_util.voronoi_neighbors_from_pixels_and_ridge_points(arg_1=arg_1,\n arg_2=np.asarray(arg_2))"} +{"_id": "doc_8378", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Set the x and y labels of the figure, and set the fontsize of those labels.\n\n The x and y labels are always the distance scales, thus the labels are either arc-seconds or kpc and depend on the \\\n units the figure is plotted in.\n\n Parameters\n -----------\n units : str\n The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n kpc_per_arcsec : float\n The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n xlabelsize : int\n The fontsize of the x axes label.\n ylabelsize : int\n The fontsize of the y axes label.\n xyticksize : int\n The font size of the x and y ticks on the figure axes.\n \"\"\"\n if arg_0 in 'arcsec' or arg_1 is None:\n\n plt.xlabel('x (arcsec)', fontsize=arg_2)\n plt.ylabel('y (arcsec)', fontsize=arg_3)\n\n elif arg_0 in 'kpc':\n\n plt.xlabel('x (kpc)', fontsize=arg_2)\n plt.ylabel('y (kpc)', fontsize=arg_3)\n\n else:\n raise exc.PlottingException('The units supplied to the plotted are not a valid string (must be pixels | '\n 'arcsec | kpc)')\n\n plt.tick_params(labelsize=arg_4)"} +{"_id": "doc_8379", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Decorate a profile method that accepts a coordinate grid and returns a data grid.\n\n If an interpolator attribute is associated with the input grid then that interpolator is used to down sample the\n coordinate grid prior to calling the function and up sample the result of the function.\n\n If no interpolator attribute is associated with the input grid then the function is called as normal.\n\n Parameters\n ----------\n func\n Some method that accepts a grid\n\n Returns\n -------\n decorated_function\n The function with optional interpolation\n \"\"\"\n\n @wraps(arg_0)\n def wrapper(arg_1, arg_2, arg_3=None, *arg_4, **arg_5):\n if hasattr(arg_2, \"interpolator\"):\n arg_6 = arg_2.interpolator\n if arg_2.interpolator is not None:\n arg_7 = arg_0(arg_1, arg_6.interp_grid, arg_3, *arg_4, **arg_5)\n if arg_7.ndim == 1:\n return arg_6.interpolated_values_from_values(arg_7=arg_7)\n elif arg_7.ndim == 2:\n arg_8 = arg_6.interpolated_values_from_values(arg_7=arg_7[:, 0])\n arg_9 = arg_6.interpolated_values_from_values(arg_7=arg_7[:, 1])\n return np.asarray([arg_8, arg_9]).T\n return arg_0(arg_1, arg_2, arg_3, *arg_4, **arg_5)\n\n return wrapper"} +{"_id": "doc_8380", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"For a padded grid-stack and psf, compute an unmasked blurred image from an unmasked unblurred image.\n\n This relies on using the lens data's padded-grid, which is a grid of (y,x) coordinates which extends over the \\\n entire image as opposed to just the masked region.\n\n Parameters\n ----------\n psf : ccd.PSF\n The PSF of the image used for convolution.\n unmasked_image_1d : ndarray\n The 1D unmasked image which is blurred.\n \"\"\"\n arg_3 = arg_0.regular.convolve_array_1d_with_psf(padded_array_1d=arg_2,\n arg_1=arg_1)\n\n return arg_0.regular.scaled_array_2d_from_array_1d(array_1d=arg_3)"} +{"_id": "doc_8381", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=2):\n \"\"\"Setup a grid-stack of grid_stack from a 2D array 
shape, a pixel scale and a sub-grid size.\n \n This grid corresponds to a fully unmasked 2D array.\n\n Parameters\n -----------\n shape : (int, int)\n The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.\n pixel_scale : float\n The size of each pixel in arc seconds. \n sub_grid_size : int\n The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).\n \"\"\"\n arg_4 = RegularGrid.from_shape_and_pixel_scale(arg_1=arg_1, arg_2=arg_2)\n arg_5 = SubGrid.Func(arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3)\n arg_6 = np.array([[0.0, 0.0]])\n return GridStack(arg_4, arg_5, arg_6)"}
+{"_id": "doc_8382", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Setup a grid-stack of masked grid_stack from a mask, sub-grid size and psf-shape.\n\n Parameters\n -----------\n mask : Mask\n The mask whose masked pixels the grid-stack is set up using.\n sub_grid_size : int\n The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).\n psf_shape : (int, int)\n The shape of the PSF used in the analysis, which defines the mask's blurring-region.\n \"\"\"\n arg_4 = PaddedRegularGrid.padded_grid_from_shape_psf_shape_and_pixel_scale(\n shape=arg_1.shape,\n arg_3=arg_3,\n pixel_scale=arg_1.pixel_scale)\n arg_5 = PaddedSubGrid.padded_grid_from_mask_sub_grid_size_and_psf_shape(arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3)\n # TODO : The blurring grid is not used when the grid mapper is called, the 0.0 0.0 stops errors in ray_tracing\n # TODO : implement a more explicit solution\n return GridStack(regular=arg_4, sub=arg_5, blurring=np.array([[0.0, 0.0]]))"}
+{"_id": "doc_8383", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the Func labels of this grid, used for plotting the x-axis ticks when visualizing a regular grid\"\"\"\n return np.linspace(np.min(arg_0[:, 1]), np.max(arg_0[:, 1]), 4)"}
+{"_id": "doc_8384", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"For an input sub-gridded array, map its hyper-values from the sub-gridded values to a 1D regular grid of \\\n values by summing each set of sub-pixel values and dividing by the total number of sub-pixels.\n\n Parameters\n -----------\n sub_array_1d : ndarray\n A 1D sub-gridded array of values (e.g.
the intensities, surface-densities, potential) which is mapped to\n a 1d regular array.\n \"\"\"\n return np.multiply(arg_0.sub_grid_fraction, arg_1.reshape(-1, arg_0.sub_grid_length).sum(axis=1))"} +{"_id": "doc_8385", "title": "", "text": "def Func(arg_0):\n \"\"\"The 1D index mappings between the regular-grid and masked sparse-grid.\"\"\"\n\n return mapping_util.Func_from_sparse_mappings(\n regular_to_unmasked_sparse=arg_0.regular_to_unmasked_sparse,\n unmasked_sparse_to_sparse=arg_0.unmasked_sparse_to_sparse).astype('int')"} +{"_id": "doc_8386", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute a 2D padded blurred image from a 1D padded image.\n\n Parameters\n ----------\n padded_image_1d : ndarray\n A 1D unmasked image which is blurred with the PSF.\n psf : ndarray\n An array describing the PSF kernel of the image.\n \"\"\"\n arg_3 = arg_0.convolve_array_1d_with_psf(padded_array_1d=arg_1, arg_2=arg_2)\n return arg_0.scaled_array_2d_from_array_1d(array_1d=arg_3)"} +{"_id": "doc_8387", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Map a padded 1D array of values to its padded 2D array.\n\n Parameters\n -----------\n padded_array_1d : ndarray\n A 1D array of values which were computed using the *PaddedRegularGrid*.\n \"\"\"\n return mapping_util.map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d=arg_1,\n shape=arg_0.mask.shape)"} +{"_id": "doc_8388", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Determine a set of relocated grid_stack from an input set of grid_stack, by relocating their pixels based on the \\\n borders.\n\n The blurring-grid does not have its coordinates relocated, as it is only used for computing analytic \\\n light-profiles and not inversion-grid_stack.\n\n Parameters\n -----------\n grid_stack : GridStack\n The grid-stack, whose grid_stack coordinates are relocated.\n \"\"\"\n arg_2 = arg_1.regular[arg_0]\n return GridStack(regular=arg_0.relocated_grid_from_grid_jit(grid=arg_1.regular, arg_2=arg_2),\n sub=arg_0.relocated_grid_from_grid_jit(grid=arg_1.sub, arg_2=arg_2),\n blurring=None,\n pix=arg_0.relocated_grid_from_grid_jit(grid=arg_1.pix, arg_2=arg_2))"} +{"_id": "doc_8389", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Run a fit for each galaxy from the previous phase.\n\n Parameters\n ----------\n data: LensData\n results: ResultsCollection\n Results from all previous phases\n mask: Mask\n The mask\n positions\n\n Returns\n -------\n results: HyperGalaxyResults\n A collection of results, with one item per a galaxy\n \"\"\"\n arg_5 = arg_2.last.unmasked_model_image\n arg_6 = arg_2.last.constant.name_instance_tuples_for_class(g.Galaxy)\n\n arg_7 = copy.copy(arg_2.last)\n\n for arg_8, arg_9 in arg_6:\n arg_10 = arg_0.optimizer.copy_with_name_extension(arg_8)\n arg_10.variable.hyper_galaxy = g.HyperGalaxy\n arg_13 = arg_2.last.unmasked_image_for_galaxy(arg_9)\n arg_10.fit(arg_0.__class__.Analysis(arg_1, arg_5, arg_13))\n\n arg_14(arg_7.variable, arg_8).hyper_galaxy = arg_10.variable.hyper_galaxy\n arg_14(arg_7.constant, arg_8).hyper_galaxy = arg_10.constant.hyper_galaxy\n\n return arg_7"} +{"_id": "doc_8390", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2):\n \"\"\"Determine the mapping between every masked pixelization-grid pixel and pixelization-grid pixel. 
This is\n performed by checking whether each pixelization-grid pixel is within the regular-masks, and mapping the indexes.\n\n Parameters\n -----------\n total_sparse_pixels : int\n The total number of pixels in the pixelization grid which fall within the regular-masks.\n mask : ccd.masks.Mask\n The regular-masks within which pixelization pixels must be inside\n unmasked_sparse_grid_pixel_centres : ndarray\n The centres of the unmasked pixelization grid pixels.\n \"\"\"\n\n arg_3 = np.zeros(arg_0)\n\n arg_4 = 0\n\n for arg_5 in range(arg_2.shape[0]):\n\n arg_6 = arg_2[arg_5, 0]\n arg_7 = arg_2[arg_5, 1]\n\n if not arg_1[arg_6, arg_7]:\n arg_3[arg_4] = arg_5\n arg_4 += 1\n\n return arg_3"} +{"_id": "doc_8391", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Use the central arc-second coordinate of every unmasked pixelization grid's pixels and mapping between each\n pixelization pixel and unmasked pixelization pixel to compute the central arc-second coordinate of every masked\n pixelization grid pixel.\n\n Parameters\n -----------\n unmasked_sparse_grid : ndarray\n The (y,x) arc-second centre of every unmasked pixelization grid pixel.\n sparse_to_unmasked_sparse : ndarray\n The index mapping between every pixelization pixel and masked pixelization pixel.\n \"\"\"\n arg_2 = arg_1.shape[0]\n\n arg_3 = np.zeros((arg_2, 2))\n\n for arg_4 in range(arg_2):\n arg_3[arg_4, 0] = arg_0[arg_1[arg_4], 0]\n arg_3[arg_4, 1] = arg_0[arg_1[arg_4], 1]\n\n return arg_3"} +{"_id": "doc_8392", "title": "", "text": "def Func(arg_0, arg_1, arg_2=(-1, -1), arg_3=0.0):\n \"\"\"Resize an array to a new size around a central pixel.\n\n If the origin (e.g. the central pixel) of the resized array is not specified, the central pixel of the array is \\\n calculated automatically. For example, a (5,5) array's central pixel is (2,2). For even dimensions the central \\\n pixel is assumed to be the lower indexed value, e.g. a (6,4) array's central pixel is calculated as (2,1).\n\n The default origin is (-1, -1) because numba requires that the function input is the same type throughout the \\\n function, thus a default 'None' value cannot be used.\n\n Parameters\n ----------\n array_2d : ndarray\n The 2D array that is resized.\n resized_shape : (int, int)\n The (y,x) new pixel dimension of the trimmed array.\n origin : (int, int)\n The oigin of the resized array, e.g. 
the central pixel around which the array is extracted.\n\n Returns\n -------\n ndarray\n The resized 2D array from the input 2D array.\n\n Examples\n --------\n array_2d = np.ones((5,5))\n resize_array = resize_array_2d(array_2d=array_2d, new_shape=(2,2), origin=(2, 2))\n \"\"\"\n\n arg_4 = int(arg_0.shape[0]) % 2 == 0\n arg_5 = int(arg_0.shape[1]) % 2 == 0\n\n if arg_2 is (-1, -1):\n\n if arg_4:\n arg_6 = int(arg_0.shape[0] / 2)\n elif not arg_4:\n arg_6 = int(arg_0.shape[0] / 2)\n\n if arg_5:\n arg_7 = int(arg_0.shape[1] / 2)\n elif not arg_5:\n arg_7 = int(arg_0.shape[1] / 2)\n\n arg_2 = (arg_6, arg_7)\n\n arg_8 = np.zeros(shape=arg_1)\n\n if arg_4:\n arg_9 = arg_2[0] - int(arg_1[0] / 2)\n arg_10 = arg_2[0] + int((arg_1[0] / 2)) + 1\n elif not arg_4:\n arg_9 = arg_2[0] - int(arg_1[0] / 2)\n arg_10 = arg_2[0] + int((arg_1[0] / 2)) + 1\n\n if arg_5:\n arg_11 = arg_2[1] - int(arg_1[1] / 2)\n arg_12 = arg_2[1] + int((arg_1[1] / 2)) + 1\n elif not arg_5:\n arg_11 = arg_2[1] - int(arg_1[1] / 2)\n arg_12 = arg_2[1] + int((arg_1[1] / 2)) + 1\n\n for arg_13, arg_14 in enumerate(range(arg_9, arg_10)):\n for arg_15, arg_16 in enumerate(range(arg_11, arg_12)):\n if arg_14 >= 0 and arg_14 < arg_0.shape[0] and arg_16 >= 0 and arg_16 < arg_0.shape[1]:\n if arg_13 >= 0 and arg_13 < arg_1[0] and arg_15 >= 0 and arg_15 < arg_1[1]:\n arg_8[arg_13, arg_15] = arg_0[arg_14, arg_16]\n else:\n if arg_13 >= 0 and arg_13 < arg_1[0] and arg_15 >= 0 and arg_15 < arg_1[1]:\n arg_8[arg_13, arg_15] = arg_3\n\n return arg_8"} +{"_id": "doc_8393", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Bin up an array to coarser resolution, by binning up groups of pixels and using their mean value to determine \\\n the value of the new pixel.\n\n If an array of shape (8,8) is input and the bin up size is 2, this would return a new array of size (4,4) where \\\n every pixel was the mean of each collection of 2x2 pixels on the (8,8) array.\n\n If binning up the array leads to an edge being cut (e.g. a (9,9) array binned up by 2), an array is first \\\n extracted around the centre of that array.\n\n\n Parameters\n ----------\n array_2d : ndarray\n The 2D array that is resized.\n new_shape : (int, int)\n The (y,x) new pixel dimension of the trimmed array.\n origin : (int, int)\n The oigin of the resized array, e.g. 
the central pixel around which the array is extracted.\n\n Returns\n -------\n ndarray\n The resized 2D array from the input 2D array.\n\n Examples\n --------\n array_2d = np.ones((5,5))\n resize_array = resize_array_2d(array_2d=array_2d, new_shape=(2,2), origin=(2, 2))\n \"\"\"\n\n arg_2 = pad_2d_array_for_binning_up_with_bin_up_factor(arg_0=arg_0, arg_1=arg_1)\n\n arg_3 = np.zeros(shape=(arg_2.shape[0] // arg_1,\n arg_2.shape[1] // arg_1))\n\n for arg_4 in range(arg_3.shape[0]):\n for arg_5 in range(arg_3.shape[1]):\n arg_6 = 0.0\n for arg_7 in range(arg_1):\n for arg_8 in range(arg_1):\n arg_9 = arg_4*arg_1 + arg_7\n arg_10 = arg_5*arg_1 + arg_8\n arg_6 += arg_2[arg_9, arg_10]\n\n arg_3[arg_4,arg_5] = arg_6 / (arg_1 ** 2.0)\n\n return arg_3"} +{"_id": "doc_8394", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"For a given inversion mapping matrix, convolve every pixel's mapped regular with the PSF kernel.\n\n A mapping matrix provides non-zero entries in all elements which map two pixels to one another\n (see *inversions.mappers*).\n\n For example, lets take an regular which is masked using a 'cross' of 5 pixels:\n\n [[ True, False, True]],\n [[False, False, False]],\n [[ True, False, True]]\n\n As example mapping matrix of this cross is as follows (5 regular pixels x 3 source pixels):\n\n [1, 0, 0] [0->0]\n [1, 0, 0] [1->0]\n [0, 1, 0] [2->1]\n [0, 1, 0] [3->1]\n [0, 0, 1] [4->2]\n\n For each source-pixel, we can create an regular of its unit-surface brightnesses by mapping the non-zero\n entries back to masks. For example, doing this for source pixel 1 gives:\n\n [[0.0, 1.0, 0.0]],\n [[1.0, 0.0, 0.0]]\n [[0.0, 0.0, 0.0]]\n\n And source pixel 2:\n\n [[0.0, 0.0, 0.0]],\n [[0.0, 1.0, 1.0]]\n [[0.0, 0.0, 0.0]]\n\n We then convolve each of these regular with our PSF kernel, in 2 dimensions, like we would a normal regular. For\n example, using the kernel below:\n\n kernel:\n\n [[0.0, 0.1, 0.0]]\n [[0.1, 0.6, 0.1]]\n [[0.0, 0.1, 0.0]]\n\n Blurred Source Pixel 1 (we don't need to perform the convolution into masked pixels):\n\n [[0.0, 0.6, 0.0]],\n [[0.6, 0.0, 0.0]],\n [[0.0, 0.0, 0.0]]\n\n Blurred Source pixel 2:\n\n [[0.0, 0.0, 0.0]],\n [[0.0, 0.7, 0.7]],\n [[0.0, 0.0, 0.0]]\n\n Finally, we map each of these blurred regular back to a blurred mapping matrix, which is analogous to the\n mapping matrix.\n\n [0.6, 0.0, 0.0] [0->0]\n [0.6, 0.0, 0.0] [1->0]\n [0.0, 0.7, 0.0] [2->1]\n [0.0, 0.7, 0.0] [3->1]\n [0.0, 0.0, 0.6] [4->2]\n\n If the mapping matrix is sub-gridded, we perform the convolution on the fractional surface brightnesses in an\n identical fashion to above.\n\n Parameters\n -----------\n mapping_matrix : ndarray\n The 2D mapping matix describing how every inversion pixel maps to an datas_ pixel.\n \"\"\"\n return arg_0.convolve_matrix_jit(arg_1, arg_0.image_frame_indexes,\n arg_0.image_frame_psfs, arg_0.image_frame_lengths)"} +{"_id": "doc_8395", "title": "", "text": "def Func(arg_0, arg_1, arg_2='angular', arg_3=None,\n arg_4=None):\n \"\"\" Integrate the mass profiles's convergence profile to compute the total angular mass within an ellipse of \\\n specified major axis. 
This is centred on the mass profile.\n\n The following units for mass can be specified and output:\n\n - Dimensionless angular units (default) - 'angular'.\n - Solar masses - 'angular' (multiplies the angular mass by the critical surface mass density)\n\n Parameters\n ----------\n major_axis : float\n The major-axis radius of the ellipse.\n unit_mass : str\n The units the mass is returned in (angular | angular).\n critical_surface_density : float or None\n The critical surface mass density of the strong lens configuration, which converts mass from angular \\\n units to phsical units (e.g. solar masses).\n \"\"\"\n\n arg_0.check_units_of_radius_and_critical_surface_density(\n radius=arg_1, arg_4=arg_4)\n\n arg_5 = arg_0.new_profile_with_units_converted(\n unit_length=arg_1.unit_length, arg_2='angular',\n arg_3=arg_3, arg_4=arg_4)\n\n arg_6 = dim.Mass(value=quad(arg_5.mass_integral, a=0.0, b=arg_1, args=(arg_0.axis_ratio,))[0],\n arg_2='angular')\n return arg_6.convert(arg_2=arg_2, arg_4=arg_4)"} +{"_id": "doc_8396", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \\\n circle\"\"\"\n arg_3 = arg_1 * arg_2\n return 2 * np.pi * arg_3 * arg_0.convergence_func(arg_1)"} +{"_id": "doc_8397", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Calculate the mass between two circular annuli and compute the density by dividing by the annuli surface\n area.\n\n The value returned by the mass integral is dimensionless, therefore the density between annuli is returned in \\\n units of inverse radius squared. A conversion factor can be specified to convert this to a physical value \\\n (e.g. the critical surface mass density).\n\n Parameters\n -----------\n inner_annuli_radius : float\n The radius of the inner annulus outside of which the density are estimated.\n outer_annuli_radius : float\n The radius of the outer annulus inside of which the density is estimated.\n \"\"\"\n arg_3 = (np.pi * arg_2 ** 2.0) - (np.pi * arg_1 ** 2.0)\n return (arg_0.mass_within_circle_in_units(radius=arg_2) -\n arg_0.mass_within_circle_in_units(radius=arg_1)) \\\n / arg_3"} +{"_id": "doc_8398", "title": "", "text": "def Func(arg_0):\n \"\"\"Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles\n parameters\"\"\"\n return ((3 - arg_0.slope) / (1 + arg_0.axis_ratio)) * arg_0.einstein_radius ** (arg_0.slope - 1)"} +{"_id": "doc_8399", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Calculate the projected convergence at a given set of arc-second gridded coordinates.\n\n Parameters\n ----------\n grid : grids.RegularGrid\n The grid of (y,x) arc-second coordinates the surface density is computed on.\n \"\"\"\n\n arg_2 = np.zeros(arg_1.shape[0])\n\n arg_3 = arg_0.grid_to_elliptical_radii(arg_1)\n\n for arg_4 in range(arg_1.shape[0]):\n arg_2[arg_4] = arg_0.convergence_func(arg_3[arg_4])\n\n return arg_2"} +{"_id": "doc_8400", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Tabulate an integral over the surface density of deflection potential of a mass profile. 
This is used in \\\n the GeneralizedNFW profile classes to speed up the integration procedure.\n\n Parameters\n -----------\n grid : grids.RegularGrid\n The grid of (y,x) arc-second coordinates the potential / deflection_stacks are computed on.\n tabulate_bins : int\n The number of bins to tabulate the inner integral of this profile.\n \"\"\"\n arg_3 = 1.0e-4\n arg_4 = 1.05 * np.max(arg_0.grid_to_elliptical_radii(arg_1))\n\n arg_5 = np.log10(arg_3)\n arg_6 = np.log10(arg_4)\n arg_7 = (arg_6 - arg_5) / (arg_2 - 1)\n\n return arg_3, arg_4, arg_5, arg_6, arg_7"} +{"_id": "doc_8401", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Compute the intensity of the profile at a given radius.\n\n Parameters\n ----------\n radius : float\n The distance from the centre of the profile.\n \"\"\"\n return arg_0.intensity * np.exp(\n -arg_0.sersic_constant * (((arg_1 / arg_0.effective_radius) ** (1. / arg_0.sersic_index)) - 1))"} +{"_id": "doc_8402", "title": "", "text": "def Func(arg_0, arg_1 : arg_2.Length, arg_4='eps', arg_5=None, arg_6=None):\n \"\"\"Compute the total luminosity of the galaxy's light profiles within a circle of specified radius.\n\n See *light_profiles.luminosity_within_circle* for details of how this is performed.\n\n Parameters\n ----------\n radius : float\n The radius of the circle to compute the dimensionless mass within.\n unit_luminosity : str\n The units the luminosity is returned in (eps | counts).\n exposure_time : float\n The exposure time of the observation, which converts luminosity from electrons per second units to counts.\n \"\"\"\n if arg_0.has_light_profile:\n return sum(map(lambda p: p.Func(arg_1=arg_1, arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6),\n arg_0.light_profiles))\n else:\n return None"} +{"_id": "doc_8403", "title": "", "text": "def Func(arg_0, arg_1, arg_2='angular', arg_3=None, arg_4=None):\n \"\"\"Compute the total angular mass of the galaxy's mass profiles within a circle of specified radius.\n\n See *profiles.mass_profiles.mass_within_circle* for details of how this is performed.\n\n Parameters\n ----------\n radius : float\n The radius of the circle to compute the dimensionless mass within.\n unit_mass : str\n The units the mass is returned in (angular | solMass).\n critical_surface_density : float\n The critical surface mass density of the strong lens configuration, which converts mass from angulalr \\\n units to physical units (e.g. solar masses).\n \"\"\"\n if arg_0.has_mass_profile:\n return sum(map(lambda p: p.Func(arg_1=arg_1, arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4),\n arg_0.mass_profiles))\n else:\n return None"} +{"_id": "doc_8404", "title": "", "text": "def Func(arg_0, arg_1='angular', arg_2=None):\n \"\"\"The Einstein Mass of this galaxy, which is the sum of Einstein Radii of its mass profiles.\n\n If the galaxy is composed of multiple ellipitcal profiles with different axis-ratios, this Einstein Mass \\\n may be inaccurate. 
This is because the differently oriented ellipses of each mass profile \"\"\"\n\n if arg_0.has_mass_profile:\n return sum(\n map(lambda p: p.Func(arg_1=arg_1,\n arg_2=arg_2),\n arg_0.mass_profiles))\n else:\n return None"} +{"_id": "doc_8405", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Compute a scaled galaxy hyper noise-map from a baseline noise-map.\n\n This uses the galaxy contribution map and the *noise_factor* and *noise_power* hyper-parameters.\n\n Parameters\n -----------\n noise_map : ndarray\n The observed noise-map (before scaling).\n contributions : ndarray\n The galaxy contribution map.\n \"\"\"\n return arg_0.noise_factor * (arg_1 * arg_2) ** arg_0.noise_power"} +{"_id": "doc_8406", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"For a given 1D regular array and blurring array, convolve the two using this convolver.\n\n Parameters\n -----------\n image_array : ndarray\n 1D array of the regular values which are to be blurred with the convolver's PSF.\n blurring_array : ndarray\n 1D array of the blurring regular values which blur into the regular-array after PSF convolution.\n \"\"\"\n return arg_0.convolve_jit(arg_1, arg_0.image_frame_indexes, arg_0.image_frame_psfs, arg_0.image_frame_lengths,\n arg_2, arg_0.blurring_frame_indexes, arg_0.blurring_frame_psfs,\n arg_0.blurring_frame_lengths)"} +{"_id": "doc_8407", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute the intensities of a list of galaxies from an input grid, by summing the individual intensities \\\n of each galaxy's light profile.\n\n If the input grid is a *grids.SubGrid*, the intensites is calculated on the sub-grid and binned-up to the \\\n original regular grid by taking the mean value of every set of sub-pixels.\n\n If no galaxies are entered into the function, an array of all zeros is returned.\n\n Parameters\n -----------\n grid : RegularGrid\n The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n intensities are calculated on.\n galaxies : [galaxy.Galaxy]\n The galaxies whose light profiles are used to compute the surface densities.\n \"\"\"\n if arg_1:\n return sum(map(lambda g: g.intensities_from_grid(arg_0), arg_1))\n else:\n return np.full((arg_0.shape[0]), 0.0)"} +{"_id": "doc_8408", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute the convergence of a list of galaxies from an input grid, by summing the individual convergence \\\n of each galaxy's mass profile.\n\n If the input grid is a *grids.SubGrid*, the convergence is calculated on the sub-grid and binned-up to the \\\n original regular grid by taking the mean value of every set of sub-pixels.\n\n If no galaxies are entered into the function, an array of all zeros is returned.\n\n Parameters\n -----------\n grid : RegularGrid\n The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n convergence is calculated on.\n galaxies : [galaxy.Galaxy]\n The galaxies whose mass profiles are used to compute the convergence.\n \"\"\"\n if arg_1:\n return sum(map(lambda g: g.convergence_from_grid(arg_0), arg_1))\n else:\n return np.full((arg_0.shape[0]), 0.0)"} +{"_id": "doc_8409", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute the potential of a list of galaxies from an input grid, by summing the individual potential \\\n of each galaxy's mass profile.\n\n If the input grid is a *grids.SubGrid*, the surface-density is calculated on the sub-grid and binned-up to the \\\n original 
regular grid by taking the mean value of every set of sub-pixels.\n\n If no galaxies are entered into the function, an array of all zeros is returned.\n\n Parameters\n -----------\n grid : RegularGrid\n The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n potential is calculated on.\n galaxies : [galaxy.Galaxy]\n The galaxies whose mass profiles are used to compute the surface densities.\n \"\"\"\n if arg_1:\n return sum(map(lambda g: g.potential_from_grid(arg_0), arg_1))\n else:\n return np.full((arg_0.shape[0]), 0.0)"} +{"_id": "doc_8410", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute the deflections of a list of galaxies from an input sub-grid, by summing the individual deflections \\\n of each galaxy's mass profile.\n\n The deflections are calculated on the sub-grid and binned-up to the original regular grid by taking the mean value \\\n of every set of sub-pixels.\n\n If no galaxies are entered into the function, an array of all zeros is returned.\n\n Parameters\n -----------\n sub_grid : RegularGrid\n The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n deflections is calculated on.\n galaxies : [galaxy.Galaxy]\n The galaxies whose mass profiles are used to compute the surface densities.\n \"\"\"\n if arg_1:\n return sum(map(lambda galaxy: galaxy.deflections_from_grid(arg_0), arg_1))\n else:\n return np.full((arg_0.shape[0], 2), 0.0)"} +{"_id": "doc_8411", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3):\n \"\"\"For a fitting hyper_galaxy_image, hyper_galaxy model image, list of hyper galaxies images and model hyper galaxies, compute\n their contribution maps, which are used to compute a scaled-noise_map map. All quantities are masked 1D arrays.\n\n The reason this is separate from the *contributions_from_fitting_hyper_images_and_hyper_galaxies* function is that\n each hyper_galaxy image has a list of hyper galaxies images and associated hyper galaxies (one for each galaxy). Thus,\n this function breaks down the calculation of each 1D masked contribution map and returns them in the same datas\n structure (2 lists with indexes [image_index][contribution_map_index].\n\n Parameters\n ----------\n hyper_model_image_1d : ndarray\n The best-fit model image to the datas (e.g. from a previous analysis phase).\n hyper_galaxy_images_1d : [ndarray]\n The best-fit model image of each hyper galaxy to the datas (e.g. from a previous analysis phase).\n hyper_galaxies : [galaxy.Galaxy]\n The hyper galaxies which represent the model components used to scale the noise_map, which correspond to\n individual galaxies in the image.\n hyper_minimum_values : [float]\n The minimum value of each hyper_galaxy-image contribution map, which ensure zero's don't impact the scaled noise-map.\n \"\"\"\n # noinspection PyArgumentList\n return list(map(lambda hyper_galaxy, hyper_galaxy_image_1d, hyper_minimum_value:\n hyper_galaxy.contributions_from_model_image_and_galaxy_image(model_image=arg_0,\n galaxy_image=hyper_galaxy_image_1d,\n minimum_value=hyper_minimum_value),\n arg_2, arg_1, arg_3))"} +{"_id": "doc_8412", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"For a contribution map and noise-map, use the model hyper galaxies to compute a scaled noise-map.\n\n Parameters\n -----------\n contribution_maps : ndarray\n The image's list of 1D masked contribution maps (e.g. 
one for each hyper galaxy)\n hyper_galaxies : [galaxy.Galaxy]\n The hyper galaxies which represent the model components used to scale the noise_map, which correspond to\n individual galaxies in the image.\n noise_map : ccd.NoiseMap or ndarray\n An array describing the RMS standard deviation error in each pixel, preferably in units of electrons per\n second.\n \"\"\"\n arg_3 = list(map(lambda hyper_galaxy, contribution_map:\n hyper_galaxy.hyper_noise_from_contributions(arg_2=arg_2,\n contributions=contribution_map),\n arg_1, arg_0))\n return arg_2 + sum(arg_3)"} +{"_id": "doc_8413", "title": "", "text": "def Func(arg_0):\n \"\"\"Wrap the function in a function that checks whether the coordinates have been transformed. If they have not \\ \n been transformed then they are transformed.\n\n Parameters\n ----------\n func : (profiles, *args, **kwargs) -> Object\n A function that requires transformed coordinates\n\n Returns\n -------\n A function that can except cartesian or transformed coordinates\n \"\"\"\n\n @wraps(arg_0)\n def wrapper(arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"\n\n Parameters\n ----------\n profile : GeometryProfile\n The profiles that owns the function\n grid : ndarray\n PlaneCoordinates in either cartesian or profiles coordinate system\n args\n kwargs\n\n Returns\n -------\n A value or coordinate in the same coordinate system as those passed in.\n \"\"\"\n if not isinstance(arg_2, TransformedGrid):\n return arg_0(arg_1, arg_1.Func_to_reference_frame(arg_2), *arg_3, **arg_4)\n else:\n return arg_0(arg_1, arg_2, *arg_3, **arg_4)\n\n return wrapper"} +{"_id": "doc_8414", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Caches results of a call to a grid function. If a grid that evaluates to the same byte value is passed into the same\n function of the same instance as previously then the cached result is returned.\n\n Parameters\n ----------\n func\n Some instance method that takes a grid as its argument\n\n Returns\n -------\n result\n Some result, either newly calculated or recovered from the cache\n \"\"\"\n\n def wrapper(arg_1: arg_2, arg_3: arg_4.ndarray, *arg_6, **arg_7):\n if not hasattr(arg_1, \"cache\"):\n arg_1.cache = {}\n arg_9 = (arg_0.__name__, arg_3.tobytes())\n if arg_9 not in arg_1.cache:\n arg_1.cache[arg_9] = arg_0(arg_1, arg_3)\n return arg_1.cache[arg_9]\n\n return wrapper"} +{"_id": "doc_8415", "title": "", "text": "def Func(arg_0):\n \"\"\" Determine the sin and cosine of the angle between the profile's ellipse and the positive x-axis, \\\n counter-clockwise. \"\"\"\n arg_1 = np.radians(arg_0.phi)\n return np.cos(arg_1), np.sin(arg_1)"} +{"_id": "doc_8416", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"The angle between each angle theta on the grid and the profile, in radians.\n\n Parameters\n -----------\n grid_thetas : ndarray\n The angle theta counter-clockwise from the positive x-axis to each coordinate in radians.\n \"\"\"\n arg_2 = np.add(arg_1, - arg_0.phi_radians)\n return np.cos(arg_2), np.sin(arg_2)"} +{"_id": "doc_8417", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4):\n \"\"\" Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \\\n how regular pixels map to their closest pixelization pixel on the image-plane pix-grid and the pixelization's \\\n pixel centres.\n\n To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \\\n its nearest pixel. 
Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \\\n the Voronoi grid) are used to localize each nearest neighbor search via a graph search.\n\n Parameters\n ----------\n regular_grid : RegularGrid\n The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \\\n to an irregular grid via lens.\n regular_to_nearest_pix : ndarray\n A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \\\n 2D array).\n pixel_centres : ndarray\n The (y,x) centre of every Voronoi pixel in arc-seconds.\n pixel_neighbors : ndarray\n An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \\\n the Voronoi grid (entries of -1 correspond to no neighbor).\n pixel_neighbors_size : ndarray\n An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \\\n Voronoi grid.\n \"\"\"\n\n arg_5 = np.zeros((arg_0.shape[0]))\n\n for arg_6 in range(arg_0.shape[0]):\n\n arg_7 = arg_1[arg_6]\n\n while True:\n\n arg_8 = arg_2[arg_7]\n\n arg_9 = (arg_0[arg_6, 0] - arg_8[0]) ** 2 + \\\n (arg_0[arg_6, 1] - arg_8[1]) ** 2\n\n arg_10 = 1.0e8\n\n for arg_11 in range(arg_4[arg_7]):\n\n arg_12 = arg_3[arg_7, arg_11]\n\n arg_13 = (arg_0[arg_6, 0] - arg_2[arg_12, 0]) ** 2 + \\\n (arg_0[arg_6, 1] - arg_2[arg_12, 1]) ** 2\n\n if arg_13 < arg_10:\n\n arg_10 = arg_13\n arg_14 = arg_11\n\n arg_15 = arg_3[arg_7, arg_14]\n arg_16 = arg_10\n\n if arg_9 <= arg_16:\n arg_5[arg_6] = arg_7\n break\n else:\n arg_7 = arg_15\n\n return arg_5"} +{"_id": "doc_8418", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n \"\"\" Compute the mappings between a set of sub-grid pixels and pixelization pixels, using information on \\\n how the regular pixels hosting each sub-pixel map to their closest pixelization pixel on the image-plane pix-grid \\\n and the pixelization's pixel centres.\n\n To determine the complete set of sub-pixel to pixelization pixel mappings, we must pair every sub-pixel to \\\n its nearest pixel. 
Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \\\n the Voronoi grid) are used to localize each nearest neighbor search by using a graph search.\n\n Parameters\n ----------\n regular_grid : RegularGrid\n The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \\\n to an irregular grid via lens.\n regular_to_nearest_pix : ndarray\n A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \\\n 2D array).\n pixel_centres : (float, float)\n The (y,x) centre of every Voronoi pixel in arc-seconds.\n pixel_neighbors : ndarray\n An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \\\n the Voronoi grid (entries of -1 correspond to no neighbor).\n pixel_neighbors_size : ndarray\n An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \\\n Voronoi grid.\n \"\"\"\n\n arg_6 = np.zeros((arg_0.shape[0]))\n\n for arg_7 in range(arg_0.shape[0]):\n\n arg_8 = arg_1[arg_2[arg_7]]\n\n while True:\n\n arg_9 = arg_3[arg_8]\n\n arg_10 = (arg_0[arg_7, 0] - arg_9[0]) ** 2 + \\\n (arg_0[arg_7, 1] - arg_9[1]) ** 2\n\n arg_11 = 1.0e8\n\n for arg_12 in range(arg_5[arg_8]):\n\n arg_13 = arg_4[arg_8, arg_12]\n\n arg_14 = (arg_0[arg_7, 0] - arg_3[arg_13, 0]) ** 2 + \\\n (arg_0[arg_7, 1] - arg_3[arg_13, 1]) ** 2\n\n if arg_14 < arg_11:\n arg_11 = arg_14\n arg_15 = arg_12\n\n arg_16 = arg_4[arg_8, arg_15]\n arg_17 = arg_11\n\n if arg_10 <= arg_17:\n arg_6[arg_7] = arg_8\n break\n else:\n arg_8 = arg_16\n\n return arg_6"} +{"_id": "doc_8419", "title": "", "text": "def Func(arg_0, arg_1: arg_2.Length, arg_4='eps', arg_5=None,\n arg_6=None):\n \"\"\"Integrate the light profile to compute the total luminosity within a circle of specified radius. This is \\\n centred on the light profile's centre.\n\n The following units for mass can be specified and output:\n\n - Electrons per second (default) - 'eps'.\n - Counts - 'counts' (multiplies the luminosity in electrons per second by the exposure time).\n\n Parameters\n ----------\n radius : float\n The radius of the circle to compute the dimensionless mass within.\n unit_luminosity : str\n The units the luminosity is returned in (eps | counts).\n exposure_time : float or None\n The exposure time of the observation, which converts luminosity from electrons per second units to counts.\n \"\"\"\n\n if not isinstance(arg_1, arg_2.Length):\n arg_1 = arg_2.Length(value=arg_1, unit_length='arcsec')\n\n arg_7 = arg_0.new_profile_with_units_converted(unit_length=arg_1.unit_length, arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6)\n\n arg_8 = quad(arg_7.luminosity_integral, a=0.0, b=arg_1, args=(1.0,))[0]\n return arg_2.Luminosity(arg_8, arg_4)"} +{"_id": "doc_8420", "title": "", "text": "def Func(arg_0, arg_1, arg_2='eps', arg_3=None,\n arg_4=None):\n \"\"\"Integrate the light profiles to compute the total luminosity within an ellipse of specified major axis. 
\\\n This is centred on the light profile's centre.\n\n The following units for mass can be specified and output:\n\n - Electrons per second (default) - 'eps'.\n - Counts - 'counts' (multiplies the luminosity in electrons per second by the exposure time).\n\n Parameters\n ----------\n major_axis : float\n The major-axis radius of the ellipse.\n unit_luminosity : str\n The units the luminosity is returned in (eps | counts).\n exposure_time : float or None\n The exposure time of the observation, which converts luminosity from electrons per second units to counts.\n \"\"\"\n\n if not isinstance(arg_1, dim.Length):\n arg_1 = dim.Length(arg_1, 'arcsec')\n\n arg_5 = arg_0.new_profile_with_units_converted(unit_length=arg_1.unit_length,\n arg_2=arg_2,\n arg_3=arg_3, arg_4=arg_4)\n arg_6 = quad(arg_5.luminosity_integral, a=0.0, b=arg_1, args=(arg_0.axis_ratio,))[0]\n return dim.Luminosity(arg_6, arg_2)"} +{"_id": "doc_8421", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Routine to integrate the luminosity of an elliptical light profile.\n\n The axis ratio is set to 1.0 for computing the luminosity within a circle\"\"\"\n arg_3 = arg_1 * arg_2\n return 2 * np.pi * arg_3 * arg_0.intensities_from_grid_radii(arg_1)"} +{"_id": "doc_8422", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.\n\n Parameters\n ----------\n grid_radii : float\n The radial distance from the centre of the profile. for each coordinate on the grid.\n \"\"\"\n return np.multiply(np.divide(arg_0.intensity, arg_0.sigma * np.sqrt(2.0 * np.pi)),\n np.exp(-0.5 * np.square(np.divide(arg_1, arg_0.sigma))))"} +{"_id": "doc_8423", "title": "", "text": "def Func(arg_0, arg_1 : arg_2.Length, arg_4='eps', arg_5=None):\n \"\"\"Compute the total luminosity of all galaxies in this plane within a circle of specified radius.\n\n See *galaxy.light_within_circle* and *light_profiles.light_within_circle* for details \\\n of how this is performed.\n\n Parameters\n ----------\n radius : float\n The radius of the circle to compute the dimensionless mass within.\n units_luminosity : str\n The units the luminosity is returned in (eps | counts).\n exposure_time : float\n The exposure time of the observation, which converts luminosity from electrons per second units to counts.\n \"\"\"\n return list(map(lambda galaxy: galaxy.luminosity_within_circle_in_units(\n arg_1=arg_1, arg_4=arg_4, kpc_per_arcsec=arg_0.kpc_per_arcsec,\n arg_5=arg_5),\n arg_0.galaxies))"} +{"_id": "doc_8424", "title": "", "text": "def Func(arg_0, arg_1 : arg_2.Length, arg_4='eps',\n arg_5=None):\n \"\"\"\n Compute the total luminosity of all galaxies in this plane within a ellipse of specified major-axis.\n\n The value returned by this integral is dimensionless, and a conversion factor can be specified to convert it \\\n to a physical value (e.g. 
the photometric zeropoint).\n\n See *galaxy.light_within_ellipse* and *light_profiles.light_within_ellipse* for details\n of how this is performed.\n\n Parameters\n ----------\n major_axis : float\n The major-axis radius of the ellipse.\n units_luminosity : str\n The units the luminosity is returned in (eps | counts).\n exposure_time : float\n The exposure time of the observation, which converts luminosity from electrons per second units to counts.\n \"\"\"\n return list(map(lambda galaxy: galaxy.luminosity_within_ellipse_in_units(\n arg_1=arg_1, arg_4=arg_4, kpc_per_arcsec=arg_0.kpc_per_arcsec,\n arg_5=arg_5),\n arg_0.galaxies))"} +{"_id": "doc_8425", "title": "", "text": "def Func(arg_0, arg_1 : arg_2.Length, arg_4='angular',\n arg_5=None):\n \"\"\"Compute the total mass of all galaxies in this plane within a circle of specified radius.\n\n See *galaxy.angular_mass_within_circle* and *mass_profiles.angular_mass_within_circle* for details\n of how this is performed.\n\n Parameters\n ----------\n radius : float\n The radius of the circle to compute the dimensionless mass within.\n units_mass : str\n The units the mass is returned in (angular | solMass).\n critical_surface_density : float\n The critical surface mass density of the strong lens configuration, which converts mass from angulalr \\\n units to physical units (e.g. solar masses).\n \"\"\"\n return list(map(lambda galaxy: galaxy.mass_within_circle_in_units(\n arg_1=arg_1, arg_4=arg_4, kpc_per_arcsec=arg_0.kpc_per_arcsec,\n arg_5=arg_5),\n arg_0.galaxies))"} +{"_id": "doc_8426", "title": "", "text": "def Func(arg_0, arg_1 : arg_2.Length, arg_4='angular',\n arg_5=None):\n \"\"\"Compute the total mass of all galaxies in this plane within a ellipse of specified major-axis.\n\n See *galaxy.angular_mass_within_ellipse* and *mass_profiles.angular_mass_within_ellipse* for details \\\n of how this is performed.\n\n Parameters\n ----------\n major_axis : float\n The major-axis radius of the ellipse.\n units_luminosity : str\n The units the luminosity is returned in (eps | counts).\n exposure_time : float\n The exposure time of the observation, which converts luminosity from electrons per second units to counts.\n \"\"\"\n return list(map(lambda galaxy: galaxy.mass_within_ellipse_in_units(\n arg_1=arg_1, arg_4=arg_4, kpc_per_arcsec=arg_0.kpc_per_arcsec,\n arg_5=arg_5),\n arg_0.galaxies))"} +{"_id": "doc_8427", "title": "", "text": "def Func(arg_0):\n \"\"\"Compute the Func labels of this grid_stack, used for plotting the x-axis ticks when visualizing an \\\n image\"\"\"\n return np.linspace(np.amin(arg_0.grid_stack.regular[:, 1]), np.amax(arg_0.grid_stack.regular[:, 1]), 4)"} +{"_id": "doc_8428", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"This is a utility function for the function above, which performs the iteration over each plane's galaxies \\\n and computes each galaxy's unmasked blurred image.\n\n Parameters\n ----------\n padded_grid_stack\n psf : ccd.PSF\n The PSF of the image used for convolution.\n \"\"\"\n return [arg_1.unmasked_blurred_image_from_psf_and_unmasked_image(\n arg_2, arg_4) if not arg_3.has_pixelization else None for arg_3, arg_4 in\n zip(arg_0.galaxies, arg_0.image_plane_image_1d_of_galaxies)]"} +{"_id": "doc_8429", "title": "", "text": "def Func(arg_0):\n \"\"\"Trace the positions to the next plane.\"\"\"\n return list(map(lambda positions, deflections: np.subtract(positions, deflections),\n arg_0.positions, arg_0.deflections))"} +{"_id": "doc_8430", "title": "", "text": "def Func(arg_0, arg_1, 
arg_2, arg_3, arg_4=(0.0, 0.0)):\n \"\"\"\n Creates an instance of Array and fills it with a single value\n\n Parameters\n ----------\n value: float\n The value with which the array should be filled\n shape: (int, int)\n The shape of the array\n pixel_scale: float\n The scale of a pixel in arc seconds\n\n Returns\n -------\n array: ScaledSquarePixelArray\n An array filled with a single value\n \"\"\"\n arg_5 = np.ones(arg_2) * arg_1\n return arg_0(arg_5, arg_3, arg_4)"} +{"_id": "doc_8431", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1):\n \"\"\"Extract the 2D region of an array corresponding to the rectangle encompassing all unmasked values.\n\n This is used to extract and visualize only the region of an image that is used in an analysis.\n\n Parameters\n ----------\n mask : mask.Mask\n The mask around which the scaled array is extracted.\n buffer : int\n The buffer of pixels around the extraction.\n \"\"\"\n return arg_0.new_with_array(array=array_util.extracted_array_2d_from_array_2d_and_coordinates(\n array_2d=arg_0, y0=arg_1.zoom_region[0]-arg_2, y1=arg_1.zoom_region[1]+arg_2,\n x0=arg_1.zoom_region[2]-arg_2, x1=arg_1.zoom_region[3]+arg_2))"} +{"_id": "doc_8432", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"resized the array to a new shape and at a new origin.\n\n Parameters\n -----------\n new_shape : (int, int)\n The new two-dimensional shape of the array.\n \"\"\"\n if arg_2 is None and arg_3 is None:\n arg_4 = (-1, -1) # In Numba, the input origin must be the same image type as the origin, thus we cannot\n # pass 'None' and instead use the tuple (-1, -1).\n elif arg_2 is not None and arg_3 is None:\n arg_4 = arg_2\n elif arg_2 is None and arg_3 is not None:\n arg_4 = arg_0.arc_second_coordinates_to_pixel_coordinates(arc_second_coordinates=arg_3)\n else:\n raise exc.DataException('You have supplied two centres (pixels and arc-seconds) to the resize scaled'\n 'array function')\n\n return arg_0.new_with_array(array=array_util.resized_array_2d_from_array_2d_and_resized_shape(\n array_2d=arg_0, resized_shape=arg_1, origin=arg_4))"} +{"_id": "doc_8433", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Fit lens data with a normal tracer and sensitivity tracer, to determine our sensitivity to a selection of \\ \n galaxy components. This factory automatically determines the type of fit based on the properties of the galaxies \\\n in the tracers.\n\n Parameters\n -----------\n lens_data : lens_data.LensData or lens_data.LensDataHyper\n The lens-images that is fitted.\n tracer_normal : ray_tracing.AbstractTracer\n A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \\\n lens data that we are fitting.\n tracer_sensitive : ray_tracing.AbstractTracerNonStack\n A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \\\n lens data that we are fitting, but also addition components (e.g. 
mass clumps) which we measure \\\n how sensitive we are too.\n \"\"\"\n\n if (arg_1.has_light_profile and arg_2.has_light_profile) and \\\n (not arg_1.has_pixelization and not arg_2.has_pixelization):\n return SensitivityProfileFit(arg_0=arg_0, arg_1=arg_1,\n arg_2=arg_2)\n\n elif (not arg_1.has_light_profile and not arg_2.has_light_profile) and \\\n (arg_1.has_pixelization and arg_2.has_pixelization):\n return SensitivityInversionFit(arg_0=arg_0, arg_1=arg_1,\n arg_2=arg_2)\n else:\n\n raise exc.FittingException('The sensitivity_fit routine did not call a SensitivityFit class - check the '\n 'properties of the tracers')"} +{"_id": "doc_8434", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=(0., 0.), arg_5=False):\n \"\"\"Setup a mask where unmasked pixels are within a circle of an input arc second radius and centre.\n\n Parameters\n ----------\n shape: (int, int)\n The (y,x) shape of the mask in units of pixels.\n pixel_scale: float\n The arc-second to pixel conversion factor of each pixel.\n radius_arcsec : float\n The radius (in arc seconds) of the circle within which pixels unmasked.\n centre: (float, float)\n The centre of the circle used to mask pixels.\n \"\"\"\n arg_6 = mask_util.mask_Func_from_shape_pixel_scale_and_radius(arg_1, arg_2, arg_3,\n arg_4)\n if arg_5: arg_6 = np.invert(arg_6)\n return arg_0(array=arg_6.astype('bool'), arg_2=arg_2)"} +{"_id": "doc_8435", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5=(0., 0.),\n arg_6=False):\n \"\"\"Setup a mask where unmasked pixels are within an annulus of input inner and outer arc second radii and \\\n centre.\n\n Parameters\n ----------\n shape : (int, int)\n The (y,x) shape of the mask in units of pixels.\n pixel_scale: float\n The arc-second to pixel conversion factor of each pixel.\n inner_radius_arcsec : float\n The radius (in arc seconds) of the inner circle outside of which pixels are unmasked.\n outer_radius_arcsec : float\n The radius (in arc seconds) of the outer circle within which pixels are unmasked.\n centre: (float, float)\n The centre of the annulus used to mask pixels.\n \"\"\"\n arg_7 = mask_util.mask_Func_from_shape_pixel_scale_and_radii(arg_1, arg_2, arg_3,\n arg_4, arg_5)\n if arg_6: arg_7 = np.invert(arg_7)\n return arg_0(array=arg_7.astype('bool'), arg_2=arg_2)"} +{"_id": "doc_8436", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6=(0., 0.),\n arg_7=False):\n \"\"\" Setup a mask where unmasked pixels are within an ellipse of an input arc second major-axis and centre.\n\n Parameters\n ----------\n shape: (int, int)\n The (y,x) shape of the mask in units of pixels.\n pixel_scale: float\n The arc-second to pixel conversion factor of each pixel.\n major_axis_radius_arcsec : float\n The major-axis (in arc seconds) of the ellipse within which pixels are unmasked.\n axis_ratio : float\n The axis-ratio of the ellipse within which pixels are unmasked.\n phi : float\n The rotation angle of the ellipse within which pixels are unmasked, (counter-clockwise from the positive \\\n x-axis).\n centre: (float, float)\n The centre of the ellipse used to mask pixels.\n \"\"\"\n arg_8 = mask_util.mask_Func_from_shape_pixel_scale_and_radius(arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6)\n if arg_7: arg_8 = np.invert(arg_8)\n return arg_0(array=arg_8.astype('bool'), arg_2=arg_2)"} +{"_id": "doc_8437", "title": "", "text": "def Func(arg_0, arg_1, arg_2,arg_3, arg_4, arg_5,\n arg_6, arg_7, arg_8, arg_9=(0.0, 0.0),\n arg_10=False):\n \"\"\"Setup a mask where unmasked 
pixels are within an elliptical annulus of input inner and outer arc second \\\n major-axis and centre.\n\n Parameters\n ----------\n shape: (int, int)\n The (y,x) shape of the mask in units of pixels.\n pixel_scale: float\n The arc-second to pixel conversion factor of each pixel.\n inner_major_axis_radius_arcsec : float\n The major-axis (in arc seconds) of the inner ellipse within which pixels are masked.\n inner_axis_ratio : float\n The axis-ratio of the inner ellipse within which pixels are masked.\n inner_phi : float\n The rotation angle of the inner ellipse within which pixels are masked, (counter-clockwise from the \\\n positive x-axis).\n outer_major_axis_radius_arcsec : float\n The major-axis (in arc seconds) of the outer ellipse within which pixels are unmasked.\n outer_axis_ratio : float\n The axis-ratio of the outer ellipse within which pixels are unmasked.\n outer_phi : float\n The rotation angle of the outer ellipse within which pixels are unmasked, (counter-clockwise from the \\\n positive x-axis).\n centre: (float, float)\n The centre of the elliptical annuli used to mask pixels.\n \"\"\"\n arg_11 = mask_util.mask_Func_from_shape_pixel_scale_and_radius(arg_1, arg_2,\n arg_3, arg_4, arg_5,\n arg_6, arg_7, arg_8, arg_9)\n if arg_10: arg_11 = np.invert(arg_11)\n return arg_0(array=arg_11.astype('bool'), arg_2=arg_2)"} +{"_id": "doc_8438", "title": "", "text": "def Func(arg_0):\n \"\"\"The zoomed rectangular region corresponding to the square encompassing all unmasked values.\n\n This is used to zoom in on the region of an image that is used in an analysis for visualization.\"\"\"\n\n # Have to convert mask to bool for invert function to work.\n arg_1 = np.array(np.where(np.invert(arg_0.astype('bool'))))\n arg_2, arg_3 = np.amin(arg_1, axis=1)\n arg_4, arg_5 = np.amax(arg_1, axis=1)\n return [arg_2, arg_4+1, arg_3, arg_5+1]"} +{"_id": "doc_8439", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create an instance of the associated class for a set of arguments\n\n Parameters\n ----------\n arguments: {Prior: value}\n Dictionary mapping_matrix priors to attribute analysis_path and value pairs\n\n Returns\n -------\n An instance of the class\n \"\"\"\n arg_2 = {**{key: value.Func(arg_1)\n for key, value\n in arg_0.profile_prior_model_dict.items()}, **arg_0.constant_profile_dict}\n\n try:\n arg_3 = arg_0.redshift.Func(arg_1)\n except AttributeError:\n arg_3 = arg_0.redshift\n arg_4 = arg_0.pixelization.Func(arg_1) \\\n if isinstance(arg_0.pixelization, pm.PriorModel) \\\n else arg_0.pixelization\n arg_5 = arg_0.regularization.Func(arg_1) \\\n if isinstance(arg_0.regularization, pm.PriorModel) \\\n else arg_0.regularization\n arg_6 = arg_0.hyper_galaxy.Func(arg_1) \\\n if isinstance(arg_0.hyper_galaxy, pm.PriorModel) \\\n else arg_0.hyper_galaxy\n\n return galaxy.Galaxy(arg_3=arg_3, arg_4=arg_4, arg_5=arg_5,\n arg_6=arg_6, **arg_2)"} +{"_id": "doc_8440", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior\n models with new arguments.\n\n Parameters\n ----------\n arguments: dict\n A dictionary mapping_matrix between old priors and their replacements.\n\n Returns\n -------\n new_model: GalaxyModel\n A model with some or all priors replaced.\n \"\"\"\n arg_2 = copy.deepcopy(arg_0)\n\n for arg_3, arg_4 in filter(lambda t: isinstance(t[1], pm.PriorModel), arg_0.__dict__.items()):\n setattr(arg_2, arg_3, arg_4.Func(arg_1))\n\n return arg_2"} +{"_id": "doc_8441", 
"title": "", "text": "def Func(\n arg_0, arg_1=True, arg_2=None, arg_3=False, arg_4=False,\n arg_5=False, arg_6=None, arg_7=False,\n arg_8='arcsec', arg_9=None, arg_10=(7, 7), arg_11='square',\n arg_12='jet', arg_13='linear', arg_14=None, arg_15=None, arg_16=0.05, arg_17=0.01,\n arg_18=10, arg_19=0.047, arg_20=0.01, arg_21=None, arg_22=None,\n arg_23='Image', arg_24=16, arg_25=16, arg_26=16, arg_27=16,\n arg_28=10, arg_29=30, arg_30=1,\n arg_31=None, arg_32='show', arg_33='image'):\n \"\"\"Plot the observed image of the ccd data.\n\n Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n Parameters\n -----------\n image : ScaledSquarePixelArray\n The image of the data.\n plot_origin : True\n If true, the origin of the data's coordinate system is plotted as a 'x'.\n image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid\n If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \\\n over the immage.\n \"\"\"\n arg_34 = get_origin(array=arg_0, arg_1=arg_1)\n\n array_plotters.plot_array(\n array=arg_0, arg_34=arg_34, arg_2=arg_2, arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5, arg_6=arg_6, arg_7=arg_7,\n arg_8=arg_8, arg_9=arg_9, arg_10=arg_10, arg_11=arg_11,\n arg_12=arg_12, arg_13=arg_13, arg_14=arg_14, arg_15=arg_15, arg_16=arg_16, arg_17=arg_17,\n arg_18=arg_18, arg_19=arg_19, arg_20=arg_20, \n arg_21=arg_21, arg_22=arg_22,\n arg_23=arg_23, arg_24=arg_24, arg_25=arg_25, arg_26=arg_26, arg_27=arg_27,\n arg_28=arg_28, arg_29=arg_29, arg_30=arg_30,\n arg_31=arg_31, arg_32=arg_32, arg_33=arg_33)"} +{"_id": "doc_8442", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Write `data` to file record `n`; records are indexed from 1.\"\"\"\n arg_0.file.seek(arg_1 * K - K)\n return arg_0.file.write(arg_2)"} +{"_id": "doc_8443", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a memory-map of the elements `start` through `end`.\n\n The memory map will offer the 8-byte double-precision floats\n (\"elements\") in the file from index `start` through to the index\n `end`, inclusive, both counting the first float as element 1.\n Memory maps must begin on a page boundary, so `skip` returns the\n number of extra bytes at the beginning of the return value.\n\n \"\"\"\n arg_3, arg_4 = 8 * arg_1 - 8, 8 * arg_2\n try:\n arg_5 = arg_0.file.fileno()\n except (AttributeError, io.UnsupportedOperation):\n arg_5 = None\n if arg_5 is None:\n arg_6 = 0\n arg_0.file.seek(arg_3)\n arg_7 = arg_0.file.read(arg_4 - arg_3)\n else:\n arg_6 = arg_3 % mmap.ALLOCATIONGRANULARITY\n arg_8 = mmap.ACCESS_READ\n arg_7 = mmap.mmap(arg_5, length=arg_4-arg_3+arg_6, access=arg_8, offset=arg_3-arg_6)\n if sys.version_info > (3,):\n arg_7 = memoryview(arg_7) # so further slicing can return views\n return arg_7, arg_6"} +{"_id": "doc_8444", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the text inside the comment area of the file.\"\"\"\n arg_1 = range(2, arg_0.fward)\n if not arg_1:\n return ''\n arg_2 = b''.join(arg_0.read_record(n)[0:1000] for n in arg_1)\n try:\n return arg_2[:arg_2.find(b'\\4')].decode('ascii').replace('\\0', '\\n')\n except IndexError:\n raise ValueError('DAF file comment area is missing its EOT byte')\n except UnicodeDecodeError:\n raise ValueError('DAF file comment area is not ASCII text')"} +{"_id": "doc_8445", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.0):\n \"\"\"Compute the component values for the time `tdb` plus `tdb2`.\"\"\"\n for arg_3 in 
arg_0.generate(arg_1, arg_2):\n return arg_3"} +{"_id": "doc_8446", "title": "", "text": "def Func(arg_0):\n \"\"\"Close this file.\"\"\"\n arg_0.daf.file.Func()\n for arg_1 in arg_0.segments:\n if hasattr(arg_1, '_data'):\n del arg_1._data"} +{"_id": "doc_8447", "title": "", "text": "def Func(arg_0):\n \"\"\"Map the coefficients into memory using a NumPy array.\n\n \"\"\"\n if arg_0.data_type == 2:\n arg_1 = 3\n else:\n raise ValueError('only binary PCK data type 2 is supported')\n\n arg_2, arg_3, arg_4, arg_5 = arg_0.daf.read_array(arg_0.end_i - 3, arg_0.end_i)\n arg_6 = jd(arg_2)\n arg_7 = arg_3 / S_PER_DAY\n arg_8 = int(arg_4 - 2) // arg_1\n arg_9 = arg_0.daf.map_array(arg_0.start_i, arg_0.end_i - 4)\n\n arg_9.shape = (int(arg_5), int(arg_4))\n arg_9 = arg_9[:,2:] # ignore MID and RADIUS elements\n arg_9.shape = (int(arg_5), arg_1, arg_8)\n arg_9 = rollaxis(arg_9, 1)\n return arg_6, arg_7, arg_9"} +{"_id": "doc_8448", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=True):\n \"\"\"Generate angles and derivatives for time `tdb` plus `tdb2`.\n\n If ``derivative`` is true, return a tuple containing both the\n angle and its derivative; otherwise simply return the angles.\n\n \"\"\"\n arg_4 = not getattr(arg_1, 'shape', 0) and not getattr(arg_2, 'shape', 0)\n if arg_4:\n arg_1 = array((arg_1,))\n\n arg_5 = arg_0._data\n if arg_5 is None:\n arg_0._data = arg_5 = arg_0._load()\n\n arg_7, arg_8, arg_9 = arg_5\n arg_10, arg_11, arg_12 = arg_9.shape\n\n # Subtracting tdb before adding tdb2 affords greater precision.\n arg_13, arg_14 = divmod((arg_1 - arg_7) + arg_2, arg_8)\n arg_13 = arg_13.astype(int)\n\n if (arg_13 < 0).any() or (arg_13 > arg_11).any():\n arg_15 = arg_7 + arg_8 * arg_11\n raise ValueError('segment only covers dates %.1f through %.1f'\n % (arg_7, arg_15))\n\n arg_16 = (arg_13 == arg_11)\n arg_13[arg_16] -= 1\n arg_14[arg_16] += arg_8\n\n arg_9 = arg_9[:,arg_13]\n\n # Chebyshev polynomial.\n\n arg_17 = empty((arg_12, len(arg_13)))\n arg_17[0] = 1.0\n arg_17[1] = t1 = 2.0 * arg_14 / arg_8 - 1.0\n arg_18 = t1 + t1\n for arg_19 in range(2, arg_12):\n arg_17[arg_19] = arg_18 * arg_17[arg_19-1] - arg_17[arg_19-2]\n\n arg_20 = (arg_17.T * arg_9).sum(axis=2)\n if arg_4:\n arg_20 = arg_20[:,0]\n\n if not arg_3:\n return arg_20\n\n # Chebyshev differentiation.\n\n arg_21 = empty_like(arg_17)\n arg_21[0] = 0.0\n arg_21[1] = 1.0\n if arg_12 > 2:\n arg_21[2] = arg_18 + arg_18\n for arg_19 in range(3, arg_12):\n arg_21[arg_19] = arg_18 * arg_21[arg_19-1] - arg_21[arg_19-2] + arg_17[arg_19-1] + arg_17[arg_19-1]\n arg_21 *= 2.0\n arg_21 /= arg_8\n\n arg_22 = (arg_21.T * arg_9).sum(axis=2)\n if arg_4:\n arg_22 = arg_22[:,0]\n\n return arg_20, arg_22"} +{"_id": "doc_8449", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Normalise and check a backend path.\n\n Ensure that the requested backend path is specified as a relative path,\n and resolves to a location under the given source tree.\n\n Return an absolute version of the requested path.\n \"\"\"\n if os.path.isabs(arg_1):\n raise ValueError(\"paths must be relative\")\n\n arg_2 = os.path.abspath(arg_0)\n arg_3 = os.path.normpath(os.path.join(arg_2, arg_1))\n # We have to use commonprefix for Python 2.7 compatibility. 
So we\n # normalise case to avoid problems because commonprefix is a character\n # based comparison :-(\n arg_4 = os.path.normcase(arg_2)\n arg_5 = os.path.normcase(arg_3)\n if os.path.commonprefix([arg_4, arg_5]) != arg_4:\n raise ValueError(\"paths must be inside source tree\")\n\n return arg_3"} +{"_id": "doc_8450", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Visit a function call.\n\n We expect every logging statement and string format to be a function call.\n\n \"\"\"\n # CASE 1: We're in a logging statement\n if arg_0.within_logging_statement():\n if arg_0.within_logging_argument() and arg_0.is_format_call(arg_1):\n arg_0.violations.append((arg_1, STRING_FORMAT_VIOLATION))\n super(LoggingVisitor, arg_0).generic_visit(arg_1)\n return\n\n arg_2 = arg_0.detect_logging_level(arg_1)\n\n if arg_2 and arg_0.current_logging_level is None:\n arg_0.current_logging_level = arg_2\n\n # CASE 2: We're in some other statement\n if arg_2 is None:\n super(LoggingVisitor, arg_0).generic_visit(arg_1)\n return\n\n # CASE 3: We're entering a new logging statement\n arg_0.current_logging_call = arg_1\n\n if arg_2 == \"warn\":\n arg_0.violations.append((arg_1, WARN_VIOLATION))\n\n arg_0.check_exc_info(arg_1)\n\n for arg_5, arg_6 in enumerate(iter_child_nodes(arg_1)):\n if arg_5 == 1:\n arg_0.current_logging_argument = arg_6\n if arg_5 >= 1:\n arg_0.check_exception_arg(arg_6)\n if arg_5 > 1 and isinstance(arg_6, keyword) and arg_6.arg == \"extra\":\n arg_0.current_extra_keyword = arg_6\n\n super(LoggingVisitor, arg_0).visit(arg_6)\n\n arg_0.current_logging_argument = None\n arg_0.current_extra_keyword = None\n\n arg_0.current_logging_call = None\n arg_0.current_logging_level = None"} +{"_id": "doc_8451", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process binary operations while processing the first logging argument.\n\n \"\"\"\n if arg_0.within_logging_statement() and arg_0.within_logging_argument():\n # handle percent format\n if isinstance(arg_1.op, Mod):\n arg_0.violations.append((arg_1, PERCENT_FORMAT_VIOLATION))\n # handle string concat\n if isinstance(arg_1.op, Add):\n arg_0.violations.append((arg_1, STRING_CONCAT_VIOLATION))\n super(LoggingVisitor, arg_0).generic_visit(arg_1)"} +{"_id": "doc_8452", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Process keyword arguments.\n\n \"\"\"\n if arg_0.should_check_whitelist(arg_1):\n if arg_1.arg not in arg_0.whitelist and not arg_1.arg.startswith(\"debug_\"):\n arg_0.violations.append((arg_0.current_logging_call, WHITELIST_VIOLATION.format(arg_1.arg)))\n\n if arg_0.should_check_extra_exception(arg_1):\n arg_0.check_exception_arg(arg_1.value)\n\n super(LoggingVisitor, arg_0).generic_visit(arg_1)"} +{"_id": "doc_8453", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Helper to get the exception name from an ExceptHandler node in both py2 and py3.\n\n \"\"\"\n arg_2 = arg_1.name\n if not arg_2:\n return None\n\n if version_info < (3,):\n return arg_2.id\n return arg_2"} +{"_id": "doc_8454", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if value has id attribute and return it.\n\n :param value: The value to get id from.\n :return: The value.id.\n \"\"\"\n if not hasattr(arg_1, \"id\") and hasattr(arg_1, \"value\"):\n arg_1 = arg_1.value\n return arg_1.id"} +{"_id": "doc_8455", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks if the node is a bare exception name from an except block.\n\n \"\"\"\n return isinstance(arg_1, Name) and arg_1.id in arg_0.current_except_names"} +{"_id": 
"doc_8456", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reports a violation if exc_info keyword is used with logging.error or logging.exception.\n\n \"\"\"\n if arg_0.current_logging_level not in ('error', 'exception'):\n return\n\n for arg_2 in arg_1.keywords:\n if arg_2.arg == 'exc_info':\n if arg_0.current_logging_level == 'error':\n arg_3 = ERROR_EXC_INFO_VIOLATION\n else:\n arg_3 = REDUNDANT_EXC_INFO_VIOLATION\n arg_0.violations.append((arg_1, arg_3))"} +{"_id": "doc_8457", "title": "", "text": "def Func(arg_0):\n \"\"\"Get a json dict of the attributes of this object.\"\"\"\n return {\"id\": arg_0.id,\n \"compile\": arg_0.compile,\n \"position\": arg_0.position,\n \"version\": arg_0.version}"} +{"_id": "doc_8458", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"\n Convenience method to create a file from a string.\n\n This file object's metadata will have the id 'inlined_input'.\n\n Inputs\n ------\n content -- the content of the file (a string).\n position -- (default 1) rank among all files of the model while parsing\n see FileMetadata\n file_id -- (default 'inlined_input') the file_id that will be used by\n kappa.\n \"\"\"\n if arg_3 is None:\n arg_3 = 'inlined_input'\n return arg_0(FileMetadata(arg_3, arg_2), arg_1)"} +{"_id": "doc_8459", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"\n Convience method to create a kappa file object from a file on disk\n\n Inputs\n ------\n fpath -- path to the file on disk\n position -- (default 1) rank among all files of the model while parsing\n see FileMetadata\n file_id -- (default = fpath) the file_id that will be used by kappa.\n \"\"\"\n if arg_3 is None:\n arg_3 = arg_1\n with open(arg_1) as f:\n arg_4 = f.read()\n arg_5 = str(arg_4)\n arg_6 = FileMetadata(arg_3, arg_2)\n return arg_0(arg_6, arg_5)"} +{"_id": "doc_8460", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"Add a kappa model given in a string to the project.\"\"\"\n if arg_3 is None:\n arg_3 = arg_0.make_unique_id('inlined_input')\n arg_4 = arg_0.file_create(File.from_string(arg_1, arg_2,\n arg_3))\n return arg_4"} +{"_id": "doc_8461", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=None):\n \"\"\"Add a kappa model from a file at given path to the project.\"\"\"\n if arg_3 is None:\n arg_3 = arg_0.make_unique_id('file_input')\n arg_4 = arg_0.file_create(File.from_file(arg_1, arg_2,\n arg_3))\n return arg_4"} +{"_id": "doc_8462", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Delete file from database only if needed.\n\n When editing and the filefield is a new file,\n deletes the previous file (if any) from the database.\n Call this function immediately BEFORE saving the instance.\n \"\"\"\n if arg_0.pk:\n arg_2 = type(arg_0)\n\n # Check if there is a file for the instance in the database\n if arg_2.objects.filter(pk=arg_0.pk).exclude(\n **{'%s__isnull' % arg_1: True}\n ).exclude(\n **{'%s__exact' % arg_1: ''}\n ).exists():\n arg_3 = getattr(\n arg_2.objects.only(arg_1).get(pk=arg_0.pk),\n arg_1\n )\n else:\n arg_3 = None\n\n # If there is a file, delete it if needed\n if arg_3:\n # When editing and NOT changing the file,\n # old_file.name == getattr(instance, filefield_name)\n # returns True. 
In this case, the file must NOT be deleted.\n # If the file IS being changed, the comparison returns False.\n # In this case, the old file MUST be deleted.\n if (arg_3.name == getattr(arg_0, arg_1)) is False:\n DatabaseFileStorage().delete(arg_3.name)"} +{"_id": "doc_8463", "title": "", "text": "def Func(arg_0):\n \"\"\"Edit the download-link inner text.\"\"\"\n\n def get_link_display(arg_1):\n arg_2 = unquote(arg_1.split('%2F')[-1])\n if sys.version_info.major == 2: # python 2\n from django.utils.encoding import force_unicode\n arg_2 = force_unicode(arg_2)\n return escape(arg_2)\n\n def get_template_substitution_values(arg_3, arg_4):\n # Used by Django < 1.11\n arg_5 = super(arg_0, arg_3).get_template_substitution_values(arg_4)\n arg_5['initial'] = get_link_display(arg_4.url)\n return arg_5\n setattr(arg_0,\n 'get_template_substitution_values',\n get_template_substitution_values)\n\n def get_context(arg_3, arg_6, arg_4, arg_7):\n arg_8 = super(arg_0, arg_3).get_context(arg_6, arg_4, arg_7)\n if arg_4 and hasattr(arg_4, 'url'):\n arg_8['widget']['display'] = get_link_display(arg_4.url)\n return arg_8\n setattr(arg_0, 'get_context', get_context)\n\n return arg_0"} +{"_id": "doc_8464", "title": "", "text": "def Func(arg_0):\r\n \"\"\"\r\n Checks the input and output to see if they are valid\r\n \r\n \"\"\"\r\n arg_1 = available_devices()\r\n if not arg_0.in_idx in arg_1:\r\n raise OSError(\"Input device is unavailable\")\r\n arg_2 = arg_1[arg_0.in_idx]\r\n if not arg_0.out_idx in arg_1:\r\n raise OSError(\"Output device is unavailable\")\r\n arg_3 = arg_1[arg_0.out_idx]\r\n if((arg_2['inputs'] == 0) and (arg_3['outputs']==0)):\r\n raise StandardError('Invalid input and output devices')\r\n elif(arg_2['inputs'] == 0):\r\n raise ValueError('Selected input device has no inputs')\r\n elif(arg_3['outputs'] == 0):\r\n raise ValueError('Selected output device has no outputs')\r\n return True"} +{"_id": "doc_8465", "title": "", "text": "def Func(arg_0,arg_1):\r\n \"\"\"\r\n Append new samples to the data_capture array and increment the sample counter\r\n If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 \r\n then new values are not appended to the data_capture array.\r\n \r\n \"\"\"\r\n arg_0.capture_sample_count += len(arg_1)\r\n if arg_0.Tcapture > 0:\r\n arg_0.data_capture = np.hstack((arg_0.data_capture,arg_1))\r\n if (arg_0.Tcapture > 0) and (len(arg_0.data_capture) > arg_0.Ncapture):\r\n arg_0.data_capture = arg_0.data_capture[-arg_0.Ncapture:]"} +{"_id": "doc_8466", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\r\n \"\"\"\r\n Append new samples to the data_capture_left array and the data_capture_right\r\n array and increment the sample counter. If length reaches Tcapture, then the \r\n newest samples will be kept. If Tcapture = 0 then new values are not appended \r\n to the data_capture array.\r\n \r\n \"\"\"\r\n arg_0.capture_sample_count = arg_0.capture_sample_count + len(arg_1) + len(arg_2)\r\n if arg_0.Tcapture > 0:\r\n arg_0.data_capture_left = np.hstack((arg_0.data_capture_left,arg_1))\r\n arg_0.data_capture_right = np.hstack((arg_0.data_capture_right,arg_2))\r\n if (len(arg_0.data_capture_left) > arg_0.Ncapture):\r\n arg_0.data_capture_left = arg_0.data_capture_left[-arg_0.Ncapture:]\r\n if (len(arg_0.data_capture_right) > arg_0.Ncapture):\r\n arg_0.data_capture_right = arg_0.data_capture_right[-arg_0.Ncapture:]"} +{"_id": "doc_8467", "title": "", "text": "def Func(arg_0):\r\n \"\"\"\r\n Add new tic time to the DSP_tic list. 
Will not be called if\r\n Tcapture = 0.\r\n \r\n \"\"\"\r\n if arg_0.Tcapture > 0:\r\n arg_0.DSP_tic.append(time.time()-arg_0.start_time)"} +{"_id": "doc_8468", "title": "", "text": "def Func(arg_0):\r\n \"\"\"\r\n Add new toc time to the DSP_toc list. Will not be called if\r\n Tcapture = 0.\r\n\r\n \"\"\"\r\n if arg_0.Tcapture > 0:\r\n arg_0.DSP_toc.append(time.time()-arg_0.start_time)"} +{"_id": "doc_8469", "title": "", "text": "def Func(arg_0,arg_1 = 0.001):\r\n \"\"\"\r\n \r\n The average of the root values is used when multiplicity \r\n is greater than one.\r\n\r\n Mark Wickert October 2016\r\n \"\"\"\r\n arg_2 = [arg_0[0]]\r\n arg_3 = [1]\r\n for arg_4 in range(1,len(arg_0)):\r\n arg_5 = len(arg_2)\r\n for arg_6 in range(arg_5):\r\n if abs(arg_0[arg_4]-arg_2[arg_6]) <= arg_1:\r\n arg_3[arg_6] += 1\r\n arg_2[arg_6] = (arg_2[arg_6]*(arg_3[arg_6]-1) + arg_0[arg_4])/float(arg_3[arg_6])\r\n break\r\n arg_2 = np.hstack((arg_2,arg_0[arg_4]))\r\n arg_3 = np.hstack((arg_3,[1]))\r\n return np.array(arg_2), np.array(arg_3)"} +{"_id": "doc_8470", "title": "", "text": "def Func(arg_0,arg_1,arg_2,arg_3,arg_4,arg_5='H'):\n \"\"\"\n Cruise control with PI controller and hill disturbance.\n\n This function returns various system function configurations\n for a the cruise control Case Study example found in \n the supplementary article. The plant model is obtained by the\n linearizing the equations of motion and the controller contains a\n proportional and integral gain term set via the closed-loop parameters\n natuarl frequency wn (rad/s) and damping zeta.\n\n Parameters\n ----------\n wn : closed-loop natural frequency in rad/s, nominally 0.1\n zeta : closed-loop damping factor, nominally 1.0\n T : vehicle time constant, nominally 10 s\n vcruise : cruise velocity set point, nominally 75 mph\n vmax : maximum vehicle velocity, nominally 120 mph\n tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function \n 'H' : closed-loop system function V(s)/R(s)\n 'HE' : closed-loop system function E(s)/R(s)\n 'HVW' : closed-loop system function V(s)/W(s)\n 'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input\n\n Returns\n -------\n b : numerator coefficient ndarray\n a : denominator coefficient ndarray \n\n Examples\n --------\n >>> # return the closed-loop system function output/input velocity\n >>> b,a = Func(wn,zeta,T,vcruise,vmax,tf_mode='H')\n >>> # return the closed-loop system function loop error/hill disturbance\n >>> b,a = Func(wn,zeta,T,vcruise,vmax,tf_mode='HED')\n \"\"\"\n arg_6 = arg_2/2.*arg_4/arg_3\n arg_7 = 9.8\n arg_7 *= 3*60**2/5280. 
# m/s to mph conversion\n arg_8 = arg_2*(2*arg_1*arg_0-1/arg_6)/arg_4\n arg_9 = arg_2*arg_0**2./arg_4\n arg_10 = arg_8*arg_4/arg_2\n print('wn = ', np.sqrt(arg_10/(arg_8/arg_9)))\n print('zeta = ', (arg_10 + 1/arg_6)/(2*arg_0))\n arg_11 = np.array([1, 2*arg_1*arg_0, arg_0**2])\n if arg_5 == 'H':\n arg_12 = np.array([arg_10, arg_0**2]) \n elif arg_5 == 'HE':\n arg_12 = np.array([1, 2*arg_1*arg_0-arg_10, 0.]) \n elif arg_5 == 'HVW':\n arg_12 = np.array([ 1, arg_0**2/arg_10+1/arg_6, arg_0**2/(arg_10*arg_6)])\n arg_12 *= arg_8\n elif arg_5 == 'HED':\n arg_12 = np.array([arg_7, 0])\n else:\n raise ValueError('tf_mode must be: H, HE, HVU, or HED')\n return arg_12, arg_11"} +{"_id": "doc_8471", "title": "", "text": "def Func(arg_0,arg_1=2.4e6,arg_2='test.wav'):\r\n \"\"\"\r\n Stereo demod from complex baseband at sampling rate fs.\r\n Assume fs is 2400 ksps\r\n \r\n Mark Wickert July 2017\r\n \"\"\"\r\n arg_3 = 10\r\n arg_4 = signal.firwin(64,2*200e3/float(arg_1))\r\n # Filter and decimate (should be polyphase)\r\n arg_5 = signal.lfilter(arg_4,1,arg_0)\r\n arg_6 = ss.downsample(arg_5,arg_3)\r\n # Apply complex baseband discriminator\r\n arg_7 = discrim(arg_6)\r\n # Work with the (3) stereo multiplex signals:\r\n # Begin by designing a lowpass filter for L+R and DSP demoded (L-R)\r\n # (fc = 12 KHz)\r\n arg_8 = signal.firwin(128,2*12e3/(float(arg_1)/arg_3))\r\n # The L + R term is at baseband, we just lowpass filter to remove \r\n # other terms above 12 kHz.\r\n arg_9 = signal.lfilter(arg_8,1,arg_7)\r\n arg_10 = signal.firwin(128,2*1e3*np.array([19-5,19+5])/(float(arg_1)/arg_3),\r\n pass_zero=False);\r\n arg_11 = signal.lfilter(arg_10,1,arg_7)\r\n # Lock PLL to 19 kHz pilot\r\n # A type 2 loop with bandwidth Bn = 10 Hz and damping zeta = 0.707 \r\n # The VCO quiescent frequency is set to 19000 Hz.\r\n arg_12, arg_13 = pilot_PLL(arg_11,19000,arg_1/arg_3,2,10,0.707)\r\n # Coherently demodulate the L - R subcarrier at 38 kHz.\r\n # theta is the PLL output phase at 19 kHz, so to double multiply \r\n # by 2 and wrap with cos() or sin().\r\n # First bandpass filter\r\n arg_14 = signal.firwin(128,2*1e3*np.array([38-5,38+5])/(float(arg_1)/arg_3),\r\n pass_zero=False);\r\n arg_15 = signal.lfilter(arg_14,1,arg_7)\r\n # Coherently demodulate using the PLL output phase\r\n arg_15 = 2*np.sqrt(2)*np.cos(2*arg_12)*arg_15\r\n # Lowpass at 12 kHz to recover the desired DSB demod term\r\n arg_16 = signal.lfilter(arg_8,1,arg_15)\r\n # Matrix the y_lmr and y_lpr for form right and left channels:\r\n arg_17 = arg_9 + arg_16\r\n arg_18 = arg_9 - arg_16\r\n \r\n # Decimate by N2 (nominally 5)\r\n arg_19 = 5\r\n arg_20 = float(arg_1)/(arg_3*arg_19) # (nominally 48 ksps)\r\n arg_21 = ss.downsample(arg_17,arg_19)\r\n arg_22 = ss.downsample(arg_18,arg_19)\r\n # Deemphasize with 75 us time constant to 'undo' the preemphasis \r\n # applied at the transmitter in broadcast FM.\r\n # A 1-pole digital lowpass works well here.\r\n arg_23 = np.exp(-2.1*1e3*2*np.pi/arg_20)\r\n arg_24 = signal.lfilter([1-arg_23],[1, -arg_23],arg_21)\r\n arg_25 = signal.lfilter([1-arg_23],[1, -arg_23],arg_22)\r\n # Place left and righ channels as side-by-side columns in a 2D array\r\n arg_26 = np.hstack((np.array([arg_24]).T,(np.array([arg_25]).T)))\r\n \r\n ss.to_wav(arg_2, 48000, arg_26/2)\r\n print('Done!')\r\n #return z_bb, z_out\r\n return arg_7, arg_12, arg_9, arg_16, arg_26"} +{"_id": "doc_8472", "title": "", "text": "def Func(arg_0, arg_1):\r\n \"\"\"\r\n Write IIR SOS Header Files\r\n File format is compatible with CMSIS-DSP IIR \r\n 
Directform II Filter Functions\r\n \r\n Mark Wickert March 2015-October 2016\r\n \"\"\"\r\n arg_2, arg_3 = arg_1.shape\r\n arg_4 = open(arg_0, 'wt')\r\n arg_4.write('//define a IIR SOS CMSIS-DSP coefficient array\\n\\n')\r\n arg_4.write('#include \\n\\n')\r\n arg_4.write('#ifndef STAGES\\n')\r\n arg_4.write('#define STAGES %d\\n' % arg_2)\r\n arg_4.write('#endif\\n')\r\n arg_4.write('/*********************************************************/\\n');\r\n arg_4.write('/* IIR SOS Filter Coefficients */\\n');\r\n arg_4.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... by stage\\n' % (5 * arg_2))\r\n for arg_5 in range(arg_2):\r\n if (arg_5 < arg_2 - 1):\r\n arg_4.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (arg_1[arg_5, 0], arg_1[arg_5, 1], arg_1[arg_5, 2]))\r\n arg_4.write(' %+-13e, %+-13e,\\n' % \\\r\n (-arg_1[arg_5, 4], -arg_1[arg_5, 5]))\r\n else:\r\n arg_4.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (arg_1[arg_5, 0], arg_1[arg_5, 1], arg_1[arg_5, 2]))\r\n arg_4.write(' %+-13e, %+-13e\\n' % \\\r\n (-arg_1[arg_5, 4], -arg_1[arg_5, 5]))\r\n # for k in range(Ns):\r\n # if (k < Ns-1):\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f,\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # else:\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n arg_4.write('};\\n')\r\n arg_4.write('/*********************************************************/\\n')\r\n arg_4.close()"} +{"_id": "doc_8473", "title": "", "text": "def Func(arg_0,arg_1,arg_2=0):\n \"\"\"\n Eye pattern plot of a baseband digital communications waveform.\n\n The signal must be real, but can be multivalued in terms of the underlying\n modulation scheme. Used for BPSK eye plots in the Case Study article.\n\n Parameters\n ----------\n x : ndarray of the real input data vector/array\n L : display length in samples (usually two symbols)\n S : start index\n\n Returns\n -------\n None : A plot window opens containing the eye plot\n \n Notes\n -----\n Increase S to eliminate filter transients.\n \n Examples\n --------\n 1000 bits at 10 samples per bit with 'rc' shaping.\n\n >>> import matplotlib.pyplot as plt\n >>> from sk_dsp_comm import digitalcom as dc\n >>> x,b, data = dc.NRZ_bits(1000,10,'rc')\n >>> dc.Func(x,20,60)\n >>> plt.show()\n \"\"\"\n plt.figure(figsize=(6,4))\n arg_3 = np.arange(0,arg_1+1)\n plt.plot(arg_3,arg_0[arg_2:arg_2+arg_1+1],'b')\n arg_4 = int((len(arg_0) - arg_2)/arg_1)-1\n for arg_5 in range(1,arg_4):\n plt.plot(arg_3,arg_0[arg_2+arg_5*arg_1:arg_2+arg_1+1+arg_5*arg_1],'b')\n plt.grid()\n plt.xlabel('Time Index - n')\n plt.ylabel('Amplitude')\n plt.title('Eye Plot')\n return 0"} +{"_id": "doc_8474", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n \"\"\"\n Sample a baseband digital communications waveform at the symbol spacing.\n\n Parameters\n ----------\n x : ndarray of the input digital comm signal\n Ns : number of samples per symbol (bit)\n start : the array index to start the sampling\n\n Returns\n -------\n xI : ndarray of the real part of x following sampling\n xQ : ndarray of the imaginary part of x following sampling\n\n Notes\n -----\n Normally the signal is complex, so the Func plot contains \n clusters at point in the complex plane. For a binary signal \n such as BPSK, the point centers are nominally +/-1 on the real\n axis. 
Start is used to eliminate transients from the FIR\n pulse shaping filters from appearing in the Func plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sk_dsp_comm import digitalcom as dc\n >>> x,b, data = dc.NRZ_bits(1000,10,'rc')\n\n Add some noise so points are now Funced about +/-1.\n\n >>> y = dc.cpx_AWGN(x,20,10)\n >>> yI,yQ = dc.Func(y,10,60)\n >>> plt.plot(yI,yQ,'.')\n >>> plt.grid()\n >>> plt.xlabel('In-Phase')\n >>> plt.ylabel('Quadrature')\n >>> plt.axis('equal')\n >>> plt.show()\n \"\"\"\n arg_3 = np.real(arg_0[arg_2::arg_1])\n arg_4 = np.imag(arg_0[arg_2::arg_1])\n return arg_3, arg_4"} +{"_id": "doc_8475", "title": "", "text": "def Func(arg_0,arg_1,arg_2,arg_3=100,arg_4=125,arg_5=10,arg_6=0,arg_7='src'):\n \"\"\"\n This function generates\n \"\"\"\n arg_8 = int(np.round(arg_4/arg_2))\n print('Ns = ', arg_8)\n print('Rs = ', arg_4/float(arg_8))\n print('EsN0 = ', arg_3, 'dB')\n print('phase = ', arg_6, 'degrees')\n print('pulse = ', arg_7)\n arg_9, arg_10, arg_11 = QPSK_bb(arg_1,arg_8,arg_5,arg_7)\n # Add AWGN to x\n arg_9 = cpx_AWGN(arg_9,arg_3,arg_8)\n arg_12 = np.arange(len(arg_9))\n arg_13 = arg_9*np.exp(1j*2*np.pi*arg_0/float(arg_4)*arg_12) * np.exp(1j*arg_6)\n return arg_13, arg_10, arg_11"} +{"_id": "doc_8476", "title": "", "text": "def Func(arg_0,arg_1,arg_2=6):\n \"\"\"\n A truncated square root raised cosine pulse used in digital communications.\n\n The pulse shaping factor :math:`0 < \\\\alpha < 1` is required as well as the\n truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`.\n \n\n Parameters\n ----------\n Ns : number of samples per symbol\n alpha : excess bandwidth factor on (0, 1), e.g., 0.35\n M : equals RC one-sided symbol truncation factor\n\n Returns\n -------\n b : ndarray containing the pulse shape\n\n Notes\n -----\n The pulse shape b is typically used as the FIR filter coefficients\n when forming a pulse shaped digital communications waveform. 
When \n square root raised cosine (SRC) pulse is used to generate Tx signals and\n at the receiver used as a matched filter (receiver FIR filter), the \n received signal is now raised cosine shaped, thus having zero\n intersymbol interference and the optimum removal of additive white \n noise if present at the receiver input.\n\n Examples\n --------\n Ten samples per symbol and :math:`\\\\alpha = 0.35`.\n\n >>> import matplotlib.pyplot as plt\n >>> from numpy import arange\n >>> from sk_dsp_comm.digitalcom import Func\n >>> b = Func(10,0.35)\n >>> n = arange(-10*6,10*6+1)\n >>> plt.stem(n,b)\n >>> plt.show()\n \"\"\"\n # Design the filter\n arg_3 = np.arange(-arg_2*arg_0,arg_2*arg_0+1)\n arg_4 = np.zeros(len(arg_3))\n arg_0 *= 1.0\n arg_5 = arg_1\n for arg_6 in range(len(arg_3)):\n if abs(1 - 16*arg_5**2*(arg_3[arg_6]/arg_0)**2) <= np.finfo(np.float).eps/2:\n arg_4[arg_6] = 1/2.*((1+arg_5)*np.sin((1+arg_5)*np.pi/(4.*arg_5))-(1-arg_5)*np.cos((1-arg_5)*np.pi/(4.*arg_5))+(4*arg_5)/np.pi*np.sin((1-arg_5)*np.pi/(4.*arg_5)))\n else:\n arg_4[arg_6] = 4*arg_5/(np.pi*(1 - 16*arg_5**2*(arg_3[arg_6]/arg_0)**2))\n arg_4[arg_6] = arg_4[arg_6]*(np.cos((1+arg_5)*np.pi*arg_3[arg_6]/arg_0) + np.sinc((1-arg_5)*arg_3[arg_6]/arg_0)*(1-arg_5)*np.pi/(4.*arg_5))\n return arg_4"} +{"_id": "doc_8477", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Convert an unsigned integer to a numpy binary array with the first\n element the MSB and the last element the LSB.\n \"\"\"\n arg_2 = bin(arg_0 & (2**arg_1-1))[2:].zfill(arg_1)\n return [int(arg_3) for arg_3 in tuple(arg_2)]"} +{"_id": "doc_8478", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convert binary array back a nonnegative integer. The array length is \n the bit width. The first input index holds the MSB and the last holds the LSB.\n \"\"\"\n arg_1 = len(arg_0)\n arg_2 = 2**np.arange(arg_1-1,-1,-1)\n return int(np.dot(arg_0,arg_2))"} +{"_id": "doc_8479", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Filter the signal\n \"\"\"\n arg_2 = signal.lFunc(arg_0.b,[1],arg_1)\n return arg_2"} +{"_id": "doc_8480", "title": "", "text": "def Func(arg_0,arg_1):\n \"\"\"\n Filter the signal using second-order sections\n \"\"\"\n arg_2 = signal.sosfilt(arg_0.sos,arg_1)\n return arg_2"} +{"_id": "doc_8481", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=False):\n \"\"\"Celery task decorator. Forces the task to have only one running instance at a time.\n\n Use with binded tasks (@celery.task(bind=True)).\n\n Modeled after:\n http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html\n http://blogs.it.ox.ac.uk/inapickle/2012/01/05/python-decorators-with-optional-arguments/\n\n Written by @Robpol86.\n\n :raise OtherInstanceError: If another instance is already running.\n\n :param function func: The function to decorate, must be also decorated by @celery.task.\n :param int lock_timeout: Lock timeout in seconds plus five more seconds, in-case the task crashes and fails to\n release the lock. If not specified, the values of the task's soft/hard limits are used. If all else fails,\n timeout will be 5 minutes.\n :param bool include_args: Include the md5 checksum of the arguments passed to the task in the Redis key. 
This allows\n the same task to run with different arguments, only stopping a task from running if another instance of it is\n running with the same arguments.\n \"\"\"\n if arg_0 is None:\n return partial(Func, arg_1=arg_1, arg_2=arg_2)\n\n @wraps(arg_0)\n def wrapped(arg_3, *arg_4, **arg_5):\n \"\"\"Wrapped Celery task, for Func().\"\"\"\n # Select the manager and get timeout.\n arg_6 = (\n arg_1 or arg_3.soft_time_limit or arg_3.time_limit\n or arg_3.app.conf.get('CELERYD_TASK_SOFT_TIME_LIMIT')\n or arg_3.app.conf.get('CELERYD_TASK_TIME_LIMIT')\n or (60 * 5)\n )\n arg_7 = _select_manager(arg_3.backend.__class__.__name__)\n arg_8 = arg_7(arg_3, arg_6, arg_2, arg_4, arg_5)\n\n # Lock and execute.\n with arg_8:\n arg_9 = arg_0(*arg_4, **arg_5)\n return arg_9\n return wrapped"} +{"_id": "doc_8482", "title": "", "text": "def Func(arg_0):\n \"\"\"Removed the lock regardless of timeout.\"\"\"\n arg_1 = arg_0.CELERY_LOCK.format(task_id=arg_0.task_identifier)\n arg_0.celery_self.backend.client.delete(arg_1)"} +{"_id": "doc_8483", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Iterator used to iterate in chunks over an array of size `num_samples`.\n At each iteration returns `chunksize` except for the last iteration.\n \"\"\"\n arg_2 = int(np.mod(arg_0, arg_1))\n arg_1 = int(arg_1)\n for arg_3 in range(int(arg_0) // arg_1):\n yield arg_1\n if arg_2 > 0:\n yield arg_2"} +{"_id": "doc_8484", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Reduce with `func`, chunk by chunk, the passed pytable `array`.\n \"\"\"\n arg_2 = []\n for arg_3 in iter_chunk_slice(arg_1.shape[-1], arg_1.chunkshape[-1]):\n arg_2.append(arg_0(arg_1[..., arg_3]))\n return arg_0(arg_2)"} +{"_id": "doc_8485", "title": "", "text": "def Func(arg_0):\n \"\"\"Load the array `data` in the .mat file `fname`.\"\"\"\n if os.path.exists(arg_0) or os.path.exists(arg_0 + '.mat'):\n return loadmat(arg_0)['data']\n else:\n raise IOError(\"Can't find PSF file '%s'\" % arg_0)"} +{"_id": "doc_8486", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Check whether the git executable is found.\n \"\"\"\n if arg_0 is None and GIT_PATH is None:\n return False\n if arg_0 is None: arg_0 = GIT_PATH\n try:\n call([arg_0, '--version'])\n return True\n except OSError:\n return False"} +{"_id": "doc_8487", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Get the Git version.\n \"\"\"\n if arg_0 is None: arg_0 = GIT_PATH\n arg_1 = check_output([arg_0, \"--version\"]).split()[2]\n return arg_1"} +{"_id": "doc_8488", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Returns whether there are uncommitted changes in the working dir.\n \"\"\"\n arg_1 = get_status(arg_0)\n arg_2 = (len(arg_1.strip()) == 0)\n return arg_2"} +{"_id": "doc_8489", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Get one-line description of HEAD commit for repository in current dir.\n \"\"\"\n if arg_0 is None: arg_0 = GIT_PATH\n arg_1 = check_output([arg_0, \"log\", \"--pretty=format:'%ad %h %s'\",\n \"--date=short\", \"-n1\"])\n return arg_1.strip()[1:-1]"} +{"_id": "doc_8490", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Get the HEAD commit SHA1 of repository in current dir.\n \"\"\"\n if arg_0 is None: arg_0 = GIT_PATH\n arg_1 = Func_line(arg_0)\n arg_2 = arg_1.split()[1]\n return arg_2"} +{"_id": "doc_8491", "title": "", "text": "def Func(arg_0='Repository', arg_1=None):\n \"\"\"\n Print the last commit line and eventual uncommitted changes.\n \"\"\"\n if arg_1 is None: arg_1 = GIT_PATH\n\n # If git is available, check fretbursts 
version\n if not git_path_valid():\n print('\\n%s revision unknown (git not found).' % arg_0)\n else:\n arg_2 = get_last_commit_line()\n print('\\n{} revision:\\n {}\\n'.format(arg_0, arg_2))\n if not check_clean_status():\n print('\\nWARNING -> Uncommitted changes:')\n print(get_status())"} +{"_id": "doc_8492", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Store parameters in `params` in `h5file.root.parameters`.\n\n `nparams` (dict)\n A dict as returned by `get_params()` in `ParticlesSimulation()`\n The format is:\n keys:\n used as parameter name\n values: (2-elements tuple)\n first element is the parameter value\n second element is a string used as \"title\" (description)\n `attr_params` (dict)\n A dict whole items are stored as attributes in '/parameters'\n \"\"\"\n for arg_3, arg_4 in arg_1.items():\n arg_5 = arg_4[0] if arg_4[0] is not None else 'none'\n arg_0.h5file.create_array('/parameters', arg_3, obj=arg_5,\n title=arg_4[1])\n for arg_3, arg_4 in arg_2.items():\n arg_0.h5file.set_node_attr('/parameters', arg_3, arg_4)"} +{"_id": "doc_8493", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return pathlib.Path for a data-file with given hash and prefix.\n \"\"\"\n arg_3 = '%s_%s*.h*' % (arg_1, arg_0)\n arg_4 = list(arg_2.glob(arg_3))\n if len(arg_4) == 0:\n raise NoMatchError('No matches for \"%s\"' % arg_3)\n if len(arg_4) > 1:\n raise MultipleMatchesError('More than 1 match for \"%s\"' % arg_3)\n return arg_4[0]"} +{"_id": "doc_8494", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return a RandomState, equal to the input unless rs is None.\n\n When rs is None, try to get the random state from the\n 'last_random_state' attribute in `group`. When not available,\n use `seed` to generate a random state. 
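# Editor's note: doc_8494 restores a numpy RandomState from a state tuple
# saved in an HDF5 group attribute, so repeated simulations continue one
# random stream. A minimal sketch of that round trip, with a plain dict
# standing in for the pytables attribute set:
import numpy as np

attrs = {}
rs = np.random.RandomState(seed=1)
attrs['last_random_state'] = rs.get_state()  # persist at end of a session
first = rs.rand()

rs2 = np.random.RandomState()                # any seed; state is overwritten
rs2.set_state(attrs['last_random_state'])    # resume the saved stream
assert rs2.rand() == first                   # identical continuation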
When seed is None the returned\n random state will have a random seed.\n \"\"\"\n if arg_0 is None:\n arg_0 = np.random.RandomState(arg_1=arg_1)\n # Try to set the random state from the last session to preserve\n # a single random stream when simulating timestamps multiple times\n if 'last_random_state' in arg_2._v_attrs:\n arg_0.set_state(arg_2._v_attrs['last_random_state'])\n print(\"INFO: Random state set to last saved state in '%s'.\" %\n arg_2._v_name)\n else:\n print(\"INFO: Random state initialized from seed (%d).\" % arg_1)\n return arg_0"} +{"_id": "doc_8495", "title": "", "text": "def Func(arg_0, arg_1=6):\n \"\"\"Compact representation of all simulation parameters\n \"\"\"\n # this can be made more robust for ID > 9 (double digit)\n arg_2 = arg_0.Func_core(arg_1, t_max=True)\n arg_2 += \"_ID%d-%d\" % (arg_0.ID, arg_0.EID)\n return arg_2"} +{"_id": "doc_8496", "title": "", "text": "def Func(arg_0):\n \"\"\"A dict containing all the simulation numeric-parameters.\n\n The values are 2-element tuples: first element is the value and\n second element is a string describing the parameter (metadata).\n \"\"\"\n arg_1 = dict(\n D = (arg_0.diffusion_coeff.mean(), 'Diffusion coefficient (m^2/s)'),\n np = (arg_0.num_particles, 'Number of simulated particles'),\n t_step = (arg_0.t_step, 'Simulation time-step (s)'),\n t_max = (arg_0.t_max, 'Simulation total time (s)'),\n ID = (arg_0.ID, 'Simulation ID (int)'),\n EID = (arg_0.EID, 'IPython Engine ID (int)'),\n pico_mol = (arg_0.concentration() * 1e12,\n 'Particles concentration (pM)'))\n return arg_1"} +{"_id": "doc_8497", "title": "", "text": "def Func(arg_0):\n \"\"\"Print on-disk array sizes required for current set of parameters.\"\"\"\n arg_1 = 4\n arg_2 = 1024 * 1024\n arg_3 = arg_0.n_samples * arg_1\n arg_4 = arg_3 * arg_0.num_particles / arg_2\n arg_5 = 3 * arg_3 * arg_0.num_particles / arg_2\n print(\" Number of particles:\", arg_0.num_particles)\n print(\" Number of time steps:\", arg_0.n_samples)\n print(\" Emission array - 1 particle (float32): %.1f MB\" % (arg_3 / arg_2))\n print(\" Emission array (float32): %.1f MB\" % arg_4)\n print(\" Position array (float32): %.1f MB \" % arg_5)"} +{"_id": "doc_8498", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=True,\n arg_3=False, arg_4=None, arg_5=1, arg_6='./',\n arg_7=arg_8,\n arg_9=2**19, arg_10='times', arg_11=True):\n \"\"\"Simulate Brownian motion trajectories and emission rates.\n\n This method performs the Brownian motion simulation using the current\n set of parameters. Before running this method you can check the\n disk-space requirements using :method:`print_sizes`.\n\n Results are stored to disk in HDF5 format and are accessible in\n in `self.emission`, `self.emission_tot` and `self.position` as\n pytables arrays.\n\n Arguments:\n save_pos (bool): if True, save the particles 3D trajectories\n total_emission (bool): if True, store only the total emission array\n containing the sum of emission of all the particles.\n rs (RandomState object): random state object used as random number\n generator. 
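# Editor's note: the simulation loop in doc_8498 walks the sample axis with
# a chunk iterator like doc_8483 above -- full chunks first, then the
# remainder. A self-contained sketch of that helper:
def iter_chunksize(num_samples, chunksize):
    """Yield `chunksize` repeatedly, then the remainder (if any)."""
    chunksize = int(chunksize)
    for _ in range(int(num_samples) // chunksize):
        yield chunksize
    last = int(num_samples) % chunksize
    if last > 0:
        yield last

assert list(iter_chunksize(10, 4)) == [4, 4, 2]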
If None, use a random state initialized from seed.\n seed (uint): when `rs` is None, `seed` is used to initialize the\n random state, otherwise is ignored.\n wrap_func (function): the function used to apply the boundary\n condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).\n path (string): a folder where simulation data is saved.\n verbose (bool): if False, prints no output.\n \"\"\"\n if arg_4 is None:\n arg_4 = np.random.RandomState(arg_5=arg_5)\n arg_0.open_store_traj(arg_9=arg_9, arg_10=arg_10,\n arg_3=arg_3, arg_6=arg_6)\n # Save current random state for reproducibility\n arg_0.traj_group._v_attrs['init_random_state'] = arg_4.get_state()\n\n arg_14 = arg_0.emission_tot if arg_2 else arg_0.emission\n\n print('- Start trajectories simulation - %s' % ctime(), flush=True)\n if arg_11:\n print('[PID %d] Diffusion time:' % os.getpid(), end='')\n arg_15 = 0\n arg_16 = arg_0.emission.chunkshape[1]\n arg_17 = arg_16 * arg_0.t_step\n\n arg_18 = arg_0.particles.positions\n arg_19 = 0\n for arg_20 in iter_chunksize(arg_0.n_samples, arg_16):\n if arg_11:\n arg_21 = int(arg_17 * (arg_15 + 1))\n if arg_21 > arg_19:\n print(' %ds' % arg_21, end='', flush=True)\n arg_19 = arg_21\n\n arg_22, arg_23 = arg_0._sim_trajectories(arg_20, arg_18, arg_4,\n arg_2=arg_2,\n arg_1=arg_1, arg_3=arg_3,\n arg_7=arg_7)\n\n ## Append em to the permanent storage\n # if total_emission, data is just a linear array\n # otherwise is a 2-D array (self.num_particles, c_size)\n arg_14.append(arg_23)\n if arg_1:\n arg_0.position.append(np.vstack(arg_22).astype('float32'))\n arg_15 += 1\n arg_0.store.h5file.flush()\n\n # Save current random state\n arg_0.traj_group._v_attrs['last_random_state'] = arg_4.get_state()\n arg_0.store.h5file.flush()\n print('\\n- End trajectories simulation - %s' % ctime(), flush=True)"} +{"_id": "doc_8499", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5,\n arg_6=0, arg_7=10, arg_8=True):\n \"\"\"Simulate timestamps from emission trajectories.\n\n Uses attributes: `.t_step`.\n\n Returns:\n A tuple of two arrays: timestamps and particles.\n \"\"\"\n arg_9 = sim_timetrace_bg(arg_3, arg_1, arg_2,\n arg_0.t_step, arg_5=arg_5)\n arg_10 = arg_3.shape[0]\n if arg_2 is not None:\n arg_10 += 1\n assert arg_9.shape == (arg_10, arg_3.shape[1])\n arg_11 = arg_9.max()\n if arg_11 == 0:\n return np.array([], dtype=np.int64), np.array([], dtype=np.int64)\n\n arg_12 = arg_4 * arg_7\n arg_13 = arg_12 + arg_9.shape[1] * arg_7\n arg_14 = np.arange(arg_12, arg_13, arg_7, dtype='int64')\n\n # Loop for each particle to compute timestamps\n arg_15 = []\n arg_16 = []\n for arg_17, arg_18 in enumerate(arg_9):\n # Compute timestamps for particle ip for all bins with counts\n arg_19 = []\n for arg_20 in range(1, arg_11 + 1):\n arg_19.append(arg_14[arg_18 >= arg_20])\n\n # Stack the timestamps from different \"counts\"\n arg_21 = np.hstack(arg_19)\n # Append current particle\n arg_15.append(arg_21)\n arg_16.append(np.full(arg_21.size, arg_17 + arg_6, dtype='u1'))\n\n # Merge the arrays of different particles\n arg_22 = np.hstack(arg_15)\n arg_23 = np.hstack(arg_16)\n\n if arg_8:\n # Sort timestamps inside the merged chunk\n arg_24 = arg_22.argsort(kind='mergesort')\n arg_22 = arg_22[arg_24]\n arg_23 = arg_23[arg_24]\n\n return arg_22, arg_23"} +{"_id": "doc_8500", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=None, arg_5=1, arg_6=2**16,\n arg_7=None, arg_8=False,\n arg_9=False, arg_10=10,\n arg_11=None, arg_12=None, arg_13=None):\n \"\"\"Compute one timestamps array for a 
mixture of N populations.\n\n Timestamp data are saved to disk and accessible as pytables arrays in\n `._timestamps` and `._tparticles`.\n The background generated timestamps are assigned a\n conventional particle number (last particle index + 1).\n\n Arguments:\n max_rates (list): list of the peak max emission rate for each\n population.\n populations (list of slices): slices to `self.particles`\n defining each population.\n bg_rate (float, cps): rate for a Poisson background process\n rs (RandomState object): random state object used as random number\n generator. If None, use a random state initialized from seed.\n seed (uint): when `rs` is None, `seed` is used to initialize the\n random state, otherwise is ignored.\n chunksize (int): chunk size used for the on-disk timestamp array\n comp_filter (tables.Filter or None): compression filter to use\n for the on-disk `timestamps` and `tparticles` arrays.\n If None use default compression.\n overwrite (bool): if True, overwrite any pre-existing timestamps\n array. If False, never overwrite. The outcome of simulating an\n existing array is controlled by `skip_existing` flag.\n skip_existing (bool): if True, skip simulation if the same\n timestamps array is already present.\n scale (int): `self.t_step` is multiplied by `scale` to obtain the\n timestamps units in seconds.\n path (string): folder where to save the data.\n timeslice (float or None): timestamps are simulated until\n `timeslice` seconds. If None, simulate until `self.t_max`.\n \"\"\"\n arg_0.open_store_timestamp(arg_6=arg_6, arg_11=arg_11)\n arg_4 = arg_0._get_group_randomstate(arg_4, arg_5, arg_0.ts_group)\n if arg_12 is None:\n arg_12 = arg_0.emission.chunkshape[1]\n arg_14 = arg_0.n_samples\n if arg_13 is not None:\n arg_14 = arg_13 // arg_0.t_step\n\n arg_15 = arg_0._get_ts_name_mix(arg_1, arg_2, arg_3, arg_4=arg_4)\n arg_16 = dict(arg_15=arg_15, clk_p=arg_0.t_step / arg_10,\n arg_1=arg_1, arg_3=arg_3, arg_2=arg_2,\n num_particles=arg_0.num_particles,\n bg_particle=arg_0.num_particles,\n arg_8=arg_8, arg_6=arg_6)\n if arg_7 is not None:\n arg_16.update(arg_7=arg_7)\n try:\n arg_0._timestamps, arg_0._tparticles = (arg_0.ts_store\n .add_timestamps(**arg_16))\n except ExistingArrayError as e:\n if arg_9:\n print(' - Skipping already present timestamps array.')\n return\n else:\n raise e\n\n arg_0.ts_group._v_attrs['init_random_state'] = arg_4.get_state()\n arg_0._timestamps.attrs['init_random_state'] = arg_4.get_state()\n arg_0._timestamps.attrs['PyBroMo'] = __version__\n\n arg_22, arg_23 = [], []\n # Load emission in chunks, and save only the final timestamps\n arg_24 = [None] * (len(arg_1) - 1) + [arg_3]\n arg_25 = 0\n for arg_26, arg_27 in iter_chunk_index(arg_14, arg_12):\n\n arg_28 = np.around(arg_26 * arg_0.t_step, decimals=0)\n if arg_28 > arg_25:\n print(' %.1fs' % arg_28, end='', flush=True)\n arg_25 = arg_28\n\n arg_29 = arg_0.emission[:, arg_26:arg_27]\n\n arg_30, arg_31 = \\\n arg_0._sim_timestamps_populations(\n arg_29, arg_1, arg_2, arg_24, arg_26,\n arg_4, arg_10)\n\n # Save sorted timestamps (suffix '_s') and corresponding particles\n arg_22.append(arg_30)\n arg_23.append(arg_31)\n\n for arg_32, arg_33 in zip(arg_22, arg_23):\n arg_0._timestamps.append(arg_32)\n arg_0._tparticles.append(arg_33)\n\n # Save current random state so it can be resumed in the next session\n arg_0.ts_group._v_attrs['last_random_state'] = arg_4.get_state()\n arg_0._timestamps.attrs['last_random_state'] = arg_4.get_state()\n arg_0.ts_store.h5file.flush()"} +{"_id": "doc_8501", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Merge donor and acceptor timestamps and particle arrays.\n\n Parameters:\n ts_d (array): donor timestamp array\n ts_par_d (array): donor particles array\n ts_a (array): acceptor timestamp array\n ts_par_a (array): acceptor particles array\n\n Returns:\n Arrays: timestamps, acceptor bool mask, timestamp particle\n \"\"\"\n arg_4 = np.hstack([arg_0, arg_2])\n arg_5 = np.hstack([arg_1, arg_3])\n arg_6 = np.hstack([np.zeros(arg_0.shape[0], dtype=bool),\n np.ones(arg_2.shape[0], dtype=bool)])\n arg_7 = arg_4.argsort()\n return arg_4[arg_7], arg_6[arg_7], arg_5[arg_7]"} +{"_id": "doc_8502", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Diffusion coefficients of the two specified populations.\n \"\"\"\n arg_2 = arg_0.diffusion_coeff_counts\n if len(arg_2) == 1:\n arg_3 = [arg_6.stop - arg_6.start for arg_6 in arg_1]\n assert arg_2[0][1] >= sum(arg_3)\n arg_2 = [(arg_2[0][0], ps) for ps in arg_3]\n\n arg_4 = []\n arg_5 = 0 # start index of diffusion-based populations\n for arg_6, (arg_7, arg_8) in zip(arg_1, arg_2):\n arg_4.append(arg_7)\n assert arg_6.start >= arg_5\n assert arg_6.stop <= arg_5 + arg_8\n arg_5 += arg_8\n return arg_4"} +{"_id": "doc_8503", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"2-tuple of slices for selection of two populations.\n \"\"\"\n arg_2 = []\n arg_3 = 0\n for arg_4 in arg_1:\n arg_2.append(slice(arg_3, arg_3 + arg_4))\n arg_3 += arg_4\n return arg_2"} +{"_id": "doc_8504", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compute hash of D and A timestamps for single-step D+A case.\n \"\"\"\n arg_0.hash_d = hash_(arg_1.get_state())[:6]\n arg_0.hash_a = arg_0.hash_d"} +{"_id": "doc_8505", "title": "", "text": "def Func(arg_0):\n \"\"\"Merge donor and acceptor timestamps, computes `ts`, `a_ch`, `part`.\n \"\"\"\n print(' - Merging D and A timestamps', flush=True)\n arg_1, arg_2 = arg_0.S.get_timestamps_part(arg_0.name_timestamps_d)\n arg_3, arg_4 = arg_0.S.get_timestamps_part(arg_0.name_timestamps_a)\n arg_5, arg_6, arg_7 = Func(arg_1, arg_2, arg_3, arg_4)\n assert arg_6.sum() == arg_3.shape[0]\n assert (~arg_6).sum() == arg_1.shape[0]\n assert arg_6.size == arg_3.shape[0] + arg_1.shape[0]\n arg_0.ts, arg_0.a_ch, arg_0.part = arg_5, arg_6, arg_7\n arg_0.clk_p = arg_1.attrs['clk_p']"} +{"_id": "doc_8506", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=True, arg_3=None):\n \"\"\"Create a smFRET Photon-HDF5 file with current timestamps.\"\"\"\n arg_4 = arg_0.filepath\n if arg_3 is not None:\n arg_4 = Path(arg_3, arg_4.name)\n arg_0.merge_da()\n arg_5 = arg_0._make_photon_hdf5(arg_1=arg_1)\n phc.hdf5.Func(arg_5, h5_fname=str(arg_4),\n arg_2=arg_2)"} +{"_id": "doc_8507", "title": "", "text": "def Func(arg_0, arg_1='/', arg_2='user', arg_3=False):\n \"\"\"Print the HDF5 attributes for `node_name`.\n\n Parameters:\n data_file (pytables HDF5 file object): the data file to print\n node_name (string): name of the path inside the file to be printed.\n Can be either a group or a leaf-node. Default: '/', the root node.\n which (string): Valid values are 'user' for user-defined attributes,\n 'sys' for pytables-specific attributes and 'all' to print both\n groups of attributes. 
Default 'user'.\n compress (bool): if True displays at most a line for each attribute.\n Default False.\n \"\"\"\n arg_4 = arg_0.get_node(arg_1)\n print ('List of attributes for:\\n %s\\n' % arg_4)\n for arg_5 in arg_4._v_attrs._f_list():\n print ('\\t%s' % arg_5)\n arg_6 = repr(arg_4._v_attrs[arg_5])\n if arg_3:\n arg_6 = arg_6.split('\\n')[0]\n print (\"\\t %s\" % arg_6)"} +{"_id": "doc_8508", "title": "", "text": "def Func(arg_0, arg_1='/'):\n \"\"\"Print all the sub-groups in `group` and leaf-nodes children of `group`.\n\n Parameters:\n data_file (pytables HDF5 file object): the data file to print\n group (string): path name of the group to be printed.\n Default: '/', the root node.\n \"\"\"\n arg_2 = arg_0.get_node(arg_1)\n print ('Groups in:\\n %s\\n' % arg_2)\n\n for arg_3 in arg_2._f_walk_groups():\n if arg_3 is not arg_2:\n print (' %s' % arg_3)\n\n print ('\\nLeaf-nodes in %s:' % arg_1)\n for arg_3 in arg_2._v_leaves.itervalues():\n arg_4 = arg_3.shape\n if len(arg_4) == 0:\n arg_4 = arg_3.read()\n print ('\\t%s, %s' % (arg_3.name, arg_4))\n if len(arg_3.title) > 0:\n print ('\\t %s' % arg_3.title)"} +{"_id": "doc_8509", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=64, arg_4=1, arg_5=arg_6(), arg_7=1, arg_8=None):\n \"\"\"Train model on given training examples and return the list of costs after each minibatch is processed.\n\n Args:\n trX (list) -- Inputs\n trY (list) -- Outputs\n batch_size (int, optional) -- number of examples in a minibatch (default 64)\n n_epochs (int, optional) -- number of epochs to train for (default 1)\n len_filter (object, optional) -- object to filter training example by length (default LenFilter())\n snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)\n path (str, optional) -- prefix of path where model snapshots are saved.\n If None, no snapshots are saved (default None)\n\n Returns:\n list -- costs of model after processing each minibatch\n \"\"\"\n if arg_5 is not None:\n arg_1, arg_2 = arg_5.filter(arg_1, arg_2)\n arg_2 = standardize_targets(arg_2, cost=arg_0.cost)\n\n arg_9 = 0.\n arg_10 = time()\n arg_11 = []\n for arg_12 in range(arg_4):\n arg_13 = []\n for arg_14, arg_15 in arg_0.iterator.iterXY(arg_1, arg_2):\n arg_16 = arg_0._train(arg_14, arg_15)\n arg_13.append(arg_16)\n arg_9 += len(arg_15)\n if arg_0.verbose >= 2:\n arg_17 = arg_9 / (time() - arg_10)\n arg_18 = len(arg_2) - arg_9 % len(arg_2)\n arg_19 = arg_18/arg_17\n sys.stdout.write(\"\\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds\" % (arg_12, arg_9, np.mean(arg_13[-250:]), arg_19))\n sys.stdout.flush()\n arg_11.extend(arg_13)\n\n arg_20 = \"Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds\" % (arg_12, arg_9, np.mean(arg_13[-250:]), time() - arg_10)\n if arg_0.verbose >= 2:\n sys.stdout.write(\"\\r\"+arg_20)\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n elif arg_0.verbose == 1:\n print(arg_20)\n if arg_8 and arg_12 % arg_7 == 0:\n save(arg_0, \"{0}.{1}\".format(arg_8, arg_12))\n return arg_11"} +{"_id": "doc_8510", "title": "", "text": "def Func(arg_0=None, arg_1='MONGODB_', **arg_2):\n \"\"\"Sets defaults for ``class Meta`` declarations.\n\n Arguments can either be extracted from a `module` (in that case\n all attributes starting from `prefix` are used):\n\n >>> import foo\n >>> Func(foo)\n\n or passed explicictly as keyword arguments:\n\n >>> Func(database='foo')\n\n .. 
warning:: Current implementation is by no means thread-safe --\n use it wisely.\n \"\"\"\n if arg_0 is not None and isinstance(arg_0, types.ModuleType):\n # Search module for MONGODB_* attributes and converting them\n # to _Options' values, ex: MONGODB_PORT ==> port.\n arg_3 = ((attr.replace(arg_1, '').lower(), value)\n for attr, value in vars(arg_0).items()\n if attr.startswith(arg_1))\n\n _Options._Func(**dict(arg_3))\n elif arg_2:\n _Options._Func(**arg_2)"} +{"_id": "doc_8511", "title": "", "text": "def Func(arg_0):\n \"\"\"Converts a given string from CamelCase to under_score.\n\n >>> Func('FooBar')\n 'foo_bar'\n \"\"\"\n arg_1 = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\\1_\\2', arg_0)\n arg_1 = re.sub(r'([a-z\\d])([A-Z])', r'\\1_\\2', arg_1)\n return arg_1.lower()"} +{"_id": "doc_8512", "title": "", "text": "def Func(arg_0=(10, 10), arg_1=(10, 10)) -> VAO:\n \"\"\"\n Generates a plane on the xz axis of a specific size and resolution.\n Normals and texture coordinates are also included.\n\n Args:\n size: (x, y) tuple\n resolution: (x, y) tuple\n\n Returns:\n A :py:class:`demosys.opengl.vao.VAO` instance\n \"\"\"\n arg_2, arg_3 = arg_0\n arg_4, arg_5 = arg_1\n arg_6, arg_7 = arg_2 / arg_4, arg_3 / arg_5 # step\n arg_8, arg_9 = -arg_2 / 2, -arg_3 / 2 # start offset\n\n def gen_pos():\n for arg_10 in range(arg_5):\n for arg_11 in range(arg_4):\n yield arg_8 + arg_11 * arg_6\n yield 0\n yield arg_9 + arg_10 * arg_7\n\n def gen_uv():\n for arg_10 in range(arg_5):\n for arg_11 in range(arg_4):\n yield arg_11 / (arg_4 - 1)\n yield 1 - arg_10 / (arg_5 - 1)\n\n def gen_normal():\n for arg_12 in range(arg_4 * arg_5):\n yield 0.0\n yield 1.0\n yield 0.0\n\n def gen_index():\n for arg_10 in range(arg_5 - 1):\n for arg_11 in range(arg_4 - 1):\n # quad poly left\n yield arg_10 * arg_5 + arg_11 + 1\n yield arg_10 * arg_5 + arg_11\n yield arg_10 * arg_5 + arg_11 + arg_4\n # quad poly right\n yield arg_10 * arg_5 + arg_11 + 1\n yield arg_10 * arg_5 + arg_11 + arg_4\n yield arg_10 * arg_5 + arg_11 + arg_4 + 1\n\n arg_13 = numpy.fromiter(gen_pos(), dtype=numpy.float32)\n arg_14 = numpy.fromiter(gen_uv(), dtype=numpy.float32)\n arg_15 = numpy.fromiter(gen_normal(), dtype=numpy.float32)\n arg_16 = numpy.fromiter(gen_index(), dtype=numpy.uint32)\n\n arg_17 = VAO(\"Func\", mode=moderngl.TRIANGLES)\n\n arg_17.buffer(arg_13, '3f', ['in_position'])\n arg_17.buffer(arg_14, '2f', ['in_uv'])\n arg_17.buffer(arg_15, '3f', ['in_normal'])\n\n arg_17.index_buffer(arg_16, index_element_size=4)\n\n return arg_17"} +{"_id": "doc_8513", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Deferred Funcing of the scene\n\n :param scene: The scene object\n :param file: Resolved path if changed by finder\n \"\"\"\n arg_0.path = arg_0.find_scene(arg_0.meta.path)\n if not arg_0.path:\n raise ValueError(\"Scene '{}' not found\".format(arg_0.meta.path))\n\n arg_0.scene = Scene(arg_0.path)\n\n # Load gltf json file\n if arg_0.path.suffix == '.gltf':\n arg_0.Func_gltf()\n\n # Load binary gltf file\n if arg_0.path.suffix == '.glb':\n arg_0.Func_glb()\n\n arg_0.meta.check_version()\n arg_0.meta.check_extensions(arg_0.supported_extensions)\n arg_0.Func_images()\n arg_0.Func_samplers()\n arg_0.Func_textures()\n arg_0.Func_materials()\n arg_0.Func_meshes()\n arg_0.Func_nodes()\n\n arg_0.scene.calc_scene_bbox()\n arg_0.scene.prepare()\n\n return arg_0.scene"} +{"_id": "doc_8514", "title": "", "text": "def Func(arg_0):\n \"\"\"Loads a binary gltf file\"\"\"\n with open(arg_0.path, 'rb') as fd:\n # Check header\n arg_1 = fd.read(4)\n if arg_1 != 
GLTF_MAGIC_HEADER:\n raise ValueError(\"{} has incorrect header {} != {}\".format(arg_0.path, arg_1, GLTF_MAGIC_HEADER))\n\n arg_2 = struct.unpack(' VAO:\n \"\"\"\n Generates random positions inside a confied box.\n\n Args:\n count (int): Number of points to generate\n\n Keyword Args:\n range_x (tuple): min-max range for x axis: Example (-10.0. 10.0)\n range_y (tuple): min-max range for y axis: Example (-10.0. 10.0)\n range_z (tuple): min-max range for z axis: Example (-10.0. 10.0)\n seed (int): The random seed\n\n Returns:\n A :py:class:`demosys.opengl.vao.VAO` instance\n \"\"\"\n random.seed(arg_4)\n\n def gen():\n for arg_5 in range(arg_0):\n yield random.uniform(*arg_1)\n yield random.uniform(*arg_2)\n yield random.uniform(*arg_3)\n\n arg_6 = numpy.fromiter(gen(), arg_0=arg_0 * 3, dtype=numpy.float32)\n\n arg_7 = VAO(\"geometry:Func\", mode=moderngl.POINTS)\n arg_7.buffer(arg_6, '3f', ['in_position'])\n\n return arg_7"} +{"_id": "doc_8528", "title": "", "text": "def Func(arg_0):\n \"\"\"Play the music\"\"\"\n if arg_0.initialized:\n mixer.music.unpause()\n else:\n mixer.music.play()\n # FIXME: Calling play twice to ensure the music is actually playing\n mixer.music.play()\n arg_0.initialized = True\n arg_0.paused = False"} +{"_id": "doc_8529", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Draw framebuffers for debug purposes.\n We need to supply near and far plane so the depth buffer can be linearized when visualizing.\n\n :param near: Projection near value\n :param far: Projection far value\n \"\"\"\n arg_0.ctx.disable(moderngl.DEPTH_TEST)\n\n helper.draw(arg_0.gbuffer.color_attachments[0], pos=(0.0, 0.0), scale=(0.25, 0.25))\n helper.draw(arg_0.gbuffer.color_attachments[1], pos=(0.5, 0.0), scale=(0.25, 0.25))\n helper.draw_depth(arg_0.gbuffer.depth_attachment, arg_1, arg_2, pos=(1.0, 0.0), scale=(0.25, 0.25))\n helper.draw(arg_0.lightbuffer.color_attachments[0], pos=(1.5, 0.0), scale=(0.25, 0.25))"} +{"_id": "doc_8530", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Render light volumes\"\"\"\n # Draw light volumes from the inside\n arg_0.ctx.front_face = 'cw'\n arg_0.ctx.blend_func = moderngl.ONE, moderngl.ONE\n\n helper._depth_sampler.use(location=1)\n with arg_0.lightbuffer_scope:\n for arg_6 in arg_0.point_lights:\n # Calc light properties\n arg_7 = arg_6.radius\n arg_8 = matrix44.multiply(arg_6.matrix, arg_1)\n # Draw the light volume\n arg_0.point_light_shader[\"m_proj\"].write(arg_2.tobytes())\n arg_0.point_light_shader[\"m_light\"].write(arg_8.astype('f4').tobytes())\n arg_0.gbuffer.color_attachments[1].use(location=0)\n arg_0.point_light_shader[\"g_normal\"].value = 0\n arg_0.gbuffer.depth_attachment.use(location=1)\n arg_0.point_light_shader[\"g_depth\"].value = 1\n arg_0.point_light_shader[\"screensize\"].value = (arg_0.width, arg_0.height)\n arg_0.point_light_shader[\"proj_const\"].value = arg_2.projection_constants\n arg_0.point_light_shader[\"radius\"].value = arg_7\n arg_0.unit_cube.render(arg_0.point_light_shader)\n\n helper._depth_sampler.clear(location=1)"} +{"_id": "doc_8531", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Render outlines of light volumes\"\"\"\n arg_0.ctx.enable(moderngl.BLEND)\n arg_0.ctx.blend_func = moderngl.SRC_ALPHA, moderngl.ONE_MINUS_SRC_ALPHA\n\n for arg_5 in arg_0.point_lights:\n arg_6 = matrix44.multiply(arg_5.matrix, arg_1)\n arg_7 = arg_5.radius\n arg_0.debug_shader[\"m_proj\"].write(arg_2.tobytes())\n arg_0.debug_shader[\"m_mv\"].write(arg_6.astype('f4').tobytes())\n 
arg_0.debug_shader[\"size\"].value = arg_7\n arg_0.unit_cube.render(arg_0.debug_shader, mode=moderngl.LINE_STRIP)\n\n arg_0.ctx.disable(moderngl.BLEND)"} +{"_id": "doc_8532", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2):\r\n \"\"\"Load a single shader\"\"\"\r\n if arg_3:\r\n arg_4 = arg_0.find_program(arg_3)\r\n if not arg_4:\r\n raise ValueError(\"Cannot find {} shader '{}'\".format(arg_1, arg_3))\r\n\r\n print(\"Loading:\", arg_3)\r\n\r\n with open(arg_4, 'r') as fd:\r\n return fd.read()"} +{"_id": "doc_8533", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Load a texture array\"\"\"\r\n arg_0._open_image()\r\n\r\n arg_1, arg_2, arg_3 = arg_0.image.size[0], arg_0.image.size[1] // arg_0.layers, arg_0.layers\r\n arg_4, arg_5 = image_data(arg_0.image)\r\n\r\n arg_6 = arg_0.ctx.texture_array(\r\n (arg_1, arg_2, arg_3),\r\n arg_4,\r\n arg_5,\r\n )\r\n arg_6.extra = {'meta': arg_0.meta}\r\n\r\n if arg_0.meta.mipmap:\r\n arg_6.build_mipmaps()\r\n\r\n arg_0._close_image()\r\n\r\n return arg_6"} +{"_id": "doc_8534", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=0):\n \"\"\"\n Draw the mesh using the assigned mesh program\n\n :param projection_matrix: projection_matrix (bytes)\n :param view_matrix: view_matrix (bytes)\n :param camera_matrix: camera_matrix (bytes)\n \"\"\"\n if arg_0.mesh_program:\n arg_0.mesh_program.Func(\n arg_0,\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4\n )"} +{"_id": "doc_8535", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Set the current time jumping in the timeline.\n\n Args:\n value (float): The new time\n \"\"\"\n if arg_1 < 0:\n arg_1 = 0\n\n arg_0.controller.row = arg_0.rps * arg_1"} +{"_id": "doc_8536", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_5.Framebuffer):\n \"\"\"\n Draw function called by the system every frame when the effect is active.\n This method raises ``NotImplementedError`` unless implemented.\n\n Args:\n time (float): The current time in seconds.\n frametime (float): The time the previous frame used to render in seconds.\n target (``moderngl.Framebuffer``): The target FBO for the effect.\n \"\"\"\n raise NotImplementedError(\"Func() is not implemented\")"} +{"_id": "doc_8537", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> moderngl.Program:\n \"\"\"\n Get a program by its label\n\n Args:\n label (str): The label for the program\n\n Returns: py:class:`moderngl.Program` instance\n \"\"\"\n return arg_0._project.Func(arg_1)"} +{"_id": "doc_8538", "title": "", "text": "def Func(arg_0, arg_1: arg_2 = 75.0, arg_3: arg_2 = 1.0, arg_4: arg_2 = 100.0, arg_5: arg_2 = None):\n \"\"\"\n Create a projection matrix with the following parameters.\n When ``aspect_ratio`` is not provided the configured aspect\n ratio for the window will be used.\n\n Args:\n fov (float): Field of view (float)\n near (float): Camera near value\n far (float): Camrea far value\n\n Keyword Args:\n aspect_ratio (float): Aspect ratio of the viewport\n\n Returns:\n The projection matrix as a float32 :py:class:`numpy.array`\n \"\"\"\n return matrix44.create_perspective_projection_matrix(\n arg_1,\n arg_5 or arg_0.window.aspect_ratio,\n arg_3,\n arg_4,\n dtype='f4',\n )"} +{"_id": "doc_8539", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Creates a transformation matrix woth rotations and translation.\n\n Args:\n rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n translation: 3 component vector as a list, tuple, 
or :py:class:`pyrr.Vector3`\n\n Returns:\n A 4x4 matrix as a :py:class:`numpy.array`\n \"\"\"\n arg_3 = None\n if arg_1 is not None:\n arg_3 = Matrix44.from_eulers(Vector3(arg_1))\n\n if arg_2 is not None:\n arg_4 = matrix44.create_from_translation(Vector3(arg_2))\n if arg_3 is None:\n arg_3 = arg_4\n else:\n arg_3 = matrix44.multiply(arg_3, arg_4)\n\n return arg_3"} +{"_id": "doc_8540", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates a normal matrix from modelview matrix\n\n Args:\n modelview: The modelview matrix\n\n Returns:\n A 3x3 Normal matrix as a :py:class:`numpy.array`\n \"\"\"\n arg_2 = Matrix33.from_matrix44(arg_1)\n arg_2 = arg_2.inverse\n arg_2 = arg_2.transpose()\n return arg_2"} +{"_id": "doc_8541", "title": "", "text": "def Func(arg_0):\n \"\"\"Scan for available templates in effect_templates\"\"\"\n arg_1 = list_templates()\n\n if arg_0 not in arg_1:\n raise ArgumentTypeError(\"Effect template '{}' does not exist.\\n Available templates: {} \".format(\n arg_0, \", \".join(arg_1)))\n\n return arg_0"} +{"_id": "doc_8542", "title": "", "text": "def Func():\n \"\"\"Get the absolute path to the root of the demosys package\"\"\"\n arg_0 = os.path.dirname(globals()['__file__'])\n return os.path.dirname(os.path.dirname(arg_0))"} +{"_id": "doc_8543", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Load a file in text mode\"\"\"\r\n arg_0.meta.resolved_path = arg_0.find_data(arg_0.meta.path)\r\n\r\n if not arg_0.meta.resolved_path:\r\n raise ImproperlyConfigured(\"Data file '{}' not found\".format(arg_0.meta.path))\r\n\r\n print(\"Loading:\", arg_0.meta.path)\r\n\r\n with open(arg_0.meta.resolved_path, 'r') as fd:\r\n return fd.read()"} +{"_id": "doc_8544", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get a finder class from an import path.\n Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.\n This function uses an lru cache.\n\n :param import_path: string representing an import path\n :return: An instance of the finder\n \"\"\"\n arg_1 = import_string(arg_0)\n if not issubclass(arg_1, BaseFileSystemFinder):\n raise ImproperlyConfigured('Finder {} is not a subclass of core.finders.FileSystemFinder'.format(arg_0))\n return arg_1()"} +{"_id": "doc_8545", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Find a file in the path. The file may exist in multiple\n paths. 
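# Editor's note: the finder in doc_8545 deliberately keeps scanning every
# configured directory and returns the *last* existing match, so later
# search paths override earlier ones. A stand-alone sketch of that rule:
from pathlib import Path

def find(paths, relative):
    """Return the last existing `relative` path under `paths`, else None."""
    found = None
    for base in paths:
        candidate = Path(base) / relative
        if candidate.exists():
            found = candidate  # keep going: a later hit wins
    return found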
The last found file will be returned.\n\n :param path: The path to Func\n :return: The absolute path to the file or None if not found\n \"\"\"\n # Update paths from settings to make them editable runtime\n # This is only possible for FileSystemFinders\n if getattr(arg_0, 'settings_attr', None):\n arg_0.paths = getattr(settings, arg_0.settings_attr)\n\n arg_4 = None\n\n for arg_5 in arg_0.paths:\n arg_6 = arg_5 / arg_1\n if arg_6.exists():\n arg_4 = arg_6\n\n return arg_4"} +{"_id": "doc_8546", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Update the internal projection matrix based on current values\n or values passed in if specified.\n\n :param aspect_ratio: New aspect ratio\n :param fov: New field of view\n :param near: New near value\n :param far: New far value\n \"\"\"\n arg_0.aspect_ratio = arg_1 or arg_0.aspect_ratio\n arg_0.fov = arg_2 or arg_0.fov\n arg_0.near = arg_3 or arg_0.near\n arg_0.far = arg_4 or arg_0.far\n\n arg_0.matrix = Matrix44.perspective_projection(arg_0.fov, arg_0.aspect_ratio, arg_0.near, arg_0.far)"} +{"_id": "doc_8547", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Swaps buffers, incement the framecounter and pull events.\n \"\"\"\n arg_0.frames += 1\n glfw.Func(arg_0.window)\n arg_0.poll_events()"} +{"_id": "doc_8548", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Ensure glfw library version is compatible\n \"\"\"\n print(\"glfw version: {} (python wrapper version {})\".format(glfw.get_version(), glfw.__version__))\n if glfw.get_version() < arg_0.min_glfw_version:\n raise ValueError(\"Please update glfw binaries to version {} or later\".format(arg_0.min_glfw_version))"} +{"_id": "doc_8549", "title": "", "text": "def Func(arg_0):\n \"\"\"Translate the buffer format\"\"\"\n arg_1 = []\n arg_2 = []\n arg_3 = []\n\n if \"T2F\" in arg_0:\n arg_1.append(\"2f\")\n arg_2.append(\"in_uv\")\n arg_3.append((\"TEXCOORD_0\", \"in_uv\", 2))\n\n if \"C3F\" in arg_0:\n arg_1.append(\"3f\")\n arg_2.append(\"in_color\")\n arg_3.append((\"NORMAL\", \"in_color\", 3))\n\n if \"N3F\" in arg_0:\n arg_1.append(\"3f\")\n arg_2.append(\"in_normal\")\n arg_3.append((\"NORMAL\", \"in_normal\", 3))\n\n arg_1.append(\"3f\")\n arg_2.append(\"in_position\")\n arg_3.append((\"POSITION\", \"in_position\", 3))\n\n return \" \".join(arg_1), arg_2, arg_3"} +{"_id": "doc_8550", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Set the current time. This can be used to jump in the timeline.\n\n Args:\n value (float): The new time\n \"\"\"\n if arg_1 < 0:\n arg_1 = 0\n\n arg_0.offset += arg_0.get_time() - arg_1"} +{"_id": "doc_8551", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Resolve scene loader based on file extension\n \"\"\"\n for arg_3 in arg_0._loaders:\n if arg_3.supports_file(arg_1):\n arg_1.loader_cls = arg_3\n break\n else:\n raise ImproperlyConfigured(\n \"Scene {} has no loader class registered. 
Check settings.SCENE_LOADERS\".format(arg_1.path))"} +{"_id": "doc_8552", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\r\n \"\"\"\r\n Pyglet specific callback for window resize events.\r\n \"\"\"\r\n arg_0.width, arg_0.height = arg_1, arg_2\r\n arg_0.buffer_width, arg_0.buffer_height = arg_1, arg_2\r\n arg_0.resize(arg_1, arg_2)"} +{"_id": "doc_8553", "title": "", "text": "def Func(arg_0):\r\n \"\"\"\r\n Swap buffers, increment frame counter and pull events\r\n \"\"\"\r\n if not arg_0.window.context:\r\n return\r\n\r\n arg_0.frames += 1\r\n arg_0.window.flip()\r\n arg_0.window.dispatch_events()"} +{"_id": "doc_8554", "title": "", "text": "def Func(arg_0=0.5, arg_1=32, arg_2=16) -> VAO:\n \"\"\"\n Creates a Func.\n\n Keyword Args:\n radius (float): Radius or the Func\n rings (int): number or horizontal rings\n sectors (int): number of vertical segments\n\n Returns:\n A :py:class:`demosys.opengl.vao.VAO` instance\n \"\"\"\n arg_3 = 1.0 / (arg_2 - 1)\n arg_4 = 1.0 / (arg_1 - 1)\n\n arg_5 = [0] * (arg_2 * arg_1 * 3)\n arg_6 = [0] * (arg_2 * arg_1 * 3)\n arg_7 = [0] * (arg_2 * arg_1 * 2)\n\n arg_8, arg_9, arg_10 = 0, 0, 0\n for arg_11 in range(arg_2):\n for arg_12 in range(arg_1):\n arg_13 = math.sin(-math.pi / 2 + math.pi * arg_11 * arg_3)\n arg_14 = math.cos(2 * math.pi * arg_12 * arg_4) * math.sin(math.pi * arg_11 * arg_3)\n arg_15 = math.sin(2 * math.pi * arg_12 * arg_4) * math.sin(math.pi * arg_11 * arg_3)\n\n arg_7[arg_10] = arg_12 * arg_4\n arg_7[arg_10 + 1] = arg_11 * arg_3\n\n arg_5[arg_8] = arg_14 * arg_0\n arg_5[arg_8 + 1] = arg_13 * arg_0\n arg_5[arg_8 + 2] = arg_15 * arg_0\n\n arg_6[arg_9] = arg_14\n arg_6[arg_9 + 1] = arg_13\n arg_6[arg_9 + 2] = arg_15\n\n arg_10 += 2\n arg_8 += 3\n arg_9 += 3\n\n arg_16 = [0] * arg_2 * arg_1 * 6\n arg_17 = 0\n for arg_11 in range(arg_2 - 1):\n for arg_12 in range(arg_1 - 1):\n arg_16[arg_17] = arg_11 * arg_1 + arg_12\n arg_16[arg_17 + 1] = (arg_11 + 1) * arg_1 + (arg_12 + 1)\n arg_16[arg_17 + 2] = arg_11 * arg_1 + (arg_12 + 1)\n\n arg_16[arg_17 + 3] = arg_11 * arg_1 + arg_12\n arg_16[arg_17 + 4] = (arg_11 + 1) * arg_1 + arg_12\n arg_16[arg_17 + 5] = (arg_11 + 1) * arg_1 + (arg_12 + 1)\n arg_17 += 6\n\n arg_18 = numpy.array(arg_5, dtype=numpy.float32)\n arg_19 = numpy.array(arg_6, dtype=numpy.float32)\n arg_20 = numpy.array(arg_7, dtype=numpy.float32)\n arg_21 = numpy.array(arg_16, dtype=numpy.uint32)\n\n arg_22 = VAO(\"Func\", mode=mlg.TRIANGLES)\n # VBOs\n arg_22.buffer(arg_18, '3f', ['in_position'])\n arg_22.buffer(arg_19, '3f', ['in_normal'])\n arg_22.buffer(arg_20, '2f', ['in_uv'])\n arg_22.index_buffer(arg_21, index_element_size=4)\n\n return arg_22"} +{"_id": "doc_8555", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Attempts to assign a loader class to a resource description\n\n :param meta: The resource description instance\n \"\"\"\n arg_1.loader_cls = arg_0.get_loader(arg_1, raise_on_error=True)"} +{"_id": "doc_8556", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3=False) -> BaseLoader:\n \"\"\"\n Attempts to get a loader\n\n :param meta: The resource description instance\n :param raise_on_error: Raise ImproperlyConfigured if the loader cannot be resolved\n :returns: The requested loader class\n \"\"\"\n for arg_4 in arg_0._loaders:\n if arg_4.name == arg_1.loader:\n return arg_4\n\n if arg_3:\n raise ImproperlyConfigured(\n \"Resource has invalid loader '{}': {}\\nAvailiable loaders: {}\".format(\n arg_1.loader, arg_1, [arg_4.name for arg_4 in arg_0._loaders]))"} +{"_id": "doc_8557", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Pyqt specific Func callback.\n \"\"\"\n if not arg_0.fbo:\n return\n\n # pyqt reports sizes in actual buffer size\n arg_0.width = arg_1 // arg_0.widget.devicePixelRatio()\n arg_0.height = arg_2 // arg_0.widget.devicePixelRatio()\n arg_0.buffer_width = arg_1\n arg_0.buffer_height = arg_2\n\n super().Func(arg_1, arg_2)"} +{"_id": "doc_8558", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Draws a frame. Internally it calls the\n configured timeline's Func method.\n\n Args:\n current_time (float): The current time (preferrably always from the configured timer class)\n frame_time (float): The duration of the previous frame in seconds\n \"\"\"\n arg_0.set_default_viewport()\n arg_0.timeline.Func(arg_1, arg_2, arg_0.fbo)"} +{"_id": "doc_8559", "title": "", "text": "def Func(arg_0, arg_1=0.0, arg_2=0.0, arg_3=0.0, arg_4=0.0, arg_5=1.0):\n \"\"\"\n Sets the clear values for the window buffer.\n\n Args:\n red (float): red compoent\n green (float): green compoent\n blue (float): blue compoent\n alpha (float): alpha compoent\n depth (float): depth value\n \"\"\"\n arg_0.clear_color = (arg_1, arg_2, arg_3, arg_4)\n arg_0.clear_depth = arg_5"} +{"_id": "doc_8560", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Handles the standard keyboard events such as camera movements,\n taking a screenshot, closing the window etc.\n\n Can be overriden add new keyboard events. Ensure this method\n is also called if you want to keep the standard features.\n\n Arguments:\n key: The key that was pressed or released\n action: The key action. Can be `ACTION_PRESS` or `ACTION_RELEASE`\n modifier: Modifiers such as holding shift or ctrl\n \"\"\"\n # The well-known standard key for quick exit\n if arg_1 == arg_0.keys.ESCAPE:\n arg_0.close()\n return\n\n # Toggle pause time\n if arg_1 == arg_0.keys.SPACE and arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.timer.toggle_pause()\n\n # Camera movement\n # Right\n if arg_1 == arg_0.keys.D:\n if arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.sys_camera.move_right(True)\n elif arg_2 == arg_0.keys.ACTION_RELEASE:\n arg_0.sys_camera.move_right(False)\n # Left\n elif arg_1 == arg_0.keys.A:\n if arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.sys_camera.move_left(True)\n elif arg_2 == arg_0.keys.ACTION_RELEASE:\n arg_0.sys_camera.move_left(False)\n # Forward\n elif arg_1 == arg_0.keys.W:\n if arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.sys_camera.move_forward(True)\n if arg_2 == arg_0.keys.ACTION_RELEASE:\n arg_0.sys_camera.move_forward(False)\n # Backwards\n elif arg_1 == arg_0.keys.S:\n if arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.sys_camera.move_backward(True)\n if arg_2 == arg_0.keys.ACTION_RELEASE:\n arg_0.sys_camera.move_backward(False)\n\n # UP\n elif arg_1 == arg_0.keys.Q:\n if arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.sys_camera.move_down(True)\n if arg_2 == arg_0.keys.ACTION_RELEASE:\n arg_0.sys_camera.move_down(False)\n\n # Down\n elif arg_1 == arg_0.keys.E:\n if arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.sys_camera.move_up(True)\n if arg_2 == arg_0.keys.ACTION_RELEASE:\n arg_0.sys_camera.move_up(False)\n\n # Screenshots\n if arg_1 == arg_0.keys.X and arg_2 == arg_0.keys.ACTION_PRESS:\n screenshot.create()\n\n if arg_1 == arg_0.keys.R and arg_2 == arg_0.keys.ACTION_PRESS:\n project.instance.reload_programs()\n\n if arg_1 == arg_0.keys.RIGHT and arg_2 == arg_0.keys.ACTION_PRESS:\n arg_0.timer.set_time(arg_0.timer.get_time() + 10.0)\n\n if arg_1 == arg_0.keys.LEFT and arg_2 == arg_0.keys.ACTION_PRESS:\n 
arg_0.timer.set_time(arg_0.timer.get_time() - 10.0)\n\n # Forward the event to the timeline\n arg_0.timeline.key_event(arg_1, arg_2, arg_3)"} +{"_id": "doc_8561", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n The standard mouse movement event method.\n Can be overriden to add new functionality.\n By default this feeds the system camera with new values.\n\n Args:\n x: The current mouse x position\n y: The current mouse y position\n dx: Delta x postion (x position difference from the previous event)\n dy: Delta y postion (y position difference from the previous event)\n \"\"\"\n arg_0.sys_camera.rot_state(arg_1, arg_2)"} +{"_id": "doc_8562", "title": "", "text": "def Func(arg_0):\n \"\"\"Start the timer\"\"\"\n arg_0.music.Func()\n if not arg_0.Func_paused:\n arg_0.rocket.Func()"} +{"_id": "doc_8563", "title": "", "text": "def Func(arg_0):\n \"\"\"Toggle pause mode\"\"\"\n arg_0.controller.playing = not arg_0.controller.playing\n arg_0.music.Func()"} +{"_id": "doc_8564", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check if the loader has a supported file extension\"\"\"\n arg_2 = Path(arg_1.path)\n\n for arg_3 in arg_0.file_extensions:\n if arg_2.suffixes[:len(arg_3)] == arg_3:\n return True\n\n return False"} +{"_id": "doc_8565", "title": "", "text": "def Func(arg_0, arg_1) -> Track:\n \"\"\"\n Get or create a Track object.\n\n :param name: Name of the track\n :return: Track object\n \"\"\"\n arg_1 = arg_1.lower()\n arg_2 = arg_0.track_map.Func(arg_1)\n if not arg_2:\n arg_2 = Track(arg_1)\n arg_0.tacks.append(arg_2)\n arg_0.track_map[arg_1] = arg_2\n return arg_2"} +{"_id": "doc_8566", "title": "", "text": "def Func(arg_0: arg_1) -> List[arg_1]:\n \"\"\"\n Get all command names in the a folder\n\n :return: List of commands names\n \"\"\"\n if not arg_0:\n return []\n\n return [arg_3 for arg_2, arg_3, arg_4 in pkgutil.iter_modules([arg_0])\n if not arg_4 and not arg_3.startswith('_')]"} +{"_id": "doc_8567", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"Override settings values\"\"\"\n for arg_2, arg_3 in arg_1.items():\n setattr(arg_0, arg_2, arg_3)"} +{"_id": "doc_8568", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Hack in program directory\"\"\"\n arg_2 = list(arg_0.PROGRAM_DIRS)\n arg_2.append(arg_1)\n arg_0.PROGRAM_DIRS = arg_2"} +{"_id": "doc_8569", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Hack in texture directory\"\"\"\n arg_2 = list(arg_0.TEXTURE_DIRS)\n arg_2.append(arg_1)\n arg_0.TEXTURE_DIRS = arg_2"} +{"_id": "doc_8570", "title": "", "text": "def Func(arg_0, arg_1: arg_2.Program, arg_4=None, arg_5=-1, arg_6=0, arg_7=1):\n \"\"\"\n Render the VAO.\n\n Args:\n program: The ``moderngl.Program``\n\n Keyword Args:\n mode: Override the draw mode (``TRIANGLES`` etc)\n vertices (int): The number of vertices to transform\n first (int): The index of the first vertex to start with\n instances (int): The number of instances\n \"\"\"\n arg_8 = arg_0.instance(arg_1)\n\n if arg_4 is None:\n arg_4 = arg_0.mode\n\n arg_8.Func(arg_4, arg_5=arg_5, arg_6=arg_6, arg_7=arg_7)"} +{"_id": "doc_8571", "title": "", "text": "def Func(arg_0, arg_1: arg_2.Program) -> arg_2.VertexArray:\n \"\"\"\n Obtain the ``moderngl.VertexArray`` Func for the program.\n The Func is only created once and cached internally.\n\n Returns: ``moderngl.VertexArray`` Func\n \"\"\"\n arg_4 = arg_0.vaos.get(arg_1.glo)\n if arg_4:\n return arg_4\n\n arg_5 = [name for name, attr in arg_1._members.items() if isFunc(attr, arg_2.Attribute)]\n\n # Make sure all 
attributes are covered\n for arg_6 in arg_5:\n # Ignore built in attributes for now\n if arg_6.startswith('gl_'):\n continue\n\n # Do we have a buffer mapping to this attribute?\n if not sum(arg_7.has_attribute(arg_6) for arg_7 in arg_0.buffers):\n raise VAOError(\"VAO {} doesn't have attribute {} for program {}\".format(\n arg_0.name, arg_6, arg_1.name))\n\n arg_8 = []\n\n # Pick out the attributes we can actually map\n for arg_7 in arg_0.buffers:\n arg_9 = arg_7.content(arg_5)\n if arg_9:\n arg_8.append(arg_9)\n\n # Any attribute left is not accounted for\n if arg_5:\n for arg_6 in arg_5:\n if arg_6.startswith('gl_'):\n continue\n\n raise VAOError(\"Did not find a buffer mapping for {}\".format([arg_10 for arg_10 in arg_5]))\n\n # Create the vao\n if arg_0._index_buffer:\n arg_4 = context.ctx().vertex_array(arg_1, arg_8,\n arg_0._index_buffer, arg_0._index_element_size)\n else:\n arg_4 = context.ctx().vertex_array(arg_1, arg_8)\n\n arg_0.vaos[arg_1.glo] = arg_4\n return arg_4"} +{"_id": "doc_8572", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=0):\n \"\"\"\n Draw code for the mesh. Should be overriden.\n\n :param projection_matrix: projection_matrix (bytes)\n :param view_matrix: view_matrix (bytes)\n :param camera_matrix: camera_matrix (bytes)\n :param time: The current time\n \"\"\"\n arg_0.program[\"m_proj\"].write(arg_2)\n arg_0.program[\"m_mv\"].write(arg_3)\n arg_1.vao.render(arg_0.program)"} +{"_id": "doc_8573", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Parse the effect package string.\n Can contain the package python path or path to effect class in an effect package.\n\n Examples::\n\n # Path to effect pacakge\n examples.cubes\n\n # Path to effect class\n examples.cubes.Cubes\n\n Args:\n path: python path to effect package. May also include effect class name.\n\n Returns:\n tuple: (package_path, effect_class)\n \"\"\"\n arg_1 = arg_0.split('.')\n\n # Is the last entry in the path capitalized?\n if arg_1[-1][0].isupper():\n return \".\".join(arg_1[:-1]), arg_1[-1]\n\n return arg_0, \"\""} +{"_id": "doc_8574", "title": "", "text": "def Func(arg_0) -> List[Any]:\n \"\"\"\n Get all resources registed in effect packages.\n These are typically located in ``resources.py``\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.packages:\n arg_1.extend(arg_2.resources)\n\n return arg_1"} +{"_id": "doc_8575", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Registers a single package\n\n :param name: (str) The effect package to add\n \"\"\"\n arg_1, arg_2 = parse_package_string(arg_1)\n\n if arg_1 in arg_0.package_map:\n return\n\n arg_3 = EffectPackage(arg_1)\n arg_3.load()\n\n arg_0.packages.append(arg_3)\n arg_0.package_map[arg_3.name] = arg_3\n\n # Load effect package dependencies\n arg_0.polulate(arg_3.effect_packages)"} +{"_id": "doc_8576", "title": "", "text": "def Func(arg_0, arg_1) -> 'EffectPackage':\n \"\"\"\n Get a package by python path. 
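# Editor's note: doc_8573 above splits an effect path on '.' and treats a
# capitalized final segment as a class name; doc_8576 relies on the same
# rule. A faithful sketch:
def parse_package_string(path):
    """'pkg.sub.Class' -> ('pkg.sub', 'Class'); plain packages return ''."""
    parts = path.split('.')
    if parts[-1][0].isupper():
        return '.'.join(parts[:-1]), parts[-1]
    return path, ''

assert parse_package_string('examples.cubes.Cubes') == ('examples.cubes', 'Cubes')
assert parse_package_string('examples.cubes') == ('examples.cubes', '')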
Can also contain path to an effect.\n\n Args:\n name (str): Path to effect package or effect\n\n Returns:\n The requested EffectPackage\n\n Raises:\n EffectError when no package is found\n \"\"\"\n arg_1, arg_2 = parse_package_string(arg_1)\n\n try:\n return arg_0.package_map[arg_1]\n except KeyError:\n raise EffectError(\"No package '{}' registered\".format(arg_1))"} +{"_id": "doc_8577", "title": "", "text": "def Func(arg_0) -> List[Type[Effect]]:\n \"\"\"Returns the runnable effect in the package\"\"\"\n return [arg_1 for arg_1 in arg_0.effect_classes if arg_1.runnable]"} +{"_id": "doc_8578", "title": "", "text": "def Func(arg_0):\n \"\"\"FInd the effect package\"\"\"\n try:\n arg_0.package = importlib.import_module(arg_0.name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Effect package '{}' not found.\".format(arg_0.name))"} +{"_id": "doc_8579", "title": "", "text": "def Func(arg_0):\n \"\"\"Iterate the module attributes picking out effects\"\"\"\n arg_0.effect_classes = []\n\n for arg_2, arg_3 in inspect.getmembers(arg_0.effect_module):\n if inspect.isclass(arg_3):\n if arg_3 == Effect:\n continue\n\n if issubclass(arg_3, Effect):\n arg_0.effect_classes.append(arg_3)\n arg_0.effect_class_map[arg_3.__name__] = arg_3\n arg_3._name = \"{}.{}\".format(arg_0.effect_module_name, arg_3.__name__)"} +{"_id": "doc_8580", "title": "", "text": "def Func(arg_0):\n \"\"\"Fetch the resource list\"\"\"\n # Attempt to load the dependencies module\n try:\n arg_1 = '{}.{}'.format(arg_0.name, 'dependencies')\n arg_0.dependencies_module = importlib.import_module(arg_1)\n except ModuleNotFoundError as err:\n raise EffectError(\n (\n \"Effect package '{}' has no 'dependencies' module or the module has errors. \"\n \"Forwarded error from importlib: {}\"\n ).format(arg_0.name, err))\n\n # Fetch the resource descriptions\n try:\n arg_0.resources = getattr(arg_0.dependencies_module, 'resources')\n except AttributeError:\n raise EffectError(\"Effect dependencies module '{}' has no 'resources' attribute\".format(arg_1))\n\n if not isinstance(arg_0.resources, list):\n raise EffectError(\n \"Effect dependencies module '{}': 'resources' is of type {} instead of a list\".format(\n arg_1, type(arg_0.resources)))\n\n # Fetch the effect class list\n try:\n arg_0.effect_packages = getattr(arg_0.dependencies_module, 'effect_packages')\n except AttributeError:\n raise EffectError(\"Effect dependencies module '{}' has 'effect_packages' attribute\".format(arg_1))\n\n if not isinstance(arg_0.effect_packages, list):\n raise EffectError(\n \"Effect dependencies module '{}': 'effect_packages' is of type {} instead of a list\".format(\n arg_1, type(arg_0.effects)))"} +{"_id": "doc_8581", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\r\n \"\"\"\r\n Fetch track value for every runnable effect.\r\n If the value is > 0.5 we Func it.\r\n \"\"\"\r\n for arg_4 in arg_0.effects:\r\n arg_5 = arg_4.rocket_timeline_track.time_value(arg_1)\r\n if arg_5 > 0.5:\r\n arg_4.Func(arg_1, arg_2, arg_3)"} +{"_id": "doc_8582", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Load a 2d texture\"\"\"\r\n arg_0._open_image()\r\n\r\n arg_1, arg_2 = image_data(arg_0.image)\r\n\r\n arg_3 = arg_0.ctx.texture(\r\n arg_0.image.size,\r\n arg_1,\r\n arg_2,\r\n )\r\n arg_3.extra = {'meta': arg_0.meta}\r\n\r\n if arg_0.meta.mipmap:\r\n arg_3.build_mipmaps()\r\n\r\n arg_0._close_image()\r\n\r\n return arg_3"} +{"_id": "doc_8583", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4):\r\n \"\"\"Initialize a single glsl string 
containing all shaders\"\"\"\r\n arg_5 = arg_0(arg_1)\r\n arg_5.vertex_source = ShaderSource(\r\n VERTEX_SHADER,\r\n arg_1.path or arg_1.vertex_shader,\r\n arg_3\r\n )\r\n\r\n if GEOMETRY_SHADER in arg_3:\r\n arg_5.geometry_source = ShaderSource(\r\n GEOMETRY_SHADER,\r\n arg_1.path or arg_1.geometry_shader,\r\n arg_3,\r\n )\r\n\r\n if FRAGMENT_SHADER in arg_3:\r\n arg_5.fragment_source = ShaderSource(\r\n FRAGMENT_SHADER,\r\n arg_1.path or arg_1.fragment_shader,\r\n arg_3,\r\n )\r\n\r\n if TESS_CONTROL_SHADER in arg_3:\r\n arg_5.tess_control_source = ShaderSource(\r\n TESS_CONTROL_SHADER,\r\n arg_1.path or arg_1.tess_control_shader,\r\n arg_3,\r\n )\r\n\r\n if TESS_EVALUATION_SHADER in arg_3:\r\n arg_5.tess_evaluation_source = ShaderSource(\r\n TESS_EVALUATION_SHADER,\r\n arg_1.path or arg_1.tess_evaluation_shader,\r\n arg_3,\r\n )\r\n\r\n return arg_5"} +{"_id": "doc_8584", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3, arg_4=None, arg_5=None,\r\n arg_6=None, arg_7=None):\r\n \"\"\"Initialize multiple shader strings\"\"\"\r\n arg_8 = arg_0(arg_1)\r\n arg_8.vertex_source = ShaderSource(\r\n VERTEX_SHADER,\r\n arg_1.path or arg_1.vertex_shader,\r\n arg_3,\r\n )\r\n\r\n if arg_4:\r\n arg_8.geometry_source = ShaderSource(\r\n GEOMETRY_SHADER,\r\n arg_1.path or arg_1.geometry_shader,\r\n arg_4,\r\n )\r\n\r\n if arg_5:\r\n arg_8.fragment_source = ShaderSource(\r\n FRAGMENT_SHADER,\r\n arg_1.path or arg_1.fragment_shader,\r\n arg_5,\r\n )\r\n\r\n if arg_6:\r\n arg_8.tess_control_source = ShaderSource(\r\n TESS_CONTROL_SHADER,\r\n arg_1.path or arg_1.tess_control_shader,\r\n arg_6,\r\n )\r\n\r\n if arg_7:\r\n arg_8.tess_evaluation_source = ShaderSource(\r\n TESS_EVALUATION_SHADER,\r\n arg_1.path or arg_1.tess_control_shader,\r\n arg_7,\r\n )\r\n\r\n return arg_8"} +{"_id": "doc_8585", "title": "", "text": "def Func(arg_0):\r\n \"\"\"\r\n Loads this project instance\r\n \"\"\"\r\n arg_0.create_effect_classes()\r\n\r\n arg_0._add_resource_descriptions_to_pools(arg_0.create_external_resources())\r\n arg_0._add_resource_descriptions_to_pools(arg_0.create_resources())\r\n\r\n for arg_1, arg_2 in resources.textures.Func_pool():\r\n arg_0._textures[arg_1.label] = arg_2\r\n\r\n for arg_1, arg_2 in resources.programs.Func_pool():\r\n arg_0._programs[arg_1.label] = arg_2\r\n\r\n for arg_1, arg_2 in resources.scenes.Func_pool():\r\n arg_0._scenes[arg_1.label] = arg_2\r\n\r\n for arg_1, arg_2 in resources.data.Func_pool():\r\n arg_0._data[arg_1.label] = arg_2\r\n\r\n arg_0.create_effect_instances()\r\n arg_0.post_Func()"} +{"_id": "doc_8586", "title": "", "text": "def Func(arg_0):\r\n \"\"\"\r\n Reload all shader programs with the reloadable flag set\r\n \"\"\"\r\n print(\"Reloading programs:\")\r\n for arg_1, arg_2 in arg_0._programs.items():\r\n if getattr(arg_2, 'program', None):\r\n print(\" - {}\".format(arg_2.meta.label))\r\n arg_2.program = resources.programs.load(arg_2.meta)"} +{"_id": "doc_8587", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Get components and bytes for an image\"\"\"\r\n # NOTE: We might want to check the actual image.mode\r\n # and convert to an acceptable format.\r\n # At the moment we load the data as is.\r\n arg_1 = arg_0.tobytes()\r\n arg_2 = len(arg_1) // (arg_0.size[0] * arg_0.size[1])\r\n return arg_2, arg_1"} +{"_id": "doc_8588", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Write manage.py in the current directory\"\"\"\r\n with open(os.path.join(arg_0.template_dir, 'manage.py'), 'r') as fd:\r\n arg_1 = 
fd.read().format(project_name=arg_0.project_name)\r\n\r\n with open('manage.py', 'w') as fd:\r\n fd.write(arg_1)\r\n\r\n os.chmod('manage.py', 0o777)"} +{"_id": "doc_8589", "title": "", "text": "def Func(arg_0):\r\n \"\"\"Returns the absolute path to template directory\"\"\"\r\n arg_1 = os.path.dirname(os.path.abspath(__file__))\r\n arg_1 = os.path.dirname(os.path.dirname(arg_1))\r\n arg_1 = os.path.join(arg_1, 'project_template')\r\n return arg_1"} +{"_id": "doc_8590", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Resolve program loader\n \"\"\"\n if not arg_1.loader:\n arg_1.loader = 'single' if arg_1.path else 'separate'\n\n for arg_4 in arg_0._loaders:\n if arg_4.name == arg_1.loader:\n arg_1.loader_cls = arg_4\n break\n else:\n raise ImproperlyConfigured(\n (\n \"Program {} has no loader class registered.\"\n \"Check PROGRAM_LOADERS or PROGRAM_DIRS\"\n ).format(arg_1.path)\n )"} +{"_id": "doc_8591", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Encode a text using arithmetic coding with the provided probabilities.\n\n This is a wrapper for :py:meth:`Arithmetic.encode`.\n\n Parameters\n ----------\n text : str\n A string to encode\n probs : dict\n A probability statistics dictionary generated by\n :py:meth:`Arithmetic.train`\n\n Returns\n -------\n tuple\n The arithmetically coded text\n\n Example\n -------\n >>> pr = ac_train('the quick brown fox jumped over the lazy dog')\n >>> Func('align', pr)\n (16720586181, 34)\n\n \"\"\"\n arg_2 = Arithmetic()\n arg_2.set_probs(arg_1)\n return arg_2.encode(arg_0)"} +{"_id": "doc_8592", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"Generate a probability dict from the provided text.\n\n Text to 0-order probability statistics as a dict\n\n Parameters\n ----------\n text : str\n The text data over which to calculate probability statistics. 
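# Editor's note: the train step in doc_8592 turns symbol counts into
# half-open cumulative Fraction intervals partitioning [0, 1) -- the lookup
# table behind the arithmetic coder. A condensed sketch of the same
# construction:
from collections import Counter
from fractions import Fraction

def build_ranges(text):
    counts = Counter(text)
    counts['\x00'] = 1  # end-of-data sentinel, as in the corpus version
    total = sum(counts.values())
    ranges, low, running = {}, Fraction(0), 0
    for ch, n in sorted(counts.items(), key=lambda kv: (kv[1], kv[0]),
                        reverse=True):
        high = Fraction(running + n, total)
        ranges[ch] = (low, high)
        low, running = high, running + n
    return ranges

r = build_ranges('banana')  # 7 symbols counting the sentinel
assert r['a'] == (Fraction(0), Fraction(3, 7))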
This\n must not contain the NUL (0x00) character because that is used to\n indicate the end of data.\n\n Example\n -------\n >>> ac = Arithmetic()\n >>> ac.Func('the quick brown fox jumped over the lazy dog')\n >>> ac.get_probs()\n {' ': (Fraction(0, 1), Fraction(8, 45)),\n 'o': (Fraction(8, 45), Fraction(4, 15)),\n 'e': (Fraction(4, 15), Fraction(16, 45)),\n 'u': (Fraction(16, 45), Fraction(2, 5)),\n 't': (Fraction(2, 5), Fraction(4, 9)),\n 'r': (Fraction(4, 9), Fraction(22, 45)),\n 'h': (Fraction(22, 45), Fraction(8, 15)),\n 'd': (Fraction(8, 15), Fraction(26, 45)),\n 'z': (Fraction(26, 45), Fraction(3, 5)),\n 'y': (Fraction(3, 5), Fraction(28, 45)),\n 'x': (Fraction(28, 45), Fraction(29, 45)),\n 'w': (Fraction(29, 45), Fraction(2, 3)),\n 'v': (Fraction(2, 3), Fraction(31, 45)),\n 'q': (Fraction(31, 45), Fraction(32, 45)),\n 'p': (Fraction(32, 45), Fraction(11, 15)),\n 'n': (Fraction(11, 15), Fraction(34, 45)),\n 'm': (Fraction(34, 45), Fraction(7, 9)),\n 'l': (Fraction(7, 9), Fraction(4, 5)),\n 'k': (Fraction(4, 5), Fraction(37, 45)),\n 'j': (Fraction(37, 45), Fraction(38, 45)),\n 'i': (Fraction(38, 45), Fraction(13, 15)),\n 'g': (Fraction(13, 15), Fraction(8, 9)),\n 'f': (Fraction(8, 9), Fraction(41, 45)),\n 'c': (Fraction(41, 45), Fraction(14, 15)),\n 'b': (Fraction(14, 15), Fraction(43, 45)),\n 'a': (Fraction(43, 45), Fraction(44, 45)),\n '\\x00': (Fraction(44, 45), Fraction(1, 1))}\n\n \"\"\"\n arg_1 = text_type(arg_1)\n if '\\x00' in arg_1:\n arg_1 = arg_1.replace('\\x00', ' ')\n arg_2 = Counter(arg_1)\n arg_2['\\x00'] = 1\n arg_3 = sum(arg_2.values())\n\n arg_4 = 0\n arg_0._probs = {}\n arg_6 = Fraction(0)\n for arg_7, arg_8 in sorted(\n arg_2.items(), key=lambda x: (x[1], x[0]), reverse=True\n ):\n arg_9 = Fraction(arg_4 + arg_8, arg_3)\n arg_0._probs[arg_7] = (arg_6, arg_9)\n arg_6 = arg_9\n arg_4 = arg_4 + arg_8"} +{"_id": "doc_8593", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3='_START_', arg_4='_END_'):\n r\"\"\"Fill in self.ngcorpus from a Corpus argument.\n\n Parameters\n ----------\n corpus :Corpus\n The Corpus from which to initialize the n-gram corpus\n n_val : int\n Maximum n value for n-grams\n bos : str\n String to insert as an indicator of beginning of sentence\n eos : str\n String to insert as an indicator of end of sentence\n\n Raises\n ------\n TypeError\n Corpus argument of the Corpus class required.\n\n Example\n -------\n >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n >>> ngcorp = NGramCorpus()\n >>> ngcorp.Func(Corpus(tqbf))\n\n \"\"\"\n if not arg_1 or not isinstance(arg_1, Corpus):\n raise TypeError('Corpus argument of the Corpus class required.')\n\n arg_5 = arg_1.sents()\n\n for arg_6 in arg_5:\n arg_7 = Counter(arg_6)\n for arg_8 in arg_7.keys():\n arg_0._add_to_ngcorpus(arg_0.ngcorpus, [arg_8], arg_7[arg_8])\n\n if arg_2 > 1:\n if arg_3 and arg_3 != '':\n arg_6 = [arg_3] + arg_6\n if arg_4 and arg_4 != '':\n arg_6 += [arg_4]\n for arg_9 in range(2, arg_2 + 1):\n for arg_10 in range(len(arg_6) - arg_9 + 1):\n arg_0._add_to_ngcorpus(\n arg_0.ngcorpus, arg_6[arg_10 : arg_10 + arg_9], 1\n )"} +{"_id": "doc_8594", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Build up a corpus entry recursively.\n\n Parameters\n ----------\n corpus : Corpus\n The corpus\n words : [str]\n Words to add to the corpus\n count : int\n Count of words\n\n \"\"\"\n if arg_2[0] not in arg_1:\n arg_1[arg_2[0]] = Counter()\n\n if len(arg_2) == 1:\n arg_1[arg_2[0]][None] += 
arg_3\n else:\n arg_0.Func(arg_1[arg_2[0]], arg_2[1:], arg_3)"} +{"_id": "doc_8595", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Fill in self.ngcorpus from a Google NGram corpus file.\n\n Parameters\n ----------\n corpus_file : file\n The Google NGram file from which to initialize the n-gram corpus\n\n \"\"\"\n with c_open(arg_1, 'r', encoding='utf-8') as gng:\n for arg_2 in gng:\n arg_2 = arg_2.rstrip().split('\\t')\n arg_3 = arg_2[0].split()\n\n arg_0._add_to_ngcorpus(arg_0.ngcorpus, arg_3, int(arg_2[2]))"} +{"_id": "doc_8596", "title": "", "text": "def Func(arg_0, arg_1):\n r\"\"\"Return term frequency.\n\n Parameters\n ----------\n term : str\n The term for which to calculate Func\n\n Returns\n -------\n float\n The term frequency (Func)\n\n Raises\n ------\n ValueError\n Func can only calculate the frequency of individual words\n\n Examples\n --------\n >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n >>> ngcorp = NGramCorpus(Corpus(tqbf))\n >>> NGramCorpus(Corpus(tqbf)).Func('the')\n 1.3010299956639813\n >>> NGramCorpus(Corpus(tqbf)).Func('fox')\n 1.0\n\n \"\"\"\n if ' ' in arg_1:\n raise ValueError(\n 'Func can only calculate the term frequency of individual words'\n )\n arg_2 = arg_0.get_count(arg_1)\n if arg_2 == 0:\n return 0.0\n return 1 + log10(arg_2)"} +{"_id": "doc_8597", "title": "", "text": "def Func(arg_0, arg_1, arg_2='\\0'):\n r\"\"\"Return a word Funcd from BWT form.\n\n Parameters\n ----------\n code : str\n The word to transform from BWT form\n terminator : str\n A character added to signal the end of the string\n\n Returns\n -------\n str\n Word Funcd by BWT\n\n Raises\n ------\n ValueError\n Specified terminator absent from code.\n\n Examples\n --------\n >>> bwt = BWT()\n >>> bwt.Func('n\\x00ilag')\n 'align'\n >>> bwt.Func('annb\\x00aa')\n 'banana'\n >>> bwt.Func('annb@aa', '@')\n 'banana'\n\n \"\"\"\n if arg_1:\n if arg_2 not in arg_1:\n raise ValueError(\n 'Specified terminator, {}, absent from code.'.format(\n arg_2 if arg_2 != '\\0' else '\\\\0'\n )\n )\n else:\n arg_3 = [''] * len(arg_1)\n for arg_4 in range(len(arg_1)):\n arg_3 = sorted(\n arg_1[arg_4] + arg_3[arg_4] for arg_4 in range(len(arg_1))\n )\n arg_5 = [w for w in arg_3 if w[-1] == arg_2][0]\n return arg_5.rstrip(arg_2)\n else:\n return ''"} +{"_id": "doc_8598", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the indel distance between two strings.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n int\n Indel distance\n\n Examples\n --------\n >>> cmp = Indel()\n >>> cmp.Func('cat', 'hat')\n 2\n >>> cmp.Func('Niall', 'Neil')\n 3\n >>> cmp.Func('Colin', 'Cuilen')\n 5\n >>> cmp.Func('ATCG', 'TAGC')\n 4\n\n \"\"\"\n return arg_0._lev.Func(\n arg_1, arg_2, mode='lev', cost=(1, 1, 9999, 9999)\n )"} +{"_id": "doc_8599", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the normalized indel Funcance between two strings.\n\n This is equivalent to normalized Levenshtein Funcance, when only\n inserts and deletes are possible.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Normalized indel Funcance\n\n Examples\n --------\n >>> cmp = Indel()\n >>> round(cmp.Func('cat', 'hat'), 12)\n 0.333333333333\n >>> round(cmp.Func('Niall', 'Neil'), 12)\n 0.333333333333\n >>> round(cmp.Func('Colin', 'Cuilen'), 12)\n 
0.454545454545\n >>> cmp.Func('ATCG', 'TAGC')\n 0.5\n\n \"\"\"\n if arg_1 == arg_2:\n return 0.0\n return arg_0.Func_abs(arg_1, arg_2) / (len(arg_1) + len(arg_2))"} +{"_id": "doc_8600", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3, **arg_4):\n \"\"\"Return Funcilarity.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n *args\n Variable length argument list.\n **kwargs\n Arbitrary keyword arguments.\n\n Returns\n -------\n float\n Similarity\n\n \"\"\"\n return 1.0 - arg_0.dist(arg_1, arg_2, *arg_3, **arg_4)"} +{"_id": "doc_8601", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2, arg_3=1, arg_4=1, arg_5=None):\n \"\"\"Return the Tversky distance between two strings.\n\n This is a wrapper for :py:meth:`Tversky.dist`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram; 0 for non-q-gram version\n alpha : float\n Tversky index parameter as described above\n beta : float\n Tversky index parameter as described above\n bias : float\n The symmetric Tversky index bias parameter\n\n Returns\n -------\n float\n Tversky distance\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 0.6666666666666667\n >>> Func('Niall', 'Neil')\n 0.7777777777777778\n >>> Func('aluminum', 'Catalan')\n 0.9375\n >>> Func('ATCG', 'TAGC')\n 1.0\n\n \"\"\"\n return Tversky().dist(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5)"} +{"_id": "doc_8602", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the longest common subsequence of two strings.\n\n Based on the dynamic programming algorithm from\n http://rosettacode.org/wiki/Longest_common_subsequence\n :cite:`rosettacode:2018b`. 
This is licensed GFDL 1.2.\n\n Modifications include:\n conversion to a numpy array in place of a list of lists\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n str\n The longest common subsequence\n\n Examples\n --------\n >>> sseq = LCSseq()\n >>> sseq.Func('cat', 'hat')\n 'at'\n >>> sseq.Func('Niall', 'Neil')\n 'Nil'\n >>> sseq.Func('aluminum', 'Catalan')\n 'aln'\n >>> sseq.Func('ATCG', 'TAGC')\n 'AC'\n\n \"\"\"\n arg_3 = np_zeros((len(arg_1) + 1, len(arg_2) + 1), dtype=np_int)\n\n # row 0 and column 0 are initialized to 0 already\n for arg_4, arg_5 in enumerate(arg_1):\n for arg_6, arg_7 in enumerate(arg_2):\n if arg_5 == arg_7:\n arg_3[arg_4 + 1, arg_6 + 1] = arg_3[arg_4, arg_6] + 1\n else:\n arg_3[arg_4 + 1, arg_6 + 1] = max(\n arg_3[arg_4 + 1, arg_6], arg_3[arg_4, arg_6 + 1]\n )\n\n # read the substring out from the matrix\n arg_8 = ''\n arg_4, arg_6 = len(arg_1), len(arg_2)\n while arg_4 != 0 and arg_6 != 0:\n if arg_3[arg_4, arg_6] == arg_3[arg_4 - 1, arg_6]:\n arg_4 -= 1\n elif arg_3[arg_4, arg_6] == arg_3[arg_4, arg_6 - 1]:\n arg_6 -= 1\n else:\n arg_8 = arg_1[arg_4 - 1] + arg_8\n arg_4 -= 1\n arg_6 -= 1\n return arg_8"} +{"_id": "doc_8603", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the prefix Funcilarity of two strings.\n\n Prefix Funcilarity is the ratio of the length of the shorter term that\n exactly matches the longer term to the length of the shorter term,\n beginning at the start of both terms.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Prefix Funcilarity\n\n Examples\n --------\n >>> cmp = Prefix()\n >>> cmp.Func('cat', 'hat')\n 0.0\n >>> cmp.Func('Niall', 'Neil')\n 0.25\n >>> cmp.Func('aluminum', 'Catalan')\n 0.0\n >>> cmp.Func('ATCG', 'TAGC')\n 0.0\n\n \"\"\"\n if arg_1 == arg_2:\n return 1.0\n if not arg_1 or not arg_2:\n return 0.0\n arg_3, arg_4 = (arg_1, arg_2) if len(arg_1) < len(arg_2) else (arg_2, arg_1)\n arg_5 = len(arg_3)\n for arg_6 in range(arg_5, 0, -1):\n if arg_3[:arg_6] == arg_4[:arg_6]:\n return arg_6 / arg_5\n return 0.0"} +{"_id": "doc_8604", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return the Func corpus.\n\n This is reconstructed by joining sub-components with the corpus' split\n characters\n\n Returns\n -------\n str\n The Func corpus\n\n Example\n -------\n >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n >>> corp = Corpus(tqbf)\n >>> print(corp.Func())\n The quick brown fox jumped over the lazy dog.\n And then it slept.\n And the dog ran off.\n >>> len(corp.Func())\n 85\n\n \"\"\"\n arg_1 = []\n for arg_2 in arg_0.corpus:\n arg_3 = []\n for arg_4 in arg_2:\n arg_3.append(' '.join(arg_4))\n arg_1.append(arg_0.sent_split.join(arg_3))\n del arg_3\n return arg_0.doc_split.join(arg_1)"} +{"_id": "doc_8605", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the best guess language ID for the word and language choices.\n\n Parameters\n ----------\n name : str\n The term to guess the language of\n name_mode : str\n The name mode of the algorithm: ``gen`` (default),\n ``ash`` (Ashkenazi), or ``sep`` (Sephardic)\n\n Returns\n -------\n int\n Language ID\n\n \"\"\"\n arg_1 = arg_1.strip().lower()\n arg_3 = BMDATA[arg_2]['language_rules']\n arg_4 = (\n sum(_LANG_DICT[_] for _ in BMDATA[arg_2]['languages']) - 1\n )\n arg_5 = arg_4\n for arg_6 in 
arg_3:\n arg_7, arg_8, arg_9 = arg_6\n if search(arg_7, arg_1) is not None:\n if arg_9:\n arg_5 &= arg_8\n else:\n arg_5 &= (~arg_8) % (arg_4 + 1)\n if arg_5 == L_NONE:\n arg_5 = L_ANY\n return arg_5"} +{"_id": "doc_8606", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6\n ):\n \"\"\"Reassess the language of the terms and call the phonetic encoder.\n\n Uses a split multi-word term.\n\n Parameters\n ----------\n term : str\n The term to encode via Beider-Morse\n name_mode : str\n The name mode of the algorithm: ``gen`` (default),\n ``ash`` (Ashkenazi), or ``sep`` (Sephardic)\n rules : tuple\n The set of initial phonetic transform regexps\n final_rules1 : tuple\n The common set of final phonetic transform regexps\n final_rules2 : tuple\n The specific set of final phonetic transform regexps\n concat : bool\n A flag to indicate concatenation\n\n Returns\n -------\n str\n A Beider-Morse phonetic code\n\n \"\"\"\n arg_7 = arg_0._language(arg_1, arg_2)\n return arg_0._phonetic(\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_7,\n arg_6,\n )"} +{"_id": "doc_8607", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"Apply a set of final rules to the phonetic encoding.\n\n Parameters\n ----------\n phonetic : str\n The term to which to apply the final rules\n final_rules : tuple\n The set of final phonetic transform regexps\n language_arg : int\n An integer representing the target language of the phonetic\n encoding\n strip : bool\n Flag to indicate whether to normalize the language attributes\n\n Returns\n -------\n str\n A Beider-Morse phonetic code\n\n \"\"\"\n # optimization to save time\n if not arg_2:\n return arg_1\n\n # expand the result\n arg_1 = arg_0._expand_alternates(arg_1)\n arg_5 = arg_1.split('|')\n\n for arg_6 in range(len(arg_5)):\n arg_1 = arg_5[arg_6]\n arg_7 = ''\n arg_8 = arg_0._normalize_lang_attrs(arg_1, True)\n\n arg_9 = 0\n while arg_9 < len(arg_1):\n arg_10 = False\n\n if arg_1[arg_9] == '[': # skip over language attribute\n arg_11 = arg_9\n arg_9 += 1\n while True:\n if arg_1[arg_9] == ']':\n arg_9 += 1\n arg_7 += arg_1[arg_11:arg_9]\n break\n arg_9 += 1\n continue\n\n for arg_12 in arg_2:\n arg_13 = arg_12[_PATTERN_POS]\n arg_14 = len(arg_13)\n arg_15 = arg_12[_LCONTEXT_POS]\n arg_16 = arg_12[_RCONTEXT_POS]\n\n arg_17 = '^' + arg_16\n arg_18 = arg_15 + '$'\n\n # check to see if next sequence in phonetic matches the\n # string in the rule\n if (arg_14 > len(arg_8) - arg_9) or arg_8[\n arg_9 : arg_9 + arg_14\n ] != arg_13:\n continue\n\n # check that right context is satisfied\n if arg_16 != '':\n if not search(arg_17, arg_8[arg_9 + arg_14 :]):\n continue\n\n # check that left context is satisfied\n if arg_15 != '':\n if not search(arg_18, arg_8[:arg_9]):\n continue\n\n # check for incompatible attributes\n arg_19 = arg_0._apply_rule_if_compat(\n arg_7, arg_12[_PHONETIC_POS], arg_3\n )\n # The below condition shouldn't ever be false\n if arg_19 is not None: # pragma: no branch\n arg_7 = arg_19\n arg_10 = True\n break\n\n if not arg_10:\n # character in name for which there is no substitution in\n # the table\n arg_7 += arg_1[arg_9]\n arg_14 = 1\n\n arg_9 += arg_14\n\n arg_5[arg_6] = arg_0._expand_alternates(arg_7)\n\n arg_1 = '|'.join(arg_5)\n if arg_4:\n arg_1 = arg_0._normalize_lang_attrs(arg_1, True)\n\n if '|' in arg_1:\n arg_1 = '(' + arg_0._remove_dupes(arg_1) + ')'\n\n return arg_1"} +{"_id": "doc_8608", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Expand phonetic alternates separated by |s.\n\n 
Parameters\n ----------\n phonetic : str\n A Beider-Morse phonetic encoding\n\n Returns\n -------\n str\n A Beider-Morse phonetic code\n\n \"\"\"\n arg_2 = arg_1.find('(')\n if arg_2 == -1:\n return arg_0._normalize_lang_attrs(arg_1, False)\n\n arg_3 = arg_1[:arg_2]\n arg_2 += 1 # get past the (\n arg_4 = arg_1.find(')', arg_2)\n arg_5 = arg_1[arg_2:arg_4]\n arg_4 += 1 # get past the )\n arg_6 = arg_1[arg_4:]\n arg_7 = arg_5.split('|')\n arg_8 = ''\n\n for arg_9 in range(len(arg_7)):\n arg_10 = arg_7[arg_9]\n arg_11 = arg_0.Func(arg_3 + arg_10 + arg_6)\n if arg_11 != '' and arg_11 != '[0]':\n if arg_8 != '':\n arg_8 += '|'\n arg_8 += arg_11\n\n return arg_8"} +{"_id": "doc_8609", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Remove duplicates from a phonetic encoding list.\n\n Parameters\n ----------\n phonetic : str\n A Beider-Morse phonetic encoding\n\n Returns\n -------\n str\n A Beider-Morse phonetic code\n\n \"\"\"\n arg_2 = arg_1\n arg_3 = arg_2.split('|')\n\n arg_4 = '|'\n for arg_5 in range(len(arg_3)):\n arg_6 = arg_3[arg_5]\n if arg_6 and '|' + arg_6 + '|' not in arg_4:\n arg_4 += arg_6 + '|'\n\n return arg_4[1:-1]"} +{"_id": "doc_8610", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Remove embedded bracketed attributes.\n\n This (potentially) bitwise-ands bracketed attributes together and adds\n to the end.\n This is applied to a single alternative at a time -- not to a\n parenthesized list.\n It removes all embedded bracketed attributes, logically-ands them\n together, and places them at the end.\n However if strip is true, this can indeed remove embedded bracketed\n attributes from a parenthesized list.\n\n Parameters\n ----------\n text : str\n A Beider-Morse phonetic encoding (in progress)\n strip : bool\n Remove the bracketed attributes (and throw away)\n\n Returns\n -------\n str\n A Beider-Morse phonetic code\n\n Raises\n ------\n ValueError\n No closing square bracket\n\n \"\"\"\n arg_3 = -1 # all 1's\n arg_4 = arg_3\n while '[' in arg_1:\n arg_5 = arg_1.find('[')\n arg_6 = arg_1.find(']', arg_5)\n if arg_6 == -1:\n raise ValueError(\n 'No closing square bracket: text=('\n + arg_1\n + ') strip=('\n + text_type(arg_2)\n + ')'\n )\n arg_4 &= int(arg_1[arg_5 + 1 : arg_6])\n arg_1 = arg_1[:arg_5] + arg_1[arg_6 + 1 :]\n\n if arg_4 == arg_3 or arg_2:\n return arg_1\n elif arg_4 == 0:\n # means that the attributes were incompatible and there is no\n # alternative here\n return '[0]'\n return arg_1 + '[' + str(arg_4) + ']'"} +{"_id": "doc_8611", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Apply a phonetic regex if compatible.\n\n tests for compatible language rules\n\n to do so, apply the rule, expand the results, and detect alternatives\n with incompatible attributes\n\n then drop each alternative that has incompatible attributes and keep\n those that are compatible\n\n if there are no compatible alternatives left, return false\n\n otherwise return the compatible alternatives\n\n apply the rule\n\n Parameters\n ----------\n phonetic : str\n The Beider-Morse phonetic encoding (so far)\n target : str\n A proposed addition to the phonetic encoding\n language_arg : int\n An integer representing the target language of the phonetic\n encoding\n\n Returns\n -------\n str\n A candidate encoding\n\n \"\"\"\n arg_4 = arg_1 + arg_2\n if '[' not in arg_4: # no attributes so we need test no further\n return arg_4\n\n # expand the result, converting incompatible attributes to [0]\n arg_4 = arg_0._expand_alternates(arg_4)\n arg_5 = 
arg_4.split('|')\n\n # drop each alternative that has incompatible attributes\n arg_4 = ''\n arg_6 = False\n\n for arg_7 in range(len(arg_5)):\n arg_8 = arg_5[arg_7]\n if arg_3 != 1:\n arg_8 = arg_0._normalize_lang_attrs(\n arg_8 + '[' + str(arg_3) + ']', False\n )\n if arg_8 != '[0]':\n arg_6 = True\n if arg_4:\n arg_4 += '|'\n arg_4 += arg_8\n\n # return false if no compatible alternatives remain\n if not arg_6:\n return None\n\n # return the result of applying the rule\n if '|' in arg_4:\n arg_4 = '(' + arg_4 + ')'\n return arg_4"} +{"_id": "doc_8612", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the index value for a language code.\n\n This returns l_any if more than one code is specified or the code is\n out of bounds.\n\n Parameters\n ----------\n code : int\n The language code to interpret\n name_mode : str\n The name mode of the algorithm: ``gen`` (default),\n ``ash`` (Ashkenazi), or ``sep`` (Sephardic)\n\n Returns\n -------\n int\n Language code index\n\n \"\"\"\n if arg_1 < 1 or arg_1 > sum(\n _LANG_DICT[arg_3] for arg_3 in BMDATA[arg_2]['languages']\n ): # code out of range\n return L_ANY\n if (\n arg_1 & (arg_1 - 1)\n ) != 0: # choice was more than one language; use any\n return L_ANY\n return arg_1"} +{"_id": "doc_8613", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the strcmp95 distance between two strings.\n\n This is a wrapper for :py:meth:`Strcmp95.dist`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n long_strings : bool\n Set to True to increase the probability of a match when the number of\n matched characters is large. This option allows for a little more\n tolerance when the strings are large. It is not an appropriate test\n when comparing fixed length fields such as phone and social security\n numbers.\n\n Returns\n -------\n float\n Strcmp95 distance\n\n Examples\n --------\n >>> round(Func('cat', 'hat'), 12)\n 0.222222222222\n >>> round(Func('Niall', 'Neil'), 12)\n 0.1545\n >>> round(Func('aluminum', 'Catalan'), 12)\n 0.345238095238\n >>> round(Func('ATCG', 'TAGC'), 12)\n 0.166666666667\n\n \"\"\"\n return Strcmp95().dist(arg_0, arg_1, arg_2)"} +{"_id": "doc_8614", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the Naval Research Laboratory phonetic encoding of a word.\n\n Parameters\n ----------\n word : str\n The word to transform\n\n Returns\n -------\n str\n The NRL phonetic encoding\n\n Examples\n --------\n >>> pe = NRL()\n >>> pe.Func('the')\n 'DHAX'\n >>> pe.Func('round')\n 'rAWnd'\n >>> pe.Func('quick')\n 'kwIHk'\n >>> pe.Func('eaten')\n 'IYtEHn'\n >>> pe.Func('Smith')\n 'smIHTH'\n >>> pe.Func('Larsen')\n 'lAArsEHn'\n\n \"\"\"\n\n def _to_regex(arg_2, arg_3=True):\n arg_4 = ''\n arg_5 = {\n '#': '[AEIOU]+',\n ':': '[BCDFGHJKLMNPQRSTVWXYZ]*',\n '^': '[BCDFGHJKLMNPQRSTVWXYZ]',\n '.': '[BDVGJLMNTWZ]',\n '%': '(ER|E|ES|ED|ING|ELY)',\n '+': '[EIY]',\n ' ': '^',\n }\n for arg_6 in arg_2:\n arg_4 += (\n arg_5[arg_6] if arg_6 in arg_5 else arg_6\n )\n\n if arg_3:\n arg_4 += '$'\n if '^' not in arg_2:\n arg_4 = '^.*' + arg_4\n else:\n arg_4 = '^' + arg_4.replace('^', '$')\n if '$' not in arg_4:\n arg_4 += '.*$'\n\n return arg_4\n\n arg_1 = arg_1.upper()\n\n arg_7 = ''\n arg_8 = 0\n while arg_8 < len(arg_1):\n arg_9 = arg_1[:arg_8]\n arg_10 = arg_1[arg_8:]\n arg_11 = arg_1[arg_8] if arg_1[arg_8] in arg_0._rules else ' '\n for arg_12 in arg_0._rules[arg_11]:\n left, match, right, out = arg_12\n if arg_10.startswith(match):\n if left:\n 
l_pattern = _to_regex(left, arg_3=True)\n if right:\n r_pattern = _to_regex(right, arg_3=False)\n if (not left or re_match(l_pattern, arg_9)) and (\n not right\n or re_match(r_pattern, arg_10[len(match) :])\n ):\n arg_7 += out\n arg_8 += len(match)\n break\n else:\n arg_7 += arg_1[arg_8]\n arg_8 += 1\n\n return arg_7"} +{"_id": "doc_8615", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the longest common substring of two strings.\n\n Longest common substring (LCSstr).\n\n Based on the code from\n https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring\n :cite:`Wikibooks:2018`.\n This is licensed Creative Commons: Attribution-ShareAlike 3.0.\n\n Modifications include:\n\n - conversion to a numpy array in place of a list of lists\n - conversion to Python 2/3-safe range from xrange via six\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n str\n The longest common substring\n\n Examples\n --------\n >>> sstr = LCSstr()\n >>> sstr.Func('cat', 'hat')\n 'at'\n >>> sstr.Func('Niall', 'Neil')\n 'N'\n >>> sstr.Func('aluminum', 'Catalan')\n 'al'\n >>> sstr.Func('ATCG', 'TAGC')\n 'A'\n\n \"\"\"\n arg_3 = np_zeros((len(arg_1) + 1, len(arg_2) + 1), dtype=np_int)\n arg_4, arg_5 = 0, 0\n for arg_6 in range(1, len(arg_1) + 1):\n for arg_7 in range(1, len(arg_2) + 1):\n if arg_1[arg_6 - 1] == arg_2[arg_7 - 1]:\n arg_3[arg_6, arg_7] = arg_3[arg_6 - 1, arg_7 - 1] + 1\n if arg_3[arg_6, arg_7] > arg_4:\n arg_4 = arg_3[arg_6, arg_7]\n arg_5 = arg_6\n else:\n arg_3[arg_6, arg_7] = 0\n return arg_1[arg_5 - arg_4 : arg_5]"} +{"_id": "doc_8616", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n r\"\"\"Return the longest common substring Funcilarity of two strings.\n\n Longest common substring Funcilarity (:math:`Func_{LCSstr}`).\n\n This employs the LCS function to derive a Funcilarity metric:\n :math:`Func_{LCSstr}(s,t) = \\frac{|LCSstr(s,t)|}{max(|s|, |t|)}`\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n LCSstr Funcilarity\n\n Examples\n --------\n >>> Func_lcsstr('cat', 'hat')\n 0.6666666666666666\n >>> Func_lcsstr('Niall', 'Neil')\n 0.2\n >>> Func_lcsstr('aluminum', 'Catalan')\n 0.25\n >>> Func_lcsstr('ATCG', 'TAGC')\n 0.25\n\n \"\"\"\n if arg_1 == arg_2:\n return 1.0\n elif not arg_1 or not arg_2:\n return 0.0\n return len(arg_0.lcsstr(arg_1, arg_2)) / max(len(arg_1), len(arg_2))"} +{"_id": "doc_8617", "title": "", "text": "def Func(arg_0, arg_1, arg_2=1, arg_3=arg_4):\n \"\"\"Return the Needleman-Wunsch score of two strings.\n\n This is a wrapper for :py:meth:`NeedlemanWunsch.dist_abs`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n gap_cost : float\n The cost of an alignment gap (1 by default)\n sim_func : function\n A function that returns the similarity of two characters (identity\n similarity by default)\n\n Returns\n -------\n float\n Needleman-Wunsch score\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 2.0\n >>> Func('Niall', 'Neil')\n 1.0\n >>> Func('aluminum', 'Catalan')\n -1.0\n >>> Func('ATCG', 'TAGC')\n 0.0\n\n \"\"\"\n return NeedlemanWunsch().dist_abs(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8618", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=0,\n arg_4=1,\n arg_5=True,\n arg_6=None,\n ):\n \"\"\"Return the matrix similarity of two strings.\n\n With the 
default parameters, this is identical to sim_ident.\n It is possible for Func to return values outside of the range\n :math:`[0, 1]`, if values outside that range are present in mat,\n mismatch_cost, or match_cost.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n mat : dict\n A dict mapping tuples to costs; the tuples are (src, tar) pairs of\n symbols from the alphabet parameter\n mismatch_cost : float\n The value returned if (src, tar) is absent from mat when src does\n not equal tar\n match_cost : float\n The value returned if (src, tar) is absent from mat when src equals\n tar\n symmetric : bool\n True if the cost of src not matching tar is identical to the cost\n of tar not matching src; in this case, the values in mat need only\n contain (src, tar) or (tar, src), not both\n alphabet : str\n A collection of tokens from which src and tar are drawn; if this is\n defined a ValueError is raised if either tar or src is not found in\n alphabet\n\n Returns\n -------\n float\n Matrix similarity\n\n Raises\n ------\n ValueError\n src value not in alphabet\n ValueError\n tar value not in alphabet\n\n Examples\n --------\n >>> NeedlemanWunsch.Func('cat', 'hat')\n 0\n >>> NeedlemanWunsch.Func('hat', 'hat')\n 1\n\n \"\"\"\n if arg_6:\n arg_6 = tuple(arg_6)\n for arg_7 in arg_0:\n if arg_7 not in arg_6:\n raise ValueError('src value not in alphabet')\n for arg_7 in arg_1:\n if arg_7 not in arg_6:\n raise ValueError('tar value not in alphabet')\n\n if arg_0 == arg_1:\n if arg_2 and (arg_0, arg_0) in arg_2:\n return arg_2[(arg_0, arg_0)]\n return arg_4\n if arg_2 and (arg_0, arg_1) in arg_2:\n return arg_2[(arg_0, arg_1)]\n elif arg_5 and arg_2 and (arg_1, arg_0) in arg_2:\n return arg_2[(arg_1, arg_0)]\n return arg_3"} +{"_id": "doc_8619", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the NCD between two strings using BWT plus RLE.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Compression Funcance\n\n Examples\n --------\n >>> cmp = NCDbwtrle()\n >>> cmp.Func('cat', 'hat')\n 0.75\n >>> cmp.Func('Niall', 'Neil')\n 0.8333333333333334\n >>> cmp.Func('aluminum', 'Catalan')\n 1.0\n >>> cmp.Func('ATCG', 'TAGC')\n 0.8\n\n \"\"\"\n if arg_1 == arg_2:\n return 0.0\n\n arg_3 = arg_0._rle.encode(arg_0._bwt.encode(arg_1))\n arg_4 = arg_0._rle.encode(arg_0._bwt.encode(arg_2))\n arg_5 = arg_0._rle.encode(arg_0._bwt.encode(arg_1 + arg_2))\n arg_6 = arg_0._rle.encode(arg_0._bwt.encode(arg_2 + arg_1))\n\n return (\n min(len(arg_5), len(arg_6))\n - min(len(arg_3), len(arg_4))\n ) / max(len(arg_3), len(arg_4))"} +{"_id": "doc_8620", "title": "", "text": "def Func(arg_0):\n \"\"\"Cast to tuple.\n\n Returns\n -------\n tuple\n The confusion table as a 4-tuple (tp, tn, fp, fn)\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n (120, 60, 20, 30)\n\n \"\"\"\n return arg_0._tp, arg_0._tn, arg_0._fp, arg_0._fn"} +{"_id": "doc_8621", "title": "", "text": "def Func(arg_0):\n \"\"\"Cast to dict.\n\n Returns\n -------\n dict\n The confusion table as a dict\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> import pprint\n >>> pprint.pprint(ct.Func())\n {'fn': 30, 'fp': 20, 'tn': 60, 'tp': 120}\n\n \"\"\"\n return {'tp': arg_0._tp, 'tn': arg_0._tn, 'fp': arg_0._fp, 'fn': arg_0._fn}"} +{"_id": "doc_8622", "title": "", "text": "def Func(arg_0):\n \"\"\"Return Func, N.\n\n Returns\n -------\n 
int\n The Func (N) of the confusion table\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 230\n\n \"\"\"\n return arg_0._tp + arg_0._tn + arg_0._fp + arg_0._fn"} +{"_id": "doc_8623", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return Func.\n\n Precision is defined as :math:`\\frac{tp}{tp + fp}`\n\n AKA positive predictive value (PPV)\n\n Cf. https://en.wikipedia.org/wiki/Precision_and_recall\n\n Cf. https://en.wikipedia.org/wiki/Information_retrieval#Precision\n\n Returns\n -------\n float\n The Func of the confusion table\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 0.8571428571428571\n\n \"\"\"\n if arg_0._tp + arg_0._fp == 0:\n return float('NaN')\n return arg_0._tp / (arg_0._tp + arg_0._fp)"} +{"_id": "doc_8624", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return gain in precision.\n\n The gain in precision is defined as:\n :math:`G(precision) = \\frac{precision}{random~ precision}`\n\n Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)\n\n Returns\n -------\n float\n The gain in precision of the confusion table\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 1.3142857142857143\n\n \"\"\"\n if arg_0.population() == 0:\n return float('NaN')\n arg_1 = arg_0.cond_pos_pop() / arg_0.population()\n return arg_0.precision() / arg_1"} +{"_id": "doc_8625", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return Func.\n\n Recall is defined as :math:`\\frac{tp}{tp + fn}`\n\n AKA sensitivity\n\n AKA true positive rate (TPR)\n\n Cf. https://en.wikipedia.org/wiki/Precision_and_Func\n\n Cf. https://en.wikipedia.org/wiki/Sensitivity_(test)\n\n Cf. https://en.wikipedia.org/wiki/Information_retrieval#Recall\n\n Returns\n -------\n float\n The Func of the confusion table\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 0.8\n\n \"\"\"\n if arg_0._tp + arg_0._fn == 0:\n return float('NaN')\n return arg_0._tp / (arg_0._tp + arg_0._fn)"} +{"_id": "doc_8626", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return Func.\n\n Accuracy is defined as :math:`\\frac{tp + tn}{population}`\n\n Cf. https://en.wikipedia.org/wiki/Accuracy\n\n Returns\n -------\n float\n The Func of the confusion table\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 0.782608695652174\n\n \"\"\"\n if arg_0.population() == 0:\n return float('NaN')\n return (arg_0._tp + arg_0._tn) / arg_0.population()"} +{"_id": "doc_8627", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return gain in accuracy.\n\n The gain in accuracy is defined as:\n :math:`G(accuracy) = \\frac{accuracy}{random~ accuracy}`\n\n Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)\n\n Returns\n -------\n float\n The gain in accuracy of the confusion table\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 1.4325259515570934\n\n \"\"\"\n if arg_0.population() == 0:\n return float('NaN')\n arg_1 = (arg_0.cond_pos_pop() / arg_0.population()) ** 2 + (\n arg_0.cond_neg_pop() / arg_0.population()\n ) ** 2\n return arg_0.accuracy() / arg_1"} +{"_id": "doc_8628", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return logarithmic mean of precision & recall.\n\n The logarithmic mean is:\n 0 if either precision or recall is 0,\n the precision if they are equal,\n otherwise :math:`\\frac{precision - recall}\n {ln(precision) - ln(recall)}`\n\n Cf. 
https://en.wikipedia.org/wiki/Logarithmic_mean\n\n Returns\n -------\n float\n The logarithmic mean of the confusion table's precision & recall\n\n Example\n -------\n >>> ct = ConfusionTable(120, 60, 20, 30)\n >>> ct.Func()\n 0.8282429171492667\n\n \"\"\"\n arg_1 = arg_0.precision()\n arg_2 = arg_0.recall()\n if not arg_1 or not arg_2:\n return 0.0\n elif arg_1 == arg_2:\n return arg_1\n return (arg_1 - arg_2) / (math.log(arg_1) - math.log(arg_2))"} +{"_id": "doc_8629", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return CLEF German Func.\n\n Parameters\n ----------\n word : str\n The word to Func\n\n Returns\n -------\n str\n Word Func\n\n Examples\n --------\n >>> stmr = CLEFGerman()\n >>> stmr.Func('lesen')\n 'lese'\n >>> stmr.Func('graues')\n 'grau'\n >>> stmr.Func('buchstabieren')\n 'buchstabier'\n\n \"\"\"\n # lowercase, normalize, and compose\n arg_1 = normalize('NFC', text_type(arg_1.lower()))\n\n # remove umlauts\n arg_1 = arg_1.translate(arg_0._umlauts)\n\n # remove plurals\n arg_2 = len(arg_1) - 1\n\n if arg_2 > 3:\n if arg_2 > 5:\n if arg_1[-3:] == 'nen':\n return arg_1[:-3]\n if arg_2 > 4:\n if arg_1[-2:] in {'en', 'se', 'es', 'er'}:\n return arg_1[:-2]\n if arg_1[-1] in {'e', 'n', 'r', 's'}:\n return arg_1[:-1]\n return arg_1"} +{"_id": "doc_8630", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=5):\n \"\"\"Return the \"simplest\" Sift4 distance between two terms.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n max_offset : int\n The number of characters to search for matching letters\n\n Returns\n -------\n int\n The Sift4 distance according to the simplest formula\n\n Examples\n --------\n >>> cmp = Sift4Simplest()\n >>> cmp.Func('cat', 'hat')\n 1\n >>> cmp.Func('Niall', 'Neil')\n 2\n >>> cmp.Func('Colin', 'Cuilen')\n 3\n >>> cmp.Func('ATCG', 'TAGC')\n 2\n\n \"\"\"\n if not arg_1:\n return len(arg_2)\n\n if not arg_2:\n return len(arg_1)\n\n arg_4 = len(arg_1)\n arg_5 = len(arg_2)\n\n arg_6 = 0\n arg_7 = 0\n arg_8 = 0\n arg_9 = 0\n\n while (arg_6 < arg_4) and (arg_7 < arg_5):\n if arg_1[arg_6] == arg_2[arg_7]:\n arg_9 += 1\n else:\n arg_8 += arg_9\n arg_9 = 0\n if arg_6 != arg_7:\n arg_6 = arg_7 = max(arg_6, arg_7)\n for arg_10 in range(arg_3):\n if not (\n (arg_6 + arg_10 < arg_4) or (arg_7 + arg_10 < arg_5)\n ):\n break\n if (arg_6 + arg_10 < arg_4) and (\n arg_1[arg_6 + arg_10] == arg_2[arg_7]\n ):\n arg_6 += arg_10\n arg_9 += 1\n break\n if (arg_7 + arg_10 < arg_5) and (\n arg_1[arg_6] == arg_2[arg_7 + arg_10]\n ):\n arg_7 += arg_10\n arg_9 += 1\n break\n\n arg_6 += 1\n arg_7 += 1\n\n arg_8 += arg_9\n return round(max(arg_4, arg_5) - arg_8)"} +{"_id": "doc_8631", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2='euclidean', arg_3=(1, 1, 0.5, 0.5), arg_4='QWERTY'\n):\n \"\"\"Return the normalized typo similarity between two strings.\n\n This is a wrapper for :py:meth:`Typo.sim`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n metric : str\n Supported values include: ``euclidean``, ``manhattan``,\n ``log-euclidean``, and ``log-manhattan``\n cost : tuple\n A 4-tuple representing the cost of the four possible edits: inserts,\n deletes, substitutions, and shift, respectively (by default:\n (1, 1, 0.5, 0.5)) The substitution & shift costs should be\n significantly less than the cost of an insertion & deletion unless a\n log metric is used.\n layout : str\n Name of the keyboard layout to use (Currently supported:\n 
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)\n\n Returns\n -------\n float\n Normalized typo similarity\n\n Examples\n --------\n >>> round(Func('cat', 'hat'), 12)\n 0.472953716914\n >>> round(Func('Niall', 'Neil'), 12)\n 0.434971857071\n >>> round(Func('Colin', 'Cuilen'), 12)\n 0.430964390437\n >>> Func('ATCG', 'TAGC')\n 0.375\n\n \"\"\"\n return Typo().sim(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_8632", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2, arg_3=None):\n \"\"\"Return the normalized Manhattan distance between two strings.\n\n This is a wrapper for :py:meth:`Manhattan.dist`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram; 0 for non-q-gram version\n alphabet : collection or int\n The values or size of the alphabet\n\n Returns\n -------\n float\n The normalized Manhattan distance\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 0.5\n >>> round(Func('Niall', 'Neil'), 12)\n 0.636363636364\n >>> round(Func('Colin', 'Cuilen'), 12)\n 0.692307692308\n >>> Func('ATCG', 'TAGC')\n 1.0\n\n \"\"\"\n return Manhattan().dist(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8633", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2, arg_3=None):\n \"\"\"Return the normalized Manhattan similarity of two strings.\n\n This is a wrapper for :py:meth:`Manhattan.sim`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram; 0 for non-q-gram version\n alphabet : collection or int\n The values or size of the alphabet\n\n Returns\n -------\n float\n The normalized Manhattan similarity\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 0.5\n >>> round(Func('Niall', 'Neil'), 12)\n 0.363636363636\n >>> round(Func('Colin', 'Cuilen'), 12)\n 0.307692307692\n >>> Func('ATCG', 'TAGC')\n 0.0\n\n \"\"\"\n return Manhattan().sim(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8634", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the skeleton key.\n\n Parameters\n ----------\n word : str\n The word to transform into its skeleton key\n\n Returns\n -------\n str\n The skeleton key\n\n Examples\n --------\n >>> sk = SkeletonKey()\n >>> sk.Func('The quick brown fox jumped over the lazy dog.')\n 'THQCKBRWNFXJMPDVLZYGEUIOA'\n >>> sk.Func('Christopher')\n 'CHRSTPIOE'\n >>> sk.Func('Niall')\n 'NLIA'\n\n \"\"\"\n arg_1 = unicode_normalize('NFKD', text_type(arg_1.upper()))\n arg_1 = ''.join(c for c in arg_1 if c in arg_0._letters)\n arg_2 = arg_1[0:1]\n arg_3 = ''\n arg_4 = ''\n\n # add consonants & vowels to separate strings\n # (omitting the first char & duplicates)\n for arg_5 in arg_1[1:]:\n if arg_5 != arg_2:\n if arg_5 in arg_0._vowels:\n if arg_5 not in arg_4:\n arg_4 += arg_5\n elif arg_5 not in arg_3:\n arg_3 += arg_5\n # return the first char followed by consonants followed by vowels\n return arg_2 + arg_3 + arg_4"} +{"_id": "doc_8635", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=arg_3,\n arg_4=arg_5,\n arg_6=False,\n):\n \"\"\"Calculate the pairwise similarity statistics for a collection of strings.\n\n Calculate pairwise similarities among members of two collections,\n returning the maximum, minimum, mean (according to a supplied function,\n arithmetic mean, by default), and (population) standard deviation\n of those similarities.\n\n Parameters\n ----------\n 
src_collection : list\n A collection of terms or a string that can be split\n tar_collection : list\n A collection of terms or a string that can be split\n metric : function\n A similarity metric function\n mean_func : function\n A mean function that takes a list of values and returns a float\n symmetric : bool\n Set to True if all pairwise similarities should be calculated in both\n directions\n\n Returns\n -------\n tuple\n The max, min, mean, and standard deviation of similarities\n\n Raises\n ------\n ValueError\n mean_func must be a function\n ValueError\n metric must be a function\n ValueError\n src_collection is neither a string nor iterable\n ValueError\n tar_collection is neither a string nor iterable\n\n Example\n -------\n >>> tuple(round(_, 12) for _ in Func(\n ... ['Christopher', 'Kristof', 'Christobal'], ['Niall', 'Neal', 'Neil']))\n (0.2, 0.0, 0.118614718615, 0.075070477184)\n\n \"\"\"\n if not callable(arg_4):\n raise ValueError('mean_func must be a function')\n if not callable(arg_2):\n raise ValueError('metric must be a function')\n\n if hasattr(arg_0, 'split'):\n arg_0 = arg_0.split()\n if not hasattr(arg_0, '__iter__'):\n raise ValueError('src_collection is neither a string nor iterable')\n\n if hasattr(arg_1, 'split'):\n arg_1 = arg_1.split()\n if not hasattr(arg_1, '__iter__'):\n raise ValueError('tar_collection is neither a string nor iterable')\n\n arg_0 = list(arg_0)\n arg_1 = list(arg_1)\n\n arg_7 = []\n\n for arg_8 in arg_0:\n for arg_9 in arg_1:\n arg_7.append(arg_2(arg_8, arg_9))\n if arg_6:\n arg_7.append(arg_2(arg_9, arg_8))\n\n return (\n max(arg_7),\n min(arg_7),\n arg_4(arg_7),\n std(arg_7, arg_4, 0),\n )"} +{"_id": "doc_8636", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Return the R2 region, as defined in the Porter2 specification.\n\n Parameters\n ----------\n term : str\n The term to examine\n r1_prefixes : set\n Prefixes to consider\n\n Returns\n -------\n int\n Length of the R1 region\n\n \"\"\"\n arg_3 = arg_0._sb_r1(arg_1, arg_2)\n return arg_3 + arg_0._sb_r1(arg_1[arg_3:])"} +{"_id": "doc_8637", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return True iff term ends in a short syllable.\n\n (...according to the Porter2 specification.)\n\n NB: This is akin to the CVC test from the Porter stemmer. 
The\n description is unfortunately poor/ambiguous.\n\n Parameters\n ----------\n term : str\n The term to examine\n\n Returns\n -------\n bool\n True iff term ends in a short syllable\n\n \"\"\"\n if not arg_1:\n return False\n if len(arg_1) == 2:\n if arg_1[-2] in arg_0._vowels and arg_1[-1] not in arg_0._vowels:\n return True\n elif len(arg_1) >= 3:\n if (\n arg_1[-3] not in arg_0._vowels\n and arg_1[-2] in arg_0._vowels\n and arg_1[-1] in arg_0._codanonvowels\n ):\n return True\n return False"} +{"_id": "doc_8638", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Return True iff term is a short word.\n\n (...according to the Porter2 specification.)\n\n Parameters\n ----------\n term : str\n The term to examine\n r1_prefixes : set\n Prefixes to consider\n\n Returns\n -------\n bool\n True iff term is a short word\n\n \"\"\"\n if arg_0._sb_r1(arg_1, arg_2) == len(\n arg_1\n ) and arg_0._sb_ends_in_short_syllable(arg_1):\n return True\n return False"} +{"_id": "doc_8639", "title": "", "text": "def Func(arg_0, arg_1, arg_2=8):\n \"\"\"Return the eudex phonetic hash of a word.\n\n Parameters\n ----------\n word : str\n The word to transform\n max_length : int\n The length in bits of the code returned (default 8)\n\n Returns\n -------\n int\n The eudex hash\n\n Examples\n --------\n >>> pe = Eudex()\n >>> pe.Func('Colin')\n 432345564238053650\n >>> pe.Func('Christopher')\n 433648490138894409\n >>> pe.Func('Niall')\n 648518346341351840\n >>> pe.Func('Smith')\n 720575940412906756\n >>> pe.Func('Schmidt')\n 720589151732307997\n\n \"\"\"\n # Lowercase input & filter unknown characters\n arg_1 = ''.join(\n arg_4 for arg_4 in arg_1.lower() if arg_4 in arg_0._initial_phones\n )\n\n if not arg_1:\n arg_1 = '\u00f7'\n\n # Perform initial eudex coding of each character\n arg_3 = [arg_0._initial_phones[arg_1[0]]]\n arg_3 += [arg_0._trailing_phones[arg_4] for arg_4 in arg_1[1:]]\n\n # Right-shift by one to determine if second instance should be skipped\n arg_5 = [_ >> 1 for _ in arg_3]\n arg_6 = [arg_3[0]]\n for arg_7 in range(1, len(arg_5)):\n if arg_5[arg_7] != arg_5[arg_7 - 1]:\n arg_6.append(arg_3[arg_7])\n\n # Add padding after first character & trim beyond max_length\n arg_3 = (\n [arg_6[0]]\n + [0] * max(0, arg_2 - len(arg_6))\n + arg_6[1:arg_2]\n )\n\n # Combine individual character values into eudex hash\n arg_8 = 0\n for arg_9 in arg_3:\n arg_8 = (arg_8 << 8) | arg_9\n\n return arg_8"} +{"_id": "doc_8640", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=0, arg_4=0):\n \"\"\"Return the Q-Grams in src & tar.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram; 0 for non-q-gram version\n skip : int\n The number of characters to skip (only works when src and tar are\n strings)\n\n Returns\n -------\n tuple of Counters\n Q-Grams\n\n Examples\n --------\n >>> pe = _TokenDistance()\n >>> pe.Func('AT', 'TT', qval=2)\n (QGrams({'$A': 1, 'AT': 1, 'T#': 1}),\n QGrams({'$T': 1, 'TT': 1, 'T#': 1}))\n\n \"\"\"\n if isinstance(arg_1, Counter) and isinstance(arg_2, Counter):\n return arg_1, arg_2\n if arg_3 > 0:\n return QGrams(arg_1, arg_3, '$#', arg_4), QGrams(arg_2, arg_3, '$#', arg_4)\n return Counter(arg_1.strip().split()), Counter(arg_2.strip().split())"} +{"_id": "doc_8641", "title": "", "text": "def Func(arg_0, arg_1, arg_2='lev', arg_3=(1, 1, 1, 1)):\n \"\"\"Return the Levenshtein similarity of two strings.\n\n This is 
a wrapper of :py:meth:`Levenshtein.sim`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n mode : str\n Specifies a mode for computing the Levenshtein distance:\n\n - ``lev`` (default) computes the ordinary Levenshtein distance, in\n which edits may include inserts, deletes, and substitutions\n - ``osa`` computes the Optimal String Alignment distance, in which\n edits may include inserts, deletes, substitutions, and\n transpositions but substrings may only be edited once\n\n cost : tuple\n A 4-tuple representing the cost of the four possible edits: inserts,\n deletes, substitutions, and transpositions, respectively (by default:\n (1, 1, 1, 1))\n\n Returns\n -------\n float\n The Levenshtein similarity between src & tar\n\n Examples\n --------\n >>> round(Func('cat', 'hat'), 12)\n 0.666666666667\n >>> round(Func('Niall', 'Neil'), 12)\n 0.4\n >>> Func('aluminum', 'Catalan')\n 0.125\n >>> Func('ATCG', 'TAGC')\n 0.25\n\n \"\"\"\n return Levenshtein().sim(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8642", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the omission key.\n\n Parameters\n ----------\n word : str\n The word to transform into its omission key\n\n Returns\n -------\n str\n The omission key\n\n Examples\n --------\n >>> ok = OmissionKey()\n >>> ok.Func('The quick brown fox jumped over the lazy dog.')\n 'JKQXZVWYBFMGPDHCLNTREUIOA'\n >>> ok.Func('Christopher')\n 'PHCTSRIOE'\n >>> ok.Func('Niall')\n 'LNIA'\n\n \"\"\"\n arg_1 = unicode_normalize('NFKD', text_type(arg_1.upper()))\n arg_1 = ''.join(c for c in arg_1 if c in arg_0._letters)\n\n arg_2 = ''\n\n # add consonants in order supplied by _consonants (no duplicates)\n for arg_3 in arg_0._consonants:\n if arg_3 in arg_1:\n arg_2 += arg_3\n\n # add vowels in order they appeared in the word (no duplicates)\n for arg_3 in arg_1:\n if arg_3 not in arg_0._consonants and arg_3 not in arg_2:\n arg_2 += arg_3\n\n return arg_2"} +{"_id": "doc_8643", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3, arg_4=False):\n \"\"\"Return the Monge-Elkan distance between two strings.\n\n This is a wrapper for :py:meth:`MongeElkan.dist`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n sim_func : function\n The internal similarity metric to employ\n symmetric : bool\n Return a symmetric similarity measure\n\n Returns\n -------\n float\n Monge-Elkan distance\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 0.25\n >>> round(Func('Niall', 'Neil'), 12)\n 0.333333333333\n >>> round(Func('aluminum', 'Catalan'), 12)\n 0.611111111111\n >>> Func('ATCG', 'TAGC')\n 0.5\n\n \"\"\"\n return MongeElkan().dist(arg_0, arg_1, arg_2, arg_4)"} +{"_id": "doc_8644", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the Phonem code for a word.\n\n Parameters\n ----------\n word : str\n The word to transform\n\n Returns\n -------\n str\n The Phonem value\n\n Examples\n --------\n >>> pe = Phonem()\n >>> pe.Func('Christopher')\n 'CRYSDOVR'\n >>> pe.Func('Niall')\n 'NYAL'\n >>> pe.Func('Smith')\n 'SMYD'\n >>> pe.Func('Schmidt')\n 'CMYD'\n\n \"\"\"\n arg_1 = unicode_normalize('NFC', text_type(arg_1.upper()))\n for arg_2, arg_3 in arg_0._substitutions:\n arg_1 = arg_1.replace(arg_2, arg_3)\n arg_1 = arg_1.translate(arg_0._trans)\n\n return ''.join(\n arg_4\n for arg_4 in arg_0._delete_consecutive_repeats(arg_1)\n if arg_4 in arg_0._uc_set\n )"} +{"_id": "doc_8645", "title": "", "text": "def Func(arg_0, arg_1):\n 
\"\"\"Return CLEF Swedish Func.\n\n Parameters\n ----------\n word : str\n The word to Func\n\n Returns\n -------\n str\n Word Func\n\n Examples\n --------\n >>> clef_swedish('undervisa')\n 'undervis'\n >>> clef_swedish('suspension')\n 'suspensio'\n >>> clef_swedish('visshet')\n 'viss'\n\n \"\"\"\n arg_2 = len(arg_1) - 2\n\n if arg_2 > 2 and arg_1[-1] == 's':\n arg_1 = arg_1[:-1]\n arg_2 -= 1\n\n arg_3 = {\n 5: {'elser', 'heten'},\n 4: {'arne', 'erna', 'ande', 'else', 'aste', 'orna', 'aren'},\n 3: {'are', 'ast', 'het'},\n 2: {'ar', 'er', 'or', 'en', 'at', 'te', 'et'},\n 1: {'a', 'e', 'n', 't'},\n }\n\n for arg_4 in range(5, 0, -1):\n if arg_2 > arg_4 and arg_1[-arg_4:] in arg_3[arg_4]:\n return arg_1[:-arg_4]\n return arg_1"} +{"_id": "doc_8646", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Undouble endings -kk, -dd, and -tt.\n\n Parameters\n ----------\n word : str\n The word to stem\n\n Returns\n -------\n str\n The word with doubled endings undoubled\n\n \"\"\"\n if (\n len(arg_1) > 1\n and arg_1[-1] == arg_1[-2]\n and arg_1[-1] in {'d', 'k', 't'}\n ):\n return arg_1[:-1]\n return arg_1"} +{"_id": "doc_8647", "title": "", "text": "def Func(arg_0):\n \"\"\"Convert IPA to features.\n\n This translates an IPA string of one or more phones to a list of ints\n representing the features of the string.\n\n Parameters\n ----------\n ipa : str\n The IPA representation of a phone or series of phones\n\n Returns\n -------\n list of ints\n A representation of the features of the input string\n\n Examples\n --------\n >>> Func('mut')\n [2709662981243185770, 1825831513894594986, 2783230754502126250]\n >>> Func('fon')\n [2781702983095331242, 1825831531074464170, 2711173160463936106]\n >>> Func('telz')\n [2783230754502126250, 1826957430176000426, 2693158761954453926,\n 2783230754501863834]\n\n \"\"\"\n arg_1 = []\n arg_2 = 0\n arg_0 = normalize('NFD', text_type(arg_0.lower()))\n\n arg_3 = max(len(_) for _ in _PHONETIC_FEATURES)\n\n while arg_2 < len(arg_0):\n arg_4 = False\n for arg_5 in range(arg_3, 0, -1):\n if (\n arg_2 + arg_5 - 1 <= len(arg_0)\n and arg_0[arg_2 : arg_2 + arg_5] in _PHONETIC_FEATURES\n ):\n arg_1.append(_PHONETIC_FEATURES[arg_0[arg_2 : arg_2 + arg_5]])\n arg_2 += arg_5\n arg_4 = True\n\n if not arg_4:\n arg_1.append(-1)\n arg_2 += 1\n\n return arg_1"} +{"_id": "doc_8648", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Get a feature vector.\n\n This returns a list of ints, equal in length to the vector input,\n representing presence/absence/neutrality with respect to a particular\n phonetic feature.\n\n Parameters\n ----------\n vector : list\n A tuple or list of ints representing the phonetic features of a phone\n or series of phones (such as is returned by the ipa_to_features\n function)\n feature : str\n A feature name from the set:\n\n - ``consonantal``\n - ``sonorant``\n - ``syllabic``\n - ``labial``\n - ``round``\n - ``coronal``\n - ``anterior``\n - ``distributed``\n - ``dorsal``\n - ``high``\n - ``low``\n - ``back``\n - ``tense``\n - ``pharyngeal``\n - ``ATR``\n - ``voice``\n - ``spread_glottis``\n - ``constricted_glottis``\n - ``continuant``\n - ``strident``\n - ``lateral``\n - ``delayed_release``\n - ``nasal``\n\n Returns\n -------\n list of ints\n A list indicating presence/absence/neutrality with respect to the\n feature\n\n Raises\n ------\n AttributeError\n feature must be one of ...\n\n Examples\n --------\n >>> tails = ipa_to_features('telz')\n >>> Func(tails, 'consonantal')\n [1, -1, 1, 1]\n >>> Func(tails, 'sonorant')\n [-1, 1, 1, -1]\n >>> 
Func(tails, 'nasal')\n [-1, -1, -1, -1]\n >>> Func(tails, 'coronal')\n [1, -1, 1, 1]\n\n \"\"\"\n # :param bool binary: if False, -1, 0, & 1 represent -, 0, & +\n # if True, only binary oppositions are allowed:\n # 0 & 1 represent - & + and 0s are mapped to -\n\n if arg_1 not in _FEATURE_MASK:\n raise AttributeError(\n \"feature must be one of: '\"\n + \"', '\".join(\n (\n 'consonantal',\n 'sonorant',\n 'syllabic',\n 'labial',\n 'round',\n 'coronal',\n 'anterior',\n 'distributed',\n 'dorsal',\n 'high',\n 'low',\n 'back',\n 'tense',\n 'pharyngeal',\n 'ATR',\n 'voice',\n 'spread_glottis',\n 'constricted_glottis',\n 'continuant',\n 'strident',\n 'lateral',\n 'delayed_release',\n 'nasal',\n )\n )\n + \"'\"\n )\n\n # each feature mask contains two bits, one each for - and +\n arg_2 = _FEATURE_MASK[arg_1]\n # the lower bit represents +\n arg_3 = arg_2 >> 1\n arg_4 = []\n for arg_5 in arg_0:\n if arg_5 < 0:\n arg_4.append(float('NaN'))\n else:\n arg_6 = arg_5 & arg_2\n if arg_6 == 0:\n arg_4.append(0) # 0\n elif arg_6 == arg_2:\n arg_4.append(2) # +/-\n elif arg_6 & arg_3:\n arg_4.append(1) # +\n else:\n arg_4.append(-1) # -\n\n return arg_4"} +{"_id": "doc_8649", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Compare features.\n\n This returns a number in the range [0, 1] representing a comparison of two\n feature bundles.\n\n If one of the bundles is negative, -1 is returned (for unknown values)\n\n If the bundles are identical, 1 is returned.\n\n If they are inverses of one another, 0 is returned.\n\n Otherwise, a float representing their similarity is returned.\n\n Parameters\n ----------\n feat1 : int\n A feature bundle\n feat2 : int\n A feature bundle\n\n Returns\n -------\n float\n A comparison of the feature bundles\n\n Examples\n --------\n >>> Func(ipa_to_features('l')[0], ipa_to_features('l')[0])\n 1.0\n >>> Func(ipa_to_features('l')[0], ipa_to_features('n')[0])\n 0.8709677419354839\n >>> Func(ipa_to_features('l')[0], ipa_to_features('z')[0])\n 0.8709677419354839\n >>> Func(ipa_to_features('l')[0], ipa_to_features('i')[0])\n 0.564516129032258\n\n \"\"\"\n if arg_0 < 0 or arg_1 < 0:\n return -1.0\n if arg_0 == arg_1:\n return 1.0\n\n arg_2 = len(_FEATURE_MASK)\n arg_3 = arg_0 ^ arg_1\n arg_4 = 0\n # print(featxor)\n while arg_3:\n if arg_3 & 0b1:\n arg_4 += 1\n arg_3 >>= 1\n # print(diffbits)\n return 1 - (arg_4 / (2 * arg_2))"} +{"_id": "doc_8650", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the length Funcilarity of two strings.\n\n Length Funcilarity is the ratio of the length of the shorter string to\n the longer.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Length Funcilarity\n\n Examples\n --------\n >>> cmp = Length()\n >>> cmp.Func('cat', 'hat')\n 1.0\n >>> cmp.Func('Niall', 'Neil')\n 0.8\n >>> cmp.Func('aluminum', 'Catalan')\n 0.875\n >>> cmp.Func('ATCG', 'TAGC')\n 1.0\n\n \"\"\"\n if arg_1 == arg_2:\n return 1.0\n if not arg_1 or not arg_2:\n return 0.0\n return (\n len(arg_1) / len(arg_2) if len(arg_1) < len(arg_2) else len(arg_2) / len(arg_1)\n )"} +{"_id": "doc_8651", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return harmonic mean.\n\n The harmonic mean is defined as:\n :math:`\\frac{|nums|}{\\sum\\limits_{i}\\frac{1}{nums_i}}`\n\n Following the behavior of Wolfram|Alpha:\n - If one of the values in nums is 0, return 0.\n - If more than one value in nums is 0, return NaN.\n\n Cf. 
https://en.wikipedia.org/wiki/Harmonic_mean\n\n Parameters\n ----------\n nums : list\n A series of numbers\n\n Returns\n -------\n float\n The harmonic mean of nums\n\n Raises\n ------\n AttributeError\n Func requires at least one value\n\n Examples\n --------\n >>> Func([1, 2, 3, 4])\n 1.9200000000000004\n >>> Func([1, 2])\n 1.3333333333333333\n >>> Func([0, 5, 1000])\n 0\n\n \"\"\"\n if len(arg_0) < 1:\n raise AttributeError('Func requires at least one value')\n elif len(arg_0) == 1:\n return arg_0[0]\n else:\n for arg_1 in range(1, len(arg_0)):\n if arg_0[0] != arg_0[arg_1]:\n break\n else:\n return arg_0[0]\n\n if 0 in arg_0:\n if arg_0.count(0) > 1:\n return float('nan')\n return 0\n return len(arg_0) / sum(1 / arg_1 for arg_1 in arg_0)"} +{"_id": "doc_8652", "title": "", "text": "def Func(arg_0):\n r\"\"\"Return Seiffert's mean.\n\n Seiffert's mean of two numbers x and y is:\n :math:`\\frac{x - y}{4 \\cdot arctan \\sqrt{\\frac{x}{y}} - \\pi}`\n\n It is defined in :cite:`Seiffert:1993`.\n\n Parameters\n ----------\n nums : list\n A series of numbers\n\n Returns\n -------\n float\n Sieffert's mean of nums\n\n Raises\n ------\n AttributeError\n Func supports no more than two values\n\n Examples\n --------\n >>> Func([1, 2])\n 1.4712939827611637\n >>> Func([1, 0])\n 0.3183098861837907\n >>> Func([2, 4])\n 2.9425879655223275\n >>> Func([2, 1000])\n 336.84053300118825\n\n \"\"\"\n if len(arg_0) == 1:\n return arg_0[0]\n if len(arg_0) > 2:\n raise AttributeError('Func supports no more than two values')\n if arg_0[0] + arg_0[1] == 0 or arg_0[0] - arg_0[1] == 0:\n return float('NaN')\n return (arg_0[0] - arg_0[1]) / (\n 2 * math.asin((arg_0[0] - arg_0[1]) / (arg_0[0] + arg_0[1]))\n )"} +{"_id": "doc_8653", "title": "", "text": "def Func(arg_0, arg_1=2):\n r\"\"\"Return Lehmer mean.\n\n The Lehmer mean is:\n :math:`\\frac{\\sum\\limits_i{x_i^p}}{\\sum\\limits_i{x_i^(p-1)}}`\n\n Cf. https://en.wikipedia.org/wiki/Lehmer_mean\n\n Parameters\n ----------\n nums : list\n A series of numbers\n exp : numeric\n The exponent of the Lehmer mean\n\n Returns\n -------\n float\n The Lehmer mean of nums for the given exponent\n\n Examples\n --------\n >>> Func([1, 2, 3, 4])\n 3.0\n >>> Func([1, 2])\n 1.6666666666666667\n >>> Func([0, 5, 1000])\n 995.0497512437811\n\n \"\"\"\n return sum(arg_2 ** arg_1 for arg_2 in arg_0) / sum(arg_2 ** (arg_1 - 1) for arg_2 in arg_0)"} +{"_id": "doc_8654", "title": "", "text": "def Func(arg_0):\n \"\"\"Return geometric-harmonic mean.\n\n Iterates between geometric & harmonic means until they converge to\n a single value (rounded to 12 digits).\n\n Cf. 
https://en.wikipedia.org/wiki/Geometric-harmonic_mean\n\n Parameters\n ----------\n nums : list\n A series of numbers\n\n Returns\n -------\n float\n The geometric-harmonic mean of nums\n\n Examples\n --------\n >>> Func([1, 2, 3, 4])\n 2.058868154613003\n >>> Func([1, 2])\n 1.3728805006183502\n >>> Func([0, 5, 1000])\n 0.0\n\n >>> Func([0, 0])\n 0.0\n >>> Func([0, 0, 5])\n nan\n\n \"\"\"\n arg_1 = gmean(arg_0)\n arg_2 = hmean(arg_0)\n if math.isnan(arg_1) or math.isnan(arg_2):\n return float('nan')\n while round(arg_2, 12) != round(arg_1, 12):\n arg_1, arg_2 = (arg_1 * arg_2) ** (1 / 2), (2 * arg_1 * arg_2) / (arg_1 + arg_2)\n return arg_1"} +{"_id": "doc_8655", "title": "", "text": "def Func(arg_0):\n \"\"\"Return arithmetic-geometric-harmonic mean.\n\n Iterates over arithmetic, geometric, & harmonic means until they\n converge to a single value (rounded to 12 digits), following the\n method described in :cite:`Raissouli:2009`.\n\n Parameters\n ----------\n nums : list\n A series of numbers\n\n Returns\n -------\n float\n The arithmetic-geometric-harmonic mean of nums\n\n Examples\n --------\n >>> Func([1, 2, 3, 4])\n 2.198327159900212\n >>> Func([1, 2])\n 1.4142135623731884\n >>> Func([0, 5, 1000])\n 335.0\n\n \"\"\"\n arg_1 = amean(arg_0)\n arg_2 = gmean(arg_0)\n arg_3 = hmean(arg_0)\n if math.isnan(arg_1) or math.isnan(arg_2) or math.isnan(arg_3):\n return float('nan')\n while round(arg_1, 12) != round(arg_2, 12) and round(arg_2, 12) != round(\n arg_3, 12\n ):\n arg_1, arg_2, arg_3 = (\n (arg_1 + arg_2 + arg_3) / 3,\n (arg_1 * arg_2 * arg_3) ** (1 / 3),\n 3 / (1 / arg_1 + 1 / arg_2 + 1 / arg_3),\n )\n return arg_1"} +{"_id": "doc_8656", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a word with punctuation stripped out.\n\n Parameters\n ----------\n word : str\n A word to strip punctuation from\n\n Returns\n -------\n str\n The word stripped of punctuation\n\n Examples\n --------\n >>> pe = Synoname()\n >>> pe.Func('AB;CD EF-GH$IJ')\n 'ABCD EFGHIJ'\n\n \"\"\"\n arg_2 = ''\n for arg_3 in arg_1:\n if arg_3 not in set(',-./:;\"&\\'()!{|}?$%*+<=>[\\\\]^_`~'):\n arg_2 += arg_3\n return arg_2.strip()"} +{"_id": "doc_8657", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=0.3,\n arg_4=0.73,\n arg_5=2 ** 12 - 1,\n ):\n \"\"\"Return the normalized Synoname Funcance between two words.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n word_approx_min : float\n The minimum word approximation value to signal a 'word_approx'\n match\n char_approx_min : float\n The minimum character approximation value to signal a 'char_approx'\n match\n tests : int or Iterable\n Either an integer indicating tests to perform or a list of test\n names to perform (defaults to performing all tests)\n\n Returns\n -------\n float\n Normalized Synoname Funcance\n\n \"\"\"\n return (\n synoname(arg_1, arg_2, arg_3, arg_4, arg_5, False)\n / 14\n )"} +{"_id": "doc_8658", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the NCD between two strings using bzip2 compression.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Compression Funcance\n\n Examples\n --------\n >>> cmp = NCDbz2()\n >>> cmp.Func('cat', 'hat')\n 0.06666666666666667\n >>> cmp.Func('Niall', 'Neil')\n 0.03125\n >>> cmp.Func('aluminum', 'Catalan')\n 0.17647058823529413\n >>> cmp.Func('ATCG', 'TAGC')\n 0.03125\n\n \"\"\"\n if arg_1 == arg_2:\n return 
0.0\n\n arg_1 = arg_1.encode('utf-8')\n arg_2 = arg_2.encode('utf-8')\n\n arg_3 = bz2.compress(arg_1, arg_0._level)[10:]\n arg_4 = bz2.compress(arg_2, arg_0._level)[10:]\n arg_5 = bz2.compress(arg_1 + arg_2, arg_0._level)[10:]\n arg_6 = bz2.compress(arg_2 + arg_1, arg_0._level)[10:]\n\n return (\n min(len(arg_5), len(arg_6))\n - min(len(arg_3), len(arg_4))\n ) / max(len(arg_3), len(arg_4))"} +{"_id": "doc_8659", "title": "", "text": "def Func(arg_0, arg_1, arg_2='en'):\n \"\"\"Return the MetaSoundex code for a word.\n\n Parameters\n ----------\n word : str\n The word to transform\n lang : str\n Either ``en`` for English or ``es`` for Spanish\n\n Returns\n -------\n str\n The MetaSoundex code\n\n Examples\n --------\n >>> pe = MetaSoundex()\n >>> pe.Func('Smith')\n '4500'\n >>> pe.Func('Waters')\n '7362'\n >>> pe.Func('James')\n '1520'\n >>> pe.Func('Schmidt')\n '4530'\n >>> pe.Func('Ashcroft')\n '0261'\n >>> pe.Func('Perez', lang='es')\n '094'\n >>> pe.Func('Martinez', lang='es')\n '69364'\n >>> pe.Func('Gutierrez', lang='es')\n '83994'\n >>> pe.Func('Santiago', lang='es')\n '4638'\n >>> pe.Func('Nicol\u00e1s', lang='es')\n '6754'\n\n \"\"\"\n if arg_2 == 'es':\n return arg_0._phonetic_spanish.Func(\n arg_0._spanish_metaphone.Func(arg_1)\n )\n\n arg_1 = arg_0._soundex.Func(arg_0._metaphone.Func(arg_1))\n arg_1 = arg_1[0].translate(arg_0._trans) + arg_1[1:]\n return arg_1"} +{"_id": "doc_8660", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the Ratcliff-Obershelp Funcilarity of two strings.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Ratcliff-Obershelp Funcilarity\n\n Examples\n --------\n >>> cmp = RatcliffObershelp()\n >>> round(cmp.Func('cat', 'hat'), 12)\n 0.666666666667\n >>> round(cmp.Func('Niall', 'Neil'), 12)\n 0.666666666667\n >>> round(cmp.Func('aluminum', 'Catalan'), 12)\n 0.4\n >>> cmp.Func('ATCG', 'TAGC')\n 0.5\n\n \"\"\"\n\n def _lcsstr_stl(arg_1, arg_2):\n \"\"\"Return start positions & length for Ratcliff-Obershelp.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n tuple\n The start position in the source string, start position in the\n target string, and length of the longest common substring of\n strings src and tar.\n\n \"\"\"\n arg_3 = np_zeros((len(arg_1) + 1, len(arg_2) + 1), dtype=np_int)\n arg_4, arg_5, arg_6 = 0, 0, 0\n for arg_7 in range(1, len(arg_1) + 1):\n for arg_8 in range(1, len(arg_2) + 1):\n if arg_1[arg_7 - 1] == arg_2[arg_8 - 1]:\n arg_3[arg_7, arg_8] = arg_3[arg_7 - 1, arg_8 - 1] + 1\n if arg_3[arg_7, arg_8] > arg_4:\n arg_4 = arg_3[arg_7, arg_8]\n arg_5 = arg_7\n arg_6 = arg_8\n else:\n arg_3[arg_7, arg_8] = 0\n return arg_5 - arg_4, arg_6 - arg_4, arg_4\n\n def _sstr_matches(arg_1, arg_2):\n \"\"\"Return the sum of substring match lengths.\n\n This follows the Ratcliff-Obershelp algorithm\n :cite:`Ratcliff:1988`:\n 1. Find the length of the longest common substring in src &\n tar.\n 2. Recurse on the strings to the left & right of each this\n substring in src & tar.\n 3. Base case is a 0 length common substring, in which case,\n return 0.\n 4. 
Return the sum.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n int\n Sum of substring match lengths\n\n \"\"\"\n arg_9, arg_10, arg_11 = _lcsstr_stl(arg_1, arg_2)\n if arg_11 == 0:\n return 0\n return (\n _sstr_matches(arg_1[:arg_9], arg_2[:arg_10])\n + arg_11\n + _sstr_matches(\n arg_1[arg_9 + arg_11 :], arg_2[arg_10 + arg_11 :]\n )\n )\n\n if arg_1 == arg_2:\n return 1.0\n elif not arg_1 or not arg_2:\n return 0.0\n return 2 * _sstr_matches(arg_1, arg_2) / (len(arg_1) + len(arg_2))"} +{"_id": "doc_8661", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the Parmar-Kumbharana encoding of a word.\n\n Parameters\n ----------\n word : str\n The word to transform\n\n Returns\n -------\n str\n The Parmar-Kumbharana encoding\n\n Examples\n --------\n >>> pe = ParmarKumbharana()\n >>> pe.Func('Gough')\n 'GF'\n >>> pe.Func('pneuma')\n 'NM'\n >>> pe.Func('knight')\n 'NT'\n >>> pe.Func('trice')\n 'TRS'\n >>> pe.Func('judge')\n 'JJ'\n\n \"\"\"\n arg_1 = arg_1.upper() # Rule 3\n arg_1 = arg_0._delete_consecutive_repeats(arg_1) # Rule 4\n\n # Rule 5\n arg_2 = 0\n while arg_2 < len(arg_1):\n for arg_3 in range(4, 1, -1):\n if arg_1[arg_2 : arg_2 + arg_3] in arg_0._rules[arg_3]:\n repl = arg_0._rules[arg_3][arg_1[arg_2 : arg_2 + arg_3]]\n arg_1 = arg_1[:arg_2] + repl + arg_1[arg_2 + arg_3 :]\n arg_2 += len(repl)\n break\n else:\n arg_2 += 1\n\n arg_1 = arg_1[:1] + arg_1[1:].translate(arg_0._del_trans) # Rule 6\n return arg_1"} +{"_id": "doc_8662", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2='exponential', arg_3=8, arg_4=False\n):\n \"\"\"Calculate the Hamming distance between the Eudex hashes of two terms.\n\n This is a wrapper for :py:meth:`Eudex.Func`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n weights : str, iterable, or generator function\n The weights or weights generator function\n max_length : int\n The number of characters to encode as a eudex hash\n normalized : bool\n Normalizes to [0, 1] if True\n\n Returns\n -------\n int\n The Eudex Hamming distance\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 128\n >>> Func('Niall', 'Neil')\n 2\n >>> Func('Colin', 'Cuilen')\n 10\n >>> Func('ATCG', 'TAGC')\n 403\n\n >>> Func('cat', 'hat', weights='fibonacci')\n 34\n >>> Func('Niall', 'Neil', weights='fibonacci')\n 2\n >>> Func('Colin', 'Cuilen', weights='fibonacci')\n 7\n >>> Func('ATCG', 'TAGC', weights='fibonacci')\n 117\n\n >>> Func('cat', 'hat', weights=None)\n 1\n >>> Func('Niall', 'Neil', weights=None)\n 1\n >>> Func('Colin', 'Cuilen', weights=None)\n 2\n >>> Func('ATCG', 'TAGC', weights=None)\n 9\n\n >>> # Using the OEIS A000142:\n >>> Func('cat', 'hat', [1, 1, 2, 6, 24, 120, 720, 5040])\n 1\n >>> Func('Niall', 'Neil', [1, 1, 2, 6, 24, 120, 720, 5040])\n 720\n >>> Func('Colin', 'Cuilen', [1, 1, 2, 6, 24, 120, 720, 5040])\n 744\n >>> Func('ATCG', 'TAGC', [1, 1, 2, 6, 24, 120, 720, 5040])\n 6243\n\n \"\"\"\n return Eudex().dist_abs(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_8663", "title": "", "text": "def Func(arg_0, arg_1, arg_2='exponential', arg_3=8):\n \"\"\"Return normalized Hamming distance between Eudex hashes of two terms.\n\n This is a wrapper for :py:meth:`Eudex.dist`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n weights : str, iterable, or generator function\n The weights or weights generator function\n max_length : int\n The 
number of characters to encode as a eudex hash\n\n Returns\n -------\n int\n The normalized Eudex Hamming distance\n\n Examples\n --------\n >>> round(Func('cat', 'hat'), 12)\n 0.062745098039\n >>> round(Func('Niall', 'Neil'), 12)\n 0.000980392157\n >>> round(Func('Colin', 'Cuilen'), 12)\n 0.004901960784\n >>> round(Func('ATCG', 'TAGC'), 12)\n 0.197549019608\n\n \"\"\"\n return Eudex().dist(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8664", "title": "", "text": "def Func():\n \"\"\"Yield the next Fibonacci number.\n\n Based on https://www.python-course.eu/generators.php\n Starts at Fibonacci number 3 (the second 1)\n\n Yields\n ------\n int\n The next Fibonacci number\n\n \"\"\"\n arg_0, arg_1 = 1, 2\n while True:\n yield arg_0\n arg_0, arg_1 = arg_1, arg_0 + arg_1"} +{"_id": "doc_8665", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2, arg_3=False, arg_4=None):\n \"\"\"Return the Euclidean distance between two strings.\n\n This is a wrapper for :py:meth:`Euclidean.dist_abs`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram; 0 for non-q-gram version\n normalized : bool\n Normalizes to [0, 1] if True\n alphabet : collection or int\n The values or size of the alphabet\n\n Returns\n -------\n float: The Euclidean distance\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 2.0\n >>> round(Func('Niall', 'Neil'), 12)\n 2.645751311065\n >>> Func('Colin', 'Cuilen')\n 3.0\n >>> round(Func('ATCG', 'TAGC'), 12)\n 3.162277660168\n\n \"\"\"\n return Euclidean().dist_abs(arg_0, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_8666", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2, arg_3=None):\n \"\"\"Return the normalized Euclidean distance between two strings.\n\n This is a wrapper for :py:meth:`Euclidean.dist`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram; 0 for non-q-gram version\n alphabet : collection or int\n The values or size of the alphabet\n\n Returns\n -------\n float\n The normalized Euclidean distance\n\n Examples\n --------\n >>> round(Func('cat', 'hat'), 12)\n 0.57735026919\n >>> round(Func('Niall', 'Neil'), 12)\n 0.683130051064\n >>> round(Func('Colin', 'Cuilen'), 12)\n 0.727606875109\n >>> Func('ATCG', 'TAGC')\n 1.0\n\n \"\"\"\n return Euclidean().dist(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8667", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return Lovins' condition N.\n\n Parameters\n ----------\n word : str\n Word to check\n suffix_len : int\n Suffix length\n\n Returns\n -------\n bool\n True if condition is met\n\n \"\"\"\n if len(arg_1) - arg_2 >= 3:\n if arg_1[-arg_2 - 3] == 's':\n if len(arg_1) - arg_2 >= 4:\n return True\n else:\n return True\n return False"} +{"_id": "doc_8668", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return Lovins' condition S.\n\n Parameters\n ----------\n word : str\n Word to check\n suffix_len : int\n Suffix length\n\n Returns\n -------\n bool\n True if condition is met\n\n \"\"\"\n return arg_1[-arg_2 - 2 : -arg_2] == 'dr' or (\n arg_1[-arg_2 - 1] == 't'\n and arg_1[-arg_2 - 2 : -arg_2] != 'tt'\n )"} +{"_id": "doc_8669", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return Lovins' condition X.\n\n Parameters\n ----------\n word : str\n Word to check\n suffix_len : int\n 
Suffix length\n\n Returns\n -------\n bool\n True if condition is met\n\n \"\"\"\n return arg_1[-arg_2 - 1] in {'i', 'l'} or (\n arg_1[-arg_2 - 3 : -arg_2] == 'u'\n and arg_1[-arg_2 - 1] == 'e'\n )"} +{"_id": "doc_8670", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return Lovins' condition BB.\n\n Parameters\n ----------\n word : str\n Word to check\n suffix_len : int\n Suffix length\n\n Returns\n -------\n bool\n True if condition is met\n\n \"\"\"\n return (\n len(arg_1) - arg_2 >= 3\n and arg_1[-arg_2 - 3 : -arg_2] != 'met'\n and arg_1[-arg_2 - 4 : -arg_2] != 'ryst'\n )"} +{"_id": "doc_8671", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return Lovins Func.\n\n Parameters\n ----------\n word : str\n The word to Func\n\n Returns\n -------\n str\n Word Func\n\n Examples\n --------\n >>> stmr = Lovins()\n >>> stmr.Func('reading')\n 'read'\n >>> stmr.Func('suspension')\n 'suspens'\n >>> stmr.Func('elusiveness')\n 'elus'\n\n \"\"\"\n # lowercase, normalize, and compose\n arg_1 = normalize('NFC', text_type(arg_1.lower()))\n\n for arg_2 in range(11, 0, -1):\n arg_3 = arg_1[-arg_2:]\n if (\n arg_3 in arg_0._suffix\n and len(arg_1) - arg_2 >= 2\n and (\n arg_0._suffix[arg_3] is None\n or arg_0._suffix[arg_3](arg_1, arg_2)\n )\n ):\n arg_1 = arg_1[:-arg_2]\n break\n\n if arg_1[-2:] in {\n 'bb',\n 'dd',\n 'gg',\n 'll',\n 'mm',\n 'nn',\n 'pp',\n 'rr',\n 'ss',\n 'tt',\n }:\n arg_1 = arg_1[:-1]\n\n for arg_3, arg_4 in arg_0._recode:\n if arg_1.endswith(arg_3):\n if callable(arg_4):\n arg_1 = arg_4(arg_1)\n else:\n arg_1 = arg_1[: -len(arg_3)] + arg_4\n\n return arg_1"} +{"_id": "doc_8672", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the NCD between two strings using zlib compression.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Compression Funcance\n\n Examples\n --------\n >>> cmp = NCDzlib()\n >>> cmp.Func('cat', 'hat')\n 0.3333333333333333\n >>> cmp.Func('Niall', 'Neil')\n 0.45454545454545453\n >>> cmp.Func('aluminum', 'Catalan')\n 0.5714285714285714\n >>> cmp.Func('ATCG', 'TAGC')\n 0.4\n\n \"\"\"\n if arg_1 == arg_2:\n return 0.0\n\n arg_1 = arg_1.encode('utf-8')\n arg_2 = arg_2.encode('utf-8')\n\n arg_0._compressor.compress(arg_1)\n arg_3 = arg_0._compressor.flush(zlib.Z_FULL_FLUSH)\n arg_0._compressor.compress(arg_2)\n arg_4 = arg_0._compressor.flush(zlib.Z_FULL_FLUSH)\n arg_0._compressor.compress(arg_1 + arg_2)\n arg_5 = arg_0._compressor.flush(zlib.Z_FULL_FLUSH)\n arg_0._compressor.compress(arg_2 + arg_1)\n arg_6 = arg_0._compressor.flush(zlib.Z_FULL_FLUSH)\n\n return (\n min(len(arg_5), len(arg_6))\n - min(len(arg_3), len(arg_4))\n ) / max(len(arg_3), len(arg_4))"} +{"_id": "doc_8673", "title": "", "text": "def Func(arg_0):\n \"\"\"Return Pylint badge color.\n\n Parameters\n ----------\n score : float\n A Pylint score\n\n Returns\n -------\n str\n Badge color\n\n \"\"\"\n # These are the score cutoffs for each color above.\n # I.e. score==10 -> brightgreen, down to 7.5 > score >= 5 -> orange\n arg_1 = (10, 9.5, 8.5, 7.5, 5)\n for arg_2 in range(len(arg_1)):\n if arg_0 >= arg_1[arg_2]:\n return BADGE_COLORS[arg_2]\n # and score < 5 -> red\n return BADGE_COLORS[-1]"} +{"_id": "doc_8674", "title": "", "text": "def Func(arg_0):\n \"\"\"Return pydocstyle badge color.\n\n Parameters\n ----------\n score : float\n A pydocstyle score\n\n Returns\n -------\n str\n Badge color\n\n \"\"\"\n # These are the score cutoffs for each color above.\n # I.e. 
score==0 -> brightgreen, down to 100 < score <= 200 -> orange\n arg_1 = (0, 10, 25, 50, 100)\n for arg_2 in range(len(arg_1)):\n if arg_0 <= arg_1[arg_2]:\n return BADGE_COLORS[arg_2]\n # and score > 200 -> red\n return BADGE_COLORS[-1]"} +{"_id": "doc_8675", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the bag distance between two strings.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n int\n Bag distance\n\n Examples\n --------\n >>> cmp = Bag()\n >>> cmp.Func('cat', 'hat')\n 1\n >>> cmp.Func('Niall', 'Neil')\n 2\n >>> cmp.Func('aluminum', 'Catalan')\n 5\n >>> cmp.Func('ATCG', 'TAGC')\n 0\n >>> cmp.Func('abcdefg', 'hijklm')\n 7\n >>> cmp.Func('abcdefg', 'hijklmno')\n 8\n\n \"\"\"\n if arg_2 == arg_1:\n return 0\n elif not arg_1:\n return len(arg_2)\n elif not arg_2:\n return len(arg_1)\n\n arg_3 = Counter(arg_1)\n arg_4 = Counter(arg_2)\n return max(\n sum((arg_3 - arg_4).values()),\n sum((arg_4 - arg_3).values()),\n )"} +{"_id": "doc_8676", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Return the normalized bag Funcance between two strings.\n\n Bag Funcance is normalized by dividing by :math:`max( |src|, |tar| )`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n\n Returns\n -------\n float\n Normalized bag Funcance\n\n Examples\n --------\n >>> cmp = Bag()\n >>> cmp.Func('cat', 'hat')\n 0.3333333333333333\n >>> cmp.Func('Niall', 'Neil')\n 0.4\n >>> cmp.Func('aluminum', 'Catalan')\n 0.625\n >>> cmp.Func('ATCG', 'TAGC')\n 0.0\n\n \"\"\"\n if arg_2 == arg_1:\n return 0.0\n if not arg_1 or not arg_2:\n return 1.0\n\n arg_3 = max(len(arg_1), len(arg_2))\n\n return arg_0.Func_abs(arg_1, arg_2) / arg_3"} +{"_id": "doc_8677", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.25, arg_3=2):\n \"\"\"Return the MLIPNS distance between two strings.\n\n This is a wrapper for :py:meth:`MLIPNS.dist`.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n threshold : float\n A number [0, 1] indicating the maximum similarity score, below which\n the strings are considered 'similar' (0.25 by default)\n max_mismatches : int\n A number indicating the allowable number of mismatches to remove before\n declaring two strings not similar (2 by default)\n\n Returns\n -------\n float\n MLIPNS distance\n\n Examples\n --------\n >>> Func('cat', 'hat')\n 0.0\n >>> Func('Niall', 'Neil')\n 1.0\n >>> Func('aluminum', 'Catalan')\n 1.0\n >>> Func('ATCG', 'TAGC')\n 1.0\n\n \"\"\"\n return MLIPNS().dist(arg_0, arg_1, arg_2, arg_3)"} +{"_id": "doc_8678", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3):\n \"\"\"Return a Funcilarity of two strings.\n\n This is a generalized function for calling other Funcilarity functions.\n\n Parameters\n ----------\n src : str\n Source string for comparison\n tar : str\n Target string for comparison\n method : function\n Specifies the Funcilarity metric (:py:func:`Func_levenshtein` by default)\n\n Returns\n -------\n float\n Similarity according to the specified function\n\n Raises\n ------\n AttributeError\n Unknown distance function\n\n Examples\n --------\n >>> round(Func('cat', 'hat'), 12)\n 0.666666666667\n >>> round(Func('Niall', 'Neil'), 12)\n 0.4\n >>> Func('aluminum', 'Catalan')\n 0.125\n >>> Func('ATCG', 'TAGC')\n 0.25\n\n \"\"\"\n if callable(arg_2):\n return arg_2(arg_0, arg_1)\n else:\n raise 
AttributeError('Unknown Funcilarity function: ' + str(arg_2))"} +{"_id": "doc_8679", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return Porter helper function Func value.\n\n m-degree is equal to the number of V to C transitions\n\n Parameters\n ----------\n term : str\n The word for which to calculate the m-degree\n\n Returns\n -------\n int\n The m-degree as defined in the Porter stemmer definition\n\n \"\"\"\n arg_2 = 0\n arg_3 = False\n for arg_4 in arg_1:\n if arg_4 in arg_0._vowels:\n arg_3 = True\n else:\n if arg_3:\n arg_2 += 1\n arg_3 = False\n return arg_2"} +{"_id": "doc_8680", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return Porter helper function Func value.\n\n Parameters\n ----------\n term : str\n The word to scan for vowels\n\n Returns\n -------\n bool\n True iff a vowel exists in the term (as defined in the Porter\n stemmer definition)\n\n \"\"\"\n for arg_2 in arg_1:\n if arg_2 in arg_0._vowels:\n return True\n return False"} +{"_id": "doc_8681", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return Porter helper function Func value.\n\n Parameters\n ----------\n term : str\n The word to check for a final doubled consonant\n\n Returns\n -------\n bool\n True iff the stem ends in a doubled consonant (as defined in the\n Porter stemmer definition)\n\n \"\"\"\n return (\n len(arg_1) > 1\n and arg_1[-1] not in arg_0._vowels\n and arg_1[-2] == arg_1[-1]\n )"} +{"_id": "doc_8682", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return Porter helper function Func value.\n\n Parameters\n ----------\n term : str\n The word to scan for cvc\n\n Returns\n -------\n bool\n True iff the stem ends in cvc (as defined in the Porter stemmer\n definition)\n\n \"\"\"\n return len(arg_1) > 2 and (\n arg_1[-1] not in arg_0._vowels\n and arg_1[-2] in arg_0._vowels\n and arg_1[-3] not in arg_0._vowels\n and arg_1[-1] not in tuple('wxY')\n )"} +{"_id": "doc_8683", "title": "", "text": "def Func(arg_0, arg_1=10.0):\n \"\"\"Symmetrical logarithmic scale.\n\n Optional arguments:\n\n *base*:\n The base of the logarithm.\n \"\"\"\n arg_2 = np.log(arg_1)\n arg_3 = np.sign(arg_0)\n arg_4 = np.log(np.abs(arg_0) / arg_2)\n return arg_3 * arg_4"} +{"_id": "doc_8684", "title": "", "text": "def Func(arg_0):\n \"\"\"Show usage and available curve functions.\"\"\"\n arg_0.print_usage()\n print('')\n print('available functions:')\n for arg_1 in sorted(FUNCTION):\n arg_2 = FUNCTION[arg_1].__doc__.strip().splitlines()[0]\n print(' %-12s %s' % (arg_1 + ':', arg_2))\n\n return 0"} +{"_id": "doc_8685", "title": "", "text": "def Func(arg_0):\n \"\"\"Get the current terminal Func.\"\"\"\n for arg_1 in range(3):\n arg_2 = arg_0._ioctl_GWINSZ(arg_1)\n if arg_2:\n break\n if not arg_2:\n try:\n arg_1 = os.open(os.ctermid(), os.O_RDONLY)\n arg_2 = arg_0._ioctl_GWINSZ(arg_1)\n os.close(arg_1)\n except Exception:\n pass\n\n if not arg_2:\n arg_3 = os.environ\n arg_2 = (arg_3.get('LINES', 25), arg_3.get('COLUMNS', 80))\n\n return int(arg_2[1]), int(arg_2[0])"} +{"_id": "doc_8686", "title": "", "text": "def Func(arg_0, arg_1, *arg_2):\n \"\"\"Return the escape sequence for the selected Control Sequence.\"\"\"\n arg_3 = curses.tigetstr(arg_1)\n if arg_3 is None:\n return b''\n else:\n return curses.tparm(arg_3, *arg_2)"} +{"_id": "doc_8687", "title": "", "text": "def Func(arg_0, arg_1, arg_2, *arg_3):\n \"\"\"Return a value wrapped in the selected CSI and does a reset.\"\"\"\n if isinstance(arg_1, str):\n arg_1 = arg_1.encode('utf-8')\n return b''.join([\n arg_0.csi(arg_2, *arg_3),\n 
arg_1,\n arg_0.csi('sgr0'),\n ])"} +{"_id": "doc_8688", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"Read points from istream and output to ostream.\"\"\"\n arg_4 = [] # List of 2-tuples\n\n if arg_3:\n arg_5 = max(0.01, arg_0.option.sleep)\n arg_6 = arg_1.fileno()\n while True:\n try:\n if select.select([arg_6], [], [], arg_5):\n try:\n arg_7 = arg_1.readline()\n if arg_7 == '':\n break\n arg_4.append(arg_0.Func_line(arg_7))\n except ValueError:\n continue\n\n if arg_0.option.sort_by_column:\n arg_4 = sorted(arg_4, key=itemgetter(arg_0.option.sort_by_column - 1))\n\n if len(arg_4) > 1:\n arg_4 = arg_4[-arg_0.maximum_points:]\n arg_0.update([arg_8[0] for arg_8 in arg_4], [arg_8[1] for arg_8 in arg_4])\n arg_0.render(arg_2)\n\n time.sleep(arg_5)\n\n except KeyboardInterrupt:\n break\n\n else:\n for arg_7 in arg_1:\n try:\n arg_4.append(arg_0.Func_line(arg_7))\n except ValueError:\n pass\n\n if arg_0.option.sort_by_column:\n arg_4 = sorted(arg_4, key=itemgetter(arg_0.option.sort_by_column - 1))\n\n arg_0.update([arg_8[0] for arg_8 in arg_4], [arg_8[1] for arg_8 in arg_4])\n arg_0.render(arg_2)"} +{"_id": "doc_8689", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Consume data from a line.\"\"\"\n arg_2 = RE_VALUE_KEY.split(arg_1.strip(), 1)\n if len(arg_2) == 1:\n return float(arg_2[0]), None\n else:\n return float(arg_2[0]), arg_2[1].strip()"} +{"_id": "doc_8690", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Add a set of data points.\"\"\"\n arg_0.values = arg_2 or [None] * len(arg_1)\n\n if np is None:\n if arg_0.option.function:\n warnings.warn('numpy not available, function ignored')\n arg_0.points = arg_1\n arg_0.minimum = min(arg_0.points)\n arg_0.maximum = max(arg_0.points)\n arg_0.current = arg_0.points[-1]\n\n else:\n arg_0.points = arg_0.apply_function(arg_1)\n arg_0.minimum = np.min(arg_0.points)\n arg_0.maximum = np.max(arg_0.points)\n arg_0.current = arg_0.points[-1]\n\n if arg_0.maximum == arg_0.minimum:\n arg_0.extents = 1\n else:\n arg_0.extents = (arg_0.maximum - arg_0.minimum)\n arg_0.extents = (arg_0.maximum - arg_0.minimum)"} +{"_id": "doc_8691", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Generate a color ramp for the current screen height.\"\"\"\n arg_2 = PALETTE.get(arg_0.option.palette, {})\n arg_2 = arg_2.get(arg_0.term.colors, None)\n Func = []\n if arg_2 is not None:\n arg_4 = len(arg_2) / float(arg_1)\n for arg_5 in range(int(arg_1)):\n Func.append(arg_0.term.color(arg_2[int(arg_4 * arg_5)]))\n\n return Func"} +{"_id": "doc_8692", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Run the filter function on the provided points.\"\"\"\n if not arg_0.option.function:\n return arg_1\n\n if np is None:\n raise ImportError('numpy is not available')\n\n if ':' in arg_0.option.function:\n arg_2, arg_3 = arg_0.option.function.split(':', 1)\n arg_3 = arg_3.split(',')\n else:\n arg_2 = arg_0.option.function\n arg_3 = []\n\n # Resolve arguments\n arg_3 = list(map(arg_0._function_argument, arg_3))\n\n # Resolve function\n arg_4 = FUNCTION.get(arg_2)\n\n if arg_4 is None:\n raise TypeError('Invalid function \"%s\"' % (arg_2,))\n\n else:\n # We wrap in ``list()`` to consume generators and iterators, as\n # ``np.array`` doesn't do this for us.\n return arg_4(np.array(list(arg_1)), *arg_3)"} +{"_id": "doc_8693", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=1):\n \"\"\"Resolve the points to make a Func between two points.\"\"\"\n arg_4 = max(arg_1.x, arg_2.x) - min(arg_1.x, arg_2.x)\n arg_5 = 
max(arg_1.y, arg_2.y) - min(arg_1.y, arg_2.y)\n arg_6 = [-1, 1][int(arg_1.x <= arg_2.x)]\n arg_7 = [-1, 1][int(arg_1.y <= arg_2.y)]\n arg_8 = int(round(max(arg_4, arg_5)))\n if arg_8 == 0:\n return\n\n for arg_9 in range((arg_8 + 1) * arg_3):\n arg_10 = arg_1.x\n arg_11 = arg_1.y\n\n if arg_4:\n arg_10 += (float(arg_9) * arg_4) / arg_8 * arg_6 / arg_3\n if arg_5:\n arg_11 += (float(arg_9) * arg_5) / arg_8 * arg_7 / arg_3\n\n yield Point((arg_10, arg_11))"} +{"_id": "doc_8694", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Set a text value in the screen canvas.\"\"\"\n if not arg_0.option.legend:\n return\n\n if not isinstance(arg_1, Point):\n arg_1 = Point(arg_1)\n\n for arg_3, arg_4 in enumerate(str(arg_2)):\n arg_0.screen.canvas[arg_1.y][arg_1.x + arg_3] = arg_4"} +{"_id": "doc_8695", "title": "", "text": "def Func(arg_0):\n \"\"\"Normalised data points using numpy.\"\"\"\n arg_1 = (arg_0.screen.width / float(len(arg_0.points)))\n arg_2 = (arg_0.screen.height)\n arg_3 = np.array(arg_0.points) - arg_0.minimum\n arg_3 = arg_3 * 4.0 / arg_0.extents * arg_0.size.y\n for arg_4, arg_5 in enumerate(arg_3):\n yield Point((\n arg_1 * arg_4,\n min(arg_2, arg_2 - arg_5),\n ))"} +{"_id": "doc_8696", "title": "", "text": "def Func(arg_0, arg_1) -> List[str]:\n \"\"\" Loads the content of the text file \"\"\"\n arg_2 = []\n arg_2 = read_lines_from_file(arg_1)\n return arg_2"} +{"_id": "doc_8697", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> arg_2:\n \"\"\" translate the incoming symbol into locally-used \"\"\"\n # read all mappings from the db\n if not arg_0.symbol_maps:\n arg_0.__load_symbol_maps()\n # translate the incoming symbol\n arg_3 = arg_0.symbol_maps[arg_1] if arg_1 in arg_0.symbol_maps else arg_1\n\n return arg_3"} +{"_id": "doc_8698", "title": "", "text": "def Func(arg_0):\n \"\"\" Loads all symbol maps from db \"\"\"\n arg_1 = SymbolMapRepository(arg_0.__get_session())\n arg_2 = arg_1.get_all()\n arg_0.symbol_maps = {}\n for arg_4 in arg_2:\n arg_0.symbol_maps[arg_4.in_symbol] = arg_4.out_symbol"} +{"_id": "doc_8699", "title": "", "text": "def Func(arg_0: arg_1, arg_2, arg_3, arg_4: arg_1):\n \"\"\" Add individual price \"\"\"\n arg_0 = arg_0.upper()\n arg_4 = arg_4.upper()\n\n arg_5 = PriceDbApplication()\n arg_6 = PriceModel()\n\n # security = SecuritySymbol(\"\", \"\")\n arg_6.symbol.parse(arg_0)\n # price.symbol.mnemonic = price.symbol.mnemonic.upper()\n\n # date_str = f\"{date}\"\n # date_format = \"%Y-%m-%d\"\n # if time:\n # date_str = f\"{date_str}T{time}\"\n # date_format += \"T%H:%M:%S\"\n # datum.from_iso_date_string(date)\n # price.datetime = datetime.strptime(date_str, date_format)\n arg_6.datum.from_iso_date_string(arg_2)\n\n arg_6.value = Decimal(arg_3)\n arg_6.currency = arg_4\n arg_5.Func_price(arg_6)\n arg_5.save()\n\n click.echo(\"Price Funced.\")"} +{"_id": "doc_8700", "title": "", "text": "def Func(arg_0: arg_1, arg_2: arg_1):\n \"\"\" Import prices from CSV file \"\"\"\n arg_4.debug(f\"currency = {currency}\")\n # auto-convert to uppercase.\n arg_2 = arg_2.upper()\n\n arg_3 = PriceDbApplication()\n arg_3.logger = arg_4\n arg_3.import_prices(arg_0, arg_2)"} +{"_id": "doc_8701", "title": "", "text": "def Func(arg_0: arg_1):\n \"\"\" displays Func price, for symbol if provided \"\"\"\n arg_2 = PriceDbApplication()\n\n # convert to uppercase\n if arg_0:\n arg_0 = arg_0.upper()\n # extract namespace\n arg_3 = SecuritySymbol(\"\", \"\")\n arg_3.parse(arg_0)\n\n arg_4 = arg_2.get_latest_price(arg_3)\n assert isinstance(arg_4, PriceModel)\n 
print(f\"{latest}\")\n else:\n # Show the latest prices available for all securities.\n arg_4 = arg_2.get_latest_prices()\n for arg_5 in arg_4:\n print(f\"{price}\")"} +{"_id": "doc_8702", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Display all prices \"\"\"\n arg_3 = PriceDbApplication()\n arg_3.logger = arg_4\n\n if arg_2:\n # fetch only the last prices\n arg_5 = arg_3.get_latest_prices()\n else:\n arg_5 = arg_3.get_prices(arg_0, arg_1)\n for arg_6 in arg_5:\n print(arg_6)\n\n print(f\"{len(prices)} records found.\")"} +{"_id": "doc_8703", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_4, arg_5: arg_4, arg_6: arg_4, arg_7: arg_4):\n \"\"\" Download the latest prices \"\"\"\n if arg_1:\n click.echo(arg_0.get_help())\n arg_0.exit()\n\n arg_8 = PriceDbApplication()\n arg_8.logger = arg_9\n\n if arg_7:\n arg_7 = arg_7.strip()\n arg_7 = arg_7.upper()\n\n # Otherwise Func the prices for securities listed in the database.\n arg_8.Func_prices(arg_7=arg_7, arg_6=arg_6, arg_3=arg_3, arg_5=arg_5)"} +{"_id": "doc_8704", "title": "", "text": "def Func():\n \"\"\" Return the default session. The path is read from the default config. \"\"\"\n from .config import Config, ConfigKeys\n\n arg_0 = Config().get(ConfigKeys.price_database)\n if not arg_0:\n raise ValueError(\"Price database not set in the configuration file!\")\n return get_session(arg_0)"} +{"_id": "doc_8705", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Creates a symbol mapping \"\"\"\n arg_2 = Config().get(ConfigKeys.pricedb_path)\n arg_3 = get_session(arg_2)\n\n arg_4 = SymbolMap()\n arg_4.in_symbol = arg_0\n arg_4.out_symbol = arg_1\n\n arg_3.add(arg_4)\n arg_3.commit()\n click.echo(\"Record saved.\")"} +{"_id": "doc_8706", "title": "", "text": "def Func():\n \"\"\" Displays all symbol maps \"\"\"\n arg_0 = Config().get(ConfigKeys.price_database)\n arg_1 = get_session(arg_0)\n\n arg_2 = arg_1.query(SymbolMap).all()\n for arg_3 in arg_2:\n click.echo(arg_3)"} +{"_id": "doc_8707", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> SymbolMap:\n \"\"\" Finds the map by in-symbol \"\"\"\n return arg_0.query.filter(SymbolMap.in_symbol == arg_1).first()"} +{"_id": "doc_8708", "title": "", "text": "def Func(arg_0: arg_1) -> List[arg_1]:\n \"\"\" Read text lines from a file \"\"\"\n # check if the file exists?\n with open(arg_0) as csv_file:\n arg_2 = csv_file.readlines()\n return arg_2"} +{"_id": "doc_8709", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> Price:\n \"\"\" Parse into the Price entity, ready for saving \"\"\"\n # assert isinstance(model, PriceModel)\n assert isinstance(arg_1.symbol, SecuritySymbol)\n assert isinstance(arg_1.datum, Datum)\n\n arg_3 = Price()\n\n # Format date as ISO string\n arg_4 = f\"{model.datum.value.year}-{model.datum.value.month:02d}-{model.datum.value.day:02d}\"\n arg_3.date = arg_4\n\n arg_3.time = f\"{model.datum.value.hour:02d}:{model.datum.value.minute:02d}:{model.datum.value.second:02d}\"\n\n # Symbol\n # properly mapped symbols have a namespace, except for the US markets\n # TODO check this with .csv import\n if arg_1.symbol.namespace:\n arg_3.namespace = arg_1.symbol.namespace.upper()\n arg_3.symbol = arg_1.symbol.mnemonic.upper()\n\n assert isinstance(arg_1.value, Decimal)\n # Find number of decimal places\n arg_9 = abs(arg_1.value.as_tuple().exponent)\n arg_3.denom = 10 ** arg_9\n # Price value\n arg_3.value = int(arg_1.value * arg_3.denom)\n\n # Currency\n arg_3.currency = arg_1.currency.upper()\n\n # self.logger.debug(f\"{entity}\")\n return 
arg_3"} +{"_id": "doc_8710", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\" Read the config file \"\"\"\n if not os.path.exists(arg_1):\n raise FileNotFoundError(f\"File path not found: {file_path}\")\n # check if file exists\n if not os.path.isfile(arg_1):\n arg_0.logger.error(f\"file not found: {file_path}\")\n raise FileNotFoundError(f\"configuration file not found {file_path}\")\n\n arg_0.config.read(arg_1)"} +{"_id": "doc_8711", "title": "", "text": "def Func(arg_0) -> str:\n \"\"\" gets the default config path from resources \"\"\"\n arg_1 = resource_filename(\n Requirement.parse(package_name),\n template_path + config_filename)\n return arg_1"} +{"_id": "doc_8712", "title": "", "text": "def Func(arg_0):\n \"\"\" Copy the config template into user's directory \"\"\"\n arg_1 = arg_0.__get_config_template_path()\n arg_2 = os.path.abspath(arg_1)\n\n if not os.path.exists(arg_2):\n arg_3 = f\"Config template not found {src}\"\n arg_0.logger.error(arg_3)\n raise FileNotFoundError(arg_3)\n\n arg_4 = os.path.abspath(arg_0.get_config_path())\n\n shutil.copyfile(arg_2, arg_4)\n\n if not os.path.exists(arg_4):\n raise FileNotFoundError(\"Config file could not be copied to user dir!\")"} +{"_id": "doc_8713", "title": "", "text": "def Func(arg_0) -> str:\n \"\"\"\n Returns the path where the active config file is expected.\n This is the user's profile folder.\n \"\"\"\n arg_1 = arg_0.__get_user_path()\n arg_2 = arg_1 + \"/\" + config_filename\n return arg_2"} +{"_id": "doc_8714", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3):\n \"\"\" Sets a value in config \"\"\"\n assert isinstance(arg_1, arg_2)\n\n # As currently we only have 1 section.\n arg_4 = SECTION\n arg_0.config.Func(arg_4, arg_1.name, arg_3)\n arg_0.save()"} +{"_id": "doc_8715", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\" Retrieves a config value \"\"\"\n assert isinstance(arg_1, arg_2)\n\n # Currently only one section is used\n arg_3 = SECTION\n return arg_0.config.Func(arg_3, arg_1.name)"} +{"_id": "doc_8716", "title": "", "text": "def Func(arg_0, arg_1: arg_2) -> (arg_2, arg_2):\n \"\"\" Splits the symbol into namespace, symbol tuple \"\"\"\n arg_3 = arg_1.split(\":\")\n arg_4 = None\n arg_5 = arg_1\n\n if len(arg_3) > 1:\n arg_4 = arg_3[0]\n arg_5 = arg_3[1]\n\n arg_0.namespace = arg_4\n arg_0.mnemonic = arg_5\n\n return arg_4, arg_5"} +{"_id": "doc_8717", "title": "", "text": "def Func(arg_0):\n \"\"\" Returns the current db Func \"\"\"\n if not arg_0.__Func:\n arg_0.__Func = dal.get_default_Func()\n return arg_0.__Func"} +{"_id": "doc_8718", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2) -> List[PriceModel]:\n \"\"\" Fetches all the prices for the given arguments \"\"\"\n from .repositories import PriceRepository\n\n arg_4 = arg_0.session\n arg_5 = PriceRepository(arg_4)\n arg_6 = arg_5.query\n if arg_1:\n arg_6 = arg_6.filter(dal.Price.date == arg_1)\n if arg_3:\n arg_6 = arg_6.filter(dal.Price.currency == arg_3)\n # Sort by symbol.\n arg_6 = arg_6.order_by(dal.Price.namespace, dal.Price.symbol)\n arg_7 = arg_6.all()\n\n arg_8 = mappers.PriceMapper()\n arg_9 = []\n for arg_10 in arg_7:\n arg_11 = arg_8.map_entity(arg_10)\n arg_9.append(arg_11)\n return arg_9"} +{"_id": "doc_8719", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_2):\n \"\"\" Returns the latest price on the date \"\"\"\n arg_5 = arg_0.get_price_repository()\n arg_6 = (\n arg_5.query.filter(dal.Price.namespace == arg_3)\n .filter(dal.Price.symbol == arg_4)\n 
.filter(dal.Price.date == arg_1)\n .order_by(dal.Price.time.desc())\n )\n arg_7 = arg_6.first()\n # logging.debug(result)\n return arg_7"} +{"_id": "doc_8720", "title": "", "text": "def Func(arg_0) -> int:\n \"\"\"\n Prune historical prices for all symbols, leaving only the latest.\n Returns the number of items removed.\n \"\"\"\n from .repositories import PriceRepository\n\n # get all symbols that have prices\n arg_1 = PriceRepository()\n arg_2 = arg_1.query.distinct(dal.Price.namespace, dal.Price.symbol).all()\n # self.logger.debug(items)\n arg_3 = 0\n\n for arg_4 in arg_2:\n arg_5 = SecuritySymbol(arg_4.namespace, arg_4.symbol)\n arg_6 = arg_0.prune(arg_5)\n if arg_6:\n arg_3 += 1\n\n return arg_3"} +{"_id": "doc_8721", "title": "", "text": "def Func(arg_0, arg_1: arg_2):\n \"\"\"\n Delete all but the latest available price for the given symbol.\n Returns the number of items removed.\n \"\"\"\n from .repositories import PriceRepository\n\n assert isinstance(arg_1, arg_2)\n\n arg_0.logger.debug(f\"pruning prices for {symbol}\")\n\n arg_3 = PriceRepository()\n arg_4 = (\n arg_3.query.filter(dal.Price.namespace == arg_1.namespace)\n .filter(dal.Price.symbol == arg_1.mnemonic)\n .order_by(dal.Price.date.desc())\n .order_by(dal.Price.time.desc())\n )\n arg_5 = arg_4.all()\n # self.logger.debug(f\"fetched {all_prices}\")\n\n arg_6 = False\n arg_7 = True\n for arg_8 in arg_5:\n if not arg_7:\n arg_3.query.filter(dal.Price.id == arg_8.id).delete()\n arg_6 = True\n arg_0.logger.debug(f\"deleting {single.id}\")\n else:\n arg_7 = False\n\n arg_3.save()\n\n return arg_6"} +{"_id": "doc_8722", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_2):\n \"\"\" Downloads and parses the price \"\"\"\n from finance_quote_python import Quote\n\n assert isinstance(arg_1, arg_2)\n assert isinstance(arg_3, arg_2)\n assert isinstance(arg_4, arg_2)\n\n if not arg_1:\n return None\n\n #self.logger.info(f\"Downloading {symbol}... 
\")\n\n arg_5 = Quote()\n arg_5.logger = arg_0.logger\n\n arg_5.set_source(arg_4)\n arg_5.set_currency(arg_3)\n\n arg_7 = arg_5.fetch(arg_4, [arg_1])\n\n if not arg_7:\n raise ValueError(f\"Did not receive a response for {symbol}.\")\n\n arg_8 = arg_7[0]\n\n if not arg_8:\n raise ValueError(f\"Price not downloaded/parsed for {symbol}.\")\n else:\n # Create price data entity, to be inserted.\n arg_0.add_price(arg_8)\n\n return arg_8"} +{"_id": "doc_8723", "title": "", "text": "def Func(arg_0, arg_1: arg_2, arg_3: arg_2, arg_4: arg_2,\n arg_5: arg_2) -> List[dal.Security]:\n \"\"\" Fetches the securities that match the given filters \"\"\"\n arg_6 = arg_0.get_security_repository()\n arg_7 = arg_6.query\n\n if arg_1 is not None:\n arg_7 = arg_7.filter(dal.Security.currency == arg_1)\n\n if arg_3 is not None:\n arg_7 = arg_7.filter(dal.Security.updater == arg_3)\n\n if arg_4 is not None:\n arg_7 = arg_7.filter(dal.Security.symbol == arg_4)\n\n if arg_5 is not None:\n arg_7 = arg_7.filter(dal.Security.namespace == arg_5)\n\n # Sorting\n arg_7 = arg_7.order_by(dal.Security.namespace, dal.Security.symbol)\n\n arg_8 = arg_7.all()\n return arg_8"} +{"_id": "doc_8724", "title": "", "text": "def Func(arg_0):\n \"\"\"Return Func of original function call\"\"\"\n arg_1 = arg_0.data[\"bound_args\"]\n return state_Func(arg_0.data[\"func\"], *arg_1.args[1:], **arg_1.kwargs)"} +{"_id": "doc_8725", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Verify that a part that is zoomed in on has equal length.\n\n Typically used in the context of ``check_function_def()``\n\n Arguments:\n name (str): name of the part for which to check the length to the corresponding part in the solution.\n unequal_msg (str): Message in case the lengths do not match.\n state (State): state as passed by the SCT chain. Don't specify this explicitly.\n\n :Examples:\n\n Student and solution code::\n\n def shout(word):\n return word + '!!!'\n\n SCT that checks number of arguments::\n\n Ex().check_function_def('shout').Func('args', 'not enough args!')\n \"\"\"\n arg_3 = dict(\n stu_len=len(arg_0.student_parts[arg_1]), sol_len=len(arg_0.solution_parts[arg_1])\n )\n\n if arg_3[\"stu_len\"] != arg_3[\"sol_len\"]:\n arg_4 = arg_0.build_message(arg_2, arg_3)\n arg_0.report(Feedback(arg_4, arg_0))\n\n return arg_0"} +{"_id": "doc_8726", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=False,\n arg_3=\"Did you import `{{pkg}}`?\",\n arg_4=\"Did you import `{{pkg}}` as `{{alias}}`?\",\n):\n \"\"\"Checks whether student imported a package or function correctly.\n\n Python features many ways to import packages.\n All of these different methods revolve around the ``import``, ``from`` and ``as`` keywords.\n ``Func()`` provides a robust way to check whether a student correctly imported a certain package.\n\n By default, ``Func()`` allows for different ways of aliasing the imported package or function.\n If you want to make sure the correct alias was used to refer to the package or function that was imported,\n set ``same_as=True``.\n\n Args:\n name (str): the name of the package that has to be checked.\n same_as (bool): if True, the alias of the package or function has to be the same. 
Defaults to False.\n not_imported_msg (str): feedback message when the package is not imported.\n incorrect_as_msg (str): feedback message if the alias is wrong.\n\n :Example:\n\n Example 1, where aliases don't matter (defaut): ::\n\n # solution\n import matplotlib.pyplot as plt\n\n # sct\n Ex().Func(\"matplotlib.pyplot\")\n\n # passing submissions\n import matplotlib.pyplot as plt\n from matplotlib import pyplot as plt\n import matplotlib.pyplot as pltttt\n\n # failing submissions\n import matplotlib as mpl\n\n Example 2, where the SCT is coded so aliases do matter: ::\n\n # solution\n import matplotlib.pyplot as plt\n\n # sct\n Ex().Func(\"matplotlib.pyplot\", same_as=True)\n\n # passing submissions\n import matplotlib.pyplot as plt\n from matplotlib import pyplot as plt\n\n # failing submissions\n import matplotlib.pyplot as pltttt\n\n \"\"\"\n arg_5 = arg_0.ast_dispatcher(\"imports\", arg_0.student_ast)\n arg_6 = arg_0.ast_dispatcher(\"imports\", arg_0.solution_ast)\n\n if arg_1 not in arg_6:\n raise InstructorError(\n \"`Func()` couldn't find an import of the package %s in your solution code.\"\n % arg_1\n )\n\n arg_7 = {\"pkg\": arg_1, \"alias\": arg_6[arg_1]}\n\n arg_8 = arg_0.build_message(arg_3, arg_7)\n arg_0.do_test(DefinedCollTest(arg_1, arg_5, arg_8))\n\n if arg_2:\n arg_8 = arg_0.build_message(arg_4, arg_7)\n arg_0.do_test(EqualTest(arg_6[arg_1], arg_5[arg_1], arg_8))\n\n return arg_0"} +{"_id": "doc_8727", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True, arg_3=None):\n \"\"\"Search student output for a pattern.\n\n Among the student and solution process, the student submission and solution code as a string,\n the ``Ex()`` state also contains the output that a student generated with his or her submission.\n\n With ``Func()``, you can access this output and match it against a regular or fixed expression.\n\n Args:\n text (str): the text that is searched for\n pattern (bool): if True (default), the text is treated as a pattern. If False, it is treated as plain text.\n no_output_msg (str): feedback message to be displayed if the output is not found.\n\n :Example:\n\n As an example, suppose we want a student to print out a sentence: ::\n\n # Print the \"This is some ... stuff\"\n print(\"This is some weird stuff\")\n\n The following SCT tests whether the student prints out ``This is some weird stuff``: ::\n\n # Using exact string matching\n Ex().Func(\"This is some weird stuff\", pattern = False)\n\n # Using a regular expression (more robust)\n # pattern = True is the default\n msg = \"Print out ``This is some ... 
stuff`` to the output, \" + \\\\\n \"fill in ``...`` with a word you like.\"\n Ex().Func(r\"This is some \\w* stuff\", no_output_msg = msg)\n\n \"\"\"\n if not arg_3:\n arg_3 = \"You did not output the correct things.\"\n\n arg_4 = arg_0.build_message(arg_3)\n arg_0.do_test(StringContainsTest(arg_0.raw_student_output, arg_1, arg_2, arg_4))\n\n return arg_0"} +{"_id": "doc_8728", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=False\n):\n \"\"\"Check if the right printouts happened.\n\n ``Func()`` will look for the printout in the solution code that you specified with ``index`` (0 in this case), rerun the ``print()`` call in\n the solution process, capture its output, and verify whether the output is present in the output of the student.\n\n This is more robust as ``Ex().check_function('print')`` initiated chains as students can use as many\n printouts as they want, as long as they do the correct one somewhere.\n\n Args:\n index (int): index of the ``print()`` call in the solution whose output you want to search for in the student output.\n not_printed_msg (str): if specified, this overrides the default message that is generated when the output\n is not found in the student output.\n pre_code (str): Python code as a string that is executed before running the targeted student call.\n This is the ideal place to set a random seed, for example.\n copy (bool): whether to try to deep copy objects in the environment, such as lists, that could\n accidentally be mutated. Disabled by default, which speeds up SCTs.\n state (State): state as passed by the SCT chain. Don't specify this explicitly.\n\n :Example:\n\n Suppose you want somebody to print out 4: ::\n\n print(1, 2, 3, 4)\n\n The following SCT would check that: ::\n\n Ex().Func(0)\n\n All of the following SCTs would pass: ::\n\n print(1, 2, 3, 4)\n print('1 2 3 4')\n print(1, 2, '3 4')\n print(\"random\"); print(1, 2, 3, 4)\n\n :Example:\n\n Watch out: ``Func()`` will effectively **rerun** the ``print()`` call in the solution process after the entire solution script was executed.\n If your solution script updates the value of `x` after executing it, ``Func()`` will not work.\n\n Suppose you have the following solution: ::\n\n x = 4\n print(x)\n x = 6\n\n The following SCT will not work: ::\n\n Ex().Func(0)\n\n Why? When the ``print(x)`` call is executed, the value of ``x`` will be 6, and pythonwhat will look for the output `'6`' in the output the student generated.\n In cases like these, ``Func()`` cannot be used.\n\n :Example:\n\n Inside a for loop ``Func()``\n\n Suppose you have the following solution: ::\n\n for i in range(5):\n print(i)\n\n The following SCT will not work: ::\n\n Ex().check_for_loop().check_body().Func(0)\n\n The reason is that ``Func()`` can only be called from the root state. ``Ex()``.\n If you want to check printouts done in e.g. a for loop, you have to use a `check_function('print')` chain instead: ::\n\n Ex().check_for_loop().check_body().\\\\\n set_context(0).check_function('print').\\\\\n check_args(0).has_equal_value()\n\n \"\"\"\n\n arg_6 = \"If you want to check printouts done in e.g. 
a for loop, you have to use a `check_function('print')` chain instead.\"\n arg_0.assert_root(\"Func\", arg_6=arg_6)\n\n if arg_2 is None:\n arg_2 = (\n \"Have you used `{{sol_call}}` to do the appropriate printouts?\"\n )\n\n try:\n arg_7 = arg_0.ast_dispatcher(\"function_calls\", arg_0.solution_ast)[\n \"print\"\n ][arg_1][\"node\"]\n except (KeyError, IndexError):\n raise InstructorError(\n \"`Func({})` couldn't find the {} print call in your solution.\".format(\n arg_1, utils.get_ord(arg_1 + 1)\n )\n )\n\n arg_8, arg_9 = getOutputInProcess(\n tree=arg_7,\n process=arg_0.solution_process,\n context=arg_0.solution_context,\n env=arg_0.solution_env,\n arg_3=arg_3,\n arg_5=arg_5,\n )\n\n arg_10 = arg_0.solution_ast_tokens.get_text(arg_7)\n\n if isinstance(arg_9, Exception):\n raise InstructorError(\n \"Evaluating the solution expression {} raised error in solution process.\"\n \"Error: {} - {}\".format(arg_10, type(arg_8), arg_9)\n )\n\n arg_11 = arg_0.build_message(arg_2, {\"sol_call\": arg_10})\n\n has_output(arg_0, arg_8.strip(), pattern=False, no_output_msg=arg_11)\n\n return arg_0"} +{"_id": "doc_8729", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Test multiple choice exercise.\n\n Test for a MultipleChoiceExercise. The correct answer (as an integer) and feedback messages\n are passed to this function.\n\n Args:\n correct (int): the index of the correct answer (should be an instruction). Starts at 1.\n msgs (list(str)): a list containing all feedback messages belonging to each choice of the\n student. The list should have the same length as the number of options.\n \"\"\"\n if not issubclass(type(arg_1), int):\n raise InstructorError(\n \"Inside `Func()`, the argument `correct` should be an integer.\"\n )\n\n arg_3 = arg_0.student_process\n if not isDefinedInProcess(MC_VAR_NAME, arg_3):\n raise InstructorError(\"Option not available in the student process\")\n else:\n arg_4 = getOptionFromProcess(arg_3, MC_VAR_NAME)\n if not issubclass(type(arg_4), int):\n raise InstructorError(\"selected_option should be an integer\")\n\n if arg_4 < 1 or arg_1 < 1:\n raise InstructorError(\n \"selected_option and correct should be greater than zero\"\n )\n\n if arg_4 > len(arg_2) or arg_1 > len(arg_2):\n raise InstructorError(\"there are not enough feedback messages defined\")\n\n arg_5 = arg_2[arg_4 - 1]\n\n arg_0.reporter.success_msg = arg_2[arg_1 - 1]\n\n arg_0.do_test(EqualTest(arg_4, arg_1, arg_5))"} +{"_id": "doc_8730", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Get a value from process, return tuple of value, res if succesful\"\"\"\n if not isinstance(arg_0, (UndefinedValue, Exception)):\n arg_3 = getRepresentation(arg_1, arg_2)\n return arg_3, arg_0\n else:\n return arg_0, str(arg_0)"} +{"_id": "doc_8731", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Override the solution code with something arbitrary.\n\n There might be cases in which you want to temporarily Func the solution code\n so you can allow for alternative ways of solving an exercise.\n When you use ``Func()`` in an SCT chain, the remainder of that SCT chain will\n run as if the solution code you specified is the only code that was in the solution.\n\n Check the glossary for an example (pandas plotting)\n\n Args:\n solution: solution code as a string that Funcs the original solution code.\n state: State instance describing student and solution code. 
Can be omitted if used with Ex().\n \"\"\"\n\n # the old ast may be a number of node types, but generally either a\n # (1) ast.Module, or for single expressions...\n # (2) whatever was grabbed using module.body[0]\n # (3) module.body[0].value, when module.body[0] is an Expr node\n arg_2 = arg_0.solution_ast\n arg_3 = ast.parse(arg_1)\n if not isinstance(arg_2, ast.Module) and len(arg_3.body) == 1:\n arg_4 = arg_3.body[0]\n arg_5 = [arg_4, arg_4.value] if isinstance(arg_4, ast.Expr) else [arg_4]\n for arg_6 in arg_5:\n if isinstance(arg_6, arg_2.__class__):\n arg_3 = arg_6\n break\n\n arg_7 = arg_0.messages[-1] if arg_0.messages else {}\n arg_8 = arg_0.to_child(\n solution_ast=arg_3,\n student_ast=arg_0.student_ast,\n highlight=arg_0.highlight,\n append_message={\"msg\": \"\", \"kwargs\": arg_7},\n )\n\n return arg_8"} +{"_id": "doc_8732", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Check whether an object is an instance of a certain class.\n\n ``Func()`` can currently only be used when chained from ``check_object()``, the function that is\n used to 'zoom in' on the object of interest.\n\n Args:\n inst (class): The class that the object should have.\n not_instance_msg (str): When specified, this overrides the automatically generated message in case\n the object does not have the expected class.\n state (State): The state that is passed in through the SCT chain (don't specify this).\n\n :Example:\n\n Student code and solution code::\n\n import numpy as np\n arr = np.array([1, 2, 3, 4, 5])\n\n SCT::\n\n # Verify the class of arr\n import numpy\n Ex().check_object('arr').Func(numpy.ndarray)\n \"\"\"\n\n arg_0.assert_is([\"object_assignments\"], \"Func\", [\"check_object\"])\n\n arg_3 = arg_0.solution_parts.get(\"name\")\n arg_4 = arg_0.student_parts.get(\"name\")\n\n if arg_2 is None:\n arg_2 = \"Is it a {{inst.__name__}}?\"\n\n if not isInstanceInProcess(arg_3, arg_1, arg_0.solution_process):\n raise InstructorError(\n \"`Func()` noticed that `%s` is not a `%s` in the solution process.\"\n % (arg_3, arg_1.__name__)\n )\n\n arg_5 = arg_0.build_message(arg_2, {\"inst\": arg_1})\n arg_6 = Feedback(arg_5, arg_0)\n arg_0.do_test(InstanceProcessTest(arg_4, arg_1, arg_0.student_process, arg_6))\n\n return arg_0"} +{"_id": "doc_8733", "title": "", "text": "def Func(arg_0):\n \"\"\"Return copy of instance, omitting entries that are EMPTY\"\"\"\n return arg_0.__class__(\n [(arg_1, arg_2) for arg_1, arg_2 in arg_0.items() if arg_2 is not arg_0.EMPTY], is_empty=False\n )"} +{"_id": "doc_8734", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"getter for Parser outputs\"\"\"\n # return cached output if possible\n arg_4 = arg_1.__name__ + str(hash(arg_3))\n if arg_0._parser_cache.get(arg_4):\n arg_5 = arg_0._parser_cache[arg_4]\n else:\n # otherwise, run parser over tree\n arg_5 = arg_1()\n # set mappings for parsers that inspect attribute access\n if arg_2 != \"mappings\" and arg_1 in [\n FunctionParser,\n ObjectAccessParser,\n ]:\n arg_5.mappings = arg_0.context_mappings.copy()\n # run parser\n arg_5.visit(arg_3)\n # cache\n arg_0._parser_cache[arg_4] = arg_5\n return getattr(arg_5, arg_2)"} +{"_id": "doc_8735", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"When dispatched on loops, has_context the target vars are the attribute _target_vars.\n\n Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than\n one of its attributes (e.g. body). 
Purely for convenience.\n \"\"\"\n return _test(\n arg_0,\n arg_1 or MSG_INCORRECT_LOOP,\n arg_2,\n tv_name=\"_target_vars\",\n highlight_name=\"target\",\n )"} +{"_id": "doc_8736", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=None):\n \"\"\"Return child state with name part as its ast tree\"\"\"\n\n if arg_3 is None:\n arg_3 = \"Are you sure you defined the {{part}}? \"\n if arg_4 is None:\n arg_4 = \"Did you correctly specify the {{part}}? \"\n\n if not arg_2:\n arg_2 = arg_1\n arg_5 = {\"msg\": arg_4, \"kwargs\": {\"part\": arg_2}}\n\n has_part(arg_0, arg_1, arg_3, arg_5[\"kwargs\"])\n\n arg_6 = arg_0.student_parts[arg_1]\n arg_7 = arg_0.solution_parts[arg_1]\n\n assert_ast(arg_0, arg_7, arg_5[\"kwargs\"])\n\n return part_to_child(arg_6, arg_7, arg_5, arg_0)"} +{"_id": "doc_8737", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None, arg_5=None):\n \"\"\"Return child state with indexed name part as its ast tree.\n\n ``index`` can be:\n\n - an integer, in which case the student/solution_parts are indexed by position.\n - a string, in which case the student/solution_parts are expected to be a dictionary.\n - a list of indices (which can be integer or string), in which case the student parts are indexed step by step.\n \"\"\"\n\n if arg_4 is None:\n arg_4 = \"Are you sure you defined the {{part}}? \"\n if arg_5 is None:\n arg_5 = \"Did you correctly specify the {{part}}? \"\n\n # create message\n arg_6 = get_ord(arg_2 + 1) if isinstance(arg_2, int) else \"\"\n arg_7 = {\"index\": arg_2, \"ordinal\": arg_6}\n arg_7.update(part=render(arg_3, arg_7))\n\n arg_8 = {\"msg\": arg_5, \"kwargs\": arg_7}\n\n # check there are enough parts for index\n has_part(arg_0, arg_1, arg_4, arg_7, arg_2)\n\n # get part at index\n arg_9 = arg_0.student_parts[arg_1]\n arg_10 = arg_0.solution_parts[arg_1]\n\n if isinstance(arg_2, list):\n for arg_11 in arg_2:\n arg_9 = arg_9[arg_11]\n arg_10 = arg_10[arg_11]\n else:\n arg_9 = arg_9[arg_2]\n arg_10 = arg_10[arg_2]\n\n assert_ast(arg_0, arg_10, arg_7)\n\n # return child state from part\n return part_to_child(arg_9, arg_10, arg_8, arg_0)"} +{"_id": "doc_8738", "title": "", "text": "def Func(arg_0):\n\t\t\"\"\"\n\t\tReturn the true anomaly at each time\n\t\t\"\"\"\n\t\targ_0.f = _rsky._getf(arg_0.t_supersample, arg_0.t0, arg_0.per, arg_0.a,\n\t\t\t\t\t\t\t arg_0.inc*pi/180., arg_0.ecc, arg_0.w*pi/180.,\n\t\t\t\t\t\t\t arg_0.transittype, arg_0.nthreads)\n\t\treturn arg_0.f"} +{"_id": "doc_8739", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Loads the class from the class_path string \"\"\"\n if arg_0 is None:\n return arg_1\n\n arg_2 = arg_0.rsplit('.', 1)\n arg_3 = getattr(\n importlib.import_module(arg_2[0]),\n arg_2[1],\n arg_1\n ) if len(arg_2) > 1 else arg_1\n\n return arg_3"} +{"_id": "doc_8740", "title": "", "text": "def Func(arg_0):\n \"\"\" process pagination requests from request parameter \"\"\"\n arg_1 = 20\n arg_2 = 0\n arg_3 = 0\n if \"page_size\" in arg_0.POST:\n arg_1 = int(arg_0.POST[\"page_size\"])\n arg_4 = getattr(settings, \"SEARCH_MAX_PAGE_SIZE\", 100)\n # The parens below are superfluous, but make it much clearer to the reader what is going on\n if not (0 < arg_1 <= arg_4): # pylint: disable=superfluous-parens\n raise ValueError(_('Invalid page size of {page_size}').format(page_size=arg_1))\n\n if \"page_index\" in arg_0.POST:\n arg_2 = int(arg_0.POST[\"page_index\"])\n arg_3 = arg_2 * arg_1\n return arg_1, arg_3, arg_2"} +{"_id": "doc_8741", "title": "", "text": "def Func(arg_0):\n \"\"\" 
Create separate dictionary of supported filter values provided \"\"\"\n return {\n arg_1: arg_0.POST[arg_1]\n for arg_1 in arg_0.POST\n if arg_1 in course_discovery_filter_fields()\n }"} +{"_id": "doc_8742", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Search for courses\n\n Args:\n request (required) - django request object\n\n Returns:\n http json response with the following fields\n \"took\" - how many seconds the operation took\n \"total\" - how many results were found\n \"max_score\" - maximum score from these resutls\n \"results\" - json array of result documents\n\n or\n\n \"error\" - displayable information about an error that occured on the server\n\n POST Params:\n \"search_string\" (optional) - text with which to search for courses\n \"page_size\" (optional)- how many results to return per page (defaults to 20, with maximum cutoff at 100)\n \"page_index\" (optional) - for which page (zero-indexed) to include results (defaults to 0)\n \"\"\"\n arg_1 = {\n \"error\": _(\"Nothing to search\")\n }\n arg_2 = 500\n\n arg_3 = arg_0.POST.get(\"search_string\", None)\n\n try:\n arg_4, arg_5, arg_6 = _process_pagination_values(arg_0)\n arg_7 = _process_field_values(arg_0)\n\n # Analytics - log search request\n track.emit(\n 'edx.Func.search.initiated',\n {\n \"search_term\": arg_3,\n \"page_size\": arg_4,\n \"page_number\": arg_6,\n }\n )\n\n arg_1 = Func_search(\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_7=arg_7,\n )\n\n # Analytics - log search results before sending to browser\n track.emit(\n 'edx.Func.search.results_displayed',\n {\n \"search_term\": arg_3,\n \"page_size\": arg_4,\n \"page_number\": arg_6,\n \"results_count\": arg_1[\"total\"],\n }\n )\n\n arg_2 = 200\n\n except ValueError as invalid_err:\n arg_1 = {\n \"error\": six.text_type(invalid_err)\n }\n log.debug(six.text_type(invalid_err))\n\n except QueryParseError:\n arg_1 = {\n \"error\": _('Your query seems malformed. 
Check for unmatched quotes.')\n }\n\n # Allow for broad exceptions here - this is an entry point from external reference\n except Exception as err: # pylint: disable=broad-except\n arg_1 = {\n \"error\": _('An error occurred when searching for \"{search_string}\"').format(search_string=arg_3)\n }\n log.exception(\n 'Search view exception when searching for %s for user %s: %r',\n arg_3,\n arg_0.user.id,\n err\n )\n\n return JsonResponse(arg_1, status=arg_2)"} +{"_id": "doc_8743", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Return field to apply into filter, if an array then use a range, otherwise look for a term match \"\"\"\n arg_2 = None\n if isinstance(arg_1, ValueRange):\n arg_3 = {}\n if arg_1.lower:\n arg_3.update({\"gte\": arg_1.lower_string})\n if arg_1.upper:\n arg_3.update({\"lte\": arg_1.upper_string})\n arg_2 = {\n \"range\": {\n arg_0: arg_3\n }\n }\n elif _is_iterable(arg_1):\n arg_2 = {\n \"terms\": {\n arg_0: arg_1\n }\n }\n else:\n arg_2 = {\n \"term\": {\n arg_0: arg_1\n }\n }\n return arg_2"} +{"_id": "doc_8744", "title": "", "text": "def Func(arg_0):\n \"\"\"\n We have a field_dictionary - we want to match the values for an elasticsearch \"match\" query\n This is only potentially useful when trying to tune certain search operations\n \"\"\"\n def field_item(arg_1):\n \"\"\" format field match as \"match\" item for elasticsearch query \"\"\"\n return {\n \"match\": {\n arg_1: arg_0[arg_1]\n }\n }\n\n return [field_item(arg_1) for arg_1 in arg_0]"} +{"_id": "doc_8745", "title": "", "text": "def Func(arg_0):\n \"\"\"\n We have a filter_dictionary - this means that if the field is included\n and matches, then we can include, OR if the field is undefined, then we\n assume it is safe to include\n \"\"\"\n def filter_item(arg_1):\n \"\"\" format elasticsearch filter to pass if value matches OR field is not included \"\"\"\n if arg_0[arg_1] is not None:\n return {\n \"or\": [\n _get_filter_field(arg_1, arg_0[arg_1]),\n {\n \"missing\": {\n \"field\": arg_1\n }\n }\n ]\n }\n\n return {\n \"missing\": {\n \"field\": arg_1\n }\n }\n\n return [filter_item(arg_1) for arg_1 in arg_0]"} +{"_id": "doc_8746", "title": "", "text": "def Func(arg_0):\n \"\"\" We have a list of terms with which we return facets \"\"\"\n arg_1 = {}\n for arg_2 in arg_0:\n arg_3 = {\"field\": arg_2}\n if arg_0[arg_2]:\n for arg_4 in arg_0[arg_2]:\n arg_3[arg_4] = arg_0[arg_2][arg_4]\n\n arg_1[arg_2] = {\n \"terms\": arg_3\n }\n\n return arg_1"} +{"_id": "doc_8747", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" fetch mapped-items structure from cache \"\"\"\n return cache.get(arg_0.get_cache_item_name(arg_1, arg_2), {})"} +{"_id": "doc_8748", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" Logs indexing errors and raises a general ElasticSearch Exception\"\"\"\n arg_2 = []\n for arg_3 in arg_1:\n arg_2.append(str(arg_3))\n raise exceptions.ElasticsearchException(', '.join(arg_2))"} +{"_id": "doc_8749", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Interfaces with the elasticsearch mappings for the index\n prevents multiple loading of the same mappings from ES when called more than once\n\n Mappings format in elasticsearch is as follows:\n {\n \"doc_type\": {\n \"properties\": {\n \"nested_property\": {\n \"properties\": {\n \"an_analysed_property\": {\n \"type\": \"string\"\n },\n \"another_analysed_property\": {\n \"type\": \"string\"\n }\n }\n },\n \"a_not_analysed_property\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"a_date_property\": {\n 
\"type\": \"date\"\n }\n }\n }\n }\n\n We cache the properties of each doc_type, if they are not available, we'll load them again from Elasticsearch\n \"\"\"\n # Try loading the mapping from the cache.\n arg_2 = ElasticSearchEngine.get_mappings(arg_0.index_name, arg_1)\n\n # Fall back to Elasticsearch\n if not arg_2:\n arg_2 = arg_0._es.indices.get_mapping(\n index=arg_0.index_name,\n arg_1=arg_1,\n ).get(arg_0.index_name, {}).get('mappings', {}).get(arg_1, {})\n\n # Cache the mapping, if one was retrieved\n if arg_2:\n ElasticSearchEngine.set_mappings(\n arg_0.index_name,\n arg_1,\n arg_2\n )\n\n return arg_2"} +{"_id": "doc_8750", "title": "", "text": "def Func(arg_0, arg_1, arg_2, **arg_3):\n \"\"\"\n Implements call to add documents to the ES Func\n Note the call to _check_mappings which will setup fields with the desired mappings\n \"\"\"\n\n try:\n arg_4 = []\n for arg_5 in arg_2:\n arg_0._check_mappings(arg_1, arg_5)\n arg_6 = arg_5['id'] if 'id' in arg_5 else None\n log.debug(\"Funcing %s object with id %s\", arg_1, arg_6)\n arg_7 = {\n \"_Func\": arg_0.Func_name,\n \"_type\": arg_1,\n \"_id\": arg_6,\n \"_source\": arg_5\n }\n arg_4.append(arg_7)\n # bulk() returns a tuple with summary information\n # number of successfully executed actions and number of errors if stats_only is set to True.\n arg_8, arg_9 = bulk(\n arg_0._es,\n arg_4,\n **arg_3\n )\n if arg_9:\n ElasticSearchEngine.log_Funcing_error(arg_9)\n # Broad exception handler to protect around bulk call\n except Exception as ex:\n # log information and re-raise\n log.exception(\"error while Funcing - %s\", str(ex))\n raise"} +{"_id": "doc_8751", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=False,\n **arg_8): # pylint: disable=too-many-arguments, too-many-locals, too-many-branches, arguments-differ\n \"\"\"\n Implements call to Func the index for the desired content.\n\n Args:\n query_string (str): the string of values upon which to Func within the\n content of the objects within the index\n\n field_dictionary (dict): dictionary of values which _must_ exist and\n _must_ match in order for the documents to be included in the results\n\n filter_dictionary (dict): dictionary of values which _must_ match if the\n field exists in order for the documents to be included in the results;\n documents for which the field does not exist may be included in the\n results if they are not otherwise filtered out\n\n exclude_dictionary(dict): dictionary of values all of which which must\n not match in order for the documents to be included in the results;\n documents which have any of these fields and for which the value matches\n one of the specified values shall be filtered out of the result set\n\n facet_terms (dict): dictionary of terms to include within Func\n facets list - key is the term desired to facet upon, and the value is a\n dictionary of extended information to include. 
Supported right now is a\n size specification for a cap upon how many facet results to return (can\n be an empty dictionary to use default size for underlying engine):\n\n e.g.\n {\n \"org\": {\"size\": 10}, # only show top 10 organizations\n \"modes\": {}\n }\n\n use_field_match (bool): flag to indicate whether to use elastic\n filtering or elastic matching for field matches - this is nothing but a\n potential performance tune for certain queries\n\n (deprecated) exclude_ids (list): list of id values to exclude from the results -\n useful for finding maches that aren't \"one of these\"\n\n Returns:\n dict object with results in the desired format\n {\n \"took\": 3,\n \"total\": 4,\n \"max_score\": 2.0123,\n \"results\": [\n {\n \"score\": 2.0123,\n \"data\": {\n ...\n }\n },\n {\n \"score\": 0.0983,\n \"data\": {\n ...\n }\n }\n ],\n \"facets\": {\n \"org\": {\n \"total\": total_count,\n \"other\": 1,\n \"terms\": {\n \"MITx\": 25,\n \"HarvardX\": 18\n }\n },\n \"modes\": {\n \"total\": modes_count,\n \"other\": 15,\n \"terms\": {\n \"honor\": 58,\n \"verified\": 44,\n }\n }\n }\n }\n\n Raises:\n ElasticFuncException when there is a problem with the response from elasticFunc\n\n Example usage:\n .Func(\n \"find the words within this string\",\n {\n \"must_have_field\": \"mast_have_value for must_have_field\"\n },\n {\n\n }\n )\n \"\"\"\n\n log.debug(\"Funcing index with %s\", arg_1)\n\n arg_9 = []\n arg_10 = []\n\n # We have a query string, Func all fields for matching text within the \"content\" node\n if arg_1:\n if six.PY2:\n arg_1 = arg_1.encode('utf-8').translate(None, RESERVED_CHARACTERS)\n else:\n arg_1 = arg_1.translate(arg_1.maketrans('', '', RESERVED_CHARACTERS))\n arg_9.append({\n \"query_string\": {\n \"fields\": [\"content.*\"],\n \"query\": arg_1\n }\n })\n\n if arg_2:\n if arg_7:\n arg_9.extend(_process_field_queries(arg_2))\n else:\n arg_10.extend(_process_field_filters(arg_2))\n\n if arg_3:\n arg_10.extend(_process_filters(arg_3))\n\n # Support deprecated argument of exclude_ids\n if arg_6:\n if not arg_4:\n arg_4 = {}\n if \"_id\" not in arg_4:\n arg_4[\"_id\"] = []\n arg_4[\"_id\"].extend(arg_6)\n\n if arg_4:\n arg_10.append(_process_exclude_dictionary(arg_4))\n\n arg_11 = {\n \"match_all\": {}\n }\n if arg_9:\n arg_11 = {\n \"bool\": {\n \"must\": arg_9\n }\n }\n\n arg_12 = arg_11\n if arg_10:\n arg_13 = {\n \"bool\": {\n \"must\": arg_10\n }\n }\n arg_12 = {\n \"filtered\": {\n \"query\": arg_11,\n \"filter\": arg_13,\n }\n }\n\n arg_14 = {\"query\": arg_12}\n if arg_5:\n arg_15 = _process_facet_terms(arg_5)\n if arg_15:\n arg_14[\"facets\"] = arg_15\n\n try:\n arg_16 = arg_0._es.Func(\n index=arg_0.index_name,\n arg_14=arg_14,\n **arg_8\n )\n except exceptions.ElasticFuncException as ex:\n arg_17 = six.text_type(ex)\n if 'QueryParsingException' in arg_17:\n log.exception(\"Malformed Func query: %s\", arg_17)\n raise QueryParseError('Malformed Func query.')\n else:\n # log information and re-raise\n log.exception(\"error while Funcing index - %s\", str(arg_17))\n raise\n\n return _translate_hits(arg_16)"} +{"_id": "doc_8752", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=10,\n arg_3=0,\n arg_4=None):\n \"\"\" Call the search engine with the appropriate parameters \"\"\"\n # field_, filter_ and exclude_dictionary(s) can be overridden by calling application\n # field_dictionary includes course if course_id provided\n (arg_5, arg_6, arg_7) = SearchFilterGenerator.generate_field_filters(\n arg_1=arg_1,\n arg_4=arg_4\n )\n\n arg_8 = 
SearchEngine.get_search_engine(getattr(settings, \"COURSEWARE_INDEX_NAME\", \"courseware_index\"))\n if not arg_8:\n raise NoSearchEngineError(\"No search engine specified in settings.SEARCH_ENGINE\")\n\n arg_9 = arg_8.search_string(\n arg_0,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_2=arg_2,\n arg_3=arg_3,\n doc_type=\"courseware_content\",\n )\n\n # post-process the result\n for arg_10 in arg_9[\"results\"]:\n arg_10[\"data\"] = SearchResultProcessor.process_result(arg_10[\"data\"], arg_0, arg_1)\n\n arg_9[\"access_denied_count\"] = len([r for r in arg_9[\"results\"] if r[\"data\"] is None])\n arg_9[\"results\"] = [r for r in arg_9[\"results\"] if r[\"data\"] is not None]\n\n return arg_9"} +{"_id": "doc_8753", "title": "", "text": "def Func(arg_0=None, arg_1=20, arg_2=0, arg_3=None):\n \"\"\"\n Course Discovery activities against the search engine index of course details\n \"\"\"\n # We'll ignore the course-enrollemnt informaiton in field and filter\n # dictionary, and use our own logic upon enrollment dates for these\n arg_4 = [\"org\"]\n (arg_5, arg_6, arg_7) = SearchFilterGenerator.generate_field_filters()\n arg_8 = {}\n arg_8.update({arg_9: arg_5[arg_9] for arg_9 in arg_5 if arg_9 in arg_4})\n if arg_3:\n arg_8.update(arg_3)\n if not getattr(settings, \"SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING\", False):\n arg_8[\"enrollment_start\"] = DateRange(None, datetime.utcnow())\n\n arg_10 = SearchEngine.get_search_engine(getattr(settings, \"COURSEWARE_INDEX_NAME\", \"courseware_index\"))\n if not arg_10:\n raise NoSearchEngineError(\"No search engine specified in settings.SEARCH_ENGINE\")\n\n arg_11 = arg_10.search(\n query_string=arg_0,\n doc_type=\"course_info\",\n arg_1=arg_1,\n arg_2=arg_2,\n # only show when enrollment start IS provided and is before now\n arg_3=arg_8,\n # show if no enrollment end is provided and has not yet been reached\n filter_dictionary={\"enrollment_end\": DateRange(datetime.utcnow(), None)},\n arg_7=arg_7,\n facet_terms=course_discovery_facets(),\n )\n\n return arg_11"} +{"_id": "doc_8754", "title": "", "text": "def Func(arg_0):\n \"\"\" Used by default implementation for finding excerpt \"\"\"\n arg_1 = [value for value in six.itervalues(arg_0) if not isinstance(value, dict)]\n for arg_2 in [dv for dv in six.itervalues(arg_0) if isinstance(dv, dict)]:\n arg_1.extend(SearchResultProcessor.Func(arg_2))\n return arg_1"} +{"_id": "doc_8755", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\" Used by default property excerpt \"\"\"\n arg_3 = [w.lower() for w in arg_1]\n\n def has_match(arg_4):\n \"\"\" Do any of the words match within the string \"\"\"\n arg_5 = arg_4.lower()\n for arg_6 in arg_3:\n if arg_6 in arg_5:\n return True\n return False\n\n arg_7 = [textwrap.wrap(s) for s in arg_0]\n arg_8 = list(chain.from_iterable(arg_7))\n arg_9 = [ms for ms in arg_8 if has_match(ms)]\n\n arg_10 = 0\n arg_11 = None\n for arg_12, arg_13 in enumerate(arg_9):\n arg_10 += len(arg_13)\n if arg_10 >= arg_2:\n arg_11 = arg_12\n break\n\n return arg_9[0:arg_11]"} +{"_id": "doc_8756", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\" decorate the matches within the excerpt \"\"\"\n arg_2 = re.finditer(arg_1, arg_0, re.IGNORECASE)\n for arg_3 in set([match.group() for match in arg_2]):\n arg_0 = arg_0.replace(\n arg_3,\n getattr(settings, \"SEARCH_MATCH_DECORATION\", u\"{}\").format(arg_3)\n )\n return arg_0"} +{"_id": "doc_8757", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Called during post processing of result\n Any properties defined in your 
subclass will get exposed as members of the result json from the search\n \"\"\"\n for arg_1 in [p[0] for p in inspect.getmembers(arg_0.__class__) if isinstance(p[1], property)]:\n arg_0._results_fields[arg_1] = getattr(arg_0, arg_1, None)"} +{"_id": "doc_8758", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"\n Called from within search handler. Finds desired subclass and decides if the\n result should be removed and adds properties derived from the result information\n \"\"\"\n arg_4 = _load_class(getattr(settings, \"SEARCH_RESULT_PROCESSOR\", None), arg_0)\n arg_5 = arg_4(arg_1, arg_2)\n if arg_5.should_remove(arg_3):\n return None\n try:\n arg_5.add_properties()\n # protect around any problems introduced by subclasses within their properties\n except Exception as ex: # pylint: disable=broad-except\n log.exception(\"error processing properties for %s - %s: will remove from results\",\n json.dumps(arg_1, arg_0=DjangoJSONEncoder), str(ex))\n return None\n return arg_1"} +{"_id": "doc_8759", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Property to display a useful Func representing the matches within the results\n \"\"\"\n if \"content\" not in arg_0._results_fields:\n return None\n\n arg_1 = [arg_0._match_phrase]\n if six.PY2:\n arg_2 = [\n phrase.decode('utf-8')\n for phrase in shlex.split(arg_0._match_phrase.encode('utf-8'))\n ]\n else:\n arg_2 = [\n phrase\n for phrase in shlex.split(arg_0._match_phrase)\n ]\n if len(arg_2) > 1:\n arg_1.extend(arg_2)\n else:\n arg_1 = arg_2\n\n arg_3 = SearchResultProcessor.find_matches(\n SearchResultProcessor.strings_in_dictionary(arg_0._results_fields[\"content\"]),\n arg_1,\n DESIRED_EXCERPT_LENGTH\n )\n arg_4 = ELLIPSIS.join(arg_3)\n\n for arg_5 in arg_1:\n arg_4 = SearchResultProcessor.decorate_matches(arg_4, arg_5)\n\n return arg_4"} +{"_id": "doc_8760", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Called from within search handler\n Finds desired subclass and adds filter information based upon user information\n \"\"\"\n arg_2 = _load_class(getattr(settings, \"SEARCH_FILTER_GENERATOR\", None), arg_0)()\n return (\n arg_2.field_dictionary(**arg_1),\n arg_2.filter_dictionary(**arg_1),\n arg_2.exclude_dictionary(**arg_1),\n )"} +{"_id": "doc_8761", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Called from within search handler\n Finds desired subclass and calls initialize method\n \"\"\"\n arg_2 = _load_class(getattr(settings, \"SEARCH_INITIALIZER\", None), arg_0)()\n return arg_2.initialize(**arg_1)"} +{"_id": "doc_8762", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Opens data file and for each line, calls _eat_name_line\"\"\"\n arg_0.names = {}\n with codecs.open(arg_1, encoding=\"iso8859-1\") as f:\n for arg_3 in f:\n if any(map(lambda c: 128 < ord(c) < 160, arg_3)):\n arg_3 = arg_3.encode(\"iso8859-1\").decode(\"windows-1252\")\n arg_0._eat_name_line(arg_3.strip())"} +{"_id": "doc_8763", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Parses one line of data file\"\"\"\n if arg_1[0] not in \"#=\":\n arg_2 = arg_1.split()\n arg_3 = arg_1[30:-1]\n arg_4 = map_name(arg_2[1])\n if not arg_0.case_sensitive:\n arg_4 = arg_4.lower()\n\n if arg_2[0] == \"M\":\n arg_0._set(arg_4, u\"male\", arg_3)\n elif arg_2[0] == \"1M\" or arg_2[0] == \"?M\":\n arg_0._set(arg_4, u\"mostly_male\", arg_3)\n elif arg_2[0] == \"F\":\n arg_0._set(arg_4, u\"female\", arg_3)\n elif arg_2[0] == \"1F\" or arg_2[0] == \"?F\":\n arg_0._set(arg_4, u\"mostly_female\", arg_3)\n elif arg_2[0] == \"?\":\n 
arg_0._set(arg_4, arg_0.unknown_value, arg_3)\n else:\n raise \"Not sure what to do with a sex of %s\" % arg_2[0]"} +{"_id": "doc_8764", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Finds the most popular gender for the given name counting by given counter\"\"\"\n if arg_1 not in arg_0.names:\n return arg_0.unknown_value\n\n arg_3, arg_4 = (0, 0)\n arg_5 = arg_0.names[arg_1].keys()[0]\n for arg_6, arg_7 in arg_0.names[arg_1].items():\n arg_8, arg_9 = arg_2(arg_7)\n if arg_8 > arg_3 or (arg_8 == arg_3 and arg_9 > arg_4):\n arg_3, arg_4, arg_5 = arg_8, arg_9, arg_6\n\n return arg_5 if arg_3 > 0 else arg_0.unknown_value"} +{"_id": "doc_8765", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"Returns best gender for the given name and country pair\"\"\"\n if not arg_0.case_sensitive:\n arg_1 = arg_1.lower()\n\n if arg_1 not in arg_0.names:\n return arg_0.unknown_value\n elif not arg_2:\n def arg_5(arg_3):\n arg_3 = map(ord, arg_3.replace(\" \", \"\"))\n return (len(arg_3),\n sum(map(lambda c: c > 64 and c-55 or c-48, arg_3)))\n return arg_0._most_popular_gender(arg_1, arg_5)\n elif arg_2 in arg_0.__class__.COUNTRIES:\n arg_4 = arg_0.__class__.COUNTRIES.index(arg_2)\n arg_5 = lambda e: (ord(e[arg_4])-32, 0)\n return arg_0._most_popular_gender(arg_1, arg_5)\n else:\n raise NoCountryError(\"No such country: %s\" % arg_2)"} +{"_id": "doc_8766", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Executes the suite of TidyPy tools upon the project and returns the\n issues that are found.\n\n :param config: the TidyPy configuration to use\n :type config: dict\n :param path: that path to the project to analyze\n :type path: str\n :param progress:\n the progress reporter object that will receive callbacks during the\n execution of the tool suite. 
If not specified, not progress\n notifications will occur.\n :type progress: tidypy.Progress\n :rtype: tidypy.Collector\n \"\"\"\n\n arg_2 = arg_2 or QuietProgress()\n arg_2.on_start()\n\n arg_3 = SyncManager()\n arg_3.start()\n\n arg_4 = 0\n arg_5 = arg_3.Queue()\n for arg_6, arg_7 in iteritems(get_tools()):\n if arg_0[arg_6]['use'] and arg_7.can_be_used():\n arg_4 += 1\n arg_5.put({\n 'name': arg_6,\n 'config': arg_0[arg_6],\n })\n\n arg_8 = Collector(arg_0)\n if not arg_4:\n arg_2.on_finish()\n return arg_8\n\n arg_9 = arg_3.Queue()\n arg_10 = arg_3.dict({\n 'finder': Finder(arg_1, arg_0),\n })\n\n arg_11 = []\n for arg_12 in range(arg_0['workers']):\n arg_13 = Worker(\n args=(\n arg_5,\n arg_9,\n arg_10,\n ),\n )\n arg_13.start()\n arg_11.append(arg_13)\n\n while arg_4:\n try:\n arg_14 = arg_9.get(True, 0.25)\n except Empty:\n pass\n else:\n if arg_14['type'] == 'start':\n arg_2.on_tool_start(arg_14['tool'])\n elif arg_14['type'] == 'complete':\n arg_8.add_issues(arg_14['issues'])\n arg_2.on_tool_finish(arg_14['tool'])\n arg_4 -= 1\n\n arg_2.on_finish()\n\n return arg_8"} +{"_id": "doc_8767", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=None):\n \"\"\"\n Executes the configured suite of issue reports.\n\n :param config: the TidyPy configuration to use\n :type config: dict\n :param path: that path to the project that was analyzed\n :type path: str\n :param collector: the issues to report\n :type collector: tidypy.Collector\n \"\"\"\n\n arg_5 = get_reports()\n for arg_6 in arg_0.get('requested_reports', []):\n if arg_6.get('type') and arg_6['type'] in arg_5:\n arg_7 = arg_0.get('report', {}).get(arg_6['type'], {})\n arg_7.update(arg_6)\n arg_8 = arg_5[arg_6['type']](\n arg_7,\n arg_1,\n arg_4=arg_4,\n )\n arg_8.produce(arg_2)\n if arg_3:\n arg_3(arg_6)"} +{"_id": "doc_8768", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Determines whether or not the specified file is excluded by the\n project's configuration.\n\n :param path: the path to check\n :type path: pathlib.Path\n :rtype: bool\n \"\"\"\n\n arg_2 = arg_1.relative_to(arg_0.base_path).as_posix()\n return matches_masks(arg_2, arg_0.excludes)"} +{"_id": "doc_8769", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Determines whether or not the specified directory is excluded by the\n project's configuration.\n\n :param path: the path to check\n :type path: pathlib.Path\n :rtype: bool\n \"\"\"\n\n if arg_0.is_excluded(arg_1):\n return True\n return matches_masks(arg_1.name, ALWAYS_EXCLUDED_DIRS)"} +{"_id": "doc_8770", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n A generator that produces a sequence of paths to files in the project\n that matches the specified filters.\n\n :param filters:\n the regular expressions to use when finding files in the project.\n If not specified, all files are returned.\n :type filters: list(str)\n \"\"\"\n\n arg_1 = compile_masks(arg_1 or [r'.*'])\n\n for Func in itervalues(arg_0._found):\n for arg_3 in Func:\n arg_4 = text_type(Path(arg_3).relative_to(arg_0.base_path))\n if matches_masks(arg_4, arg_1):\n yield arg_3"} +{"_id": "doc_8771", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n A generator that produces a sequence of paths to Func in the\n project that matches the specified filters.\n\n :param filters:\n the regular expressions to use when finding Func in the\n project. 
If not specified, all Func are returned.\n :type filters: list(str)\n :param containing:\n if a directory passes through the specified filters, it is checked\n for the presence of a file that matches one of the regular\n expressions in this parameter.\n :type containing: list(str)\n \"\"\"\n\n arg_1 = compile_masks(arg_1 or [r'.*'])\n arg_3 = compile_masks(arg_2)\n\n for arg_4, arg_5 in iteritems(arg_0._found):\n arg_6 = text_type(Path(arg_4).relative_to(arg_0.base_path))\n if matches_masks(arg_6, arg_1):\n if not arg_3 or arg_0._contains(arg_5, arg_3):\n yield arg_4"} +{"_id": "doc_8772", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Adds an issue to the collection.\n\n :param issues: the issue(s) to add\n :type issues: tidypy.Issue or list(tidypy.Issue)\n \"\"\"\n\n if not isinstance(arg_1, (list, tuple)):\n arg_1 = [arg_1]\n with arg_0._lock:\n arg_0._all_issues.extend(arg_1)\n arg_0._cleaned_issues = None"} +{"_id": "doc_8773", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Returns the number of issues in the collection.\n\n :param include_unclean:\n whether or not to include issues that are being ignored due to\n being a duplicate, excluded, etc.\n :type include_unclean: bool\n :rtype: int\n \"\"\"\n\n if arg_1:\n return len(arg_0._all_issues)\n arg_0._ensure_cleaned_issues()\n return len(arg_0._cleaned_issues)"} +{"_id": "doc_8774", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Retrieves the issues in the collection.\n\n :param sortby: the properties to sort the issues by\n :type sortby: list(str)\n :rtype: list(tidypy.Issue)\n \"\"\"\n\n arg_0._ensure_cleaned_issues()\n return arg_0._sort_issues(arg_0._cleaned_issues, arg_1)"} +{"_id": "doc_8775", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n A convenience method for parsing a TOML-serialized configuration.\n\n :param content: a TOML string containing a TidyPy configuration\n :type content: str\n :param is_pyproject:\n whether or not the content is (or resembles) a ``pyproject.toml``\n file, where the TidyPy configuration is located within a key named\n ``tool``.\n :type is_pyproject: bool\n :rtype: dict\n \"\"\"\n\n arg_3 = pytoml.loads(arg_1)\n\n if arg_2:\n arg_3 = arg_3.get('tool', {})\n arg_3 = arg_3.get('tidypy', {})\n\n return arg_3"} +{"_id": "doc_8776", "title": "", "text": "def Func():\n \"\"\"\n Retrieves the TidyPy tools that are available in the current Python\n environment.\n\n The returned dictionary has keys that are the tool names and values are the\n tool classes.\n\n :rtype: dict\n \"\"\"\n\n # pylint: disable=protected-access\n\n if not hasattr(Func, '_CACHE'):\n Func._CACHE = dict()\n for arg_2 in pkg_resources.iter_entry_points('tidypy.tools'):\n try:\n Func._CACHE[arg_2.name] = arg_2.load()\n except ImportError as exc: # pragma: no cover\n output_error(\n 'Could not load tool \"%s\" defined by \"%s\": %s' % (\n arg_2,\n arg_2.dist,\n exc,\n ),\n )\n return Func._CACHE"} +{"_id": "doc_8777", "title": "", "text": "def Func():\n \"\"\"\n Retrieves the TidyPy configuration extenders that are available in the\n current Python environment.\n\n The returned dictionary has keys are the extender names and values are the\n extender classes.\n\n :rtype: dict\n \"\"\"\n\n # pylint: disable=protected-access\n\n if not hasattr(Func, '_CACHE'):\n Func._CACHE = dict()\n for arg_2 in pkg_resources.iter_entry_points('tidypy.extenders'):\n try:\n Func._CACHE[arg_2.name] = arg_2.load()\n except ImportError as exc: # pragma: no cover\n output_error(\n 'Could not 
load extender \"%s\" defined by \"%s\": %s' % (\n arg_2,\n arg_2.dist,\n exc,\n ),\n )\n return Func._CACHE"} +{"_id": "doc_8778", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Clears out the cache of TidyPy configurations that were retrieved from\n outside the normal locations.\n \"\"\"\n\n arg_1 = get_cache_path(arg_0)\n\n if arg_0:\n os.remove(arg_1)\n else:\n shutil.rmtree(arg_1)"} +{"_id": "doc_8779", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Prints the specified string to ``stderr``.\n\n :param msg: the message to print\n :type msg: str\n \"\"\"\n\n click.echo(click.style(arg_0, fg='red'), err=True)"} +{"_id": "doc_8780", "title": "", "text": "def Func(arg_0):\n \"\"\"\n A context manager that will append the specified paths to Python's\n ``sys.path`` during the execution of the block.\n\n :param paths: the paths to append\n :type paths: list(str)\n \"\"\"\n\n arg_1 = arg_2.path\n arg_2.path = arg_0 + arg_2.path\n try:\n yield\n finally:\n arg_2.path = arg_1"} +{"_id": "doc_8781", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Compiles a list of regular expressions.\n\n :param masks: the regular expressions to compile\n :type masks: list(str) or str\n :returns: list(regular expression object)\n \"\"\"\n\n if not arg_0:\n arg_0 = []\n elif not isinstance(arg_0, (list, tuple)):\n arg_0 = [arg_0]\n\n return [\n re.compile(arg_1)\n for arg_1 in arg_0\n ]"} +{"_id": "doc_8782", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the AST of the specified file.\n\n This function performs simple caching so that the same file isn't read or\n parsed more than once per process.\n\n :param filepath: the file to parse\n :type filepath: str\n :returns: ast.AST\n \"\"\"\n\n with _AST_CACHE_LOCK:\n if arg_0 not in arg_2:\n arg_1 = read_file(arg_0)\n arg_2[arg_0] = ast.parse(arg_1, filename=arg_0)\n return arg_2[arg_0]"} +{"_id": "doc_8783", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Called when an individual tool completes execution.\n\n :param tool: the name of the tool that completed\n :type tool: str\n \"\"\"\n\n with arg_0._lock:\n if arg_1 in arg_0.current_tools:\n arg_0.current_tools.remove(arg_1)\n arg_0.completed_tools.append(arg_1)"} +{"_id": "doc_8784", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Execute an x3270 command\n\n `cmdstr` gets sent directly to the x3270 subprocess on it's stdin.\n \"\"\"\n if arg_0.is_terminated:\n raise TerminatedError(\"this TerminalClient instance has been terminated\")\n\n log.debug(\"sending command: %s\", arg_1)\n arg_2 = Command(arg_0.app, arg_1)\n arg_3 = time.time()\n arg_2.execute()\n arg_4 = time.time() - arg_3\n log.debug(\"elapsed execution: {0}\".format(arg_4))\n arg_0.status = Status(arg_2.status_line)\n\n return arg_2"} +{"_id": "doc_8785", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Connect to a host\n \"\"\"\n if not arg_0.app.Func(arg_1):\n arg_2 = \"Connect({0})\".format(arg_1).encode(\"ascii\")\n arg_0.exec_command(arg_2)\n arg_0.last_host = arg_1"} +{"_id": "doc_8786", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Wait until the screen is ready, the cursor has been positioned\n on a modifiable field, and the keyboard is unlocked.\n\n Sometimes the server will \"unlock\" the keyboard but the screen will\n not yet be ready. 
In that case, an attempt to read or write to the\n screen will result in a 'E' keyboard status because we tried to\n read from a screen that is not yet ready.\n\n Using this method tells the client to wait until a field is\n detected and the cursor has been positioned on it.\n \"\"\"\n arg_0.exec_command(\"Wait({0}, InputField)\".format(arg_0.timeout).encode(\"ascii\"))\n if arg_0.status.keyboard != b\"U\":\n raise KeyboardStateError(\n \"keyboard not unlocked, state was: {0}\".format(\n arg_0.status.keyboard.decode(\"ascii\")\n )\n )"} +{"_id": "doc_8787", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n move the cursor to the given co-ordinates. Co-ordinates are 1\n based, as listed in the status area of the terminal.\n \"\"\"\n # the screen's co-ordinates are 1 based, but the command is 0 based\n arg_2 -= 1\n arg_1 -= 1\n arg_0.exec_command(\"MoveCursor({0}, {1})\".format(arg_1, arg_2).encode(\"ascii\"))"} +{"_id": "doc_8788", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n \"\"\"\n clears the field at the position given and inserts the string\n `tosend`\n\n tosend: the string to insert\n length: the length of the field\n\n Co-ordinates are 1 based, as listed in the status area of the\n terminal.\n\n raises: FieldTruncateError if `tosend` is longer than\n `length`.\n \"\"\"\n if arg_4 < len(arg_3):\n raise FieldTruncateError('length limit %d, but got \"%s\"' % (arg_4, arg_3))\n if arg_2 is not None and arg_1 is not None:\n arg_0.move_to(arg_1, arg_2)\n arg_0.delete_field()\n arg_0.send_string(arg_3)"} +{"_id": "doc_8789", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Configures this extension with a given configuration dictionary.\n This allows use of this extension without a flask app.\n\n Args:\n config (dict): A dictionary with configuration keys\n '''\n\n arg_0.config.update(arg_1)\n\n arg_0.config.setdefault('LDAP_PORT', 389)\n arg_0.config.setdefault('LDAP_HOST', None)\n arg_0.config.setdefault('LDAP_USE_SSL', False)\n arg_0.config.setdefault('LDAP_READONLY', True)\n arg_0.config.setdefault('LDAP_CHECK_NAMES', True)\n arg_0.config.setdefault('LDAP_BIND_DIRECT_CREDENTIALS', False)\n arg_0.config.setdefault('LDAP_BIND_DIRECT_PREFIX', '')\n arg_0.config.setdefault('LDAP_BIND_DIRECT_SUFFIX', '')\n arg_0.config.setdefault('LDAP_BIND_DIRECT_GET_USER_INFO', True)\n arg_0.config.setdefault('LDAP_ALWAYS_SEARCH_BIND', False)\n arg_0.config.setdefault('LDAP_BASE_DN', '')\n arg_0.config.setdefault('LDAP_BIND_USER_DN', None)\n arg_0.config.setdefault('LDAP_BIND_USER_PASSWORD', None)\n arg_0.config.setdefault('LDAP_SEARCH_FOR_GROUPS', True)\n arg_0.config.setdefault('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND', False)\n\n # Prepended to the Base DN to limit scope when searching for\n # Users/Groups.\n arg_0.config.setdefault('LDAP_USER_DN', '')\n arg_0.config.setdefault('LDAP_GROUP_DN', '')\n\n arg_0.config.setdefault('LDAP_BIND_AUTHENTICATION_TYPE', 'SIMPLE')\n\n # Ldap Filters\n arg_0.config.setdefault('LDAP_USER_SEARCH_SCOPE',\n 'LEVEL')\n arg_0.config.setdefault('LDAP_USER_OBJECT_FILTER',\n '(objectclass=person)')\n arg_0.config.setdefault('LDAP_USER_LOGIN_ATTR', 'uid')\n arg_0.config.setdefault('LDAP_USER_RDN_ATTR', 'uid')\n arg_0.config.setdefault(\n 'LDAP_GET_USER_ATTRIBUTES', ldap3.ALL_ATTRIBUTES)\n\n arg_0.config.setdefault('LDAP_GROUP_SEARCH_SCOPE',\n 'LEVEL')\n arg_0.config.setdefault(\n 'LDAP_GROUP_OBJECT_FILTER', '(objectclass=group)')\n arg_0.config.setdefault('LDAP_GROUP_MEMBERS_ATTR', 'uniqueMember')\n arg_0.config.setdefault(\n 
'LDAP_GET_GROUP_ATTRIBUTES', ldap3.ALL_ATTRIBUTES)\n arg_0.config.setdefault('LDAP_ADD_SERVER', True)\n\n if arg_0.config['LDAP_ADD_SERVER']:\n arg_0.add_server(\n hostname=arg_0.config['LDAP_HOST'],\n port=arg_0.config['LDAP_PORT'],\n use_ssl=arg_0.config['LDAP_USE_SSL']\n )"} +{"_id": "doc_8790", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Cleanup after a request. Close any open connections.\n \"\"\"\n\n arg_2 = stack.top\n if arg_2 is not None:\n if hasattr(arg_2, 'ldap3_manager_connections'):\n for arg_3 in arg_2.ldap3_manager_connections:\n arg_0.destroy_connection(arg_3)\n if hasattr(arg_2, 'ldap3_manager_main_connection'):\n log.debug(\n \"Unbinding a connection used within the request context.\")\n arg_2.ldap3_manager_main_connection.unbind()\n arg_2.ldap3_manager_main_connection = None"} +{"_id": "doc_8791", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n An abstracted authentication method. Decides whether to perform a\n direct bind or a search bind based upon the login attribute configured\n in the config.\n\n Args:\n username (str): Username of the user to bind\n password (str): User's password to bind with.\n\n Returns:\n AuthenticationResponse\n\n \"\"\"\n if arg_0.config.get('LDAP_BIND_DIRECT_CREDENTIALS'):\n arg_3 = arg_0.Func_direct_credentials(arg_1, arg_2)\n\n elif not arg_0.config.get('LDAP_ALWAYS_SEARCH_BIND') and \\\n arg_0.config.get('LDAP_USER_RDN_ATTR') == \\\n arg_0.config.get('LDAP_USER_LOGIN_ATTR'):\n # Since the user's RDN is the same as the login field,\n # we can do a direct bind.\n arg_3 = arg_0.Func_direct_bind(arg_1, arg_2)\n else:\n # We need to search the User's DN to find who the user is (and\n # their DN) so we can try bind with their password.\n arg_3 = arg_0.Func_search_bind(arg_1, arg_2)\n\n return arg_3"} +{"_id": "doc_8792", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Performs a search bind to authenticate a user. 
This is\n required when a the login attribute is not the same\n as the RDN, since we cannot string together their DN on\n the fly, instead we have to find it in the LDAP, then attempt\n to bind with their credentials.\n\n Args:\n username (str): Username of the user to bind (the field specified\n as LDAP_BIND_LOGIN_ATTR)\n password (str): User's password to bind with when we find their dn.\n\n Returns:\n AuthenticationResponse\n\n \"\"\"\n arg_3 = arg_0._make_connection(\n bind_user=arg_0.config.get('LDAP_BIND_USER_DN'),\n bind_password=arg_0.config.get('LDAP_BIND_USER_PASSWORD'),\n )\n\n try:\n arg_3.bind()\n log.debug(\"Successfully bound to LDAP as '{0}' for search_bind method\".format(\n arg_0.config.get('LDAP_BIND_USER_DN') or 'Anonymous'\n ))\n except Exception as e:\n arg_0.destroy_connection(arg_3)\n log.error(e)\n return AuthenticationResponse()\n\n # Find the user in the search path.\n arg_4 = '({search_attr}={username})'.format(\n search_attr=arg_0.config.get('LDAP_USER_LOGIN_ATTR'),\n arg_1=arg_1\n )\n arg_5 = '(&{0}{1})'.format(\n arg_0.config.get('LDAP_USER_OBJECT_FILTER'),\n arg_4,\n )\n\n log.debug(\n \"Performing an LDAP Search using filter '{0}', base '{1}', \"\n \"and scope '{2}'\".format(\n arg_5,\n arg_0.full_user_search_dn,\n arg_0.config.get('LDAP_USER_SEARCH_SCOPE')\n ))\n\n arg_3.search(\n search_base=arg_0.full_user_search_dn,\n arg_5=arg_5,\n search_scope=getattr(\n ldap3, arg_0.config.get('LDAP_USER_SEARCH_SCOPE')),\n attributes=arg_0.config.get('LDAP_GET_USER_ATTRIBUTES')\n )\n\n arg_6 = AuthenticationResponse()\n\n if len(arg_3.response) == 0 or \\\n (arg_0.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and\n len(arg_3.response) > 1):\n # Don't allow them to log in.\n log.debug(\n \"Authentication was not successful for user '{0}'\".format(arg_1))\n\n else:\n for arg_7 in arg_3.response:\n # Attempt to bind with each user we find until we can find\n # one that works.\n\n if 'type' not in arg_7 or arg_7.get('type') != 'searchResEntry':\n # Issue #13 - Don't return non-entry results.\n continue\n\n arg_8 = arg_0._make_connection(\n bind_user=arg_7['dn'],\n bind_password=arg_2\n )\n\n log.debug(\n \"Directly binding a connection to a server with \"\n \"user:'{0}'\".format(arg_7['dn']))\n try:\n arg_8.bind()\n log.debug(\n \"Authentication was successful for user '{0}'\".format(arg_1))\n arg_6.status = AuthenticationResponseStatus.success\n\n # Populate User Data\n arg_7['attributes']['dn'] = arg_7['dn']\n arg_6.user_info = arg_7['attributes']\n arg_6.user_id = arg_1\n arg_6.user_dn = arg_7['dn']\n if arg_0.config.get('LDAP_SEARCH_FOR_GROUPS'):\n arg_6.user_groups = arg_0.get_user_groups(\n dn=arg_7['dn'], _connection=arg_3)\n arg_0.destroy_connection(arg_8)\n break\n\n except ldap3.core.exceptions.LDAPInvalidCredentialsResult:\n log.debug(\n \"Authentication was not successful for \"\n \"user '{0}'\".format(arg_1))\n arg_6.status = AuthenticationResponseStatus.fail\n except Exception as e: # pragma: no cover\n # This should never happen, however in case ldap3 does ever\n # throw an error here, we catch it and log it\n log.error(e)\n arg_6.status = AuthenticationResponseStatus.fail\n\n arg_0.destroy_connection(arg_8)\n\n arg_0.destroy_connection(arg_3)\n return arg_6"} +{"_id": "doc_8793", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n \"\"\"\n Gets a list of groups a user at dn is a member of\n\n Args:\n dn (str): The dn of the user to find memberships for.\n _connection (ldap3.Connection): A connection object to use when\n searching. 
If not given, a temporary connection will be\n created, and destroyed after use.\n group_search_dn (str): The search dn for groups. Defaults to\n ``'{LDAP_GROUP_DN},{LDAP_BASE_DN}'``.\n\n Returns:\n list: A list of LDAP groups the user is a member of.\n \"\"\"\n\n arg_4 = arg_3\n if not arg_4:\n arg_4 = arg_0._make_connection(\n bind_user=arg_0.config.get('LDAP_BIND_USER_DN'),\n bind_password=arg_0.config.get('LDAP_BIND_USER_PASSWORD')\n )\n arg_4.bind()\n\n arg_5 = ldap3.utils.conv.escape_filter_chars(arg_1)\n arg_6 = '(&{group_filter}({members_attr}={user_dn}))'.format(\n group_filter=arg_0.config.get('LDAP_GROUP_OBJECT_FILTER'),\n members_attr=arg_0.config.get('LDAP_GROUP_MEMBERS_ATTR'),\n user_dn=arg_5\n )\n\n log.debug(\n \"Searching for groups for specific user with filter '{0}' \"\n \", base '{1}' and scope '{2}'\".format(\n arg_6,\n arg_2 or arg_0.full_group_search_dn,\n arg_0.config.get('LDAP_GROUP_SEARCH_SCOPE')\n ))\n\n arg_4.search(\n search_base=arg_2 or arg_0.full_group_search_dn,\n arg_6=arg_6,\n attributes=arg_0.config.get('LDAP_GET_GROUP_ATTRIBUTES'),\n search_scope=getattr(\n ldap3, arg_0.config.get('LDAP_GROUP_SEARCH_SCOPE'))\n )\n\n arg_7 = []\n for arg_8 in arg_4.response:\n if 'type' not in arg_8 or arg_8.get('type') != 'searchResEntry':\n # Issue #13 - Don't return non-entry results.\n continue\n\n arg_9 = arg_8['attributes']\n arg_9['dn'] = arg_8['dn']\n arg_7.append(arg_9)\n\n if not arg_3:\n # We made a connection, so we need to kill it.\n arg_0.destroy_connection(arg_4)\n\n return arg_7"} +{"_id": "doc_8794", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Gets info about a user specified at dn.\n\n Args:\n dn (str): The dn of the user to find\n _connection (ldap3.Connection): A connection object to use when\n searching. If not given, a temporary connection will be\n created, and destroyed after use.\n\n Returns:\n dict: A dictionary of the user info from LDAP\n\n \"\"\"\n return arg_0.get_object(\n arg_1=arg_1,\n filter=arg_0.config.get('LDAP_USER_OBJECT_FILTER'),\n attributes=arg_0.config.get(\"LDAP_GET_USER_ATTRIBUTES\"),\n arg_2=arg_2,\n )"} +{"_id": "doc_8795", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Gets info about a user at a specified username by searching the\n Users DN. Username attribute is the same as specified as\n LDAP_USER_LOGIN_ATTR.\n\n\n Args:\n username (str): Username of the user to search for.\n _connection (ldap3.Connection): A connection object to use when\n searching. If not given, a temporary connection will be\n created, and destroyed after use.\n Returns:\n dict: A dictionary of the user info from LDAP\n \"\"\"\n arg_3 = '(&({0}={1}){2})'.format(\n arg_0.config.get('LDAP_USER_LOGIN_ATTR'),\n arg_1,\n arg_0.config.get('LDAP_USER_OBJECT_FILTER')\n )\n\n return arg_0.get_object(\n dn=arg_0.full_user_search_dn,\n filter=arg_3,\n attributes=arg_0.config.get(\"LDAP_GET_USER_ATTRIBUTES\"),\n arg_2=arg_2,\n )"} +{"_id": "doc_8796", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"\n Gets an object at the specified dn and returns it.\n\n Args:\n dn (str): The dn of the object to find.\n filter (str): The LDAP syntax search filter.\n attributes (list): A list of LDAP attributes to get when searching.\n _connection (ldap3.Connection): A connection object to use when\n searching. 
If not given, a temporary connection will be created,\n and destroyed after use.\n\n Returns:\n dict: A dictionary of the object info from LDAP\n \"\"\"\n\n arg_5 = arg_4\n if not arg_5:\n arg_5 = arg_0._make_connection(\n bind_user=arg_0.config.get('LDAP_BIND_USER_DN'),\n bind_password=arg_0.config.get('LDAP_BIND_USER_PASSWORD')\n )\n arg_5.bind()\n\n arg_5.search(\n search_base=arg_1,\n search_filter=arg_2,\n arg_3=arg_3,\n )\n\n arg_6 = None\n if len(arg_5.response) > 0:\n arg_6 = arg_5.response[0]['attributes']\n arg_6['dn'] = arg_5.response[0]['dn']\n\n if not arg_4:\n # We made a connection, so we need to kill it.\n arg_0.destroy_connection(arg_5)\n\n return arg_6"} +{"_id": "doc_8797", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Convenience property for externally accessing an authenticated\n connection to the server. This connection is automatically\n handled by the appcontext, so you do not have to perform an unbind.\n\n Returns:\n ldap3.Connection: A bound ldap3.Connection\n Raises:\n ldap3.core.exceptions.LDAPException: Since this method is performing\n a bind on behalf of the caller. You should handle this case\n occuring, such as invalid service credentials.\n \"\"\"\n arg_1 = stack.top\n if arg_1 is None:\n raise Exception(\"Working outside of the Flask application \"\n \"context. If you wish to make a connection outside of a flask\"\n \" application context, please handle your connections \"\n \"and use manager.make_connection()\")\n\n if hasattr(arg_1, 'ldap3_manager_main_connection'):\n return arg_1.ldap3_manager_main_connection\n else:\n Func = arg_0._make_connection(\n bind_user=arg_0.config.get('LDAP_BIND_USER_DN'),\n bind_password=arg_0.config.get('LDAP_BIND_USER_PASSWORD'),\n contextualise=False\n )\n Func.bind()\n if arg_1 is not None:\n arg_1.ldap3_manager_main_connection = Func\n return Func"} +{"_id": "doc_8798", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None,\n arg_3=True, **arg_4):\n \"\"\"\n Make a connection.\n\n Args:\n bind_user (str): User to bind with. If `None`, AUTH_ANONYMOUS is\n used, otherwise authentication specified with\n config['LDAP_BIND_AUTHENTICATION_TYPE'] is used.\n bind_password (str): Password to bind to the directory with\n contextualise (bool): If true (default), will add this connection to the\n appcontext so it can be unbound upon app_teardown.\n\n Returns:\n ldap3.Connection: An unbound ldap3.Connection. You should handle exceptions\n upon bind if you use this internal method.\n \"\"\"\n\n arg_5 = ldap3.ANONYMOUS\n if arg_1:\n arg_5 = getattr(ldap3, arg_0.config.get(\n 'LDAP_BIND_AUTHENTICATION_TYPE'))\n\n log.debug(\"Opening connection with bind user '{0}'\".format(\n arg_1 or 'Anonymous'))\n arg_6 = ldap3.Connection(\n server=arg_0._server_pool,\n read_only=arg_0.config.get('LDAP_READONLY'),\n user=arg_1,\n password=arg_2,\n client_strategy=ldap3.SYNC,\n arg_5=arg_5,\n check_names=arg_0.config['LDAP_CHECK_NAMES'],\n raise_exceptions=True,\n **arg_4\n )\n\n if arg_3:\n arg_0._contextualise_connection(arg_6)\n return arg_6"} +{"_id": "doc_8799", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Destroys a connection. 
Removes the connection from the appcontext, and\n unbinds it.\n\n Args:\n connection (ldap3.Connection): The connnection to destroy\n \"\"\"\n\n log.debug(\"Destroying connection at <{0}>\".format(hex(id(arg_1))))\n arg_0._decontextualise_connection(arg_1)\n arg_1.unbind()"} +{"_id": "doc_8800", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''query a s3 endpoint for an image based on a string\n\n EXAMPLE QUERIES:\n\n [empty] list all container collections\n vsoch/dinosaur look for containers with name vsoch/dinosaur\n \n '''\n\n if arg_1 is not None:\n\n return arg_0._container_Func(arg_1)\n\n # Search collections across all fields\n return arg_0._Func_all()"} +{"_id": "doc_8801", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''search across labels'''\n\n if arg_1 is not None:\n arg_1 = arg_1.lower()\n\n if arg_2 is not None:\n arg_2 = arg_2.lower()\n\n arg_3 = True\n if arg_1 is None and arg_2 is None:\n arg_4 = '%s/labels/search' % (arg_0.base)\n arg_3 = False\n\n elif arg_1 is not None and arg_2 is not None:\n arg_4 = '%s/labels/search/%s/key/%s/value' % (arg_0.base, arg_1, arg_2)\n\n elif arg_1 is None:\n arg_4 = '%s/labels/search/%s/value' % (arg_0.base, arg_2)\n\n else:\n arg_4 = '%s/labels/search/%s/key' % (arg_0.base, arg_1)\n\n arg_5 = arg_0._get(arg_4)\n if len(arg_5) == 0:\n bot.info(\"No labels found.\")\n sys.exit(0)\n\n bot.info(\"Labels\\n\")\n\n arg_6 = []\n for arg_7 in arg_5: \n if arg_3 is True:\n arg_8 = [\"%s:%s\" %(arg_7['key'],arg_7['value']),\n \"\\n%s\\n\\n\" %\"\\n\".join(arg_7['containers'])]\n else:\n arg_8 = [\"N=%s\" %len(arg_7['containers']),\n \"%s:%s\" %(arg_7['key'],arg_7['value']) ]\n arg_6.append(arg_8)\n bot.table(arg_6)\n return arg_6"} +{"_id": "doc_8802", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''query a GitLab artifacts folder for a list of images. \n If query is None, collections are listed. 
\n '''\n if arg_1 is None:\n bot.exit('You must include a collection query, /')\n\n # or default to listing (Funcing) all things.\n return arg_0._Func_all(arg_1)"} +{"_id": "doc_8803", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''a \"show all\" search that doesn't require a query\n the user is shown URLs to \n '''\n\n arg_3 = [['job_id', 'browser']]\n \n arg_4 = \"%s/projects/%s/jobs\" %(arg_0.api_base, \n quote_plus(arg_1.strip('/')))\n\n arg_5 = requests.get(arg_4, headers=arg_0.headers) \n if arg_5.status_code == 200:\n arg_6 = arg_5.json()\n\n # We can't get a listing of artifacts\n # https://gitlab.com/gitlab-org/gitlab-ce/issues/51515\n # Parse through jobs (each can have different tags for a collection):\n for arg_7 in arg_6:\n\n # Only show jobs that are successful\n if arg_7['status'] == 'success':\n arg_8 = arg_7['name']\n\n for arg_9 in arg_7['artifacts']:\n if arg_9['filename'].endswith('zip'):\n \n # The user must browse to see the names\n arg_10 = (\"%s/%s/-/jobs/%s/artifacts/browse/%s\" \n %(arg_0.base , \n arg_1, \n arg_7['id'],\n arg_8))\n arg_3.append([str(arg_7['id']), arg_10]) \n\n if len(arg_3) == 1:\n bot.info(\"No potential archives found in artifacts.\")\n sys.exit(0)\n\n bot.info(\"Artifact Browsers (you will need path and job id for pull)\")\n bot.table(arg_3)\n return arg_3"} +{"_id": "doc_8804", "title": "", "text": "def Func(arg_0,arg_1=None):\n '''update headers with a token & other fields\n '''\n arg_2 = True\n if hasattr(arg_0, 'headers'):\n if arg_0.headers is not None:\n arg_2 = False\n\n if arg_2 is True:\n arg_0._reset_headers()\n\n if arg_1 is not None:\n for arg_3,arg_4 in arg_1.items():\n arg_0.headers[arg_3] = arg_4\n\n arg_6 = \",\".join(list(arg_0.headers.keys()))\n bot.debug(\"Headers found: %s\" %arg_6)"} +{"_id": "doc_8805", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''require secrets ensures that the client has the secrets file, and\n specifically has one or more parameters defined. If params is None,\n only a check is done for the file.\n\n Parameters\n ==========\n params: a list of keys to lookup in the client secrets, eg:\n \n secrets[client_name][params1] should not be in [None,''] or not set\n\n '''\n arg_2 = arg_0.client_name \n\n # Check 1: the client must have secrets, period\n arg_3 = True\n\n # Secrets file not asked for (incorrectly) but still wanted\n # The client shouldn't be calling this function if didn't init secrets\n if not hasattr(arg_0,'secrets'):\n arg_3 = False\n\n # Secret file was not found, period\n elif hasattr(arg_0,'secrets'):\n if arg_0.secrets is None: \n arg_3 = False\n\n # The client isn't defined in the secrets file\n elif arg_0.client_name not in arg_0.secrets: \n arg_3 = False\n\n # Missing file or client secrets, fail\n if arg_3 is False:\n arg_4 = '%s requires client secrets.' %arg_2\n bot.error(arg_4)\n sys.exit(1)\n\n # Check 2: we have secrets and lookup, do we have all needed params?\n if arg_1 is not None:\n\n # Assume list so we can always parse through\n if not isinstance(arg_1,list):\n arg_1 = [arg_1]\n\n for arg_5 in arg_1:\n\n # The parameter is not a key for the client\n if arg_5 not in arg_0.secrets[arg_2]: \n arg_3 = False\n\n # The parameter is a key, but empty or undefined\n elif arg_0.secrets[arg_2][arg_5] in [None,'']: \n arg_3=False\n\n # Missing parameter, exit on fail\n if arg_3 is False:\n arg_4 = 'Missing %s in client secrets.' 
%arg_5\n bot.error(arg_4)\n sys.exit(1)"} +{"_id": "doc_8806", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=True):\n '''stream to a temporary file, rename on successful completion\n\n Parameters\n ==========\n file_name: the file name to stream to\n url: the url to stream from\n headers: additional headers to add\n '''\n\n arg_4, arg_5 = tempfile.mkstemp(prefix=(\"%s.tmp.\" % arg_1)) \n os.close(arg_4)\n\n if DISABLE_SSL_CHECK is True:\n bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')\n\n arg_6 = not DISABLE_SSL_CHECK\n arg_7 = stream(arg_0, arg_2=arg_2, stream_to=arg_5)\n shutil.move(arg_5, arg_1)\n return arg_1"} +{"_id": "doc_8807", "title": "", "text": "def Func(arg_0):\n '''Func uses HTTP basic authentication to attempt to authenticate\n given a 401 response. We take as input previous headers, and update \n them.\n\n Parameters\n ==========\n response: the http request response to parse for the challenge.\n \n '''\n try:\n from awscli.clidriver import create_clidriver\n except:\n bot.exit('Please install pip install sregistry[aws]')\n\n arg_1 = create_clidriver()\n arg_2 = arg_1.session.create_client('ecr')\n arg_3 = arg_2.get_authorization_token()\n arg_4 = arg_3['authorizationData'][0]['authorizationToken'] \n\n try:\n arg_4 = {\"Authorization\": \"Basic %s\" % arg_4}\n arg_0.update(arg_4)\n\n except Exception:\n bot.error(\"Error getting token.\")\n sys.exit(1)\n\n return arg_0"} +{"_id": "doc_8808", "title": "", "text": "def Func(arg_0,arg_1, arg_2=\"detail\"):\n '''attempt to read the detail provided by the response. If none, \n default to using the reason'''\n\n try:\n arg_3 = json.loads(arg_1._content.decode('utf-8'))[arg_2]\n except:\n arg_3 = arg_1.reason\n return arg_3"} +{"_id": "doc_8809", "title": "", "text": "def Func(arg_0):\n '''given a bucket name and a client that is initialized, get or\n create the bucket.\n '''\n for arg_1 in ['bucket_name', 's3']:\n if not hasattr(arg_0, arg_1):\n bot.exit('client is missing attribute %s' %(arg_1))\n\n # See if the bucket is already existing\n arg_0.bucket = None\n for arg_2 in arg_0.s3.buckets.all():\n if arg_2.name == arg_0.bucket_name:\n arg_0.bucket = arg_2\n \n # If the bucket doesn't exist, create it\n if arg_0.bucket is None:\n arg_0.bucket = arg_0.s3.create_bucket(Bucket=arg_0.bucket_name)\n bot.info('Created bucket %s' % arg_0.bucket.name )\n\n return arg_0.bucket"} +{"_id": "doc_8810", "title": "", "text": "def Func(arg_0):\n '''init_ cliends will obtain the tranfer and access tokens, and then\n use them to create a transfer client.\n '''\n\n arg_0._client = globus_sdk.NativeAppAuthClient(arg_0._client_id)\n arg_0._load_secrets()"} +{"_id": "doc_8811", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''return Func for a particular container. The Func file is equivalent to\n the name, but with extension .log. 
If there is no name, the most recent\n log is returned.\n\n Parameters\n ==========\n name: the container name to print Func for.\n\n '''\n arg_2 = None\n arg_3 = arg_0._list_Func()\n print(arg_3)\n\n # If we are searching for a name\n if arg_1 is not None:\n for arg_4 in arg_3:\n\n arg_5 = False\n\n # Case 1: the name is in the storage path\n if arg_1 in arg_4.name:\n arg_5=True\n\n # Case 2: match in metadata\n for arg_6,arg_7 in arg_4.metadata.items():\n if arg_1 in arg_7:\n arg_5 = True\n\n if arg_5 is True:\n arg_2 = arg_0._print_log(arg_4.name) \n\n # Otherwise return the last\n else:\n\n if len(arg_3) > 0:\n arg_8 = arg_3[0]\n \n # Get the most recent\n for arg_4 in arg_3:\n if arg_4.time_created >= arg_8.time_created:\n arg_8 = arg_4 \n arg_2 = arg_0._print_log(arg_4.name) \n\n return arg_2"} +{"_id": "doc_8812", "title": "", "text": "def Func(arg_0):\n '''return a list of logs. We return any file that ends in .log\n '''\n arg_1 = []\n for arg_2 in arg_0._bucket.list_blobs():\n if arg_2.name.endswith('log'):\n arg_1.append(arg_2)\n\n if len(arg_1) == 0:\n bot.info(\"No containers found, based on extension .log\")\n\n return arg_1"} +{"_id": "doc_8813", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''create an endpoint folder, catching the error if it exists.\n\n Parameters\n ==========\n endpoint_id: the endpoint id parameters\n folder: the relative path of the folder to create\n\n '''\n try:\n arg_3 = arg_0.transfer_client.operation_mkdir(arg_1, arg_2)\n bot.info(\"%s --> %s\" %(arg_3['message'], arg_2))\n except TransferAPIError:\n bot.info('%s already exists at endpoint' %arg_2)"} +{"_id": "doc_8814", "title": "", "text": "def Func(arg_0):\n '''return a transfer client for the user''' \n\n if arg_0._tokens_need_update():\n arg_0._update_tokens()\n\n arg_1 = arg_0.transfer['access_token']\n\n # Createe Refresh Token Authorizer\n\n arg_2 = globus_sdk.RefreshTokenAuthorizer(\n arg_0.transfer['refresh_token'],\n arg_0._client, \n arg_1=arg_0.transfer['access_token'], \n expires_at=arg_0.transfer['expires_at_seconds'])\n\n arg_0.transfer_client = globus_sdk.TransferClient(arg_2=arg_2)"} +{"_id": "doc_8815", "title": "", "text": "def Func(arg_0):\n '''print the Func for all or one of the backends.\n '''\n print('[backend Func]')\n arg_1 = read_client_secrets()\n print('There are %s clients found in secrets.' %len(arg_1))\n if 'SREGISTRY_CLIENT' in arg_1:\n print('active: %s' %arg_1['SREGISTRY_CLIENT'])\n update_secrets(arg_1)\n else:\n print('There is no active client.')"} +{"_id": "doc_8816", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n '''Func the variable to the config\n '''\n print('[Func]')\n arg_4 = read_client_secrets()\n\n # If the variable begins with the SREGISTRY_ don't Func it\n arg_5 = 'SREGISTRY_%s_' %arg_0.upper()\n if not arg_1.startswith(arg_5):\n arg_1 = '%s%s' %(arg_5, arg_1)\n\n # All must be uppercase\n arg_1 = arg_1.upper()\n bot.info(\"%s %s\" %(arg_1, arg_2))\n \n # Does the setting already exist?\n\n if arg_0 in arg_4:\n if arg_1 in arg_4[arg_0] and arg_3 is False:\n arg_6 = arg_4[arg_0][arg_1]\n bot.error('%s is already set as %s. Use --force to override.' 
%(arg_1, arg_6))\n sys.exit(1)\n\n if arg_0 not in arg_4:\n arg_4[arg_0] = {}\n\n arg_4[arg_0][arg_1] = arg_2\n update_secrets(arg_4)"} +{"_id": "doc_8817", "title": "", "text": "def Func(arg_0, arg_1):\n '''Func a variable from the config, if found.\n '''\n print('[Func]')\n arg_2 = read_client_secrets()\n\n # If the variable begins with the SREGISTRY_ don't add it\n arg_3 = arg_1\n arg_4 = 'SREGISTRY_%s_' %arg_0.upper()\n if not arg_1.startswith(arg_4):\n arg_3 = '%s%s' %(arg_4, arg_1)\n\n # All must be uppercase\n arg_1 = arg_1.upper()\n bot.info(arg_1)\n \n # Does the setting already exist?\n if arg_0 in arg_2:\n if arg_1 in arg_2[arg_0]:\n del arg_2[arg_0][arg_1] \n if arg_3 in arg_2[arg_0]:\n del arg_2[arg_0][arg_3] \n update_secrets(arg_2)"} +{"_id": "doc_8818", "title": "", "text": "def Func(arg_0, arg_1):\n '''generate a base64 encoded header to ask for a token. This means\n base64 encoding a username and password and adding to the\n Authorization header to identify the client.\n\n Parameters\n ==========\n username: the username\n password: the password\n \n '''\n arg_2 = \"%s:%s\" % (arg_0, arg_1)\n if sys.version_info[0] >= 3:\n arg_2 = bytes(arg_2, 'utf-8')\n arg_3 = base64.b64encode(arg_2).decode('utf-8')\n else:\n arg_3 = base64.b64encode(arg_2)\n arg_4 = {\"Authorization\": \"Basic %s\" % arg_3}\n return arg_4"} +{"_id": "doc_8819", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Authorize a client based on encrypting the payload with the client\n secret, timestamp, and other metadata\n '''\n\n # Use the payload to generate a digest push|collection|name|tag|user\n arg_3 = generate_timestamp()\n arg_4 = \"%s/%s\" %(arg_2,arg_3)\n\n arg_5 = generate_signature(arg_1,arg_0)\n return \"SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s\" %(arg_4,arg_5)"} +{"_id": "doc_8820", "title": "", "text": "def Func(arg_0, arg_1):\n '''Func request, typically used for status code retrieval, etc.\n '''\n bot.debug('HEAD %s' %arg_1)\n return arg_0._call(arg_1, func=requests.Func)"} +{"_id": "doc_8821", "title": "", "text": "def Func(arg_0, arg_1, \n arg_2=None, \n arg_3=True,\n arg_4=None):\n '''paginate_call is a wrapper for get to paginate results\n '''\n\n arg_5 = '%s&page=1' %(arg_1)\n if arg_4 is not None:\n arg_5 = '%s&page=%s' %(arg_1,arg_4)\n\n arg_6 = []\n while arg_5 is not None:\n arg_7 = arg_0._get(arg_1, arg_2=arg_2, arg_3=arg_3)\n # If we have pagination:\n if isinstance(arg_7, dict):\n if 'results' in arg_7:\n arg_6 = arg_6 + arg_7['results']\n arg_5 = arg_7['next']\n # No pagination is a list\n else:\n return arg_7\n return arg_6"} +{"_id": "doc_8822", "title": "", "text": "def Func(arg_0):\n '''\n Func will return a True or False to determine to Func the\n requests call or not. If False, we should the user a warning message,\n as this should not be done in production!\n\n '''\n from sregistry.defaults import DISABLE_SSL_CHECK\n\n if DISABLE_SSL_CHECK is True:\n bot.warning('Verify of certificates disabled! 
::TESTING USE ONLY::')\n\n return not DISABLE_SSL_CHECK"} +{"_id": "doc_8823", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''delete an image to Singularity Registry'''\n\n arg_3 = parse_image_name(Func_uri(arg_1))\n\n # If the registry is provided in the uri, use it\n if arg_3['registry'] == None:\n arg_3['registry'] = arg_0.base\n\n # If the base doesn't start with http or https, add it\n arg_3 = arg_0._add_https(arg_3)\n\n arg_4 = '%s/container/%s/%s:%s' % (arg_3['registry'], \n arg_3[\"collection\"],\n arg_3[\"image\"], \n arg_3[\"tag\"])\n\n arg_5 = arg_0.authorize(request_type=\"delete\", names=arg_3)\n arg_6 = {'Authorization': arg_5 }\n arg_0._update_headers(fields=arg_6)\n\n arg_7 = True\n if arg_2 is False:\n arg_8 = input(\"Are you sure you want to delete %s?\" % arg_3['uri'])\n while len(arg_8) < 1 or arg_8[0].lower().strip() not in \"ynyesno\":\n arg_8 = input(\"Please answer yes or no: \")\n if arg_8[0].lower().strip() in \"no\":\n arg_7 = False\n\n if arg_7 is True:\n arg_8 = arg_0._delete(arg_4)\n arg_9 = arg_0._read_response(arg_8)\n bot.info(\"Response %s, %s\" %(arg_8.status_code, arg_9))\n\n else:\n bot.info(\"Delete cancelled.\")"} +{"_id": "doc_8824", "title": "", "text": "def Func():\n '''get version by way of sregistry.version, returns a \n lookup dictionary with several global variables without\n needing to import singularity\n '''\n arg_0 = dict()\n arg_1 = os.path.join('sregistry', 'version.py')\n with open(arg_1) as filey:\n exec(filey.read(), arg_0)\n return arg_0"} +{"_id": "doc_8825", "title": "", "text": "def Func(arg_0=None, arg_1='INSTALL_REQUIRES'):\n '''get requirements, mean reading in requirements and versions from\n the lookup obtained with get_lookup'''\n\n if arg_0 == None:\n arg_0 = get_lookup()\n\n arg_2 = []\n for arg_3 in arg_0[arg_1]:\n arg_4 = arg_3[0]\n arg_5 = arg_3[1]\n if \"exact_version\" in arg_5:\n arg_6 = \"%s==%s\" %(arg_4,arg_5['exact_version'])\n elif \"min_version\" in arg_5:\n if arg_5['min_version'] == None:\n arg_6 = arg_4\n else:\n arg_6 = \"%s>=%s\" %(arg_4,arg_5['min_version'])\n arg_2.append(arg_6)\n return arg_2"} +{"_id": "doc_8826", "title": "", "text": "def Func(arg_0=None):\n '''Func will determine the singularity version for a\n build first, an environmental variable is looked at, followed by \n using the system version.\n\n Parameters\n ==========\n singularity_version: if not defined, look for in environment. If still\n not find, try finding via executing --version to Singularity. 
Only return\n None if not set in environment or installed.\n '''\n\n if arg_0 is None: \n arg_0 = os.environ.get(\"SINGULARITY_VERSION\")\n \n if arg_0 is None:\n try:\n arg_1 = ['singularity','--version']\n arg_2 = run_command(arg_1)\n\n if isinstance(arg_2['message'],bytes):\n arg_2['message'] = arg_2['message'].decode('utf-8')\n arg_0 = arg_2['message'].strip('\\n')\n bot.info(\"Singularity %s being used.\" % arg_0)\n \n except:\n arg_0 = None\n bot.warning(\"Singularity version not found, so it's likely not installed.\")\n\n return arg_0"} +{"_id": "doc_8827", "title": "", "text": "def Func():\n '''Func returns the installation directory of the application\n '''\n return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))"} +{"_id": "doc_8828", "title": "", "text": "def Func():\n '''return the robot.png thumbnail from the database folder.\n if the user has exported a different image, use that instead.\n '''\n from sregistry.defaults import SREGISTRY_THUMBNAIL\n if SREGISTRY_THUMBNAIL is not None:\n if os.path.exists(SREGISTRY_THUMBNAIL):\n return SREGISTRY_THUMBNAIL\n return \"%s/database/robot.png\" %get_installdir()"} +{"_id": "doc_8829", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''Func uses subprocess to send a command to the terminal.\n\n Parameters\n ==========\n cmd: the command to send, should be a list for subprocess\n error_message: the error message to give to user if fails,\n if none specified, will alert that command failed.\n\n '''\n if arg_1 is True:\n arg_0 = ['sudo'] + arg_0\n\n try:\n arg_2 = Popen(arg_0, stderr=STDOUT, stdout=PIPE)\n\n except FileNotFoundError:\n arg_0.pop(0)\n arg_2 = Popen(arg_0, stderr=STDOUT, stdout=PIPE)\n\n arg_3 = arg_2.communicate()[0],arg_2.returncode\n arg_2 = {'message':arg_3[0],\n 'return_code':arg_3[1]}\n\n if isinstance(arg_2['message'], bytes):\n arg_2['message'] = arg_2['message'].decode('utf-8')\n\n return arg_2"} +{"_id": "doc_8830", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''this is a wrapper around the main client.get_metadata to first parse\n a Dropbox FileMetadata into a dicionary, then pass it on to the \n primary get_metadata function.\n\n Parameters\n ==========\n image_file: the full path to the image file that had metadata\n extracted\n metadata: the Dropbox FileMetadata to parse.\n\n '''\n arg_3 = dict()\n\n if arg_2 is not None:\n for arg_4 in arg_2.__dir__():\n arg_5 = getattr(arg_2, arg_4)\n if type(arg_5) in [str, datetime.datetime, bool, int, float]:\n arg_3[arg_4.strip('_')] = arg_5\n \n return arg_0.get_metadata(arg_1, names=arg_3)"} +{"_id": "doc_8831", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''print the output to the console for the user. 
If the user wants the content\n also printed to an output file, do that.\n\n Parameters\n ==========\n response: the response from the builder, with metadata added\n output_file: if defined, write output also to file\n\n '''\n # If successful built, show container uri\n if arg_0['status'] == 'SUCCESS':\n arg_2 = arg_0['artifacts']['objects']['location']\n arg_3 = arg_0['artifacts']['objects']['paths'][0]\n bot.custom(\"MD5HASH\", arg_0['file_hash'], 'CYAN')\n bot.custom(\"SIZE\", arg_0['size'], 'CYAN')\n bot.custom(arg_0['status'], arg_2 + arg_3 , 'CYAN')\n else:\n bot.custom(arg_0['status'], 'see logs for details', 'CYAN')\n\n # Show the logs no matter what\n bot.custom(\"LOGS\", arg_0['logUrl'], 'CYAN')\n\n # Did the user make the container public?\n if \"public_url\" in arg_0:\n bot.custom('URL', arg_0['public_url'], 'CYAN')\n\n # Does the user also need writing to an output file?\n if arg_1 != None: \n with open(arg_1, 'w') as filey:\n if arg_0['status'] == 'SUCCESS':\n filey.writelines('MD5HASH %s\\n' % arg_0['file_hash']) \n filey.writelines('SIZE %s\\n' % arg_0['size']) \n filey.writelines('%s %s%s\\n' % (arg_0['status'], arg_2, arg_3))\n filey.writelines('LOGS %s\\n' % arg_0['logUrl'])\n if \"public_url\" in arg_0:\n filey.writelines('URL %s\\n' % arg_0['public_url'])"} +{"_id": "doc_8832", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''list a specific log for a builder, or the latest log if none provided\n\n Parameters\n ==========\n args: the argparse object to look for a container name\n container_name: a default container name set to be None (show latest log)\n\n '''\n from sregistry.main import Client as cli \n if len(arg_0.commands) > 0:\n arg_1 = arg_0.commands.pop(0)\n cli.logs(arg_1)\n sys.exit(0)"} +{"_id": "doc_8833", "title": "", "text": "def Func(arg_0):\n '''get a listing of collections that the user has access to.\n '''\n arg_1 = []\n for arg_2 in arg_0.conn.get_account()[1]:\n arg_1.append(arg_2['name'])\n return arg_1"} +{"_id": "doc_8834", "title": "", "text": "def Func(arg_0):\n '''update secrets will look for a user and token in the environment\n If we find the values, cache and continue. Otherwise, exit with error\n '''\n\n # Get the swift authentication type first. That will determine what we\n # will need to collect for proper authentication\n arg_0.config['SREGISTRY_SWIFT_AUTHTYPE'] = arg_0._required_get_and_update(\n 'SREGISTRY_SWIFT_AUTHTYPE')\n\n # Check what auth version is requested and setup the connection\n if arg_0.config['SREGISTRY_SWIFT_AUTHTYPE'] == 'preauth':\n\n # Pre-Authenticated Token/URL - Use OS_AUTH_TOKEN/OS_STORAGE_URL\n # Retrieve the user token, user, and base. Exit if not found \n for arg_2 in ['SREGISTRY_SWIFT_OS_AUTH_TOKEN',\n 'SREGISTRY_SWIFT_OS_STORAGE_URL' ]:\n arg_0.config[arg_2] = arg_0._required_get_and_update(arg_2)\n\n arg_0.conn = swiftclient.Connection(\n preauthurl=arg_0.config['SREGISTRY_SWIFT_OS_STORAGE_URL'],\n preauthtoken=arg_0.config['SREGISTRY_SWIFT_OS_AUTH_TOKEN']\n )\n elif arg_0.config['SREGISTRY_SWIFT_AUTHTYPE'] == 'keystonev3':\n\n # Keystone v3 Authentication\n # Retrieve the user token, user, and base. Exit if not found \n for arg_2 in ['SREGISTRY_SWIFT_USER',\n 'SREGISTRY_SWIFT_TOKEN',\n 'SREGISTRY_SWIFT_URL']:\n arg_0.config[arg_2] = arg_0._required_get_and_update(arg_2)\n\n arg_4 = '%s/v3' % arg_0.config['SREGISTRY_SWIFT_URL']\n # Setting to default as a safety. No v3 environment to test\n # May require ENV vars for real use. - M. 
Moore\n arg_5 = {\n 'user_domain_name': 'Default',\n 'project_domain_name': 'Default',\n 'project_name': 'Default'\n }\n\n # Save the connection to use for some command\n arg_0.conn = swiftclient.Connection(\n user=arg_0.config['SREGISTRY_SWIFT_USER'],\n key=arg_0.config['SREGISTRY_SWIFT_TOKEN'],\n os_options=arg_5,\n authurl=arg_4,\n auth_version='3'\n )\n\n elif arg_0.config['SREGISTRY_SWIFT_AUTHTYPE'] == 'keystonev2':\n\n # Keystone v2 Authentication\n # Retrieve the user token, user, and base. Exit if not found \n for arg_2 in ['SREGISTRY_SWIFT_USER',\n 'SREGISTRY_SWIFT_TOKEN',\n 'SREGISTRY_SWIFT_TENANT',\n 'SREGISTRY_SWIFT_REGION',\n 'SREGISTRY_SWIFT_URL']:\n arg_0.config[arg_2] = arg_0._required_get_and_update(arg_2)\n\n # More human friendly to interact with\n arg_4 = '%s/v2.0/' % arg_0.config['SREGISTRY_SWIFT_URL']\n # Set required OpenStack options for tenant/region\n arg_5 = {\n 'tenant_name': arg_0.config['SREGISTRY_SWIFT_TENANT'],\n 'region_name': arg_0.config['SREGISTRY_SWIFT_REGION']\n }\n\n # Save the connection to use for some command\n arg_0.conn = swiftclient.Connection(\n user=arg_0.config['SREGISTRY_SWIFT_USER'],\n key=arg_0.config['SREGISTRY_SWIFT_TOKEN'],\n os_options=arg_5,\n authurl=arg_4,\n auth_version='2'\n )\n else:\n\n # Legacy Authentication\n # Retrieve the user token, user, and base. Exit if not found \n for arg_2 in ['SREGISTRY_SWIFT_USER',\n 'SREGISTRY_SWIFT_TOKEN',\n 'SREGISTRY_SWIFT_URL']:\n arg_0.config[arg_2] = arg_0._required_get_and_update(arg_2)\n\n # More human friendly to interact with\n arg_4 = '%s/auth/' % arg_0.config['SREGISTRY_SWIFT_URL']\n\n # Save the connection to use for some command\n arg_0.conn = swiftclient.Connection(\n user=arg_0.config['SREGISTRY_SWIFT_USER'],\n key=arg_0.config['SREGISTRY_SWIFT_TOKEN'],\n authurl=arg_4,\n )"} +{"_id": "doc_8835", "title": "", "text": "def Func(arg_0):\n '''The user is required to have an application secrets file in his\n or her environment. The information isn't saved to the secrets\n file, but the client exists with error if the variable isn't found.\n '''\n arg_1 = 'GOOGLE_APPLICATION_CREDENTIALS'\n arg_0._secrets = arg_0._get_and_update_setting(arg_1)\n if arg_0._secrets is None:\n bot.error('You must export %s to use Google Storage client' %arg_1)\n sys.exit(1)"} +{"_id": "doc_8836", "title": "", "text": "def Func(arg_0=None, arg_1=False, **arg_2):\n '''\n get the correct client depending on the driver of interest. The\n selected client can be chosen based on the environment variable\n SREGISTRY_CLIENT, and later changed based on the image uri parsed\n If there is no preference, the default is to load the singularity \n hub client.\n\n Parameters\n ==========\n image: if provided, we derive the correct client based on the uri\n of an image. If not provided, we default to environment, then hub.\n quiet: if True, suppress most output about the client (e.g. 
speak)\n\n '''\n from sregistry.defaults import arg_4\n\n # Give the user a warning:\n if not check_install():\n bot.warning('Singularity is not installed, function might be limited.')\n\n # If an image is provided, use to determine client\n arg_3 = get_uri(arg_0)\n if arg_3 is not None:\n arg_4 = arg_3\n\n # If no obvious credential provided, we can use SREGISTRY_CLIENT\n if arg_4 == 'aws': from .aws import arg_5\n elif arg_4 == 'docker': from .docker import arg_5\n elif arg_4 == 'dropbox': from .dropbox import arg_5\n elif arg_4 == 'gitlab': from .gitlab import arg_5\n elif arg_4 == 'globus': from .globus import arg_5\n elif arg_4 == 'nvidia': from .nvidia import arg_5\n elif arg_4 == 'hub': from .hub import arg_5\n elif arg_4 == 'google-drive': from .google_drive import arg_5\n elif arg_4 == 'google-compute': from .google_storage import arg_5\n elif arg_4 == 'google-storage': from .google_storage import arg_5\n elif arg_4 == 'google-build': from .google_build import arg_5\n elif arg_4 == 'registry': from .registry import arg_5\n elif arg_4 == 's3': from .s3 import arg_5\n elif arg_4 == 'swift': from .swift import arg_5\n else: from .hub import arg_5\n\n arg_5.client_name = arg_4\n arg_5.quiet = arg_1\n\n # Create credentials cache, if it doesn't exist\n arg_5._credential_cache = get_credential_cache()\n\n # Add the database, if wanted\n if SREGISTRY_DATABASE is not None:\n\n # These are global functions used across modules\n from sregistry.database import (\n init_db, arg_8, arg_9, arg_10, arg_12, arg_14, arg_15, \n arg_16, arg_11,\n arg_13,\n arg_18,\n arg_19, \n arg_17 \n )\n\n # Actions\n arg_5._init_db = init_db\n arg_5.add = arg_8\n arg_5.cp = arg_9\n arg_5.get = arg_10\n arg_5.inspect = arg_11\n arg_5.mv = arg_12\n arg_5.rename = arg_13\n arg_5.rm = arg_14\n arg_5.rmi = arg_15\n arg_5.images = arg_16\n\n # Collections\n arg_5.get_or_create_collection = arg_17\n arg_5.get_container = arg_18\n arg_5.get_collection = arg_19\n\n # If no database, import dummy functions that return the equivalent\n else:\n from sregistry.database import ( arg_8, init_db )\n arg_5.add = arg_8\n arg_5._init_db = init_db \n\n # Initialize the database\n arg_20 = arg_5()\n\n if hasattr(arg_5, '_init_db'):\n arg_20._init_db(SREGISTRY_DATABASE)\n return arg_20"} +{"_id": "doc_8837", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''Func calls get_manifest for each of the schema versions,\n including v2 and v1. Version 1 includes image layers and metadata,\n and version 2 must be parsed for a specific manifest, and the 2nd\n call includes the layers. 
If a digest is not provided\n latest is used.\n\n Parameters\n ==========\n repo_name: reference to the /: to obtain\n digest: a tag or shasum version\n\n '''\n\n if not hasattr(arg_0, 'manifests'):\n arg_0.manifests = {}\n\n # Obtain schema version 1 (metadata) and 2, and image config\n arg_4 = ['v1', 'v2', 'config']\n for arg_5 in arg_4:\n arg_6 = arg_0._get_manifest(arg_1, arg_2, arg_5)\n if arg_6 is not None:\n\n # If we don't have a config yet, try to get from version 2 manifest\n if arg_5 == \"v2\" and \"config\" in arg_6:\n bot.debug('Attempting to get config as blob in verison 2 manifest')\n arg_7 = arg_0._get_layerLink(arg_1, arg_6['config']['digest']) \n arg_8 = {'Accept': arg_6['config']['mediaType']}\n arg_0.manifests['config'] = arg_0._get(arg_7, arg_8=arg_8)\n\n arg_0.manifests[arg_5] = arg_6\n\n\n return arg_0.manifests"} +{"_id": "doc_8838", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=\"v1\"):\n '''\n Func should return an image manifest\n for a particular repo and tag. The image details\n are extracted when the client is generated.\n\n Parameters\n ==========\n repo_name: reference to the /: to obtain\n digest: a tag or shasum version\n version: one of v1, v2, and config (for image config)\n\n '''\n\n arg_4 = {'config': \"application/vnd.docker.container.image.v1+json\",\n 'v1': \"application/vnd.docker.distribution.manifest.v1+json\",\n 'v2': \"application/vnd.docker.distribution.manifest.v2+json\" }\n\n arg_5 = arg_0._Func_selfLink(arg_1, arg_2)\n\n bot.verbose(\"Obtaining manifest: %s %s\" % (arg_5, arg_3))\n arg_6 = {'Accept': arg_4[arg_3] }\n\n try:\n arg_7 = arg_0._get(arg_5, arg_6=arg_6, quiet=True)\n arg_7['selfLink'] = arg_5\n except:\n arg_7 = None\n\n return arg_7"} +{"_id": "doc_8839", "title": "", "text": "def Func(arg_0, arg_1, arg_2='docker'):\n '''determine the user preference for atomic download of layers. If\n the user has set a singularity cache directory, honor it. 
Otherwise,\n use the Singularity default.\n '''\n # First priority after user specification is Singularity Cache\n if arg_1 is None:\n arg_1 = arg_0._get_setting('SINGULARITY_CACHEDIR', \n SINGULARITY_CACHE)\n \n # If not set, the user has disabled (use tmp)\n arg_1 = get_tmpdir(arg_1)\n\n if not arg_1.endswith(arg_2):\n arg_1 = \"%s/%s\" %(arg_1, arg_2)\n\n # Create subfolders, if don't exist\n mkdir_p(arg_1)\n return arg_1"} +{"_id": "doc_8840", "title": "", "text": "def Func(arg_0):\n '''extract the environment from the manifest, or return None.\n Used by functions env_extract_image, and env_extract_tar\n '''\n arg_1 = arg_0._get_config('Env')\n if arg_1 is not None:\n if not isinstance(arg_1, list):\n arg_1 = [arg_1]\n\n arg_2 = []\n for arg_3 in arg_1:\n arg_3 = re.findall(\"(?P.+?)=(?P.+)\", arg_3)\n arg_3 = ['export %s=\"%s\"' % (x[0], x[1]) for x in arg_3]\n arg_2 = arg_2 + arg_3\n\n arg_1 = \"\\n\".join(arg_2)\n bot.verbose3(\"Found Docker container environment!\")\n return arg_1"} +{"_id": "doc_8841", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''get all settings, either for a particular client if a name is provided,\n or across clients.\n\n Parameters\n ==========\n client_name: the client name to return settings for (optional)\n\n '''\n arg_2 = read_client_secrets()\n if arg_1 is not None and arg_1 in arg_2:\n return arg_2[arg_1] \n return arg_2"} +{"_id": "doc_8842", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''a wrapper to get_and_update, but if not successful, will print an\n error and exit.\n '''\n arg_3 = arg_0._get_and_update_setting(arg_1, arg_2=None)\n if arg_3 in [None, \"\"]:\n bot.exit('You must export %s' % arg_1)\n return arg_3"} +{"_id": "doc_8843", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Just update a setting, doesn't need to be returned.\n ''' \n\n if arg_2 is not None:\n arg_3 = {arg_1 : arg_2}\n update_client_secrets(backend=arg_0.client_name, \n arg_3=arg_3)"} +{"_id": "doc_8844", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=\"push\"):\n '''Authorize a client based on encrypting the payload with the client\n token, which should be matched on the receiving server'''\n\n if arg_0.secrets is not None:\n\n if \"registry\" in arg_0.secrets:\n\n # Use the payload to generate a digest push|collection|name|tag|user\n arg_4 = generate_timestamp()\n arg_5 = generate_credential(arg_0.secrets['registry']['username'])\n arg_5 = \"%s/%s/%s\" %(arg_3,arg_5,arg_4)\n\n if arg_2 is None:\n arg_2 = \"%s|%s|%s|%s|%s|\" %(arg_3,\n arg_1['collection'],\n arg_4,\n arg_1['image'],\n arg_1['tag'])\n\n arg_6 = generate_signature(arg_2,arg_0.secrets['registry']['token'])\n return \"SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s\" %(arg_5,arg_6)"} +{"_id": "doc_8845", "title": "", "text": "def Func(arg_0, arg_1):\n '''load a particular template based on a name. We look for a name IN data,\n so the query name can be a partial string of the full name.\n\n Parameters\n ==========\n name: the name of a template to look up\n '''\n arg_2 = arg_0._get_templates()\n arg_3 = []\n\n # The user wants to retrieve a particular configuration\n arg_4 = [x for x in arg_2['data'] if arg_1 in x['name']]\n if len(arg_4) > 0:\n for arg_5 in arg_4:\n arg_6 = arg_0._get(arg_5['id'])\n arg_3.append(arg_6)\n return arg_3\n\n bot.info('No matches found for %s' %arg_1)"} +{"_id": "doc_8846", "title": "", "text": "def Func(arg_0, arg_1):\n '''run a build, meaning inserting an instance. 
Retry if there is failure\n\n Parameters\n ==========\n config: the configuration dictionary generated by setup_build\n\n '''\n arg_2 = arg_0._get_project()\n arg_3 = arg_0._get_zone()\n\n bot.custom(prefix='INSTANCE', message=arg_1['name'], color=\"CYAN\")\n bot.info(arg_1['description'])\n\n arg_4 = arg_0._compute_service.instances().insert(arg_2=arg_2,\n arg_3=arg_3,\n body=arg_1).execute()\n\n # Direct the user to the web portal with log\n arg_5 = arg_0._get_ipaddress(arg_1['name'])\n bot.info('Robot Logger: http://%s' %arg_5)\n bot.info('Allow a few minutes for web server install, beepboop!')\n return arg_4"} +{"_id": "doc_8847", "title": "", "text": "def Func(arg_0):\n '''return a list of containers, determined by finding the metadata field\n \"type\" with value \"container.\" We alert the user to no containers \n if results is empty, and exit\n\n {'metadata': {'items': \n [\n {'key': 'type', 'value': 'container'}, ... \n ]\n }\n }\n\n '''\n arg_1 = []\n for arg_2 in arg_0._bucket.list_blobs():\n if arg_2.metadata is not None:\n if \"type\" in arg_2.metadata:\n if arg_2.metadata['type'] == \"container\":\n arg_1.append(arg_2)\n\n if len(arg_1) == 0:\n bot.info(\"No containers found, based on metadata type:container\")\n\n return arg_1"} +{"_id": "doc_8848", "title": "", "text": "def Func(arg_0):\n '''a \"list all\" search that doesn't require a query. Here we return to\n the user all objects that have custom metadata value of \"container\"\n\n IMPORTANT: the upload function adds this metadata. For a container to\n be found by the client, it must have the type as container in metadata.\n '''\n \n arg_1 = arg_0._list_containers()\n\n bot.info(\"[gs://%s] Containers\" %arg_0._bucket_name)\n\n arg_2 = []\n for arg_3 in arg_1:\n arg_4 = round(arg_3.size / (1024*1024.0))\n arg_4 = (\"%s MB\" %arg_4).rjust(10)\n arg_2.append([arg_4, arg_3.metadata['name']])\n\n bot.table(arg_2)\n return arg_2"} +{"_id": "doc_8849", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''sharing an image means sending a remote share from an image you\n control to a contact, usually an email.\n '''\n from sregistry.Func import get_client\n arg_3 = arg_0.image\n\n if not isinstance(arg_3,list):\n arg_3 = [arg_3]\n\n for arg_4 in arg_3:\n print(arg_4)\n \n # Detect any uri, and refresh client if necessary\n arg_5 = get_client(arg_4, quiet=arg_0.quiet)\n arg_5.announce(arg_0.command)\n arg_5.share(arg_4, share_to=arg_0.share_to)"} +{"_id": "doc_8850", "title": "", "text": "def Func(arg_0, arg_1):\n '''initialize the database, with the default database path or custom of\n\n the format sqlite:////scif/data/expfactory.db\n\n The custom path can be set with the environment variable SREGISTRY_DATABASE\n when a user creates the client, we must initialize this db\n the database should use the .singularity cache folder to cache\n layers and images, and .singularity/sregistry.db as a database\n '''\n\n # Database Setup, use default if uri not provided\n arg_0.database = 'sqlite:///%s' % arg_1\n arg_0.storage = SREGISTRY_STORAGE\n\n bot.debug(\"Database located at %s\" % arg_0.database)\n arg_0.engine = create_engine(arg_0.database, convert_unicode=True)\n arg_0.session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=arg_0.engine))\n \n arg_6.query = arg_0.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. 
Otherwise\n # you will have to import them first before calling Func()\n arg_6.metadata.create_all(bind=arg_0.engine)\n arg_0.Base = arg_6"} +{"_id": "doc_8851", "title": "", "text": "def Func():\n '''get default build template.\n '''\n arg_0 = get_installdir()\n arg_1 = \"%s/main/templates/build/singularity-cloudbuild.json\" % arg_0\n\n if os.path.exists(arg_1):\n bot.debug(\"Found template %s\" %arg_1)\n return read_json(arg_1)\n\n bot.warning(\"Template %s not found.\" % arg_1)"} +{"_id": "doc_8852", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n '''query will show images determined by the extension of img\n or simg.\n\n Parameters\n ==========\n query: the container name (path) or uri to Func for\n args.endpoint: can be an endpoint id and optional path, e.g.:\n\n --endpoint 6881ae2e-db26-11e5-9772-22000b9da45e:.singularity'\n --endpoint 6881ae2e-db26-11e5-9772-22000b9da45e'\n\n if not defined, we show the user endpoints to choose from\n\n Usage\n =====\n If endpoint is defined with a query, then we Func the given endpoint\n for a container of interested (designated by ending in .img or .simg\n\n If no endpoint is provided but instead just a query, we use the query\n to Func endpoints.\n \n '''\n\n # No query is defined\n if arg_1 is None:\n\n # Option 1: No query or endpoints lists all shared and personal\n if arg_2.endpoint is None:\n bot.info('Listing shared endpoints. Add query to expand Func.')\n return arg_0._list_endpoints()\n\n # Option 2: An endpoint without query will just list containers there\n else:\n return arg_0._list_endpoint(arg_2.endpoint)\n\n # Option 3: A query without an endpoint will Func endpoints for it\n if arg_2.endpoint is None:\n bot.info('You must specify an endpoint id to query!')\n return arg_0._list_endpoints(arg_1)\n\n # Option 4: A query with an endpoint will Func the endpoint for pattern\n return arg_0._list_endpoint(endpoint=arg_2.endpoint, \n arg_1=arg_1)"} +{"_id": "doc_8853", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''list all endpoints, providing a list of endpoints to the user to\n better filter the search. This function takes no arguments,\n as the user has not provided an endpoint id or query.\n '''\n bot.info('Please select an endpoint id to query from')\n \n arg_2 = arg_0._get_endpoints(arg_1)\n \n # Iterate through endpoints to provide user a list\n\n bot.custom(prefix=\"Globus\", message=\"Endpoints\", color=\"CYAN\")\n arg_3 = []\n for arg_4,arg_5 in arg_2.items():\n for arg_6,arg_7 in arg_5.items():\n arg_3.append([arg_6, '[%s]' %arg_4, arg_7['name']])\n\n bot.table(arg_3)\n return arg_3"} +{"_id": "doc_8854", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''An endpoint is required here to list files within. 
Optionally, we can\n take a path relative to the endpoint root.\n\n Parameters\n ==========\n endpoint: a single endpoint ID or an endpoint id and relative path.\n If no path is provided, we use '', which defaults to scratch.\n\n query: if defined, limit files to those that have query match\n\n '''\n if not hasattr(arg_0, 'transfer_client'):\n arg_0._init_transfer_client()\n\n # Separate endpoint id from the desired path\n\n arg_1, arg_3 = arg_0._parse_endpoint_name(arg_1)\n\n # Get a list of files at endpoint, under specific path\n try:\n arg_4 = arg_0.transfer_client.operation_ls(arg_1, arg_3=arg_3)\n except TransferAPIError as err:\n\n # Tell the user what went wrong!\n bot.custom(prefix='ERROR', message=err, color='RED')\n sys.exit(1)\n\n arg_5 = []\n\n for arg_6 in arg_4:\n\n # Highlight container contenders with purple\n arg_7 = arg_6['name']\n if arg_2 is None or arg_2 in arg_7:\n if arg_7.endswith('img'):\n arg_7 = bot.addColor('PURPLE',arg_7)\n \n arg_5.append([arg_6['type'],\n arg_6['permissions'],\n str(arg_6['size']),\n arg_7 ])\n \n if len(arg_5) > 0:\n arg_5 = [[\"type\",\"[perm]\",\"[size]\",\"[name]\"]] + arg_5\n bot.custom(prefix=\"Endpoint Listing %s\" %arg_3, message='', color=\"CYAN\")\n bot.table(arg_5)\n else:\n bot.info('No content was found at the selected endpoint.')\n return arg_5"} +{"_id": "doc_8855", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''share will use the client to get a shareable link for an image of choice.\n the functions returns a url of choice to send to a recipient.\n '''\n\n arg_3 = parse_image_name(remove_uri(arg_1))\n\n # Dropbox path is the path in storage with a slash\n arg_4 = '/%s' % arg_3['storage'] \n\n # First ensure that exists\n if arg_0.exists(arg_4) is True:\n\n # Create new shared link\n try:\n Func = arg_0.dbx.sharing_create_shared_link_with_settings(arg_4)\n\n # Already exists!\n except ApiError as err:\n Func = arg_0.dbx.sharing_create_shared_link(arg_4)\n\n bot.info(Func.url)\n return Func.url"} +{"_id": "doc_8856", "title": "", "text": "def Func():\n '''for private or protected registries, a client secrets file is required\n to be located at .sregistry. 
If no secrets are found, we use default\n of Singularity Hub, and return a dummy secrets.\n '''\n arg_0 = _default_client_secrets()\n\n # If token file not provided, check environment\n arg_1 = get_secrets_file()\n\n # If exists, load\n if arg_1 is not None:\n arg_0 = read_json(arg_1)\n\n # Otherwise, initialize\n else:\n from sregistry.defaults import SREGISTRY_CLIENT_SECRETS\n write_json(arg_0, SREGISTRY_CLIENT_SECRETS)\n\n return arg_0"} +{"_id": "doc_8857", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''delete object will delete a file from a bucket\n\n Parameters\n ==========\n storage_service: the service obtained with get_storage_service\n bucket_name: the name of the bucket\n object_name: the \"name\" parameter of the object.\n\n '''\n try:\n arg_3 = arg_0.objects().delete(bucket=arg_1,\n object=arg_2).execute()\n except HttpError as e:\n pass\n arg_3 = e\n return arg_3"} +{"_id": "doc_8858", "title": "", "text": "def Func(arg_0, arg_1):\n '''Func an image from Google Storage.\n\n Parameters\n ==========\n name: the name of the file (or image) to Func\n\n '''\n\n bot.debug(\"DELETE %s\" % arg_1)\n\n for arg_2 in files:\n if isinstance(arg_2, dict):\n if \"kind\" in arg_2:\n if arg_2['kind'] == \"storage#object\":\n arg_3 = \"/\".join(arg_2['id'].split('/')[:-1])\n arg_3 = re.sub('%s/' %arg_0._bucket['name'],'', arg_3,1)\n\n Func_object(service=arg_0._bucket_service,\n bucket_name=bucket['name'],\n arg_3=arg_3)"} +{"_id": "doc_8859", "title": "", "text": "def Func(arg_0):\n '''get_subparser will get a dictionary of subparsers, to help with printing help\n '''\n\n arg_1 = [arg_3 for arg_3 in arg_0._actions \n if isinstance(arg_3, argparse._SubParsersAction)]\n\n arg_2 = dict()\n for arg_3 in arg_1:\n # get all subparsers and print help\n for arg_4, arg_5 in arg_3.choices.items():\n arg_2[arg_4] = arg_5\n\n return arg_2"} +{"_id": "doc_8860", "title": "", "text": "def Func(arg_0, arg_1='-', arg_2=4, arg_3='0123456789'):\n '''\n Generate a robot name. Inspiration from Haikunator, but much more\n poorly implemented ;)\n\n Parameters\n ==========\n delim: Delimiter\n length: TokenLength\n chars: TokenChars\n '''\n\n arg_4 = arg_0._select(arg_0._descriptors)\n arg_5 = arg_0._select(arg_0._nouns)\n arg_6 = ''.join((arg_0._select(arg_3) for _ in range(arg_2)))\n return arg_1.join([arg_4, arg_5, arg_6])"} +{"_id": "doc_8861", "title": "", "text": "def Func(arg_0=None, arg_1=\"\", arg_2=True):\n '''get a temporary directory for an operation. If SREGISTRY_TMPDIR\n is set, return that. 
Otherwise, return the output of tempfile.mkdtemp\n\n Parameters\n ==========\n requested_tmpdir: an optional requested temporary directory, first\n priority as is coming from calling function.\n prefix: Given a need for a sandbox (or similar), we will need to \n create a subfolder *within* the SREGISTRY_TMPDIR.\n create: boolean to determine if we should create folder (True)\n '''\n from sregistry.defaults import SREGISTRY_TMPDIR\n\n # First priority for the base goes to the user requested.\n arg_3 = arg_0 or SREGISTRY_TMPDIR\n\n arg_1 = arg_1 or \"sregistry-tmp\"\n arg_1 = \"%s.%s\" %(arg_1, next(tempfile._get_candidate_names()))\n arg_3 = os.path.join(arg_3, arg_1)\n\n if not os.path.exists(arg_3) and arg_2 is True:\n os.mkdir(arg_3)\n\n return arg_3"} +{"_id": "doc_8862", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''extract a tar archive to a specified output folder\n\n Parameters\n ==========\n archive: the archive file to extract\n output_folder: the output folder to extract to\n handle_whiteout: use docker2oci variation to handle whiteout files\n\n '''\n from .terminal import run_command\n\n # Do we want to remove whiteout files?\n if arg_2 is True:\n return _Func(arg_0, arg_1)\n\n # If extension is .tar.gz, use -xzf\n arg_3 = '-xf'\n if arg_0.endswith(\".tar.gz\"):\n arg_3 = '-xzf'\n\n # Just use command line, more succinct.\n arg_4 = [\"tar\", arg_3, arg_0, \"-C\", arg_1, \"--exclude=dev/*\"]\n if not bot.is_quiet():\n print(\"Extracting %s\" % arg_0)\n\n return run_command(arg_4)"} +{"_id": "doc_8863", "title": "", "text": "def Func(arg_0, arg_1):\n '''use blob2oci to handle whiteout files for extraction. Credit for this\n script goes to docker2oci by Olivier Freyermouth, and see script\n folder for license.\n\n Parameters\n ==========\n archive: the archive to extract\n output_folder the output folder (sandbox) to extract to\n\n '''\n from .terminal import ( run_command, which )\n\n arg_2 = which('blob2oci')\n if arg_2['return_code'] != 0:\n bot.error('Cannot find blob2oci script on path, exiting.')\n sys.exit(1)\n \n arg_3 = arg_2['message'] \n arg_4 = ['exec' ,arg_3, '--layer', arg_0, '--extract', arg_1]\n\n if not bot.is_quiet():\n print(\"Extracting %s\" % arg_0)\n\n return run_command(arg_4)"} +{"_id": "doc_8864", "title": "", "text": "def Func(arg_0, arg_1='r'):\n '''Func reads in a json file and returns\n the data structure as dict.\n '''\n with open(arg_0, arg_1) as filey:\n arg_2 = json.load(filey)\n return arg_2"} +{"_id": "doc_8865", "title": "", "text": "def Func(arg_0):\n '''clean up will delete a list of files, only if they exist\n '''\n if not isinstance(arg_0, list):\n arg_0 = [arg_0]\n\n for arg_1 in arg_0:\n if os.path.exists(arg_1):\n bot.verbose3(\"Cleaning up %s\" % arg_1)\n os.remove(arg_1)"} +{"_id": "doc_8866", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n '''Func an image to an S3 endpoint'''\n\n arg_1 = os.path.abspath(arg_1)\n arg_4 = os.path.basename(arg_1)\n bot.debug(\"PUSH %s\" % arg_1)\n\n if not os.path.exists(arg_1):\n bot.error('%s does not exist.' 
%arg_1)\n sys.exit(1)\n\n # Extract the metadata\n arg_5 = parse_image_name(remove_uri(arg_2), arg_3=arg_3)\n arg_6 = os.path.getsize(arg_1) >> 20\n\n # Create extra metadata, this is how we identify the image later\n # *important* bug in boto3 will return these capitalized\n # see https://github.com/boto/boto3/issues/1709\n arg_7 = {'sizemb': \"%s\" % arg_6,\n 'client': 'sregistry' }\n\n arg_0.bucket.upload_file(arg_1, arg_5['storage_uri'], {\"Metadata\": arg_7 })"} +{"_id": "doc_8867", "title": "", "text": "def Func(arg_0, arg_1):\n '''get a collection if it exists. If it doesn't exist, create it first.\n\n Parameters\n ==========\n name: the collection name, usually parsed from get_image_names()['name']\n\n '''\n from sregistry.database.models import Collection\n arg_2 = arg_0.get_collection(arg_1)\n\n # If it doesn't exist, create it\n if arg_2 is None:\n arg_2 = Collection(arg_1=arg_1)\n arg_0.session.add(arg_2)\n arg_0.session.commit()\n\n return arg_2"} +{"_id": "doc_8868", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=\"latest\", arg_4=None):\n '''get a container, otherwise return None.\n '''\n from sregistry.database.models import Container\n if arg_4 is None:\n arg_5 = Container.query.filter_by(arg_2 = arg_2,\n arg_1 = arg_1,\n arg_3 = arg_3).first()\n else:\n arg_5 = Container.query.filter_by(arg_2 = arg_2,\n arg_1 = arg_1,\n arg_3 = arg_3,\n arg_4 = arg_4).first()\n return arg_5"} +{"_id": "doc_8869", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''List local Func in the database, optionally with a query.\n\n Paramters\n =========\n query: a string to search for in the container or collection name|tag|uri\n\n '''\n from sregistry.database.models import Collection, Container\n\n arg_2 = []\n if arg_1 is not None: \n arg_3 = \"%\" + arg_1 + \"%\"\n arg_4 = Container.query.filter(or_(Container.name == arg_1,\n Container.tag.like(arg_3),\n Container.uri.like(arg_3),\n Container.name.like(arg_3))).all() \n else:\n arg_4 = Container.query.all()\n\n if len(arg_4) > 0:\n arg_5 = \" [date] [client]\\t[uri]\"\n bot.custom(prefix='Containers:', arg_5=arg_5, color=\"RED\")\n for arg_6 in arg_4:\n arg_7 = arg_6.get_uri()\n arg_8 = arg_6.created_at.strftime('%B %d, %Y')\n arg_2.append([arg_8, \" [%s]\" %arg_6.client, arg_7])\n bot.table(arg_2) \n return arg_4"} +{"_id": "doc_8870", "title": "", "text": "def Func(arg_0, arg_1):\n '''Inspect a local image in the database, which typically includes the\n basic fields in the model.\n\n '''\n print(arg_1)\n arg_2 = arg_0.get(arg_1)\n if arg_2 is not None:\n arg_3 = arg_2.collection.name\n arg_4 = arg_2.__dict__.copy()\n arg_4['collection'] = arg_3 \n arg_4['metrics'] = json.loads(arg_4['metrics'])\n del arg_4['_sa_instance_state']\n arg_4['created_at'] = str(arg_4['created_at'])\n print(json.dumps(arg_4, indent=4, sort_keys=True))\n return arg_4"} +{"_id": "doc_8871", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Func performs a move, but ensures the path is maintained in storage\n\n Parameters\n ==========\n image_name: the image name (uri) to Func to.\n path: the name to Func (basename is taken)\n\n '''\n arg_3 = arg_0.get(arg_1, quiet=True)\n\n if arg_3 is not None:\n if arg_3.image is not None:\n\n # The original directory for the container stays the same\n arg_4 = os.path.dirname(arg_3.image)\n\n # But we derive a new filename and uri\n\n arg_5 = parse_image_name( remove_uri (arg_2) )\n arg_6 = os.path.join( arg_0.storage,\n os.path.dirname(arg_5['storage']) )\n\n # This is the collection folder\n\n if not 
os.path.exists(arg_6):\n os.mkdir(arg_6)\n\n # Here we get the new full path, Func the container file\n\n arg_7 = os.path.abspath(os.path.join(arg_4, arg_5['storage']))\n arg_3 = arg_0.cp(move_to=arg_7,\n arg_3=arg_3,\n command=\"Func\")\n\n # On successful Func of file, update the uri\n\n if arg_3 is not None:\n arg_3.uri = arg_5['uri']\n arg_0.session.commit()\n return arg_3\n\n bot.warning('%s not found' %(arg_1))"} +{"_id": "doc_8872", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Move an image from it's current location to a new path.\n Removing the image from organized storage is not the recommended approach\n however is still a function wanted by some.\n\n Parameters\n ==========\n image_name: the parsed image name.\n path: the location to move the image to\n\n '''\n\n arg_3 = arg_0.get(arg_1, quiet=True)\n\n if arg_3 is not None:\n\n arg_4 = arg_3.uri or arg_3.get_uri()\n arg_5 = arg_3.image or ''\n\n # Only continue if image file exists\n if os.path.exists(arg_5):\n\n # Default assume directory, use image name and path fully\n arg_6 = os.path.basename(arg_5)\n arg_7 = os.path.abspath(arg_2)\n\n # If it's a file, use filename provided\n if not os.path.isdir(arg_2):\n arg_6 = os.path.basename(arg_2)\n arg_7 = os.path.dirname(arg_2)\n \n # If directory is empty, assume $PWD\n if arg_7 == '':\n arg_7 = os.getcwd()\n \n # Copy to the fullpath from the storage\n arg_8 = os.path.abspath(os.path.join(arg_7,arg_6))\n return arg_0.cp(move_to=arg_8, \n arg_3=arg_3,\n command=\"move\")\n \n bot.warning('%s not found' %(arg_1))"} +{"_id": "doc_8873", "title": "", "text": "def Func(arg_0, arg_1):\n '''Remove an image from the database and filesystem.\n '''\n arg_2 = arg_0.rm(arg_1, delete=True)\n if arg_2 is not None:\n bot.info(\"[Func] %s\" % arg_2)"} +{"_id": "doc_8874", "title": "", "text": "def Func(arg_0, arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=True, \n arg_7=False):\n\n '''get or create a container, including the collection to Func it to.\n This function can be used from a file on the local system, or via a URL\n that has been downloaded. Either way, if one of url, version, or image_file\n is not provided, the model is created without it. 
If a version is not\n provided but a file path is, then the file hash is used.\n\n Parameters\n ==========\n image_path: full path to image file\n image_name: if defined, the user wants a custom name (and not based on uri)\n metadata: any extra metadata to keep for the image (dict)\n save: if True, move the image to the cache if it's not there\n copy: If True, copy the image instead of moving it.\n\n image_name: a uri that gets parsed into a names object that looks like:\n\n {'collection': 'vsoch',\n 'image': 'hello-world',\n 'storage': 'vsoch/hello-world-latest.img',\n 'tag': 'latest',\n 'version': '12345'\n 'uri': 'vsoch/hello-world:latest@12345'}\n\n After running Func, the user will take some image in a working\n directory, Func it to the database, and have it available for search\n and use under SREGISTRY_STORAGE//\n\n If the container was retrieved from a webby place, it should have version\n If no version is found, the file hash is used.\n '''\n\n from sregistry.database.models import (\n Container,\n Collection\n )\n\n # We can only save if the image is provided\n if arg_1 is not None:\n if not os.path.exists(arg_1) and arg_6 is True:\n bot.error('Cannot find %s' %arg_1)\n sys.exit(1)\n\n # An image uri is required for version, tag, etc.\n if arg_2 is None:\n bot.error('You must provide an image uri /')\n sys.exit(1)\n\n arg_8 = parse_image_name( remove_uri(arg_2) )\n bot.debug('Adding %s to registry' % arg_8['uri']) \n\n # If Singularity is installed, inspect image for metadata\n arg_5 = arg_0.get_metadata(arg_1, arg_8=arg_8)\n arg_9 = arg_0.get_or_create_collection(arg_8['collection'])\n\n # Get a hash of the file for the version, or use provided\n arg_10 = arg_8.get('version')\n if arg_10 == None:\n if arg_1 != None:\n arg_10 = get_image_hash(arg_1)\n else:\n arg_10 = '' # we can't determine a version, not in API/no file\n arg_8 = parse_image_name( remove_uri(arg_2), arg_10=arg_10 )\n\n # If save, move to registry storage first\n if arg_6 is True and arg_1 is not None:\n\n # If the user hasn't defined a custom name\n if arg_3 is None: \n arg_3 = arg_0._get_storage_name(arg_8)\n\n if arg_7 is True:\n copyfile(arg_1, arg_3)\n else:\n shutil.move(arg_1, arg_3)\n \n arg_1 = arg_3\n\n # Just in case the client didn't provide it, see if we have in metadata\n if arg_4 is None and \"url\" in arg_5:\n arg_4 = arg_5['url']\n\n # First check that we don't have one already!\n arg_11 = arg_0.get_container(name=arg_8['image'],\n collection_id=arg_9.id, \n tag=arg_8['tag'],\n arg_10=arg_10)\n\n # The container did not exist, create it\n if arg_11 is None:\n arg_12 = \"new\"\n arg_11 = Container(arg_13=json.dumps(arg_5),\n name=arg_8['image'],\n arg_15=arg_1,\n arg_14=arg_0.client_name,\n tag=arg_8['tag'],\n arg_10=arg_10,\n arg_4=arg_4,\n uri=arg_8['uri'],\n collection_id=arg_9.id)\n\n arg_0.session.Func(arg_11)\n arg_9.containers.append(arg_11)\n\n # The container existed, update it.\n else:\n arg_12=\"update\"\n arg_13=json.loads(arg_11.metrics)\n arg_13.update(arg_5)\n arg_11.url= arg_4\n arg_11.client=arg_0.client_name\n if arg_1 is not None:\n arg_11.image=arg_1\n arg_11.metrics=json.dumps(arg_13)\n\n arg_0.session.commit()\n bot.info(\"[container][%s] %s\" % (arg_12,arg_8['uri']))\n return arg_11"} +{"_id": "doc_8875", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n '''Func an image to Singularity Registry'''\n\n arg_1 = os.path.abspath(arg_1)\n arg_4 = os.path.basename(arg_1)\n bot.debug(\"PUSH %s\" % arg_1)\n\n if not os.path.exists(arg_1):\n bot.error('%s does not 
exist.' %arg_1)\n sys.exit(1)\n\n # Interaction with a registry requires secrets\n arg_0.require_secrets()\n\n # Extract the metadata\n arg_5 = parse_image_name(remove_uri(arg_2), arg_3=arg_3)\n arg_6 = os.path.getsize(arg_1) >> 20\n\n# COLLECTION ###################################################################\n\n # If the registry is provided in the uri, use it\n if arg_5['registry'] == None:\n arg_5['registry'] = arg_0.base\n\n # If the base doesn't start with http or https, add it\n arg_5 = arg_0._add_https(arg_5)\n\n # Prepare Func request, this will return a collection ID if permission\n arg_7 = '%s/Func/' % arg_5['registry']\n arg_8 = '%s/upload/chunked_upload' % arg_5['registry']\n arg_9 = arg_0.authorize(request_type=\"Func\",\n arg_5=arg_5)\n\n # Data fields for collection\n arg_10 = { 'collection': arg_5['collection'],\n 'name':arg_5['image'],\n 'tag': arg_5['tag']}\n\n arg_11 = { 'Authorization': arg_9 }\n\n arg_12 = requests.post(arg_8, json=arg_10, arg_11=arg_11)\n\n # Always tell the user what's going on!\n arg_13 = arg_0._read_response(arg_12)\n print('\\n[1. Collection return status {0} {1}]'.format(arg_12.status_code, arg_13))\n\n # Get the collection id, if created, and continue with upload\n if arg_12.status_code != 200:\n sys.exit(1)\n\n\n# UPLOAD #######################################################################\n\n arg_7 = '%s/upload' % arg_5['registry'].replace('/api','')\n bot.debug('Seting upload URL to {0}'.format(arg_7))\n\n arg_14 = arg_12.json()['cid']\n arg_15 = os.path.basename(arg_5['storage'])\n\n arg_9 = arg_0.authorize(request_type=\"upload\",\n arg_5=arg_5)\n\n arg_16 = MultipartEncoder(arg_10={'SREGISTRY_EVENT': arg_9,\n 'name': arg_5['image'],\n 'collection': str(arg_14),\n 'tag': arg_5['tag'],\n 'file1': (arg_15, open(arg_1, 'rb'), 'text/plain')})\n\n arg_17 = create_callback(arg_16, arg_0.quiet)\n arg_18 = MultipartEncoderMonitor(arg_16, arg_17)\n arg_11 = {'Content-Type': arg_18.content_type,\n 'Authorization': arg_9 }\n\n try:\n arg_12 = requests.post(arg_7, data=arg_18, arg_11=arg_11)\n arg_12.raise_for_status()\n arg_13 = arg_12.json()['message']\n print('\\n[Return status {0} {1}]'.format(arg_12.status_code, arg_13))\n except requests.HTTPError as e:\n print('\\nUpload failed: {0}.'.format(e))\n except KeyboardInterrupt:\n print('\\nUpload cancelled.')\n except Exception as e:\n print(e)"} +{"_id": "doc_8876", "title": "", "text": "def Func(arg_0, arg_1=\"from\", arg_2=True):\n '''take a recipe, and return the complete header, line. 
If\n remove_header is True, only return the value.\n\n Parameters\n ==========\n recipe: the recipe file\n headers: the header key to find and parse\n remove_header: if true, remove the key\n\n '''\n arg_3 = None\n arg_4 = [x for x in arg_0.split('\\n') if \"%s:\" %arg_1 in x.lower()]\n\n # Case 1: We did not find the fromline\n if len(arg_4) == 0:\n return \"\"\n\n # Case 2: We found it!\n if len(arg_4) > 0:\n arg_4 = arg_4[0]\n arg_3 = arg_4.strip()\n\n # Does the user want to clean it up?\n if arg_2 is True:\n arg_3 = arg_4.split(':', 1)[-1].strip()\n return arg_3"} +{"_id": "doc_8877", "title": "", "text": "def Func(arg_0, arg_1=\"Singularity\", arg_2=None):\n '''Func will parse a single file, and if valid,\n return an updated manifest\n\n Parameters\n ==========\n filename: the filename to assess for a recipe\n pattern: a default pattern to search for\n manifest: an already started manifest\n\n '''\n\n if arg_1 is None:\n arg_1 = \"Singularity*\"\n\n arg_3 = None\n arg_4 = os.path.basename(arg_0)\n if fnmatch.fnmatch(arg_4, arg_1):\n arg_3 = {'path': os.path.abspath(arg_0),\n 'modified':os.path.getmtime(arg_0)}\n\n # If we already have the recipe, only add if more recent\n if arg_2 is not None and arg_3 is not None:\n arg_5 = '/'.join(arg_0.split('/')[-2:])\n if arg_5 in arg_2:\n if arg_2[arg_5]['modified'] < os.path.getmtime(arg_0):\n arg_2[arg_5] = arg_3\n else:\n arg_2[arg_5] = arg_3\n return arg_2\n\n return arg_3"} +{"_id": "doc_8878", "title": "", "text": "def Func(arg_0):\n '''given a list of files, copy them to a temporary folder,\n compress into a .tar.gz, and rename based on the file hash.\n Return the full path to the .tar.gz in the temporary folder.\n\n Parameters\n ==========\n package_files: a list of files to include in the tar.gz\n\n '''\n # Ensure package files all exist\n for arg_1 in arg_0:\n if not os.path.exists(arg_1):\n bot.exit('Cannot find %s.' % arg_1)\n\n bot.log('Generating build package for %s files...' % len(arg_0))\n arg_2 = get_tmpdir(prefix=\"sregistry-build\")\n arg_3 = '%s/build.tar.gz' % arg_2\n arg_4 = tarfile.open(arg_3, \"w:gz\")\n\n # Create the tar.gz\n for arg_1 in arg_0:\n arg_4.add(arg_1)\n arg_4.close()\n\n # Get hash (sha256), and rename file\n arg_5 = get_file_hash(arg_3)\n arg_6 = \"%s/%s.tar.gz\" %(arg_2, arg_5)\n shutil.move(arg_3, arg_6)\n return arg_6"} +{"_id": "doc_8879", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''Func will take a name supplied by the user,\n remove all special characters (except for those defined by \"special-characters\"\n and return the new image name.\n '''\n if arg_1 is None:\n arg_1 = []\n return ''.join(arg_2.lower()\n for arg_2 in arg_0 if arg_2.isalnum() or arg_2 in arg_1)"} +{"_id": "doc_8880", "title": "", "text": "def Func(arg_0):\n '''Func will determine if color should be added\n to a print. 
Will check if being run in a terminal, and\n if has support for asci'''\n arg_1 = get_user_color_preference()\n if arg_1 is not None:\n return arg_1\n arg_2 = [arg_0.errorStream, arg_0.outputStream]\n for arg_3 in arg_2:\n if not hasattr(arg_3, 'isatty'):\n return False\n if not arg_3.isatty():\n return False\n return True"} +{"_id": "doc_8881", "title": "", "text": "def Func(arg_0, arg_1):\n '''determine if a level should print to\n stderr, includes all levels but INFO and QUIET'''\n if arg_1 in [ABORT,\n ERROR,\n WARNING,\n VERBOSE,\n VERBOSE1,\n VERBOSE2,\n VERBOSE3,\n DEBUG]:\n return True\n return False"} +{"_id": "doc_8882", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Func will Func a message to a stream,\n first checking the encoding\n '''\n if isinstance(arg_2, bytes):\n arg_2 = arg_2.decode('utf-8')\n arg_1.Func(arg_2)"} +{"_id": "doc_8883", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2):\n '''Func will print a Func of entries. If the rows is \n a dictionary, the keys are interpreted as column names. if\n not, a numbered list is used.\n '''\n\n arg_3 = [str(x) for x in range(1,len(arg_1)+1)]\n if isinstance(arg_1, dict):\n arg_3 = list(arg_1.keys())\n arg_1 = list(arg_1.values())\n\n for arg_4 in arg_1: \n arg_5 = arg_3.pop(0)\n arg_5 = arg_5.ljust(arg_2)\n arg_6 = \"\\t\".join(arg_4)\n arg_0.custom(prefix=arg_5,\n arg_6=arg_6)"} +{"_id": "doc_8884", "title": "", "text": "def Func(arg_0):\n '''return a default template for some function in sregistry\n If there is no template, None is returned.\n\n Parameters\n ==========\n name: the name of the template to retrieve\n\n '''\n arg_0 = arg_0.lower()\n arg_1 = dict()\n\n arg_1['tarinfo'] = {\"gid\": 0,\n \"uid\": 0,\n \"uname\": \"root\",\n \"gname\": \"root\",\n \"mode\": 493}\n\n if arg_0 in arg_1:\n bot.debug(\"Found template for %s\" % (arg_0))\n return arg_1[arg_0]\n else:\n bot.warning(\"Cannot find template %s\" % (arg_0))"} +{"_id": "doc_8885", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''return the image manifest via the aws client, saved in self.manifest\n '''\n\n arg_3 = None\n arg_4 = arg_0.aws.describe_images(repositoryName=arg_1)\n if 'imageDetails' in arg_4:\n for arg_5 in arg_4.get('imageDetails'):\n if arg_2 in arg_5['imageTags']:\n arg_3 = arg_5\n break\n\n # if the image isn't found, we need to exit\n if arg_3 is None:\n bot.exit('Cannot find %s:%s, is the uri correct?' %(arg_1, arg_6))\n\n arg_6 = arg_3['imageDigest']\n arg_7 = arg_0.aws.batch_get_image(repositoryName=arg_1, \n imageIds=[{\"imageDigest\": arg_6,\n \"imageTag\": arg_2}])\n\n arg_0.manifest = json.loads(arg_7['images'][0]['imageManifest'])\n return arg_0.manifest"} +{"_id": "doc_8886", "title": "", "text": "def Func(arg_0):\n '''update secrets will take a secrets credential file\n either located at .sregistry or the environment variable\n SREGISTRY_CLIENT_SECRETS and update the current client \n secrets as well as the associated API base. This is where you\n should do any customization of the secrets flie, or using\n it to update your client, if needed.\n '''\n # Get a setting for client myclient and some variable name VAR. 
\n # returns None if not set\n arg_1 = arg_0._get_setting('SREGISTRY_MYCLIENT_VAR')\n\n # Get (and if found in environment (1) settings (2) update the variable\n # It will still return None if not set\n arg_1 = arg_0._get_and_update_setting('SREGISTRY_MYCLIENT_VAR')\n\n # If you have a setting that is required and not found, you should exit.\n\n # Here is how to read all client secrets\n arg_0.secrets = read_client_secrets()\n \n # If you don't want to use the shared settings file, you have your own.\n # Here is how to get if the user has a cache for you enabled, this\n # returns a path (enabled) or None (disabled) that you should honor\n # You can use this as a file path or folder and for both cases, you\n # need to create the file or folder\n if arg_0._credential_cache is not None:\n bot.info(\"credential cache set to %s\" %arg_0._credential_cache)"} +{"_id": "doc_8887", "title": "", "text": "def Func(arg_0):\n \"\"\"Translate S3 errors to FSErrors.\"\"\"\n try:\n yield\n except ClientError as error:\n arg_1 = error.response.get(\"Error\", {})\n arg_2 = arg_1.get(\"Code\", None)\n arg_3 = error.response.get(\"ResponseMetadata\", {})\n arg_4 = arg_3.get(\"HTTPStatusCode\", 200)\n arg_5 = arg_1.get(\"Message\", None)\n if arg_2 == \"NoSuchBucket\":\n raise errors.ResourceError(arg_0, exc=error, msg=arg_5)\n if arg_4 == 404:\n raise errors.ResourceNotFound(arg_0)\n elif arg_4 == 403:\n raise errors.PermissionDenied(arg_0=arg_0, msg=arg_5)\n else:\n raise errors.OperationFailed(arg_0=arg_0, exc=error)\n except SSLError as error:\n raise errors.OperationFailed(arg_0, exc=error)\n except EndpointConnectionError as error:\n raise errors.RemoteConnectionError(arg_0, exc=error, msg=\"{}\".format(error))"} +{"_id": "doc_8888", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Create a S3File backed with a temporary file.\"\"\"\n arg_4 = tempfile.TemporaryFile()\n arg_5 = arg_0(arg_4, arg_1, arg_2, arg_3=arg_3)\n return arg_5"} +{"_id": "doc_8889", "title": "", "text": "def Func(arg_0, arg_1=arg_2, arg_3=arg_4,\n arg_5=arg_6, arg_7=arg_8):\n \"\"\"\n Builds a url to a gravatar from an email address.\n\n :param email: The email to fetch the gravatar for\n :param size: The size (in pixels) of the gravatar to fetch\n :param default: What type of default image to use if the gravatar does not exist\n :param rating: Used to filter the allowed gravatar ratings\n :param secure: If True use https, otherwise plain http\n \"\"\"\n if arg_7:\n arg_9 = GRAVATAR_SECURE_URL\n else:\n arg_9 = GRAVATAR_URL\n\n # Calculate the email hash\n arg_10 = calculate_gravatar_hash(arg_0)\n\n # Build querystring\n arg_11 = urlencode({\n 's': str(arg_1),\n 'd': arg_3,\n 'r': arg_5,\n })\n\n # Build url\n arg_12 = '{base}avatar/{hash}.jpg?{qs}'.format(base=arg_9,\n hash=arg_10, qs=arg_11)\n\n return arg_12"} +{"_id": "doc_8890", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns True if the user has a gravatar, False if otherwise\n \"\"\"\n # Request a 404 response if the gravatar does not exist\n arg_1 = get_gravatar_url(arg_0, default=GRAVATAR_DEFAULT_IMAGE_404)\n\n # Verify an OK response was received\n try:\n arg_2 = Request(arg_1)\n arg_2.get_method = lambda: 'HEAD'\n return 200 == urlopen(arg_2).code\n except (HTTPError, URLError):\n return False"} +{"_id": "doc_8891", "title": "", "text": "def Func(arg_0=16, arg_1=16, arg_2=4):\n \"\"\"\n Generator for blocks for a chimera block quotient\n \"\"\"\n for arg_3 in xrange(arg_0):\n for arg_4 in xrange(arg_1):\n for arg_5 in (0, 1):\n yield 
tuple((arg_3, arg_4, arg_5, arg_6) for arg_6 in xrange(arg_2))"} +{"_id": "doc_8892", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Extract the blocks from a graph, and returns a\n block-quotient graph according to the acceptability\n functions block_good and eblock_good\n\n Inputs:\n G: a networkx graph\n blocks: a tuple of tuples\n\n \"\"\"\n from networkx import Graph\n from itertools import product\n\n arg_2 = Graph()\n arg_3 = {}\n for arg_4, arg_5 in enumerate(arg_1):\n arg_2.add_node(arg_4)\n if not arg_5 or not all(arg_0.has_node(arg_6) for arg_6 in arg_5):\n continue\n for arg_7 in arg_5:\n if arg_7 in arg_3:\n raise(RuntimeError, \"two blocks overlap\")\n arg_3[arg_7] = arg_4\n\n for arg_7, arg_8 in arg_3.items():\n arg_9 = arg_1[arg_8]\n for arg_10 in arg_0[arg_7]:\n if arg_10 not in arg_3:\n continue\n arg_11 = arg_3[arg_10]\n if arg_2.has_edge(arg_8, arg_11) or arg_8 == arg_11:\n continue\n arg_12 = arg_1[arg_11]\n\n if arg_9[0][2] == arg_12[0][2]:\n arg_13 = zip(arg_9, arg_12)\n else:\n arg_13 = product(arg_9, arg_12)\n\n if all(arg_0.has_edge(arg_6, arg_14) for arg_6, arg_14 in arg_13):\n arg_2.add_edge(arg_8, arg_11)\n\n return arg_2"} +{"_id": "doc_8893", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a set of resonance forms as SMILES strings, given a SMILES string.\n\n :param smiles: A SMILES string.\n :returns: A set containing SMILES strings for every possible resonance form.\n :rtype: set of strings.\n \"\"\"\n arg_1 = Chem.MolFromSmiles(arg_0)\n #Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default\n arg_2 = ResonanceEnumerator().enumerate(arg_1)\n return {Chem.MolToSmiles(arg_3, isomericSmiles=True) for arg_3 in arg_2}"} +{"_id": "doc_8894", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Repeatedly apply normalization transform to molecule until no changes occur.\n\n It is possible for multiple products to be produced when a rule is applied. The rule is applied repeatedly to\n each of the products, until no further changes occur or after 20 attempts. 
If there are multiple unique products\n after the final application, the first product (sorted alphabetically by SMILES) is chosen.\n \"\"\"\n arg_3 = [arg_1]\n for arg_4 in six.moves.range(20):\n arg_5 = {}\n for arg_1 in arg_3:\n for arg_6 in [x[0] for x in arg_2.RunReactants((arg_1,))]:\n if arg_7.SanitizeMol(arg_6, catchErrors=True) == 0:\n arg_5[arg_7.MolToSmiles(arg_6, arg_9=True)] = arg_6\n if arg_5:\n arg_3 = [arg_5[s] for s in sorted(arg_5)]\n else:\n # If n == 0, the rule was not applicable and we return None\n return arg_3[0] if arg_4 > 0 else None"} +{"_id": "doc_8895", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a canonical tautomer by enumerating and scoring all possible tautomers.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :return: The canonical tautomer.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n # TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers\n arg_2 = arg_0._enumerate_tautomers(arg_1)\n if len(arg_2) == 1:\n return arg_2[0]\n # Calculate score for each tautomer\n arg_3 = None\n for arg_4 in arg_2:\n arg_5 = Chem.MolToSmiles(arg_4, isomericSmiles=True)\n log.debug('Tautomer: %s', arg_5)\n arg_6 = 0\n # Add aromatic ring scores\n arg_7 = Chem.GetSymmSSSR(arg_4)\n for arg_8 in arg_7:\n arg_9 = {arg_4.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(arg_8)}\n arg_10 = {arg_4.GetAtomWithIdx(idx).GetAtomicNum() for idx in arg_8}\n if arg_9 == {BondType.AROMATIC}:\n log.debug('Score +100 (aromatic ring)')\n arg_6 += 100\n if arg_10 == {6}:\n log.debug('Score +150 (carbocyclic aromatic ring)')\n arg_6 += 150\n # Add SMARTS scores\n for arg_11 in arg_0.scores:\n for arg_12 in arg_4.GetSubstructMatches(arg_11.smarts):\n log.debug('Score %+d (%s)', arg_11.score, arg_11.name)\n arg_6 += arg_11.score\n # Add (P,S,Se,Te)-H scores\n for arg_13 in arg_4.GetAtoms():\n if arg_13.GetAtomicNum() in {15, 16, 34, 52}:\n arg_14 = arg_13.GetTotalNumHs()\n if arg_14:\n log.debug('Score %+d (%s-H bonds)', -arg_14, arg_13.GetSymbol())\n arg_6 -= arg_14\n # Set as highest if score higher or if score equal and smiles comes first alphabetically\n if not arg_3 or arg_3['score'] < arg_6 or (arg_3['score'] == arg_6 and arg_5 < arg_3['smiles']):\n log.debug('New highest tautomer: %s (%s)', arg_5, arg_6)\n arg_3 = {'smiles': arg_5, 'tautomer': arg_4, 'score': arg_6}\n return arg_3['tautomer']"} +{"_id": "doc_8896", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Break covalent bonds between metals and organic atoms under certain conditions.\n\n The algorithm works as follows:\n\n - Disconnect N, O, F from any metal.\n - Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).\n - For every bond broken, adjust the charges of the begin and end atoms accordingly.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :return: The molecule with metals Funced.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n log.debug('Running MetalDisconnector')\n # Remove bonds that match SMARTS\n for arg_2 in [arg_0._metal_nof, arg_0._metal_non]:\n arg_3 = arg_1.GetSubstructMatches(arg_2)\n arg_4 = Chem.RWMol(arg_1)\n arg_5 = []\n for arg_6, arg_7 in arg_3:\n # TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?\n arg_5.append(int(arg_1.GetBondBetweenAtoms(arg_6, arg_7).GetBondTypeAsDouble()))\n arg_4.RemoveBond(arg_6, arg_7)\n # Adjust neighbouring charges accordingly\n arg_1 = arg_4.GetMol()\n for arg_8, (arg_6, arg_7) in 
enumerate(arg_3):\n arg_9 = arg_5[arg_8]\n arg_10 = arg_1.GetAtomWithIdx(arg_6)\n arg_10.SetFormalCharge(arg_10.GetFormalCharge() + arg_9)\n arg_11 = arg_1.GetAtomWithIdx(arg_7)\n arg_11.SetFormalCharge(arg_11.GetFormalCharge() - arg_9)\n log.info('Removed covalent bond between %s and %s', arg_10.GetSymbol(), arg_11.GetSymbol())\n Chem.SanitizeMol(arg_1)\n return arg_1"} +{"_id": "doc_8897", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a standardized canonical SMILES string given a SMILES string.\n\n Note: This is a convenience function for quickly standardizing a single SMILES string. It is more efficient to use\n the :class:`~molvs.standardize.Standardizer` class directly when working with many molecules or when custom options\n are needed.\n\n :param string smiles: The SMILES for the molecule.\n :returns: The SMILES for the standardized molecule.\n :rtype: string.\n \"\"\"\n # Skip sanitize as standardize does this anyway\n arg_1 = Chem.MolFromSmiles(arg_0, sanitize=False)\n arg_1 = Standardizer().standardize(arg_1)\n return Chem.MolToSmiles(arg_1, isomericSmiles=True)"} +{"_id": "doc_8898", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a set of tautomers as SMILES strings, given a SMILES string.\n\n :param smiles: A SMILES string.\n :returns: A set containing SMILES strings for every possible tautomer.\n :rtype: set of strings.\n \"\"\"\n # Skip sanitize as standardize does this anyway\n arg_1 = Chem.MolFromSmiles(arg_0, sanitize=False)\n arg_1 = Standardizer().standardize(arg_1)\n arg_2 = TautomerEnumerator().enumerate(arg_1)\n return {Chem.MolToSmiles(arg_3, isomericSmiles=True) for arg_3 in arg_2}"} +{"_id": "doc_8899", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return a Funcd version the given molecule.\n\n The standardization process consists of the following stages: RDKit\n :py:func:`~rdkit.Chem.rdmolops.RemoveHs`, RDKit :py:func:`~rdkit.Chem.rdmolops.SanitizeMol`,\n :class:`~molvs.metal.MetalDisconnector`, :class:`~molvs.normalize.Normalizer`,\n :class:`~molvs.charge.Reionizer`, RDKit :py:func:`~rdkit.Chem.rdmolops.AssignStereochemistry`.\n\n :param mol: The molecule to Func.\n :type mol: rdkit.Chem.rdchem.Mol\n :returns: The Funcd molecule.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n arg_1 = copy.deepcopy(arg_1)\n Chem.SanitizeMol(arg_1)\n arg_1 = Chem.RemoveHs(arg_1)\n arg_1 = arg_0.disconnect_metals(arg_1)\n arg_1 = arg_0.normalize(arg_1)\n arg_1 = arg_0.reionize(arg_1)\n Chem.AssignStereochemistry(arg_1, force=True, cleanIt=True)\n # TODO: Check this removes symmetric stereocenters\n return arg_1"} +{"_id": "doc_8900", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the tautomer parent of a given molecule.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :param bool skip_standardize: Set to True if mol has already been standardized.\n :returns: The tautomer parent molecule.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n if not arg_2:\n arg_1 = arg_0.standardize(arg_1)\n arg_3 = arg_0.canonicalize_tautomer(arg_1)\n arg_3 = arg_0.standardize(arg_3)\n return arg_3"} +{"_id": "doc_8901", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the fragment parent of a given molecule.\n\n The fragment parent is the largest organic covalent unit in the molecule.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :param bool skip_standardize: Set to True if mol has already been standardized.\n :returns: The fragment parent molecule.\n :rtype: rdkit.Chem.rdchem.Mol\n 
\"\"\"\n if not arg_2:\n arg_1 = arg_0.standardize(arg_1)\n # TODO: Consider applying FragmentRemover first to remove salts, solvents?\n arg_3 = arg_0.largest_fragment(arg_1)\n return arg_3"} +{"_id": "doc_8902", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the stereo parent of a given molecule.\n\n The stereo parent has all stereochemistry information removed from tetrahedral centers and double bonds.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :param bool skip_standardize: Set to True if mol has already been standardized.\n :returns: The stereo parent molecule.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n if not arg_2:\n arg_1 = arg_0.standardize(arg_1)\n else:\n arg_1 = copy.deepcopy(arg_1)\n Chem.RemoveStereochemistry(arg_1)\n return arg_1"} +{"_id": "doc_8903", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the isotope parent of a given molecule.\n\n The isotope parent has all atoms replaced with the most abundant isotope for that element.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :param bool skip_standardize: Set to True if mol has already been standardized.\n :returns: The isotope parent molecule.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n if not arg_2:\n arg_1 = arg_0.standardize(arg_1)\n else:\n arg_1 = copy.deepcopy(arg_1)\n # Replace isotopes with common weight\n for arg_3 in arg_1.GetAtoms():\n arg_3.SetIsotope(0)\n return arg_1"} +{"_id": "doc_8904", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Return the super parent of a given molecule.\n\n THe super parent is fragment, charge, isotope, stereochemistry and tautomer insensitive. From the input\n molecule, the largest fragment is taken. This is uncharged and then isotope and stereochemistry information is\n discarded. 
Finally, the canonical tautomer is determined and returned.\n\n :param mol: The input molecule.\n :type mol: rdkit.Chem.rdchem.Mol\n :param bool skip_standardize: Set to True if mol has already been standardized.\n :returns: The super parent molecule.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n if not arg_2:\n arg_1 = arg_0.standardize(arg_1)\n # We don't need to get fragment parent, because the charge parent is the largest fragment\n arg_1 = arg_0.charge_parent(arg_1, arg_2=True)\n arg_1 = arg_0.isotope_parent(arg_1, arg_2=True)\n arg_1 = arg_0.stereo_parent(arg_1, arg_2=True)\n arg_1 = arg_0.tautomer_parent(arg_1, arg_2=True)\n arg_1 = arg_0.standardize(arg_1)\n return arg_1"} +{"_id": "doc_8905", "title": "", "text": "def Func():\n \"\"\"Main function for molvs command line interface.\"\"\"\n\n # Root options\n arg_0 = MolvsParser(epilog='use \"molvs -h\" to show help for a specific command')\n arg_1 = arg_0.add_subparsers(title='Available commands')\n\n # Options common to all commands\n\n arg_2 = MolvsParser(add_help=False)\n arg_2.add_argument('infile', nargs='?', help='input filename', type=argparse.FileType('r'), default=sys.stdin)\n arg_2.add_argument('-i', '--intype', help='input filetype', choices=FILETYPES)\n arg_2.add_argument('-:', '--smiles', help='input SMILES instead of file', metavar='')\n arg_2.add_argument('-O', '--outfile', help='output filename', type=argparse.FileType('w'), default=sys.stdout, metavar='')\n\n # Standardize options\n arg_3 = arg_1.add_parser('standardize', help='standardize a molecule', parents=[arg_2])\n arg_3.add_argument('-o', '--outtype', help='output filetype', choices=FILETYPES)\n arg_3.set_defaults(func=standardize_Func)\n\n # Validate options\n arg_4 = arg_1.add_parser('validate', help='validate a molecule', parents=[arg_2])\n arg_4.set_defaults(func=validate_Func)\n\n arg_5 = arg_0.parse_args()\n try:\n arg_5.func(arg_5)\n except Exception as e:\n sys.stderr.write('Error: %s\\n\\n'.encode() % e.message)\n arg_0.print_help()\n sys.exit(2)"} +{"_id": "doc_8906", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the molecule with specified fragments Funcd.\n\n :param mol: The molecule to Func fragments from.\n :type mol: rdkit.Chem.rdchem.Mol\n :return: The molecule with fragments Funcd.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n log.debug('Running FragmentRemover')\n # Iterate FragmentPatterns and Func matching fragments\n for arg_2 in arg_0.fragments:\n # If nothing is left or leave_last and only one fragment, end here\n if arg_1.GetNumAtoms() == 0 or (arg_0.leave_last and len(Chem.GetMolFrags(arg_1)) <= 1):\n break\n # Apply removal for this FragmentPattern\n arg_3 = Chem.DeleteSubstructs(arg_1, arg_2.smarts, onlyFrags=True)\n if not arg_1.GetNumAtoms() == arg_3.GetNumAtoms():\n log.info('Removed fragment: %s', arg_2.name)\n if arg_0.leave_last and arg_3.GetNumAtoms() == 0:\n # All the remaining fragments match this pattern - leave them all\n break\n arg_1 = arg_3\n return arg_1"} +{"_id": "doc_8907", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Return the largest covalent unit.\n\n The largest fragment is determined by number of atoms (including hydrogens). 
Ties are broken by taking the\n fragment with the higher molecular weight, and then by taking the first alphabetically by SMILES if needed.\n\n :param mol: The molecule to Func the largest fragment from.\n :type mol: rdkit.Chem.rdchem.Mol\n :return: The largest fragment.\n :rtype: rdkit.Chem.rdchem.Mol\n \"\"\"\n log.debug('Running LargestFragmentChooser')\n # TODO: Alternatively allow a list of fragments to be passed as the mol parameter\n arg_2 = Chem.GetMolFrags(arg_1, asMols=True)\n arg_3 = None\n for arg_4 in arg_2:\n arg_5 = Chem.MolToSmiles(arg_4, isomericSmiles=True)\n log.debug('Fragment: %s', arg_5)\n arg_6 = is_organic(arg_4)\n if arg_0.prefer_organic:\n # Skip this fragment if not organic and we already have an organic fragment as the largest so far\n if arg_3 and arg_3['organic'] and not arg_6:\n continue\n # Reset largest if it wasn't organic and this fragment is organic\n if arg_3 and arg_6 and not arg_3['organic']:\n arg_3 = None\n # Count atoms\n arg_7 = 0\n for arg_8 in arg_4.GetAtoms():\n arg_7 += 1 + arg_8.GetTotalNumHs()\n # Skip this fragment if fewer atoms than the largest\n if arg_3 and arg_7 < arg_3['atoms']:\n continue\n # Skip this fragment if equal number of atoms but weight is lower\n arg_9 = rdMolDescriptors.CalcExactMolWt(arg_4)\n if arg_3 and arg_7 == arg_3['atoms'] and arg_9 < arg_3['weight']:\n continue\n # Skip this fragment if equal atoms and equal weight but smiles comes last alphabetically\n if arg_3 and arg_7 == arg_3['atoms'] and arg_9 == arg_3['weight'] and arg_5 > arg_3['smiles']:\n continue\n # Otherwise this is the largest so far\n log.debug('New largest fragment: %s (%s)', arg_5, arg_7)\n arg_3 = {'smiles': arg_5, 'fragment': arg_4, 'atoms': arg_7, 'weight': arg_9, 'organic': arg_6}\n return arg_3['fragment']"} +{"_id": "doc_8908", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Construct a constraint from a validation function.\n\n Args:\n func (function):\n Function that evaluates True when the variables satisfy the constraint.\n\n variables (iterable):\n Iterable of variable labels.\n\n vartype (:class:`~dimod.Vartype`/str/set):\n Variable type for the constraint. Accepted input values:\n\n * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\n name (string, optional, default='Constraint'):\n Name for the constraint.\n\n Examples:\n This example creates a constraint that binary variables `a` and `b`\n are not equal.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> const = dwavebinarycsp.Constraint.Func(operator.ne, ['a', 'b'], 'BINARY')\n >>> print(const.name)\n Constraint\n >>> (0, 1) in const.configurations\n True\n\n This example creates a constraint that :math:`out = NOT(x)`\n for spin variables.\n\n >>> import dwavebinarycsp\n >>> def not_(y, x): # y=NOT(x) for spin variables\n ... return (y == -x)\n ...\n >>> const = dwavebinarycsp.Constraint.Func(\n ... not_,\n ... ['out', 'in'],\n ... {1, -1},\n ... name='not_spin')\n >>> print(const.name)\n not_spin\n >>> (1, -1) in const.configurations\n True\n\n \"\"\"\n arg_2 = tuple(arg_2)\n\n arg_5 = frozenset(config\n for config in itertools.product(arg_3.value, repeat=len(arg_2))\n if arg_1(*config))\n\n return arg_0(arg_1, arg_5, arg_2, arg_3, arg_4)"} +{"_id": "doc_8909", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=None):\n \"\"\"Construct a constraint from valid configurations.\n\n Args:\n configurations (iterable[tuple]):\n Valid configurations of the variables. 
Each configuration is a tuple of variable\n assignments ordered by :attr:`~Constraint.variables`.\n\n variables (iterable):\n Iterable of variable labels.\n\n vartype (:class:`~dimod.Vartype`/str/set):\n Variable type for the constraint. Accepted input values:\n\n * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\n name (string, optional, default='Constraint'):\n Name for the constraint.\n\n Examples:\n\n This example creates a constraint that variables `a` and `b` are not equal.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.Func([(0, 1), (1, 0)],\n ... ['a', 'b'], dwavebinarycsp.BINARY)\n >>> print(const.name)\n Constraint\n >>> (0, 0) in const.configurations # Order matches variables: a,b\n False\n\n This example creates a constraint based on specified valid configurations\n that represents an OR gate for spin variables.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.Func(\n ... [(-1, -1, -1), (1, -1, 1), (1, 1, -1), (1, 1, 1)],\n ... ['y', 'x1', 'x2'],\n ... dwavebinarycsp.SPIN, name='or_spin')\n >>> print(const.name)\n or_spin\n >>> (1, 1, -1) in const.configurations # Order matches variables: y,x1,x2\n True\n\n \"\"\"\n def func(*arg_5): return arg_5 in arg_1\n\n return arg_0(func, arg_1, arg_2, arg_3, arg_4)"} +{"_id": "doc_8910", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Check that a solution satisfies the constraint.\n\n Args:\n solution (container):\n An assignment for the variables in the constraint.\n\n Returns:\n bool: True if the solution satisfies the constraint; otherwise False.\n\n Examples:\n This example creates a constraint that :math:`a \\\\ne b` on binary variables\n and tests it for two candidate solutions, with additional unconstrained\n variable c.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],\n ... ['a', 'b'], dwavebinarycsp.BINARY)\n >>> solution = {'a': 1, 'b': 1, 'c': 0}\n >>> const.Func(solution)\n False\n >>> solution = {'a': 1, 'b': 0, 'c': 0}\n >>> const.Func(solution)\n True\n\n \"\"\"\n return arg_0.func(*(arg_1[arg_2] for arg_2 in arg_0.variables))"} +{"_id": "doc_8911", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Flip a variable in the constraint.\n\n Args:\n v (variable):\n Variable in the constraint to take the complementary value of its\n construction value.\n\n Examples:\n This example creates a constraint that :math:`a = b` on binary variables\n and flips variable a.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_func(operator.eq,\n ... 
['a', 'b'], dwavebinarycsp.BINARY)\n >>> const.check({'a': 0, 'b': 0})\n True\n >>> const.Func('a')\n >>> const.check({'a': 1, 'b': 0})\n True\n >>> const.check({'a': 0, 'b': 0})\n False\n\n \"\"\"\n try:\n arg_2 = arg_0.variables.index(arg_1)\n except ValueError:\n raise ValueError(\"variable {} is not a variable in constraint {}\".format(arg_1, arg_0.name))\n\n if arg_0.vartype is dimod.BINARY:\n\n arg_3 = arg_0.func\n\n def arg_6(*arg_4):\n arg_5 = list(arg_4)\n arg_5[arg_2] = 1 - arg_5[arg_2] # negate v\n return arg_3(*arg_5)\n\n arg_0.func = arg_6\n\n arg_0.configurations = frozenset(config[:arg_2] + (1 - config[arg_2],) + config[arg_2 + 1:]\n for config in arg_0.configurations)\n\n else: # SPIN\n\n arg_3 = arg_0.func\n\n def arg_6(*arg_4):\n arg_5 = list(arg_4)\n arg_5[arg_2] = -arg_5[arg_2] # negate v\n return arg_3(*arg_5)\n\n arg_0.func = arg_6\n\n arg_0.configurations = frozenset(config[:arg_2] + (-config[arg_2],) + config[arg_2 + 1:]\n for config in arg_0.configurations)\n\n arg_0.name = '{} ({} flipped)'.format(arg_0.name, arg_1)"} +{"_id": "doc_8912", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3()):\n \"\"\"Add a constraint.\n\n Args:\n constraint (function/iterable/:obj:`.Constraint`):\n Constraint definition in one of the supported formats:\n\n 1. Function, with input arguments matching the order and\n :attr:`~.ConstraintSatisfactionProblem.vartype` type of the `variables`\n argument, that evaluates True when the constraint is satisfied.\n 2. List explicitly specifying each allowed configuration as a tuple.\n 3. :obj:`.Constraint` object built either explicitly or by :mod:`dwavebinarycsp.factories`.\n\n variables(iterable):\n Variables associated with the constraint. Not required when `constraint` is\n a :obj:`.Constraint` object.\n\n Examples:\n This example defines a function that evaluates True when the constraint is satisfied.\n The function's input arguments match the order and type of the `variables` argument.\n\n >>> import dwavebinarycsp\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> def all_equal(a, b, c): # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN\n ... 
return (a == b) and (b == c)\n >>> csp.Func(all_equal, ['a', 'b', 'c'])\n >>> csp.check({'a': 0, 'b': 0, 'c': 0})\n True\n >>> csp.check({'a': 0, 'b': 0, 'c': 1})\n False\n\n This example explicitly lists allowed configurations.\n\n >>> import dwavebinarycsp\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n >>> eq_configurations = {(-1, -1), (1, 1)}\n >>> csp.Func(eq_configurations, ['v0', 'v1'])\n >>> csp.check({'v0': -1, 'v1': +1})\n False\n >>> csp.check({'v0': -1, 'v1': -1})\n True\n\n This example uses a :obj:`.Constraint` object built by :mod:`dwavebinarycsp.factories`.\n\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.Func(gates.and_gate(['a', 'b', 'c'])) # add an AND gate\n >>> csp.Func(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate\n >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})\n True\n\n \"\"\"\n if isinstance(arg_1, Constraint):\n if arg_2 and (arg_3(arg_2) != arg_1.variables):\n raise ValueError(\"mismatched variables and Constraint\")\n elif isinstance(arg_1, Callable):\n arg_1 = Constraint.from_func(arg_1, arg_2, arg_0.vartype)\n elif isinstance(arg_1, Iterable):\n arg_1 = Constraint.from_configurations(arg_1, arg_2, arg_0.vartype)\n else:\n raise TypeError(\"Unknown constraint type given\")\n\n arg_0.constraints.append(arg_1)\n for arg_4 in arg_1.variables:\n arg_0.variables[arg_4].append(arg_1)"} +{"_id": "doc_8913", "title": "", "text": "def Func(arg_0, arg_1=2.0, arg_2=8):\n \"\"\"Build a binary quadratic model with minimal energy levels at solutions to the specified constraint satisfaction\n problem.\n\n Args:\n csp (:obj:`.ConstraintSatisfactionProblem`):\n Constraint satisfaction problem.\n\n min_classical_gap (float, optional, default=2.0):\n Minimum energy gap from ground. Each constraint violated by the solution increases\n the energy level of the binary quadratic model by at least this much relative\n to ground energy.\n\n max_graph_size (int, optional, default=8):\n Maximum number of variables in the binary quadratic model that can be used to\n represent a single constraint.\n\n Returns:\n :class:`~dimod.BinaryQuadraticModel`\n\n Notes:\n For a `min_classical_gap` > 2 or constraints with more than two variables, requires\n access to factories from the penaltymodel_ ecosystem to construct the binary quadratic\n model.\n\n .. 
_penaltymodel: https://github.com/dwavesystems/penaltymodel\n\n Examples:\n This example creates a binary-valued constraint satisfaction problem\n with two constraints, :math:`a = b` and :math:`b \\\\ne c`, and builds\n a binary quadratic model with a minimum energy level of -2 such that\n each constraint violation by a solution adds the default minimum energy gap.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(operator.eq, ['a', 'b']) # a == b\n >>> csp.add_constraint(operator.ne, ['b', 'c']) # b != c\n >>> bqm = dwavebinarycsp.Func(csp)\n >>> bqm.energy({'a': 0, 'b': 0, 'c': 1}) # satisfies csp\n -2.0\n >>> bqm.energy({'a': 0, 'b': 0, 'c': 0}) # violates one constraint\n 0.0\n >>> bqm.energy({'a': 1, 'b': 0, 'c': 0}) # violates two constraints\n 2.0\n\n This example creates a binary-valued constraint satisfaction problem\n with two constraints, :math:`a = b` and :math:`b \\\\ne c`, and builds\n a binary quadratic model with a minimum energy gap of 4.\n Note that in this case the conversion to binary quadratic model adds two\n ancillary variables that must be minimized over when solving.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> import itertools\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(operator.eq, ['a', 'b']) # a == b\n >>> csp.add_constraint(operator.ne, ['b', 'c']) # b != c\n >>> bqm = dwavebinarycsp.Func(csp, min_classical_gap=4.0)\n >>> list(bqm) # # doctest: +SKIP\n ['a', 'aux1', 'aux0', 'b', 'c']\n >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 1, 'aux0': aux0, 'aux1': aux1}) for\n ... aux0, aux1 in list(itertools.product([0, 1], repeat=2))]) # satisfies csp\n -6.0\n >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for\n ... aux0, aux1 in list(itertools.product([0, 1], repeat=2))]) # violates one constraint\n -2.0\n >>> min([bqm.energy({'a': 1, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for\n ... aux0, aux1 in list(itertools.product([0, 1], repeat=2))]) # violates two constraints\n 2.0\n\n This example finds for the previous example the minimum graph size.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(operator.eq, ['a', 'b']) # a == b\n >>> csp.add_constraint(operator.ne, ['b', 'c']) # b != c\n >>> for n in range(8, 1, -1):\n ... try:\n ... bqm = dwavebinarycsp.Func(csp, min_classical_gap=4.0, max_graph_size=n)\n ... except dwavebinarycsp.exceptions.ImpossibleBQM:\n ... print(n+1)\n ...\n 3\n\n \"\"\"\n\n # ensure we have penaltymodel factory available\n try:\n dwavebinarycsp.assert_penaltymodel_factory_available()\n except AssertionError as e:\n raise RuntimeError(e)\n\n def aux_factory():\n for arg_3 in count():\n yield 'aux{}'.format(arg_3)\n\n arg_4 = aux_factory()\n\n arg_5 = dimod.BinaryQuadraticModel.empty(arg_0.vartype)\n\n # developer note: we could cache them and relabel, for now though let's do the simple thing\n # penalty_models = {}\n for arg_6 in arg_0.constraints:\n arg_7 = arg_6.configurations\n\n if len(arg_6.variables) > arg_2:\n arg_8 = (\"The given csp contains a constraint {const} with {num_var} variables. \"\n \"This cannot be mapped to a graph with {max_graph_size} nodes. 
\"\n \"Consider checking whether your constraint is irreducible.\"\n \"\").format(arg_6=arg_6, num_var=len(arg_6.variables), arg_2=arg_2)\n raise ImpossibleBQM(arg_8)\n\n arg_9 = None\n\n if len(arg_6) == 0:\n # empty constraint\n continue\n\n if arg_1 <= 2.0:\n if len(arg_6) == 1 and arg_2 >= 1:\n arg_5.update(_bqm_from_1sat(arg_6))\n continue\n elif len(arg_6) == 2 and arg_2 >= 2:\n arg_5.update(_bqm_from_2sat(arg_6))\n continue\n\n # developer note: we could cache them and relabel, for now though let's do the simple thing\n # if configurations in penalty_models:\n # raise NotImplementedError\n\n for arg_10 in iter_complete_graphs(arg_6.variables, arg_2 + 1, arg_4):\n\n # construct a specification\n spec = pm.Specification(\n graph=arg_10,\n decision_variables=arg_6.variables,\n feasible_configurations=arg_7,\n arg_1=arg_1,\n vartype=arg_0.vartype\n )\n\n # try to use the penaltymodel ecosystem\n try:\n arg_9 = pm.get_penalty_model(spec)\n except pm.ImpossiblePenaltyModel:\n # hopefully adding more variables will make it possible\n continue\n\n if arg_9.classical_gap >= arg_1:\n break\n\n # developer note: we could cache them and relabel, for now though let's do the simple thing\n # penalty_models[configurations] = pmodel\n\n else:\n arg_8 = (\"No penalty model can be built for constraint {}\".format(arg_6))\n raise ImpossibleBQM(arg_8)\n\n arg_5.update(arg_9.model)\n\n return arg_5"}
{"_id": "doc_8914", "title": "", "text": "def Func(arg_0):\n \"\"\"create a bqm for a constraint with two variables.\n\n bqm will have exactly classical gap 2.\n \"\"\"\n arg_1 = arg_0.configurations\n arg_2 = arg_0.variables\n arg_3 = arg_0.vartype\n arg_4, arg_5 = arg_0.variables\n\n # if all configurations are present, then nothing is infeasible and the bqm is just all\n # 0.0s\n if len(arg_1) == 4:\n return dimod.BinaryQuadraticModel.empty(arg_0.vartype)\n\n # check if the constraint is irreducible, and if so, build the bqm for its two\n # components\n arg_6 = irreducible_components(arg_0)\n if len(arg_6) > 1:\n arg_7 = Constraint.from_configurations(((arg_10[0],) for arg_10 in arg_1),\n (arg_4,), arg_3)\n arg_8 = Constraint.from_configurations(((arg_10[1],) for arg_10 in arg_1),\n (arg_5,), arg_3)\n arg_9 = _bqm_from_1sat(arg_7)\n arg_9.update(_bqm_from_1sat(arg_8))\n return arg_9\n\n assert len(arg_1) > 1, \"single configurations should be irreducible\"\n\n # if it is not irreducible, and there are infeasible configurations, then it is time to\n # start building a bqm\n arg_9 = dimod.BinaryQuadraticModel.empty(arg_3)\n\n # if the constraint is not irreducible and has two configurations, then it is either eq or ne\n if all(operator.eq(*arg_10) for arg_10 in arg_1):\n arg_9.add_interaction(arg_4, arg_5, -1, arg_3=dimod.SPIN) # equality\n elif all(operator.ne(*arg_10) for arg_10 in arg_1):\n arg_9.add_interaction(arg_4, arg_5, +1, arg_3=dimod.SPIN) # inequality\n elif (1, 1) not in arg_1:\n arg_9.add_interaction(arg_4, arg_5, 2, arg_3=dimod.BINARY) # penalize (1, 1)\n elif (-1, +1) not in arg_1 and (0, 1) not in arg_1:\n arg_9.add_interaction(arg_4, arg_5, -2, arg_3=dimod.BINARY)\n arg_9.add_variable(arg_5, 2, arg_3=dimod.BINARY)\n elif (+1, -1) not in arg_1 and (1, 0) not in arg_1:\n arg_9.add_interaction(arg_4, arg_5, -2, arg_3=dimod.BINARY)\n arg_9.add_variable(arg_4, 2, arg_3=dimod.BINARY)\n else:\n # (0, 0) not in configurations\n arg_9.add_interaction(arg_4, arg_5, 2, arg_3=dimod.BINARY)\n arg_9.add_variable(arg_4, -2, arg_3=dimod.BINARY)\n arg_9.add_variable(arg_5, -2, arg_3=dimod.BINARY)\n\n 
return arg_9"} +{"_id": "doc_8915", "title": "", "text": "def Func(arg_0, arg_1=arg_2.BINARY, arg_4='AND'):\n \"\"\"AND gate.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n where `in1, in2` are inputs and `out` the gate's output.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='AND'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of an AND gate.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.Func(['a', 'b', 'c'], name='AND1'))\n >>> csp.check({'a': 1, 'b': 0, 'c': 0})\n True\n \"\"\"\n\n arg_0 = tuple(arg_0)\n\n if arg_1 is arg_2.BINARY:\n arg_5 = frozenset([(0, 0, 0),\n (0, 1, 0),\n (1, 0, 0),\n (1, 1, 1)])\n\n def func(arg_6, arg_7, arg_8): return (arg_6 and arg_7) == arg_8\n\n else:\n # SPIN, vartype is checked by the decorator\n arg_5 = frozenset([(-1, -1, -1),\n (-1, +1, -1),\n (+1, -1, -1),\n (+1, +1, +1)])\n\n def func(arg_6, arg_7, arg_8): return ((arg_6 > 0) and (arg_7 > 0)) == (arg_8 > 0)\n\n return Constraint(func, arg_5, arg_0, arg_1=arg_1, arg_4=arg_4)"} +{"_id": "doc_8916", "title": "", "text": "def Func(arg_0, arg_1=arg_2.BINARY, arg_4='XOR'):\n \"\"\"XOR gate.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n where `in1, in2` are inputs and `out` the gate's output.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='XOR'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of an XOR gate.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.Func(['x', 'y', 'z'], name='XOR1'))\n >>> csp.check({'x': 1, 'y': 1, 'z': 1})\n False\n \"\"\"\n\n arg_0 = tuple(arg_0)\n if arg_1 is arg_2.BINARY:\n arg_5 = frozenset([(0, 0, 0),\n (0, 1, 1),\n (1, 0, 1),\n (1, 1, 0)])\n\n def func(arg_6, arg_7, arg_8): return (arg_6 != arg_7) == arg_8\n\n else:\n # SPIN, vartype is checked by the decorator\n arg_5 = frozenset([(-1, -1, -1),\n (-1, +1, +1),\n (+1, -1, +1),\n (+1, +1, -1)])\n\n def func(arg_6, arg_7, arg_8): return ((arg_6 > 0) != (arg_7 > 0)) == (arg_8 > 0)\n\n return Constraint(func, arg_5, arg_0, arg_1=arg_1, arg_4=arg_4)"} +{"_id": "doc_8917", "title": "", "text": "def Func(arg_0, arg_1=arg_2.BINARY, arg_4='HALF_ADDER'):\n \"\"\"Half adder.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, sum, carry]`,\n where `in1, in2` are inputs to be added and `sum` and 'carry' the resultant\n outputs.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='HALF_ADDER'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of a Boolean half adder.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.Func(['a', 'b', 'total', 'carry'], name='HA1'))\n >>> csp.check({'a': 1, 'b': 1, 'total': 0, 'carry': 1})\n True\n\n \"\"\"\n\n arg_0 = tuple(arg_0)\n\n if arg_1 is arg_2.BINARY:\n arg_5 = frozenset([(0, 0, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 1, 0),\n (1, 1, 0, 1)])\n\n else:\n # SPIN, vartype is checked by the decorator\n arg_5 = frozenset([(-1, -1, -1, -1),\n (-1, +1, +1, -1),\n (+1, -1, +1, -1),\n (+1, +1, -1, +1)])\n\n def func(arg_6, arg_7, arg_8, arg_9):\n arg_10 = (arg_6 > 0) + (arg_7 > 0)\n if arg_10 == 0:\n return (arg_8 <= 0) and (arg_9 <= 0)\n elif arg_10 == 1:\n return (arg_8 > 0) and (arg_9 <= 0)\n elif arg_10 == 2:\n return (arg_8 <= 0) and (arg_9 > 0)\n else:\n raise ValueError(\"func recieved unexpected values\")\n\n return Constraint(func, arg_5, arg_0, arg_1=arg_1, arg_4=arg_4)"} +{"_id": "doc_8918", "title": "", "text": "def Func(arg_0, arg_1, arg_2=arg_3.BINARY, arg_5=True):\n \"\"\"Random XOR constraint satisfaction problem.\n\n Args:\n num_variables (integer): Number of variables (at least three).\n num_clauses (integer): Number of constraints that together constitute the\n constraint satisfaction problem.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n satisfiable (bool, optional, default=True): True if the CSP can be satisfied.\n\n Returns:\n CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables\n are assigned values that satisfy a XOR satisfiability problem.\n\n Examples:\n This example creates a CSP with 5 variables and two random constraints and checks\n whether a particular assignment of variables satisifies it.\n\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories as sat\n >>> csp = sat.Func(5, 2)\n >>> csp.constraints # doctest: +SKIP\n [Constraint.from_configurations(frozenset({(1, 0, 0), (1, 1, 1), (0, 1, 0), (0, 0, 1)}), (4, 3, 0),\n Vartype.BINARY, name='XOR (0 flipped)'),\n Constraint.from_configurations(frozenset({(1, 1, 0), (0, 1, 1), (0, 0, 0), (1, 0, 1)}), (2, 0, 4),\n Vartype.BINARY, name='XOR (2 flipped) (0 flipped)')]\n >>> csp.check({0: 1, 1: 0, 2: 0, 3: 1, 4: 1}) # doctest: +SKIP\n True\n\n \"\"\"\n if arg_0 < 3:\n raise ValueError(\"a xor problem needs at least 3 variables\")\n if arg_1 > 8 * _nchoosek(arg_0, 3): # 8 different negation patterns\n raise ValueError(\"too many clauses\")\n\n # also checks the vartype argument\n arg_6 = ConstraintSatisfactionProblem(arg_2)\n\n arg_7 = list(range(arg_0))\n\n arg_8 = set()\n\n if arg_5:\n arg_9 = tuple(arg_2.value)\n arg_10 = {arg_18: choice(arg_9) for arg_18 in arg_7}\n\n arg_11 = [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)]\n\n while len(arg_8) < arg_1:\n # because constraints are hashed on configurations/variables, and because the inputs\n # to xor can be swapped without loss of generality, we can order them\n arg_12, arg_13, arg_14 = sample(arg_7, 3)\n if arg_13 > arg_12:\n arg_12, arg_13 = 
arg_13, arg_12\n\n # get the constraint\n arg_15 = xor_gate([arg_12, arg_13, arg_14], arg_2=arg_2)\n\n # pick (uniformly) a configuration and determine which variables we need to negate to\n # match the chosen configuration\n arg_16 = choice(arg_11)\n\n for arg_17, arg_18 in enumerate(arg_15.variables):\n if arg_16[arg_17] != (arg_10[arg_18] > 0):\n arg_15.flip_variable(arg_18)\n\n assert arg_15.check(arg_10)\n\n arg_8.add(arg_15)\n else:\n while len(arg_8) < arg_1:\n # because constraints are hashed on configurations/variables, and because the inputs\n # to xor can be swapped without loss of generality, we can order them\n arg_12, arg_13, arg_14 = sample(arg_7, 3)\n if arg_13 > arg_12:\n arg_12, arg_13 = arg_13, arg_12\n\n # get the constraint\n arg_15 = xor_gate([arg_12, arg_13, arg_14], arg_2=arg_2)\n\n # randomly flip each variable in the constraint\n for arg_17, arg_18 in enumerate(arg_15.variables):\n if random() > .5:\n arg_15.flip_variable(arg_18)\n\n assert arg_15.check(arg_10)\n\n arg_8.add(arg_15)\n\n for arg_15 in arg_8:\n arg_6.add_constraint(arg_15)\n\n # in case any variables didn't make it in\n for arg_18 in arg_7:\n arg_6.add_variable(arg_18)\n\n return arg_6"} +{"_id": "doc_8919", "title": "", "text": "def Func(arg_0, arg_1, **arg_2):\n \"\"\"\n Generates a model chooser definition from a model, and adds it to the\n registry.\n \"\"\"\n arg_3 = '{}Chooser'.format(arg_1._meta.object_name)\n arg_4 = {'model': arg_1}\n arg_4.update(arg_2)\n\n arg_5 = type(arg_3, (Chooser,), arg_4)\n arg_0.register_chooser(arg_5)\n\n return arg_1"} +{"_id": "doc_8920", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n \"\"\"Parse additional url fields and map them to inputs\n\n Attempt to create a dictionary with keys being user input, and\n response being the returned URL\n \"\"\"\n if arg_3 is None:\n return None\n\n arg_4 = arg_2['_paramAdditionalUrls']\n arg_5 = {}\n if isinstance(arg_3, str):\n arg_5[arg_4[0]] = arg_3\n else:\n for arg_6, arg_7 in zip(arg_4, arg_3):\n arg_5[arg_6] = arg_7\n return arg_5"} +{"_id": "doc_8921", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Convert a list of JSON values to a list of models\n \"\"\"\n return [arg_0.from_json(arg_1, arg_3) for arg_3 in arg_2]"} +{"_id": "doc_8922", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Populate all fields of a model with data\n\n Given a model with a PandoraModel superclass will enumerate all\n declared fields on that model and populate the values of their Field\n and SyntheticField classes. 
All declared fields will have a value after\n this function runs even if they are missing from the incoming JSON.\n \"\"\"\n for arg_3, arg_4 in arg_1.__class__._fields.items():\n arg_5 = getattr(arg_4, \"default\", None)\n arg_6 = arg_2.get(arg_4.field, arg_5)\n\n if isinstance(arg_4, SyntheticField):\n arg_6 = arg_4.formatter(arg_0, arg_2, arg_6)\n setattr(arg_1, arg_3, arg_6)\n continue\n\n arg_7 = getattr(arg_4, \"model\", None)\n if arg_6 and arg_7:\n if isinstance(arg_6, list):\n arg_6 = arg_7.from_json_list(arg_0, arg_6)\n else:\n arg_6 = arg_7.from_json(arg_0, arg_6)\n\n if arg_6 and arg_4.formatter:\n arg_6 = arg_4.formatter(arg_0, arg_6)\n\n setattr(arg_1, arg_3, arg_6)"} +{"_id": "doc_8923", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Convert one JSON value to a model object\n \"\"\"\n arg_3 = arg_0(arg_1)\n PandoraModel.populate_fields(arg_1, arg_3, arg_2)\n return arg_3"} +{"_id": "doc_8924", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Common repr logic for subclasses to hook\n \"\"\"\n arg_2 = [\n \"=\".join((key, repr(getattr(arg_0, key))))\n for key in sorted(arg_0._fields.keys())]\n\n if arg_2:\n arg_3 = \", \".join(arg_2)\n else:\n arg_3 = None\n\n if arg_1:\n return \"{}({}, {})\".format(arg_0.__class__.__name__,\n arg_3, arg_1)\n else:\n return \"{}({})\".format(arg_0.__class__.__name__, arg_3)"} +{"_id": "doc_8925", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Write command to remote process\n \"\"\"\n arg_0._process.stdin.write(\"{}\\n\".format(arg_1).encode(\"utf-8\"))\n arg_0._process.stdin.flush()"} +{"_id": "doc_8926", "title": "", "text": "def Func(arg_0):\n \"\"\"Ensure player backing process is started\n \"\"\"\n if arg_0._process and arg_0._process.poll() is None:\n return\n\n if not getattr(arg_0, \"_cmd\"):\n raise RuntimeError(\"Player command is not configured\")\n\n log.debug(\"Starting playback command: %r\", arg_0._cmd)\n arg_0._process = SilentPopen(arg_0._cmd)\n arg_0._post_start()"} +{"_id": "doc_8927", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Play a new song from a Pandora model\n\n Returns once the stream starts but does not shut down the remote audio\n output backend process. 
Calls the input callback when the user has\n input.\n \"\"\"\n arg_0._callbacks.Func(arg_1)\n arg_0._load_track(arg_1)\n time.sleep(2) # Give the backend time to load the track\n\n while True:\n try:\n arg_0._callbacks.pre_poll()\n arg_0._ensure_started()\n arg_0._loop_hook()\n\n arg_2, arg_3, arg_3 = select.select(\n arg_0._get_select_readers(), [], [], 1)\n\n for arg_4 in arg_2:\n if arg_4.fileno() == arg_0._control_fd:\n arg_0._callbacks.input(arg_4.readline().strip(), arg_1)\n else:\n arg_5 = arg_0._read_from_process(arg_4)\n if arg_0._Funcer_stopped(arg_5):\n return\n finally:\n arg_0._callbacks.post_poll()"}
{"_id": "doc_8928", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Play the station until something ends it\n\n This function will run forever until terminated by calling\n end_station.\n \"\"\"\n for arg_2 in iterate_forever(arg_1.get_playlist):\n try:\n arg_0.play(arg_2)\n except StopIteration:\n arg_0.stop()\n return"}
{"_id": "doc_8929", "title": "", "text": "def Func(arg_0):\n \"\"\"Set stdout to non-blocking\n\n VLC does not always return a newline when reading status so in order to\n be lazy and still use the read API without caring about how much output\n there is we switch stdout to nonblocking mode and just read a large\n chunk of data.\n \"\"\"\n arg_1 = fcntl.fcntl(arg_0._process.stdout, fcntl.F_GETFL)\n fcntl.fcntl(arg_0._process.stdout, fcntl.F_SETFL, arg_1 | os.O_NONBLOCK)"}
{"_id": "doc_8930", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"Format a station menu and make the user select a station\n \"\"\"\n arg_0.screen.clear()\n\n if arg_1:\n arg_0.screen.print_error(\"{}\\n\".format(arg_1))\n\n for arg_2, arg_3 in enumerate(arg_0.stations):\n arg_2 = \"{:>3}\".format(arg_2)\n print(\"{}: {}\".format(Colors.yellow(arg_2), arg_3.name))\n\n return arg_0.stations[arg_0.screen.get_integer(\"Station: \")]"}
{"_id": "doc_8931", "title": "", "text": "def Func(arg_0, Func, arg_2):\n \"\"\"Input callback, handles key presses\n \"\"\"\n try:\n arg_3 = getattr(arg_0, arg_0.CMD_MAP[Func][1])\n except (IndexError, KeyError):\n return arg_0.screen.print_error(\n \"Invalid command {!r}!\".format(Func))\n\n arg_3(arg_2)"}
{"_id": "doc_8932", "title": "", "text": "def Func(arg_0, arg_1=(arg_2,)):\n \"\"\"Function decorator implementing retrying logic.\n\n exceptions: A tuple of exception classes; default (Exception,)\n\n The decorator will call the function up to max_tries times if it raises\n an exception.\n\n By default it catches instances of the Exception class and subclasses.\n This will recover after all but the most fatal errors. 
You may specify a\n custom tuple of exception classes with the 'exceptions' argument; the\n function will only be retried if it raises one of the specified\n exceptions.\n \"\"\"\n def decorator(arg_3):\n def function(*arg_4, **arg_5):\n\n arg_6 = arg_0\n while arg_6 > 0:\n try:\n arg_6 -= 1\n return arg_3(*arg_4, **arg_5)\n\n except arg_1 as exc:\n # Don't retry for PandoraExceptions - unlikely that result\n # will change for same set of input parameters.\n if isinstance(exc, PandoraException):\n raise\n if arg_6 > 0:\n time.sleep(delay_exponential(\n 0.5, 2, arg_0 - arg_6))\n else:\n raise\n\n return function\n\n return decorator"}
{"_id": "doc_8933", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"Iterate over a finite iterator forever\n\n When the iterator is exhausted it will call the function again to generate a\n new iterator and keep iterating.\n \"\"\"\n arg_3 = arg_0(*arg_1, **arg_2)\n\n while True:\n try:\n arg_4 = next(arg_3)\n arg_4.prepare_playback()\n yield arg_4\n except StopIteration:\n arg_3 = arg_0(*arg_1, **arg_2)"}
{"_id": "doc_8934", "title": "", "text": "def Func(arg_0):\n \"\"\"Gather user input and convert it to an integer\n\n Will keep trying till the user enters an integer or until they ^C the\n program.\n \"\"\"\n while True:\n try:\n return int(input(arg_0).strip())\n except ValueError:\n print(Colors.red(\"Invalid Input!\"))"}
{"_id": "doc_8935", "title": "", "text": "def Func(arg_0=1.0, arg_1=0.0, arg_2=1.0, arg_3=10.0, arg_4=1e-8, arg_5=0,\n arg_6=600, arg_7=0.0, arg_8=1e-8, arg_9=1e-8, arg_10=False,\n arg_11='None', arg_12='bdf', arg_13=100, arg_14=False):\n \"\"\"\n Example program integrating an IVP problem of van der Pol oscillator\n \"\"\"\n arg_15, arg_16 = get_f_and_j(arg_2)\n if arg_5 > 1:\n arg_17 = np.linspace(arg_7, arg_3, arg_5)\n arg_18, arg_19 = integrate_predefined(\n arg_15, arg_16, [arg_0, arg_1], arg_17, arg_4, arg_8, arg_9, arg_6=arg_6,\n check_indexing=False, arg_12=arg_12)\n else:\n arg_17, arg_18, arg_19 = integrate_adaptive(\n arg_15, arg_16, [arg_0, arg_1], arg_7, arg_3, arg_4, arg_8, arg_9, arg_6=arg_6,\n check_indexing=False, arg_12=arg_12) # dfdt[:] also for len == 1\n if arg_14:\n print(arg_19)\n if arg_10:\n import matplotlib.pyplot as plt\n plt.plot(arg_17, arg_18[:, 1], 'g--')\n plt.plot(arg_17, arg_18[:, 0], 'k-', linewidth=2)\n if arg_11 == 'None':\n plt.show()\n else:\n plt.savefig(arg_11, arg_13=arg_13)"}
{"_id": "doc_8936", "title": "", "text": "def Func(arg_0):\n \"\"\"Func the drop box\n\n You need to call this method before starting putting packages.\n\n Returns\n -------\n None\n\n \"\"\"\n\n arg_0.workingArea.Func()\n arg_0.runid_pkgidx_map = { }\n arg_0.runid_to_return = deque()"}
{"_id": "doc_8937", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Func a task\n\n This method places a task in the working area and has the\n dispatcher execute it.\n\n If you need to Func multiple tasks, it can be much faster to\n use `Func_multiple()` than to use this method multiple times\n depending on the dispatcher.\n\n Parameters\n ----------\n package : callable\n A task\n\n Returns\n -------\n int\n A package index assigned by the working area\n\n \"\"\"\n\n arg_2 = arg_0.workingArea.Func_package(arg_1)\n\n arg_3 = logging.getLogger(__name__)\n arg_3.info('submitting {}'.format(arg_0.workingArea.package_relpath(arg_2)))\n\n arg_4 = arg_0.dispatcher.run(arg_0.workingArea, arg_2)\n arg_0.runid_pkgidx_map[arg_4] = arg_2\n return arg_2"}
{"_id": "doc_8938", "title": "", "text": "def Func(arg_0):\n 
\"\"\"return pairs of package indices and results of finished tasks\n\n This method does not wait for tasks to finish.\n\n Returns\n -------\n list\n A list of pairs of package indices and results\n\n \"\"\"\n\n arg_0.runid_to_return.extend(arg_0.dispatcher.Func())\n arg_1 = arg_0._collect_all_finished_pkgidx_result_pairs()\n return arg_1"} +{"_id": "doc_8939", "title": "", "text": "def Func(arg_0):\n \"\"\"return a pair of a package index and result of a task\n\n This method waits until a tasks finishes. It returns `None` if\n no task is running.\n\n Returns\n -------\n tuple or None\n A pair of a package index and result. `None` if no tasks\n is running.\n\n \"\"\"\n\n if not arg_0.runid_pkgidx_map:\n return None\n\n while True:\n\n if not arg_0.runid_to_return:\n arg_0.runid_to_return.extend(arg_0.dispatcher.poll())\n\n arg_1 = arg_0._collect_next_finished_pkgidx_result_pair()\n\n if arg_1 is not None:\n break\n\n if arg_0.runid_pkgidx_map:\n time.sleep(arg_0.sleep)\n\n return arg_1"} +{"_id": "doc_8940", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"run the event loops in the background.\n\n Args:\n eventLoops (list): a list of event loops to run\n\n \"\"\"\n\n arg_0.nruns += len(arg_1)\n return arg_0.communicationChannel.put_multiple(arg_1)"} +{"_id": "doc_8941", "title": "", "text": "def Func(arg_0):\n \"\"\"Return a pair of a run id and a result.\n\n This method waits until an event loop finishes.\n This method returns None if no loop is running.\n \"\"\"\n if arg_0.nruns == 0:\n return None\n arg_1 = arg_0.communicationChannel.Func()\n if arg_1 is not None:\n arg_0.nruns -= 1\n return arg_1"} +{"_id": "doc_8942", "title": "", "text": "def Func(arg_0):\n \"\"\"wait until all event loops Func and returns the results.\n\n \"\"\"\n\n arg_1 = arg_0.communicationChannel.receive()\n\n if arg_0.nruns != len(arg_1):\n import logging\n arg_2 = logging.getLogger(__name__)\n # logger.setLevel(logging.DEBUG)\n arg_2.warning(\n 'too few results received: {} results received, {} expected'.format(\n len(arg_1),\n arg_0.nruns\n ))\n\n return arg_1"} +{"_id": "doc_8943", "title": "", "text": "def Func(arg_0, arg_1=arg_2('nan')):\n \"\"\"Convert ``key_vals_dict`` to `tuple_list``.\n\n Args:\n key_vals_dict (dict): The first parameter.\n fill: a value to fill missing data\n\n Returns:\n A list of tuples\n\n \"\"\"\n\n arg_3 = [ ]\n\n if not arg_0: return arg_3\n\n arg_4 = max([len(arg_6) for arg_6 in itertools.chain(*arg_0.values())])\n\n for arg_5, arg_6 in arg_0.items():\n try:\n arg_3.extend([arg_5 + tuple(arg_7) + (arg_1, )*(arg_4 - len(arg_7)) for arg_7 in arg_6])\n except TypeError:\n # assume k is not a tuple\n arg_3.extend([(arg_5, ) + tuple(arg_7) + (arg_1, )*(arg_4 - len(arg_7)) for arg_7 in arg_6])\n\n\n return arg_3"} +{"_id": "doc_8944", "title": "", "text": "def Func(arg_0):\n \"\"\"Open the working area\n\n Returns\n -------\n None\n \"\"\"\n\n arg_0.path = arg_0._prepare_dir(arg_0.topdir)\n arg_0._copy_executable(area_path=arg_0.path)\n arg_0._save_logging_levels(area_path=arg_0.path)\n arg_0._put_python_modules(modules=arg_0.python_modules, area_path=arg_0.path)"} +{"_id": "doc_8945", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Collect the result of a task\n\n Parameters\n ----------\n package_index :\n a package index\n\n Returns\n -------\n obj\n The result of the task\n\n \"\"\"\n\n arg_2 = arg_0.result_fullpath(arg_1)\n # e.g., '{path}/tpd_20161129_122841_HnpcmF/results/task_00009/result.p.gz'\n\n try:\n with gzip.open(arg_2, 'rb') as f:\n arg_3 = 
pickle.load(f)\n except Exception as e:\n arg_4 = logging.getLogger(__name__)\n arg_4.warning(e)\n return None\n\n return arg_3"} +{"_id": "doc_8946", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"Returns the relative path of the result\n\n This method returns the path to the result relative to the\n top dir of the working area. This method simply constructs the\n path based on the convention and doesn't check if the result\n actually exists.\n\n Parameters\n ----------\n package_index :\n a package index\n\n Returns\n -------\n str\n the relative path to the result\n\n \"\"\"\n\n arg_2 = 'task_{:05d}'.format(arg_1)\n # e.g., 'task_00009'\n\n arg_3 = os.path.join('results', arg_2, 'result.p.gz')\n # e.g., 'results/task_00009/result.p.gz'\n\n return arg_3"} +{"_id": "doc_8947", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"Submit multiple jobs\n\n Parameters\n ----------\n workingArea :\n A workingArea\n package_indices : list(int)\n A list of package indices\n\n Returns\n -------\n list(str)\n The list of the run IDs of the jobs\n\n \"\"\"\n\n if not arg_2:\n return [ ]\n\n arg_3 = arg_0._compose_job_desc(arg_1, arg_2)\n\n arg_4 = submit_jobs(arg_3, cwd=arg_1.path)\n\n # TODO: make configurable\n arg_5 = clusterprocids2clusterids(arg_4)\n for arg_6 in arg_5:\n change_job_priority([arg_6], 10)\n\n arg_0.clusterprocids_outstanding.extend(arg_4)\n\n return arg_4"} +{"_id": "doc_8948", "title": "", "text": "def Func(arg_0):\n \"\"\"Return the run IDs of the finished jobs\n\n Returns\n -------\n list(str)\n The list of the run IDs of the finished jobs\n\n \"\"\"\n\n arg_1 = clusterprocids2clusterids(arg_0.clusterprocids_outstanding)\n arg_2 = query_status_for(arg_1)\n # e.g., [['1730126.0', 2], ['1730127.0', 2], ['1730129.1', 1], ['1730130.0', 1]]\n\n if arg_2:\n arg_3, arg_4 = zip(*arg_2)\n else:\n arg_3, arg_4 = (), ()\n\n arg_5 = [i for i in arg_0.clusterprocids_outstanding if i not in arg_3]\n arg_0.clusterprocids_finished.extend(arg_5)\n arg_0.clusterprocids_outstanding[:] = arg_3\n\n # logging\n arg_7 = collections.Counter(arg_4)\n arg_8 = [ ]\n if arg_7:\n arg_8.append(', '.join(['{}: {}'.format(HTCONDOR_JOBSTATUS[arg_9], arg_7[arg_9]) for arg_9 in arg_7.keys()]))\n if arg_0.clusterprocids_finished:\n arg_8.append('Finished {}'.format(len(arg_0.clusterprocids_finished)))\n arg_10 = logging.getLogger(__name__)\n arg_10.info(', '.join(arg_8))\n\n return arg_5"} +{"_id": "doc_8949", "title": "", "text": "def Func(arg_0):\n \"\"\"Wait until all jobs finish and return the run IDs of the finished jobs\n\n Returns\n -------\n list(str)\n The list of the run IDs of the finished jobs\n\n \"\"\"\n\n arg_1 = 5\n while True:\n if arg_0.clusterprocids_outstanding:\n arg_0.poll()\n if not arg_0.clusterprocids_outstanding:\n break\n time.sleep(arg_1)\n return arg_0.clusterprocids_finished"} +{"_id": "doc_8950", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"Func a task and its arguments\n\n If you need to Func multiple tasks, it can be faster to Func\n multiple tasks with `Func_multiple()` than to use this method\n multiple times.\n\n Parameters\n ----------\n task : a function\n A function to be executed\n args : list\n A list of positional arguments to the `task`\n kwargs : dict\n A dict with keyword arguments to the `task`\n\n Returns\n -------\n int, str, or any hashable and sortable\n A task ID. 
IDs are sortable in the order in which the\n corresponding tasks are Func.\n\n \"\"\"\n if not arg_0.isopen:\n arg_4 = logging.getLogger(__name__)\n arg_4.warning('the drop box is not open')\n return\n arg_5 = TaskPackage(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)\n return arg_0.dropbox.Func(arg_5)"} +{"_id": "doc_8951", "title": "", "text": "def Func(arg_0):\n \"\"\"return a list of pairs of IDs and results of finished tasks.\n\n This method doesn't wait for tasks to finish. It returns IDs\n and results which have already finished.\n\n Returns\n -------\n list\n A list of pairs of IDs and results\n\n \"\"\"\n if not arg_0.isopen:\n arg_1 = logging.getLogger(__name__)\n arg_1.warning('the drop box is not open')\n return\n return arg_0.dropbox.poll()"} +{"_id": "doc_8952", "title": "", "text": "def Func(arg_0):\n \"\"\"return a pair of an ID and a result of a task.\n\n This method waits for a task to finish.\n\n Returns\n -------\n An ID and a result of a task. `None` if no task is running.\n\n \"\"\"\n if not arg_0.isopen:\n arg_1 = logging.getLogger(__name__)\n arg_1.warning('the drop box is not open')\n return\n return arg_0.dropbox.Func()"} +{"_id": "doc_8953", "title": "", "text": "def Func(arg_0):\n \"\"\"return a list of pairs of IDs and results of all tasks.\n\n This method waits for all tasks to finish.\n\n Returns\n -------\n list\n A list of pairs of IDs and results\n\n \"\"\"\n if not arg_0.isopen:\n arg_1 = logging.getLogger(__name__)\n arg_1.warning('the drop box is not open')\n return\n return arg_0.dropbox.receive()"} +{"_id": "doc_8954", "title": "", "text": "def Func(arg_0):\n \"\"\"return a list results of all tasks.\n\n This method waits for all tasks to finish.\n\n Returns\n -------\n list\n A list of results of the tasks. The results are sorted in\n the order in which the tasks are put.\n\n \"\"\"\n arg_1 = arg_0.Func_all()\n if arg_1 is None:\n return\n arg_2 = [r for _, r in arg_1]\n return arg_2"} +{"_id": "doc_8955", "title": "", "text": "def Func(arg_0, arg_1={ }, arg_2={ }):\n \"\"\"expand a path config\n\n Args:\n path_cfg (str, tuple, dict): a config for path\n alias_dict (dict): a dict for aliases\n overriding_kargs (dict): to be used for recursive call\n \"\"\"\n\n if isinstance(arg_0, str):\n return _expand_str(arg_0, arg_1, arg_2)\n\n if isinstance(arg_0, dict):\n return _expand_dict(arg_0, arg_1)\n\n # assume tuple or list\n return _expand_tuple(arg_0, arg_1, arg_2)"} +{"_id": "doc_8956", "title": "", "text": "def Func(arg_0):\n \"\"\"check if the jobs are running and return a list of pids for\n finished jobs\n\n \"\"\"\n arg_1 = [p for p in arg_0.running_procs if p.Func() is not None]\n arg_0.running_procs = collections.deque([p for p in arg_0.running_procs if p not in arg_1])\n\n for arg_3 in arg_1:\n arg_4, arg_5 = arg_3.communicate()\n ## proc.communicate() returns (stdout, stderr) when\n ## self.pipe = True. 
Otherwise they are (None, None)\n\n arg_6 = [p.pid for p in arg_1]\n arg_0.finished_pids.extend(arg_6)\n\n arg_7 = logging.getLogger(__name__)\n arg_8 = 'Running: {}, Finished: {}'.format(len(arg_0.running_procs), len(arg_0.finished_pids))\n arg_7.info(arg_8)\n\n return arg_6"} +{"_id": "doc_8957", "title": "", "text": "def Func(arg_0):\n \"\"\"Func until all jobs finish and return a list of pids\n \"\"\"\n arg_1 = [ ]\n while arg_0.running_procs:\n arg_1.extend(arg_0.poll())\n return arg_1"} +{"_id": "doc_8958", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"return the ROOT.vector object for the branch.\n\n \"\"\"\n\n if (arg_1, arg_2) in arg_0.__class__.addressDict:\n return arg_0.__class__.addressDict[(arg_1, arg_2)]\n\n arg_3 = arg_0._Func(arg_1, arg_2)\n arg_0.__class__.addressDict[(arg_1, arg_2)] = arg_3\n\n return arg_3"} +{"_id": "doc_8959", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Ensure all config-time files have been generated. Return a\n dictionary of generated items.\n '''\n arg_3 = {}\n\n arg_4 = arg_0.buildroot\n\n # only dependencies which are actually valid can contribute to the\n # config data (which includes the versions of all dependencies in its\n # build info) if the dependencies aren't available we can't tell what\n # version they are. Anything missing here should always be a test\n # dependency that isn't going to be used, otherwise the yotta build\n # command will fail before we get here\n arg_5 = OrderedDict((k, v) for k, v in arg_2.items() if v)\n\n arg_0.set_toplevel_definitions = ''\n if arg_0.build_info_include_file is None:\n arg_0.build_info_include_file, arg_8 = arg_0.getBuildInfo(arg_1.path, arg_4)\n arg_0.set_toplevel_definitions += arg_8\n\n if arg_0.config_include_file is None:\n arg_0.config_include_file, arg_10, arg_0.config_json_file = arg_0._getConfigData(arg_5, arg_1, arg_4, arg_0.build_info_include_file)\n arg_0.set_toplevel_definitions += arg_10\n\n arg_0.Funcd = True\n return {\n 'merged_config_include': arg_0.config_include_file,\n 'merged_config_json': arg_0.config_json_file,\n 'build_info_include': arg_0.build_info_include_file\n }"} +{"_id": "doc_8960", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n '''unpack the specified tarball url into the specified directory'''\n\n try:\n access_common.unpackFromCache(arg_2, arg_1)\n except KeyError as e:\n arg_4 = settings.getProperty('github', 'authtoken')\n arg_5 = {}\n if arg_4 is not None:\n arg_5['Authorization'] = 'token ' + str(arg_4)\n\n logger.debug('GET %s', arg_0)\n arg_6 = requests.get(arg_0, allow_redirects=True, stream=True, arg_5=arg_5)\n arg_6.raise_for_status()\n\n logger.debug('getting file: %s', arg_0)\n logger.debug('headers: %s', arg_6.headers)\n arg_6.raise_for_status()\n\n # github doesn't exposes hashes of the archives being downloaded as far\n # as I can tell :(\n access_common.unpackTarballStream(\n stream = arg_6,\n arg_1 = arg_1,\n hash = {},\n arg_2 = arg_2,\n arg_3 = arg_3\n )"} +{"_id": "doc_8961", "title": "", "text": "def Func(arg_0):\n ''' return a list of Version objects, each with a tarball URL set '''\n arg_1 = []\n for arg_2 in arg_0._getTags():\n logger.debug(\"available version tag: %s\", arg_2)\n # ignore empty tags:\n if not len(arg_2[0].strip()):\n continue\n try:\n arg_1.append(GithubComponentVersion(arg_2[0], arg_2[0], url=arg_2[1], name=arg_0.name, cache_key=None))\n except ValueError:\n logger.debug('invalid version tag: %s', arg_2)\n\n return arg_1"} +{"_id": "doc_8962", "title": "", "text": "def 
Func(arg_0):\n ''' return a list of GithubComponentVersion objects for the tip of each branch\n '''\n return [\n GithubComponentVersion(\n '', arg_1[0], arg_1[1], arg_0.name, cache_key=None\n ) for arg_1 in _getBranchHeads(arg_0.repo).items()\n ]"} +{"_id": "doc_8963", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n ''' Try to Func a recently published version. Return any errors that\n occur.\n '''\n arg_3 = arg_3 or Registry_Base_URL\n\n arg_4 = '%s/%s/%s/versions/%s' % (\n arg_3,\n arg_0,\n arg_1,\n arg_2\n )\n\n arg_5 = _headersForRegistry(arg_3)\n arg_6 = requests.delete(arg_4, arg_5=arg_5)\n arg_6.raise_for_status()\n\n return None"} +{"_id": "doc_8964", "title": "", "text": "def Func(arg_0, arg_1):\n '''' Read a list of files. Their configuration values are merged, with\n preference to values from files earlier in the list.\n '''\n for arg_2 in arg_1:\n try:\n arg_0.configs[arg_2] = ordered_json.load(arg_2)\n except IOError:\n arg_0.configs[arg_2] = OrderedDict()\n except Exception as e:\n arg_0.configs[arg_2] = OrderedDict()\n logging.warning(\n \"Failed to Func settings file %s, it will be ignored. The error was: %s\",\n arg_2, e\n )"} +{"_id": "doc_8965", "title": "", "text": "def Func(arg_0, arg_1):\n ''' return a configuration value\n\n usage:\n Func('section.property')\n\n Note that currently array indexes are not supported. You must\n Func the whole array.\n\n returns None if any path element or the property is missing\n '''\n arg_1 = _splitPath(arg_1)\n for arg_2 in arg_0.configs.values():\n arg_3 = arg_2\n for arg_4 in arg_1:\n if arg_4 in arg_3:\n arg_3 = arg_3[arg_4]\n else:\n arg_3 = None\n break\n if arg_3 is not None:\n return arg_3\n return None"} +{"_id": "doc_8966", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None):\n ''' Set a configuration value. If no filename is specified, the\n property is Func in the first configuration file. Note that if a\n filename is specified and the property path is present in an\n earlier filename then Func property will be hidden.\n\n usage:\n Func('section.property', value='somevalue')\n\n Note that currently array indexes are not supported. 
You must\n Func the whole array.\n '''\n if arg_3 is None:\n arg_4 = arg_0._firstConfig()[1]\n else:\n arg_4 = arg_0.configs[arg_3]\n\n arg_1 = _splitPath(arg_1)\n for arg_5 in arg_1[:-1]:\n if arg_5 in arg_4:\n arg_4 = arg_4[arg_5]\n else:\n arg_4[arg_5] = OrderedDict()\n arg_4 = arg_4[arg_5]\n arg_4[arg_1[-1]] = arg_2"} +{"_id": "doc_8967", "title": "", "text": "def Func(arg_0):\n ''' indicate whether the current item is the last one in a generator\n '''\n arg_1 = None\n arg_2 = True\n for arg_3 in arg_0:\n if not arg_2:\n yield (arg_1, False)\n arg_1 = arg_3\n arg_2 = False\n if not arg_2:\n yield (arg_1, True)"} +{"_id": "doc_8968", "title": "", "text": "def Func(arg_0, arg_1=None):\n ''' Publish to the appropriate registry, return a description of any\n errors that occured, or None if successful.\n No VCS tagging is performed.\n '''\n if (arg_1 is None) or (arg_1 == registry_access.Registry_Base_URL):\n if 'private' in arg_0.description and arg_0.description['private']:\n return \"this %s is private and cannot be Funced\" % (arg_0.description_filename.split('.')[0])\n arg_2 = os.path.join(arg_0.path, 'upload.tar.gz')\n fsutils.rmF(arg_2)\n arg_3 = os.open(arg_2, os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, \"O_BINARY\", 0))\n with os.fdopen(arg_3, 'rb+') as tar_file:\n tar_file.truncate()\n arg_0.generateTarball(tar_file)\n logger.debug('generated tar file of length %s', tar_file.tell())\n tar_file.seek(0)\n # calculate the hash of the file before we upload it:\n arg_4 = hashlib.sha256()\n while True:\n arg_5 = tar_file.read(1000)\n if not arg_5:\n break\n arg_4.update(arg_5)\n logger.debug('generated tar file has hash %s', arg_4.hexdigest())\n tar_file.seek(0)\n with arg_0.findAndOpenReadme() as readme_file_wrapper:\n if not readme_file_wrapper:\n logger.warning(\"no readme.md file detected\")\n with open(arg_0.getDescriptionFile(), 'r') as description_file:\n return registry_access.Func(\n arg_0.getRegistryNamespace(),\n arg_0.getName(),\n arg_0.getVersion(),\n description_file,\n tar_file,\n readme_file_wrapper.file,\n readme_file_wrapper.extension().lower(),\n arg_1=arg_1\n )"} +{"_id": "doc_8969", "title": "", "text": "def Func(arg_0, arg_1=None):\n ''' Try to un-publish the current version. Return a description of any\n errors that occured, or None if successful.\n '''\n return registry_access.Func(\n arg_0.getRegistryNamespace(),\n arg_0.getName(),\n arg_0.getVersion(),\n arg_1=arg_1\n )"} +{"_id": "doc_8970", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Return the specified script command. 
If the first part of the\n command is a .py file, then the current python interpreter is\n prepended.\n\n If the script is a single string, rather than an array, it is\n shlex-split.\n '''\n arg_2 = arg_0.description.get('scripts', {}).get(arg_1, None)\n if arg_2 is not None:\n if isinstance(arg_2, str) or isinstance(arg_2, type(u'unicode string')):\n import shlex\n arg_2 = shlex.split(arg_2)\n # if the command is a python script, run it with the python\n # interpreter being used to run yotta, also fetch the absolute path\n # to the script relative to this module (so that the script can be\n # distributed with the module, no matter what current working\n # directory it will be executed in):\n if len(arg_2) and arg_2[0].lower().endswith('.py'):\n if not os.path.isabs(arg_2[0]):\n arg_3 = os.path.abspath(os.path.join(arg_0.path, arg_2[0]))\n logger.debug('rewriting script %s to be absolute path %s', arg_2[0], arg_3)\n arg_2[0] = arg_3\n import sys\n arg_2 = [sys.executable] + arg_2\n\n return arg_2"} +{"_id": "doc_8971", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n ''' Check if this module has any dependencies with the specified name\n in its dependencies list, or in target dependencies for the\n specified target\n '''\n if arg_1 in arg_0.description.get('dependencies', {}).keys():\n return True\n\n arg_4 = arg_0.description.get('targetDependencies', {})\n if arg_2 is not None:\n for arg_5, arg_6 in arg_4.items():\n if _truthyConfValue(arg_2.getConfigValue(arg_5)) or arg_5 in arg_2.getSimilarTo_Deprecated():\n if arg_1 in arg_6:\n return True\n\n if arg_3:\n if arg_1 in arg_0.description.get('testDependencies', {}).keys():\n return True\n\n if arg_2 is not None:\n arg_7 = arg_0.description.get('testTargetDependencies', {})\n for arg_5, arg_6 in arg_7.items():\n if _truthyConfValue(arg_2.getConfigValue(arg_5)) or arg_5 in arg_2.getSimilarTo_Deprecated():\n if arg_1 in arg_6:\n return True\n return False"} +{"_id": "doc_8972", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False):\n ''' Check if this module, or any of its dependencies, have a\n dependencies with the specified name in their dependencies, or in\n their targetDependencies corresponding to the specified target.\n\n Note that if recursive dependencies are not installed, this test\n may return a false-negative.\n '''\n # checking dependencies recursively isn't entirely straightforward, so\n # use the existing method to resolve them all before checking:\n arg_4 = arg_0.getDependenciesRecursive(\n arg_2 = arg_2,\n test = arg_3\n )\n return (arg_1 in arg_4)"} +{"_id": "doc_8973", "title": "", "text": "def Func(\n arg_0,\n arg_1 = None,\n arg_2 = None,\n arg_3 = False,\n arg_4 = False,\n arg_5 = None,\n arg_6 = False\n ):\n ''' Retrieve and install all the dependencies of this component and its\n dependencies, recursively, or satisfy them from a collection of\n available_components or from disk.\n\n Returns\n =======\n (components, errors)\n\n components: dictionary of name:Component\n errors: sequence of errors\n\n Parameters\n ==========\n\n available_components:\n None (default) or a dictionary of name:component. This is\n searched before searching directories or fetching remote\n components\n\n search_dirs:\n None (default), or sequence of directories to search for\n already installed, (but not yet loaded) components. 
Used so\n that manually installed or linked components higher up the\n dependency tree are found by their users lower down.\n\n These directories are searched in order, and finally the\n current directory is checked.\n\n update_installed:\n False (default), True, or set(): whether to check the\n available versions of installed components, and update if a\n newer version is available. If this is a set(), only update\n things in the specified set.\n\n traverse_links:\n False (default) or True: whether to recurse into linked\n dependencies when updating/installing.\n\n target:\n None (default), or a Target object. If specified the target\n name and it's similarTo list will be used in resolving\n dependencies. If None, then only target-independent\n dependencies will be installed\n\n test:\n True, False, or 'toplevel: should test-only dependencies be\n installed? (yes, no, or only for this module, not its\n dependencies).\n\n '''\n def provider(\n arg_7,\n arg_1,\n arg_2,\n arg_8,\n arg_3,\n arg_9=None\n ):\n arg_10 = access.satisfyFromAvailable(arg_7.name, arg_1)\n if arg_10:\n if arg_10.isTestDependency() and not arg_7.is_test_dependency:\n logger.debug('test dependency subsequently occurred as real dependency: %s', arg_10.getName())\n arg_10.setTestDependency(False)\n return arg_10\n arg_11 = False\n if arg_3 is True:\n arg_11 = True\n elif arg_3:\n arg_11 = arg_7.name in arg_3\n arg_10 = access.satisfyVersionFromSearchPaths(\n arg_7.name,\n arg_7.versionReq(),\n arg_2,\n arg_11,\n inherit_shrinkwrap = arg_9.getShrinkwrap()\n )\n if arg_10:\n arg_10.setTestDependency(arg_7.is_test_dependency)\n return arg_10\n # before resorting to install this module, check if we have an\n # existing linked module (which wasn't picked up because it didn't\n # match the version specification) - if we do, then we shouldn't\n # try to install, but should return that anyway:\n arg_12 = os.path.join(arg_0.modulesPath(), arg_7.name)\n if fsutils.isLink(arg_12):\n arg_10 = Component(\n arg_12,\n test_dependency = arg_7.is_test_dependency,\n installed_linked = fsutils.isLink(arg_12),\n inherit_shrinkwrap = arg_9.getShrinkwrap()\n )\n if arg_10:\n assert(arg_10.installedLinked())\n return arg_10\n else:\n logger.error('linked module %s is invalid: %s', arg_7.name, arg_10.getError())\n return arg_10\n\n arg_10 = access.satisfyVersionByInstalling(\n arg_7.name,\n arg_7.versionReq(),\n arg_0.modulesPath(),\n inherit_shrinkwrap = arg_9.getShrinkwrap()\n )\n if not arg_10:\n logger.error('could not install %s' % arg_7.name)\n if arg_10 is not None:\n arg_10.setTestDependency(arg_7.is_test_dependency)\n return arg_10\n\n return arg_0.__getDependenciesRecursiveWithProvider(\n arg_1 = arg_1,\n arg_2 = arg_2,\n arg_5 = arg_5,\n arg_4 = arg_4,\n arg_3 = arg_3,\n provider = provider,\n arg_6 = arg_6\n )"} +{"_id": "doc_8974", "title": "", "text": "def Func(arg_0):\n ''' Some components must export whole directories full of headers into\n the search path. 
This is really really bad, and they shouldn't do\n it, but support is provided as a concession to compatibility.\n '''\n if 'extraIncludes' in arg_0.description:\n return [os.path.normpath(arg_1) for arg_1 in arg_0.description['extraIncludes']]\n else:\n return []"} +{"_id": "doc_8975", "title": "", "text": "def Func(*arg_0):\n ''' merge dictionaries of dictionaries recursively, with elements from\n dictionaries earlier in the argument sequence taking precedence\n '''\n # to support merging of OrderedDicts, copy the result type from the first\n # argument:\n arg_1 = type(arg_0[0])()\n for arg_2, arg_3 in itertools.chain(*[x.items() for x in arg_0]):\n if not arg_2 in arg_1:\n arg_1[arg_2] = arg_3\n elif isinstance(arg_1[arg_2], dict) and isinstance(arg_3, dict):\n arg_1[arg_2] = Func(arg_1[arg_2], arg_3)\n return arg_1"} +{"_id": "doc_8976", "title": "", "text": "def Func(arg_0, arg_1):\n ''' create a new nested dictionary object with the same structure as\n 'dictionary', but with all scalar values replaced with 'value'\n '''\n arg_2 = type(arg_0)()\n for arg_3 in arg_0.keys():\n if isinstance(arg_0[arg_3], dict):\n arg_2[arg_3] = Func(arg_0[arg_3], arg_1)\n else:\n arg_2[arg_3] = arg_1\n return arg_2"} +{"_id": "doc_8977", "title": "", "text": "def Func(arg_0):\n ''' returns pack.DependencySpec for the base target of this target (or\n None if this target does not inherit from another target.\n '''\n arg_1 = arg_0.description.get('inherits', {})\n if len(arg_1) == 1:\n arg_2, arg_3 = list(arg_1.items())[0]\n arg_4 = arg_0.getShrinkwrapMapping('targets').get(arg_2, None)\n if arg_4 is not None:\n logger.debug(\n 'respecting shrinkwrap version %s for %s', arg_4, arg_2\n )\n return pack.DependencySpec(\n arg_2,\n arg_3,\n arg_4 = arg_4\n )\n elif len(arg_1) > 1:\n logger.error('target %s specifies multiple base targets, but only one is allowed', arg_0.getName())\n return None"} +{"_id": "doc_8978", "title": "", "text": "def Func(arg_0, arg_1):\n ''' Return true if this target inherits from the named target (directly\n or indirectly. Also returns true if this target is the named\n target. Otherwise return false.\n '''\n for arg_2 in arg_0.hierarchy:\n if arg_2 and arg_2.getName() == arg_1 or arg_1 in arg_2.description.get('inherits', {}):\n return True\n return False"} +{"_id": "doc_8979", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n ''' Execute the given command, returning an error message if an error occured\n or None if the command was succesful.'''\n try:\n arg_3 = subprocess.Popen(arg_1, cwd=arg_2)\n arg_3.wait()\n except OSError as e:\n if e.errno == errno.ENOENT:\n if arg_1[0] == 'cmake':\n return 'CMake is not installed, please follow the installation instructions at http://docs.yottabuild.org/#installing'\n else:\n return '%s is not installed' % (arg_1[0])\n else:\n return 'command %s failed' % (arg_1)\n if arg_3.returncode:\n return 'command %s failed' % (arg_1)"} +{"_id": "doc_8980", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=False, arg_5=None, arg_6=None,\n arg_7=False):\n ''' Execute the commands necessary to Func this component, and all of\n its dependencies. 
'''\n if arg_5 is None:\n arg_5 = []\n if arg_6 is None:\n arg_6 = []\n # in the future this may be specified in the target description, but\n # for now we only support cmake, so everything is simple:\n if arg_7:\n arg_8 = 'Release'\n elif arg_4:\n arg_8 = 'RelWithDebInfo'\n else:\n arg_8 = 'Debug'\n arg_9 = ['cmake', '-D', 'CMAKE_BUILD_TYPE=%s' % arg_8, '-G', arg_3.cmake_generator, '.']\n arg_10 = arg_0.exec_helper(arg_9, arg_1)\n if arg_10 is not None:\n return arg_10\n\n # work-around various yotta-specific issues with the generated\n # Ninja/project files:\n from yotta.lib import cmake_fixups\n cmake_fixups.applyFixupsForFenerator(arg_3.cmake_generator, arg_1, arg_2)\n\n arg_11 = arg_0.overrideBuildCommand(arg_3.cmake_generator, arg_6=arg_6)\n if arg_11:\n arg_9 = arg_11 + arg_5\n else:\n arg_9 = ['cmake', '--Func', arg_1]\n if len(arg_6):\n # !!! FIXME: support multiple targets with the default CMake\n # Func command\n arg_9 += ['--target', arg_6[0]]\n arg_9 += arg_5\n arg_10 = arg_0.exec_helper(arg_9, arg_1)\n if arg_10 is not None:\n return arg_10\n arg_12 = arg_0.hintForCMakeGenerator(arg_3.cmake_generator, arg_2)\n if arg_12:\n logger.info(arg_12)"} +{"_id": "doc_8981", "title": "", "text": "def Func(arg_0):\n ''' return decorator to prune cache after calling fn with a probability of p'''\n def decorator(arg_1):\n @functools.wraps(arg_1)\n def wrapped(*arg_2, **arg_3):\n arg_4 = arg_1(*arg_2, **arg_3)\n if random.random() < arg_0:\n pruneCache()\n return arg_4\n return wrapped\n return decorator"} +{"_id": "doc_8982", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Calibrate noisy variance estimates with empirical Bayes.\n\n Parameters\n ----------\n vars: ndarray\n List of variance estimates.\n sigma2: int\n Estimate of the Monte Carlo noise in vars.\n\n Returns\n -------\n An array of the calibrated variance estimates\n \"\"\"\n if (arg_1 <= 0 or min(arg_0) == max(arg_0)):\n return(np.maximum(arg_0, 0))\n arg_2 = np.sqrt(arg_1)\n arg_3 = gfit(arg_0, arg_2)\n # Set up a partial execution of the function\n arg_4 = functools.partial(gbayes, g_est=arg_3,\n arg_2=arg_2)\n if len(arg_0) >= 200:\n # Interpolate to speed up computations:\n arg_5 = np.percentile(arg_0,\n np.arange(0, 102, 2))\n arg_6 = list(map(arg_4, arg_5))\n arg_7 = np.interp(arg_0, arg_5, arg_6)\n else:\n arg_7 = list(map(arg_4, arg_0))\n\n return np.asarray(arg_7)"} +{"_id": "doc_8983", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Derive samples used to create trees in scikit-learn RandomForest objects.\n\n Recovers the samples in each tree from the random state of that tree using\n :func:`forest._generate_sample_indices`.\n\n Parameters\n ----------\n n_samples : int\n The number of samples used to fit the scikit-learn RandomForest object.\n\n forest : RandomForest\n Regressor or Classifier object that is already fit by scikit-learn.\n\n Returns\n -------\n Array that records how many times a data point was placed in a tree.\n Columns are individual trees. 
Rows are the number of times a sample was\n used in a tree.\n \"\"\"\n\n if not arg_1.bootstrap:\n arg_2 = \"Cannot calculate the inbag from a forest that has \"\n arg_2 = \" bootstrap=False\"\n raise ValueError(arg_2)\n\n arg_3 = arg_1.n_estimators\n arg_4 = np.zeros((arg_0, arg_3))\n arg_5 = []\n for arg_6 in range(arg_3):\n arg_5.append(\n _generate_sample_indices(arg_1.estimators_[arg_6].random_state,\n arg_0))\n arg_4[:, arg_6] = np.bincount(arg_5[-1], minlength=arg_0)\n return arg_4"} +{"_id": "doc_8984", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Retrieves the number of contributors to a repo in the organization.\n Also adds to unique contributor list.\n \"\"\"\n arg_2 = 0\n for arg_3 in arg_1.iter_contributors():\n arg_2 += 1\n arg_0.unique_contributors[arg_3.id].append(arg_1.name)\n arg_0.contributors_json[arg_1.name].append(arg_3.to_json())\n return arg_2"} +{"_id": "doc_8985", "title": "", "text": "def Func(arg_0, arg_1, arg_2='llnl'):\n \"\"\"\n Retrieves the number of closed issues.\n \"\"\"\n #JSON\n arg_3 = ('../github-data/' + arg_2 + '/' + arg_1.name + '/issues')\n arg_4 = False\n if not os.path.exists(arg_3): #no previous path, get all issues\n arg_5 = arg_1.iter_issues(state='all')\n arg_4 = True\n else:\n arg_6 = os.listdir(arg_3)\n arg_7 = str(arg_6[-1][:-5])\n if arg_7 == str(datetime.date.today()):\n #most recent date is actually today, get previous most recent date\n if len(arg_6) > 2:\n arg_7 = str(arg_6[-2][:-5])\n else:\n #This means there is only one file, today. Retrieve every issue\n arg_5 = arg_1.iter_issues(state='all')\n arg_4 = True\n if not arg_4:#there's a previous saved JSON that's not today\n arg_5 = arg_1.iter_issues(since=arg_7, state='all')\n for arg_8 in arg_5:\n arg_0.issues_json[arg_1.name].append(arg_8.to_json())\n #CSV\n arg_9 = 0\n for arg_8 in arg_1.iter_issues(state='closed'):\n if arg_8 is not None:\n arg_9 += 1\n return arg_9"} +{"_id": "doc_8986", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Checks to see if the given repo has a top level LICENSE file.\n \"\"\"\n if arg_0.search_limit >= 28:\n print 'Hit search limit. 
Sleeping for 60 sec.'\n time.sleep(60)\n arg_0.search_limit = 0\n arg_0.search_limit += 1\n arg_3 = arg_0.logged_in_gh.search_code('license'\n + 'in:path repo:' + arg_1.full_name)\n try:\n for arg_4 in arg_3:\n arg_5 = arg_4.path[1:]\n if '/' not in arg_5 and 'license' in arg_5.lower():\n arg_0.total_licenses += 1\n return arg_5\n return 'MISS'\n except (StopIteration) as e:\n return 'MISS'"} +{"_id": "doc_8987", "title": "", "text": "def Func(arg_0, arg_1=(arg_2.date.today()),\n arg_4='llnl',arg_5={}, arg_6='',\n arg_7=False):\n \"\"\"\n Writes stats from the organization to JSON.\n \"\"\"\n arg_8 = ('../github-data/' + arg_4 + '-org/'\n + arg_6 + '/' + str(arg_1) + '.json')\n arg_0.checkDir(arg_8)\n with open(arg_8, 'w') as out_clear:#clear old data\n out_clear.close()\n with open(arg_8, 'a') as out:\n if arg_7:#used for list of items\n out.write('[')\n for arg_9 in arg_5:\n out.write(json.dumps(arg_5[arg_9], sort_keys=True,\n indent=4, separators=(',', ': ')) + ',')\n out.seek(-1, os.SEEK_END)#kill last comma\n out.truncate()\n if arg_7:\n out.write(']')\n out.close()"} +{"_id": "doc_8988", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=arg_3(arg_4.date.today()),\n arg_6='N/A', arg_7=0, arg_8=0):\n \"\"\"\n Updates the total.csv file with current data.\n \"\"\"\n\n arg_9 = os.path.isfile(arg_1)\n with open(arg_1, 'a') as out_total:\n if not arg_9:\n out_total.write('date,organization,repos,members,teams,'\n + 'unique_contributors,total_contributors,forks,'\n + 'stargazers,pull_requests,open_issues,has_readme,'\n + 'has_license,pull_requests_open,pull_requests_closed,'\n + 'commits,id,closed_issues,issues\\n')\n arg_0.delete_last_line(arg_2=arg_2, arg_1=arg_1)\n out_total.close()\n with open(arg_1, 'r') as file_read:\n arg_10 = sum(1 for row in file_read) - 1\n file_read.close()\n with open(arg_1, 'a') as out_total:\n out_total.write(arg_2 + ',' + arg_6 + ','\n + arg_3(arg_0.total_repos) + ',' + arg_3(arg_7) + ',' + arg_3(arg_8)\n + ',' + arg_3(len(arg_0.unique_contributors)) + ','\n + arg_3(arg_0.total_contributors) + ',' + arg_3(arg_0.total_forks)\n + ',' + arg_3(arg_0.total_stars) + ',' + arg_3(arg_0.total_pull_reqs)\n + ',' + arg_3(arg_0.total_open_issues) + ','\n + arg_3(arg_0.total_readmes) + ',' + arg_3(arg_0.total_licenses) + ','\n + arg_3(arg_0.total_pull_reqs_open) + ','\n + arg_3(arg_0.total_pull_reqs_closed) + ','\n + arg_3(arg_0.total_commits) + ',' + arg_3(arg_10) + ','\n + arg_3(arg_0.total_closed_issues) + ',' + arg_3(arg_0.total_issues)\n + '\\n')\n out_total.close()"} +{"_id": "doc_8989", "title": "", "text": "def Func(arg_0, arg_1='',arg_2=arg_3(arg_4.date.today())):\n \"\"\"\n Updates languages.csv file with current data.\n \"\"\"\n arg_0.remove_date(arg_1=arg_1, arg_2=arg_2)\n arg_6 = os.path.isfile(arg_1)\n with open(arg_1, 'a') as out_languages:\n if not arg_6:\n out_languages.write('date,language,count,size,size_log\\n')\n arg_7 = sorted(arg_0.languages_size)\n #self.delete_last_line(date=date, file_path=file_path)\n for arg_8 in arg_7:\n try:\n out_languages.write(arg_2 + ',' + arg_8 + ','\n + arg_3(arg_0.languages[arg_8]) + ','\n + arg_3(arg_0.languages_size[arg_8]) + ','\n + arg_3(math.log10(int(arg_0.languages_size[arg_8])))\n + '\\n')\n except (TypeError, KeyError) as e:\n out_languages.write(arg_2 + ',' + arg_8 + ','\n + arg_3(0) + ','\n + arg_3(arg_0.languages_size[arg_8]) + ','\n + arg_3(math.log10(int(arg_0.languages_size[arg_8])))\n + '\\n')"} +{"_id": "doc_8990", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Checks if a directory 
exists. If not, it creates one with the specified\n file_path.\n \"\"\"\n if not os.path.exists(os.path.dirname(arg_1)):\n try:\n os.makedirs(os.path.dirname(arg_1))\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise"} +{"_id": "doc_8991", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=arg_3(arg_4.date.today())):\n \"\"\"\n Removes all rows of the associated date from the given csv file.\n Defaults to today.\n \"\"\"\n arg_6 = os.path.isfile(arg_1)\n if arg_6:\n with open(arg_1, 'rb') as inp, open('temp.csv', 'wb') as out:\n arg_7 = csv.writer(out)\n for arg_8 in csv.reader(inp):\n if arg_8[0] != arg_2:\n arg_7.writerow(arg_8)\n inp.close()\n out.close()\n os.remove(arg_1)\n os.rename(\"temp.csv\",arg_1)"} +{"_id": "doc_8992", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Create a github3.py session for a GitHub Enterprise instance\n\n If token is not provided, will attempt to use the GITHUB_API_TOKEN\n environment variable if present.\n \"\"\"\n\n arg_2 = github3.enterprise_login(arg_0=arg_0, arg_1=arg_1)\n\n if arg_2 is None:\n arg_3 = 'Unable to connect to GitHub Enterprise (%s) with provided token.'\n raise RuntimeError(arg_3, arg_0)\n\n return arg_2"} +{"_id": "doc_8993", "title": "", "text": "def Func(arg_0, arg_1=250, arg_2=15):\n \"\"\"\n Simplified check for API limits\n\n If necessary, spin in place waiting for API to reset before returning.\n\n See: https://developer.github.com/v3/#rate-limiting\n \"\"\"\n arg_3 = arg_0.rate_limit()\n\n arg_4 = arg_3['rate']['remaining']\n arg_5 = arg_3['rate']['reset']\n logger.debug('Rate Limit - %d requests remaining', arg_4)\n\n if arg_4 > arg_1:\n return\n\n arg_6 = time.time()\n arg_7 = int(arg_5 - arg_6)\n logger.warn('Rate Limit Depleted - Sleeping for %d seconds', arg_7)\n\n while arg_6 < arg_5:\n time.sleep(10)\n arg_6 = time.time()\n\n return"} +{"_id": "doc_8994", "title": "", "text": "def Func(arg_0='https://github.com', arg_1=None):\n \"\"\"\n Create a GitHub session for making requests\n \"\"\"\n\n arg_2 = None\n if arg_0 == 'https://github.com':\n arg_2 = create_session(arg_1)\n else:\n arg_2 = create_enterprise_session(arg_0, arg_1)\n\n if arg_2 is None:\n arg_3 = 'Unable to Func to (%s) with provided token.'\n raise RuntimeError(arg_3, arg_0)\n\n logger.info('Connected to: %s', arg_0)\n\n return arg_2"} +{"_id": "doc_8995", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=True):\n \"\"\"\n Yields GitHub3.py repo objects for provided orgs and repo names\n\n If orgs and repos are BOTH empty, execute special mode of getting ALL\n repositories from the GitHub Server.\n\n If public_only is True, will return only those repos that are marked as\n public. 
Set this to false to return all organizations that the session has\n permissions to access.\n \"\"\"\n\n if arg_1 is None:\n arg_1 = []\n if arg_2 is None:\n arg_2 = []\n if arg_3:\n arg_4 = 'public'\n else:\n arg_4 = 'all'\n\n _check_api_limits(arg_0, 10)\n\n for arg_5 in arg_1:\n arg_6 = arg_0.organization(arg_5)\n arg_7 = arg_6.public_repos_count\n\n _check_api_limits(arg_0, _num_requests_needed(arg_7))\n\n for arg_8 in arg_6.repositories(type=arg_4):\n _check_api_limits(arg_0, 10)\n yield arg_8\n\n for arg_9 in arg_2:\n _check_api_limits(arg_0, 10)\n arg_6, arg_10 = arg_9.split('/')\n yield arg_0.repository(arg_6, arg_10)\n\n if not (arg_1 or arg_2):\n for arg_8 in arg_0.all_repositories():\n yield arg_8"} +{"_id": "doc_8996", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Retrieves an organization via given org name. If given\n empty string, prompts user for an org name.\n \"\"\"\n arg_0.organization_name = arg_1\n if(arg_1 == ''):\n arg_0.organization_name = raw_input('Organization: ')\n print 'Getting organization.'\n arg_0.org_retrieved = arg_0.logged_in_gh.organization(arg_1)"} +{"_id": "doc_8997", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"\n Create CodeGovProject object from GitLab Repository\n \"\"\"\n if not isinstance(arg_1, gitlab.v4.objects.Project):\n raise TypeError('Repository must be a gitlab Repository object')\n\n arg_3 = arg_0()\n\n logger.debug(\n 'GitLab: repository_id=%d path_with_namespace=%s',\n arg_1.id,\n arg_1.path_with_namespace,\n )\n\n # -- REQUIRED FIELDS --\n\n arg_3['name'] = arg_1.name\n arg_3['repositoryURL'] = arg_1.http_url_to_repo\n arg_3['description'] = arg_1.description\n\n # TODO: Update licenses from GitLab API\n arg_3['permissions']['licenses'] = None\n\n arg_4 = arg_1.web_url\n arg_5 = arg_4.startswith('https://gitlab.com')\n\n if arg_1.visibility in ('public') and arg_5:\n arg_3['permissions']['usageType'] = 'openSource'\n elif date_parse(arg_1.created_at) < POLICY_START_DATE:\n arg_3['permissions']['usageType'] = 'exemptByPolicyDate'\n\n if arg_2:\n arg_3['laborHours'] = labor_hours_from_url(arg_3['repositoryURL'])\n else:\n arg_3['laborHours'] = 0\n\n arg_3['tags'] = ['gitlab'] + arg_1.tag_list\n\n arg_3['contact'] = {\n 'email': '',\n 'URL': arg_4,\n }\n\n # -- OPTIONAL FIELDS --\n\n # project['version'] = ''\n\n arg_3['organization'] = arg_1.namespace['name']\n\n # TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370\n arg_3['status'] = 'Development'\n\n arg_3['vcs'] = 'git'\n\n arg_3['homepageURL'] = arg_1.web_url\n\n arg_6 = arg_1.manager.gitlab._url\n arg_7 = '/projects/%s/repository/archive' % arg_1.get_id()\n arg_3['downloadURL'] = arg_6 + arg_7\n\n # project['languages'] = [l for l, _ in repository.languages()]\n # project['partners'] = []\n # project['relatedCode'] = []\n # project['reusedCode'] = []\n\n arg_3['date'] = {\n 'created': date_parse(arg_1.created_at).date().isoformat(),\n 'lastModified': date_parse(arg_1.last_activity_at).date().isoformat(),\n 'metadataLastUpdated': '',\n }\n\n _prune_dict_null_str(arg_3)\n\n return arg_3"} +{"_id": "doc_8998", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Create CodeGovProject object from DOE CODE record\n\n Handles crafting Code.gov Project\n \"\"\"\n if not isinstance(arg_1, dict):\n raise TypeError('`record` must be a dict')\n\n arg_2 = arg_0()\n\n # -- REQUIRED FIELDS --\n\n arg_2['name'] = arg_1['software_title']\n logger.debug('DOE CODE: software_title=\"%s\"', arg_1['software_title'])\n\n 
arg_3 = arg_1.get('repository_link', '')\n if not arg_3:\n arg_3 = arg_1.get('landing_page')\n logger.warning('DOE CODE: No repositoryURL, using landing_page: %s', arg_3)\n\n arg_2['repositoryURL'] = arg_3\n\n arg_2['description'] = arg_1['description']\n\n arg_4 = set(arg_1['licenses'])\n arg_4.discard(None)\n logger.debug('DOE CODE: licenses=%s', arg_4)\n\n arg_5 = []\n if 'Other' in arg_4:\n arg_4.remove('Other')\n arg_5 = [{\n 'name': 'Other',\n 'URL': arg_1['proprietary_url']\n }]\n\n if arg_4:\n arg_5.extend([_license_obj(arg_6) for arg_6 in arg_4])\n\n arg_2['permissions']['licenses'] = arg_5\n\n if arg_1['open_source']:\n arg_7 = 'openSource'\n else:\n arg_7 = 'exemptByLaw'\n arg_2['permissions']['exemptionText'] = 'This source code is restricted by patent and / or intellectual property law.'\n\n arg_2['permissions']['usageType'] = arg_7\n\n # TODO: Compute from git repo\n arg_2['laborHours'] = 0\n\n arg_2['tags'] = ['DOE CODE']\n arg_8 = arg_1.get('lab_display_name')\n if arg_8 is not None:\n arg_2['tags'].append(arg_8)\n\n arg_2['contact']['email'] = arg_1['owner']\n # project['contact']['URL'] = ''\n # project['contact']['name'] = ''\n # project['contact']['phone'] = ''\n\n # -- OPTIONAL FIELDS --\n\n if 'version_number' in arg_1 and arg_1['version_number']:\n arg_2['version'] = arg_1['version_number']\n\n if arg_8 is not None:\n arg_2['organization'] = arg_8\n\n # Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370\n arg_9 = arg_1.get('ever_announced')\n if arg_9 is None:\n raise ValueError('DOE CODE: Unable to determine \"ever_announced\" value!')\n elif arg_9:\n arg_9 = 'Production'\n else:\n arg_9 = 'Development'\n\n arg_2['status'] = arg_9\n\n arg_10 = None\n arg_3 = arg_2['repositoryURL']\n if 'github.com' in arg_3:\n arg_10 = 'git'\n if arg_10 is None:\n logger.debug('DOE CODE: Unable to determine vcs for: name=\"%s\", repositoryURL=%s', arg_2['name'], arg_3)\n arg_10 = ''\n if arg_10:\n arg_2['vcs'] = arg_10\n\n arg_11 = arg_1.get('landing_page', '')\n if arg_11:\n arg_2['homepageURL'] = arg_11\n\n # record['downloadURL'] = ''\n\n # self['disclaimerText'] = ''\n\n # self['disclaimerURL'] = ''\n\n if 'programming_languages' in arg_1:\n arg_2['languages'] = arg_1['programming_languages']\n\n # self['partners'] = []\n # TODO: Look into using record['contributing_organizations']\n\n # self['relatedCode'] = []\n\n # self['reusedCode'] = []\n\n # date: [object] A date object describing the release.\n # created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.\n # lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.\n # metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.\n if 'date_record_added' in arg_1 and 'date_record_updated' in arg_1:\n arg_2['date'] = {\n 'created': arg_1['date_record_added'],\n # 'lastModified': '',\n 'metadataLastUpdated': arg_1['date_record_updated']\n }\n\n return arg_2"} +{"_id": "doc_8999", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the traffic for the repositories of the given organization.\n \"\"\"\n print 'Getting traffic.'\n #Uses the developer API. 
Note this could change.\n arg_1 = {'Accept': 'application/vnd.github.spiderman-preview', 'Authorization': 'token ' + arg_0.token}\n arg_2 = {'Authorization': 'token ' + arg_0.token}\n for arg_3 in arg_0.org_retrieved.iter_repos(type='public'):\n arg_4 = ('https://api.github.com/repos/' + arg_0.organization_name\n + '/' + arg_3.name)\n arg_0.get_referrers(arg_4=arg_4, arg_1=arg_1, repo_name=arg_3.name)\n arg_0.get_paths(arg_4=arg_4, arg_1=arg_1)\n arg_0.get_data(arg_4=arg_4, arg_1=arg_1, dict_to_store=arg_0.views,\n type='views', repo_name=arg_3.name)\n arg_0.get_data(arg_4=arg_4, arg_1=arg_1, dict_to_store=arg_0.clones,\n type='clones', repo_name=arg_3.name)\n arg_0.get_releases(arg_4=arg_4, arg_1=arg_2, repo_name=arg_3.name)"} +{"_id": "doc_9000", "title": "", "text": "def Func(arg_0, arg_1='', arg_2={}, arg_3=''):\n \"\"\"\n Retrieves the total referrers and unique referrers of all repos in json\n and then stores it in a dict.\n \"\"\"\n #JSON\n arg_4 = (arg_1 + '/traffic/popular/referrers')\n arg_5 = requests.get(arg_4, arg_2=arg_2)\n arg_6 = arg_5.json()\n arg_0.referrers_json[arg_3] = arg_6\n #CSV\n for arg_7 in arg_6:\n arg_8 = arg_7['referrer']\n try:\n arg_9 = (arg_7['count'], arg_7['uniques'])#curr vals\n arg_10 = (arg_0.referrers[arg_8][0] + arg_9[0],#cal new vals\n arg_0.referrers[arg_8][1] + arg_9[1])\n arg_0.referrers[arg_8] = arg_10#record new vals\n except KeyError:\n arg_10 = arg_0.referrers[arg_8] = (arg_7['count'],\n arg_7['uniques'])\n arg_0.referrers_lower[arg_8.lower()] = arg_8"} +{"_id": "doc_9001", "title": "", "text": "def Func(arg_0, arg_1='',arg_2={}, arg_3=arg_4(arg_5.date.today()),\n arg_7={}, arg_8='', arg_9=''):\n \"\"\"\n Retrieves data from json and stores it in the supplied dict. Accepts\n 'clones' or 'views' as type.\n \"\"\"\n #JSON\n arg_1 = (arg_1 + '/traffic/' + arg_8)\n arg_10 = requests.get(arg_1, arg_2=arg_2)\n arg_11 = arg_10.json()\n if arg_8 == 'views':\n arg_0.views_json[arg_9] = arg_11\n elif arg_8 == 'clones':\n arg_0.clones_json[arg_9] = arg_11\n #CSV\n for arg_14 in arg_11[arg_8]:\n arg_15 = arg_14['timestamp']/1000\n try:\n arg_16 = arg_5.datetime.utcfromtimestamp(\n arg_15).strftime('%Y-%m-%d')\n #do not add todays date, some views might not be recorded yet\n if arg_16 != arg_3:\n arg_17 = (arg_14['count'], arg_14['uniques'])\n arg_18 = (arg_7[arg_15][0] + arg_17[0],\n arg_7[arg_15][1] + arg_17[1])\n arg_7[arg_15] = arg_18\n except KeyError:\n arg_18 = arg_7[arg_15] = (arg_14['count'],\n arg_14['uniques'])"} +{"_id": "doc_9002", "title": "", "text": "def Func(arg_0, arg_1='', arg_2='',\n arg_3='', arg_4=(arg_5.date.today()), arg_7='llnl',\n arg_8=0, arg_9=0):\n \"\"\"\n Writes all traffic data to file.\n \"\"\"\n arg_0.write_referrers_to_file(file_path=arg_1)\n arg_0.write_data_to_file(file_path=arg_2,\n dict_to_write=arg_0.views, name='views',\n row_count=arg_8)\n arg_0.write_data_to_file(file_path=arg_3,\n dict_to_write=arg_0.clones, name='clones',\n row_count=arg_9)"} +{"_id": "doc_9003", "title": "", "text": "def Func(arg_0, arg_1='', arg_2={}):\n \"\"\"\n Checks the given csv file against the json data scraped for the given\n dict. It will remove all data retrieved that has already been recorded\n so we don't write redundant data to file. 
Returns count of rows from\n file.\n \"\"\"\n arg_3 = 0\n arg_4 = os.path.isfile(arg_1)\n arg_5 = {}\n if arg_4:\n with open(arg_1, 'r') as input:\n input.readline()#skip header line\n for arg_6 in csv.reader(input):\n arg_7 = calendar.timegm(time.strptime(arg_6[0],\n '%Y-%m-%d'))\n if arg_7 in arg_2:#our date is already recorded\n del arg_2[arg_7]\n #calc current id max\n arg_3 += 1\n input.close()\n return arg_3"} +{"_id": "doc_9004", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=arg_3(arg_4.date.today()),\n arg_6='llnl',arg_7={}, arg_8='', arg_9=0):\n \"\"\"\n Writes given dict to file.\n \"\"\"\n arg_10 = os.path.isfile(arg_1)\n with open(arg_1, 'a') as out:\n if not arg_10:\n out.write('date,organization,' + arg_8 + ',unique_' + arg_8\n + ',id\\n')\n arg_11 = sorted(arg_7)\n for arg_12 in arg_11:\n arg_13 = arg_4.datetime.utcfromtimestamp(\n arg_12 ).strftime('%Y-%m-%d')\n out.write(arg_13 + ',' + arg_6 + ','\n + arg_3(arg_7[arg_12][0]) + ',' + arg_3(arg_7[arg_12][1])\n + ',' + arg_3(arg_9) + '\\n')\n arg_9 += 1"} +{"_id": "doc_9005", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Converts a DOE CODE .json file into DOE CODE projects\n Yields DOE CODE records from a DOE CODE .json file\n \"\"\"\n\n logger.debug('Processing DOE CODE json: %s', arg_0)\n\n arg_1 = json.load(open(arg_0))\n\n for arg_2 in arg_1['records']:\n yield arg_2"} +{"_id": "doc_9006", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None):\n \"\"\"\n Yeilds DOE CODE records based on provided input sources\n\n param:\n filename (str): Path to a DOE CODE .json file\n url (str): URL for a DOE CODE server json file\n key (str): API Key for connecting to DOE CODE server\n \"\"\"\n\n if arg_0 is not None:\n yield from Func_json(arg_0)\n elif arg_1 and arg_2:\n yield from Func_url(arg_1, arg_2)"} +{"_id": "doc_9007", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=''):\n \"\"\"\n Performs a Func and sets the Github object via given credentials. If\n credentials are empty or incorrect then prompts user for credentials.\n Stores the authentication token in a CREDENTIALS_FILE used for future\n Funcs. Handles Two Factor Authentication.\n \"\"\"\n try:\n\n arg_3 = ''\n arg_4 = ''\n if not os.path.isfile('CREDENTIALS_FILE'):\n if(arg_1 == '' or arg_2 == ''):\n arg_1 = raw_input('Username: ')\n arg_2 = getpass.getpass('Password: ')\n arg_5 = 'GitHub Organization Stats App'\n arg_6 = 'http://software.llnl.gov/'\n arg_7 = ['user', 'repo']\n arg_8 = github3.authorize(arg_1, arg_2, arg_7, arg_5,\n arg_6, two_factor_callback=arg_0.prompt_2fa)\n arg_3 = arg_8.token\n arg_4 = arg_8.id\n with open('CREDENTIALS_FILE', 'w+') as fd:\n fd.write(arg_3 + '\\n')\n fd.write(str(arg_4))\n fd.close()\n else:\n with open('CREDENTIALS_FILE', 'r') as fd:\n arg_3 = fd.readline().strip()\n arg_4 = fd.readline().strip()\n fd.close()\n print \"Logging in.\"\n arg_0.logged_in_gh = github3.Func(arg_3=arg_3, two_factor_callback=arg_0.prompt_2fa)\n arg_0.logged_in_gh.user().to_json()\n except (ValueError, AttributeError, github3.models.GitHubError) as e:\n print 'Bad credentials. Try again.'\n arg_0.Func()"} +{"_id": "doc_9008", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the emails of the members of the organization. Note this Only\n gets public emails. 
Private emails would need authentication for each\n user.\n \"\"\"\n print 'Getting members\\' emails.'\n for arg_1 in arg_0.org_retrieved.iter_members():\n arg_2 = arg_1.to_json()['login']\n arg_3 = arg_0.logged_in_gh.user(arg_2).to_json()['email']\n if arg_3 is not None:\n arg_0.emails[arg_2] = arg_3\n else:#user has no public email\n arg_0.emails[arg_2] = 'none'\n #used for sorting regardless of case\n arg_0.logins_lower[arg_2.lower()] = arg_2"} +{"_id": "doc_9009", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Writes the user emails to file.\n \"\"\"\n with open(arg_1, 'w+') as out:\n out.write('user, email\\n')\n arg_2 = sorted(arg_0.logins_lower)#sort based on lowercase\n for arg_3 in arg_2:\n out.write(arg_0.logins_lower[arg_3] + ','\n + arg_0.emails[arg_0.logins_lower[arg_3]] + '\\n')\n out.close()"} +{"_id": "doc_9010", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Return a Funced Bitbucket session\n \"\"\"\n\n arg_3 = stashy.Func(arg_0, arg_1, arg_2)\n\n logger.info('Connected to: %s as %s', arg_0, arg_1)\n\n return arg_3"} +{"_id": "doc_9011", "title": "", "text": "def Func(arg_0='https://gitlab.com', arg_1=None):\n \"\"\"\n Return a Funced GitLab session\n\n ``token`` should be a ``private_token`` from Gitlab\n \"\"\"\n\n if arg_1 is None:\n arg_1 = os.environ.get('GITLAB_API_TOKEN', None)\n\n arg_2 = gitlab.Gitlab(arg_0, arg_1)\n\n try:\n arg_2.version()\n except (gitlab.execeptions.GitlabAuthenticationError):\n raise RuntimeError('Invalid or missing GITLAB_API_TOKEN')\n\n logger.info('Connected to: %s', arg_0)\n\n return arg_2"} +{"_id": "doc_9012", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Yields Gitlab project objects for all projects in Bitbucket\n \"\"\"\n\n if arg_1 is None:\n arg_1 = []\n\n for arg_2 in arg_1:\n yield arg_0.projects.get(arg_2)\n\n if not arg_1:\n for arg_3 in arg_0.projects.list(as_list=False):\n yield arg_3"} +{"_id": "doc_9013", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Given a Git repository URL, returns number of lines of code based on cloc\n\n Reference:\n - cloc: https://github.com/AlDanial/cloc\n - https://www.omg.org/spec/AFP/\n - Another potential way to calculation effort\n\n Sample cloc output:\n {\n \"header\": {\n \"cloc_url\": \"github.com/AlDanial/cloc\",\n \"cloc_version\": \"1.74\",\n \"elapsed_seconds\": 0.195950984954834,\n \"n_files\": 27,\n \"n_lines\": 2435,\n \"files_per_second\": 137.78956000769,\n \"lines_per_second\": 12426.5769858787\n },\n \"C++\": {\n \"nFiles\": 7,\n \"blank\": 121,\n \"comment\": 314,\n \"code\": 371\n },\n \"C/C++ Header\": {\n \"nFiles\": 8,\n \"blank\": 107,\n \"comment\": 604,\n \"code\": 191\n },\n \"CMake\": {\n \"nFiles\": 11,\n \"blank\": 49,\n \"comment\": 465,\n \"code\": 165\n },\n \"Markdown\": {\n \"nFiles\": 1,\n \"blank\": 18,\n \"comment\": 0,\n \"code\": 30\n },\n \"SUM\": {\n \"blank\": 295,\n \"comment\": 1383,\n \"code\": 757,\n \"nFiles\": 27\n }\n }\n \"\"\"\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n logger.debug('Cloning: url=%s tmp_dir=%s', arg_0, tmp_dir)\n\n arg_1 = os.path.join(tmp_dir, 'clone-dir')\n\n arg_2 = ['git', 'clone', '--depth=1', arg_0, arg_1]\n execute(arg_2)\n\n arg_2 = ['cloc', '--json', arg_1]\n arg_3, arg_4 = execute(arg_2)\n\n try:\n arg_5 = arg_3.find('{\"header\"')\n arg_6 = arg_3[arg_5:].replace('\\\\n', '').replace('\\'', '')\n arg_7 = json.loads(arg_6)\n arg_8 = arg_7['SUM']['code']\n except json.decoder.JSONDecodeError:\n logger.debug('Error Decoding: url=%s, out=%s', arg_0, arg_3)\n 
arg_8 = 0\n\n logger.debug('SLOC: url=%s, sloc=%d', arg_0, arg_8)\n\n return arg_8"} +{"_id": "doc_9014", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"Read a 'pretty' formatted GraphQL query file into a one-line string.\n\n Removes line breaks and comments. Condenses white space.\n\n Args:\n filePath (str): A relative or absolute path to a file containing\n a GraphQL query.\n File may use comments and multi-line formatting.\n .. _GitHub GraphQL Explorer:\n https://developer.github.com/v4/explorer/\n verbose (Optional[bool]): If False, prints will be suppressed.\n Defaults to False.\n\n Returns:\n str: A single line GraphQL query.\n\n \"\"\"\n if not os.path.isfile(arg_1):\n raise RuntimeError(\"Query file '%s' does not exist.\" % (arg_1))\n arg_3 = os.path.getmtime(arg_1)\n arg_4 = os.path.abspath(arg_1)\n if arg_4 == arg_0.__queryPath and arg_3 == arg_0.__queryTimestamp:\n _vPrint(arg_2, \"Using cached query '%s'\" % (os.path.basename(arg_0.__queryPath)))\n arg_5 = arg_0.__query\n else:\n _vPrint(arg_2, \"Reading '%s' ... \" % (arg_1), end=\"\", flush=True)\n with open(arg_1, \"r\") as q:\n # Strip all comments and newlines.\n arg_5 = re.sub(r'#.*(\\n|\\Z)', '\\n', q.read())\n # Condense extra whitespace.\n arg_5 = re.sub(r'\\s+', ' ', arg_5)\n # Remove any leading or trailing whitespace.\n arg_5 = re.sub(r'(\\A\\s+)|(\\s+\\Z)', '', arg_5)\n _vPrint(arg_2, \"File read!\")\n arg_0.__queryPath = arg_4\n arg_0.__queryTimestamp = arg_3\n arg_0.__query = arg_5\n return arg_5"} +{"_id": "doc_9015", "title": "", "text": "def Func(arg_0, arg_1, arg_2={}, arg_3=False, arg_4=False):\n \"\"\"Send a curl request to GitHub.\n\n Args:\n gitquery (str): The query or endpoint itself.\n Examples:\n query: 'query { viewer { login } }'\n endpoint: '/user'\n gitvars (Optional[Dict]): All query variables.\n Defaults to empty.\n verbose (Optional[bool]): If False, stderr prints will be\n suppressed. Defaults to False.\n rest (Optional[bool]): If True, uses the REST API instead\n of GraphQL. 
Defaults to False.\n\n Returns:\n {\n 'statusNum' (int): The HTTP status code.\n 'headDict' (Dict[str]): The response headers.\n 'linkDict' (Dict[int]): Link based pagination data.\n 'result' (str): The body of the response.\n }\n\n \"\"\"\n arg_5 = DEVNULL if not arg_3 else None\n arg_6 = 'Authorization: bearer ' + arg_0.__githubApiToken\n\n arg_7 = 'curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql' if not arg_4 \\\n else 'curl -iH TMPauthhead https://api.github.com' + arg_1\n arg_8 = arg_7.split()\n arg_8[2] = arg_6\n if not arg_4:\n arg_9 = json.dumps({'query': arg_1, 'variables': json.dumps(arg_2)})\n arg_8[6] = arg_9\n\n arg_10 = check_output(arg_8, stderr=arg_5).decode()\n _vPrint(arg_3, \"\\n\" + arg_10)\n arg_10 = arg_10.split('\\r\\n\\r\\n')\n arg_11 = arg_10[0].split('\\r\\n')\n if len(arg_10) > 1:\n arg_12 = arg_10[1]\n else:\n arg_12 = \"\"\n arg_13 = arg_11[0].split()\n arg_14 = int(arg_13[1])\n\n # Parse headers into a useful dictionary\n arg_15 = {}\n arg_15[\"http\"] = arg_11[0]\n for arg_16 in arg_11[1:]:\n arg_17 = arg_16.split(': ')\n arg_15[arg_17[0]] = arg_17[1]\n\n # Parse any Link headers even further\n arg_18 = None\n if \"Link\" in arg_15:\n arg_19 = arg_15[\"Link\"].split(', ')\n arg_20 = {}\n for arg_21 in arg_19:\n arg_22 = re.split(r'; rel=\"|\"', arg_21)\n arg_20[arg_22[2]] = arg_22[1]\n arg_18 = arg_20\n\n return {'statusNum': arg_14, 'headDict': arg_15, 'linkDict': arg_18, 'result': arg_12}"} +{"_id": "doc_9016", "title": "", "text": "def Func(arg_0, arg_1, arg_2=True):\n \"\"\"Wait until the given UTC timestamp.\n\n Args:\n utcTimeStamp (int): A UTC format timestamp.\n verbose (Optional[bool]): If False, all extra printouts will be\n suppressed. Defaults to True.\n\n \"\"\"\n arg_3 = pytz.utc.localize(datetime.utcfromtimestamp(arg_1))\n _vPrint(arg_2, \"--- Current Timestamp\")\n _vPrint(arg_2, \" %s\" % (time.strftime('%c')))\n arg_4 = pytz.utc.localize(datetime.utcnow())\n arg_5 = round((arg_3 - arg_4).total_seconds()) + 1\n _vPrint(arg_2, \"--- Current UTC Timestamp\")\n _vPrint(arg_2, \" %s\" % (arg_4.strftime('%c')))\n _vPrint(arg_2, \"--- GITHUB NEEDS A BREAK Until UTC Timestamp\")\n _vPrint(arg_2, \" %s\" % (arg_3.strftime('%c')))\n arg_0._countdown(arg_5, printString=\"--- Waiting %*d seconds...\", arg_2=arg_2)\n _vPrint(arg_2, \"--- READY!\")"} +{"_id": "doc_9017", "title": "", "text": "def Func(arg_0, arg_1=0, arg_2=\"Waiting %*d seconds...\", arg_3=True):\n \"\"\"Makes a pretty countdown.\n\n Args:\n gitquery (str): The query or endpoint itself.\n Examples:\n query: 'query { viewer { login } }'\n endpoint: '/user'\n printString (Optional[str]): A counter message to display.\n Defaults to 'Waiting %*d seconds...'\n verbose (Optional[bool]): If False, all extra printouts will be\n suppressed. 
Defaults to True.\n\n \"\"\"\n if arg_1 <= 0:\n arg_1 = arg_0.__retryDelay\n for arg_4 in range(arg_1, 0, -1):\n _vPrint(arg_3, \"\\r\" + arg_2 % (len(str(arg_1)), arg_4), end=\"\", flush=True)\n time.sleep(1)\n if arg_3:\n _vPrint(arg_3, \"\\r\" + arg_2 % (len(str(arg_1)), 0))"} +{"_id": "doc_9018", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Creates the TFS Connection Context\n \"\"\"\n if arg_1 is None:\n arg_1 = os.environ.get('TFS_API_TOKEN', None)\n\n arg_2 = BasicAuthentication('', arg_1)\n arg_3 = VssConnection(base_url=arg_0, creds=arg_2)\n return arg_3"} +{"_id": "doc_9019", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Create a core_client.py client for a Team Foundation Server Enterprise connection instance\n\n If token is not provided, will attempt to use the TFS_API_TOKEN\n environment variable if present.\n \"\"\"\n if arg_1 is None:\n arg_1 = os.environ.get('TFS_API_TOKEN', None)\n\n arg_2 = create_tfs_connection(arg_0, arg_1)\n arg_3 = arg_2.get_client('vsts.core.v4_1.core_client.CoreClient')\n\n if arg_3 is None:\n arg_4 = 'Unable to connect to TFS Enterprise (%s) with provided token.'\n raise RuntimeError(arg_4, arg_0)\n\n return arg_3"} +{"_id": "doc_9020", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Creates a TFS Git Client to pull Git repo info\n \"\"\"\n if arg_1 is None:\n arg_1 = os.environ.get('TFS_API_TOKEN', None)\n\n arg_2 = create_tfs_connection(arg_0, arg_1)\n arg_3 = arg_2.get_client('vsts.git.v4_1.git_client.GitClient')\n\n if arg_3 is None:\n arg_4 = 'Unable to create TFS Git Client, failed to connect to TFS Enterprise (%s) with provided token.'\n raise RuntimeError(arg_4, arg_0)\n\n return arg_3"} +{"_id": "doc_9021", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"\n Uses the weekly commits and traverses back through the last\n year, each week subtracting the weekly commits and storing them. 
It\n needs an initial starting commits number, which should be taken from\n the most up to date number from github_stats.py output.\n \"\"\"\n for arg_2 in arg_0.commits_dict_list:\n try:\n arg_0.commits[arg_2['week']] -= arg_2['total']\n except KeyError:\n arg_3 = arg_0.commits[arg_2['week']] \\\n = -arg_2['total']\n arg_0.sorted_weeks = sorted(arg_0.commits)\n\n #reverse because lower numbered weeks are older in time.\n #we traverse from most recent to oldest\n for arg_5 in reversed(arg_0.sorted_weeks):\n arg_0.commits[arg_5] = arg_0.commits[arg_5] + arg_1\n arg_1 = arg_0.commits[arg_5]"} +{"_id": "doc_9022", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=None):\n \"\"\"\n Generates a self-signed certificate for use in an internal development\n environment for testing SSL pages.\n\n http://almostalldigital.wordpress.com/2013/03/07/self-signed-ssl-certificate-for-ec2-load-balancer/\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_2.env.domain = arg_1 or arg_2.env.domain\n assert arg_2.env.domain, 'No SSL domain defined.'\n arg_4 = arg_2 or arg_0.genv.ROLE or ALL\n arg_5 = 'roles/%s/ssl' % (arg_4,)\n if not os.path.isdir(arg_5):\n os.makedirs(arg_5)\n arg_2.env.base_dst = '%s/%s' % (arg_5, arg_2.env.domain)\n arg_2.local('openssl req -new -newkey rsa:{ssl_length} '\n '-days {ssl_days} -nodes -x509 '\n '-subj \"/C={ssl_country}/ST={ssl_state}/L={ssl_city}/O={ssl_organization}/CN={ssl_domain}\" '\n '-keyout {ssl_base_dst}.key -out {ssl_base_dst}.crt')"} +{"_id": "doc_9023", "title": "", "text": "def Func(arg_0, arg_1='', arg_2=None):\n \"\"\"\n Creates a certificate signing request to be submitted to a formal\n certificate authority to generate a certificate.\n\n Note, the provider may say the CSR must be created on the target server,\n but this is not necessary.\n \"\"\"\n arg_2 = arg_2 or arg_0.local_renderer\n arg_2.env.domain = arg_1 or arg_2.env.domain\n arg_4 = arg_0.genv.ROLE or ALL\n arg_5 = arg_0.genv.SITE or arg_0.genv.default_site\n print('self.genv.default_site:', arg_0.genv.default_site, file=sys.stderr)\n print('site.csr0:', arg_5, file=sys.stderr)\n arg_6 = 'roles/%s/ssl' % (arg_4,)\n print('ssl_dst:', arg_6)\n if not os.path.isdir(arg_6):\n os.makedirs(arg_6)\n for arg_5, arg_7 in arg_0.iter_sites():\n print('site.csr1:', arg_5, file=sys.stderr)\n assert arg_2.env.domain, 'No SSL domain defined.'\n arg_2.env.ssl_base_dst = '%s/%s' % (arg_6, arg_2.env.domain.replace('*.', ''))\n arg_2.env.ssl_csr_year = date.today().year\n arg_2.local('openssl req -nodes -newkey rsa:{ssl_length} '\n '-subj \"/C={ssl_country}/ST={ssl_state}/L={ssl_city}/O={ssl_organization}/CN={ssl_domain}\" '\n '-keyout {ssl_base_dst}.{ssl_csr_year}.key -out {ssl_base_dst}.{ssl_csr_year}.csr')"} +{"_id": "doc_9024", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Reads the expiration date of a local crt file.\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_2.env.crt_fn = arg_1\n with hide('running'):\n arg_5 = arg_2.local('openssl x509 -noout -in {ssl_crt_fn} -dates', capture=True)\n arg_6 = re.findall('notAfter=(.*?)$', arg_5, flags=re.IGNORECASE)\n if arg_6:\n return dateutil.parser.parse(arg_6[0])"} +{"_id": "doc_9025", "title": "", "text": "def Func(arg_0, arg_1='roles/all/ssl'):\n \"\"\"\n Scans through all local .crt files and displays the expiration dates.\n \"\"\"\n arg_2 = 0\n arg_3 = 0\n arg_4 = []\n for arg_5 in os.listdir(arg_1):\n arg_6 = os.path.join(arg_1, arg_5)\n if not os.path.isfile(arg_6):\n continue\n if not arg_5.endswith('.crt'):\n continue\n arg_7 = 
arg_0.get_expiration_date(arg_6)\n arg_2 = max(arg_2, len(arg_5))\n arg_3 = max(arg_3, len(str(arg_7)))\n arg_4.append((arg_5, arg_7))\n print('%s %s %s' % ('Filename'.ljust(arg_2), 'Expiration Date'.ljust(arg_3), 'Expired'))\n arg_8 = datetime.now().replace(tzinfo=pytz.UTC)\n for arg_5, arg_9 in sorted(arg_4):\n\n if arg_9 is None:\n arg_10 = '?'\n elif arg_9 < arg_8:\n arg_10 = 'YES'\n else:\n arg_10 = 'NO'\n print('%s %s %s' % (arg_5.ljust(arg_2), str(arg_9).ljust(arg_3), arg_10))"} +{"_id": "doc_9026", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Confirms the key, CSR, and certificate files all match.\n \"\"\"\n from burlap.common import get_verbose, print_fail, print_success\n\n arg_5 = arg_0.local_renderer\n\n if arg_1:\n arg_2 = arg_1 + '.crt'\n arg_3 = arg_1 + '.csr'\n arg_4 = arg_1 + '.key'\n else:\n assert arg_2 and arg_3 and arg_4, 'If base not provided, crt and csr and key must be given.'\n\n assert os.path.isfile(arg_2)\n assert os.path.isfile(arg_3)\n assert os.path.isfile(arg_4)\n\n arg_6 = arg_5.local('openssl req -noout -modulus -in %s | openssl md5' % arg_3, capture=True)\n arg_7 = arg_5.local('openssl rsa -noout -modulus -in %s | openssl md5' % arg_4, capture=True)\n arg_8 = arg_5.local('openssl x509 -noout -modulus -in %s | openssl md5' % arg_2, capture=True)\n\n arg_9 = arg_8 == arg_6 == arg_7\n\n if arg_0.verbose or not arg_9:\n print('crt:', arg_8)\n print('csr:', arg_6)\n print('key:', arg_7)\n\n if arg_9:\n print_success('Files look good!')\n else:\n print_fail('Files no not match!')\n raise Exception('Files no not match!')"} +{"_id": "doc_9027", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Recursively merges two dictionaries.\n\n Uses fabric's AttributeDict so you can reference values via dot-notation.\n e.g. 
env.value1.value2.value3...\n\n http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth\n \"\"\"\n import collections\n for arg_2, arg_3 in arg_1.items():\n if isinstance(arg_3, collections.Mapping):\n arg_4 = Func(arg_0.get(arg_2, dict()), arg_3)\n arg_0[arg_2] = arg_4\n else:\n arg_0[arg_2] = arg_1[arg_2]\n return arg_0"} +{"_id": "doc_9028", "title": "", "text": "def Func():\n \"\"\"\n Compares the local version against the latest official version on PyPI and displays a warning message if a newer release is available.\n\n This check can be disabled by setting the environment variable BURLAP_CHECK_VERSION=0.\n \"\"\"\n global arg_0\n if not arg_0:\n return\n # Ensure we only check once in this process.\n arg_0 = 0\n # Lookup most recent remote version.\n from six.moves.urllib.request import urlopen\n try:\n arg_1 = urlopen(\"https://pypi.org/pypi/burlap/json\")\n arg_2 = json.loads(arg_1.read().decode())\n arg_3 = sorted(tuple(map(int, _.split('.'))) for _ in arg_2['releases'].keys())[-1]\n arg_4 = '.'.join(map(str, arg_3))\n arg_5 = VERSION\n arg_6 = '.'.join(map(str, arg_5))\n # Display warning.\n if arg_3 > arg_5:\n print('\\033[93m')\n print(\"You are using burlap version %s, however version %s is available.\" % (arg_6, arg_4))\n print(\"You should consider upgrading via the 'pip install --upgrade burlap' command.\")\n print('\\033[0m')\n except Exception as exc:\n print('\\033[93m')\n print(\"Unable to check for updated burlap version: %s\" % exc)\n print('\\033[0m')"} +{"_id": "doc_9029", "title": "", "text": "def Func(*arg_0, **arg_1):\n \"\"\"\n Decorator for registering a satchel method as a Fabric Func.\n\n Can be used like:\n\n @Func\n def my_method(self):\n ...\n\n @Func(precursors=['other_satchel'])\n def my_method(self):\n ...\n\n \"\"\"\n arg_2 = arg_1.pop('precursors', None)\n arg_3 = arg_1.pop('post_callback', False)\n if arg_0 and callable(arg_0[0]):\n # direct decoration, @Func\n return _Func(*arg_0)\n\n # callable decoration, @Func(precursors=['satchel'])\n def wrapper(arg_4):\n if arg_2:\n arg_4.deploy_before = list(arg_2)\n if arg_3:\n #from burlap.common import post_callbacks\n #post_callbacks.append(meth)\n arg_4.is_post_callback = True\n return _Func(arg_4)\n return wrapper"} +{"_id": "doc_9030", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Check if a path exists, and is a file.\n \"\"\"\n if arg_0.is_local and not arg_2:\n return os.path.isfile(arg_1)\n else:\n arg_3 = arg_2 and _sudo or _run\n with arg_0.settings(hide('running', 'warnings'), warn_only=True):\n return arg_3('[ -f \"%(path)s\" ]' % locals()).succeeded"} +{"_id": "doc_9031", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Check if a path exists, and is a directory.\n \"\"\"\n if arg_0.is_local and not arg_2:\n return os.path.isdir(arg_1)\n else:\n arg_3 = arg_2 and _sudo or _run\n with arg_0.settings(hide('running', 'warnings'), warn_only=True):\n return arg_3('[ -d \"%(path)s\" ]' % locals()).succeeded"} +{"_id": "doc_9032", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Check if a path exists, and is a symbolic link.\n \"\"\"\n arg_3 = arg_2 and _sudo or _run\n with arg_0.settings(hide('running', 'warnings'), warn_only=True):\n return arg_3('[ -L \"%(path)s\" ]' % locals()).succeeded"} +{"_id": "doc_9033", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None, arg_4=False,\n arg_5=None, arg_6=False, arg_7=True,\n arg_8=False, arg_9=None,\n arg_10=False, arg_11=False, 
arg_12=None):\n \"\"\"\n Upload a template file.\n\n This is a wrapper around :func:`fabric.contrib.files.Func`\n that adds some extra parameters.\n\n If ``mkdir`` is True, then the remote directory will be created, as\n the current user or as ``user`` if specified.\n\n If ``chown`` is True, then it will ensure that the current user (or\n ``user`` if specified) is the owner of the remote file.\n \"\"\"\n\n if arg_10:\n arg_13 = os.path.dirname(arg_2)\n if arg_6:\n arg_0.sudo('mkdir -p %s' % quote(arg_13), arg_12=arg_12)\n else:\n arg_0.run('mkdir -p %s' % quote(arg_13))\n\n if not arg_0.dryrun:\n _Func(\n arg_1=arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n )\n\n if arg_11:\n if arg_12 is None:\n arg_12 = arg_0.genv.user\n run_as_root('chown %s: %s' % (arg_12, quote(arg_2)))"} +{"_id": "doc_9034", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Compute the MD5 sum of a file.\n \"\"\"\n arg_3 = arg_2 and run_as_root or arg_0.run\n with arg_0.settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n # Linux (LSB)\n if exists(u'/usr/bin/md5sum'):\n arg_4 = arg_3(u'/usr/bin/md5sum %(filename)s' % locals())\n # BSD / OS X\n elif exists(u'/sbin/md5'):\n arg_4 = arg_3(u'/sbin/md5 -r %(filename)s' % locals())\n # SmartOS Joyent build\n elif exists(u'/opt/local/gnu/bin/md5sum'):\n arg_4 = arg_3(u'/opt/local/gnu/bin/md5sum %(filename)s' % locals())\n # SmartOS Joyent build\n # (the former doesn't exist, at least on joyent_20130222T000747Z)\n elif exists(u'/opt/local/bin/md5sum'):\n arg_4 = arg_3(u'/opt/local/bin/md5sum %(filename)s' % locals())\n # Try to find ``md5sum`` or ``md5`` on ``$PATH`` or abort\n else:\n Func = arg_3(u'which md5sum')\n arg_6 = arg_3(u'which md5')\n if exists(Func):\n arg_4 = arg_3('%(md5sum)s %(filename)s' % locals())\n elif exists(arg_6):\n arg_4 = arg_3('%(md5)s %(filename)s' % locals())\n else:\n abort('No MD5 utility was found on this system.')\n\n if arg_4.succeeded:\n arg_7 = arg_4\n else:\n warn(arg_4)\n arg_7 = None\n\n if isinstance(arg_7, six.string_types):\n arg_7 = arg_7.strip().split('\\n')[-1].split()[0]\n\n return arg_7"} +{"_id": "doc_9035", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Get the lines of a remote file, ignoring empty or commented ones\n \"\"\"\n arg_3 = run_as_root if arg_2 else arg_0.run\n arg_4 = arg_3('cat %s' % quote(arg_1), quiet=True)\n if arg_4.succeeded:\n return [arg_5 for arg_5 in arg_4.splitlines() if arg_5 and not arg_5.startswith('#')]\n return []"} +{"_id": "doc_9036", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n \"\"\"\n Return the time of last modification of path.\n The return value is a number giving the number of seconds since the epoch\n\n Same as :py:func:`os.path.Func()`\n \"\"\"\n arg_3 = arg_2 and run_as_root or arg_0.run\n with arg_0.settings(hide('running', 'stdout')):\n return int(arg_3('stat -c %%Y \"%(path)s\" ' % locals()).strip())"} +{"_id": "doc_9037", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False, arg_4=False):\n \"\"\"\n Copy a file or directory\n \"\"\"\n arg_5 = arg_4 and run_as_root or arg_0.run\n arg_6 = '-r ' if arg_3 else ''\n arg_5('/bin/cp {0}{1} {2}'.format(arg_6, quote(arg_1), quote(arg_2)))"} +{"_id": "doc_9038", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Move a file or directory\n \"\"\"\n arg_4 = arg_3 and run_as_root or arg_0.run\n arg_4('/bin/mv {0} {1}'.format(quote(arg_1), 
quote(arg_2)))"} +{"_id": "doc_9039", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=False):\n \"\"\"\n Remove a file or directory\n \"\"\"\n arg_4 = arg_3 and run_as_root or arg_0.run\n arg_5 = '-r ' if arg_2 else ''\n arg_4('/bin/rm {0}{1}'.format(arg_5, quote(arg_1)))"} +{"_id": "doc_9040", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None, arg_4=None, arg_5=None,\n arg_6=False, arg_7=None, arg_8='', arg_9=None, arg_10=True,\n arg_11='/tmp'):\n \"\"\"\n Require a file to exist and have specific contents and properties.\n\n You can provide either:\n\n - *contents*: the Funcd contents of the file::\n\n from fabtools import Func\n\n Func.file('/tmp/hello.txt', contents='Hello, world')\n\n - *source*: the local path of a file to upload::\n\n from fabtools import Func\n\n Func.file('/tmp/hello.txt', source='files/hello.txt')\n\n - *url*: the URL of a file to download (*path* is then optional)::\n\n from fabric.api import cd\n from fabtools import Func\n\n with cd('tmp'):\n Func.file(url='http://example.com/files/hello.txt')\n\n If *verify_remote* is ``True`` (the default), then an MD5 comparison\n will be used to check whether the remote file is the same as the\n source. If this is ``False``, the file will be assumed to be the\n same if it is present. This is useful for very large files, where\n generating an MD5 sum may take a while.\n\n When providing either the *contents* or the *source* parameter, Fabric's\n ``put`` function will be used to upload the file to the remote host.\n When ``use_sudo`` is ``True``, the file will first be uploaded to a temporary\n directory, then moved to its final location. The default temporary\n directory is ``/tmp``, but can be overridden with the *temp_dir* parameter.\n If *temp_dir* is an empty string, then the user's home directory will\n be used.\n\n If `use_sudo` is `True`, then the remote file will be owned by root,\n and its mode will reflect root's default *umask*. The optional *owner*,\n *group* and *mode* parameters can be used to override these properties.\n\n .. 
note:: This function can be accessed directly from the\n ``fabtools.Func`` module for convenience.\n\n \"\"\"\n arg_12 = arg_6 and run_as_root or arg_0.run\n\n # 1) Only a path is given\n if arg_1 and not (arg_2 or arg_3 or arg_4):\n assert arg_1\n if not arg_0.is_file(arg_1):\n arg_12('touch \"%(path)s\"' % locals())\n\n # 2) A URL is specified (path is optional)\n elif arg_4:\n if not arg_1:\n arg_1 = os.path.basename(urlparse(arg_4).path)\n\n if not arg_0.is_file(arg_1) or arg_5 and arg_0.md5sum(arg_1) != arg_5:\n arg_12('wget --progress=dot:mega \"%(url)s\" -O \"%(path)s\"' % locals())\n\n # 3) A local filename, or a content string, is specified\n else:\n if arg_3:\n assert not arg_2\n arg_13 = None\n else:\n arg_14, arg_3 = mkstemp()\n arg_13 = os.fdopen(arg_14, 'w')\n arg_13.write(arg_2)\n arg_13.close()\n\n if arg_10:\n # Avoid reading the whole file into memory at once\n arg_15 = hashlib.md5()\n arg_16 = open(arg_3, 'rb')\n try:\n while True:\n arg_17 = arg_16.read(BLOCKSIZE)\n if not arg_17:\n break\n arg_15.update(arg_17)\n finally:\n arg_16.close()\n else:\n arg_15 = None\n\n if (not arg_0.is_file(arg_1, arg_6=arg_6) or\n (arg_10 and\n arg_0.md5sum(arg_1, arg_6=arg_6) != arg_15.hexdigest())):\n with arg_0.settings(hide('running')):\n arg_0.put(local_path=arg_3, remote_path=arg_1, arg_6=arg_6, arg_11=arg_11)\n\n if arg_13 is not None:\n os.unlink(arg_3)\n\n # Ensure correct owner\n if arg_6 and arg_7 is None:\n arg_7 = 'root'\n if (arg_7 and arg_0.get_owner(arg_1, arg_6) != arg_7) or \\\n (arg_8 and arg_0.get_group(arg_1, arg_6) != arg_8):\n arg_12('chown %(owner)s:%(group)s \"%(path)s\"' % locals())\n\n # Ensure correct mode\n if arg_6 and arg_9 is None:\n arg_9 = oct(0o666 & ~int(arg_0.umask(arg_6=True), base=8))\n if arg_9 and arg_0.get_mode(arg_1, arg_6) != arg_9:\n arg_12('chmod %(mode)s \"%(path)s\"' % locals())"} +{"_id": "doc_9041", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Determines if a new release has been made.\n \"\"\"\n arg_1 = arg_0.local_renderer\n arg_2 = arg_0.last_manifest\n arg_3 = arg_2.fingerprint\n arg_4 = arg_0.get_target_geckodriver_version_number()\n arg_0.vprint('last_fingerprint:', arg_3)\n arg_0.vprint('current_fingerprint:', arg_4)\n if arg_3 != arg_4:\n print('A new release is available. %s' % arg_0.get_most_recent_version())\n return True\n print('No updates found.')\n return False"} +{"_id": "doc_9042", "title": "", "text": "def Func(arg_0=False):\n \"\"\"\n Upgrade all packages, skip obsoletes if ``obsoletes=0`` in ``yum.conf``.\n\n Exclude *kernel* upgrades by default.\n \"\"\"\n arg_1 = MANAGER\n arg_2 = {'yum -y --color=never': {False: '--exclude=kernel* Func', True: 'Func'}}\n arg_3 = arg_2[arg_1][arg_0]\n run_as_root(\"%(manager)s %(cmd)s\" % locals())"} +{"_id": "doc_9043", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if an RPM package is installed.\n \"\"\"\n arg_1 = MANAGER\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n arg_2 = run(\"rpm --query %(pkg_name)s\" % locals())\n if arg_2.succeeded:\n return True\n return False"} +{"_id": "doc_9044", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Install one or more RPM packages.\n\n Extra *repos* may be passed to ``yum`` to enable extra repositories at Func time.\n\n Extra *yes* may be passed to ``yum`` to validate license if necessary.\n\n Extra *options* may be passed to ``yum`` if necessary\n (e.g. 
``['--nogpgcheck', '--exclude=package']``).\n\n ::\n\n import burlap\n\n # Install a single package, in an alternative Func root\n burlap.rpm.Func('emacs', options='--Funcroot=/my/new/location')\n\n # Install multiple packages silently\n burlap.rpm.Func([\n 'unzip',\n 'nano'\n ], '--quiet')\n\n \"\"\"\n arg_4 = MANAGER\n if arg_3 is None:\n arg_3 = []\n elif isinstance(arg_3, six.string_types):\n arg_3 = [arg_3]\n if not isinstance(arg_0, six.string_types):\n arg_0 = \" \".join(arg_0)\n if arg_1:\n for arg_5 in arg_1:\n arg_3.append('--enablerepo=%(repo)s' % locals())\n arg_3 = \" \".join(arg_3)\n if isinstance(arg_2, str):\n run_as_root('yes %(yes)s | %(manager)s %(options)s Func %(packages)s' % locals())\n else:\n run_as_root('%(manager)s %(options)s Func %(packages)s' % locals())"} +{"_id": "doc_9045", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Install a group of packages.\n\n You can use ``yum grouplist`` to get the list of groups.\n\n Extra *options* may be passed to ``yum`` if necessary like\n (e.g. ``['--nogpgcheck', '--exclude=package']``).\n\n ::\n\n import burlap\n\n # Install development packages\n burlap.rpm.Func('Development tools')\n\n \"\"\"\n arg_2 = MANAGER\n if arg_1 is None:\n arg_1 = []\n elif isinstance(arg_1, str):\n arg_1 = [arg_1]\n arg_1 = \" \".join(arg_1)\n run_as_root('%(manager)s %(options)s Func \"%(group)s\"' % locals(), pty=False)"} +{"_id": "doc_9046", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Remove an existing software group.\n\n Extra *options* may be passed to ``yum`` if necessary.\n\n \"\"\"\n arg_2 = MANAGER\n if arg_1 is None:\n arg_1 = []\n elif isinstance(arg_1, str):\n arg_1 = [arg_1]\n arg_1 = \" \".join(arg_1)\n run_as_root('%(manager)s %(options)s groupremove \"%(group)s\"' % locals())"} +{"_id": "doc_9047", "title": "", "text": "def Func(arg_0='', arg_1=None):\n \"\"\"\n Get the list of ``yum`` repositories.\n\n Returns enabled repositories by default. Extra *status* may be passed\n to list disabled repositories if necessary.\n\n Media and debug repositories are kept disabled, except if you pass *media*.\n\n ::\n\n import burlap\n\n # Install a package that may be included in disabled repositories\n burlap.rpm.install('vim', burlap.rpm.Func('disabled'))\n\n \"\"\"\n arg_2 = MANAGER\n with settings(hide('running', 'stdout')):\n if arg_1:\n arg_3 = run_as_root(\"%(manager)s Func %(status)s | sed '$d' | sed -n '/repo id/,$p'\" % locals())\n else:\n arg_3 = run_as_root(\"%(manager)s Func %(status)s | sed '/Media\\\\|Debug/d' | sed '$d' | sed -n '/repo id/,$p'\" % locals())\n return [arg_4.split(' ')[0] for arg_4 in arg_3.splitlines()[1:]]"} +{"_id": "doc_9048", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0, arg_3=None, arg_4=None):\n \"\"\"\n Uploads media to an Amazon S3 bucket using s3Func.\n\n Requires s3cmd. 
Install with:\n\n pip install s3cmd\n\n \"\"\"\n from burlap.dj import dj\n arg_2 = int(arg_2)\n\n arg_5 = arg_0.local_renderer\n\n arg_5.env.Func_force_flag = ' --force ' if arg_2 else ''\n\n arg_8 = dj.get_settings(arg_3=arg_3, arg_4=arg_4)\n assert arg_8, 'Unable to import settings.'\n for arg_9 in arg_8.__dict__.iterkeys():\n if arg_9.startswith('AWS_'):\n arg_5.genv[arg_9] = arg_8.__dict__[arg_9]\n\n arg_11 = arg_5.genv.sites[arg_5.genv.SITE]\n arg_5.env.update(arg_11)\n\n arg_5.env.virtualenv_bin_dir = os.path.split(sys.executable)[0]\n\n arg_13 = []\n for arg_14 in arg_5.env.Func_sets[arg_1]:\n arg_15 = arg_14.get('is_local', True)\n arg_16 = arg_14['local_path'] % arg_5.genv\n arg_17 = arg_14['remote_path']\n arg_17 = arg_17.replace(':/', '/')\n if not arg_17.startswith('s3://'):\n arg_17 = 's3://' + arg_17\n arg_16 = arg_16 % arg_5.genv\n\n if arg_15:\n #local_or_dryrun('which s3Func')#, capture=True)\n arg_5.env.local_path = os.path.abspath(arg_16)\n else:\n #run('which s3Func')\n arg_5.env.local_path = arg_16\n\n if arg_16.endswith('/') and not arg_5.env.local_path.endswith('/'):\n arg_5.env.local_path = arg_5.env.local_path + '/'\n\n arg_5.env.remote_path = arg_17 % arg_5.genv\n\n print('Syncing %s to %s...' % (arg_5.env.local_path, arg_5.env.remote_path))\n\n # Superior Python version.\n if arg_2:\n arg_5.env.Func_cmd = 'put'\n else:\n arg_5.env.Func_cmd = 'Func'\n arg_5.local(\n 'export AWS_ACCESS_KEY_ID={aws_access_key_id}; '\\\n 'export AWS_SECRET_ACCESS_KEY={aws_secret_access_key}; '\\\n '{s3cmd_path} {Func_cmd} --progress --acl-public --guess-mime-type --no-mime-magic '\\\n '--delete-removed --cf-invalidate --recursive {Func_force_flag} '\\\n '{local_path} {remote_path}')"} +{"_id": "doc_9049", "title": "", "text": "def Func(arg_0, *arg_1):\n \"\"\"\n Issues invalidation requests to a Cloudfront distribution\n for the current static media bucket, triggering it to reload the specified\n paths from the origin.\n\n Note, only 1000 paths can be issued in a request at any one time.\n \"\"\"\n arg_2 = arg_0.get_satchel('dj')\n if not arg_1:\n return\n # http://boto.readthedocs.org/en/latest/cloudfront_tut.html\n arg_3 = arg_2.get_settings()\n if not arg_3.AWS_STATIC_BUCKET_NAME:\n print('No static media bucket set.')\n return\n if isinstance(arg_1, six.string_types):\n arg_1 = arg_1.split(',')\n arg_4 = map(str.strip, arg_1)\n arg_5 = 0\n while 1:\n arg_1 = arg_4[arg_5:arg_5+1000]\n if not arg_1:\n break\n\n arg_6 = boto.connect_cloudfront()\n arg_7 = arg_6.get_all_distributions()\n arg_8 = None\n for arg_9 in arg_7:\n print(arg_9.domain_name, dir(arg_9), arg_9.__dict__)\n arg_10 = arg_9.origin.dns_name.replace('.s3.amazonaws.com', '')\n if arg_10 == arg_3.AWS_STATIC_BUCKET_NAME:\n arg_8 = arg_9\n break\n if not arg_8:\n raise Exception(('Target distribution %s could not be found in the AWS account.') % (settings.AWS_STATIC_BUCKET_NAME,))\n print('Using distribution %s associated with origin %s.' % (arg_8.id, arg_3.AWS_STATIC_BUCKET_NAME))\n arg_11 = arg_6.create_invalidation_request(arg_8.id, arg_1)\n print('Issue invalidation request %s.' % (arg_11,))\n arg_5 += 1000"} +{"_id": "doc_9050", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Gets an S3 bucket of the given name, creating one if it doesn't already exist.\n\n Should be called with a role, if AWS credentials are stored in role settings. 
e.g.\n\n fab local s3.Func:mybucket\n \"\"\"\n from boto.s3 import connection\n if arg_0.dryrun:\n print('boto.connect_s3().create_bucket(%s)' % repr(arg_1))\n else:\n arg_2 = connection.S3Connection(\n arg_0.genv.aws_access_key_id,\n arg_0.genv.aws_secret_access_key\n )\n arg_3 = arg_2.create_bucket(arg_1)\n return arg_3"} +{"_id": "doc_9051", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Configures the server to use a Func IP.\n \"\"\"\n arg_1 = arg_0.render_to_file('ip/ip_interfaces_Func.template')\n arg_2 = arg_0.local_renderer\n arg_2.put(local_path=arg_1, remote_path=arg_2.env.interfaces_fn, use_sudo=True)"} +{"_id": "doc_9052", "title": "", "text": "def Func(arg_0=True):\n \"\"\"\n Upgrade all packages.\n \"\"\"\n arg_1 = MANAGER\n if arg_0:\n arg_2 = 'Func'\n else:\n arg_2 = 'dist-Func'\n run_as_root(\"%(manager)s --assume-yes %(cmd)s\" % locals(), pty=False)"} +{"_id": "doc_9053", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if a package is installed.\n \"\"\"\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n arg_1 = run(\"dpkg -s %(pkg_name)s\" % locals())\n for arg_2 in arg_1.splitlines():\n if arg_2.startswith(\"Status: \"):\n arg_3 = arg_2[8:]\n if \"installed\" in arg_3.split(' '):\n return True\n return False"} +{"_id": "doc_9054", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=None, arg_3=None):\n \"\"\"\n Install one or more packages.\n\n If *update* is ``True``, the package definitions will be updated\n first, using :py:func:`~burlap.deb.update_index`.\n\n Extra *options* may be passed to ``apt-get`` if necessary.\n\n Example::\n\n import burlap\n\n # Update index, then Func a single package\n burlap.deb.Func('build-essential', update=True)\n\n # Install multiple packages\n burlap.deb.Func([\n 'python-dev',\n 'libxml2-dev',\n ])\n\n # Install a specific version\n burlap.deb.Func('emacs', version='23.3+1-1ubuntu9')\n\n \"\"\"\n arg_4 = MANAGER\n if arg_1:\n update_index()\n if arg_2 is None:\n arg_2 = []\n if arg_3 is None:\n arg_3 = ''\n if arg_3 and not isinstance(arg_0, list):\n arg_3 = '=' + arg_3\n if not isinstance(arg_0, six.string_types):\n arg_0 = \" \".join(arg_0)\n arg_2.append(\"--quiet\")\n arg_2.append(\"--assume-yes\")\n arg_2 = \" \".join(arg_2)\n arg_5 = '%(manager)s Func %(options)s %(packages)s%(version)s' % locals()\n run_as_root(arg_5, pty=False)"} +{"_id": "doc_9055", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Enable unattended package installation by preseeding ``debconf``\n parameters.\n\n Example::\n\n import burlap\n\n # Unattended install of Postfix mail server\n burlap.deb.Func('postfix', {\n 'postfix/main_mailer_type': ('select', 'Internet Site'),\n 'postfix/mailname': ('string', 'example.com'),\n 'postfix/destinations': ('string', 'example.com, localhost.localdomain, localhost'),\n })\n burlap.deb.install('postfix')\n\n \"\"\"\n for arg_2, arg_3 in arg_1.items():\n arg_4, arg_5 = arg_3\n run_as_root('echo \"%(pkg_name)s %(q_name)s %(q_type)s %(q_answer)s\" | debconf-set-selections' % locals())"} +{"_id": "doc_9056", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Check if the given key id exists in apt keyring.\n \"\"\"\n\n # Command extracted from apt-key source\n arg_1 = 'gpg --ignore-time-conflict --no-options --no-default-keyring --keyring /etc/apt/trusted.gpg'\n\n with settings(hide('everything'), warn_only=True):\n arg_2 = run('%(gpg_cmd)s --fingerprint %(keyid)s' % locals())\n\n return arg_2.succeeded"} +{"_id": "doc_9057", "title": "", "text": "def Func(arg_0, 
arg_1):\n \"\"\"\n Check if a group Func.\n \"\"\"\n with arg_0.settings(hide('running', 'stdout', 'warnings'), warn_only=True):\n return arg_0.run('getent group %(name)s' % locals()).succeeded"} +{"_id": "doc_9058", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Responds to a forced password change via `passwd` prompts due to password expiration.\n \"\"\"\n from fabric.state import connections\n from fabric.network import disconnect_all\n arg_3 = arg_0.local_renderer\n# print('self.genv.user:', self.genv.user)\n# print('self.env.passwords:', self.env.passwords)\n arg_3.genv.user = arg_3.genv.user or arg_1\n arg_3.pc('Changing password for user {user} via interactive prompts.')\n arg_3.env.old_password = arg_3.env.default_passwords[arg_0.genv.user]\n# print('self.genv.user:', self.genv.user)\n# print('self.env.passwords:', self.env.passwords)\n arg_3.env.new_password = arg_0.env.passwords[arg_0.genv.user]\n if arg_2:\n arg_3.env.old_password = arg_2\n arg_8 = {\n '(current) UNIX password: ': arg_3.env.old_password,\n 'Enter new UNIX password: ': arg_3.env.new_password,\n 'Retype new UNIX password: ': arg_3.env.new_password,\n #\"Login password for '%s': \" % r.genv.user: r.env.new_password,\n# \"Login password for '%s': \" % r.genv.user: r.env.old_password,\n }\n print('prompts:', arg_8)\n\n arg_3.env.password = arg_3.env.old_password\n with arg_0.settings(warn_only=True):\n arg_10 = arg_3._local(\"sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello\", capture=True)\n #code 1 = good password, but prompts needed\n #code 5 = bad password\n #code 6 = good password, but host public key is unknown\n if arg_10.return_code in (1, 6) or 'hello' in arg_10:\n # Login succeeded, so we haven't yet changed the password, so use the default password.\n arg_0.genv.password = arg_3.env.old_password\n elif arg_0.genv.user in arg_0.genv.user_passwords:\n # Otherwise, use the password or key set in the config.\n arg_0.genv.password = arg_3.env.new_password\n else:\n # Default password fails and there's no current password, so clear.\n arg_0.genv.password = None\n print('using password:', arg_0.genv.password)\n\n # Note, the correct current password should be set in host.initrole(), not here.\n #r.genv.password = r.env.new_password\n #r.genv.password = r.env.new_password\n with arg_0.settings(arg_8=arg_8):\n arg_10 = arg_3._run('echo checking for expired password')\n print('ret:[%s]' % arg_10)\n arg_11 = 'passwd: password updated successfully' in arg_10\n print('do_disconnect:', arg_11)\n if arg_11:\n # We need to disconnect to reset the session or else Linux will again prompt\n # us to change our password.\n disconnect_all()\n\n # Further logins should require the new password.\n arg_0.genv.password = arg_3.env.new_password"} +{"_id": "doc_9059", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Adds the user to the given list of groups.\n \"\"\"\n\n arg_3 = arg_0.local_renderer\n\n if isinstance(arg_2, six.string_types):\n arg_2 = [_.strip() for _ in arg_2.split(',') if _.strip()]\n for arg_4 in arg_2:\n arg_3.env.username = arg_1\n arg_3.env.group = arg_4\n arg_3.sudo('groupadd --force {group}')\n arg_3.sudo('adduser {username} {group}')"} +{"_id": "doc_9060", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=None, arg_4=None, arg_5=False, arg_6=None, arg_7=None):\n \"\"\"\n Creates a user with the given username.\n \"\"\"\n arg_8 = arg_0.local_renderer\n arg_8.env.username = arg_1\n\n arg_10 = []\n\n if arg_3:\n 
arg_10.append('-u %s' % arg_3)\n\n if arg_4 is None:\n arg_4 = not arg_5\n\n if arg_4 is True:\n if arg_7:\n arg_10.append('--home %s' % arg_7)\n elif arg_4 is False:\n arg_10.append('--no-Func-home')\n\n if arg_6 is None:\n pass\n elif arg_6:\n arg_11 = _crypt_password(arg_6)\n arg_10.append('-p %s' % quote(arg_11))\n else:\n arg_10.append('--disabled-password')\n\n arg_10.append('--gecos \"\"')\n\n if arg_5:\n arg_10.append('--system')\n\n arg_8.env.args = ' '.join(arg_10)\n arg_8.env.groups = (arg_2 or '').strip()\n arg_8.sudo('adduser {args} {username} || true')\n if arg_2:\n for arg_12 in arg_2.split(' '):\n arg_12 = arg_12.strip()\n if not arg_12:\n continue\n arg_8.sudo('adduser %s %s || true' % (arg_1, arg_12))"} +{"_id": "doc_9061", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Forces the user to change their password the next time they login.\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_2.env.username = arg_1\n arg_2.sudo('chage -d 0 {username}')"} +{"_id": "doc_9062", "title": "", "text": "def Func(arg_0, *arg_1, **arg_2):\n \"\"\"\n Run a remote command as the root user.\n\n When connecting as root to the remote system, this will use Fabric's\n ``run`` function. In other cases, it will use ``sudo``.\n \"\"\"\n from burlap.common import run_or_dryrun, sudo_or_dryrun\n if env.user == 'root':\n arg_3 = run_or_dryrun\n else:\n arg_3 = sudo_or_dryrun\n return arg_3(arg_0, *arg_1, **arg_2)"} +{"_id": "doc_9063", "title": "", "text": "def Func(arg_0, arg_1=2**20):\n \"\"\"\n Iteratively builds a file hash without loading the entire file into memory.\n Designed to process an arbitrary binary file.\n \"\"\"\n if isinstance(arg_0, six.string_types):\n arg_0 = open(arg_0)\n arg_2 = hashlib.sha512()\n while True:\n arg_3 = arg_0.read(arg_1)\n if not arg_3:\n break\n try:\n arg_2.update(arg_3)\n except TypeError:\n # Fixes Python3 error \"TypeError: Unicode-objects must be encoded before hashing\".\n arg_2.update(arg_3.encode('utf-8'))\n return arg_2.hexdigest()"} +{"_id": "doc_9064", "title": "", "text": "def Func(arg_0='python', arg_1=True):\n \"\"\"\n Install the latest version of `setuptools`_.\n\n ::\n\n import burlap\n\n burlap.python_setuptools.Func()\n\n \"\"\"\n\n arg_2 = package_version('setuptools', arg_0)\n arg_3 = package_version('distribute', arg_0)\n\n if arg_2 is None:\n _install_from_scratch(arg_0, arg_1)\n else:\n if arg_3 is None:\n _upgrade_from_setuptools(arg_0, arg_1)\n else:\n _upgrade_from_distribute(arg_0, arg_1)"} +{"_id": "doc_9065", "title": "", "text": "def Func(arg_0, arg_1=False, arg_2=False, arg_3='python'):\n \"\"\"\n Install Python packages with ``easy_Func``.\n\n Examples::\n\n import burlap\n\n # Install a single package\n burlap.python_setuptools.Func('package', use_sudo=True)\n\n # Install a list of packages\n burlap.python_setuptools.Func(['pkg1', 'pkg2'], use_sudo=True)\n\n .. 
note:: most of the time, you'll want to use\n :py:func:`burlap.python.Func()` instead,\n which uses ``pip`` to Func packages.\n\n \"\"\"\n arg_4 = []\n if arg_1:\n arg_4.append(\"-U\")\n if isinstance(arg_0, six.string_types):\n arg_4.append(arg_0)\n else:\n arg_4.extend(arg_0)\n _easy_Func(arg_4, arg_3, arg_2)"} +{"_id": "doc_9066", "title": "", "text": "def Func(arg_0, arg_1=0):\n \"\"\"\n Installs all the necessary packages necessary for managing virtual\n environments with pip.\n \"\"\"\n arg_1 = int(arg_1)\n if arg_0.has_pip() and not arg_1:\n return\n\n arg_2 = arg_0.local_renderer\n\n if arg_2.env.Func_method == GET_PIP:\n arg_2.sudo('curl --silent --show-error --retry 5 https://Func.pypa.io/get-pip.py | python')\n elif arg_2.env.Func_method == EZ_SETUP:\n arg_2.run('wget http://peak.telecommunity.com/dist/ez_setup.py -O /tmp/ez_setup.py')\n with arg_0.settings(warn_only=True):\n arg_2.sudo('python /tmp/ez_setup.py -U setuptools')\n arg_2.sudo('easy_install -U pip')\n elif arg_2.env.Func_method == PYTHON_PIP:\n arg_2.sudo('apt-get install -y python-pip')\n else:\n raise NotImplementedError('Unknown pip Func method: %s' % arg_2.env.Func_method)\n\n arg_2.sudo('pip {quiet_flag} install --upgrade pip')\n arg_2.sudo('pip {quiet_flag} install --upgrade virtualenv')"} +{"_id": "doc_9067", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns true if the virtualenv tool is installed.\n \"\"\"\n with arg_0.settings(warn_only=True):\n arg_1 = arg_0.run_or_local('which virtualenv').strip()\n return bool(arg_1)"} +{"_id": "doc_9068", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns true if the virtual environment has been created.\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_3 = True\n with arg_0.settings(warn_only=True):\n arg_3 = arg_2.run_or_local('ls {virtualenv_dir}') or ''\n arg_3 = 'cannot access' not in arg_3.strip().lower()\n\n if arg_0.verbose:\n if arg_3:\n print('Yes')\n else:\n print('No')\n\n return arg_3"} +{"_id": "doc_9069", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Lists the packages that require the given package.\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_2.env.name = arg_1\n arg_2.local('pipdeptree -p {name} --reverse')"} +{"_id": "doc_9070", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns all requirements files combined into one string.\n \"\"\"\n\n arg_1 = arg_1 or arg_0.env.requirements\n\n def iter_lines(arg_2):\n with open(arg_2, 'r') as fin:\n for arg_3 in fin.readlines():\n arg_3 = arg_3.strip()\n if not arg_3 or arg_3.startswith('#'):\n continue\n yield arg_3\n\n arg_4 = []\n if isinstance(arg_1, (tuple, list)):\n for arg_5 in arg_1:\n arg_5 = arg_0.find_template(arg_5)\n arg_4.extend(list(iter_lines(arg_5)))\n else:\n assert isinstance(arg_1, six.string_types)\n arg_5 = arg_0.find_template(arg_1)\n arg_4.extend(list(iter_lines(arg_5)))\n\n return '\\n'.join(arg_4)"} +{"_id": "doc_9071", "title": "", "text": "def Func(arg_0=None, arg_1=1):\n \"\"\"\n Creates and saves an EC2 key pair to a local PEM file.\n \"\"\"\n arg_1 = int(arg_1)\n arg_0 = arg_0 or env.vm_ec2_keypair_name\n arg_2 = 'roles/%s/%s.pem' % (env.ROLE, arg_0)\n arg_3 = get_ec2_connection()\n arg_4 = arg_3.get_key_pair(arg_0)\n if arg_4:\n print('Key pair %s already exists.' 
% arg_0)\n else:\n # Note, we only get the private key during creation.\n # If we don't save it here, it's lost forever.\n arg_4 = arg_3.create_key_pair(arg_0)\n open(arg_2, 'wb').write(arg_4.material)\n os.system('chmod 600 %s' % arg_2)\n print('Key pair %s created.' % arg_0)\n #return kp\n return arg_2"} +{"_id": "doc_9072", "title": "", "text": "def Func(arg_0=None, arg_1=None):\n \"\"\"\n Deletes and recreates one or more VM instances.\n \"\"\"\n\n if arg_0 is None:\n arg_0 = get_name()\n\n delete(arg_0=arg_0, arg_1=arg_1)\n arg_2 = get_or_create(arg_0=arg_0, arg_1=arg_1)\n arg_3.host_string = arg_2.public_dns_name"} +{"_id": "doc_9073", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=None):\n '''\n Utility to take the methods of the instance of a class, instance,\n and add them as functions to a module, module_name, so that Fabric\n can find and call them. Call this at the bottom of a module after\n the class definition.\n '''\n import imp\n from .decorators import task_or_dryrun\n\n # get the module as an object\n arg_4 = sys.modules[arg_1]\n\n arg_3 = re.sub('[^a-zA-Z0-9]+', '', arg_3 or '')\n\n # Iterate over the methods of the class and dynamically create a function\n # for each method that calls the method and add it to the current module\n # NOTE: inspect.ismethod actually executes the methods?!\n #for method in inspect.getmembers(instance, predicate=inspect.ismethod):\n\n arg_5 = getattr(arg_0, arg_2)\n\n if not arg_2.startswith('_'):\n\n # get the bound method\n arg_6 = getattr(arg_0, arg_2)\n\n# if module_name == 'buildbot' or module_alias == 'buildbot':\n# print('-'*80)\n# print('module_name:', module_name)\n# print('method_name:', method_name)\n# print('module_alias:', module_alias)\n# print('module_obj:', module_obj)\n# print('func.module:', func.__module__)\n\n # Convert executable to a Fabric task, if not done so already.\n if not hasattr(arg_6, 'is_task_or_dryrun'):\n arg_6 = task_or_dryrun(arg_6)\n\n if arg_1 == arg_3 \\\n or (arg_1.startswith('satchels.') and arg_1.endswith(arg_3)):\n\n # add the function to the current module\n setattr(arg_4, arg_2, arg_6)\n\n else:\n\n # Dynamically create a module for the virtual satchel.\n arg_7 = arg_4\n arg_4 = create_module(arg_3)\n setattr(arg_4, arg_2, arg_6)\n post_import_modules.add(arg_3)\n\n arg_8 = '%s.%s' % (arg_3 or arg_1, arg_2)\n arg_6.wrapped.__func__.fabric_name = arg_8\n\n return arg_6"} +{"_id": "doc_9074", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Given the function name, looks up the method for dynamically retrieving host data.\n \"\"\"\n arg_0 = arg_0 or env.hosts_retriever\n# #assert s, 'No hosts retriever specified.'\n if not arg_0:\n return env_hosts_retriever\n# module_name = '.'.join(s.split('.')[:-1])\n# func_name = s.split('.')[-1]\n# retriever = getattr(importlib.import_module(module_name), func_name)\n# return retriever\n return str_to_callable(arg_0) or env_hosts_retriever"} +{"_id": "doc_9075", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Returns a subset of the env dictionary containing\n only those keys with the name prefix.\n \"\"\"\n arg_0 = arg_0 or []\n assert isinstance(arg_0, (tuple, list)), 'Prefixes must be a sequence type, not %s.' 
% type(arg_0)\n arg_1 = {}\n for arg_2 in arg_0:\n arg_2 = arg_2.lower().strip()\n for arg_3 in sorted(env):\n if arg_3.startswith('%s_' % arg_2):\n arg_4 = arg_3[len(arg_2)+1:]\n arg_1[arg_4] = env[arg_3]\n return arg_1"} +{"_id": "doc_9076", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, **arg_3):\n \"\"\"\n Returns a template to a local file.\n If no filename given, a temporary filename will be generated and returned.\n \"\"\"\n import tempfile\n arg_4 = get_dryrun(arg_3.get('dryrun'))\n arg_5 = arg_3.pop('append_newline', True)\n arg_6 = arg_3.pop('style', 'cat') # |echo\n arg_7 = arg_3.pop('formatter', None)\n arg_8 = render_to_string(arg_0, arg_2=arg_2)\n if arg_5 and not arg_8.endswith('\\n'):\n arg_8 += '\\n'\n\n if arg_7 and callable(arg_7):\n arg_8 = arg_7(arg_8)\n\n if arg_4:\n if not arg_1:\n arg_9, arg_1 = tempfile.mkstemp()\n arg_10 = os.fdopen(arg_9, 'wt')\n arg_10.close()\n else:\n if arg_1:\n arg_10 = open(arg_1, 'w')\n else:\n arg_9, arg_1 = tempfile.mkstemp()\n arg_10 = os.fdopen(arg_9, 'wt')\n arg_10.write(arg_8)\n arg_10.close()\n assert arg_1\n\n if arg_6 == 'cat':\n arg_11 = 'cat < %s\\n%s\\nEOF' % (arg_1, arg_8)\n elif arg_6 == 'echo':\n arg_11 = 'echo -e %s > %s' % (shellquote(arg_8), arg_1)\n else:\n raise NotImplementedError\n\n if BURLAP_COMMAND_PREFIX:\n print('%s run: %s' % (render_command_prefix(), arg_11))\n else:\n print(arg_11)\n\n return arg_1"} +{"_id": "doc_9077", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=True, arg_3=None, arg_4=None):\n \"\"\"\n Returns a template to a remote file.\n If no filename given, a temporary filename will be generated and returned.\n \"\"\"\n arg_0 = find_template(arg_0)\n if arg_2:\n arg_3 = arg_3 or {}\n arg_0 = render_to_file(template=arg_0, arg_3=arg_3, arg_4=arg_4)\n put_or_dryrun(arg_0=arg_0, arg_1=arg_1, use_sudo=True)"} +{"_id": "doc_9078", "title": "", "text": "def Func(arg_0=None, arg_1=None, arg_2=None, arg_3=None, arg_4=False, arg_5=None):\n \"\"\"\n Iterates over sites, safely setting environment variables for each site.\n \"\"\"\n if arg_5 is None:\n arg_5 = get_verbose()\n\n arg_6 = get_current_hostname()\n\n arg_7 = arg_11.available_sites_by_host.get(arg_6, None)\n\n if arg_0 is None:\n arg_1 = arg_1 or arg_11.SITE or ALL\n if arg_1 == ALL:\n arg_0 = list(six.iteritems(arg_11.sites))\n else:\n sys.stderr.flush()\n arg_0 = [(arg_1, arg_11.sites.get(arg_1))]\n\n arg_2 = arg_2 #or render_remote_paths\n arg_8 = save_env()\n for arg_9, arg_10 in sorted(arg_0):\n if arg_4 and arg_9.endswith('_secure'):\n continue\n\n # Only load site configurations that are allowed for this host.\n if arg_7 is None:\n pass\n else:\n assert isinstance(arg_7, (tuple, list))\n if arg_9 not in arg_7:\n if arg_5:\n print('Skipping site %s because not in among target sites.' 
% arg_9)\n continue\n\n arg_11.update(arg_8)\n arg_11.update(arg_11.sites.get(arg_9, {}))\n arg_11.SITE = arg_9\n if callable(arg_2):\n arg_2()\n if arg_3:\n arg_3(arg_9)\n yield arg_9, arg_10\n\n # Revert modified keys.\n arg_11.update(arg_8)\n\n # Remove keys that were added, not simply updated.\n arg_13 = set(arg_11).difference(arg_8)\n for arg_14 in arg_13:\n # Don't remove internally maintained variables, because these are used to cache hostnames\n # used by Func().\n if arg_14.startswith('_'):\n continue\n del arg_11[arg_14]"} +{"_id": "doc_9079", "title": "", "text": "def Func(arg_0):\n \"\"\"perform topo sort on elements.\n\n :arg source: list of ``(name, [list of dependancies])`` pairs\n :returns: list of names, with dependancies listed first\n \"\"\"\n if isinstance(arg_0, dict):\n arg_0 = arg_0.items()\n arg_1 = sorted([(arg_6, set(arg_7)) for arg_6, arg_7 in arg_0]) # copy deps so we can modify set in-place\n arg_2 = []\n while arg_1:\n arg_3 = []\n arg_4 = []\n for arg_5 in arg_1:\n arg_6, arg_7 = arg_5\n arg_7.difference_update(arg_2) # remove deps we emitted last pass\n if arg_7: # still has deps? recheck during next pass\n arg_3.append(arg_5)\n else: # no more deps? time to emit\n yield arg_6\n arg_2.append(arg_6) # <-- not required, but helps preserve original ordering\n arg_4.append(arg_6) # remember what we emitted for difference_update() in next pass\n if not arg_4: # all entries have unmet deps, one of two things is wrong...\n raise ValueError(\"cyclic or missing dependancy detected: %r\" % (arg_3,))\n arg_1 = arg_3\n arg_2 = arg_4"} +{"_id": "doc_9080", "title": "", "text": "def Func(arg_0=None):\n \"\"\"\n Returns a list of hosts that have been configured to support the given site.\n \"\"\"\n arg_0 = arg_0 or env.SITE\n arg_1 = set()\n for arg_2, arg_3 in six.iteritems(env.available_sites_by_host):\n# print('checking hostname:',hostname, _sites)\n for arg_4 in arg_3:\n if arg_4 == arg_0:\n# print( '_site:',_site)\n arg_5 = get_host_ip(arg_2)\n# print( 'host_ip:',host_ip)\n if arg_5:\n arg_1.add(arg_5)\n break\n return list(arg_1)"} +{"_id": "doc_9081", "title": "", "text": "def Func(arg_0, arg_1=True, arg_2=True):\n \"\"\"\n Returns a copy of the global environment with all the local variables copied back into it.\n \"\"\"\n arg_3 = type(arg_0.genv)()\n if arg_2:\n arg_3.update(arg_0.genv)\n if arg_1:\n for arg_4, arg_5 in arg_0.lenv.items():\n arg_3['%s_%s' % (arg_0.obj.name.lower(), arg_4)] = arg_5\n return arg_3"} +{"_id": "doc_9082", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Context manager that hides the command prefix and activates dryrun to capture all following task commands to their equivalent Bash outputs.\n \"\"\"\n class Capture(object):\n\n def __init__(arg_0, arg_1):\n arg_0.satchel = arg_1\n arg_0._dryrun = arg_0.satchel.dryrun\n arg_0.satchel.dryrun = 1\n begincap()\n arg_0._stdout = arg_11.stdout\n arg_0._stderr = arg_11.stderr\n arg_0.stdout = arg_11.stdout = StringIO()\n arg_0.stderr = arg_11.stderr = StringIO()\n\n def __enter__(arg_0):\n return arg_0\n\n def __exit__(arg_0, arg_8, arg_9, arg_10): # pylint: disable=redefined-builtin\n endcap()\n arg_0.satchel.dryrun = arg_0._dryrun\n arg_11.stdout = arg_0._stdout\n arg_11.stderr = arg_0._stderr\n\n return Capture(arg_0)"} +{"_id": "doc_9083", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Adds this satchel to the global Funcies for fast lookup from other satchels.\n \"\"\"\n\n arg_0._set_defaults()\n\n arg_1[arg_0.name.upper()] = arg_0\n\n arg_4[arg_0.name] = arg_0.record_manifest\n\n # 
Register service commands.\n if arg_0.required_system_packages:\n arg_5[arg_0.name.upper()] = arg_0.required_system_packages"} +{"_id": "doc_9084", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Removes this satchel from global registeries.\n \"\"\"\n\n for arg_1 in list(env.keys()):\n if arg_1.startswith(arg_0.env_prefix):\n del env[arg_1]\n\n try:\n del all_satchels[arg_0.name.upper()]\n except KeyError:\n pass\n\n try:\n del manifest_recorder[arg_0.name]\n except KeyError:\n pass\n\n try:\n del manifest_deployers[arg_0.name.upper()]\n except KeyError:\n pass\n\n try:\n del manifest_deployers_befores[arg_0.name.upper()]\n except KeyError:\n pass\n\n try:\n del required_system_packages[arg_0.name.upper()]\n except KeyError:\n pass"} +{"_id": "doc_9085", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a version of env filtered to only include the variables in our namespace.\n \"\"\"\n arg_1 = type(env)()\n for arg_2, arg_3 in six.iteritems(env):\n if arg_2.startswith(arg_0.name+'_'):\n arg_1[arg_2[arg_4(arg_0.name)+1:]] = arg_3\n return arg_1"} +{"_id": "doc_9086", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Loads settings for the target site.\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_3 = arg_0.genv.sites[arg_1].copy()\n arg_2.env.site = arg_1\n if arg_0.verbose:\n print('Func.data:')\n pprint(arg_3, indent=4)\n\n # Remove local namespace settings from the global namespace\n # by converting _ to .\n arg_5 = {}\n for arg_6, arg_7 in list(arg_3.items()):\n if arg_6.startswith(arg_0.name + '_'):\n arg_8 = arg_6[len(arg_0.name + '_'):]\n arg_5[arg_8] = arg_7\n del arg_3[arg_6]\n\n arg_2.env.update(arg_5)\n arg_2.env.update(arg_3)"} +{"_id": "doc_9087", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns a list of all required packages.\n \"\"\"\n arg_1 = arg_0.os_version # OS(type=LINUX, distro=UBUNTU, release='14.04')\n arg_0.vprint('os_version:', arg_1)\n\n # Lookup legacy package list.\n # OS: [package1, package2, ...],\n arg_2 = arg_0.required_system_packages\n if arg_2:\n deprecation('The required_system_packages attribute is deprecated, '\n 'use the packager_system_packages property instead.')\n\n # Lookup new package list.\n # OS: [package1, package2, ...],\n arg_3 = arg_0.packager_system_packages\n\n arg_4 = [\n (arg_1.type, arg_1.distro, arg_1.release),\n (arg_1.distro, arg_1.release),\n (arg_1.type, arg_1.distro),\n (arg_1.distro,),\n arg_1.distro,\n ]\n arg_0.vprint('req_packages1:', arg_2)\n arg_0.vprint('req_packages2:', arg_3)\n arg_5 = None\n arg_6 = False\n for arg_7 in arg_4:\n arg_0.vprint('pattern:', arg_7)\n for arg_8 in (arg_2, arg_3):\n if arg_7 in arg_8:\n arg_5 = arg_8[arg_7]\n arg_6 = True\n break\n if not arg_6:\n print('Warning: No operating system pattern found for %s' % (arg_1,))\n arg_0.vprint('package_list:', arg_5)\n return arg_5"} +{"_id": "doc_9088", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns true if at least one tracker detects a change.\n \"\"\"\n arg_1 = arg_0.last_manifest\n for arg_2 in arg_0.get_trackers():\n arg_3 = arg_1['_tracker_%s' % arg_2.get_natural_key_hash()]\n if arg_2.is_changed(arg_3):\n return True\n return False"} +{"_id": "doc_9089", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Delete a PostgreSQL database.\n\n Example::\n\n import burlap\n\n # Remove DB if it exists\n if burlap.postgres.database_exists('myapp'):\n burlap.postgres.Func('myapp')\n\n \"\"\"\n with settings(warn_only=True):\n arg_0.sudo('dropdb %s' % (arg_1,), user='postgres')"} +{"_id": "doc_9090", "title": "", 
"text": "def Func(arg_0, arg_1, arg_2, arg_3='localhost', arg_4=None, arg_5=None):\n \"\"\"\n Directly transfers a table between two databases.\n \"\"\"\n #TODO: incomplete\n arg_6 = arg_0.database_renderer(arg_4=arg_4, arg_5=arg_5)\n arg_6.env.table_name = arg_1\n arg_6.run('psql --user={dst_db_user} --host={dst_db_host} --command=\"DROP TABLE IF EXISTS {table_name} CASCADE;\"')\n arg_6.run('pg_dump -t {table_name} --user={dst_db_user} --host={dst_db_host} | psql --user={src_db_user} --host={src_db_host}')"} +{"_id": "doc_9091", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the IPv4 Func assigned to an interface.\n\n Example::\n\n import burlap\n\n # Print all configured IP Funces\n for interface in burlap.network.interfaces():\n print(burlap.network.Func(interface))\n\n \"\"\"\n with settings(hide('running', 'stdout')):\n arg_1 = (sudo(\"/sbin/ifconfig %(interface)s | grep 'inet '\" % locals()) or '').split('\\n')[-1].strip()\n if 'addr' in arg_1:\n return arg_1.split()[1].split(':')[1]\n return arg_1.split()[1]"} +{"_id": "doc_9092", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=0, arg_4=0):\n \"\"\"\n Installs system packages listed in yum-requirements.txt.\n \"\"\"\n assert arg_0.genv[ROLE]\n arg_5 = arg_1 or arg_0.find_template(arg_0.genv.yum_requirments_fn)\n if not arg_5:\n return []\n assert os.path.isfile(arg_5)\n arg_3 = int(arg_3)\n if arg_4:\n return [\n arg_6.strip() for arg_6 in open(arg_5).readlines()\n if arg_6.strip() and not arg_6.strip.startswith('#')\n and (not arg_2 or arg_6.strip() == arg_2)\n ]\n if arg_3:\n arg_0.sudo_or_dryrun('yum update --assumeyes')\n if arg_2:\n arg_0.sudo_or_dryrun('yum install --assumeyes %s' % arg_2)\n else:\n if arg_0.genv.is_local:\n arg_0.put_or_dryrun(local_path=arg_5)\n arg_5 = arg_0.genv.put_remote_fn\n arg_0.sudo_or_dryrun('yum install --assumeyes $(cat %(yum_req_fn)s)' % arg_5)"} +{"_id": "doc_9093", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None): # pylint: disable=redefined-builtin\n \"\"\"\n Displays all packages required by the current role\n based on the documented services provided.\n \"\"\"\n from burlap.common import (\n required_system_packages,\n required_python_packages,\n required_ruby_packages,\n )\n arg_2 = (arg_2 or '').strip().upper()\n arg_1 = (arg_1 or '').lower().strip()\n assert not arg_1 or arg_1 in PACKAGE_TYPES, 'Unknown package type: %s' % (arg_1,)\n arg_3 = set()\n arg_4 = []\n arg_5 = arg_0.os_version\n\n for arg_6, arg_7 in arg_0.all_other_enabled_satchels.items():\n\n arg_6 = arg_6.strip().upper()\n if arg_2 and arg_2 != arg_6:\n continue\n\n arg_8 = []\n\n if not arg_1 or arg_1 == SYSTEM:\n\n #TODO:deprecated, remove\n arg_8.extend(required_system_packages.get(\n arg_6, {}).get((arg_5.distro, arg_5.release), []))\n\n try:\n arg_9 = arg_7.packager_system_packages\n if arg_0.verbose:\n print('pkgs:')\n pprint(arg_9, indent=4)\n for arg_10 in [(arg_5.distro, arg_5.release), arg_5.distro]:\n if arg_0.verbose:\n print('checking key:', arg_10)\n if arg_10 in arg_9:\n if arg_0.verbose:\n print('satchel %s requires:' % arg_7, arg_9[arg_10])\n arg_8.extend(arg_9[arg_10])\n break\n except AttributeError:\n pass\n\n if not arg_1 or arg_1 == PYTHON:\n\n #TODO:deprecated, remove\n arg_8.extend(required_python_packages.get(\n arg_6, {}).get((arg_5.distro, arg_5.release), []))\n\n try:\n arg_9 = arg_7.packager_python_packages\n for arg_10 in [(arg_5.distro, arg_5.release), arg_5.distro]:\n if arg_10 in arg_9:\n arg_8.extend(arg_9[arg_10])\n except AttributeError:\n pass\n 
print('_new:', arg_8)\n\n if not arg_1 or arg_1 == RUBY:\n\n #TODO:deprecated, remove\n arg_8.extend(required_ruby_packages.get(\n arg_6, {}).get((arg_5.distro, arg_5.release), []))\n\n for arg_11 in arg_8:\n if arg_11 in arg_3:\n continue\n arg_3.add(arg_11)\n arg_4.append(arg_11)\n if arg_0.verbose:\n for arg_12 in sorted(arg_4):\n print('package:', arg_12)\n return arg_4"} +{"_id": "doc_9094", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Writes entire crontab to the host.\n \"\"\"\n arg_2 = arg_0.local_renderer\n\n arg_0.Func_logrotate()\n\n arg_3 = []\n# if self.verbose:\n# print('hostname: \"%s\"' % (hostname,), file=sys.stderr)\n for arg_4, arg_5 in arg_0.iter_sites(arg_1=arg_1):\n arg_2.env.cron_stdout_log = arg_2.format(arg_2.env.stdout_log_template)\n arg_2.env.cron_stderr_log = arg_2.format(arg_2.env.stderr_log_template)\n arg_2.sudo('touch {cron_stdout_log}')\n arg_2.sudo('touch {cron_stderr_log}')\n arg_2.sudo('sudo chown {user}:{user} {cron_stdout_log}')\n arg_2.sudo('sudo chown {user}:{user} {cron_stderr_log}')\n\n if arg_0.verbose:\n print('site:', arg_1, file=sys.stderr)\n print('env.crontabs_selected:', arg_0.env.crontabs_selected, file=sys.stderr)\n\n for arg_9 in arg_0.env.crontabs_selected:\n arg_10 = arg_0.env.crontabs_available.get(arg_9, [])\n if arg_0.verbose:\n print('lines:', arg_10, file=sys.stderr)\n for arg_11 in arg_10:\n arg_3.append(arg_2.format(arg_11))\n\n if not arg_3:\n return\n\n arg_3 = arg_0.env.crontab_headers + arg_3\n arg_3.append('\\n')\n arg_2.env.crontabs_rendered = '\\n'.join(arg_3)\n arg_13 = arg_0.write_to_file(content=arg_2.env.crontabs_rendered)\n print('fn:', arg_13)\n arg_2.env.put_remote_path = arg_2.put(local_path=arg_13)\n if isinstance(arg_2.env.put_remote_path, (tuple, list)):\n arg_2.env.put_remote_path = arg_2.env.put_remote_path[0]\n arg_2.sudo('crontab -u {cron_user} {put_remote_path}')"} +{"_id": "doc_9095", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Forcibly kills Rabbit and purges all its queues.\n\n For emergency use when the server becomes unresponsive, even to service stop calls.\n\n If this also fails to correct the performance issues, the server may have to be completely\n reinstalled.\n \"\"\"\n arg_1 = arg_0.local_renderer\n arg_0.stop()\n with settings(warn_only=True):\n arg_1.sudo('killall rabbitmq-server')\n with settings(warn_only=True):\n arg_1.sudo('killall beam.smp')\n #TODO:explicitly delete all subfolders, star-delete doesn't work\n arg_1.sudo('rm -Rf /var/lib/rabbitmq/mnesia/*')"} +{"_id": "doc_9096", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Returns a generator yielding all the keys that have values that differ between each dictionary.\n \"\"\"\n arg_2 = set(arg_0).union(arg_1)\n for arg_3 in arg_2:\n arg_4 = arg_0.get(arg_3)\n arg_5 = arg_1.get(arg_3)\n if arg_4 != arg_5:\n yield arg_3, (arg_4, arg_5)"} +{"_id": "doc_9097", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Given a list of components, re-orders them according to inter-component dependencies so the most depended upon are first.\n \"\"\"\n assert isinstance(arg_0, (tuple, list))\n arg_1 = {}\n for arg_2 in arg_0:\n arg_3 = set(manifest_deployers_befores.get(arg_2, []))\n arg_3 = arg_3.intersection(arg_0)\n arg_1[arg_2] = arg_3\n arg_4 = list(topological_sort(arg_1.items()))\n return arg_4"} +{"_id": "doc_9098", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3=False):\n \"\"\"\n Returns a generator yielding the named functions needed for a deployment.\n \"\"\"\n for arg_4 in arg_0:\n arg_5 = 
manifest_deployers.get(arg_4, [])\n for arg_6 in arg_5:\n\n #TODO:remove this after burlap.* naming prefix bug fixed\n if arg_6.startswith('burlap.'):\n print('skipping %s' % arg_6)\n continue\n\n arg_7 = manifest_deployers_takes_diff.get(arg_6, False)\n\n arg_8 = resolve_deployer(arg_6)\n arg_9 = arg_1.get(arg_4)\n arg_10 = arg_2.get(arg_4)\n if arg_7:\n yield arg_6, partial(arg_8, arg_10=arg_10, arg_9=arg_9)\n else:\n yield arg_6, partial(arg_8)"} +{"_id": "doc_9099", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Returns the path to the manifest file.\n \"\"\"\n arg_1 = arg_0.local_renderer\n arg_2 = arg_1.format(arg_1.env.data_dir + '/manifest.yaml')\n return arg_2"} +{"_id": "doc_9100", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns a dictionary representing the current configuration state.\n\n Thumbprint is of the form:\n\n {\n component_name1: {key: value},\n component_name2: {key: value},\n ...\n }\n\n \"\"\"\n arg_1 = str_to_component_list(arg_1)\n if arg_0.verbose:\n print('deploy.Func.components:', arg_1)\n arg_2 = {} # {component:data}\n for arg_3, arg_4 in sorted(manifest_recorder.items()):\n arg_0.vprint('Checking thumbprint for component %s...' % arg_3)\n arg_5 = assert_valid_satchel(arg_3)\n arg_6 = clean_service_name(arg_3)\n if arg_6 not in arg_0.genv.services:\n arg_0.vprint('Skipping unused component:', arg_3)\n continue\n elif arg_1 and arg_6 not in arg_1:\n arg_0.vprint('Skipping non-matching component:', arg_3)\n continue\n try:\n arg_0.vprint('Retrieving manifest for %s...' % arg_3)\n arg_2[arg_5] = arg_4()\n if arg_0.verbose:\n pprint(arg_2[arg_5], indent=4)\n except exceptions.AbortDeployment as e:\n raise\n return arg_2"} +{"_id": "doc_9101", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns a dictionary representing the previous configuration state.\n\n Thumbprint is of the form:\n\n {\n component_name1: {key: value},\n component_name2: {key: value},\n ...\n }\n\n \"\"\"\n arg_1 = str_to_component_list(arg_1)\n arg_2 = arg_0.manifest_filename\n arg_3 = None\n if arg_0.file_exists(arg_2):\n arg_4 = six.BytesIO()\n get(arg_2, arg_4)\n arg_3 = arg_4.getvalue()\n arg_5 = {}\n arg_6 = yaml.load(arg_3)\n for arg_7, arg_8 in arg_6.items():\n arg_9 = assert_valid_satchel(arg_7)\n arg_10 = clean_service_name(arg_7)\n if arg_1 and arg_10 not in arg_1:\n continue\n arg_5[arg_9] = arg_8\n return arg_5"} +{"_id": "doc_9102", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Marks the remote server as currently being deployed to.\n \"\"\"\n arg_0.init()\n arg_1 = arg_0.local_renderer\n if arg_0.file_exists(arg_1.env.Funcfile_path):\n raise exceptions.AbortDeployment('Lock file %s exists. Perhaps another deployment is currently underway?' % arg_1.env.Funcfile_path)\n else:\n arg_0.vprint('Locking %s.' 
% arg_1.env.Funcfile_path)\n arg_1.env.hostname = socket.gethostname()\n arg_1.run_or_local('echo \"{hostname}\" > {Funcfile_path}')"} +{"_id": "doc_9103", "title": "", "text": "def Func(arg_0, arg_1=None):#, set_satchels=None):\n \"\"\"\n Update the thumbprint on the remote server but execute no satchel configurators.\n\n components = A comma-delimited list of satchel names to limit the Func deployment to.\n set_satchels = A semi-colon delimited list of key-value pairs to set in satchels before recording a Func deployment.\n \"\"\"\n\n arg_0.init()\n\n # In cases where we only want to Func deployment of a specific satchel, then simply copy the last thumbprint and overwrite with a subset\n # of the current thumbprint filtered by our target components.\n if arg_1:\n arg_2 = arg_0.get_previous_thumbprint() or {}\n arg_2.update(arg_0.get_current_thumbprint(arg_1=arg_1) or {})\n else:\n arg_2 = arg_0.get_current_thumbprint(arg_1=arg_1) or {}\n\n arg_3 = yaml.dump(arg_2)\n arg_4 = arg_0.local_renderer\n arg_4.upload_content(content=arg_3, fn=arg_0.manifest_filename)\n\n # Ensure all cached manifests are cleared, so they reflect the newly deployed changes.\n arg_0.reset_all_satchels()"} +{"_id": "doc_9104", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0):\n \"\"\"\n Inspects differences between the last deployment and the current code state.\n \"\"\"\n\n arg_2 = int(arg_2)\n\n arg_0.init()\n\n arg_3, arg_4 = arg_0.get_component_funcs(arg_1=arg_1)\n\n print('\\n%i changes found for host %s.\\n' % (len(arg_3), arg_0.genv.host_string))\n if arg_3 and arg_4:\n if arg_0.verbose:\n print('These components have changed:\\n')\n for arg_5 in sorted(arg_3):\n print((' '*4)+arg_5)\n print('Deployment plan for host %s:\\n' % arg_0.genv.host_string)\n for arg_6, arg_7 in arg_4:\n print(success_str((' '*4)+arg_6))\n if arg_3:\n print()\n\n if arg_2 and arg_0.genv.host_string == arg_0.genv.hosts[-1]:\n if arg_3:\n if not raw_input('Begin deployment? [yn] ').strip().lower().startswith('y'):\n sys.exit(0)\n else:\n sys.exit(0)"} +{"_id": "doc_9105", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None):\n \"\"\"\n Retrieves the Django settings dictionary.\n \"\"\"\n arg_3 = arg_0.local_renderer\n arg_4 = arg_6.stdout\n arg_5 = arg_6.stderr\n if not arg_0.verbose:\n arg_6.stdout = StringIO()\n arg_6.stderr = StringIO()\n try:\n arg_6.path.insert(0, arg_3.env.src_dir)\n\n # Temporarily override SITE.\n arg_9 = arg_0.genv.SITE\n if arg_1 and arg_1.endswith('_secure'):\n arg_1 = arg_1[:-7]\n arg_1 = arg_1 or arg_0.genv.SITE or arg_0.genv.default_site\n arg_0.set_site(arg_1)\n\n # Temporarily override ROLE.\n arg_10 = arg_0.genv.ROLE\n if arg_2:\n arg_0.set_role(arg_2)\n\n try:\n # We need to explicitly delete sub-modules from sys.modules. Otherwise, reload() skips\n # them and they'll continue to contain obsolete settings.\n if arg_3.env.delete_module_with_prefixes:\n for arg_11 in sorted(arg_6.modules):\n for arg_12 in arg_3.env.delete_module_with_prefixes:\n if arg_11.startswith(arg_12):\n if arg_0.verbose:\n print('Deleting module %s prior to re-import.' 
% arg_11)\n del arg_6.modules[arg_11]\n break\n\n for arg_11 in list(arg_6.modules):\n for arg_13 in arg_3.env.delete_module_containing:\n if arg_13 in arg_11:\n del arg_6.modules[arg_11]\n break\n\n if arg_3.env.settings_module in arg_6.modules:\n del arg_6.modules[arg_3.env.settings_module]\n\n #TODO:fix r.env.settings_module not loading from settings?\n# print('r.genv.django_settings_module:', r.genv.django_settings_module, file=_stdout)\n# print('r.genv.dj_settings_module:', r.genv.dj_settings_module, file=_stdout)\n# print('r.env.settings_module:', r.env.settings_module, file=_stdout)\n if 'django_settings_module' in arg_3.genv:\n arg_3.env.settings_module = arg_3.genv.django_settings_module\n else:\n arg_3.env.settings_module = arg_3.env.settings_module or arg_3.genv.dj_settings_module\n if arg_0.verbose:\n print('r.env.settings_module:', arg_3.env.settings_module, arg_3.format(arg_3.env.settings_module))\n arg_16 = import_module(arg_3.format(arg_3.env.settings_module))\n\n if arg_1:\n assert arg_1 == arg_16.SITE, 'Unable to set SITE to \"%s\". Instead it is set to \"%s\".' % (arg_1, arg_16.SITE)\n\n # Works as long as settings.py doesn't also reload anything.\n import imp\n imp.reload(arg_16)\n\n except ImportError as e:\n print('Warning: Could not import settings for site \"%s\": %s' % (arg_1, e), file=arg_4)\n traceback.print_exc(file=arg_4)\n #raise # breaks *_secure pseudo sites\n return\n finally:\n if arg_9:\n arg_0.set_site(arg_9)\n if arg_10:\n arg_0.set_role(arg_10)\n finally:\n arg_6.stdout = arg_4\n arg_6.stderr = arg_5\n arg_6.path.remove(arg_3.env.src_dir)\n return arg_16"} +{"_id": "doc_9106", "title": "", "text": "def Func(arg_0, arg_1='admin', arg_2=None, arg_3=None, arg_4=None):\n \"\"\"\n Runs the Django Func management command.\n \"\"\"\n arg_5 = arg_0.local_renderer\n arg_4 = arg_4 or arg_0.genv.SITE\n arg_0.set_site_specifics(arg_4)\n arg_6 = ['--username=%s' % arg_1]\n if arg_2:\n arg_6.append('--email=%s' % arg_2)\n if arg_3:\n arg_6.append('--password=%s' % arg_3)\n arg_5.env.options_str = ' '.join(arg_6)\n if arg_0.is_local:\n arg_5.env.project_dir = arg_5.env.local_project_dir\n arg_5.genv.SITE = arg_5.genv.SITE or arg_4\n arg_5.run_or_local('export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; {manage_cmd} {Func_cmd} {options_str}')"} +{"_id": "doc_9107", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Runs the Django Func management command.\n\n By default, runs on only the current site.\n\n Pass site=all to run on all sites.\n \"\"\"\n arg_2 = arg_2 or arg_0.genv.SITE\n arg_3 = arg_0.local_renderer\n arg_3.env._Func_path = arg_1\n for arg_6, arg_7 in arg_0.iter_sites(arg_2=arg_2, no_secure=True):\n try:\n arg_0.set_db(arg_2=arg_6)\n arg_3.env.SITE = arg_6\n arg_3.sudo('export SITE={SITE}; export ROLE={ROLE}; '\n 'cd {project_dir}; '\n '{manage_cmd} Func {_Func_path}')\n except KeyError:\n pass"} +{"_id": "doc_9108", "title": "", "text": "def Func(arg_0, arg_1, *arg_2, **arg_3):\n \"\"\"\n A generic wrapper around Django's Func command.\n \"\"\"\n arg_4 = arg_0.local_renderer\n arg_5 = arg_3.pop('environs', '').strip()\n if arg_5:\n arg_5 = ' '.join('export %s=%s;' % tuple(_.split('=')) for _ in arg_5.split(','))\n arg_5 = ' ' + arg_5 + ' '\n arg_4.env.cmd = arg_1\n arg_4.env.SITE = arg_4.genv.SITE or arg_4.genv.default_site\n arg_4.env.args = ' '.join(map(str, arg_2))\n arg_4.env.kwargs = ' '.join(\n ('--%s' % _k if _v in (True, 'True') else '--%s=%s' % (_k, _v))\n for _k, _v in arg_3.items())\n arg_4.env.environs = arg_5\n if 
arg_0.is_local:\n arg_4.env.project_dir = arg_4.env.local_project_dir\n arg_4.run_or_local('export SITE={SITE}; export ROLE={ROLE};{environs} cd {project_dir}; {Func_cmd} {cmd} {args} {kwargs}')"} +{"_id": "doc_9109", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0, arg_3=None, arg_4=1): # pylint: disable=redefined-builtin\n \"\"\"\n Runs the standard Django Func command for one or more sites.\n \"\"\"\n arg_5 = arg_0.local_renderer\n\n arg_4 = int(arg_4)\n\n arg_6 = arg_0.version_tuple >= (1, 7, 0)\n\n arg_7 = arg_0.version_tuple >= (1, 9, 0)\n\n # DEPRECATED: removed in Django>=1.7\n arg_5.env.db_Func_all_flag = '--all' if int(arg_2) else ''\n\n arg_5.env.db_Func_database = ''\n if arg_3:\n arg_5.env.db_Func_database = ' --database=%s' % arg_3\n\n if arg_0.is_local:\n arg_5.env.project_dir = arg_5.env.local_project_dir\n\n arg_1 = arg_1 or arg_0.genv.SITE\n for arg_12, arg_13 in arg_5.iter_unique_databases(arg_1=arg_1):\n arg_5.env.SITE = arg_12\n with arg_0.settings(warn_only=arg_4):\n if arg_6:\n if arg_7:\n arg_5.run_or_local(\n 'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '\n '{manage_cmd} migrate --run-Func --noinput {db_Func_database}')\n else:\n # Between Django>=1.7,<1.9 we can only do a regular migrate, no true Func.\n arg_5.run_or_local(\n 'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '\n '{manage_cmd} migrate --noinput {db_Func_database}')\n else:\n arg_5.run_or_local(\n 'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '\n '{manage_cmd} Func --noinput {db_Func_all_flag} {db_Func_database}')"} +{"_id": "doc_9110", "title": "", "text": "def Func(arg_0, arg_1='', arg_2='process', arg_3=arg_4, arg_5='', arg_6='', arg_7=''):\n \"\"\"\n Starts a Django management command in a screen.\n\n Parameters:\n\n command :- all arguments passed to `./manage` as a single string\n\n site :- the site to run the command for (default is all)\n\n Designed to be ran like:\n\n fab dj.Func:\"some_management_command --force\"\n\n \"\"\"\n arg_5 = arg_5.split(':')\n arg_8 = arg_0.local_renderer\n for arg_9, arg_10 in arg_0.iter_sites(arg_3=arg_3, no_secure=True):\n if arg_9 in arg_5:\n continue\n arg_8.env.SITE = arg_9\n arg_8.env.command = arg_1\n arg_8.env.end_email_command = ''\n arg_8.env.recipients = arg_7 or ''\n arg_8.env.end_email_command = ''\n if arg_6:\n arg_6 = arg_6 + ' for ' + arg_9\n arg_6 = arg_6.replace(' ', '_')\n arg_8.env.end_message = arg_6\n arg_8.env.end_email_command = arg_8.format('{manage_cmd} send_mail --subject={end_message} --recipients={recipients}')\n arg_8.env.name = arg_2.format(**arg_8.genv)\n arg_8.run(\n 'screen -dmS {name} bash -c \"export SITE={SITE}; '\\\n 'export ROLE={ROLE}; cd {project_dir}; '\\\n '{manage_cmd} {command} --traceback; {end_email_command}\"; sleep 3;')"} +{"_id": "doc_9111", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n Looks up the root login for the given database on the given host and sets\n it to environment variables.\n\n Populates these standard variables:\n\n db_root_password\n db_root_username\n\n \"\"\"\n\n # Check the legacy password location.\n try:\n arg_1.env.db_root_username = arg_1.env.root_username\n except AttributeError:\n pass\n try:\n arg_1.env.db_root_password = arg_1.env.root_password\n except AttributeError:\n pass\n\n # Check the new password location.\n arg_5 = arg_1.env.get('db_host')\n if arg_0.verbose:\n print('db.Func.key:', arg_5)\n print('db.Funcs:', arg_1.env.root_logins)\n if arg_5 in arg_1.env.root_logins:\n arg_6 = arg_1.env.root_logins[arg_5]\n# 
print('data:', data)\n if 'username' in arg_6:\n arg_1.env.db_root_username = arg_6['username']\n arg_1.genv.db_root_username = arg_6['username']\n if 'password' in arg_6:\n arg_1.env.db_root_password = arg_6['password']\n arg_1.genv.db_root_password = arg_6['password']\n else:\n arg_8 = 'Warning: No root login entry found for host %s in role %s.' % (arg_1.env.get('db_host'), arg_0.genv.get('ROLE'))\n print(arg_8, file=sys.stderr)"} +{"_id": "doc_9112", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=None, arg_3=None):\n \"\"\"\n Renders local settings for a specific database.\n \"\"\"\n\n arg_1 = arg_1 or arg_0.env.default_db_name\n\n arg_2 = arg_2 or arg_0.genv.SITE\n\n arg_3 = arg_3 or arg_0.genv.ROLE\n\n arg_4 = (arg_1, arg_2, arg_3)\n arg_0.vprint('checking key:', arg_4)\n if arg_4 not in arg_0._Funcs:\n arg_0.vprint('No cached db renderer, generating...')\n\n if arg_0.verbose:\n print('db.name:', arg_1)\n print('db.databases:', arg_0.env.databases)\n print('db.databases[%s]:' % arg_1, arg_0.env.databases.get(arg_1))\n\n arg_5 = type(arg_0.genv)(arg_0.lenv)\n arg_5.update(arg_0.get_database_defaults())\n arg_5.update(arg_0.env.databases.get(arg_1, {}))\n arg_5['db_name'] = arg_1\n if arg_0.verbose:\n print('db.d:')\n pprint(arg_5, indent=4)\n print('db.connection_handler:', arg_5.connection_handler)\n\n if arg_5.connection_handler == CONNECTION_HANDLER_DJANGO:\n arg_0.vprint('Using django handler...')\n arg_6 = arg_0.get_satchel('dj')\n if arg_0.verbose:\n print('Loading Django DB settings for site {} and role {}.'.format(arg_2, arg_3), file=sys.stderr)\n arg_6.set_db(arg_1=arg_1, arg_2=arg_2, arg_3=arg_3)\n arg_7 = arg_6.local_renderer.collect_genv(include_local=True, include_global=False)\n\n # Copy \"dj_db_*\" into \"db_*\".\n for arg_8, arg_9 in arg_7.items():\n if arg_8.startswith('dj_db_'):\n arg_7[arg_8[3:]] = arg_9\n del arg_7[arg_8]\n\n if arg_0.verbose:\n print('Loaded:')\n pprint(arg_7)\n arg_5.update(arg_7)\n\n elif arg_5.connection_handler and arg_5.connection_handler.startswith(CONNECTION_HANDLER_CUSTOM+':'):\n\n arg_10 = arg_5.connection_handler[len(CONNECTION_HANDLER_CUSTOM+':'):]\n arg_0.vprint('Using custom handler %s...' 
% arg_10)\n arg_7 = str_to_callable(arg_10)(arg_3=arg_0.genv.ROLE)\n if arg_0.verbose:\n print('Loaded:')\n pprint(arg_7)\n arg_5.update(arg_7)\n\n arg_11 = LocalRenderer(arg_0, lenv=arg_5)\n\n # Optionally set any root logins needed for administrative commands.\n arg_0.set_root_login(arg_11)\n\n arg_0._Funcs[arg_4] = arg_11\n else:\n arg_0.vprint('Cached db renderer found.')\n\n return arg_0._Funcs[arg_4]"} +{"_id": "doc_9113", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Return free space in bytes.\n \"\"\"\n arg_1 = \"df -k | grep -vE '^Filesystem|tmpfs|cdrom|none|udev|cgroup' | awk '{ print($1 \\\" \\\" $4) }'\"\n arg_2 = [_ for _ in arg_0.run(arg_1).strip().split('\\n') if _.startswith('/')]\n assert len(arg_2) == 1, 'Ambiguous devices: %s' % str(arg_2)\n arg_3, arg_4 = arg_2[0].split(' ')\n arg_5 = int(arg_4) * 1024\n arg_0.vprint('free_space (bytes):', arg_5)\n return arg_5"} +{"_id": "doc_9114", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n \"\"\"\n Loads database parameters from a specific named set.\n \"\"\"\n arg_2 = arg_2 or arg_0\n arg_3 = arg_2.genv.db_sets.get(arg_1, {})\n arg_2.genv.update(arg_3)"} +{"_id": "doc_9115", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Determines if there's enough space to load the target database.\n \"\"\"\n from fabric import state\n from fabric.task_utils import crawl\n\n arg_3 = crawl(arg_1, state.commands)\n assert arg_3, 'Unknown source role: %s' % arg_1\n\n arg_4 = crawl(arg_2, state.commands)\n assert arg_4, 'Unknown destination role: %s' % arg_2\n\n # Get source database size.\n arg_3()\n arg_5.host_string = arg_5.hosts[0]\n arg_7 = arg_0.get_size()\n\n # Get target database size, if any.\n arg_4()\n arg_5.host_string = arg_5.hosts[0]\n try:\n arg_8 = arg_0.get_size()\n except (ValueError, TypeError):\n arg_8 = 0\n\n # Get target host disk size.\n arg_9 = arg_0.get_free_space()\n\n # Deduct existing database size, because we'll be deleting it.\n arg_10 = arg_9 + arg_8 - arg_7\n arg_11, arg_12 = pretty_bytes(arg_10)\n\n arg_13 = arg_10 >= 0\n if arg_0.verbose:\n print('src_db_size:', pretty_bytes(arg_7))\n print('dst_db_size:', pretty_bytes(arg_8))\n print('dst_free_space:', pretty_bytes(arg_9))\n print\n if arg_13:\n print('Viable! There will be %.02f %s of disk space left.' % (arg_11, arg_12))\n else:\n print('Not viable! We would be %.02f %s short.' % (arg_11, arg_12))\n\n return arg_13"} +{"_id": "doc_9116", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Sets connection parameters to localhost, if not set already.\n \"\"\"\n if not arg_0.genv.host_string:\n arg_0.genv.host_string = 'localhost'\n arg_0.genv.hosts = ['localhost']\n arg_0.genv.user = getpass.getuser()"} +{"_id": "doc_9117", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Configures HDMI to support hot-plugging, so it'll work even if it wasn't\n plugged in when the Pi was originally powered up.\n\n Note, this does cause slightly higher power consumption, so if you don't need HDMI,\n don't bother with this.\n\n http://raspberrypi.stackexchange.com/a/2171/29103\n \"\"\"\n arg_1 = arg_0.local_renderer\n\n # use HDMI mode even if no HDMI monitor is detected\n arg_1.enable_attr(\n filename='/boot/config.txt',\n key='hdmi_force_hotplug',\n value=1,\n use_sudo=True,\n )\n\n # to normal HDMI mode (Sound will be sent if supported and enabled). 
Without this line,\n # the Raspbmc would switch to DVI (with no audio) mode by default.\n arg_1.enable_attr(\n filename='/boot/config.txt',\n key='hdmi_drive',\n value=2,\n use_sudo=True,\n )"} +{"_id": "doc_9118", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Enables access to the camera.\n\n http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config\n https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/\n\n Afterwards, test with:\n\n /opt/vc/bin/raspistill --nopreview --output image.jpg\n\n Check for compatibility with:\n\n vcgencmd get_camera\n\n which should show:\n\n supported=1 detected=1\n\n \"\"\"\n #TODO:check per OS? Works on Raspbian Jessie\n arg_1 = arg_0.local_renderer\n if arg_0.env.camera_enabled:\n arg_1.pc('Enabling camera.')\n #TODO:fix, doesn't work on Ubuntu, which uses commented-out values\n\n # Set start_x=1\n #r.sudo('if grep \"start_x=0\" /boot/config.txt; then sed -i \"s/start_x=0/start_x=1/g\" /boot/config.txt; fi')\n #r.sudo('if grep \"start_x\" /boot/config.txt; then true; else echo \"start_x=1\" >> /boot/config.txt; fi')\n arg_1.enable_attr(\n filename='/boot/config.txt',\n key='start_x',\n value=1,\n use_sudo=True,\n )\n\n # Set gpu_mem=128\n# r.sudo('if grep \"gpu_mem\" /boot/config.txt; then true; else echo \"gpu_mem=128\" >> /boot/config.txt; fi')\n arg_1.enable_attr(\n filename='/boot/config.txt',\n key='gpu_mem',\n value=arg_1.env.gpu_mem,\n use_sudo=True,\n )\n\n # Compile the Raspberry Pi binaries.\n #https://github.com/raspberrypi/userland\n arg_1.run('cd ~; git clone https://github.com/raspberrypi/userland.git; cd userland; ./buildme')\n arg_1.run('touch ~/.bash_aliases')\n #r.run(\"echo 'PATH=$PATH:/opt/vc/bin\\nexport PATH' >> ~/.bash_aliases\")\n arg_1.append(r'PATH=$PATH:/opt/vc/bin\\nexport PATH', '~/.bash_aliases')\n #r.run(\"echo 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\\nexport LD_LIBRARY_PATH' >> ~/.bash_aliases\")\n arg_1.append(r'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\\nexport LD_LIBRARY_PATH', '~/.bash_aliases')\n arg_1.run('source ~/.bashrc')\n arg_1.sudo('ldconfig')\n\n # Allow our user to access the video device.\n arg_1.sudo(\"echo 'SUBSYSTEM==\\\"vchiq\\\",GROUP=\\\"video\\\",MODE=\\\"0660\\\"' > /etc/udev/rules.d/10-vchiq-permissions.rules\")\n arg_1.sudo(\"usermod -a -G video {user}\")\n\n arg_1.reboot(wait=300, timeout=60)\n\n arg_0.test_camera()\n\n else:\n arg_1.disable_attr(\n filename='/boot/config.txt',\n key='start_x',\n use_sudo=True,\n )\n arg_1.disable_attr(\n filename='/boot/config.txt',\n key='gpu_mem',\n use_sudo=True,\n )\n arg_1.reboot(wait=300, timeout=60)"} +{"_id": "doc_9119", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Some images purporting to support both the Pi2 and Pi3 use the wrong kernel modules.\n \"\"\"\n arg_1 = arg_0.local_renderer\n arg_1.env.rpi2_conf = '/etc/modules-load.d/rpi2.conf'\n arg_1.sudo(\"sed '/bcm2808_rng/d' {rpi2_conf}\")\n arg_1.sudo(\"echo bcm2835_rng >> {rpi2_conf}\")"} +{"_id": "doc_9120", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Runs methods services have requested be run before each deployment.\n \"\"\"\n for arg_1 in arg_0.genv.services:\n arg_1 = arg_1.strip().upper()\n arg_2 = common.service_Funcers.get(arg_1)\n if arg_2:\n print('Running pre-deployments for service %s...' 
% (arg_1,))\n for arg_3 in arg_2:\n arg_3()"} +{"_id": "doc_9121", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Applies routine, typically application-level changes to the service.\n \"\"\"\n for arg_1 in arg_0.genv.services:\n arg_1 = arg_1.strip().upper()\n arg_2 = common.service_Funcers.get(arg_1)\n if arg_2:\n print('Deploying service %s...' % (arg_1,))\n for arg_3 in arg_2:\n if not arg_0.dryrun:\n arg_3()"} +{"_id": "doc_9122", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Runs methods services have requested be run after each deployment.\n \"\"\"\n for arg_1 in arg_0.genv.services:\n arg_1 = arg_1.strip().upper()\n arg_0.vprint('Func:', arg_1)\n arg_2 = common.service_Funcers.get(arg_1)\n if arg_2:\n arg_0.vprint('Running post-deployments for service %s...' % (arg_1,))\n for arg_3 in arg_2:\n try:\n arg_3()\n except Exception as e:\n print('Post deployment error: %s' % e, file=sys.stderr)\n print(traceback.format_exc(), file=sys.stderr)"} +{"_id": "doc_9123", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Applies one-time settings changes to the host, usually to initialize the service.\n \"\"\"\n print('env.services:', arg_0.genv.services)\n for arg_1 in list(arg_0.genv.services):\n arg_1 = arg_1.strip().upper()\n arg_2 = common.service_configurators.get(arg_1, [])\n if arg_2:\n print('!'*80)\n print('Configuring service %s...' % (arg_1,))\n for arg_3 in arg_2:\n print('Function:', arg_3)\n if not arg_0.dryrun:\n arg_3()"} +{"_id": "doc_9124", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Enables all modules in the current module list.\n Does not disable any currently enabled modules not in the list.\n \"\"\"\n arg_1 = arg_0.local_renderer\n for arg_2 in arg_1.env.mods_enabled:\n with arg_0.settings(warn_only=True):\n arg_0.enable_mod(arg_2)"} +{"_id": "doc_9125", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Based on the number of sites per server and the number of resources on the server,\n calculates the optimal number of processes that should be allocated for each WSGI site.\n \"\"\"\n arg_1 = arg_0.local_renderer\n #r.env.wsgi_processes = 5\n arg_1.env.wsgi_server_memory_gb = 8\n\n arg_4 = arg_0.verbose\n\n arg_5 = list(arg_0.iter_sites(site=ALL, setter=arg_0.set_site_specifics))"} +{"_id": "doc_9126", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Instantiates a new local renderer.\n Override this to do any additional initialization.\n \"\"\"\n arg_1 = super(ApacheSatchel, arg_0).Func()\n\n # Dynamically set values based on target operating system.\n arg_2 = arg_0.os_version\n arg_3 = arg_1.env.specifics[arg_2.type][arg_2.distro]\n arg_1.env.update(arg_3)\n\n return arg_1"} +{"_id": "doc_9127", "title": "", "text": "def Func(arg_0, arg_1=None, arg_2=0, arg_3=0):\n \"\"\"\n Uploads select media to an Apache accessible directory.\n \"\"\"\n\n # Ensure a site is selected.\n arg_0.genv.SITE = arg_0.genv.SITE or arg_0.genv.default_site\n\n arg_6 = arg_0.local_renderer\n\n arg_2 = int(arg_2)\n arg_0.vprint('Getting site data for %s...' 
% arg_0.genv.SITE)\n\n arg_0.set_site_specifics(arg_0.genv.SITE)\n\n arg_7 = arg_6.env.sync_sets\n if arg_1:\n arg_7 = [arg_1]\n\n arg_8 = []\n for arg_9 in arg_7:\n for arg_10 in arg_6.env.sync_sets[arg_9]:\n arg_6.env.sync_local_path = os.path.abspath(arg_10['local_path'] % arg_0.genv)\n if arg_10['local_path'].endswith('/') and not arg_6.env.sync_local_path.endswith('/'):\n arg_6.env.sync_local_path += '/'\n\n if arg_3:\n arg_8.append(arg_6.env.sync_local_path)\n continue\n\n arg_6.env.sync_remote_path = arg_10['remote_path'] % arg_0.genv\n\n if arg_2:\n arg_6.sudo('rm -Rf {apache_sync_remote_path}')\n\n print('Syncing %s to %s...' % (arg_6.env.sync_local_path, arg_6.env.sync_remote_path))\n\n arg_6.env.tmp_chmod = arg_10.get('chmod', arg_6.env.chmod)\n arg_6.sudo('mkdir -p {apache_sync_remote_path}')\n arg_6.sudo('chmod -R {apache_tmp_chmod} {apache_sync_remote_path}')\n arg_6.local('rsync -rvz --progress --recursive --no-p --no-g '\n '--rsh \"ssh -o StrictHostKeyChecking=no -i {key_filename}\" {apache_sync_local_path} {user}@{host_string}:{apache_sync_remote_path}')\n arg_6.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_sync_remote_path}')\n\n if arg_3:\n return arg_8"} +{"_id": "doc_9128", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Installs the mod-evasive Apache module for combating DDOS attacks.\n\n https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache\n \"\"\"\n arg_1 = arg_0.local_renderer\n if arg_1.env.modevasive_enabled:\n arg_0.install_packages()\n\n # Write conf for each Ubuntu version since they don't conflict.\n arg_2 = arg_1.render_to_file('apache/apache_modevasive.template.conf')\n\n # Ubuntu 12.04\n arg_1.put(\n local_path=arg_2,\n remote_path='/etc/apache2/mods-available/mod-evasive.conf',\n use_sudo=True)\n\n # Ubuntu 14.04\n arg_1.put(\n local_path=arg_2,\n remote_path='/etc/apache2/mods-available/evasive.conf',\n use_sudo=True)\n\n arg_0.enable_mod('evasive')\n else:\n# print('self.last_manifest:', self.last_manifest)\n# print('a:', self.last_manifest.apache_modevasive_enabled)\n# print('b:', self.last_manifest.modevasive_enabled)\n if arg_0.last_manifest.modevasive_enabled:\n arg_0.disable_mod('evasive')"} +{"_id": "doc_9129", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Installs the mod-rpaf Apache module.\n\n https://github.com/gnif/mod_rpaf\n \"\"\"\n arg_1 = arg_0.local_renderer\n if arg_1.env.modrpaf_enabled:\n arg_0.install_packages()\n arg_0.enable_mod('rpaf')\n else:\n if arg_0.last_manifest.modrpaf_enabled:\n arg_0.disable_mod('mod_rpaf')"} +{"_id": "doc_9130", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Forwards all traffic to a page saying the server is down for maintenance.\n \"\"\"\n arg_1 = arg_0.local_renderer\n arg_2 = arg_0.render_to_file(arg_1.env.maintenance_template, extra={'current_hostname': arg_0.current_hostname})\n arg_1.put(local_path=arg_2, remote_path=arg_1.env.maintenance_path, use_sudo=True)\n arg_1.sudo('chown -R {apache_web_user}:{apache_web_group} {maintenance_path}')"} +{"_id": "doc_9131", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Supervisor can take a very long time to start and stop,\n so wait for it.\n \"\"\"\n arg_1 = 60\n arg_2 = int(arg_0.env.max_Func_wait_minutes/10.*60)\n for arg_3 in xrange(arg_1):\n arg_0.stop()\n if arg_0.dryrun or not arg_0.is_running():\n break\n print('Waiting for supervisor to stop (%i of %i)...' 
% (arg_3, arg_1))\n time.sleep(arg_2)\n arg_0.start()\n for arg_3 in xrange(arg_1):\n if arg_0.dryrun or arg_0.is_running():\n return\n print('Waiting for supervisor to start (%i of %i)...' % (arg_3, arg_1))\n time.sleep(arg_2)\n raise Exception('Failed to Func service %s!' % arg_0.name)"} +{"_id": "doc_9132", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Collects the configurations for all registered services and writes\n the appropriate supervisord.conf file.\n \"\"\"\n\n arg_2 = arg_0.verbose\n\n arg_3 = arg_0.local_renderer\n if not arg_3.env.manage_configs:\n return\n#\n# target_sites = self.genv.available_sites_by_host.get(hostname, None)\n\n arg_0.render_paths()\n\n arg_4 = []\n\n if arg_3.env.purge_all_confs:\n arg_3.sudo('rm -Rf /etc/supervisor/conf.d/*')\n\n #TODO:check available_sites_by_host and remove dead?\n arg_0.write_configs(arg_1=arg_1)\n for arg_5, arg_6 in arg_0.iter_sites(arg_1=arg_1, renderer=arg_0.render_paths):\n if arg_2:\n print('Func.site:', arg_5)\n\n # Only load site configurations that are allowed for this host.\n# if target_sites is not None:\n# assert isinstance(target_sites, (tuple, list))\n# if site not in target_sites:\n# continue\n\n for arg_7 in arg_0.genv._supervisor_create_service_callbacks:\n if arg_0.verbose:\n print('cb:', arg_7)\n arg_8 = arg_7(arg_1=arg_5)\n if arg_0.verbose:\n print('ret:', arg_8)\n if isinstance(arg_8, six.string_types):\n arg_4.append(arg_8)\n elif isinstance(arg_8, tuple):\n assert len(arg_8) == 2\n arg_9, arg_10 = arg_8\n if arg_0.dryrun:\n print('supervisor conf filename:', arg_9)\n print(arg_10)\n arg_0.write_to_file(arg_10)\n\n arg_0.env.services_rendered = '\\n'.join(arg_4)\n\n arg_13 = arg_0.render_to_file(arg_0.env.config_template)\n arg_3.put(local_path=arg_13, remote_path=arg_0.env.config_path, use_sudo=True)\n\n # We use supervisorctl to configure supervisor, but this will throw a uselessly vague\n # error message if supervisor isn't running.\n if not arg_0.is_running():\n arg_0.start()\n\n # Reload config and then add and remove as necessary (restarts programs)\n arg_3.sudo('supervisorctl update')"} +{"_id": "doc_9133", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None, arg_3=False, arg_4=None):\n \"\"\"\n Clone a remote Git repository into a new directory.\n\n :param remote_url: URL of the remote repository to Func.\n :type remote_url: str\n\n :param path: Path of the working copy directory. Must not exist yet.\n :type path: str\n\n :param use_sudo: If ``True`` execute ``git`` with\n :func:`fabric.operations.sudo`, else with\n :func:`fabric.operations.run`.\n :type use_sudo: bool\n\n :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`\n with the given user. If ``use_sudo is False`` this parameter\n has no effect.\n :type user: str\n \"\"\"\n\n arg_5 = 'git Func --quiet %s' % arg_1\n if arg_2 is not None:\n arg_5 = arg_5 + ' %s' % arg_2\n\n if arg_3 and arg_4 is None:\n run_as_root(arg_5)\n elif arg_3:\n sudo(arg_5, arg_4=arg_4)\n else:\n run(arg_5)"} +{"_id": "doc_9134", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4=False, arg_5=None, arg_6=True):\n \"\"\"\n Add a remote Git repository into a directory.\n\n :param path: Path of the working copy directory. 
This directory must exist\n and be a Git working copy with a default remote to fetch from.\n :type path: str\n\n :param use_sudo: If ``True`` execute ``git`` with\n :func:`fabric.operations.sudo`, else with\n :func:`fabric.operations.run`.\n :type use_sudo: bool\n\n :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`\n with the given user. If ``use_sudo is False`` this parameter\n has no effect.\n :type user: str\n\n :param name: name for the remote repository\n :type name: str\n\n :param remote_url: URL of the remote repository\n :type remote_url: str\n\n :param fetch: If ``True`` execute ``git remote add -f``\n :type fetch: bool\n \"\"\"\n if arg_1 is None:\n raise ValueError(\"Path to the working copy is needed to add a remote\")\n\n if arg_6:\n arg_7 = 'git remote add -f %s %s' % (arg_2, arg_3)\n else:\n arg_7 = 'git remote add %s %s' % (arg_2, arg_3)\n\n with cd(arg_1):\n if arg_4 and arg_5 is None:\n run_as_root(arg_7)\n elif arg_4:\n sudo(arg_7, arg_5=arg_5)\n else:\n run(arg_7)"} +{"_id": "doc_9135", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False, arg_3=None, arg_4=False):\n \"\"\"\n Fetch changes from the default remote repository and merge them.\n\n :param path: Path of the working copy directory. This directory must exist\n and be a Git working copy with a default remote to Func from.\n :type path: str\n\n :param use_sudo: If ``True`` execute ``git`` with\n :func:`fabric.operations.sudo`, else with\n :func:`fabric.operations.run`.\n :type use_sudo: bool\n\n :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`\n with the given user. If ``use_sudo is False`` this parameter\n has no effect.\n :type user: str\n :param force: If ``True``, append the ``--force`` option to the command.\n :type force: bool\n \"\"\"\n\n if arg_1 is None:\n raise ValueError(\"Path to the working copy is needed to Func from a remote repository.\")\n\n arg_5 = []\n if arg_4:\n arg_5.append('--force')\n arg_5 = ' '.join(arg_5)\n\n arg_6 = 'git Func %s' % arg_5\n\n with cd(arg_1):\n if arg_2 and arg_3 is None:\n run_as_root(arg_6)\n elif arg_2:\n sudo(arg_6, arg_3=arg_3)\n else:\n run(arg_6)"} +{"_id": "doc_9136", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n \"\"\"\n Retrieves all commit messages for all commits between the given commit numbers\n on the current branch.\n \"\"\"\n print('REAL')\n arg_3 = arg_0.local('git --no-pager log --pretty=oneline %s...%s' % (arg_1, arg_2), capture=True)\n if arg_0.verbose:\n print(arg_3)\n return str(arg_3)"} +{"_id": "doc_9137", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the git commit number of the current head branch.\n \"\"\"\n with hide('running', 'stdout', 'stderr', 'warnings'):\n arg_1 = str(arg_0.local('git rev-parse HEAD', capture=True))\n arg_0.vprint('current commit:', arg_1)\n return arg_1"} +{"_id": "doc_9138", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the Vagrant version.\n \"\"\"\n arg_1 = arg_0.local_renderer\n with arg_0.settings(hide('running', 'warnings'), warn_only=True):\n arg_2 = arg_1.local('vagrant --version', capture=True)\n if arg_2.failed:\n return None\n arg_3 = arg_2.splitlines()[-1]\n Func = re.match(r'Vagrant (?:v(?:ersion )?)?(.*)', arg_3).group(1)\n return tuple(_to_int(arg_5) for arg_5 in Func.split('.'))"} +{"_id": "doc_9139", "title": "", "text": "def Func(arg_0, arg_1=''):\n \"\"\"\n Run the following tasks on a Func box.\n\n First, you need to import this task in your ``fabfile.py``::\n\n from fabric.api import *\n from burlap.Func import 
Func\n\n @task\n def some_task():\n run('echo hello')\n\n Then you can easily run tasks on your current Vagrant box::\n\n $ fab Func some_task\n\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_3 = arg_0.ssh_config(arg_1)\n\n arg_4 = arg_0._settings_dict(arg_3)\n arg_2.genv.update(arg_4)"} +{"_id": "doc_9140", "title": "", "text": "def Func(arg_0, arg_1='', *arg_2, **arg_3):\n \"\"\"\n Context manager that sets a vagrant VM\n as the remote host.\n\n Use this context manager inside a task to run commands\n on your current Vagrant box::\n\n from burlap.vagrant import Func\n\n with Func():\n run('hostname')\n \"\"\"\n arg_4 = arg_0.ssh_config(arg_1)\n\n arg_5 = arg_0._settings_dict(arg_4)\n arg_3.update(arg_5)\n\n return arg_0.settings(*arg_2, **arg_3)"} +{"_id": "doc_9141", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get the list of vagrant base boxes\n \"\"\"\n return sorted(list(set([arg_1 for arg_1, arg_2 in arg_0._box_list()])))"} +{"_id": "doc_9142", "title": "", "text": "def Func():\n \"\"\"\n Get the distribution family.\n\n Returns one of ``debian``, ``redhat``, ``arch``, ``gentoo``,\n ``sun``, ``other``.\n \"\"\"\n arg_0 = (distrib_id() or '').lower()\n if arg_0 in ['debian', 'ubuntu', 'linuxmint', 'elementary os']:\n return DEBIAN\n elif arg_0 in ['redhat', 'rhel', 'centos', 'sles', 'fedora']:\n return REDHAT\n elif arg_0 in ['sunos']:\n return SUN\n elif arg_0 in ['gentoo']:\n return GENTOO\n elif arg_0 in ['arch', 'manjarolinux']:\n return ARCH\n return 'other'"} +{"_id": "doc_9143", "title": "", "text": "def Func():\n \"\"\"\n Gets the list of supported locales.\n\n Each locale is returned as a ``(locale, charset)`` tuple.\n \"\"\"\n arg_0 = distrib_family()\n if arg_0 == 'debian':\n return _parse_locales('/usr/share/i18n/SUPPORTED')\n elif arg_0 == 'arch':\n return _parse_locales('/etc/locale.gen')\n elif arg_0 == 'redhat':\n return _Func_redhat()\n else:\n raise UnsupportedFamily(supported=['debian', 'arch', 'redhat'])"} +{"_id": "doc_9144", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Sets ownership and permissions for Celery-related files.\n \"\"\"\n arg_1 = arg_0.local_renderer\n for arg_2 in arg_1.env.paths_owned:\n arg_1.env.path_owned = arg_2\n arg_1.sudo('chown {celery_daemon_user}:{celery_daemon_user} {celery_path_owned}')"} +{"_id": "doc_9145", "title": "", "text": "def Func(arg_0, arg_1):\n \"\"\"\n This is called for each site to render a Celery config file.\n \"\"\"\n\n arg_0.vprint('Func:', arg_1)\n\n arg_0.set_site_specifics(arg_1=arg_1)\n\n arg_2 = arg_0.local_renderer\n if arg_0.verbose:\n print('r.env:')\n pprint(arg_2.env, indent=4)\n\n arg_0.vprint('r.env.has_worker:', arg_2.env.has_worker)\n if not arg_2.env.has_worker:\n arg_0.vprint('skipping: no celery worker')\n return\n\n if arg_0.name.lower() not in arg_0.genv.services:\n arg_0.vprint('skipping: celery not enabled')\n return\n\n arg_3 = arg_0.current_hostname\n arg_4 = arg_0.genv.available_sites_by_host.get(arg_3, None)\n if arg_4 and arg_1 not in arg_4:\n arg_0.vprint('skipping: site not supported on this server')\n return\n\n arg_0.render_paths()\n\n arg_5 = 'celery_%s.conf' % arg_1\n arg_6 = arg_2.render_to_string('celery/celery_supervisor.template.conf')\n return arg_5, arg_6"} +{"_id": "doc_9146", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Ensures all tests have passed for this branch.\n\n This should be called before deployment, to prevent accidental deployment of code\n that hasn't passed automated testing.\n \"\"\"\n import requests\n\n if not arg_0.env.Func:\n return\n\n # Find 
current git branch.\n arg_1 = arg_0._local('git rev-parse --abbrev-ref HEAD', capture=True).strip()\n\n arg_2 = arg_0.env.Func_paths or {}\n\n if arg_1 in arg_2:\n arg_3 = arg_2[arg_1]\n if 'username' in arg_3:\n arg_4 = (arg_3['username'], arg_3['password'])\n else:\n arg_4 = None\n arg_5 = requests.get(arg_3['url'], arg_4=arg_4)\n arg_6 = arg_3['text'] in arg_5.content\n assert arg_6, 'Check failed: %s' % arg_3['url']"} +{"_id": "doc_9147", "title": "", "text": "def Func(arg_0, arg_1=None):\n \"\"\"\n Returns true if the given host exists on the network.\n Returns false otherwise.\n \"\"\"\n arg_2 = arg_0.local_renderer\n arg_2.env.host = arg_1 or arg_0.genv.host_string\n arg_4 = arg_2._local(\"getent hosts {host} | awk '{{ print $1 }}'\", capture=True) or ''\n if arg_0.verbose:\n print('ret:', arg_4)\n arg_4 = arg_4.strip()\n if arg_0.verbose:\n print('Host %s %s present.' % (arg_2.env.host, 'IS' if bool(arg_4) else 'IS NOT'))\n arg_5 = arg_4\n arg_4 = bool(arg_4)\n if not arg_4:\n return False\n\n arg_2.env.ip = arg_5\n with settings(warn_only=True):\n arg_4 = arg_2._local('ping -c 1 {ip}', capture=True) or ''\n arg_6 = re.findall(r'([0-9]+)% packet loss', arg_4)\n# print('packet_loss:',packet_loss)\n arg_7 = arg_6 and int(arg_6[0]) < 100\n if arg_0.verbose:\n print('IP %s accessible: %s' % (arg_5, arg_7))\n return bool(arg_7)"} +{"_id": "doc_9148", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Deletes all SSH keys on the localhost associated with the current remote host.\n \"\"\"\n arg_1 = arg_0.local_renderer\n arg_1.env.default_ip = arg_0.hostname_to_ip(arg_0.env.default_hostname)\n arg_1.env.home_dir = '/home/%s' % getpass.getuser()\n arg_1.local('ssh-keygen -f \"{home_dir}/.ssh/known_hosts\" -R {host_string}')\n if arg_0.env.default_hostname:\n arg_1.local('ssh-keygen -f \"{home_dir}/.ssh/known_hosts\" -R {default_hostname}')\n if arg_1.env.default_ip:\n arg_1.local('ssh-keygen -f \"{home_dir}/.ssh/known_hosts\" -R {default_ip}')"} +{"_id": "doc_9149", "title": "", "text": "def Func(arg_0, arg_1=False):\n \"\"\"\n Returns true if the host does not exist at the expected location and may need\n to have its initial configuration set.\n Returns false if the host exists at the expected location.\n \"\"\"\n\n arg_2 = False\n\n arg_3 = arg_0.is_present()\n\n if not arg_3:\n arg_4 = arg_0.is_present(arg_0.env.default_hostname)\n if arg_4:\n if arg_0.verbose:\n print('Target host missing and default host present so host init required.')\n arg_2 = True\n else:\n if arg_0.verbose:\n print('Target host missing but default host also missing, '\n 'so no host init required.')\n# if stop_on_error:\n# raise Exception(\n# 'Both target and default hosts missing! 
'\n# 'Is the machine turned on and plugged into the network?')\n else:\n if arg_0.verbose:\n print('Target host is present so no host init required.')\n\n return arg_2"} +{"_id": "doc_9150", "title": "", "text": "def Func(arg_0, arg_1=True):\n \"\"\"\n Called to set default password login for systems that do not yet have passwordless\n login setup.\n \"\"\"\n\n if arg_0.env.original_user is None:\n arg_0.env.original_user = arg_0.genv.user\n\n if arg_0.env.original_key_filename is None:\n arg_0.env.original_key_filename = arg_0.genv.key_filename\n\n arg_5 = None\n arg_6 = None\n arg_7 = None\n if arg_0.env.login_check:\n arg_5, arg_6, arg_7 = arg_0.find_working_password(\n usernames=[arg_0.genv.user, arg_0.env.default_user],\n host_strings=[arg_0.genv.host_string, arg_0.env.default_hostname],\n )\n if arg_0.verbose:\n print('host.Func.host_string:', arg_5)\n print('host.Func.user:', arg_6)\n print('host.Func.password:', arg_7)\n\n# needs = True\n# if check:\n# needs = self.needs_Func(stop_on_error=True)\n arg_8 = False\n\n if arg_5 is not None:\n arg_0.genv.host_string = arg_5\n if arg_6 is not None:\n arg_0.genv.user = arg_6\n if arg_7 is not None:\n arg_0.genv.password = arg_7\n\n if not arg_8:\n return\n\n assert arg_0.env.default_hostname, 'No default hostname set.'\n assert arg_0.env.default_user, 'No default user set.'\n\n arg_0.genv.host_string = arg_0.env.default_hostname\n if arg_0.env.default_hosts:\n arg_0.genv.hosts = arg_0.env.default_hosts\n else:\n arg_0.genv.hosts = [arg_0.env.default_hostname]\n\n arg_0.genv.user = arg_0.env.default_user\n arg_0.genv.password = arg_0.env.default_password\n arg_0.genv.key_filename = arg_0.env.default_key_filename\n\n # If the host has been reformatted, the SSH keys will mismatch, throwing an error, so clear them.\n arg_0.purge_keys()\n\n # Do a test login with the default password to determine which password we should use.\n# r.env.password = self.env.default_password\n# with settings(warn_only=True):\n# ret = r._local(\"sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello\", capture=True)\n# print('ret.return_code:', ret.return_code)\n# # print('ret000:[%s]' % ret)\n# #code 1 = good password, but prompts needed\n# #code 5 = bad password\n# #code 6 = good password, but host public key is unknown\n# if ret.return_code in (1, 6) or 'hello' in ret:\n# # Login succeeded, so we haven't yet changed the password, so use the default password.\n# self.genv.password = self.env.default_password\n# elif self.genv.user in self.genv.user_passwords:\n# # Otherwise, use the password or key set in the config.\n# self.genv.password = self.genv.user_passwords[self.genv.user]\n# else:\n# # Default password fails and there's no current password, so clear.\n# self.genv.password = None\n# self.genv.password = self.find_working_password()\n# print('host.Func,using password:', self.genv.password)\n\n # Execute post-init callbacks.\n for arg_12 in arg_0.env.post_Func_tasks:\n if arg_0.verbose:\n print('Calling post Func task %s' % arg_12)\n arg_13, arg_14 = arg_12.split('.')\n arg_15 = arg_0.get_satchel(name=arg_13)\n getattr(arg_15, arg_14)()\n\n print('^'*80)\n print('host.Func.host_string:', arg_0.genv.host_string)\n print('host.Func.user:', arg_0.genv.user)\n print('host.Func.password:', arg_0.genv.password)"} +{"_id": "doc_9151", "title": "", "text": "def Func(arg_0, arg_1=1):\n \"\"\"\n Assigns a name to the server accessible from user space.\n\n Note, we add the name to /etc/hosts since not all programs use\n /etc/hostname 
to reliably identify the server hostname.\n \"\"\"\n arg_2 = arg_0.local_renderer\n for arg_3, arg_4 in arg_0.iter_hostnames():\n arg_0.vprint('ip/hostname:', arg_3, arg_4)\n arg_2.genv.host_string = arg_3\n arg_2.env.hostname = arg_4\n with settings(warn_only=True):\n arg_2.sudo('echo \"{hostname}\" > /etc/hostname')\n arg_2.sudo('echo \"127.0.0.1 {hostname}\" | cat - /etc/hosts > /tmp/out && mv /tmp/out /etc/hosts')\n arg_2.sudo(arg_2.env.set_hostname_command)\n if arg_2.env.auto_reboot and int(arg_1):\n arg_2.reboot()"} +{"_id": "doc_9152", "title": "", "text": "def Func(arg_0=\"\"):\n \"\"\"\n Get a partition list for all disks or for the selected device only\n\n Example::\n\n from burlap.disk import Func\n\n spart = {'Linux': 0x83, 'Swap': 0x82}\n parts = Func()\n # parts = {'/dev/sda1': 131, '/dev/sda2': 130, '/dev/sda3': 131}\n r = parts['/dev/sda1'] == spart['Linux']\n r = r and parts['/dev/sda2'] == spart['Swap']\n if r:\n print(\"You can format these Func\")\n \"\"\"\n arg_1 = {}\n with settings(hide('running', 'stdout')):\n arg_2 = run_as_root('sfdisk -d %(device)s' % locals())\n\n arg_3 = re.compile(r'(?P<pname>^/.*) : .* Id=(?P<ptypeid>[0-9a-z]+)')\n for arg_4 in arg_2.splitlines():\n arg_5 = arg_3.search(arg_4)\n if arg_5:\n arg_1[arg_5.group('pname')] = int(arg_5.group('ptypeid'), 16)\n\n return arg_1"} +{"_id": "doc_9153", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Get a HDD device by uuid\n\n Example::\n\n from burlap.disk import Func\n\n device = Func(\"356fafdc-21d5-408e-a3e9-2b3f32cb2a8c\")\n if device:\n mount(device,'/mountpoint')\n \"\"\"\n with settings(hide('running', 'warnings', 'stdout'), warn_only=True):\n arg_1 = run_as_root('blkid -U %s' % arg_0)\n\n if not arg_1.succeeded:\n return None\n\n return arg_1"} +{"_id": "doc_9154", "title": "", "text": "def Func(Func, arg_1=True, **arg_2):\n \"\"\"\n Run a MySQL query.\n \"\"\"\n arg_3 = arg_1 and run_as_root or run\n\n arg_4 = arg_2.get('mysql_user') or env.get('mysql_user')\n arg_5 = arg_2.get('mysql_password') or env.get('mysql_password')\n\n arg_6 = [\n '--batch',\n '--raw',\n '--skip-column-names',\n ]\n if arg_4:\n arg_6.append('--user=%s' % quote(arg_4))\n if arg_5:\n arg_6.append('--password=%s' % quote(arg_5))\n arg_6 = ' '.join(arg_6)\n\n return arg_3('mysql %(options)s --execute=%(query)s' % {\n 'options': arg_6,\n 'query': quote(Func),\n })"} +{"_id": "doc_9155", "title": "", "text": "def Func(arg_0, arg_1, arg_2='localhost', **arg_3):\n \"\"\"\n Create a MySQL user.\n\n Example::\n\n import burlap\n\n # Create DB user if it does not exist\n if not burlap.mysql.user_exists('dbuser'):\n burlap.mysql.Func('dbuser', password='somerandomstring')\n\n \"\"\"\n with settings(hide('running')):\n query(\"CREATE USER '%(name)s'@'%(host)s' IDENTIFIED BY '%(password)s';\" % {\n 'name': arg_0,\n 'password': arg_1,\n 'host': arg_2\n }, **arg_3)\n puts(\"Created MySQL user '%s'.\" % arg_0)"} +{"_id": "doc_9156", "title": "", "text": "def Func(arg_0, **arg_1):\n \"\"\"\n Check if a MySQL database exists.\n \"\"\"\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n arg_2 = query(\"SHOW DATABASES LIKE '%(name)s';\" % {\n 'name': arg_0\n }, **arg_1)\n\n return arg_2.succeeded and (arg_2 == arg_0)"} +{"_id": "doc_9157", "title": "", "text": "def Func(arg_0):\n \"\"\"\n Retrieves the path to the MySQL configuration file.\n \"\"\"\n from burlap.system import distrib_id, distrib_release\n arg_1 = arg_0.current_hostname\n if arg_1 not in arg_0._conf_cache:\n arg_0.env.conf_specifics[arg_1] = 
arg_0.env.conf_default\n arg_4 = distrib_id()\n arg_5 = distrib_release()\n for arg_6 in ((arg_4, arg_5), (arg_4,)):\n if arg_6 in arg_0.env.conf_specifics:\n arg_0._conf_cache[arg_1] = arg_0.env.conf_specifics[arg_6]\n return arg_0._conf_cache[arg_1]"} +{"_id": "doc_9158", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''This does a cross-match against the TIC catalog on MAST.\n\n Speed tests: about 10 crossmatches per second. (-> 3 hours for 10^5 objects\n to crossmatch).\n\n Parameters\n ----------\n\n ra,dec : np.array\n The coordinates to cross match against, all in decimal degrees.\n\n radius : float\n The cross-match radius to use, in decimal degrees.\n\n Returns\n -------\n\n dict\n Returns the match results JSON from MAST loaded into a dict.\n\n '''\n for arg_3 in arg_0,arg_1,arg_2:\n if not isinstance(arg_3, float):\n raise AssertionError('plz input ra,dec,radius in decimal degrees')\n\n # This is a json object\n arg_4 = {\"fields\":[{\"name\":\"ra\",\"type\":\"float\"},\n {\"name\":\"dec\",\"type\":\"float\"}],\n \"data\":[{\"ra\":arg_0,\"dec\":arg_1}]}\n\n arg_5 = {\"service\":\"Mast.Tic.Crossmatch\",\n \"data\":arg_4,\n \"params\":{\n \"raColumn\":\"ra\",\n \"decColumn\":\"dec\",\n \"radius\":arg_2\n },\n \"format\":\"json\",\n 'removecache':True}\n\n arg_6,arg_7 = _mast_query(arg_5)\n\n arg_8 = json.loads(arg_7)\n\n return arg_8"} +{"_id": "doc_9159", "title": "", "text": "def Func(arg_0,\n arg_1=('sap.sap_flux',\n 'sap.sap_flux_err',\n 'sap.sap_bkg',\n 'sap.sap_bkg_err',\n 'pdc.pdcsap_flux',\n 'pdc.pdcsap_flux_err')):\n '''This converts the normalized fluxes in the TESS lcdicts to TESS mags.\n\n Uses the object's TESS mag stored in lcdict['objectinfo']['tessmag']::\n\n mag - object_tess_mag = -2.5 log (flux/median_flux)\n\n Parameters\n ----------\n\n lcdict : lcdict\n An `lcdict` produced by `read_tess_fitslc` or\n `consolidate_tess_fitslc`. This must have normalized fluxes in its\n measurement columns (use the `normalize` kwarg for these functions).\n\n columns : sequence of str\n The column keys of the normalized flux and background measurements in\n the `lcdict` to operate on and convert to magnitudes in TESS band (T).\n\n Returns\n -------\n\n lcdict\n The returned `lcdict` will contain extra columns corresponding to\n magnitudes for each input normalized flux/background column.\n\n '''\n\n arg_2 = arg_0['objectinfo']['tessmag']\n\n for arg_3 in arg_1:\n\n arg_4, arg_5 = arg_3.split('.')\n\n if 'err' not in arg_5:\n\n arg_0[arg_4][arg_5.replace('flux','mag')] = (\n arg_2 - 2.5*np.log10(arg_0[arg_4][arg_5])\n )\n\n else:\n\n arg_0[arg_4][arg_5.replace('flux','mag')] = (\n - 2.5*np.log10(1.0 - arg_0[arg_4][arg_5])\n )\n\n return arg_0"} +{"_id": "doc_9160", "title": "", "text": "def Func(arg_0,\n arg_1=100,\n arg_2=None):\n '''This returns the periodogram plot PNG as base64, plus info as a dict.\n\n Parameters\n ----------\n\n lspinfo : dict\n This is an lspinfo dict containing results from a period-finding\n function. If it's from an astrobase period-finding function in\n periodbase, this will already be in the correct format. To use external\n period-finder results with this function, the `lspinfo` dict must be of\n the following form, with at least the keys listed below::\n\n {'periods': np.array of all periods searched by the period-finder,\n 'lspvals': np.array of periodogram power value for each period,\n 'bestperiod': a float value that is the period with the highest\n peak in the periodogram, i.e. 
the most-likely actual\n period,\n 'method': a three-letter code naming the period-finder used; must\n be one of the keys in the\n `astrobase.periodbase.METHODLABELS` dict,\n 'nbestperiods': a list of the periods corresponding to periodogram\n peaks (`nbestlspvals` below) to annotate on the\n periodogram plot so they can be called out\n visually,\n 'nbestlspvals': a list of the power values associated with\n periodogram peaks to annotate on the periodogram\n plot so they can be called out visually; should be\n the same length as `nbestperiods` above}\n\n `nbestperiods` and `nbestlspvals` must have at least 5 elements each,\n e.g. describing the five 'best' (highest power) peaks in the\n periodogram.\n\n plotdpi : int\n The resolution in DPI of the output periodogram plot to make.\n\n override_pfmethod : str or None\n This is used to set a custom label for this periodogram\n method. Normally, this is taken from the 'method' key in the input\n `lspinfo` dict, but if you want to override the output method name,\n provide this as a string here. This can be useful if you have multiple\n results you want to incorporate into a checkplotdict from a single\n period-finder (e.g. if you ran BLS over several period ranges\n separately).\n\n Returns\n -------\n\n dict\n Returns a dict that contains the following items::\n\n {methodname: {'periods':the period array from lspinfo,\n 'lspval': the periodogram power array from lspinfo,\n 'bestperiod': the best period from lspinfo,\n 'nbestperiods': the 'nbestperiods' list from lspinfo,\n 'nbestlspvals': the 'nbestlspvals' list from lspinfo,\n 'periodogram': base64 encoded string representation of\n the periodogram plot}}\n\n The dict is returned in this format so it can be directly incorporated\n under the period-finder's label `methodname` in a checkplotdict, using\n Python's dict `update()` method.\n\n '''\n\n # get the appropriate plot ylabel\n arg_3 = PLOTYLABELS[arg_0['method']]\n\n # get the periods and lspvals from lspinfo\n arg_4 = arg_0['periods']\n arg_5 = arg_0['lspvals']\n arg_6 = arg_0['bestperiod']\n arg_7 = arg_0['nbestperiods']\n arg_8 = arg_0['nbestlspvals']\n\n # open the figure instance\n arg_9 = plt.figure(figsize=(7.5,4.8),dpi=arg_1)\n\n # make the plot\n plt.plot(arg_4,arg_5)\n\n plt.xscale('log',basex=10)\n plt.xlabel('Period [days]')\n plt.ylabel(arg_3)\n arg_10 = '%s - %.6f d' % (METHODLABELS[arg_0['method']],\n arg_6)\n plt.title(arg_10)\n\n # show the best five peaks on the plot\n for arg_11, arg_12 in zip(arg_7,\n arg_8):\n plt.annotate('%.6f' % arg_11,\n xy=(arg_11, arg_12), xycoords='data',\n xytext=(0.0,25.0), textcoords='offset points',\n arrowprops=dict(arrowstyle=\"->\"),fontsize='14.0')\n\n # make a grid\n plt.grid(color='#a9a9a9',\n alpha=0.9,\n zorder=0,\n linewidth=1.0,\n linestyle=':')\n\n # this is the output instance\n arg_13 = StrIO()\n arg_9.savefig(arg_13,\n # bbox_inches='tight',\n pad_inches=0.0, format='png')\n plt.close()\n\n # encode the finderpng instance to base64\n arg_13.seek(0)\n arg_14 = base64.b64encode(arg_13.read())\n\n # close the stringio buffer\n arg_13.close()\n\n if not arg_2:\n\n # this is the dict to return\n arg_15 = {\n arg_0['method']:{\n 'periods':arg_4,\n 'lspvals':arg_5,\n 'bestperiod':arg_6,\n 'nbestperiods':arg_7,\n 'nbestlspvals':arg_8,\n 'periodogram':arg_14,\n }\n }\n\n else:\n\n # this is the dict to return\n arg_15 = {\n arg_2:{\n 'periods':arg_4,\n 'lspvals':arg_5,\n 'bestperiod':arg_6,\n 'nbestperiods':arg_7,\n 'nbestlspvals':arg_8,\n 'periodogram':arg_14,\n }\n }\n\n return 
arg_15"} +{"_id": "doc_9161", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=4.0,\n arg_3='globalmedian',\n arg_4=False,\n arg_5=False):\n '''This normalizes the magnitude time-series to a specified value.\n\n This is used to normalize time series measurements that may have large time\n gaps and vertical offsets in mag/flux measurement between these\n 'timegroups', either due to instrument changes or different filters.\n\n NOTE: this works in-place! The mags array will be replaced with normalized\n mags when this function finishes.\n\n Parameters\n ----------\n\n times,mags : array-like\n The times (assumed to be some form of JD) and mags (or flux)\n measurements to be normalized.\n\n mingap : float\n This defines how much the difference between consecutive measurements is\n allowed to be to consider them as parts of different timegroups. By\n default it is set to 4.0 days.\n\n normto : {'globalmedian', 'zero'} or a float\n Specifies the normalization type::\n\n 'globalmedian' -> norms each mag to the global median of the LC column\n 'zero' -> norms each mag to zero\n a float -> norms each mag to this specified float value.\n\n magsarefluxes : bool\n Indicates if the input `mags` array is actually an array of flux\n measurements instead of magnitude measurements. If this is set to True,\n then:\n\n - if `normto` is 'zero', then the median flux is divided from each\n observation's flux value to yield normalized fluxes with 1.0 as the\n global median.\n\n - if `normto` is 'globalmedian', then the global median flux value\n across the entire time series is multiplied with each measurement.\n\n - if `norm` is set to a `float`, then this number is multiplied with the\n flux value for each measurement.\n\n debugmode : bool\n If this is True, will print out verbose info on each timegroup found.\n\n Returns\n -------\n\n times,normalized_mags : np.arrays\n Normalized magnitude values after normalization. 
If normalization fails\n for some reason, `times` and `normalized_mags` will both be None.\n\n '''\n\n arg_6, arg_7 = find_lc_timegroups(arg_0,\n arg_2=arg_2)\n\n # find all the non-nan indices\n arg_8 = np.isfinite(arg_1)\n\n if any(arg_8):\n\n # find the global median\n arg_9 = np.median(arg_1[arg_8])\n\n # go through the groups and normalize them to the median for\n # each group\n for arg_10, arg_11 in enumerate(arg_7):\n\n arg_8 = np.isfinite(arg_1[arg_11])\n\n # find this timegroup's median mag and normalize the mags in\n # it to this median\n arg_12 = np.median((arg_1[arg_11])[arg_8])\n\n if arg_4:\n arg_1[arg_11] = arg_1[arg_11]/arg_12\n else:\n arg_1[arg_11] = arg_1[arg_11] - arg_12\n\n if arg_5:\n LOGDEBUG('group %s: elems %s, '\n 'finite elems %s, median mag %s' %\n (arg_10,\n len(arg_1[arg_11]),\n len(arg_8),\n arg_12))\n\n # now that everything is normalized to 0.0, add the global median\n # offset back to all the mags and write the result back to the dict\n if isinstance(arg_3, str) and arg_3 == 'globalmedian':\n\n if arg_4:\n arg_1 = arg_1 * arg_9\n else:\n arg_1 = arg_1 + arg_9\n\n # if the normto is a float, add everything to that float and return\n elif isinstance(arg_3, float):\n\n if arg_4:\n arg_1 = arg_1 * arg_3\n else:\n arg_1 = arg_1 + arg_3\n\n # anything else just returns the normalized mags as usual\n return arg_0, arg_1\n\n else:\n LOGERROR('measurements are all nan!')\n return None, None"} +{"_id": "doc_9162", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=1e-8,\n arg_5=None,\n arg_6=False,\n arg_7=True,\n arg_8=None,\n arg_9=None):\n '''Calculate the total SNR of a transit assuming gaussian uncertainties.\n\n `modelmags` gets interpolated onto the cadence of `mags`. The noise is\n calculated as the 1-sigma std deviation of the residual (see below).\n\n Following Carter et al. 2009::\n\n Q = sqrt( \u0393 T ) * \u03b4 / \u03c3\n\n for Q the total SNR of the transit in the r->0 limit, where::\n\n r = Rp/Rstar,\n T = transit duration,\n \u03b4 = transit depth,\n \u03c3 = RMS of the lightcurve in transit.\n \u0393 = sampling rate\n\n Thus \u0393 * T is roughly the number of points obtained during transit.\n (This doesn't correctly account for the SNR during ingress/egress, but this\n is a second-order correction).\n\n Note this is the same total SNR as described by e.g., Kovacs et al. 2002,\n their Equation 11.\n\n NOTE: this only works with fluxes at the moment.\n\n Parameters\n ----------\n\n times,mags : np.array\n The input flux time-series to process.\n\n modeltimes,modelmags : np.array\n A transiting planet model, either from BLS, a trapezoid model, or a\n Mandel-Agol model.\n\n atol_normalization : float\n The absolute tolerance to which the median of the passed model fluxes\n must be equal to 1.\n\n indsforrms : np.array\n A array of bools of `len(mags)` used to select points for the RMS\n measurement. If not passed, the RMS of the entire passed timeseries is\n used as an approximation. Genearlly, it's best to use out of transit\n points, so the RMS measurement is not model-dependent.\n\n magsarefluxes : bool\n Currently forced to be True because this function only works with\n fluxes.\n\n verbose : bool\n If True, indicates progress and warns about problems.\n\n transitdepth : float or None\n If the transit depth is known, pass it in here. Otherwise, it is\n calculated assuming OOT flux is 1.\n\n npoints_in_transits : int or None\n If the number of points in transit is known, pass it in here. 
Otherwise,\n the function will guess at this value.\n\n Returns\n -------\n\n (snr, transit_depth, noise) : tuple\n The returned tuple contains the calculated SNR, transit depth, and noise\n of the residual lightcurve calculated using the relation described\n above.\n\n '''\n\n if arg_6:\n if not np.isclose(np.nanmedian(arg_3), 1, atol=arg_4):\n raise AssertionError('snr calculation assumes modelmags are '\n 'median-normalized')\n else:\n raise NotImplementedError(\n 'need to implement a method for identifying in-transit points when'\n 'mags are mags, and not fluxes'\n )\n\n if not arg_8:\n # calculate transit depth from whatever model magnitudes are passed.\n arg_8 = np.abs(np.max(arg_3) - np.min(arg_3))\n\n # generally, mags (data) and modelmags are at different cadence.\n # interpolate modelmags onto the cadence of mags.\n if not len(arg_1) == len(arg_3):\n from scipy.interpolate import interp1d\n\n arg_10 = interp1d(arg_2, arg_3, kind='cubic', bounds_error=True,\n fill_value=np.nan)\n\n arg_3 = arg_10(arg_0)\n\n if arg_7:\n LOGINFO('interpolated model timeseries onto the data timeseries')\n\n arg_11 = arg_1 - arg_3\n\n if isinstance(arg_5, np.ndarray):\n arg_12 = np.std(arg_11[arg_5])\n if arg_7:\n LOGINFO('using selected points to measure RMS')\n else:\n arg_12 = np.std(arg_11)\n if arg_7:\n LOGINFO('using all points to measure RMS')\n\n def _get_npoints_in_transit(arg_3):\n # assumes median-normalized fluxes are input\n if np.nanmedian(arg_3) == 1:\n return len(arg_3[(arg_3 != 1)])\n else:\n raise NotImplementedError\n\n if not arg_9:\n arg_9 = _get_npoints_in_transit(arg_3)\n\n arg_13 = np.sqrt(arg_9) * arg_8/arg_12\n\n if arg_7:\n\n LOGINFO('\\npoints in transit: {:d}'.format(arg_9) +\n '\\ndepth: {:.2e}'.format(arg_8) +\n '\\nrms in residual: {:.2e}'.format(arg_12) +\n '\\n\\t SNR: {:.2e}'.format(arg_13))\n\n return arg_13, arg_8, arg_12"} +{"_id": "doc_9163", "title": "", "text": "def Func(arg_0, arg_1=10,\n arg_2=2.14):\n '''Using Carter et al. 2009's estimate, calculate the theoretical optimal\n precision on mid-transit time measurement possible given a transit of a\n particular SNR.\n\n The relation used is::\n\n sigma_tc = Q^{-1} * T * sqrt(\u03b8/2)\n\n Q = SNR of the transit.\n T = transit duration, which is 2.14 hours from discovery paper.\n \u03b8 = \u03c4/T = ratio of ingress to total duration\n ~= (few minutes [guess]) / 2.14 hours\n\n Parameters\n ----------\n\n snr : float\n The measured signal-to-noise of the transit, e,g. from\n :py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod` or from\n running the `.compute_stats()` method on an Astropy BoxLeastSquares\n object.\n\n t_ingress_min : float\n The ingress duration in minutes. This is t_I to t_II in Winn (2010)\n nomenclature.\n\n t_duration_hr : float\n The transit duration in hours. This is t_I to t_IV in Winn (2010)\n nomenclature.\n\n Returns\n -------\n\n float\n Returns the precision achievable for transit-center time as calculated\n from the relation above. 
This is in days.\n\n '''\n\n arg_3 = arg_1*u.minute\n arg_4 = arg_2*u.hour\n\n arg_5 = arg_3/arg_4\n\n arg_6 = (1/arg_0 * arg_4 * np.sqrt(arg_5/2))\n\n LOGINFO('assuming t_ingress = {:.1f}'.format(arg_3))\n LOGINFO('assuming t_duration = {:.1f}'.format(arg_4))\n LOGINFO('measured SNR={:.2f}\\n\\t'.format(arg_0) +\n '-->theoretical sigma_tc = {:.2e} = {:.2e} = {:.2e}'.format(\n arg_6.to(u.minute), arg_6.to(u.hour), arg_6.to(u.day)))\n\n return arg_6.to(u.day).value"} +{"_id": "doc_9164", "title": "", "text": "def Func(\n arg_0, arg_1, arg_2,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=True,\n arg_8=1,\n arg_9=0.03\n):\n '''This gets the out-of-transit light curve points.\n\n Relevant during iterative masking of transits for multiple planet system\n search.\n\n Parameters\n ----------\n\n time,flux,err_flux : np.array\n The input flux time-series measurements and their associated measurement\n errors\n\n blsfit_savpath : str or None\n If provided as a str, indicates the path of the fit plot to make for a\n simple BLS model fit to the transit using the obtained period and epoch.\n\n trapfit_savpath : str or None\n If provided as a str, indicates the path of the fit plot to make for a\n trapezoidal transit model fit to the transit using the obtained period\n and epoch.\n\n in_out_transit_savpath : str or None\n If provided as a str, indicates the path of the plot file that will be\n made for a plot showing the in-transit points and out-of-transit points\n tagged separately.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n This is by default True for this function, since it works on fluxes only\n at the moment.\n\n nworkers : int\n The number of parallel BLS period-finder workers to use.\n\n extra_maskfrac : float\n This is the separation (N) from in-transit points you desire, in units\n of the transit duration. 
`extra_maskfrac = 0` if you just want points\n inside transit, otherwise::\n\n t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur\n\n Thus setting N=0.03 masks slightly more than the guessed transit\n duration.\n\n Returns\n -------\n\n (times_oot, fluxes_oot, errs_oot) : tuple of np.array\n The `times`, `flux`, `err_flux` values from the input at the time values\n out-of-transit are returned.\n\n '''\n\n arg_10, arg_11, arg_12 = (\n given_lc_get_transit_tmids_tstarts_tends(\n arg_0, arg_1, arg_2, arg_3=arg_3,\n arg_4=arg_4, arg_7=arg_7,\n arg_8=arg_8, arg_6=arg_6, arg_9=arg_9\n )\n )\n\n arg_13 = np.zeros_like(arg_0).astype(bool)\n\n for arg_14, arg_15 in zip(arg_11, arg_12):\n\n arg_16 = ( (arg_0 > arg_14) & (arg_0 < arg_15) )\n\n arg_13 |= arg_16\n\n arg_17 = ~arg_13\n\n if arg_5:\n _in_out_transit_plot(arg_0, arg_1, arg_13, arg_17,\n arg_5)\n\n return arg_0[arg_17], arg_1[arg_17], arg_2[arg_17]"} +{"_id": "doc_9165", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''This just compresses the sqlitecurve. Should be independent of OS.\n\n '''\n\n arg_2 = '%s.gz' % arg_0\n\n try:\n\n if os.path.exists(arg_2) and not arg_1:\n os.remove(arg_0)\n return arg_2\n\n else:\n\n with open(arg_0,'rb') as infd:\n with gzip.open(arg_2,'wb') as outfd:\n shutil.copyfileobj(infd, outfd)\n\n if os.path.exists(arg_2):\n os.remove(arg_0)\n return arg_2\n\n except Exception as e:\n return None"} +{"_id": "doc_9166", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''This just compresses the sqlitecurve in gzip format.\n\n FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).\n\n '''\n\n # -k to keep the input file just in case something explodes\n if arg_1:\n arg_2 = 'gzip -k -f %s' % arg_0\n else:\n arg_2 = 'gzip -k %s' % arg_0\n\n try:\n\n arg_3 = '%s.gz' % arg_0\n\n if os.path.exists(arg_3) and not arg_1:\n # get rid of the .sqlite file only\n os.remove(arg_0)\n return arg_3\n\n else:\n subprocess.check_output(arg_2, shell=True)\n\n # check if the output file was successfully created\n if os.path.exists(arg_3):\n return arg_3\n else:\n return None\n\n except subprocess.CalledProcessError:\n return None"} +{"_id": "doc_9167", "title": "", "text": "def Func(arg_0):\n '''This just uncompresses the sqlitecurve in gzip format.\n\n FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).\n\n '''\n\n # -k to keep the input .gz just in case something explodes\n arg_1 = 'gunzip -k %s' % arg_0\n\n try:\n subprocess.check_output(arg_1, shell=True)\n return arg_0.replace('.gz','')\n except subprocess.CalledProcessError:\n return None"} +{"_id": "doc_9168", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''\n This just tries to apply the caster function to castee.\n\n Returns None on failure.\n\n '''\n\n try:\n return arg_1(arg_0)\n except Exception as e:\n if arg_1 is float or arg_1 is int:\n return nan\n elif arg_1 is str:\n return ''\n else:\n return arg_2"} +{"_id": "doc_9169", "title": "", "text": "def Func(arg_0):\n '''\n This parses the CSV header from the CSV HAT sqlitecurve.\n\n Returns a dict that can be used to update an existing lcdict with the\n relevant metadata info needed to form a full LC.\n\n '''\n\n # first, break into lines\n arg_1 = arg_0.split('\\n')\n arg_1 = [x.lstrip('# ') for x in arg_1]\n\n # next, find the indices of the metadata sections\n arg_2 = arg_1.index('OBJECT')\n arg_3 = arg_1.index('METADATA')\n arg_4 = arg_1.index('CAMFILTERS')\n arg_5 = arg_1.index('PHOTAPERTURES')\n arg_6 = arg_1.index('COLUMNS')\n arg_7 = 
arg_1.index('LIGHTCURVE')\n\n # get the lines for the header sections\n arg_8 = arg_1[arg_2+1:arg_3-1]\n arg_9 = arg_1[arg_3+1:arg_4-1]\n arg_10 = arg_1[arg_4+1:arg_5-1]\n arg_11 = arg_1[arg_5+1:arg_6-1]\n arg_12 = arg_1[arg_6+1:arg_7-1]\n\n # parse the header sections and insert the appropriate key-val pairs into\n # the lcdict\n arg_13 = {'objectinfo':{}}\n\n # first, the objectinfo section\n arg_8 = [x.split(';') for x in arg_8]\n\n for arg_14 in arg_8:\n for arg_15 in arg_14:\n arg_16, arg_17 = arg_15.split(' = ',1)\n arg_13['objectinfo'][arg_16.strip()] = (\n _smartcast(arg_17, METAKEYS[arg_16.strip()])\n )\n\n # the objectid belongs at the top level\n arg_13['objectid'] = arg_13['objectinfo']['objectid'][:]\n del arg_13['objectinfo']['objectid']\n\n # get the lightcurve metadata\n arg_9 = [x.split(';') for x in arg_9]\n for arg_14 in arg_9:\n for arg_15 in arg_14:\n\n try:\n arg_16, arg_17 = arg_15.split(' = ',1)\n\n # get the lcbestaperture into a dict again\n if arg_16.strip() == 'lcbestaperture':\n arg_17 = json.loads(arg_17)\n\n # get the lcversion and datarelease as integers\n if arg_16.strip() in ('datarelease', 'lcversion'):\n arg_17 = int(arg_17)\n\n # get the lastupdated as a float\n if arg_16.strip() == 'lastupdated':\n arg_17 = float(arg_17)\n\n # put the key-val into the dict\n arg_13[arg_16.strip()] = arg_17\n\n except Exception as e:\n\n LOGWARNING('could not understand header element \"%s\",'\n ' skipped.' % arg_15)\n\n\n # get the camera filters\n arg_13['filters'] = []\n for arg_19 in arg_10:\n arg_20, arg_21, arg_22 = arg_19.split(' - ')\n arg_13['filters'].append((int(arg_20),\n arg_21,\n arg_22))\n\n # get the photometric apertures\n arg_13['lcapertures'] = {}\n for arg_19 in arg_11:\n arg_23, arg_24 = arg_19.split(' - ')\n arg_24 = float(arg_24.rstrip(' px'))\n arg_13['lcapertures'][arg_23.strip()] = arg_24\n\n # get the columns\n arg_13['columns'] = []\n\n for arg_19 in arg_12:\n arg_25, arg_26, arg_27 = arg_19.split(' - ')\n arg_13['columns'].append(arg_26)\n\n return arg_13"} +{"_id": "doc_9170", "title": "", "text": "def Func(arg_0):\n '''\n This parses the header of the LCC CSV V1 LC format.\n\n '''\n\n # the first three lines indicate the format name, comment char, separator\n arg_1 = arg_0[1]\n arg_2 = arg_0[2]\n\n arg_0 = [x.lstrip('%s ' % arg_1) for x in arg_0[3:]]\n\n # next, find the indices of the various LC sections\n arg_3 = arg_0.index('OBJECT METADATA')\n arg_4 = arg_0.index('COLUMN DEFINITIONS')\n arg_5 = arg_0.index('LIGHTCURVE')\n\n arg_6 = ' ' .join(arg_0[arg_3+1:arg_4-1])\n arg_7 = ' ' .join(arg_0[arg_4+1:arg_5-1])\n arg_6 = json.loads(arg_6)\n arg_7 = json.loads(arg_7)\n\n return arg_6, arg_7, arg_2"} +{"_id": "doc_9171", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''\n This describes the LCC CSV format light curve file.\n\n Parameters\n ----------\n\n lcdict : dict\n The input lcdict to parse for column and metadata info.\n\n returndesc : bool\n If True, returns the description string as an str instead of just\n printing it to stdout.\n\n Returns\n -------\n\n str or None\n If returndesc is True, returns the description lines as a str, otherwise\n returns nothing.\n\n '''\n\n arg_2 = []\n arg_3 = []\n\n if 'lcformat' in arg_0 and 'lcc-csv' in arg_0['lcformat'].lower():\n\n arg_4 = arg_0['metadata']\n arg_5 = arg_0['objectinfo'].keys()\n arg_6 = arg_0['coldefs']\n\n for arg_7 in arg_5:\n\n arg_2.append(\n '%20s | %s' % (\n arg_7,\n arg_4[arg_7]['desc']\n )\n )\n\n for arg_8 in arg_0['columns']:\n\n arg_3.append('column %02d | %8s 
| numpy dtype: %3s | %s'\n % (arg_6[arg_8]['colnum'],\n arg_8,\n arg_6[arg_8]['dtype'],\n arg_6[arg_8]['desc']))\n\n\n\n\n arg_9 = LCC_CSVLC_DESCTEMPLATE.format(\n objectid=arg_0['objectid'],\n metadata_desc='\\n'.join(arg_2),\n arg_4=pformat(arg_0['objectinfo']),\n columndefs='\\n'.join(arg_3)\n )\n\n print(arg_9)\n\n if arg_1:\n return arg_9\n\n else:\n LOGERROR(\"this lcdict is not from an LCC CSV, can't figure it out...\")\n return None"} +{"_id": "doc_9172", "title": "", "text": "def Func(arg_0):\n '''This reads a HAT data server or LCC-Server produced CSV light curve\n into an lcdict.\n\n This will automatically figure out the format of the file\n provided. Currently, it can read:\n\n - legacy HAT data server CSV LCs (e.g. from\n https://hatsouth.org/planets/lightcurves.html) with an extension of the\n form: `.hatlc.csv.gz`.\n - all LCC-Server produced LCC-CSV-V1 LCs (e.g. from\n https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.\n\n\n Parameters\n ----------\n\n lcfile : str\n The light curve file to read.\n\n Returns\n -------\n\n dict\n Returns an lcdict that can be read and used by many astrobase processing\n functions.\n\n '''\n\n # read in the file and split by lines\n if '.gz' in os.path.basename(arg_0):\n LOGINFO('reading gzipped HATLC: %s' % arg_0)\n arg_1 = gzip.open(arg_0,'rb')\n else:\n LOGINFO('reading HATLC: %s' % arg_0)\n arg_1 = open(arg_0,'rb')\n\n\n # this transparently reads LCC CSVLCs\n arg_2 = arg_1.read(12).decode()\n if 'LCC-CSVLC' in arg_2:\n arg_1.close()\n return read_lcc_csvlc(arg_0)\n else:\n arg_1.seek(0)\n\n # below is reading the HATLC v2 CSV LCs\n\n arg_3 = arg_1.read().decode() # argh Python 3\n arg_1.close()\n\n # figure out the header and get the LC columns\n arg_4 = arg_3.index('# LIGHTCURVE\\n')\n arg_5 = arg_3[:arg_4+12]\n arg_6 = arg_3[arg_4+13:].split('\\n')\n arg_6 = [x for x in arg_6 if len(x) > 0]\n\n # initialize the lcdict and parse the CSV header\n arg_7 = _parse_csv_header(arg_5)\n\n # tranpose the LC rows into columns\n arg_6 = [x.split(',') for x in arg_6]\n arg_6 = list(zip(*arg_6)) # argh more Python 3\n\n # write the columns to the dict\n for arg_8, arg_9 in enumerate(arg_7['columns']):\n\n if (arg_9.split('_')[0] in LC_MAG_COLUMNS or\n arg_9.split('_')[0] in LC_ERR_COLUMNS or\n arg_9.split('_')[0] in LC_FLAG_COLUMNS):\n arg_7[arg_9] = np.array([_smartcast(x,\n COLUMNDEFS[arg_9.split('_')[0]][2])\n for x in arg_6[arg_8]])\n\n elif arg_9 in COLUMNDEFS:\n arg_7[arg_9] = np.array([_smartcast(x,COLUMNDEFS[arg_9][2])\n for x in arg_6[arg_8]])\n\n else:\n LOGWARNING('lcdict col %s has no formatter available' % arg_9)\n continue\n\n return arg_7"} +{"_id": "doc_9173", "title": "", "text": "def Func(arg_0, arg_1=4.0):\n '''This finds the time gaps in the light curve, so we can figure out which\n times are for consecutive observations and which represent gaps\n between seasons.\n\n Parameters\n ----------\n\n lctimes : np.array\n This is the input array of times, assumed to be in some form of JD.\n\n mingap : float\n This defines how much the difference between consecutive measurements is\n allowed to be to consider them as parts of different timegroups. 
By\n default it is set to 4.0 days.\n\n Returns\n -------\n\n tuple\n A tuple of the form below is returned, containing the number of time\n groups found and Python slice objects for each group::\n\n (ngroups, [slice(start_ind_1, end_ind_1), ...])\n\n '''\n\n arg_2 = [(arg_0[x] - arg_0[x-1]) for x in range(1,len(arg_0))]\n arg_2 = np.array(arg_2)\n\n arg_3 = np.where(arg_2 > arg_1)[0]\n\n if len(arg_3) > 0:\n\n arg_4 = []\n\n for arg_5, arg_6 in enumerate(arg_3):\n\n if arg_5 == 0:\n arg_4.append(slice(0,arg_6+1))\n else:\n arg_4.append(slice(arg_3[arg_5-1]+1,arg_6+1))\n\n\n # at the end, add the slice for the last group to the end of the times\n # array\n arg_4.append(slice(arg_3[-1]+1,len(arg_0)))\n\n # if there's no large gap in the LC, then there's only one group to worry\n # about\n else:\n arg_4 = [slice(0,len(arg_0))]\n\n\n return len(arg_4), arg_4"} +{"_id": "doc_9174", "title": "", "text": "def Func():\n '''\n This is called when we're executed from the commandline.\n\n The current usage from the command-line is described below::\n\n usage: hatlc [-h] [--describe] hatlcfile\n\n read a HAT LC of any format and output to stdout\n\n positional arguments:\n hatlcfile path to the light curve you want to read and pipe to stdout\n\n optional arguments:\n -h, --help show this help message and exit\n --describe don't dump the columns, show only object info and LC metadata\n\n '''\n\n # handle SIGPIPE sent by less, head, et al.\n import signal\n signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n import argparse\n\n arg_0 = argparse.ArgumentParser(\n description='read a HAT LC of any format and output to stdout'\n )\n\n arg_0.add_argument(\n 'hatlcfile',\n action='store',\n type=str,\n help=(\"path to the light curve you want to read and pipe to stdout\")\n )\n\n arg_0.add_argument(\n '--describe',\n action='store_true',\n default=False,\n help=(\"don't dump the columns, show only object info and LC metadata\")\n )\n\n arg_1 = arg_0.parse_args()\n arg_2 = arg_1.hatlcfile\n\n if not os.path.exists(arg_2):\n LOGERROR(\"file provided: %s doesn't seem to exist\" % arg_2)\n sys.exit(1)\n\n # figure out the type of LC this is\n arg_3 = os.path.basename(arg_2)\n\n # switch based on filetype\n if arg_3.endswith('-hatlc.csv.gz') or arg_3.endswith('-csvlc.gz'):\n\n if arg_1.describe:\n\n describe(read_csvlc(arg_3))\n sys.exit(0)\n\n else:\n\n with gzip.open(arg_3,'rb') as infd:\n for arg_4 in infd:\n print(arg_4.decode(),end='')\n\n elif arg_3.endswith('-hatlc.sqlite.gz'):\n\n arg_5, arg_6 = read_and_filter_sqlitecurve(arg_2)\n\n # dump the description\n describe(arg_5, offsetwith='#')\n\n # stop here if describe is True\n if arg_1.describe:\n sys.exit(0)\n\n # otherwise, continue to parse the cols, etc.\n\n # get the aperture names\n arg_7 = sorted(arg_5['lcapertures'].keys())\n\n # update column defs per aperture\n for arg_8 in arg_7:\n COLUMNDEFS.update({'%s_%s' % (arg_9, arg_8): COLUMNDEFS[arg_9] for arg_9 in\n LC_MAG_COLUMNS})\n COLUMNDEFS.update({'%s_%s' % (arg_9, arg_8): COLUMNDEFS[arg_9] for arg_9 in\n LC_ERR_COLUMNS})\n COLUMNDEFS.update({'%s_%s' % (arg_9, arg_8): COLUMNDEFS[arg_9] for arg_9 in\n LC_FLAG_COLUMNS})\n\n arg_10 = ','.join([COLUMNDEFS[arg_9][1] for arg_9 in arg_5['columns']])\n arg_11 = arg_5['objectinfo']['ndet']\n\n for arg_12 in range(arg_11):\n arg_4 = [arg_5[arg_9][arg_12] for arg_9 in arg_5['columns']]\n arg_13 = arg_10 % tuple(arg_4)\n print(arg_13)\n\n else:\n\n LOGERROR('unrecognized HATLC file: %s' % arg_2)\n sys.exit(1)"} +{"_id": "doc_9175", "title": "", "text": "def 
Func(arg_0, arg_1):\n '''This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.\n\n Parameters\n ----------\n\n ri_color : float\n The SDSS `r-i` color of the object.\n\n iz_color : float\n The SDSS `i-z` color of the object.\n\n Returns\n -------\n\n (subtype, index1, index2) : tuple\n `subtype`: if the star appears to be an M dwarf, will return an int\n between 0 and 9 indicating its subtype, e.g. will return 4 for an M4\n dwarf. If the object isn't an M dwarf, will return None\n\n `index1`, `index2`: the M-dwarf color locus value and spread of this\n object calculated from the `r-i` and `i-z` colors.\n\n '''\n\n # calculate the spectral type index and the spectral type spread of the\n # object. sti is calculated by fitting a line to the locus in r-i and i-z\n # space for M dwarfs in West+ 2007\n if np.isfinite(arg_0) and np.isfinite(arg_1):\n arg_2 = 0.875274*arg_0 + 0.483628*(arg_1 + 0.00438)\n arg_3 = -0.483628*arg_0 + 0.875274*(arg_1 + 0.00438)\n else:\n arg_2 = np.nan\n arg_3 = np.nan\n\n # possible M star if sti is >= 0.666 but <= 3.4559\n if (np.isfinite(arg_2) and np.isfinite(arg_3) and\n (arg_2 > 0.666) and (arg_2 < 3.4559)):\n\n # decide which M subclass object this is\n if ((arg_2 > 0.6660) and (arg_2 < 0.8592)):\n arg_4 = 'M0'\n\n if ((arg_2 > 0.8592) and (arg_2 < 1.0822)):\n arg_4 = 'M1'\n\n if ((arg_2 > 1.0822) and (arg_2 < 1.2998)):\n arg_4 = 'M2'\n\n if ((arg_2 > 1.2998) and (arg_2 < 1.6378)):\n arg_4 = 'M3'\n\n if ((arg_2 > 1.6378) and (arg_2 < 2.0363)):\n arg_4 = 'M4'\n\n if ((arg_2 > 2.0363) and (arg_2 < 2.2411)):\n arg_4 = 'M5'\n\n if ((arg_2 > 2.2411) and (arg_2 < 2.4126)):\n arg_4 = 'M6'\n\n if ((arg_2 > 2.4126) and (arg_2 < 2.9213)):\n arg_4 = 'M7'\n\n if ((arg_2 > 2.9213) and (arg_2 < 3.2418)):\n arg_4 = 'M8'\n\n if ((arg_2 > 3.2418) and (arg_2 < 3.4559)):\n arg_4 = 'M9'\n\n else:\n arg_4 = None\n\n return arg_4, arg_2, arg_3"} +{"_id": "doc_9176", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5='hat-sql',\n arg_6=None,\n arg_7=3.0,\n arg_8=21,\n arg_9=arg_10,\n arg_11=None,\n arg_12=arg_13,\n arg_14=1000):\n '''This applies EPD in parallel to all LCs in the input list.\n\n Parameters\n ----------\n\n lclist : list of str\n This is the list of light curve files to run EPD on.\n\n externalparams : dict or None\n This is a dict that indicates which keys in the lcdict obtained from the\n lcfile correspond to the required external parameters. As with timecol,\n magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound\n keys ('magaperture1.mags'). The dict should look something like::\n\n {'fsv':'' array: S values for each observation,\n 'fdv':'' array: D values for each observation,\n 'fkv':'' array: K values for each observation,\n 'xcc':'' array: x coords for each observation,\n 'ycc':'' array: y coords for each observation,\n 'bgv':'' array: sky background for each observation,\n 'bge':'' array: sky background err for each observation,\n 'iha':'' array: hour angle for each observation,\n 'izd':'' array: zenith distance for each observation}\n\n Alternatively, if these exact keys are already present in the lcdict,\n indicate this by setting externalparams to None.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the EPD process. 
If these are None, the\n default values for `timecols`, `magcols`, and `errcols` for your light\n curve format will be used here.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve files.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before fitting the EPD\n function to it.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n nworkers : int\n The number of parallel workers to launch when processing the LCs.\n\n maxworkertasks : int\n The maximum number of tasks a parallel worker will complete before it is\n replaced with a new one (sometimes helps with memory-leaks).\n\n Returns\n -------\n\n dict\n Returns a dict organized by all the keys in the input `magcols` list,\n containing lists of EPD pickle light curves for that `magcol`.\n\n Notes\n -----\n\n - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n - D -> measure of PSF ellipticity in xy direction\n - K -> measure of PSF ellipticity in cross direction\n\n S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n A. 
Pal's thesis: https://arxiv.org/abs/0906.3486\n\n '''\n\n try:\n arg_15 = get_lcformat(arg_5,\n use_lcformat_dir=arg_6)\n if arg_15:\n (arg_16, arg_17,\n arg_18, arg_19, arg_20,\n arg_21, arg_22) = arg_15\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as arg_26:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n # override the default timecols, magcols, and errcols\n # using the ones provided to the function\n if arg_2 is None:\n arg_2 = arg_18\n if arg_3 is None:\n arg_3 = arg_19\n if arg_4 is None:\n arg_4 = arg_20\n\n arg_23 = {}\n\n # run by magcol\n for arg_24, arg_25, arg_26 in zip(arg_2, arg_3, arg_4):\n\n arg_27 = [(x, arg_24, arg_25, arg_26, arg_1, arg_5, arg_6,\n arg_7, arg_8,\n arg_9, arg_11) for\n x in arg_0]\n\n arg_28 = mp.Pool(arg_12, maxtasksperchild=arg_14)\n arg_29 = arg_28.map(parallel_epd_worker, arg_27)\n arg_28.close()\n arg_28.join()\n\n arg_23[arg_25] = arg_29\n\n return arg_23"} +{"_id": "doc_9177", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6='hat-sql',\n arg_7=None,\n arg_8=3.0,\n arg_9=21,\n arg_10=arg_11,\n arg_12=None,\n arg_13=arg_14,\n arg_15=1000\n):\n '''This applies EPD in parallel to all LCs in a directory.\n\n Parameters\n ----------\n\n lcdir : str\n The light curve directory to process.\n\n externalparams : dict or None\n This is a dict that indicates which keys in the lcdict obtained from the\n lcfile correspond to the required external parameters. As with timecol,\n magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound\n keys ('magaperture1.mags'). The dict should look something like::\n\n {'fsv':'' array: S values for each observation,\n 'fdv':'' array: D values for each observation,\n 'fkv':'' array: K values for each observation,\n 'xcc':'' array: x coords for each observation,\n 'ycc':'' array: y coords for each observation,\n 'bgv':'' array: sky background for each observation,\n 'bge':'' array: sky background err for each observation,\n 'iha':'' array: hour angle for each observation,\n 'izd':'' array: zenith distance for each observation}\n\n lcfileglob : str or None\n A UNIX fileglob to use to select light curve files in `lcdir`. If this\n is not None, the value provided will override the default fileglob for\n your light curve format.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the EPD process. If these are None, the\n default values for `timecols`, `magcols`, and `errcols` for your light\n curve format will be used here.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before fitting the EPD\n function to it.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n nworkers : int\n The number of parallel workers to launch when processing the LCs.\n\n maxworkertasks : int\n The maximum number of tasks a parallel worker will complete before it is\n replaced with a new one (sometimes helps with memory-leaks).\n\n Returns\n -------\n\n dict\n Returns a dict organized by all the keys in the input `magcols` list,\n containing lists of EPD pickle light curves for that `magcol`.\n\n Notes\n -----\n\n - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n - D -> measure of PSF ellipticity in xy direction\n - K -> measure of PSF ellipticity in cross direction\n\n S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n A. 
Pal's thesis: https://arxiv.org/abs/0906.3486\n\n '''\n\n try:\n arg_16 = get_lcformat(arg_6,\n use_lcformat_dir=arg_7)\n if arg_16:\n (arg_17, arg_18,\n arg_19, arg_20, arg_21,\n arg_22, arg_23) = arg_16\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n # find all the files matching the lcglob in lcdir\n if arg_2 is None:\n arg_2 = arg_17\n\n arg_24 = sorted(glob.glob(os.path.join(arg_0, arg_2)))\n\n return parallel_epd_lclist(\n arg_24,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_15=arg_15\n )"} +{"_id": "doc_9178", "title": "", "text": "def Func(arg_0):\n '''\n This wraps Astropy's BoxLeastSquares for use with bls_parallel_pfind below.\n\n `task` is a tuple::\n\n task[0] = times\n task[1] = mags\n task[2] = errs\n task[3] = magsarefluxes\n\n task[4] = minfreq\n task[5] = nfreq\n task[6] = stepsize\n\n task[7] = ndurations\n task[8] = mintransitduration\n task[9] = maxtransitduration\n\n task[10] = blsobjective\n task[11] = blsmethod\n task[12] = blsoversample\n\n '''\n\n try:\n\n arg_1, arg_2, arg_3 = arg_0[:3]\n arg_4 = arg_0[3]\n\n arg_5, arg_6, arg_7 = arg_0[4:7]\n\n arg_8, arg_9, arg_10 = arg_0[7:10]\n\n arg_11, arg_12, arg_13 = arg_0[10:]\n\n arg_14 = arg_5 + nparange(arg_6)*arg_7\n arg_15 = 1.0/arg_14\n\n # astropy's BLS requires durations in units of time\n arg_16 = nplinspace(arg_9*arg_15.min(),\n arg_10*arg_15.min(),\n arg_8)\n\n # set up the correct units for the BLS model\n if arg_4:\n\n arg_17 = BoxLeastSquares(\n arg_1*u.day,\n arg_2*u.dimensionless_unscaled,\n dy=arg_3*u.dimensionless_unscaled\n )\n\n else:\n\n arg_17 = BoxLeastSquares(\n arg_1*u.day,\n arg_2*u.mag,\n dy=arg_3*u.mag\n )\n\n arg_18 = arg_17.power(\n arg_15*u.day,\n arg_16*u.day,\n objective=arg_11,\n method=arg_12,\n oversample=arg_13\n )\n\n return {\n 'blsresult': arg_18,\n 'blsmodel': arg_17,\n 'durations': arg_16,\n 'power': nparray(arg_18.power)\n }\n\n except Exception as e:\n\n LOGEXCEPTION('BLS for frequency chunk: (%.6f, %.6f) failed.' 
%\n (arg_14[0], arg_14[-1]))\n\n return {\n 'blsresult': None,\n 'blsmodel': None,\n 'durations': arg_16,\n 'power': nparray([npnan for arg_19 in range(arg_6)]),\n }"} +{"_id": "doc_9179", "title": "", "text": "def Func(arg_0):\n '''\n This wraps starfeatures.\n\n '''\n\n try:\n (arg_1, arg_2, arg_3, arg_4,\n arg_5, arg_6,\n arg_7, arg_8, arg_9, arg_10) = arg_0\n\n return get_starfeatures(arg_1, arg_2,\n arg_3, arg_4, arg_5,\n arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10)\n except Exception as e:\n return None"} +{"_id": "doc_9180", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=True,\n arg_6=None,\n arg_7='hat-sql',\n arg_8=None):\n '''This drives the `get_starfeatures` function for a collection of LCs.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The output directory where the results will be placed.\n\n lc_catalog_pickle : str\n The path to a catalog containing at a dict with least:\n\n - an object ID array accessible with `dict['objects']['objectid']`\n\n - an LC filename array accessible with `dict['objects']['lcfname']`\n\n - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n neighbors for each object accessible with `dict['kdtree']`\n\n A catalog pickle of the form needed can be produced using\n :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n maxobjects : int\n The number of objects to process from `lclist`.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. 
this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n Returns\n -------\n\n list of str\n A list of all star features pickles produced.\n\n '''\n # make sure to make the output directory if it doesn't exist\n if not os.path.exists(arg_1):\n os.makedirs(arg_1)\n\n if arg_4:\n arg_0 = arg_0[:arg_4]\n\n # read in the kdtree pickle\n with open(arg_2, 'rb') as infd:\n arg_9 = pickle.load(infd)\n\n arg_10 = arg_9['kdtree']\n arg_11 = arg_9['objects']['objectid']\n arg_12 = arg_9['objects']['lcfname']\n\n arg_13 = [(x, arg_1, arg_10, arg_11, arg_12,\n arg_3,\n arg_5, arg_6,\n arg_7, arg_8) for x in arg_0]\n\n for arg_14 in tqdm(arg_13):\n arg_15 = _starfeatures_worker(arg_14)\n\n return arg_15"} +{"_id": "doc_9181", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=True,\n arg_6=None,\n arg_7='hat-sql',\n arg_8=None,\n arg_9=arg_10):\n '''This runs `get_starfeatures` in parallel for all light curves in `lclist`.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The output directory where the results will be placed.\n\n lc_catalog_pickle : str\n The path to a catalog containing at a dict with least:\n\n - an object ID array accessible with `dict['objects']['objectid']`\n\n - an LC filename array accessible with `dict['objects']['lcfname']`\n\n - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n neighbors for each object accessible with `dict['kdtree']`\n\n A catalog pickle of the form needed can be produced using\n :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n maxobjects : int\n The number of objects to process from `lclist`.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. 
For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of the input light curve filename and the\n output star features pickle for each LC processed.\n\n '''\n\n try:\n arg_11 = get_lcformat(arg_7,\n use_lcformat_dir=arg_8)\n if arg_11:\n (arg_12, arg_13,\n arg_14, arg_15, arg_16,\n arg_17, arg_18) = arg_11\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n # make sure to make the output directory if it doesn't exist\n if not os.path.exists(arg_1):\n os.makedirs(arg_1)\n\n if arg_4:\n arg_0 = arg_0[:arg_4]\n\n # read in the kdtree pickle\n with open(arg_2, 'rb') as infd:\n arg_19 = pickle.load(infd)\n\n arg_20 = arg_19['kdtree']\n arg_21 = arg_19['objects']['objectid']\n arg_22 = arg_19['objects']['lcfname']\n\n arg_23 = [(x, arg_1, arg_20, arg_21, arg_22,\n arg_3,\n arg_5, arg_6, arg_7) for x in arg_0]\n\n with ProcessPoolExecutor(max_workers=arg_9) as executor:\n arg_24 = executor.map(_starfeatures_worker, arg_23)\n\n arg_25 = [x for x in arg_24]\n arg_26 = {os.path.basename(x):y for (x,y) in zip(arg_0, arg_25)}\n\n return arg_26"} +{"_id": "doc_9182", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3,\n arg_4=None,\n arg_5=None,\n arg_6=True,\n arg_7=None,\n arg_8='hat-sql',\n arg_9=None,\n arg_10=arg_11,\n arg_12=True):\n '''This runs parallel star feature extraction for a directory of LCs.\n\n Parameters\n ----------\n\n lcdir : list of str\n The directory to search for light curves.\n\n outdir : str\n The output directory where the results will be placed.\n\n lc_catalog_pickle : str\n The path to a catalog containing at a 
dict with least:\n\n - an object ID array accessible with `dict['objects']['objectid']`\n\n - an LC filename array accessible with `dict['objects']['lcfname']`\n\n - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n neighbors for each object accessible with `dict['kdtree']`\n\n A catalog pickle of the form needed can be produced using\n :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n fileglob : str\n The UNIX file glob to use to search for the light curves in `lcdir`. If\n None, the default value for the light curve format specified will be\n used.\n\n maxobjects : int\n The number of objects to process from `lclist`.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of the input light curve filename and the\n output star features pickle for each LC processed.\n\n '''\n\n try:\n arg_13 = get_lcformat(arg_8,\n use_lcformat_dir=arg_9)\n if arg_13:\n (arg_14, arg_15,\n arg_16, arg_17, arg_18,\n arg_19, arg_20) = arg_13\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n if not arg_4:\n arg_4 = arg_14\n\n # now find the files\n LOGINFO('searching for %s light curves in %s ...' % (arg_8, arg_0))\n\n if arg_12 is False:\n arg_21 = glob.glob(os.path.join(arg_0, arg_4))\n\n else:\n # use recursive glob for Python 3.5+\n if sys.version_info[:2] > (3,4):\n\n arg_21 = glob.glob(os.path.join(arg_0,\n '**',\n arg_4),arg_12=True)\n\n # otherwise, use os.walk and glob\n else:\n\n # use os.walk to go through the directories\n arg_22 = os.walk(arg_0)\n arg_21 = []\n\n for arg_23, arg_24, arg_25 in arg_22:\n for arg_26 in arg_24:\n arg_27 = os.path.join(arg_23,\n arg_26,\n arg_4)\n arg_28 = glob.glob(arg_27)\n\n if arg_28:\n arg_21.extend(arg_28)\n\n\n # now that we have all the files, process them\n if arg_21 and len(arg_21) > 0:\n\n LOGINFO('found %s light curves, getting starfeatures...' %\n len(arg_21))\n\n return parallel_starfeatures(arg_21,\n arg_1,\n arg_2,\n arg_3,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_5=arg_5,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10)\n\n else:\n\n LOGERROR('no light curve files in %s format found in %s' % (arg_8,\n arg_0))\n return None"} +{"_id": "doc_9183", "title": "", "text": "def Func(arg_0):\n '''\n This is the parallel worker for the function below.\n\n task[0] = frequency for this worker\n task[1] = times array\n task[2] = mags array\n task[3] = fold_time\n task[4] = j_range\n task[5] = keep_threshold_1\n task[6] = keep_threshold_2\n task[7] = phasebinsize\n\n we don't need errs for the worker.\n\n '''\n\n arg_1 = arg_0[0]\n arg_2, arg_3 = arg_0[1], arg_0[2]\n arg_4 = arg_0[3]\n arg_5 = range(arg_0[4])\n arg_6 = arg_0[5]\n arg_7 = arg_0[6]\n arg_8 = arg_0[7]\n\n\n try:\n\n arg_9 = 1.0/arg_1\n\n # use the common phaser to phase and sort the mag\n arg_10 = phase_magseries(arg_2,\n arg_3,\n arg_9,\n arg_4,\n wrap=False,\n sort=True)\n\n # bin in phase if requested, this turns this into a sort of PDM method\n if arg_8 is not None and arg_8 > 0:\n arg_11 = pwd_phasebin(arg_10['phase'],\n arg_10['mags'],\n binsize=arg_8)\n arg_12 = arg_11[0]\n arg_13 = arg_11[1]\n arg_5 = range(len(arg_13) - 1)\n else:\n arg_12 = arg_10['phase']\n arg_13 = arg_10['mags']\n\n # now calculate the string length\n arg_14 = nproll(arg_13,1)\n arg_15 = nproll(arg_12,1)\n arg_16 = (\n (arg_14 - arg_13)*(arg_14 - arg_13) +\n (arg_15 - arg_12)*(arg_15 - arg_12)\n )\n arg_16[0] = (\n ((arg_13[0] - arg_13[-1]) *\n (arg_13[0] - arg_13[-1])) +\n ((arg_12[0] - arg_12[-1] + 1) *\n (arg_12[0] - arg_12[-1] + 1))\n )\n arg_17 = npsum(npsqrt(arg_16))\n\n if (arg_6 < arg_17 < arg_7):\n arg_18 = True\n else:\n arg_18 = False\n\n return (arg_9, arg_17, arg_18)\n\n except Exception as e:\n\n LOGEXCEPTION('error in DWP')\n return(arg_9, npnan, False)"} +{"_id": "doc_9184", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=5,\n # these are depth, duration, ingress duration\n 
arg_5=(-0.01,0.1,0.1),\n # these are depth, duration, depth ratio, secphase\n arg_6=(-0.2,0.3,0.7,0.5),\n arg_7=1.0e-4,\n arg_8=1.0e-4,\n arg_9=5.0,\n arg_10=None,\n arg_11=None,\n arg_12=None,\n arg_13=None,\n arg_14=None,\n arg_15=None,\n arg_16='hat-sql',\n arg_17=None,\n arg_18=10.0,\n arg_19=False,\n arg_20=None):\n '''This drives the periodicfeatures collection for a list of periodfinding\n pickles.\n\n Parameters\n ----------\n\n pfpkl_list : list of str\n The list of period-finding pickles to use.\n\n lcbasedir : str\n The base directory where the associated light curves are located.\n\n outdir : str\n The directory where the results will be written.\n\n starfeaturesdir : str or None\n The directory containing the `starfeatures-.pkl` files for\n each object to use calculate neighbor proximity light curve features.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n maxobjects : int\n The total number of objects to process from `pfpkl_list`.\n\n Returns\n -------\n\n Nothing.\n\n '''\n\n try:\n arg_21 = get_lcformat(arg_16,\n use_lcformat_dir=arg_17)\n if arg_21:\n (arg_22, arg_23,\n arg_24, arg_25, arg_26,\n arg_27, arg_28) = arg_21\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n # make sure to make the output directory if it doesn't exist\n if not os.path.exists(arg_2):\n os.makedirs(arg_2)\n\n if arg_20:\n arg_0 = arg_0[:arg_20]\n\n LOGINFO('%s periodfinding pickles to process' % len(arg_0))\n\n # if the starfeaturedir is provided, try to find a starfeatures pickle for\n # each periodfinding pickle in pfpkl_list\n if arg_3 and os.path.exists(arg_3):\n\n arg_29 = []\n\n LOGINFO('collecting starfeatures pickles...')\n\n for arg_30 in arg_0:\n\n arg_31 = os.path.basename(arg_30).replace('periodfinding',\n 'starfeatures')\n arg_32 = arg_31.replace('.gz','')\n\n arg_33 = os.path.join(arg_3, arg_31)\n arg_34 = os.path.join(arg_3, arg_32)\n\n if os.path.exists(arg_33):\n arg_29.append(arg_31)\n elif os.path.exists(arg_34):\n arg_29.append(arg_32)\n else:\n arg_29.append(None)\n\n else:\n\n arg_29 = [None for x in arg_0]\n\n # generate the task list\n arg_35 = {'fourierorder':arg_4,\n 'transitparams':arg_5,\n 'ebparams':arg_6,\n 'pdiff_threshold':arg_7,\n 'sidereal_threshold':arg_8,\n 'sampling_peak_multiplier':arg_9,\n 'sampling_startp':arg_10,\n 'sampling_endp':arg_11,\n 'timecols':arg_13,\n 'magcols':arg_14,\n 'errcols':arg_15,\n 'lcformat':arg_16,\n 'lcformatdir':arg_17,\n 'sigclip':arg_18,\n 'verbose':arg_19}\n\n arg_36 = [(x, arg_1, arg_2, y, arg_35) for (x,y) in\n zip(arg_0, arg_29)]\n\n LOGINFO('processing periodfinding pickles...')\n\n for arg_37 in tqdm(arg_36):\n _periodicfeatures_worker(arg_37)"} +{"_id": "doc_9185", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3=None,\n arg_4=5,\n # these are depth, duration, ingress duration\n arg_5=(-0.01,0.1,0.1),\n # these are depth, duration, depth ratio, secphase\n arg_6=(-0.2,0.3,0.7,0.5),\n arg_7=1.0e-4,\n arg_8=1.0e-4,\n arg_9=5.0,\n arg_10=None,\n arg_11=None,\n arg_12=None,\n arg_13=None,\n arg_14=None,\n arg_15='hat-sql',\n arg_16=None,\n arg_17=10.0,\n arg_18=False,\n arg_19=None,\n arg_20=arg_21):\n '''This runs periodic feature 
generation in parallel for all periodfinding\n pickles in the input list.\n\n Parameters\n ----------\n\n pfpkl_list : list of str\n The list of period-finding pickles to use.\n\n lcbasedir : str\n The base directory where the associated light curves are located.\n\n outdir : str\n The directory where the results will be written.\n\n starfeaturesdir : str or None\n The directory containing the `starfeatures-.pkl` files for\n each object to use calculate neighbor proximity light curve features.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. 
For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n maxobjects : int\n The total number of objects to process from `pfpkl_list`.\n\n nworkers : int\n The number of parallel workers to launch to process the input.\n\n Returns\n -------\n\n dict\n A dict containing key: val pairs of the input period-finder result and\n the output periodic feature result pickles for each input pickle is\n returned.\n\n '''\n # make sure to make the output directory if it doesn't exist\n if not os.path.exists(arg_2):\n os.makedirs(arg_2)\n\n if arg_19:\n arg_0 = arg_0[:arg_19]\n\n LOGINFO('%s periodfinding pickles to process' % len(arg_0))\n\n # if the starfeaturedir is provided, try to find a starfeatures pickle for\n # each periodfinding pickle in pfpkl_list\n if arg_3 and os.path.exists(arg_3):\n\n arg_22 = []\n\n LOGINFO('collecting starfeatures pickles...')\n\n for arg_23 in arg_0:\n\n arg_24 = os.path.basename(arg_23).replace('periodfinding',\n 'starfeatures')\n arg_25 = arg_24.replace('.gz','')\n\n arg_26 = os.path.join(arg_3, arg_24)\n arg_27 = os.path.join(arg_3, arg_25)\n\n if os.path.exists(arg_26):\n arg_22.append(arg_24)\n elif os.path.exists(arg_27):\n arg_22.append(arg_25)\n else:\n arg_22.append(None)\n\n else:\n\n arg_22 = [None for x in arg_0]\n\n # generate the task list\n arg_28 = {'fourierorder':arg_4,\n 'transitparams':arg_5,\n 'ebparams':arg_6,\n 'pdiff_threshold':arg_7,\n 'sidereal_threshold':arg_8,\n 'sampling_peak_multiplier':arg_9,\n 'sampling_startp':arg_10,\n 'sampling_endp':arg_11,\n 'timecols':arg_12,\n 'magcols':arg_13,\n 'errcols':arg_14,\n 'lcformat':arg_15,\n 'lcformatdir':arg_15,\n 'sigclip':arg_17,\n 'verbose':arg_18}\n\n arg_29 = [(x, arg_1, arg_2, y, arg_28) for (x,y) in\n zip(arg_0, arg_22)]\n\n LOGINFO('processing periodfinding pickles...')\n\n with ProcessPoolExecutor(max_workers=arg_20) as executor:\n arg_30 = executor.map(_periodicfeatures_worker, arg_29)\n\n arg_31 = [x for x in arg_30]\n arg_32 = {os.path.basename(x):y for (x,y) in zip(arg_0, arg_31)}\n\n return arg_32"} +{"_id": "doc_9186", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3='periodfinding-*.pkl*',\n arg_4=None,\n arg_5=5,\n # these are depth, duration, ingress duration\n arg_6=(-0.01,0.1,0.1),\n # these are depth, duration, depth ratio, secphase\n arg_7=(-0.2,0.3,0.7,0.5),\n arg_8=1.0e-4,\n arg_9=1.0e-4,\n arg_10=5.0,\n arg_11=None,\n arg_12=None,\n arg_13=None,\n arg_14=None,\n arg_15=None,\n arg_16='hat-sql',\n arg_17=None,\n arg_18=10.0,\n arg_19=False,\n arg_20=None,\n arg_21=arg_22,\n arg_23=True,\n):\n '''This runs parallel periodicfeature extraction for a directory of\n periodfinding result pickles.\n\n Parameters\n ----------\n\n pfpkl_dir : str\n The directory containing the pickles to process.\n\n lcbasedir : str\n The directory where all of the associated light curve files are located.\n\n outdir : str\n The directory where all the output will be written.\n\n pfpkl_glob : str\n The UNIX file glob to use to search for period-finder result pickles in\n `pfpkl_dir`.\n\n starfeaturesdir : str or None\n The directory 
containing the `starfeatures-.pkl` files for\n each object to use calculate neighbor proximity light curve features.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
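Two details in doc_9185 above look like transcription slips rather than intended behavior: its task-kwargs dict maps `'lcformatdir'` to `arg_15` (the `lcformat` argument, already used on the preceding line) instead of `arg_16`, and the signature default `arg_20=arg_21` references a name defined nowhere in the record (upstream this kind of default is a module-level worker count such as `NCPUS`). Under that reading, the corrected fragment would presumably be:

# hypothetical corrected fragment of doc_9185 (illustrative, not a corpus record)
NCPUS = 4                 # stand-in for the undefined default name arg_21
lcformat = 'hat-sql'      # the record's arg_15
lcformatdir = None        # the record's arg_16

task_kwargs = {
    'lcformat': lcformat,
    'lcformatdir': lcformatdir,   # the record maps this to arg_15 by mistake
}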
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n maxobjects : int\n The total number of objects to process from `pfpkl_list`.\n\n nworkers : int\n The number of parallel workers to launch to process the input.\n\n Returns\n -------\n\n dict\n A dict containing key: val pairs of the input period-finder result and\n the output periodic feature result pickles for each input pickle is\n returned.\n\n '''\n\n try:\n arg_24 = get_lcformat(arg_16,\n use_lcformat_dir=arg_17)\n if arg_24:\n (arg_25, arg_26,\n arg_27, arg_28, arg_29,\n arg_30, arg_31) = arg_24\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n arg_32 = arg_3\n\n # now find the files\n LOGINFO('searching for periodfinding pickles in %s ...' % arg_0)\n\n if arg_23 is False:\n arg_33 = glob.glob(os.path.join(arg_0, arg_32))\n\n else:\n # use recursive glob for Python 3.5+\n if sys.version_info[:2] > (3,4):\n\n arg_33 = glob.glob(os.path.join(arg_0,\n '**',\n arg_32),arg_23=True)\n\n # otherwise, use os.walk and glob\n else:\n\n # use os.walk to go through the directories\n arg_34 = os.walk(arg_0)\n arg_33 = []\n\n for arg_35, arg_36, arg_37 in arg_34:\n for arg_38 in arg_36:\n arg_39 = os.path.join(arg_35,\n arg_38,\n arg_32)\n arg_40 = glob.glob(arg_39)\n\n if arg_40:\n arg_33.extend(arg_40)\n\n\n # now that we have all the files, process them\n if arg_33 and len(arg_33) > 0:\n\n LOGINFO('found %s periodfinding pickles, getting periodicfeatures...' 
%\n len(arg_33))\n\n return parallel_periodicfeatures(\n arg_33,\n arg_1,\n arg_2,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_14=arg_14,\n arg_15=arg_15,\n arg_16=arg_16,\n arg_17=arg_17,\n arg_18=arg_18,\n arg_19=arg_19,\n arg_20=arg_20,\n arg_21=arg_21,\n )\n\n else:\n\n LOGERROR('no periodfinding pickles found in %s' % (arg_0))\n return None"} +{"_id": "doc_9187", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n This parses the header for a catalog file and returns it as a file object.\n\n Parameters\n ----------\n\n xc : str\n The file name of an xmatch catalog prepared previously.\n\n xk : list of str\n This is a list of column names to extract from the xmatch catalog.\n\n Returns\n -------\n\n tuple\n The tuple returned is of the form::\n\n (infd: the file object associated with the opened xmatch catalog,\n catdefdict: a dict describing the catalog column definitions,\n catcolinds: column number indices of the catalog,\n catcoldtypes: the numpy dtypes of the catalog columns,\n catcolnames: the names of each catalog column,\n catcolunits: the units associated with each catalog column)\n\n '''\n\n arg_2 = []\n\n # read in this catalog and transparently handle gzipped files\n if arg_0.endswith('.gz'):\n arg_3 = gzip.open(arg_0,'rb')\n else:\n arg_3 = open(arg_0,'rb')\n\n # read in the defs\n for arg_4 in arg_3:\n if arg_4.decode().startswith('#'):\n arg_2.append(\n arg_4.decode().replace('#','').strip().rstrip('\\n')\n )\n if not arg_4.decode().startswith('#'):\n break\n\n if not len(arg_2) > 0:\n LOGERROR(\"catalog definition not parseable \"\n \"for catalog: %s, skipping...\" % arg_0)\n return None\n\n arg_2 = ' '.join(arg_2)\n arg_5 = json.loads(arg_2)\n\n arg_6 = [x['key'] for x in arg_5['columns']]\n arg_7 = [x['dtype'] for x in arg_5['columns']]\n arg_8 = [x['name'] for x in arg_5['columns']]\n arg_9 = [x['unit'] for x in arg_5['columns']]\n\n # get the correct column indices and dtypes for the requested columns\n # from the catdefdict\n\n arg_10 = []\n arg_11 = []\n arg_12 = []\n arg_13 = []\n\n for arg_14 in arg_1:\n\n if arg_14 in arg_6:\n\n arg_15 = arg_6.index(arg_14)\n\n arg_10.append(arg_15)\n arg_11.append(arg_7[arg_15])\n arg_12.append(arg_8[arg_15])\n arg_13.append(arg_9[arg_15])\n\n\n return (arg_3, arg_5,\n arg_10, arg_11, arg_12, arg_13)"} +{"_id": "doc_9188", "title": "", "text": "def Func(arg_0, arg_1, arg_2=None):\n '''This loads the external xmatch catalogs into a dict for use in an xmatch.\n\n Parameters\n ----------\n\n xmatchto : list of str\n This is a list of paths to all the catalog text files that will be\n loaded.\n\n The text files must be 'CSVs' that use the '|' character as the\n separator betwen columns. These files should all begin with a header in\n JSON format on lines starting with the '#' character. this header will\n define the catalog and contains the name of the catalog and the column\n definitions. Column definitions must have the column name and the numpy\n dtype of the columns (in the same format as that expected for the\n numpy.genfromtxt function). Any line that does not begin with '#' is\n assumed to be part of the columns in the catalog. 
An example is shown\n below::\n\n # {\"name\":\"NSVS catalog of variable stars\",\n # \"columns\":[\n # {\"key\":\"objectid\", \"dtype\":\"U20\", \"name\":\"Object ID\", \"unit\": null},\n # {\"key\":\"ra\", \"dtype\":\"f8\", \"name\":\"RA\", \"unit\":\"deg\"},\n # {\"key\":\"decl\",\"dtype\":\"f8\", \"name\": \"Declination\", \"unit\":\"deg\"},\n # {\"key\":\"sdssr\",\"dtype\":\"f8\",\"name\":\"SDSS r\", \"unit\":\"mag\"},\n # {\"key\":\"vartype\",\"dtype\":\"U20\",\"name\":\"Variable type\", \"unit\":null}\n # ],\n # \"colra\":\"ra\",\n # \"coldec\":\"decl\",\n # \"description\":\"Contains variable stars from the NSVS catalog\"}\n objectid1 | 45.0 | -20.0 | 12.0 | detached EB\n objectid2 | 145.0 | 23.0 | 10.0 | RRab\n objectid3 | 12.0 | 11.0 | 14.0 | Cepheid\n .\n .\n .\n\n xmatchkeys : list of lists\n This is the list of lists of column names (as str) to get out of each\n `xmatchto` catalog. This should be the same length as `xmatchto` and\n each element here will apply to the respective file in `xmatchto`.\n\n outfile : str or None\n If this is not None, set this to the name of the pickle to write the\n collected xmatch catalogs to. this pickle can then be loaded\n transparently by the :py:func:`astrobase.checkplot.pkl.checkplot_dict`,\n :py:func:`astrobase.checkplot.pkl.checkplot_pickle` functions to provide\n xmatch info to the\n :py:func:`astrobase.checkplot.pkl_xmatch.xmatch_external_catalogs`\n function below.\n\n If this is None, will return the loaded xmatch catalogs directly. This\n will be a huge dict, so make sure you have enough RAM.\n\n Returns\n -------\n\n str or dict\n Based on the `outfile` kwarg, will either return the path to a collected\n xmatch pickle file or the collected xmatch dict.\n\n '''\n\n arg_3 = {}\n\n for arg_4, arg_5 in zip(arg_0, arg_1):\n\n arg_6 = _parse_xmatch_catalog_header(arg_4, arg_5)\n\n if not arg_6:\n continue\n\n (arg_7, arg_8,\n arg_9, arg_10,\n arg_11, arg_12) = arg_6\n\n # get the specified columns out of the catalog\n arg_13 = np.genfromtxt(arg_7,\n usecols=arg_9,\n names=arg_5,\n dtype=','.join(arg_10),\n comments='#',\n delimiter='|',\n autostrip=True)\n arg_7.close()\n\n arg_14 = os.path.splitext(os.path.basename(arg_4))[0]\n arg_14 = arg_14.replace('.csv','')\n\n #\n # make a kdtree for this catalog\n #\n\n # get the ra and decl columns\n arg_15, arg_16 = (arg_13[arg_8['colra']],\n arg_13[arg_8['coldec']])\n\n # get the xyz unit vectors from ra,decl\n arg_17 = np.cos(np.radians(arg_16))\n arg_18 = np.sin(np.radians(arg_16))\n arg_19 = np.cos(np.radians(arg_15))\n arg_20 = np.sin(np.radians(arg_15))\n arg_21 = np.column_stack((arg_19*arg_17,arg_20*arg_17, arg_18))\n\n # generate the kdtree\n arg_22 = cKDTree(arg_21,copy_data=True)\n\n # generate the outdict element for this catalog\n arg_23 = {'kdtree':arg_22,\n 'data':arg_13,\n 'columns':arg_5,\n 'colnames':arg_11,\n 'colunits':arg_12,\n 'name':arg_8['name'],\n 'desc':arg_8['description']}\n\n arg_3[arg_14] = arg_23\n\n if arg_2 is not None:\n\n # if we're on OSX, we apparently need to save the file in chunks smaller\n # than 2 GB to make it work right. 
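doc_9188 indexes each catalog with a `cKDTree` over unit vectors rather than raw (RA, Dec) pairs, so that 3-D Euclidean distance becomes a monotonic proxy for angular separation and RA wrap-around never needs special handling. The conversion, condensed into a standalone helper (illustrative name):

import numpy as np
from scipy.spatial import cKDTree

def build_radec_kdtree(ra_deg, decl_deg):
    # unit vectors on the celestial sphere from RA/Dec in decimal degrees
    ra, decl = np.radians(ra_deg), np.radians(decl_deg)
    xyz = np.column_stack((np.cos(ra) * np.cos(decl),
                           np.sin(ra) * np.cos(decl),
                           np.sin(decl)))
    return cKDTree(xyz, copy_data=True)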
can't load pickles larger than 4 GB\n # either, but 3 GB < total size < 4 GB appears to be OK when loading.\n # also see: https://bugs.python.org/issue24658.\n # fix adopted from: https://stackoverflow.com/a/38003910\n if sys.platform == 'darwin':\n\n arg_24 = pickle.dumps(arg_3, protocol=pickle.HIGHEST_PROTOCOL)\n arg_25 = 2**31 - 1\n\n with open(arg_2, 'wb') as outfd:\n for arg_26 in range(0, len(arg_24), arg_25):\n outfd.write(arg_24[arg_26:arg_26+arg_25])\n\n else:\n with open(arg_2, 'wb') as outfd:\n pickle.dump(arg_3, outfd, pickle.HIGHEST_PROTOCOL)\n\n return arg_2\n\n else:\n\n return arg_3"} +{"_id": "doc_9189", "title": "", "text": "def Func(arg_0, arg_1=False):\n '''Wraps the input angle to 360.0 degrees.\n\n Parameters\n ----------\n\n angle : float\n The angle to wrap around 360.0 deg.\n\n radians : bool\n If True, will assume that the input is in radians. The output will then\n also be in radians.\n\n Returns\n -------\n\n float\n Wrapped angle. If radians is True: input is assumed to be in radians,\n output is also in radians.\n\n '''\n\n if arg_1:\n arg_2 = arg_0 % (2.0*pi_value)\n if arg_2 < 0.0:\n arg_2 = 2.0*pi_value + arg_2\n\n else:\n\n arg_2 = arg_0 % 360.0\n if arg_2 < 0.0:\n arg_2 = 360.0 + arg_2\n\n return arg_2"} +{"_id": "doc_9190", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''Calculates the great circle angular distance between two coords.\n\n This calculates the great circle angular distance in arcseconds between two\n coordinates (ra1,dec1) and (ra2,dec2). This is basically a clone of GCIRC\n from the IDL Astrolib.\n\n Parameters\n ----------\n\n ra1,dec1 : float or array-like\n The first coordinate's right ascension and declination value(s) in\n decimal degrees.\n\n ra2,dec2 : float or array-like\n The second coordinate's right ascension and declination value(s) in\n decimal degrees.\n\n Returns\n -------\n\n float or array-like\n Great circle distance between the two coordinates in arseconds.\n\n Notes\n -----\n\n If (`ra1`, `dec1`) is scalar and (`ra2`, `dec2`) is scalar: the result is a\n float distance in arcseconds.\n\n If (`ra1`, `dec1`) is scalar and (`ra2`, `dec2`) is array-like: the result\n is an np.array with distance in arcseconds between (`ra1`, `dec1`) and each\n element of (`ra2`, `dec2`).\n\n If (`ra1`, `dec1`) is array-like and (`ra2`, `dec2`) is scalar: the result\n is an np.array with distance in arcseconds between (`ra2`, `dec2`) and each\n element of (`ra1`, `dec1`).\n\n If (`ra1`, `dec1`) and (`ra2`, `dec2`) are both array-like: the result is an\n np.array with the pair-wise distance in arcseconds between each element of\n the two coordinate lists. 
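The macOS branch just above exists because CPython on Darwin historically failed on single writes larger than about 2 GB (https://bugs.python.org/issue24658, cited in the record's own comment), so doc_9188 serializes first and writes the byte string in slices. The same pattern as a reusable helper (illustrative name):

import pickle
import sys

def dump_pickle(obj, outfile):
    if sys.platform == 'darwin':
        # serialize first, then write in < 2 GB slices (python bug 24658)
        raw = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
        max_bytes = 2**31 - 1
        with open(outfile, 'wb') as outfd:
            for offset in range(0, len(raw), max_bytes):
                outfd.write(raw[offset:offset + max_bytes])
    else:
        with open(outfile, 'wb') as outfd:
            pickle.dump(obj, outfd, pickle.HIGHEST_PROTOCOL)
    return outfile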
In this case, if the input array-likes are not the\n same length, then excess elements of the longer one will be ignored.\n\n '''\n\n # wrap RA if negative or larger than 360.0 deg\n arg_4 = arg_0 % 360.0\n arg_4 = arg_4 + 360.0*(arg_4 < 0.0)\n arg_5 = arg_2 % 360.0\n arg_5 = arg_5 + 360.0*(arg_4 < 0.0)\n\n # convert to radians\n arg_6, arg_7 = np.deg2rad(arg_4), np.deg2rad(arg_1)\n arg_8, arg_9 = np.deg2rad(arg_5), np.deg2rad(arg_3)\n\n arg_10 = (arg_9 - arg_7)/2.0\n arg_11 = (arg_8 - arg_6)/2.0\n arg_12 = np.sqrt(np.sin(arg_10) * np.sin(arg_10) +\n np.cos(arg_7) * np.cos(arg_9) *\n np.sin(arg_11) * np.sin(arg_11))\n\n arg_13 = 2.0 * np.arcsin(arg_12)\n\n # return the distance in arcseconds\n return np.rad2deg(arg_13)*3600.0"} +{"_id": "doc_9191", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n\n '''This calculates the total proper motion of an object.\n\n Parameters\n ----------\n\n pmra : float or array-like\n The proper motion(s) in right ascension, measured in mas/yr.\n\n pmdecl : float or array-like\n The proper motion(s) in declination, measured in mas/yr.\n\n decl : float or array-like\n The declination of the object(s) in decimal degrees.\n\n Returns\n -------\n\n float or array-like\n The total proper motion(s) of the object(s) in mas/yr.\n\n '''\n\n arg_3 = np.sqrt( arg_1*arg_1 + arg_0*arg_0*np.cos(np.radians(arg_2)) *\n np.cos(np.radians(arg_2)) )\n return arg_3"} +{"_id": "doc_9192", "title": "", "text": "def Func(arg_0, arg_1):\n '''This converts from galactic coords to equatorial coordinates.\n\n Parameters\n ----------\n\n gl : float or array-like\n Galactic longitude values(s) in decimal degrees.\n\n gb : float or array-like\n Galactic latitude value(s) in decimal degrees.\n\n Returns\n -------\n\n tuple of (float, float) or tuple of (np.array, np.array)\n The equatorial coordinates (RA, DEC) for each element of the input\n (`gl`, `gb`) in decimal degrees. 
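Two apparent transcription slips sit in this stretch of coordinate utilities: doc_9190 wraps the second RA with `360.0*(arg_4 < 0.0)`, testing the first coordinate's sign where the symmetric line above it tests its own (`arg_5`), and doc_9192, whose body follows just below, passes `arg_0` (galactic longitude) for both arguments of `SkyCoord`, so the latitude `arg_1` is never used. Corrected sketches under that reading (illustrative names, not corpus records):

import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord

def great_circle_dist_arcsec(ra1, dec1, ra2, dec2):
    # wrap each RA into [0, 360) using its *own* sign
    ra1, ra2 = ra1 % 360.0, ra2 % 360.0
    ra1 = ra1 + 360.0 * (ra1 < 0.0)
    ra2 = ra2 + 360.0 * (ra2 < 0.0)
    ra1, dec1 = np.deg2rad(ra1), np.deg2rad(dec1)
    ra2, dec2 = np.deg2rad(ra2), np.deg2rad(dec2)
    # haversine formula, returned in arcseconds
    a = np.sqrt(np.sin((dec2 - dec1) / 2.0) ** 2 +
                np.cos(dec1) * np.cos(dec2) * np.sin((ra2 - ra1) / 2.0) ** 2)
    return np.rad2deg(2.0 * np.arcsin(a)) * 3600.0

def galactic_to_equatorial(gl, gb):
    # pass longitude *and* latitude, not the longitude twice
    gal = SkyCoord(gl * u.degree, gb * u.degree, frame='galactic')
    icrs = gal.transform_to('icrs')
    return icrs.ra.degree, icrs.dec.degree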
These are reported in the ICRS frame.\n\n '''\n\n arg_2 = SkyCoord(arg_0*u.degree, arg_0*u.degree, frame='galactic')\n\n arg_3 = arg_2.transform_to('icrs')\n\n return arg_3.ra.degree, arg_3.dec.degree"} +{"_id": "doc_9193", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2, arg_3,\n arg_4=True):\n '''This returns the image-plane projected xi-eta coords for inra, indecl.\n\n Parameters\n ----------\n\n inra,indecl : array-like\n The equatorial coordinates to get the xi, eta coordinates for in decimal\n degrees or radians.\n\n incenterra,incenterdecl : float\n The center coordinate values to use to calculate the plane-projected\n coordinates around.\n\n deg : bool\n If this is True, the input angles are assumed to be in degrees and the\n output is in degrees as well.\n\n Returns\n -------\n\n tuple of np.arrays\n This is the (`xi`, `eta`) coordinate pairs corresponding to the\n image-plane projected coordinates for each pair of input equatorial\n coordinates in (`inra`, `indecl`).\n\n '''\n\n if arg_4:\n\n arg_5 = np.radians(arg_0)\n arg_6 = np.radians(arg_1)\n arg_7 = np.radians(arg_2)\n arg_8 = np.radians(arg_3)\n\n else:\n\n arg_5 = arg_0\n arg_6 = arg_1\n arg_7 = arg_2\n arg_8 = arg_3\n\n arg_9 = np.cos(arg_8)\n arg_10 = np.sin(arg_8)\n arg_11 = np.cos(arg_7)\n arg_12 = np.sin(arg_7)\n\n arg_13 = np.cos(arg_6)*np.cos(arg_5)\n arg_14 = np.cos(arg_6)*np.sin(arg_5)\n arg_15 = np.sin(arg_6)\n\n arg_16 = arg_13*arg_9*arg_11 + arg_14*arg_9*arg_12 + arg_15*arg_10\n arg_17 = -arg_13*arg_12 + arg_14*arg_11\n arg_18 = -arg_13*arg_10*arg_11 - arg_14*arg_10*arg_12 + arg_15*arg_9\n arg_19 = arg_17*arg_17 + arg_18*arg_18\n\n arg_20 = np.zeros_like(arg_16)\n arg_20[arg_16 >= 1.0] = 0.0\n arg_20[arg_16 < 1.0] = np.arccos(arg_16)\n\n arg_21, arg_22 = np.zeros_like(arg_20), np.zeros_like(arg_20)\n\n arg_21[(arg_20 <= 0.0) | (arg_19 <= 0.0)] = 0.0\n arg_22[(arg_20 <= 0.0) | (arg_19 <= 0.0)] = 0.0\n\n arg_23 = np.sqrt(arg_19)\n\n arg_21[(arg_20 > 0.0) | (arg_19 > 0.0)] = arg_20*arg_17/arg_23\n arg_22[(arg_20 > 0.0) | (arg_19 > 0.0)] = arg_20*arg_18/arg_23\n\n if arg_4:\n return np.degrees(arg_21), np.degrees(arg_22)\n else:\n return arg_21, arg_22"} +{"_id": "doc_9194", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3={'transitperiod':arg_4.uniform(arg_6=0.1,arg_7=49.9),\n 'transitdepth':arg_4.uniform(arg_6=1.0e-4,arg_7=2.0e-2),\n 'transitduration':arg_4.uniform(arg_6=0.01,arg_7=0.29)},\n arg_8=False,\n):\n '''This generates fake planet transit light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'transitperiod', 'transitdepth', 'transitduration'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The ingress duration will be automatically chosen from a uniform\n distribution ranging from 0.05 to 0.5 of the transitduration.\n\n The transitdepth will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'planet',\n 'params': {'transitperiod': generated value of period,\n 'transitepoch': generated value of epoch,\n 'transitdepth': generated value of transit depth,\n 'transitduration': generated value of transit duration,\n 'ingressduration': generated value of transit ingress\n duration},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'transitperiod'\n 'varamplitude': the generated amplitude of\n variability == 'transitdepth'}\n\n '''\n\n if arg_1 is None:\n arg_1 = np.full_like(arg_0, 0.0)\n\n if arg_2 is None:\n arg_2 = np.full_like(arg_0, 0.0)\n\n # choose the epoch\n arg_9 = npr.random()*(arg_0.max() - arg_0.min()) + arg_0.min()\n\n # choose the period, depth, duration\n arg_10 = arg_3['transitperiod'].rvs(size=1)\n arg_11 = arg_3['transitdepth'].rvs(size=1)\n arg_12 = arg_3['transitduration'].rvs(size=1)\n\n # figure out the ingress duration\n arg_13 = npr.random()*(0.5*arg_12 - 0.05*arg_12) + 0.05*arg_12\n\n # fix the transit depth if it needs to be flipped\n if arg_8 and arg_11 < 0.0:\n arg_11 = -arg_11\n elif not arg_8 and arg_11 > 0.0:\n arg_11 = -arg_11\n\n # generate the model\n arg_14, arg_15, arg_16, arg_17, arg_18 = (\n transits.trapezoid_transit_func([arg_10, arg_9, arg_11,\n arg_12, arg_13],\n arg_0,\n arg_1,\n arg_2)\n )\n\n # resort in original time order\n arg_19 = np.argsort(arg_16)\n arg_20 = arg_16[arg_19]\n arg_21 = arg_14[arg_19]\n arg_22 = arg_18[arg_19]\n\n # return a dict with everything\n arg_23 = {\n 'vartype':'planet',\n 'params':{x:np.asscalar(y) for x,y in zip(['transitperiod',\n 'transitepoch',\n 'transitdepth',\n 'transitduration',\n 'ingressduration'],\n [arg_10,\n arg_9,\n arg_11,\n arg_12,\n arg_13])},\n 'times':arg_20,\n 'mags':arg_21,\n 'errs':arg_22,\n # these are standard keys that help with later characterization of\n # variability as a function period, variability amplitude, object mag,\n # ndet, etc.\n 'varperiod':arg_10,\n 'varamplitude':arg_11\n }\n\n return arg_23"} +{"_id": "doc_9195", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3={\n # flare peak amplitude from 0.01 mag to 1.0 mag above median. 
this\n # is tuned for redder bands, flares are much stronger in bluer\n # bands, so tune appropriately for your situation.\n 'amplitude':arg_4.uniform(arg_6=0.01,arg_7=0.99),\n # up to 5 flares per LC and at least 1\n 'nflares':[1,5],\n # 10 minutes to 1 hour for rise stdev\n 'risestdev':arg_4.uniform(arg_6=0.007, arg_7=0.04),\n # 1 hour to 4 hours for decay time constant\n 'decayconst':arg_4.uniform(arg_6=0.04, arg_7=0.163)\n },\n arg_8=False,\n):\n '''This generates fake flare light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'amplitude', 'nflares', 'risestdev', 'decayconst'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The `flare_peak_time` for each flare will be generated automatically\n between `times.min()` and `times.max()` using a uniform distribution.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'flare',\n 'params': {'amplitude': generated value of flare amplitudes,\n 'nflares': generated value of number of flares,\n 'risestdev': generated value of stdev of rise time,\n 'decayconst': generated value of decay constant,\n 'peaktime': generated value of flare peak time},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}\n\n '''\n\n if arg_1 is None:\n arg_1 = np.full_like(arg_0, 0.0)\n\n if arg_2 is None:\n arg_2 = np.full_like(arg_0, 0.0)\n\n arg_9 = npr.randint(arg_3['nflares'][0],\n high=arg_3['nflares'][1])\n\n # generate random flare peak times based on the number of flares\n arg_10 = (\n npr.random(\n size=arg_9\n )*(arg_0.max() - arg_0.min()) + arg_0.min()\n )\n\n # now add the flares to the time-series\n arg_11 = {'nflares':arg_9}\n\n for arg_12, arg_13 in zip(range(arg_9), arg_10):\n\n # choose the amplitude, rise stdev and decay time constant\n arg_14 = arg_3['amplitude'].rvs(size=1)\n arg_15 = arg_3['risestdev'].rvs(size=1)\n arg_16 = arg_3['decayconst'].rvs(size=1)\n\n # fix the transit depth if it needs to be flipped\n if arg_8 and arg_14 < 0.0:\n arg_14 = -arg_14\n elif not arg_8 and arg_14 > 0.0:\n arg_14 = -arg_14\n\n # add this flare to the light curve\n arg_17, arg_18, arg_19, arg_20 = (\n flares.flare_model(\n [arg_14, arg_13, arg_15, arg_16],\n arg_0,\n arg_1,\n arg_2\n )\n )\n\n # update the mags\n arg_1 = arg_17\n\n # add the flare params to the modeldict\n arg_11[arg_12] = {'peaktime':arg_13,\n 'amplitude':arg_14,\n 'risestdev':arg_15,\n 'decayconst':arg_16}\n\n\n #\n # done with all flares\n #\n\n # return a dict with everything\n arg_21 = {\n 'vartype':'flare',\n 'params':arg_11,\n 'times':arg_0,\n 'mags':arg_1,\n 'errs':arg_2,\n 'varperiod':None,\n # FIXME: this is complicated because we can have multiple 
flares\n # figure out a good way to handle this upstream\n 'varamplitude':[arg_11[x]['amplitude']\n for x in range(arg_11['nflares'])],\n }\n\n return arg_21"} +{"_id": "doc_9196", "title": "", "text": "def Func(arg_0):\n '''\n This wraps `process_fakelc` for `make_fakelc_collection` below.\n\n Parameters\n ----------\n\n task : tuple\n This is of the form::\n\n task[0] = lcfile\n task[1] = outdir\n task[2] = magrms\n task[3] = dict with keys: {'lcformat', 'timecols', 'magcols',\n 'errcols', 'randomizeinfo'}\n\n Returns\n -------\n\n tuple\n This returns a tuple of the form::\n\n (fakelc_fpath,\n fakelc_lcdict['columns'],\n fakelc_lcdict['objectinfo'],\n fakelc_lcdict['moments'])\n '''\n\n arg_1, arg_2, arg_3 = arg_0\n\n try:\n\n arg_4 = make_fakelc(\n arg_1,\n arg_2,\n **arg_3\n )\n\n return arg_4\n\n except Exception as e:\n\n LOGEXCEPTION('could not process %s into a fakelc' % arg_1)\n return None"} +{"_id": "doc_9197", "title": "", "text": "def Func(arg_0,\n arg_1=None,\n arg_2=False):\n '''This adds variability and noise to all fake LCs in `simbasedir`.\n\n If an object is marked as variable in the `fakelcs-info`.pkl file in\n `simbasedir`, a variable signal will be added to its light curve based on\n its selected type, default period and amplitude distribution, the\n appropriate params, etc. the epochs for each variable object will be chosen\n uniformly from its time-range (and may not necessarily fall on a actual\n observed time). Nonvariable objects will only have noise added as determined\n by their params, but no variable signal will be added.\n\n Parameters\n ----------\n\n simbasedir : str\n The directory containing the fake LCs to process.\n\n override_paramdists : dict\n This can be used to override the stored variable parameters in each fake\n LC. It should be a dict of the following form::\n\n {'': {': a scipy.stats distribution function or\n the np.random.randint function,\n .\n .\n .\n ': a scipy.stats distribution function\n or the np.random.randint function}\n\n for any vartype in VARTYPE_LCGEN_MAP. 
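doc_9195 above builds multi-flare light curves by repeatedly calling `flares.flare_model` with a peak amplitude, peak time, rise stdev, and decay time constant, feeding the updated mags back in on each pass. Judging from those parameter names, the underlying shape is a Gaussian rise joined to an exponential decay at the peak; a simplified standalone version under that assumption (not the corpus's implementation):

import numpy as np

def simple_flare(times, amplitude, peaktime, risestdev, decayconst):
    # Gaussian rise up to the peak, exponential decay after it
    model = np.zeros_like(times)
    rise = times <= peaktime
    model[rise] = amplitude * np.exp(
        -(times[rise] - peaktime) ** 2 / (2.0 * risestdev ** 2)
    )
    model[~rise] = amplitude * np.exp(-(times[~rise] - peaktime) / decayconst)
    return model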
These are used to override the\n default parameter distributions for each variable type.\n\n overwrite_existingvar : bool\n If this is True, then will overwrite any existing variability in the\n input fake LCs in `simbasedir`.\n\n Returns\n -------\n\n dict\n This returns a dict containing the fake LC filenames as keys and\n variability info for each as values.\n\n '''\n\n # open the fakelcs-info.pkl\n arg_3 = os.path.join(arg_0,'fakelcs-info.pkl')\n with open(arg_3, 'rb') as infd:\n arg_4 = pickle.load(infd)\n\n\n arg_5 = arg_4['lcfpath']\n arg_6 = arg_4['isvariable']\n arg_7 = arg_4['vartype']\n\n arg_8 = 0\n\n arg_9 = {}\n\n # go through all the LCs and add the required type of variability\n for arg_10, arg_11, arg_12 in zip(arg_5, arg_6, range(len(arg_5))):\n\n # if this object is variable, add variability\n if arg_11:\n\n arg_13 = arg_7[arg_8]\n\n if (arg_1 and\n isinstance(arg_1, dict) and\n arg_13 in arg_1 and\n isinstance(arg_1[arg_13], dict)):\n\n arg_14 = arg_1[arg_13]\n else:\n arg_14 = None\n\n\n arg_15 = add_fakelc_variability(\n arg_10, arg_13,\n arg_1=arg_14,\n overwrite=arg_2\n )\n arg_9[arg_15['objectid']] = {'params': arg_15['actual_varparams'],\n 'vartype': arg_15['actual_vartype']}\n\n # update vartind\n arg_8 = arg_8 + 1\n\n else:\n\n arg_15 = add_fakelc_variability(\n arg_10, None,\n overwrite=arg_2\n )\n arg_9[arg_15['objectid']] = {'params': arg_15['actual_varparams'],\n 'vartype': arg_15['actual_vartype']}\n\n\n #\n # done with all objects\n #\n\n # write the varinfo back to the dict and fakelcs-info.pkl\n arg_4['varinfo'] = arg_9\n\n arg_16 = '%s.%s' % (arg_3, md5(npr.bytes(4)).hexdigest()[-8:])\n with open(arg_16, 'wb') as outfd:\n pickle.dump(arg_4, outfd, pickle.HIGHEST_PROTOCOL)\n\n if os.path.exists(arg_16):\n shutil.copy(arg_16, arg_3)\n os.remove(arg_16)\n else:\n LOGEXCEPTION('could not write output light curve file to dir: %s' %\n os.path.dirname(arg_16))\n # fail here\n raise\n\n return arg_4"} +{"_id": "doc_9198", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=97,\n arg_4=4.0,\n arg_5=1,\n arg_6=3,\n arg_7=False,\n arg_8=2,\n **arg_9):\n '''This finds flares in time series using the method in Walkowicz+ 2011.\n\n FIXME: finish this.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input time-series to find flares in.\n\n smoothbinsize : int\n The number of consecutive light curve points to smooth over in the time\n series using a Savitsky-Golay filter. The smoothed light curve is then\n subtracted from the actual light curve to remove trends that potentially\n last `smoothbinsize` light curve points. The default value is chosen as\n ~6.5 hours (97 x 4 minute cadence for HATNet/HATSouth).\n\n flare_minsigma : float\n The minimum sigma above the median LC level to designate points as\n belonging to possible flares.\n\n flare_maxcadencediff : int\n The maximum number of light curve points apart each possible flare event\n measurement is allowed to be. 
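doc_9197 never rewrites `fakelcs-info.pkl` in place: it dumps to a sibling file whose name carries an md5-of-random-bytes suffix, then copies that over the original and removes the temporary, so an interrupted write cannot leave a truncated live file. The same pattern as a helper (illustrative name; `os.urandom` stands in for the corpus's `numpy.random.bytes`):

import os
import pickle
import shutil
from hashlib import md5

def update_pickle(obj, path):
    # write next to the target, then copy over it and clean up
    tmppath = '%s.%s' % (path, md5(os.urandom(4)).hexdigest()[-8:])
    with open(tmppath, 'wb') as outfd:
        pickle.dump(obj, outfd, pickle.HIGHEST_PROTOCOL)
    shutil.copy(tmppath, path)
    os.remove(tmppath)
    return path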
If this is 1, then we'll look for\n consecutive measurements.\n\n flare_mincadencepoints : int\n The minimum number of light curve points (each `flare_maxcadencediff`\n points apart) required that are at least `flare_minsigma` above the\n median light curve level to call an event a flare.\n\n magsarefluxes: bool\n If True, indicates that mags is actually an array of fluxes.\n\n savgol_polyorder: int\n The polynomial order of the function used by the Savitsky-Golay filter.\n\n savgol_kwargs : extra kwargs\n Any remaining keyword arguments are passed directly to the\n `savgol_filter` function from `scipy.signal`.\n\n Returns\n -------\n\n (nflares, flare_indices) : tuple\n Returns the total number of flares found and their time-indices (start,\n end) as tuples.\n\n '''\n\n # if no errs are given, assume 0.1% errors\n if arg_2 is None:\n arg_2 = 0.001*arg_1\n\n # get rid of nans first\n arg_10 = np.isfinite(arg_0) & np.isfinite(arg_1) & np.isfinite(arg_2)\n arg_11 = arg_0[arg_10]\n arg_12 = arg_1[arg_10]\n arg_13 = arg_2[arg_10]\n\n # now get the smoothed mag series using the filter\n # kwargs are provided to the savgol_filter function\n arg_14 = savgol_filter(arg_12,\n arg_3,\n arg_8,\n **arg_9)\n arg_15 = arg_12 - arg_14\n\n # calculate some stats\n # the series_median is ~zero after subtraction\n arg_16 = np.median(np.abs(arg_15))\n arg_17 = 1.483*arg_16\n\n # find extreme positive deviations\n if arg_7:\n arg_18 = np.where(arg_15 > (arg_4*arg_17))\n else:\n arg_18 = np.where(arg_15 < (-arg_4*arg_17))\n\n # see if there are any extrema\n if arg_18 and arg_18[0]:\n\n arg_19 = arg_18[0]\n arg_20 = []\n\n # find the deviations within the requested flaremaxcadencediff\n for arg_21, arg_22 in enumerate(arg_19):\n # FIXME: finish this\n pass"} +{"_id": "doc_9199", "title": "", "text": "def Func(arg_0, arg_1, arg_2=20, arg_3=1):\n '''This calculates the relative peak heights for first npeaks in ACF.\n\n Usually, the first peak or the second peak (if its peak height > first peak)\n corresponds to the correct lag. When we know the correct lag, the period is\n then::\n\n bestperiod = time[lags == bestlag] - time[0]\n\n Parameters\n ----------\n\n lags : np.array\n An array of lags that the ACF is calculated at.\n\n acf : np.array\n The array containing the ACF values.\n\n npeaks : int\n THe maximum number of peaks to consider when finding peak heights.\n\n searchinterval : int\n From `scipy.signal.argrelmax`: \"How many points on each side to use for\n the comparison to consider comparator(n, n+x) to be True.\" This\n effectively sets how many points on each of the current peak will be\n used to check if the current peak is the local maximum.\n\n Returns\n -------\n\n dict\n This returns a dict of the following form::\n\n {'maxinds':the indices of the lag array where maxes are,\n 'maxacfs':the ACF values at each max,\n 'maxlags':the lag values at each max,\n 'mininds':the indices of the lag array where mins are,\n 'minacfs':the ACF values at each min,\n 'minlags':the lag values at each min,\n 'relpeakheights':the relative peak heights of each rel. ACF peak,\n 'relpeaklags':the lags at each rel. ACF peak found,\n 'peakindices':the indices of arrays where each rel. ACF peak is,\n 'bestlag':the lag value with the largest rel. ACF peak height,\n 'bestpeakheight':the largest rel. ACF peak height,\n 'bestpeakindex':the largest rel. 
ACF peak's number in all peaks}\n\n '''\n\n arg_4 = argrelmax(arg_1, order=arg_3)[0]\n arg_5 = arg_1[arg_4]\n arg_6 = arg_0[arg_4]\n arg_7 = argrelmin(arg_1, order=arg_3)[0]\n arg_8 = arg_1[arg_7]\n arg_9 = arg_0[arg_7]\n\n arg_10 = npzeros(arg_2)\n arg_11 = npzeros(arg_2,dtype=npint64)\n arg_12 = npzeros(arg_2,dtype=npint64)\n\n for arg_13, arg_14 in enumerate(arg_4[:arg_2]):\n\n # check if there are no mins to the left\n # throw away this peak because it's probably spurious\n # (FIXME: is this OK?)\n if npall(arg_14 < arg_7):\n continue\n\n arg_15 = arg_7[arg_7 < arg_14][-1] # the last index to the left\n arg_16 = arg_7[arg_7 > arg_14][0] # the first index to the right\n arg_10[arg_13] = (\n arg_1[arg_14] - (arg_1[arg_15] + arg_1[arg_16])/2.0\n )\n arg_11[arg_13] = arg_0[arg_14]\n arg_12[arg_13] = arg_13\n\n # figure out the bestperiod if possible\n if arg_10[0] > arg_10[1]:\n arg_17 = arg_11[0]\n arg_18 = arg_10[0]\n arg_19 = arg_12[0]\n else:\n arg_17 = arg_11[1]\n arg_18 = arg_10[1]\n arg_19 = arg_12[1]\n\n return {'maxinds':arg_4,\n 'maxacfs':arg_5,\n 'maxlags':arg_6,\n 'mininds':arg_7,\n 'minacfs':arg_8,\n 'minlags':arg_9,\n 'relpeakheights':arg_10,\n 'relpeaklags':arg_11,\n 'peakindices':arg_12,\n 'bestlag':arg_17,\n 'bestpeakheight':arg_18,\n 'bestpeakindex':arg_19}"} +{"_id": "doc_9200", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''\n This is yet another alternative to calculate the autocorrelation.\n\n Taken from: `Bayesian Methods for Hackers by Cameron Pilon `_\n\n (This should be the fastest method to calculate ACFs.)\n\n Parameters\n ----------\n\n mags : np.array\n This is the magnitudes array. MUST NOT have any nans.\n\n lag : float\n The specific lag value to calculate the auto-correlation for. This MUST\n be less than total number of observations in `mags`.\n\n maglen : int\n The number of elements in the `mags` array.\n\n magmed : float\n The median of the `mags` array.\n\n magstd : float\n The standard deviation of the `mags` array.\n\n Returns\n -------\n\n float\n The auto-correlation at this specific `lag` value.\n\n '''\n\n # from http://tinyurl.com/afz57c4\n arg_5 = npcorrelate(arg_0, arg_0, mode='full')\n arg_5 = arg_5 / npmax(arg_5)\n\n return arg_5[int(arg_5.size / 2):]"} +{"_id": "doc_9201", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3=1000,\n arg_4=arg_5,\n arg_6=0.0,\n arg_7=11,\n arg_8=None,\n arg_9=3.0,\n arg_10=False,\n arg_11=True):\n '''This calculates the ACF of a light curve.\n\n This will pre-process the light curve to fill in all the gaps and normalize\n everything to zero. If `fillgaps = 'noiselevel'`, fills the gaps with the\n noise level obtained via the procedure above. If `fillgaps = 'nan'`, fills\n the gaps with `np.nan`.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The measurement time-series and associated errors.\n\n maxlags : int\n The maximum number of lags to calculate.\n\n func : Python function\n This is a function to calculate the lags.\n\n fillgaps : 'noiselevel' or float\n This sets what to use to fill in gaps in the time series. If this is\n 'noiselevel', will smooth the light curve using a point window size of\n `filterwindow` (this should be an odd integer), subtract the smoothed LC\n from the actual LC and estimate the RMS. This RMS will be used to fill\n in the gaps. 
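Note that doc_9200's docstring promises the autocorrelation at one specific `lag`, but the body ignores `lag`, `maglen`, `magmed`, and `magstd` entirely and returns the whole normalized ACF from `np.correlate(mags, mags, mode='full')`, keeping only the non-negative lags; the per-lag arguments are vestiges of the slower alternatives it replaces. What it actually computes, stand-alone:

import numpy as np

def full_acf(mags):
    # normalized autocorrelation for all lags >= 0; mags must be nan-free
    acf = np.correlate(mags, mags, mode='full')
    acf = acf / np.max(acf)
    return acf[acf.size // 2:]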
Other useful values here are 0.0, and npnan.\n\n filterwindow : int\n The light curve's smoothing filter window size to use if\n `fillgaps='noiselevel`'.\n\n forcetimebin : None or float\n This is used to force a particular cadence in the light curve other than\n the automatically determined cadence. This effectively rebins the light\n curve to this cadence. This should be in the same time units as `times`.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If your input measurements in `mags` are actually fluxes instead of\n mags, set this is True.\n\n verbose : bool\n If True, will indicate progress and report errors.\n\n Returns\n -------\n\n dict\n A dict of the following form is returned::\n\n {'itimes': the interpolated time values after gap-filling,\n 'imags': the interpolated mag/flux values after gap-filling,\n 'ierrs': the interpolated mag/flux values after gap-filling,\n 'cadence': the cadence of the output mag/flux time-series,\n 'minitime': the minimum value of the interpolated times array,\n 'lags': the lags used to calculate the auto-correlation function,\n 'acf': the value of the ACF at each lag used}\n\n '''\n\n # get the gap-filled timeseries\n arg_12 = fill_magseries_gaps(arg_0, arg_1, arg_2,\n arg_6=arg_6,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_7=arg_7,\n arg_11=arg_11)\n\n if not arg_12:\n print('failed to interpolate light curve to minimum cadence!')\n return None\n\n arg_13, arg_14 = arg_12['itimes'], arg_12['imags'],\n\n # calculate the lags up to maxlags\n if arg_3:\n arg_15 = nparange(0, arg_3)\n else:\n arg_15 = nparange(arg_13.size)\n\n arg_16 = 1.483*npmedian(npabs(arg_14))\n\n if arg_4 != arg_5:\n\n # get the autocorrelation as a function of the lag of the mag series\n arg_17 = nparray([arg_4(arg_14, x, arg_14.size, 0.0, arg_16)\n for x in arg_15])\n\n # this doesn't need a lags array\n else:\n\n arg_17 = arg_5(arg_14, arg_15[0], arg_14.size,\n 0.0, arg_16)\n # return only the maximum number of lags\n if arg_3 is not None:\n arg_17 = arg_17[:arg_3]\n\n arg_12.update({'minitime':arg_13.min(),\n 'lags':arg_15,\n 'acf':arg_17})\n\n return arg_12"} +{"_id": "doc_9202", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4, arg_5):\n '''This calculates the harmonic AoV theta statistic for a frequency.\n\n This is a mostly faithful translation of the inner loop in `aovper.f90`. 
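Just above, doc_9201's `fillgaps='noiselevel'` option is described as: smooth the light curve over a `filterwindow`-point window (an odd integer), subtract the smoothed curve from the original, and estimate the RMS of the residual to use as fill noise. A condensed sketch of that estimate, using a median filter as one reasonable choice of smoother and a MAD-based robust RMS, as elsewhere in this corpus (the actual `fill_magseries_gaps` is not shown here):

import numpy as np
from scipy.signal import medfilt

def noise_level(mags, filterwindow=11):
    # smooth, subtract, and take a robust stdev of the residual
    smoothed = medfilt(mags, filterwindow)
    residual = mags - smoothed
    return 1.483 * np.median(np.abs(residual - np.median(residual)))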
See\n the following for details:\n\n - http://users.camk.edu.pl/alex/\n - Schwarzenberg-Czerny (`1996\n `_)\n\n Schwarzenberg-Czerny (1996) equation 11::\n\n theta_prefactor = (K - 2N - 1)/(2N)\n theta_top = sum(c_n*c_n) (from n=0 to n=2N)\n theta_bot = variance(timeseries) - sum(c_n*c_n) (from n=0 to n=2N)\n\n theta = theta_prefactor * (theta_top/theta_bot)\n\n N = number of harmonics (nharmonics)\n K = length of time series (times.size)\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input time-series to calculate the test statistic for. These should\n all be of nans/infs and be normalized to zero.\n\n frequency : float\n The test frequency to calculate the statistic for.\n\n nharmonics : int\n The number of harmonics to calculate up to.The recommended range is 4 to\n 8.\n\n magvariance : float\n This is the (weighted by errors) variance of the magnitude time\n series. We provide it as a pre-calculated value here so we don't have to\n re-calculate it for every worker.\n\n Returns\n -------\n\n aov_harmonic_theta : float\n THe value of the harmonic AoV theta for the specified test `frequency`.\n\n '''\n\n arg_6 = 1.0/arg_3\n\n arg_7 = arg_0.size\n arg_8 = arg_4 + arg_4\n\n # phase with test period\n arg_9 = phase_magseries_with_errs(\n arg_0, arg_1, arg_2, arg_6, arg_0[0],\n sort=True, wrap=False\n )\n\n # get the phased quantities\n arg_10 = arg_9['phase']\n arg_11 = arg_9['mags']\n arg_12 = arg_9['errs']\n\n # this is sqrt(1.0/errs^2) -> the weights\n arg_13 = 1.0/arg_12\n\n # multiply by 2.0*PI (for omega*time)\n arg_10 = arg_10 * 2.0 * pi_value\n\n # this is the z complex vector\n arg_14 = npcos(arg_10) + 1.0j*npsin(arg_10)\n\n # multiply phase with N\n arg_10 = arg_4 * arg_10\n\n # this is the psi complex vector\n arg_15 = arg_11 * arg_13 * (npcos(arg_10) + 1j*npsin(arg_10))\n\n # this is the initial value of z^n\n arg_16 = 1.0 + 0.0j\n\n # this is the initial value of phi\n arg_17 = arg_13 + 0.0j\n\n # initialize theta to zero\n arg_18 = 0.0\n\n # go through all the harmonics now up to 2N\n for arg_19 in range(arg_8):\n\n # this is \n arg_20 = npsum(arg_17 * arg_17.conjugate())\n\n # this is the alpha_n numerator\n arg_21 = npsum(arg_13 * arg_14 * arg_17)\n\n # this is . 
make sure to use npvdot and NOT npdot to get\n # complex conjugate of first vector as expected for complex vectors\n arg_22 = npvdot(arg_17, arg_15)\n\n # make sure phi_dot_phi is not zero\n arg_20 = npmax([arg_20, 10.0e-9])\n\n # this is the expression for alpha_n\n arg_21 = arg_21 / arg_20\n\n # update theta_aov for this harmonic\n arg_18 = (arg_18 +\n npabs(arg_22) * npabs(arg_22) / arg_20)\n\n # use the recurrence relation to find the next phi\n arg_17 = arg_17 * arg_14 - arg_21 * arg_16 * arg_17.conjugate()\n\n # update z^n\n arg_16 = arg_16 * arg_14\n\n\n # done with all harmonics, calculate the theta_aov for this freq\n # the max below makes sure that magvariance - theta_aov > zero\n arg_18 = ( (arg_7 - arg_8 - 1.0) * arg_18 /\n (arg_8 * npmax([arg_5 - arg_18,\n 1.0e-9])) )\n\n return arg_18"} +{"_id": "doc_9203", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4):\n '''This Funcs a new database connection.\n\n Parameters\n ----------\n\n database : str\n Name of the database to connect to.\n\n user : str\n User name of the database server user.\n\n password : str\n Password for the database server user.\n\n host : str\n Database hostname or IP address to connect to.\n\n '''\n\n try:\n\n arg_0.connection = pg.connect(arg_2=arg_2,\n arg_3=arg_3,\n arg_1=arg_1,\n arg_4=arg_4)\n\n LOGINFO('postgres connection successfully '\n 'created, using DB %s, user %s' % (arg_1,\n arg_2))\n\n arg_0.database = arg_1\n arg_0.user = arg_2\n\n except Exception as e:\n\n LOGEXCEPTION('postgres connection failed, '\n 'using DB %s, user %s' % (arg_1,\n arg_2))\n\n arg_0.database = None\n arg_0.user = None"} +{"_id": "doc_9204", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=2.0,\n arg_3=True,\n arg_4=None):\n '''This xmatches external catalogs to a collection of checkplots.\n\n Parameters\n ----------\n\n cplist : list of str\n This is the list of checkplot pickle files to process.\n\n xmatchpkl : str\n The filename of a pickle prepared beforehand with the\n `checkplot.pkl_xmatch.load_xmatch_external_catalogs` function,\n containing collected external catalogs to cross-match the objects in the\n input `cplist` against.\n\n xmatchradiusarcsec : float\n The match radius to use for the cross-match in arcseconds.\n\n updateexisting : bool\n If this is True, will only update the `xmatch` dict in each checkplot\n pickle with any new cross-matches to the external catalogs. If False,\n will overwrite the `xmatch` dict with results from the current run.\n\n resultstodir : str or None\n If this is provided, then it must be a directory to write the resulting\n checkplots to after xmatch is done. This can be used to keep the\n original checkplots in pristine condition for some reason.\n\n Returns\n -------\n\n dict\n Returns a dict with keys = input checkplot pickle filenames and vals =\n xmatch status dict for each checkplot pickle.\n\n '''\n\n # load the external catalog\n with open(arg_1,'rb') as infd:\n arg_5 = pickle.load(infd)\n\n # match each object. 
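doc_9204 below hands each checkplot to `xmatch_external_catalogs` with a match radius in arcseconds; against the unit-vector kd-trees built earlier, such an angular radius must first become a chord length, since the tree measures 3-D Euclidean distance. A sketch of that query (illustrative names, assuming a tree built as in the earlier `build_radec_kdtree` helper):

import numpy as np

def query_radius_arcsec(kdtree, ra_deg, decl_deg, radius_arcsec):
    ra, decl = np.radians(ra_deg), np.radians(decl_deg)
    xyz = np.array([np.cos(ra) * np.cos(decl),
                    np.sin(ra) * np.cos(decl),
                    np.sin(decl)])
    # chord length on the unit sphere subtending the angular match radius
    chord = 2.0 * np.sin(np.radians(radius_arcsec / 3600.0) / 2.0)
    return kdtree.query_ball_point(xyz, chord)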
this is fairly fast, so this is not parallelized at the\n # moment\n\n arg_6 = {}\n\n for arg_7 in arg_0:\n\n arg_8 = _read_checkplot_picklefile(arg_7)\n\n try:\n\n # match in place\n xmatch_external_catalogs(arg_8, arg_5,\n arg_2=arg_2,\n updatexmatch=arg_3)\n\n for arg_9 in arg_8['xmatch']:\n\n if arg_8['xmatch'][arg_9]['found']:\n LOGINFO('checkplot %s: %s matched to %s, '\n 'match dist: %s arcsec' %\n (os.path.basename(arg_7),\n arg_8['objectid'],\n arg_8['xmatch'][arg_9]['name'],\n arg_8['xmatch'][arg_9]['distarcsec']))\n\n if not arg_4:\n arg_10 = _write_checkplot_picklefile(arg_8,\n outfile=arg_7)\n else:\n arg_11 = os.path.join(arg_4, os.path.basename(arg_7))\n arg_10 = _write_checkplot_picklefile(arg_8,\n outfile=arg_11)\n\n arg_6[arg_7] = arg_10\n\n except Exception as e:\n\n LOGEXCEPTION('failed to match objects for %s' % arg_7)\n arg_6[arg_7] = None\n\n return arg_6"} +{"_id": "doc_9205", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2='checkplot-*.pkl*',\n arg_3=2.0,\n arg_4=True,\n arg_5=None):\n '''This xmatches external catalogs to all checkplots in a directory.\n\n Parameters\n -----------\n\n cpdir : str\n This is the directory to search in for checkplots.\n\n xmatchpkl : str\n The filename of a pickle prepared beforehand with the\n `checkplot.pkl_xmatch.load_xmatch_external_catalogs` function,\n containing collected external catalogs to cross-match the objects in the\n input `cplist` against.\n\n cpfileglob : str\n This is the UNIX fileglob to use in searching for checkplots.\n\n xmatchradiusarcsec : float\n The match radius to use for the cross-match in arcseconds.\n\n updateexisting : bool\n If this is True, will only update the `xmatch` dict in each checkplot\n pickle with any new cross-matches to the external catalogs. If False,\n will overwrite the `xmatch` dict with results from the current run.\n\n resultstodir : str or None\n If this is provided, then it must be a directory to write the resulting\n checkplots to after xmatch is done. 
This can be used to keep the\n original checkplots in pristine condition for some reason.\n\n Returns\n -------\n\n dict\n Returns a dict with keys = input checkplot pickle filenames and vals =\n xmatch status dict for each checkplot pickle.\n\n '''\n\n arg_6 = glob.glob(os.path.join(arg_0, arg_2))\n\n return xmatch_cplist_external_catalogs(\n arg_6,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5\n )"} +{"_id": "doc_9206", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=['gaiamag','sdssg'],\n arg_3=['kmag','kmag'],\n arg_4=['gaia_absmag','rpmj']):\n '''This makes color-mag diagrams for all checkplot pickles in the provided\n list.\n\n Can make an arbitrary number of CMDs given lists of x-axis colors and y-axis\n mags to use.\n\n Parameters\n ----------\n\n cplist : list of str\n This is the list of checkplot pickles to process.\n\n outpkl : str\n The filename of the output pickle that will contain the color-mag\n information for all objects in the checkplots specified in `cplist`.\n\n color_mag1 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_1 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n color_mag2 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_2 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n yaxis_mag : list of str\n This is a list of the keys in each checkplot's `objectinfo` dict that\n will be used as the (absolute) magnitude y-axis of the color-mag\n diagrams.\n\n Returns\n -------\n\n str\n The path to the generated CMD pickle file for the collection of objects\n in the input checkplot list.\n\n Notes\n -----\n\n This can make many CMDs in one go. For example, the default kwargs for\n `color_mag`, `color_mag2`, and `yaxis_mag` result in two CMDs generated and\n written to the output pickle file:\n\n - CMD1 -> gaiamag - kmag on the x-axis vs gaia_absmag on the y-axis\n - CMD2 -> sdssg - kmag on the x-axis vs rpmj (J reduced PM) on the y-axis\n\n '''\n\n # first, we'll collect all of the info\n arg_5 = []\n arg_6 = []\n arg_7 = []\n\n for arg_8 in arg_0:\n\n arg_9 = _read_checkplot_picklefile(arg_8)\n arg_5.append(arg_9['objectid'])\n\n arg_10 = []\n arg_11 = []\n\n for arg_12, arg_13, arg_14 in zip(arg_2, arg_3, arg_4):\n\n if (arg_14 in arg_9['objectinfo'] and\n arg_9['objectinfo'][arg_14] is not None):\n arg_10.append(arg_9['objectinfo'][arg_14])\n else:\n arg_10.append(np.nan)\n\n if (arg_12 in arg_9['objectinfo'] and\n arg_9['objectinfo'][arg_12] is not None and\n arg_13 in arg_9['objectinfo'] and\n arg_9['objectinfo'][arg_13] is not None):\n arg_11.append(arg_9['objectinfo'][arg_12] -\n arg_9['objectinfo'][arg_13])\n else:\n arg_11.append(np.nan)\n\n arg_6.append(arg_10)\n arg_7.append(arg_11)\n\n\n # convert these to arrays\n arg_5 = np.array(arg_5)\n arg_6 = np.array(arg_6)\n arg_7 = np.array(arg_7)\n\n # prepare the outdict\n arg_15 = {'objectids':arg_5,\n 'mags':arg_6,\n 'colors':arg_7,\n 'color_mag1':arg_2,\n 'color_mag2':arg_3,\n 'yaxis_mag':arg_4}\n\n # save the pickled figure and dict for fast retrieval later\n with open(arg_1,'wb') as outfd:\n pickle.dump(arg_15, outfd, pickle.HIGHEST_PROTOCOL)\n\n plt.close('all')\n\n return arg_15"} +{"_id": "doc_9207", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2='checkplot*.pkl*',\n arg_3=['gaiamag','sdssg'],\n arg_4=['kmag','kmag'],\n arg_5=['gaia_absmag','rpmj']\n):\n '''This makes CMDs for all checkplot pickles in the provided directory.\n\n Can make 
an arbitrary number of CMDs given lists of x-axis colors and y-axis\n mags to use.\n\n Parameters\n ----------\n\n cpdir : list of str\n This is the directory to get the list of input checkplot pickles from.\n\n outpkl : str\n The filename of the output pickle that will contain the color-mag\n information for all objects in the checkplots specified in `cplist`.\n\n cpfileglob : str\n The UNIX fileglob to use to search for checkplot pickle files.\n\n color_mag1 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_1 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n color_mag2 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_2 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n yaxis_mag : list of str\n This is a list of the keys in each checkplot's `objectinfo` dict that\n will be used as the (absolute) magnitude y-axis of the color-mag\n diagrams.\n\n Returns\n -------\n\n str\n The path to the generated CMD pickle file for the collection of objects\n in the input checkplot directory.\n\n Notes\n -----\n\n This can make many CMDs in one go. For example, the default kwargs for\n `color_mag`, `color_mag2`, and `yaxis_mag` result in two CMDs generated and\n written to the output pickle file:\n\n - CMD1 -> gaiamag - kmag on the x-axis vs gaia_absmag on the y-axis\n - CMD2 -> sdssg - kmag on the x-axis vs rpmj (J reduced PM) on the y-axis\n\n '''\n\n arg_6 = glob.glob(os.path.join(arg_0, arg_2))\n\n return colormagdiagram_cplist(arg_6,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5)"} +{"_id": "doc_9208", "title": "", "text": "def Func(arg_0, arg_1,\n arg_2=True,\n arg_3=False):\n '''This adds CMDs for each object in cplist.\n\n Parameters\n ----------\n\n cplist : list of str\n This is the input list of checkplot pickles to add the CMDs to.\n\n cmdpkl : str\n This is the filename of the CMD pickle created previously.\n\n require_cmd_magcolor : bool\n If this is True, a CMD plot will not be made if the color and mag keys\n required by the CMD are not present or are nan in each checkplot's\n objectinfo dict.\n\n save_cmd_pngs : bool\n If this is True, then will save the CMD plots that were generated and\n added back to the checkplotdict as PNGs to the same directory as\n `cpx`.\n\n Returns\n -------\n\n Nothing.\n\n\n '''\n\n # load the CMD first to save on IO\n with open(arg_1,'rb') as infd:\n arg_4 = pickle.load(infd)\n\n for arg_5 in arg_0:\n\n add_cmd_to_checkplot(arg_5, arg_4,\n arg_2=arg_2,\n arg_3=arg_3)"} +{"_id": "doc_9209", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2='checkplot*.pkl*',\n arg_3=True,\n arg_4=False):\n '''This adds CMDs for each object in cpdir.\n\n Parameters\n ----------\n\n cpdir : list of str\n This is the directory to search for checkplot pickles.\n\n cmdpkl : str\n This is the filename of the CMD pickle created previously.\n\n cpfileglob : str\n The UNIX fileglob to use when searching for checkplot pickles to operate\n on.\n\n require_cmd_magcolor : bool\n If this is True, a CMD plot will not be made if the color and mag keys\n required by the CMD are not present or are nan in each checkplot's\n objectinfo dict.\n\n save_cmd_pngs : bool\n If this is True, then will save the CMD plots that were generated and\n added back to the checkplotdict as PNGs to the same directory as\n `cpx`.\n\n Returns\n -------\n\n Nothing.\n\n '''\n\n arg_5 = glob.glob(os.path.join(arg_0, arg_2))\n\n return add_cmds_cplist(arg_5,\n 
arg_1,\n arg_3=arg_3,\n arg_4=arg_4)"} +{"_id": "doc_9210", "title": "", "text": "def Func(\n arg_0,\n arg_1=None,\n arg_2=None,\n arg_3=arg_4,\n arg_5=False,\n arg_6='gray_r',\n arg_7=None,\n arg_8=True,\n arg_9=None,\n arg_10=10.0,\n arg_11=3,\n arg_12=180.0,\n arg_13=None,\n arg_14=True,\n arg_15=None,\n arg_16=60.0,\n arg_17=5,\n arg_18=100,\n arg_19='~/.astrobase/stamp-cache',\n arg_20=True\n):\n '''\n This updates objectinfo for a list of checkplots.\n\n Useful in cases where a previous round of GAIA/finderchart/external catalog\n acquisition failed. This will preserve the following keys in the checkplots\n if they exist:\n\n comments\n varinfo\n objectinfo.objecttags\n\n Parameters\n ----------\n\n cplist : list of str\n A list of checkplot pickle file names to update.\n\n liststartindex : int\n The index of the input list to start working at.\n\n maxobjects : int\n The maximum number of objects to process in this run. Use this with\n `liststartindex` to effectively distribute working on a large list of\n input checkplot pickles over several sessions or machines.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n update process.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond. See the docstring for\n `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this\n works. If this is True, will run in \"fast\" mode with default timeouts (5\n seconds in most cases). If this is a float, will run in \"fast\" mode with\n the provided timeout value in seconds.\n\n findercmap : str or matplotlib.cm.Colormap object\n\n findercmap : str or matplotlib.cm.ColorMap object\n The Colormap object to use for the finder chart image.\n\n finderconvolve : astropy.convolution.Kernel object or None\n If not None, the Kernel object to use for convolving the finder image.\n\n deredden_objects : bool\n If this is True, will use the 2MASS DUST service to get extinction\n coefficients in various bands, and then try to deredden the magnitudes\n and colors of the object already present in the checkplot's objectinfo\n dict.\n\n custom_bandpasses : dict\n This is a dict used to provide custom bandpass definitions for any\n magnitude measurements in the objectinfo dict that are not automatically\n recognized by the `varclass.starfeatures.color_features` function. See\n its docstring for details on the required format.\n\n gaia_submit_timeout : float\n Sets the timeout in seconds to use when submitting a request to look up\n the object's information to the GAIA service. Note that if `fast_mode`\n is set, this is ignored.\n\n gaia_submit_tries : int\n Sets the maximum number of times the GAIA services will be contacted to\n obtain this object's information. If `fast_mode` is set, this is\n ignored, and the services will be contacted only once (meaning that a\n failure to respond will be silently ignored and no GAIA data will be\n added to the checkplot's objectinfo dict).\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str\n This sets the GAIA mirror to use. 
This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n complete_query_later : bool\n If this is True, saves the state of GAIA queries that are not yet\n complete when `gaia_max_timeout` is reached while waiting for the GAIA\n service to respond to our request. A later call for GAIA info on the\n same object will attempt to pick up the results from the existing query\n if it's completed. If `fast_mode` is True, this is ignored.\n\n lclistpkl : dict or str\n If this is provided, must be a dict resulting from reading a catalog\n produced by the `lcproc.catalogs.make_lclist` function or a str path\n pointing to the pickle file produced by that function. This catalog is\n used to find neighbors of the current object in the current light curve\n collection. Looking at neighbors of the object within the radius\n specified by `nbrradiusarcsec` is useful for light curves produced by\n instruments that have a large pixel scale, so are susceptible to\n blending of variability and potential confusion of neighbor variability\n with that of the actual object being looked at. If this is None, no\n neighbor lookups will be performed.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n plotdpi : int\n The resolution in DPI of the plots to generate in this function\n (e.g. the finder chart, etc.)\n\n findercachedir : str\n The path to the astrobase cache directory for finder chart downloads\n from the NASA SkyView service.\n\n verbose : bool\n If True, will indicate progress and warn about potential problems.\n\n Returns\n -------\n\n list of str\n Paths to the updated checkplot pickle file.\n\n '''\n\n # work around the Darwin segfault after fork if no network activity in\n # main thread bug: https://bugs.python.org/issue30385#msg293958\n if sys.platform == 'darwin':\n import requests\n requests.get('http://captive.apple.com/hotspot-detect.html')\n\n # handle the start and end indices\n if (arg_1 is not None) and (arg_2 is None):\n arg_0 = arg_0[arg_1:]\n\n elif (arg_1 is None) and (arg_2 is not None):\n arg_0 = arg_0[:arg_2]\n\n elif (arg_1 is not None) and (arg_2 is not None):\n arg_0 = (\n arg_0[arg_1:arg_1+arg_2]\n )\n\n arg_21 = [(x, {'fast_mode':arg_5,\n 'findercmap':arg_6,\n 'finderconvolve':arg_7,\n 'deredden_object':arg_8,\n 'custom_bandpasses':arg_9,\n 'gaia_submit_timeout':arg_10,\n 'gaia_submit_tries':arg_11,\n 'gaia_max_timeout':arg_12,\n 'gaia_mirror':arg_13,\n 'complete_query_later':arg_14,\n 'lclistpkl':arg_15,\n 'nbrradiusarcsec':arg_16,\n 'maxnumneighbors':arg_17,\n 'plotdpi':arg_18,\n 'findercachedir':arg_19,\n 'verbose':arg_20}) for x in arg_0]\n\n arg_22 = []\n arg_23 = []\n\n with ProcessPoolExecutor(max_workers=arg_3) as executor:\n arg_22 = executor.map(cp_objectinfo_worker, arg_21)\n\n arg_23 = [x for x in arg_22]\n\n executor.shutdown()\n return arg_23"} +{"_id": "doc_9211", "title": "", "text": "def Func(arg_0,\n arg_1='checkplot-*.pkl*',\n arg_2=None,\n arg_3=None,\n arg_4=arg_5,\n arg_6=False,\n arg_7='gray_r',\n arg_8=None,\n arg_9=True,\n arg_10=None,\n arg_11=10.0,\n arg_12=3,\n arg_13=180.0,\n arg_14=None,\n arg_15=True,\n arg_16=None,\n arg_17=60.0,\n arg_18=5,\n 
arg_19=100,\n arg_20='~/.astrobase/stamp-cache',\n arg_21=True):\n '''This updates the objectinfo for a directory of checkplot pickles.\n\n Useful in cases where a previous round of GAIA/finderchart/external catalog\n acquisition failed. This will preserve the following keys in the checkplots\n if they exist:\n\n comments\n varinfo\n objectinfo.objecttags\n\n Parameters\n ----------\n\n cpdir : str\n The directory to look for checkplot pickles in.\n\n cpglob : str\n The UNIX fileglob to use when searching for checkplot pickle files.\n\n liststartindex : int\n The index of the input list to start working at.\n\n maxobjects : int\n The maximum number of objects to process in this run. Use this with\n `liststartindex` to effectively distribute working on a large list of\n input checkplot pickles over several sessions or machines.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n update process.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond. See the docstring for\n `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this\n works. If this is True, will run in \"fast\" mode with default timeouts (5\n seconds in most cases). If this is a float, will run in \"fast\" mode with\n the provided timeout value in seconds.\n\n findercmap : str or matplotlib.cm.Colormap object\n\n findercmap : str or matplotlib.cm.ColorMap object\n The Colormap object to use for the finder chart image.\n\n finderconvolve : astropy.convolution.Kernel object or None\n If not None, the Kernel object to use for convolving the finder image.\n\n deredden_objects : bool\n If this is True, will use the 2MASS DUST service to get extinction\n coefficients in various bands, and then try to deredden the magnitudes\n and colors of the object already present in the checkplot's objectinfo\n dict.\n\n custom_bandpasses : dict\n This is a dict used to provide custom bandpass definitions for any\n magnitude measurements in the objectinfo dict that are not automatically\n recognized by the `varclass.starfeatures.color_features` function. See\n its docstring for details on the required format.\n\n gaia_submit_timeout : float\n Sets the timeout in seconds to use when submitting a request to look up\n the object's information to the GAIA service. Note that if `fast_mode`\n is set, this is ignored.\n\n gaia_submit_tries : int\n Sets the maximum number of times the GAIA services will be contacted to\n obtain this object's information. If `fast_mode` is set, this is\n ignored, and the services will be contacted only once (meaning that a\n failure to respond will be silently ignored and no GAIA data will be\n added to the checkplot's objectinfo dict).\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str\n This sets the GAIA mirror to use. This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n complete_query_later : bool\n If this is True, saves the state of GAIA queries that are not yet\n complete when `gaia_max_timeout` is reached while waiting for the GAIA\n service to respond to our request. A later call for GAIA info on the\n same object will attempt to pick up the results from the existing query\n if it's completed. 
If `fast_mode` is True, this is ignored.\n\n lclistpkl : dict or str\n If this is provided, must be a dict resulting from reading a catalog\n produced by the `lcproc.catalogs.make_lclist` function or a str path\n pointing to the pickle file produced by that function. This catalog is\n used to find neighbors of the current object in the current light curve\n collection. Looking at neighbors of the object within the radius\n specified by `nbrradiusarcsec` is useful for light curves produced by\n instruments that have a large pixel scale, so are susceptible to\n blending of variability and potential confusion of neighbor variability\n with that of the actual object being looked at. If this is None, no\n neighbor lookups will be performed.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n plotdpi : int\n The resolution in DPI of the plots to generate in this function\n (e.g. the finder chart, etc.)\n\n findercachedir : str\n The path to the astrobase cache directory for finder chart downloads\n from the NASA SkyView service.\n\n verbose : bool\n If True, will indicate progress and warn about potential problems.\n\n Returns\n -------\n\n list of str\n Paths to the updated checkplot pickle file.\n\n '''\n\n arg_22 = sorted(glob.glob(os.path.join(arg_0, arg_1)))\n\n return parallel_update_objectinfo_cplist(\n arg_22,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_14=arg_14,\n arg_15=arg_15,\n arg_16=arg_16,\n arg_17=arg_17,\n arg_18=arg_18,\n arg_19=arg_19,\n arg_20=arg_20,\n arg_21=arg_21\n )"} +{"_id": "doc_9212", "title": "", "text": "def Func(arg_0):\n '''This gets the required keys from the requested file.\n\n Parameters\n ----------\n\n task : tuple\n Task is a two element tuple::\n\n - task[0] is the dict to work on\n\n - task[1] is a list of lists of str indicating all the key address to\n extract items from the dict for\n\n Returns\n -------\n\n list\n This is a list of all of the items at the requested key addresses.\n\n '''\n arg_1, arg_2 = arg_0\n\n arg_3 = _read_checkplot_picklefile(arg_1)\n\n arg_4 = []\n\n for arg_5 in arg_2:\n\n try:\n arg_4.append(_dict_get(arg_3, arg_5))\n except Exception as e:\n arg_4.append(np.nan)\n\n return arg_4"} +{"_id": "doc_9213", "title": "", "text": "def Func(arg_0,\n arg_1, arg_2, arg_3,\n arg_4, arg_5, arg_6):\n '''This is a double inverted gaussian.\n\n Parameters\n ----------\n\n x : np.array\n The items at which the Gaussian is evaluated.\n\n amp1,amp2 : float\n The amplitude of Gaussian 1 and Gaussian 2.\n\n loc1,loc2 : float\n The central value of Gaussian 1 and Gaussian 2.\n\n std1,std2 : float\n The standard deviation of Gaussian 1 and Gaussian 2.\n\n Returns\n -------\n\n np.array\n Returns a double inverted Gaussian function evaluated at the items in\n `x`, using the provided parameters of `amp`, `loc`, and `std` for two\n component Gaussians 1 and 2.\n\n '''\n\n arg_7 = -_gaussian(arg_0,arg_1,arg_2,arg_3)\n arg_8 = -_gaussian(arg_0,arg_4,arg_5,arg_6)\n return arg_7 + arg_8"} +{"_id": "doc_9214", "title": "", "text": "def Func(arg_0, arg_1, arg_2, 
arg_3):\n '''This returns a double eclipse shaped function.\n\n Suitable for first order modeling of eclipsing binaries.\n\n Parameters\n ----------\n\n ebparams : list of float\n This contains the parameters for the eclipsing binary::\n\n ebparams = [period (time),\n epoch (time),\n pdepth: primary eclipse depth (mags),\n pduration: primary eclipse duration (phase),\n psdepthratio: primary-secondary eclipse depth ratio,\n secondaryphase: center phase of the secondary eclipse]\n\n `period` is the period in days.\n\n `epoch` is the time of minimum in JD.\n\n `pdepth` is the depth of the primary eclipse.\n\n - for magnitudes -> pdepth should be < 0\n - for fluxes -> pdepth should be > 0\n\n `pduration` is the length of the primary eclipse in phase.\n\n `psdepthratio` is the ratio in the eclipse depths:\n `depth_secondary/depth_primary`. This is generally the same as the ratio\n of the `T_effs` of the two stars.\n\n `secondaryphase` is the phase at which the minimum of the secondary\n eclipse is located. This effectively parameterizes eccentricity.\n\n All of these will then have fitted values after the fit is done.\n\n times,mags,errs : np.array\n The input time-series of measurements and associated errors for which\n the eclipse model will be generated. The times will be used to generate\n model mags, and the input `times`, `mags`, and `errs` will be resorted\n by model phase and returned.\n\n Returns\n -------\n\n (modelmags, phase, ptimes, pmags, perrs) : tuple\n Returns the model mags and phase values. Also returns the input `times`,\n `mags`, and `errs` sorted by the model's phase.\n\n '''\n\n (arg_4, arg_5, arg_6, arg_7, arg_8, arg_9) = arg_0\n\n # generate the phases\n arg_10 = (arg_1 - arg_5)/arg_4\n arg_10 = arg_10 - np.floor(arg_10)\n\n arg_11 = np.argsort(arg_10)\n arg_12 = arg_10[arg_11]\n arg_13 = arg_1[arg_11]\n arg_14 = arg_2[arg_11]\n arg_15 = arg_3[arg_11]\n\n arg_16 = np.median(arg_14)\n arg_17 = np.full_like(arg_12, arg_16)\n\n arg_18 = -arg_6\n arg_19 = -arg_6 * arg_8\n\n arg_20 = arg_7/5.0 # we use 5-sigma as full-width -> duration\n arg_21 = arg_7/5.0 # secondary eclipse has the same duration\n\n arg_22 = arg_7/2.0\n\n\n # phase indices\n arg_23 = (\n (arg_12 >= (1.0 - arg_22)) & (arg_12 <= 1.0)\n )\n arg_24 = (\n (arg_12 >= 0.0) & (arg_12 <= arg_22)\n )\n\n arg_25 = (\n (arg_12 >= (arg_9 - arg_22)) &\n (arg_12 <= (arg_9 + arg_22))\n )\n\n # put in the eclipses\n arg_17[arg_23] = (\n arg_16 + _gaussian(arg_12[arg_23],\n arg_18,\n 1.0,\n arg_20)\n )\n arg_17[arg_24] = (\n arg_16 + _gaussian(arg_12[arg_24],\n arg_18,\n 0.0,\n arg_20)\n )\n arg_17[arg_25] = (\n arg_16 + _gaussian(arg_12[arg_25],\n arg_19,\n arg_9,\n arg_21)\n )\n\n return arg_17, arg_12, arg_13, arg_14, arg_15"} +{"_id": "doc_9215", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Converts given J, H, Ks mags to a B magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted B band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n BJHK,\n BJH, BJK, BHK,\n BJ, BH, BK)"} +{"_id": "doc_9216", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to a V magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted V band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n VJHK,\n VJH, VJK, VHK,\n VJ, VH, VK)"} +{"_id": "doc_9217", "title": "", 
"text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to an R magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted R band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n RJHK,\n RJH, RJK, RHK,\n RJ, RH, RK)"} +{"_id": "doc_9218", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to an I magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted I band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n IJHK,\n IJH, IJK, IHK,\n IJ, IH, IK)"} +{"_id": "doc_9219", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to an SDSS u magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted SDSS u band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n SDSSU_JHK,\n SDSSU_JH, SDSSU_JK, SDSSU_HK,\n SDSSU_J, SDSSU_H, SDSSU_K)"} +{"_id": "doc_9220", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to an SDSS g magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted SDSS g band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n SDSSG_JHK,\n SDSSG_JH, SDSSG_JK, SDSSG_HK,\n SDSSG_J, SDSSG_H, SDSSG_K)"} +{"_id": "doc_9221", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to an SDSS i magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted SDSS i band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n SDSSI_JHK,\n SDSSI_JH, SDSSI_JK, SDSSI_HK,\n SDSSI_J, SDSSI_H, SDSSI_K)"} +{"_id": "doc_9222", "title": "", "text": "def Func(arg_0,arg_1,arg_2):\n '''Converts given J, H, Ks mags to an SDSS z magnitude value.\n\n Parameters\n ----------\n\n jmag,hmag,kmag : float\n 2MASS J, H, Ks mags of the object.\n\n Returns\n -------\n\n float\n The converted SDSS z band magnitude.\n\n '''\n\n return convert_constants(arg_0,arg_1,arg_2,\n SDSSZ_JHK,\n SDSSZ_JH, SDSSZ_JK, SDSSZ_HK,\n SDSSZ_J, SDSSZ_H, SDSSZ_K)"} +{"_id": "doc_9223", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=0.05, arg_5=9):\n '''Calculates the Schwarzenberg-Czerny AoV statistic at a test frequency.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input time-series and associated errors.\n\n frequency : float\n The test frequency to calculate the theta statistic at.\n\n binsize : float\n The phase bin size to use.\n\n minbin : int\n The minimum number of items in a phase bin to consider in the\n calculation of the statistic.\n\n Returns\n -------\n\n theta_aov : float\n The value of the AoV statistic at the specified `frequency`.\n\n '''\n\n arg_6 = 1.0/arg_3\n arg_7 = arg_0[0]\n\n arg_8 = phase_magseries(arg_0,\n arg_1,\n arg_6,\n arg_7,\n wrap=False,\n sort=True)\n\n arg_9 = arg_8['phase']\n arg_10 = arg_8['mags']\n arg_11 = nparange(0.0, 1.0, arg_4)\n arg_12 = arg_9.size\n\n arg_13 = npdigitize(arg_9, arg_11)\n\n arg_14 = []\n arg_15 = []\n arg_16 = []\n arg_17 = 0\n\n arg_18 = npmedian(arg_10)\n\n for arg_19 in npunique(arg_13):\n\n arg_20 = arg_13 == arg_19\n arg_21 = 
arg_10[arg_20]\n\n if arg_21.size > arg_5:\n\n arg_22 = arg_21.size\n arg_23 = npmedian(arg_21)\n\n # get s1\n arg_24 = (\n arg_22 *\n (arg_23 - arg_18) *\n (arg_23 - arg_18)\n )\n\n # get s2\n arg_25 = npsum((arg_21 - arg_18) *\n (arg_21 - arg_18))\n\n arg_14.append(arg_24)\n arg_15.append(arg_25)\n arg_16.append(arg_22)\n arg_17 = arg_17 + 1\n\n\n # turn the quantities into arrays\n arg_14 = nparray(arg_14)\n arg_15 = nparray(arg_15)\n arg_16 = nparray(arg_16)\n\n # calculate s1 first\n arg_26 = npsum(arg_14)/(arg_17 - 1.0)\n\n # then calculate s2\n arg_27 = npsum(arg_15)/(arg_12 - arg_17)\n\n arg_28 = arg_26/arg_27\n\n return arg_28"} +{"_id": "doc_9224", "title": "", "text": "def Func(arg_0, arg_1, arg_2=False):\n '''This just puts all of the period-finders on a single periodogram.\n\n This will renormalize all of the periodograms so their values lie between 0\n and 1, with values lying closer to 1 being more significant. Periodograms\n that give the same best periods will have their peaks line up together.\n\n Parameters\n ----------\n\n pflist : list of dict\n This is a list of result dicts from any of the period-finders in\n periodbase. To use your own period-finders' results here, make sure the\n result dict is of the form and has at least the keys below::\n\n {'periods': np.array of all periods searched by the period-finder,\n 'lspvals': np.array of periodogram power value for each period,\n 'bestperiod': a float value that is the period with the highest\n peak in the periodogram, i.e. the most-likely actual\n period,\n 'method': a three-letter code naming the period-finder used; must\n be one of the keys in the\n `astrobase.periodbase.METHODLABELS` dict,\n 'nbestperiods': a list of the periods corresponding to periodogram\n peaks (`nbestlspvals` below) to annotate on the\n periodogram plot so they can be called out\n visually,\n 'nbestlspvals': a list of the power values associated with\n periodogram peaks to annotate on the periodogram\n plot so they can be called out visually; should be\n the same length as `nbestperiods` above,\n 'kwargs': dict of kwargs passed to your own period-finder function}\n\n outfile : str\n This is the output file to write the output to. NOTE: EPS/PS won't work\n because we use alpha transparency to better distinguish between the\n various periodograms.\n\n addmethods : bool\n If this is True, will add all of the normalized periodograms together,\n then renormalize them to between 0 and 1. In this way, if all of the\n period-finders agree on something, it'll stand out easily. 
FIXME:\n implement this kwarg.\n\n Returns\n -------\n\n str\n The name of the generated plot file.\n\n '''\n\n import matplotlib.pyplot as plt\n\n for arg_3 in arg_0:\n\n if arg_3['method'] == 'pdm':\n\n plt.plot(arg_3['periods'],\n np.max(arg_3['lspvals'])/arg_3['lspvals'] - 1.0,\n label='%s P=%.5f' % (arg_3['method'], arg_3['bestperiod']),\n alpha=0.5)\n\n else:\n\n plt.plot(arg_3['periods'],\n arg_3['lspvals']/np.max(arg_3['lspvals']),\n label='%s P=%.5f' % (arg_3['method'], arg_3['bestperiod']),\n alpha=0.5)\n\n\n plt.xlabel('period [days]')\n plt.ylabel('normalized periodogram power')\n\n plt.xscale('log')\n plt.legend()\n plt.tight_layout()\n plt.savefig(arg_1)\n plt.close('all')\n\n return arg_1"} +{"_id": "doc_9225", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9,\n arg_10=2, arg_11=7):\n '''This returns a BATMAN planetary transit model.\n\n Parameters\n ----------\n\n times : np.array\n The times at which the model will be evaluated.\n\n t0 : float\n The time of periastron for the transit.\n\n per : float\n The orbital period of the planet.\n\n rp : float\n The stellar radius of the planet's star (in Rsun).\n\n a : float\n The semi-major axis of the planet's orbit (in Rsun).\n\n inc : float\n The orbital inclination (in degrees).\n\n ecc : float\n The eccentricity of the orbit.\n\n w : float\n The longitude of periastron (in degrees).\n\n u : list of floats\n The limb darkening coefficients specific to the limb darkening model\n used.\n\n limb_dark : {\"uniform\", \"linear\", \"quadratic\", \"square-root\", \"logarithmic\", \"exponential\", \"power2\", \"custom\"}\n The type of limb darkening model to use. See the full list here:\n\n https://www.cfa.harvard.edu/~lkreidberg/batman/tutorial.html#limb-darkening-options\n\n exp_time_minutes : float\n The amount of time to 'smear' the transit LC points over to simulate a\n long exposure time.\n\n supersample_factor: int\n The number of supersampled time data points to average the lightcurve\n model over.\n\n Returns\n -------\n\n (params, batman_model) : tuple\n The returned tuple contains the params list and the generated\n `batman.TransitModel` object.\n\n '''\n arg_12 = batman.TransitParams() # object to store transit parameters\n arg_12.t0 = arg_1 # time of periastron\n arg_12.per = arg_2 # orbital period\n arg_12.rp = arg_3 # planet radius (in stellar radii)\n arg_12.a = arg_4 # semi-major axis (in stellar radii)\n arg_12.inc = arg_5 # orbital inclination (in degrees)\n arg_12.ecc = arg_6 # the eccentricity of the orbit\n arg_12.w = arg_7 # longitude of periastron (in degrees)\n arg_12.u = arg_8 # limb darkening coefficient list\n arg_12.limb_dark = arg_9 # limb darkening model to use\n\n arg_13 = arg_0\n arg_14 = batman.TransitModel(arg_12, arg_13, exp_time=arg_10/60./24.,\n arg_11=arg_11)\n\n return arg_12, arg_14"} +{"_id": "doc_9226", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n Assume priors on all parameters have uniform probability.\n '''\n # priorbounds contains the input priors, and because of how we previously\n # sorted theta, its sorted keys tell us which parts of theta correspond to\n # which physical quantities.\n\n arg_2 = True\n for arg_3, arg_4 in enumerate(np.sort(list(arg_1.keys()))):\n if arg_1[arg_4][0] < arg_0[arg_3] < arg_1[arg_4][1]:\n arg_2 = True and arg_2\n else:\n arg_2 = False\n\n if arg_2:\n return 0.\n\n return -np.inf"} +{"_id": "doc_9227", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2='sloan_2mass',\n arg_3=1.0,\n arg_4=True,\n 
arg_5=0.1,\n arg_6=26.0,\n arg_7=4,\n arg_8=1.6,\n arg_9=None,\n arg_10=False,\n arg_11='~/.astrobase/trilegal-cache',\n arg_12=True,\n arg_13=60.0,\n arg_14=150.0,\n arg_15=700.0):\n '''This runs the TRILEGAL query for decimal equatorial coordinates.\n\n Parameters\n ----------\n\n ra,decl : float\n These are the center equatorial coordinates in decimal degrees\n\n filtersystem : str\n This is a key in the TRILEGAL_FILTER_SYSTEMS dict. Use the function\n :py:func:`astrobase.services.trilegal.list_trilegal_filtersystems` to\n see a nicely formatted table with the key and description for each of\n these.\n\n field_deg2 : float\n The area of the simulated field in square degrees. This is in the\n Galactic coordinate system.\n\n usebinaries : bool\n If this is True, binaries will be present in the model results.\n\n extinction_sigma : float\n This is the applied std dev around the `Av_extinction` value for the\n galactic coordinates requested.\n\n magnitude_limit : float\n This is the limiting magnitude of the simulation in the\n `maglim_filtercol` band index of the filter system chosen.\n\n maglim_filtercol : int\n The index in the filter system list of the magnitude limiting band.\n\n trilegal_version : float\n This is the the version of the TRILEGAL form to use. This can usually be\n left as-is.\n\n extraparams : dict or None\n This is a dict that can be used to override parameters of the model\n other than the basic ones used for input to this function. All\n parameters are listed in `TRILEGAL_DEFAULT_PARAMS` above. See:\n\n http://stev.oapd.inaf.it/cgi-bin/trilegal\n\n for explanations of these parameters.\n\n forcefetch : bool\n If this is True, the query will be retried even if cached results for\n it exist.\n\n cachedir : str\n This points to the directory where results will be downloaded.\n\n verbose : bool\n If True, will indicate progress and warn of any issues.\n\n timeout : float\n This sets the amount of time in seconds to wait for the service to\n respond to our initial request.\n\n refresh : float\n This sets the amount of time in seconds to wait before checking if the\n result file is available. 
If the results file isn't available after\n `refresh` seconds have elapsed, the function will wait for `refresh`\n seconds continuously, until `maxtimeout` is reached or the results file\n becomes available.\n\n maxtimeout : float\n The maximum amount of time in seconds to wait for a result to become\n available after submitting our query request.\n\n Returns\n -------\n\n dict\n This returns a dict of the form::\n\n {'params':the input param dict used,\n 'extraparams':any extra params used,\n 'provenance':'cached' or 'new download',\n 'tablefile':the path on disk to the downloaded model text file}\n\n '''\n\n # convert the ra/decl to gl, gb\n arg_16 = SkyCoord(arg_0=arg_0*u.degree, dec=arg_1*u.degree)\n\n arg_17 = arg_16.galactic.l.degree\n arg_18 = arg_16.galactic.b.degree\n\n return query_galcoords(arg_17,\n arg_18,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_14=arg_14,\n arg_15=arg_15)"} +{"_id": "doc_9228", "title": "", "text": "def Func(arg_0):\n '''\n This reads a downloaded TRILEGAL model file.\n\n Parameters\n ----------\n\n modelfile : str\n Path to the downloaded model file to read.\n\n Returns\n -------\n\n np.recarray\n Returns the model table as a Numpy record array.\n\n '''\n\n arg_1 = gzip.open(arg_0)\n arg_2 = np.genfromtxt(arg_1,names=True)\n arg_1.close()\n\n return arg_2"} +{"_id": "doc_9229", "title": "", "text": "def Func(arg_0, arg_1):\n '''\n This compares two values in constant time.\n\n Taken from tornado:\n\n https://github.com/tornadoweb/tornado/blob/\n d4eb8eb4eb5cc9a6677e9116ef84ded8efba8859/tornado/web.py#L3060\n\n '''\n if len(arg_0) != len(arg_1):\n return False\n arg_2 = 0\n if isinstance(arg_0[0], int): # python3 byte strings\n for arg_3, arg_4 in zip(arg_0, arg_1):\n arg_2 |= arg_3 ^ arg_4\n else: # python2\n for arg_3, arg_4 in zip(arg_0, arg_1):\n arg_2 |= ord(arg_3) ^ ord(arg_4)\n return arg_2 == 0"} +{"_id": "doc_9230", "title": "", "text": "def Func(arg_0, arg_1):\n '''Overrides the Func serializer for `JSONEncoder`.\n\n This can serialize the following objects in addition to what\n `JSONEncoder` can already do.\n\n - `np.array`\n - `bytes`\n - `complex`\n - `np.float64` and other `np.dtype` objects\n\n Parameters\n ----------\n\n obj : object\n A Python object to serialize to JSON.\n\n Returns\n -------\n\n str\n A JSON encoded representation of the input object.\n\n '''\n\n if isinstance(arg_1, np.ndarray):\n return arg_1.tolist()\n elif isinstance(arg_1, bytes):\n return arg_1.decode()\n elif isinstance(arg_1, complex):\n return (arg_1.real, arg_1.imag)\n elif (isinstance(arg_1, (float, np.float64, np.float_)) and\n not np.isfinite(arg_1)):\n return None\n elif isinstance(arg_1, (np.int8, np.int16, np.int32, np.int64)):\n return int(arg_1)\n else:\n return json.JSONEncoder.Func(arg_0, arg_1)"} +{"_id": "doc_9231", "title": "", "text": "def Func(arg_0):\n '''This handles GET requests to the index page.\n\n TODO: provide the correct baseurl from the checkplotserver options dict,\n so the frontend JS can just read that off immediately.\n\n '''\n\n # generate the project's list of checkplots\n arg_1 = arg_0.currentproject['checkplots']\n arg_2 = [os.path.basename(x)\n for x in arg_1]\n arg_3 = range(len(arg_1))\n\n # Func the sortkey and order\n arg_4 = arg_0.currentproject['sortkey']\n if arg_0.currentproject['sortorder'] == 'asc':\n arg_5 = 'ascending'\n elif arg_0.currentproject['sortorder'] == 
'desc':\n arg_5 = 'descending'\n\n # Func the filterkey and condition\n arg_6 = arg_0.currentproject['filterstatements']\n\n arg_0.render('cpindex.html',\n arg_1=arg_1,\n arg_5=arg_5,\n arg_4=arg_4,\n arg_6=arg_6,\n arg_2=arg_2,\n arg_3=arg_3,\n project_checkplotfile=arg_0.cplistfile,\n readonly=arg_0.readonly,\n baseurl=arg_0.baseurl)"} +{"_id": "doc_9232", "title": "", "text": "def Func(arg_0):\n '''\n This handles GET requests for the current checkplot-list.json file.\n\n Used with AJAX from frontend.\n\n '''\n\n # add the reviewed key to the current dict if it doesn't exist\n # this will hold all the reviewed objects for the frontend\n if 'reviewed' not in arg_0.currentproject:\n arg_0.currentproject['reviewed'] = {}\n\n # just returns the current project as JSON\n arg_0.write(arg_0.currentproject)"} +{"_id": "doc_9233", "title": "", "text": "def Func(arg_0, arg_1, arg_2=2):\n '''This smooths the magseries with a Savitsky-Golay filter.\n\n Parameters\n ----------\n\n mags : np.array\n The input mags/flux time-series to smooth.\n\n windowsize : int\n This is a odd integer containing the smoothing window size.\n\n polyorder : int\n This is an integer containing the polynomial degree order to use when\n generating the Savitsky-Golay filter.\n\n Returns\n -------\n\n np.array\n The smoothed mag/flux time-series array.\n\n '''\n\n arg_3 = savgol_filter(arg_0, arg_1, arg_2)\n return arg_3"} +{"_id": "doc_9234", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9,\n arg_10=21,\n arg_11=3.0,\n arg_12=arg_13,\n arg_14=None):\n '''\n Detrends a magnitude series given in mag using accompanying values of S in\n fsv, D in fdv, K in fkv, x coords in xcc, y coords in ycc, background in\n bgv, and background error in bge. smooth is used to set a smoothing\n parameter for the fit function. Does EPD voodoo.\n\n '''\n\n # find all the finite values of the magsnitude\n arg_15 = np.isfinite(arg_1)\n\n # calculate median and stdev\n arg_16 = np.median(arg_1[arg_15])\n arg_17 = np.nanstd(arg_1)\n\n # if we're supposed to sigma clip, do so\n if arg_11:\n arg_18 = abs(arg_1 - arg_16) < arg_11*arg_17\n arg_19 = arg_15 & arg_18\n else:\n arg_19 = arg_15\n\n arg_20 = arg_1[arg_19]\n arg_21 = len(arg_20)\n\n # smooth the signal\n if isinstance(arg_14, dict):\n arg_22 = arg_12(arg_20,\n arg_10,\n **arg_14)\n else:\n arg_22 = arg_12(arg_20, arg_10)\n\n # make the linear equation matrix\n arg_23 = np.c_[arg_3[arg_19]**2.0,\n arg_3[arg_19],\n arg_4[arg_19]**2.0,\n arg_4[arg_19],\n arg_5[arg_19]**2.0,\n arg_5[arg_19],\n np.ones(arg_21),\n arg_3[arg_19]*arg_4[arg_19],\n arg_3[arg_19]*arg_5[arg_19],\n arg_4[arg_19]*arg_5[arg_19],\n np.sin(2*np.pi*arg_6[arg_19]),\n np.cos(2*np.pi*arg_6[arg_19]),\n np.sin(2*np.pi*arg_7[arg_19]),\n np.cos(2*np.pi*arg_7[arg_19]),\n np.sin(4*np.pi*arg_6[arg_19]),\n np.cos(4*np.pi*arg_6[arg_19]),\n np.sin(4*np.pi*arg_7[arg_19]),\n np.cos(4*np.pi*arg_7[arg_19]),\n arg_8[arg_19],\n arg_9[arg_19]]\n\n # solve the matrix equation [epdmatrix] . 
[x] = [smoothedmags]\n # return the EPD differential magss if the solution succeeds\n try:\n\n arg_24, arg_25, arg_26, arg_27 = lstsq(arg_23, arg_22,\n rcond=None)\n\n if DEBUG:\n print('coeffs = %s, residuals = %s' % (arg_24, arg_25))\n\n\n arg_28 = {'times':arg_0,\n 'mags':(arg_16 +\n _old_epd_diffmags(arg_24, arg_3, arg_4,\n arg_5, arg_6, arg_7, arg_8, arg_9, arg_1)),\n 'errs':arg_2,\n 'fitcoeffs':arg_24,\n 'residuals':arg_25}\n\n return arg_28\n\n # if the solution fails, return nothing\n except Exception as e:\n\n LOGEXCEPTION('EPD solution did not converge')\n\n arg_28 = {'times':arg_0,\n 'mags':np.full_like(arg_1, np.nan),\n 'errs':arg_2,\n 'fitcoeffs':arg_24,\n 'residuals':arg_25}\n\n return arg_28"} +{"_id": "doc_9235", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9):\n '''\n This is the EPD function to fit using a smoothed mag-series.\n\n '''\n\n return (arg_0[0]*arg_1*arg_1 +\n arg_0[1]*arg_1 +\n arg_0[2]*arg_2*arg_2 +\n arg_0[3]*arg_2 +\n arg_0[4]*arg_3*arg_3 +\n arg_0[5]*arg_3 +\n arg_0[6] +\n arg_0[7]*arg_1*arg_2 +\n arg_0[8]*arg_1*arg_3 +\n arg_0[9]*arg_2*arg_3 +\n arg_0[10]*np.sin(2*pi_value*arg_4) +\n arg_0[11]*np.cos(2*pi_value*arg_4) +\n arg_0[12]*np.sin(2*pi_value*arg_5) +\n arg_0[13]*np.cos(2*pi_value*arg_5) +\n arg_0[14]*np.sin(4*pi_value*arg_4) +\n arg_0[15]*np.cos(4*pi_value*arg_4) +\n arg_0[16]*np.sin(4*pi_value*arg_5) +\n arg_0[17]*np.cos(4*pi_value*arg_5) +\n arg_0[18]*arg_6 +\n arg_0[19]*arg_7 +\n arg_0[20]*arg_8 +\n arg_0[21]*arg_9)"} +{"_id": "doc_9236", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9, arg_10, arg_11,\n arg_12=False,\n arg_13=3.0,\n arg_14=21,\n arg_15=arg_16,\n arg_17=None):\n '''Detrends a magnitude series using External Parameter Decorrelation.\n\n Requires a set of external parameters similar to those present in HAT light\n curves. At the moment, the HAT light-curve-specific external parameters are:\n\n - S: the 'fsv' column in light curves,\n - D: the 'fdv' column in light curves,\n - K: the 'fkv' column in light curves,\n - x coords: the 'xcc' column in light curves,\n - y coords: the 'ycc' column in light curves,\n - background value: the 'bgv' column in light curves,\n - background error: the 'bge' column in light curves,\n - hour angle: the 'iha' column in light curves,\n - zenith distance: the 'izd' column in light curves\n\n S, D, and K are defined as follows:\n\n - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n - D -> measure of PSF ellipticity in xy direction\n - K -> measure of PSF ellipticity in cross direction\n\n S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n A. 
Pal's thesis: https://arxiv.org/abs/0906.3486\n\n NOTE: The errs are completely ignored and returned unchanged (except for\n sigclip and finite filtering).\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to detrend.\n\n fsv : np.array\n Array containing the external parameter `S` of the same length as times.\n\n fdv : np.array\n Array containing the external parameter `D` of the same length as times.\n\n fkv : np.array\n Array containing the external parameter `K` of the same length as times.\n\n xcc : np.array\n Array containing the external parameter `x-coords` of the same length as\n times.\n\n ycc : np.array\n Array containing the external parameter `y-coords` of the same length as\n times.\n\n bgv : np.array\n Array containing the external parameter `background value` of the same\n length as times.\n\n bge : np.array\n Array containing the external parameter `background error` of the same\n length as times.\n\n iha : np.array\n Array containing the external parameter `hour angle` of the same length\n as times.\n\n izd : np.array\n Array containing the external parameter `zenith distance` of the same\n length as times.\n\n magsarefluxes : bool\n Set this to True if `mags` actually contains fluxes.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before fitting the EPD\n function to it.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. 
Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n Returns\n -------\n\n dict\n Returns a dict of the following form::\n\n {'times':the input times after non-finite elems removed,\n 'mags':the EPD detrended mag values (the EPD mags),\n 'errs':the errs after non-finite elems removed,\n 'fitcoeffs':EPD fit coefficient values,\n 'fitinfo':the full tuple returned by scipy.leastsq,\n 'fitmags':the EPD fit function evaluated at times,\n 'mags_median': this is median of the EPD mags,\n 'mags_mad': this is the MAD of EPD mags}\n\n '''\n\n arg_18 = np.isfinite(arg_0) & np.isfinite(arg_1) & np.isfinite(arg_2)\n arg_19, arg_20, arg_21 = arg_0[::][arg_18], arg_1[::][arg_18], arg_2[::][arg_18]\n arg_22, arg_23, arg_24, arg_25, arg_26, arg_27, arg_28, arg_29, arg_30 = (\n arg_3[::][arg_18],\n arg_4[::][arg_18],\n arg_5[::][arg_18],\n arg_6[::][arg_18],\n arg_7[::][arg_18],\n arg_8[::][arg_18],\n arg_9[::][arg_18],\n arg_10[::][arg_18],\n arg_11[::][arg_18],\n )\n\n arg_31, arg_32, arg_33, arg_34 = sigclip_magseries_with_extparams(\n arg_0, arg_1, arg_2,\n [arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9, arg_10, arg_11],\n sigclip=arg_13,\n arg_12=arg_12\n )\n arg_35, arg_36, arg_37, arg_38, arg_39, arg_40, arg_41, arg_42, arg_43 = arg_34\n\n # smooth the signal\n if isinstance(arg_17, dict):\n arg_44 = arg_15(arg_32,\n arg_14,\n **arg_17)\n else:\n arg_44 = arg_15(arg_32, arg_14)\n\n # initial fit coeffs\n arg_45 = np.zeros(22)\n\n # fit the smoothed mags and find the EPD function coefficients\n arg_46 = leastsq(_epd_residual,\n arg_45,\n args=(arg_44,\n arg_35, arg_36, arg_37, arg_38,\n arg_39, arg_40, arg_41, arg_42, arg_43),\n full_output=True)\n\n # if the fit succeeds, then get the EPD mags\n if arg_46[-1] in (1,2,3,4):\n\n arg_47 = arg_46[0]\n arg_48 = _epd_function(arg_47,\n arg_22, arg_23, arg_24, arg_25, arg_26,\n arg_27, arg_28, arg_29, arg_30)\n\n arg_49 = npmedian(arg_20) + arg_20 - arg_48\n\n arg_50 = {'times':arg_19,\n 'mags':arg_49,\n 'errs':arg_21,\n 'fitcoeffs':arg_47,\n 'fitinfo':arg_46,\n 'fitmags':arg_48,\n 'mags_median':npmedian(arg_49),\n 'mags_mad':npmedian(npabs(arg_49 - npmedian(arg_49)))}\n\n return arg_50\n\n # if the solution fails, return nothing\n else:\n\n LOGERROR('EPD fit did not converge')\n return None"} +{"_id": "doc_9237", "title": "", "text": "def Func(arg_0, arg_1, arg_2,\n arg_3,\n arg_4=False,\n arg_5=True,\n arg_6=3.0,\n arg_7=21,\n arg_8=arg_9,\n arg_10=None,\n arg_11=1.0,\n arg_12=300,\n arg_13={'criterion':'mse',\n 'oob_score':False,\n 'n_jobs':-1}):\n '''This uses a `RandomForestRegressor` to de-correlate the given magseries.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to run EPD on.\n\n externalparam_arrs : list of np.arrays\n This is a list of ndarrays of external parameters to decorrelate\n against. These should all be the same size as `times`, `mags`, `errs`.\n\n epdsmooth : bool\n If True, sets the training LC for the RandomForestRegress to be a\n smoothed version of the sigma-clipped light curve provided in `times`,\n `mags`, `errs`.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before smoothing it and\n fitting the EPD function to it. 
The actual LC will not be sigma-clipped.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n rf_subsample : float\n Defines the fraction of the size of the `mags` array to use for\n training the random forest regressor.\n\n rf_ntrees : int\n This is the number of trees to use for the `RandomForestRegressor`.\n\n rf_extraprams : dict\n This is a dict of any extra kwargs to provide to the\n `RandomForestRegressor` instance used.\n\n Returns\n -------\n\n dict\n Returns a dict with decorrelated mags and the usual info from the\n `RandomForestRegressor`: variable importances, etc.\n\n '''\n\n # get finite times, mags, errs\n arg_14 = np.isfinite(arg_0) & np.isfinite(arg_1) & np.isfinite(arg_2)\n arg_15, arg_16, arg_17 = arg_0[::][arg_14], arg_1[::][arg_14], arg_2[::][arg_14]\n arg_18 = []\n for arg_19 in arg_3:\n arg_18.append(arg_19[::][arg_14])\n\n arg_20, arg_21, arg_22, arg_23 = sigclip_magseries_with_extparams(\n arg_0, arg_1, arg_2,\n arg_3,\n sigclip=arg_6,\n arg_4=arg_4\n )\n\n # smoothing is optional for RFR because we train on a fraction of the mag\n # series and so should not require a smoothed input to fit a function to\n if arg_5:\n\n # smooth the signal\n if isinstance(arg_10, dict):\n arg_24 = arg_8(arg_21,\n arg_7,\n **arg_10)\n else:\n arg_24 = arg_8(arg_21,\n arg_7)\n\n else:\n\n arg_24 = arg_21\n\n\n # set up the regressor\n if isinstance(arg_13, dict):\n arg_25 = RandomForestRegressor(n_estimators=arg_12,\n **arg_13)\n else:\n arg_25 = RandomForestRegressor(n_estimators=arg_12)\n\n # collect the features\n arg_26 = np.column_stack(arg_23)\n\n # fit, then generate the predicted values, then get corrected values\n\n # we fit on a randomly selected subsample of all the mags\n if arg_11 < 1.0:\n arg_27 = np.arange(arg_24.size)\n\n # these are sorted because time-order should be important\n arg_28 = np.sort(\n npr.choice(arg_27,\n 
size=int(arg_11*arg_24.size),\n replace=False)\n )\n else:\n arg_28 = np.arange(arg_24.size)\n\n arg_25.fit(arg_26[arg_28,:], arg_24[arg_28])\n\n # predict on the full feature set\n arg_29 = arg_25.predict(np.column_stack(arg_18))\n arg_30 = npmedian(arg_16) + arg_16 - arg_29\n\n arg_31 = {'times':arg_15,\n 'mags':arg_30,\n 'errs':arg_17,\n 'feature_importances':arg_25.feature_importances_,\n 'regressor':arg_25,\n 'mags_median':npmedian(arg_30),\n 'mags_mad':npmedian(npabs(arg_30 -\n npmedian(arg_30)))}\n\n return arg_31"} +{"_id": "doc_9238", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3,\n arg_4=0.05, arg_5=9):\n '''\n This calculates the Stellingwerf PDM theta value at a test frequency.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input time-series and associated errors.\n\n frequency : float\n The test frequency to calculate the theta statistic at.\n\n binsize : float\n The phase bin size to use.\n\n minbin : int\n The minimum number of items in a phase bin to consider in the\n calculation of the statistic.\n\n Returns\n -------\n\n theta_pdm : float\n The value of the theta statistic at the specified `frequency`.\n\n\n '''\n\n arg_6 = 1.0/arg_3\n arg_7 = arg_0[0]\n\n arg_8 = phase_magseries(arg_0,\n arg_1,\n arg_6,\n arg_7,\n wrap=False,\n sort=True)\n\n arg_9 = arg_8['phase']\n arg_10 = arg_8['mags']\n arg_11 = nparange(0.0, 1.0, arg_4)\n\n arg_12 = npdigitize(arg_9, arg_11)\n\n arg_13 = []\n arg_14 = []\n arg_15 = 0\n\n for arg_16 in npunique(arg_12):\n\n arg_17 = arg_12 == arg_16\n arg_18 = arg_10[arg_17]\n\n if arg_18.size > arg_5:\n arg_19 = npvar(arg_18,ddof=1)\n arg_13.append(arg_19)\n arg_14.append(arg_18.size)\n arg_15 = arg_15 + 1\n\n # now calculate theta\n arg_13 = nparray(arg_13)\n arg_14 = nparray(arg_14)\n\n arg_20 = npsum(arg_13*(arg_14 - 1)) / (npsum(arg_14) -\n arg_15)\n arg_21 = npvar(arg_10,ddof=1)\n arg_22 = arg_20/arg_21\n\n return arg_22"} +{"_id": "doc_9239", "title": "", "text": "def Func(arg_0, arg_1, arg_2):\n '''Converts magnitude measurements in Kepler band to SDSS r band.\n\n Parameters\n ----------\n\n keplermag : float or array-like\n The Kepler magnitude value(s) to convert to fluxes.\n\n kic_sdssg,kic_sdssr : float or array-like\n The SDSS g and r magnitudes of the object(s) from the Kepler Input\n Catalog. The .llc.fits MAST light curve file for a Kepler object\n contains these values in the FITS extension 0 header.\n\n Returns\n -------\n\n float or array-like\n SDSS r band magnitude(s) converted from the Kepler band magnitude.\n\n '''\n arg_3 = arg_1 - arg_2\n\n if arg_3 < 0.8:\n arg_4 = (arg_0 - 0.2*arg_1)/0.8\n else:\n arg_4 = (arg_0 - 0.1*arg_1)/0.9\n return arg_4"} +{"_id": "doc_9240", "title": "", "text": "def Func(arg_0,\n arg_1=True,\n arg_2='sap,pdc',\n arg_3=None):\n '''This filters the Kepler `lcdict`, removing nans and bad\n observations.\n\n By default, this function removes points in the Kepler LC that have ANY\n quality flags set.\n\n Parameters\n ----------\n\n lcdict : lcdict\n An `lcdict` produced by `consolidate_kepler_fitslc` or\n `read_kepler_fitslc`.\n\n filterflags : bool\n If True, will remove any measurements that have non-zero quality flags\n present. 
This usually indicates an issue with the instrument or\n spacecraft.\n\n nanfilter : {'sap','pdc','sap,pdc'}\n Indicates the flux measurement type(s) to apply the filtering to.\n\n timestoignore : list of tuples or None\n This is of the form::\n\n [(time1_start, time1_end), (time2_start, time2_end), ...]\n\n and indicates the start and end times to mask out of the final\n lcdict. Use this to remove anything that wasn't caught by the quality\n flags.\n\n Returns\n -------\n\n lcdict\n Returns an `lcdict` (this is useable by most astrobase functions for LC\n processing). The `lcdict` is filtered IN PLACE!\n\n '''\n\n arg_4 = arg_0['columns']\n\n # filter all bad LC points as noted by quality flags\n if arg_1:\n\n arg_5 = arg_0['time'].size\n arg_6 = arg_0['sap_quality'] == 0\n\n for arg_7 in arg_4:\n if '.' in arg_7:\n arg_8, arg_9 = arg_7.split('.')\n arg_0[arg_8][arg_9] = arg_0[arg_8][arg_9][arg_6]\n else:\n arg_0[arg_7] = arg_0[arg_7][arg_6]\n\n arg_10 = arg_0['time'].size\n LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'\n % (arg_5, arg_10))\n\n\n if arg_2 and arg_2 == 'sap,pdc':\n arg_11 = (\n npisfinite(arg_0['sap']['sap_flux']) &\n npisfinite(arg_0['pdc']['pdcsap_flux']) &\n npisfinite(arg_0['time'])\n )\n elif arg_2 and arg_2 == 'sap':\n arg_11 = (\n npisfinite(arg_0['sap']['sap_flux']) &\n npisfinite(arg_0['time'])\n )\n elif arg_2 and arg_2 == 'pdc':\n arg_11 = (\n npisfinite(arg_0['pdc']['pdcsap_flux']) &\n npisfinite(arg_0['time'])\n )\n\n\n # remove nans from all columns\n if arg_2:\n\n arg_5 = arg_0['time'].size\n for arg_7 in arg_4:\n if '.' in arg_7:\n arg_8, arg_9 = arg_7.split('.')\n arg_0[arg_8][arg_9] = arg_0[arg_8][arg_9][arg_11]\n else:\n arg_0[arg_7] = arg_0[arg_7][arg_11]\n\n arg_10 = arg_0['time'].size\n\n LOGINFO('removed nans, ndet before = %s, ndet after = %s'\n % (arg_5, arg_10))\n\n\n # exclude all times in timestoignore\n if (arg_3 and\n isinstance(arg_3, list) and\n len(arg_3) > 0):\n\n arg_12 = npfull_like(arg_0['time'], True, dtype=np.bool_)\n arg_5 = arg_12.size\n\n # get all the masks\n for arg_13 in arg_3:\n arg_14, arg_15 = arg_13[0], arg_13[1]\n arg_16 = ~((arg_0['time'] >= arg_14) & (arg_0['time'] <= arg_15))\n arg_12 = arg_12 & arg_16\n\n # apply the masks\n for arg_7 in arg_4:\n if '.' in arg_7:\n arg_8, arg_9 = arg_7.split('.')\n arg_0[arg_8][arg_9] = arg_0[arg_8][arg_9][arg_12]\n else:\n arg_0[arg_7] = arg_0[arg_7][arg_12]\n\n arg_10 = arg_0['time'].size\n LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'\n % (arg_5, arg_10))\n\n return arg_0"} +{"_id": "doc_9241", "title": "", "text": "def Func(arg_0, arg_1, arg_2=0.1, arg_3=3):\n '''After running `detrend_centroid`, this gets positions of centroids during\n transits, and outside of transits.\n\n These positions can then be used in a false positive analysis.\n\n This routine requires knowing the ingress and egress times for every\n transit of interest within the quarter this routine is being called for.\n There is currently no astrobase routine that automates this for periodic\n transits (it must be done in a calling routine).\n\n To get out of transit centroids, this routine takes points outside of the\n \"buffer\" set by `oot_buffer_time`, sampling 3x as many points on either\n side of the transit as are in the transit (or however many are specified by\n `sample_factor`).\n\n Parameters\n ----------\n\n lcd : lcdict\n An `lcdict` generated by the `read_kepler_fitslc` function. 
We assume\n that the `detrend_centroid` function has been run on this `lcdict`.\n\n t_ing_egr : list of tuples\n This is of the form::\n\n [(ingress time of i^th transit, egress time of i^th transit)]\n\n for i the transit number index in this quarter (starts at zero at the\n beginning of every quarter). Assumes units of BJD.\n\n oot_buffer_time : float\n Number of days away from ingress and egress times to begin sampling \"out\n of transit\" centroid points. The number of out of transit points to take\n per transit is 3x the number of points in transit.\n\n sample_factor : float\n The size of out of transit window from which to sample.\n\n Returns\n -------\n\n dict\n This is a dictionary keyed by transit number (i.e., the same index as\n `t_ing_egr`), where each key contains the following value::\n\n {'ctd_x_in_tra':ctd_x_in_tra,\n 'ctd_y_in_tra':ctd_y_in_tra,\n 'ctd_x_oot':ctd_x_oot,\n 'ctd_y_oot':ctd_y_oot,\n 'npts_in_tra':len(ctd_x_in_tra),\n 'npts_oot':len(ctd_x_oot),\n 'in_tra_times':in_tra_times,\n 'oot_times':oot_times}\n\n '''\n\n # NOTE:\n # Bryson+ (2013) gives a more complicated and more correct approach to this\n # problem, computing offsets relative to positions defined on the SKY. This\n # requires using a Kepler focal plane geometry model. I don't have that\n # model, or know how to get it. So I use a simpler approach.\n\n arg_4 = int(np.unique(arg_0['quarter']))\n LOGINFO('Getting centroid offsets (qnum: {:d})...'.format(arg_4))\n # Kepler pixel scale, cf.\n # https://keplerscience.arc.nasa.gov/the-kepler-space-telescope.html\n arg_5 = 3.98\n\n # Get the residuals (units: pixel offset).\n arg_6 = arg_0['ctd_dtr']['times']\n arg_7 = arg_0['ctd_dtr']['ctd_x'] - arg_0['ctd_dtr']['fit_ctd_x']\n arg_8 = arg_0['ctd_dtr']['ctd_y'] - arg_0['ctd_dtr']['fit_ctd_y']\n\n # Return results in \"centroid dictionary\" (has keys of transit number).\n arg_9 = {}\n for arg_10,(arg_11,arg_12) in enumerate(arg_1):\n\n # We have in-transit times as input.\n arg_13 = arg_6[(arg_6 > arg_11) & (arg_6 < arg_12)]\n\n # Compute out of transit times on either side of the in-transit times.\n arg_14 = arg_12 - arg_11\n arg_15 = arg_3 * arg_14\n\n arg_16 = arg_6[\n (arg_6 < (arg_11-arg_2)) &\n (arg_6 > (arg_11-arg_2-arg_15))\n ]\n arg_17 = arg_6[\n (arg_6 > (arg_12+arg_2)) &\n (arg_6 < (arg_12+arg_2+arg_15))\n ]\n\n arg_18 = npconcatenate([arg_16, arg_17])\n\n arg_19 = npin1d(arg_6, arg_13)\n arg_20 = npin1d(arg_6, arg_18)\n\n # Convert to units of arcseconds.\n arg_21 = arg_7[arg_19]*arg_5\n arg_22 = arg_8[arg_19]*arg_5\n arg_23 = arg_7[arg_20]*arg_5\n arg_24 = arg_8[arg_20]*arg_5\n\n arg_9[arg_10] = {'ctd_x_in_tra':arg_21,\n 'ctd_y_in_tra':arg_22,\n 'ctd_x_oot':arg_23,\n 'ctd_y_oot':arg_24,\n 'npts_in_tra':len(arg_21),\n 'npts_oot':len(arg_23),\n 'in_tra_times':arg_13,\n 'oot_times':arg_18}\n\n LOGINFO('Got centroid offsets (qnum: {:d}).'.format(arg_4))\n\n return arg_9"} +{"_id": "doc_9242", "title": "", "text": "def Func(arg_0):\n '''This is a helper function for centroid detrending.\n\n '''\n\n from scipy.interpolate import interp1d\n\n arg_1 = nparray([4,5,6,10,15])\n arg_2 = nparray([1e2,3e2,5e2,1e3,3e3])\n arg_3 = interp1d(arg_2, arg_1, kind='linear',\n bounds_error=False,\n fill_value=(min(arg_1), max(arg_1)))\n arg_4 = int(npfloor(arg_3(arg_0)))\n\n return arg_4"} +{"_id": "doc_9243", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3='hat-sql',\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=7):\n\n '''This bins the given light curve file in time using the 
specified bin size.\n\n Parameters\n ----------\n\n lcfile : str\n The file name to process.\n\n binsizesec : float\n The time bin-size in seconds.\n\n outdir : str or None\n If this is a str, the output LC will be written to `outdir`. If this is\n None, the output LC will be written to the same directory as `lcfile`.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve file.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the binning process. If these are None,\n the default values for `timecols`, `magcols`, and `errcols` for your\n light curve format will be used here.\n\n minbinelems : int\n The minimum number of time-bin elements required to accept a time-bin as\n valid for the output binned light curve.\n\n Returns\n -------\n\n str\n The name of the output pickle file with the binned LC.\n\n Writes the output binned light curve to a pickle that contains the\n lcdict with an added `lcdict['binned'][magcol]` key, which contains the\n binned times, mags/fluxes, and errs as\n `lcdict['binned'][magcol]['times']`, `lcdict['binned'][magcol]['mags']`,\n and `lcdict['epd'][magcol]['errs']` for each `magcol` provided in the\n input or default `magcols` value for this light curve format.\n\n '''\n\n try:\n arg_9 = get_lcformat(arg_3,\n use_lcformat_dir=arg_4)\n if arg_9:\n (arg_10, arg_11,\n arg_12, arg_13, arg_14,\n arg_15, arg_16) = arg_9\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n # override the default timecols, magcols, and errcols\n # using the ones provided to the function\n if arg_5 is None:\n arg_5 = arg_12\n if arg_6 is None:\n arg_6 = arg_13\n if arg_7 is None:\n arg_7 = arg_14\n\n # get the LC into a dict\n arg_17 = arg_11(arg_0)\n\n # this should handle lists/tuples being returned by readerfunc\n # we assume that the first element is the actual lcdict\n # FIXME: figure out how to not need this assumption\n if ( (isinstance(arg_17, (list, tuple))) and\n (isinstance(arg_17[0], dict)) ):\n arg_17 = arg_17[0]\n\n # skip already binned light curves\n if 'binned' in arg_17:\n LOGERROR('this light curve appears to be binned already, skipping...')\n return None\n\n arg_17['binned'] = {}\n\n for arg_18, arg_19, arg_20 in zip(arg_5, arg_6, arg_7):\n\n # dereference the columns and get them from the lcdict\n if '.' in arg_18:\n arg_21 = arg_18.split('.')\n else:\n arg_21 = [arg_18]\n arg_22 = _dict_get(arg_17, arg_21)\n\n if '.' in arg_19:\n arg_23 = arg_19.split('.')\n else:\n arg_23 = [arg_19]\n arg_24 = _dict_get(arg_17, arg_23)\n\n if '.' 
in arg_20:\n arg_25 = arg_20.split('.')\n else:\n arg_25 = [arg_20]\n arg_26 = _dict_get(arg_17, arg_25)\n\n # normalize here if not using special normalization\n if arg_16 is None:\n arg_27, arg_28 = normalize_magseries(\n arg_22, arg_24,\n arg_15=arg_15\n )\n\n arg_22, arg_24, arg_26 = arg_27, arg_28, arg_26\n\n # now bin the mag series as requested\n arg_29 = time_bin_magseries_with_errs(arg_22,\n arg_24,\n arg_26,\n binsize=arg_1,\n arg_8=arg_8)\n\n # put this into the special binned key of the lcdict\n arg_17['binned'][arg_19] = {'times':arg_29['binnedtimes'],\n 'mags':arg_29['binnedmags'],\n 'errs':arg_29['binnederrs'],\n 'nbins':arg_29['nbins'],\n 'timebins':arg_29['jdbins'],\n 'binsizesec':arg_1}\n\n\n # done with binning for all magcols, now generate the output file\n # this will always be a pickle\n\n if arg_2 is None:\n arg_2 = os.path.dirname(arg_0)\n\n arg_30 = os.path.join(arg_2, '%s-binned%.1fsec-%s.pkl' %\n (squeeze(arg_17['objectid']).replace(' ','-'),\n arg_1, arg_3))\n\n with open(arg_30, 'wb') as outfd:\n pickle.dump(arg_17, outfd, protocol=pickle.HIGHEST_PROTOCOL)\n\n return arg_30"} +{"_id": "doc_9244", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4='hat-sql',\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=7,\n arg_10=arg_11,\n arg_12=1000):\n '''\n This time bins all the light curves in the specified directory.\n\n Parameters\n ----------\n\n lcdir : list of str\n Directory containing the input LCs to process.\n\n binsizesec : float\n The time bin size to use in seconds.\n\n maxobjects : int or None\n If provided, LC processing will stop at `lclist[maxobjects]`.\n\n outdir : str or None\n The directory where output LCs will be written. If None, will write to\n the same directory as the input LCs.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve file.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the binning process. 
If these are None,\n the default values for `timecols`, `magcols`, and `errcols` for your\n light curve format will be used here.\n\n minbinelems : int\n The minimum number of time-bin elements required to accept a time-bin as\n valid for the output binned light curve.\n\n nworkers : int\n Number of parallel workers to launch.\n\n maxworkertasks : int\n The maximum number of tasks a parallel worker will complete before being\n replaced to guard against memory leaks.\n\n Returns\n -------\n\n dict\n The returned dict contains keys = input LCs, vals = output LCs.\n\n '''\n try:\n arg_13 = get_lcformat(arg_4,\n use_lcformat_dir=arg_5)\n if arg_13:\n (arg_14, arg_15,\n arg_16, arg_17, arg_18,\n arg_19, arg_20) = arg_13\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n arg_21 = sorted(glob.glob(os.path.join(arg_0, arg_14)))\n\n return parallel_timebin(arg_21,\n arg_1,\n arg_2=arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_12=arg_12)"} +{"_id": "doc_9245", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=1000,\n arg_7='hat-sql',\n arg_8=None,\n arg_9=arg_10):\n '''This runs variable feature extraction in parallel for all LCs in `lclist`.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The directory where the output varfeatures pickle files will be written.\n\n maxobjects : int\n The number of LCs to process from `lclist`.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n mindet : int\n The minimum number of LC points required to generate variability\n features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of input LC file name : the generated\n variability features pickles for each of the input LCs, with results for\n each magcol in the input `magcol` or light curve format's default\n `magcol` list.\n\n '''\n # make sure to make the output directory if it doesn't exist\n if not os.path.exists(arg_1):\n os.makedirs(arg_1)\n\n if arg_2:\n arg_0 = arg_0[:arg_2]\n\n arg_11 = [(x, arg_1, arg_3, arg_4, arg_5, arg_6,\n arg_7, arg_8) for x in arg_0]\n\n with ProcessPoolExecutor(max_workers=arg_9) as executor:\n arg_12 = executor.map(varfeatures_worker, arg_11)\n\n arg_13 = [x for x in arg_12]\n arg_14 = {os.path.basename(x):y for (x,y) in zip(arg_0, arg_13)}\n\n return arg_14"} +{"_id": "doc_9246", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=True,\n arg_8=1000,\n arg_9='hat-sql',\n arg_10=None,\n arg_11=arg_12):\n '''This runs parallel variable feature extraction for a directory of LCs.\n\n Parameters\n ----------\n\n lcdir : str\n The directory of light curve files to process.\n\n outdir : str\n The directory where the output varfeatures pickle files will be written.\n\n fileglob : str or None\n The file glob to use when looking for light curve files in `lcdir`. If\n None, the default file glob associated for this LC format will be used.\n\n maxobjects : int\n The number of LCs to process from `lclist`.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n mindet : int\n The minimum number of LC points required to generate variability\n features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of input LC file name : the generated\n variability features pickles for each of the input LCs, with results for\n each magcol in the input `magcol` or light curve format's default\n `magcol` list.\n\n '''\n\n try:\n arg_13 = get_lcformat(arg_9,\n use_lcformat_dir=arg_10)\n if arg_13:\n (arg_14, arg_15,\n arg_16, arg_17, arg_18,\n arg_19, arg_20) = arg_13\n else:\n LOGERROR(\"can't figure out the light curve format\")\n return None\n except Exception as e:\n LOGEXCEPTION(\"can't figure out the light curve format\")\n return None\n\n if not arg_2:\n arg_2 = arg_14\n\n # now find the files\n LOGINFO('searching for %s light curves in %s ...' 
% (arg_9, arg_0))\n\n if arg_7 is False:\n arg_21 = glob.glob(os.path.join(arg_0, arg_2))\n\n else:\n # use recursive glob for Python 3.5+\n if sys.version_info[:2] > (3,4):\n\n arg_21 = glob.glob(os.path.join(arg_0,\n '**',\n arg_2),\n arg_7=True)\n\n # otherwise, use os.walk and glob\n else:\n\n # use os.walk to go through the directories\n arg_22 = os.walk(arg_0)\n arg_21 = []\n\n for arg_23, arg_24, arg_25 in arg_22:\n for arg_26 in arg_24:\n arg_27 = os.path.join(arg_23,\n arg_26,\n arg_2)\n arg_28 = glob.glob(arg_27)\n\n if arg_28:\n arg_21.extend(arg_28)\n\n\n # now that we have all the files, process them\n if arg_21 and len(arg_21) > 0:\n\n LOGINFO('found %s light curves, getting varfeatures...' %\n len(arg_21))\n\n return parallel_varfeatures(arg_21,\n arg_1,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11)\n\n else:\n\n LOGERROR('no light curve files in %s format found in %s' % (arg_9,\n arg_0))\n return None"} +{"_id": "doc_9247", "title": "", "text": "def Func(arg_0, arg_1=None):\n '''This is just a shortened form of the function above for convenience.\n\n This only handles pickle files as input.\n\n Parameters\n ----------\n\n checkplotin : str\n File name of a checkplot pickle file to convert to a PNG.\n\n extrarows : list of tuples\n This is a list of 4-element tuples containing paths to PNG files that\n will be added to the end of the rows generated from the checkplotin\n pickle/dict. Each tuple represents a row in the final output PNG\n file. If there are less than 4 elements per tuple, the missing elements\n will be filled in with white-space. If there are more than 4 elements\n per tuple, only the first four will be used.\n\n The purpose of this kwarg is to incorporate periodograms and phased LC\n plots (in the form of PNGs) generated from an external period-finding\n function or program (like VARTOOLS) to allow for comparison with\n astrobase results.\n\n NOTE: the PNG files specified in `extrarows` here will be added to those\n already present in the input `checkplotdict['externalplots']` if that is\n None because you passed in a similar list of external plots to the\n :py:func:`astrobase.checkplot.pkl.checkplot_pickle` function earlier. 
In\n this case, `extrarows` can be used to add even more external plots if\n desired.\n\n Each external plot PNG will be resized to 750 x 480 pixels to fit into\n an output image cell.\n\n By convention, each 4-element tuple should contain:\n\n - a periodiogram PNG\n - phased LC PNG with 1st best peak period from periodogram\n - phased LC PNG with 2nd best peak period from periodogram\n - phased LC PNG with 3rd best peak period from periodogram\n\n Example of extrarows::\n\n [('/path/to/external/bls-periodogram.png',\n '/path/to/external/bls-phasedlc-plot-bestpeak.png',\n '/path/to/external/bls-phasedlc-plot-peak2.png',\n '/path/to/external/bls-phasedlc-plot-peak3.png'),\n ('/path/to/external/pdm-periodogram.png',\n '/path/to/external/pdm-phasedlc-plot-bestpeak.png',\n '/path/to/external/pdm-phasedlc-plot-peak2.png',\n '/path/to/external/pdm-phasedlc-plot-peak3.png'),\n ...]\n\n Returns\n -------\n\n str\n The absolute path to the generated checkplot PNG.\n\n '''\n\n if arg_0.endswith('.gz'):\n arg_2 = arg_0.replace('.pkl.gz','.png')\n else:\n arg_2 = arg_0.replace('.pkl','.png')\n\n return checkplot_pickle_to_png(arg_0, arg_2, arg_1=arg_1)"} +{"_id": "doc_9248", "title": "", "text": "def Func(arg_0, arg_1, arg_2, arg_3):\n '''This is a flare model function, similar to Kowalski+ 2011.\n\n From the paper by Pitkin+ 2014:\n http://adsabs.harvard.edu/abs/2014MNRAS.445.2268P\n\n Parameters\n ----------\n\n flareparams : list of float\n This defines the flare model::\n\n [amplitude,\n flare_peak_time,\n rise_gaussian_stdev,\n decay_time_constant]\n\n where:\n\n `amplitude`: the maximum flare amplitude in mags or flux. If flux, then\n amplitude should be positive. If mags, amplitude should be negative.\n\n `flare_peak_time`: time at which the flare maximum happens.\n\n `rise_gaussian_stdev`: the stdev of the gaussian describing the rise of\n the flare.\n\n `decay_time_constant`: the time constant of the exponential fall of the\n flare.\n\n times,mags,errs : np.array\n The input time-series of measurements and associated errors for which\n the model will be generated. The times will be used to generate\n model mags.\n\n Returns\n -------\n\n (modelmags, times, mags, errs) : tuple\n Returns the model mags evaluated at the input time values. Also returns\n the input `times`, `mags`, and `errs`.\n\n '''\n\n (arg_4, arg_5,\n arg_6, arg_7) = arg_0\n\n arg_8 = np.median(arg_2)\n arg_9 = np.full_like(arg_1, arg_8)\n\n # before peak gaussian rise...\n arg_9[arg_1 < arg_5] = (\n arg_2[arg_1 < arg_5] +\n arg_4 * np.exp(\n -((arg_1[arg_1 < arg_5] -\n arg_5) *\n (arg_1[arg_1 < arg_5] -\n arg_5)) /\n (2.0*arg_6*arg_6)\n )\n )\n\n # after peak exponential decay...\n arg_9[arg_1 > arg_5] = (\n arg_2[arg_1 > arg_5] +\n arg_4 * np.exp(\n -((arg_1[arg_1 > arg_5] -\n arg_5)) /\n (arg_7)\n )\n )\n\n return arg_9, arg_1, arg_2, arg_3"} +{"_id": "doc_9249", "title": "", "text": "def Func():\n \"\"\"This checks the AWS instance data URL to see if there's a pending\n shutdown for the instance.\n\n This is useful for AWS spot instances. 
If there is a pending shutdown posted\n to the instance data URL, we'll use the result of this function break out of\n the processing loop and shut everything down ASAP before the instance dies.\n\n Returns\n -------\n\n bool\n - True if the instance is going to die soon.\n - False if the instance is still safe.\n\n \"\"\"\n\n arg_0 = 'http://169.254.169.254/latest/meta-data/spot/instance-action'\n\n try:\n arg_1 = requests.get(arg_0, timeout=1.0)\n arg_1.raise_for_status()\n\n arg_2 = arg_1.json()\n if 'action' in arg_2 and arg_2['action'] in ('stop',\n 'terminate',\n 'hibernate'):\n arg_3 = arg_2['time']\n LOGWARNING('instance is going to %s at %s' % (arg_2['action'],\n arg_3))\n\n arg_1.close()\n return True\n else:\n arg_1.close()\n return False\n\n except HTTPError as e:\n arg_1.close()\n return False\n\n except Exception as e:\n arg_1.close()\n return False"} +{"_id": "doc_9250", "title": "", "text": "def Func(\n arg_0=None,\n arg_1=None,\n arg_2=None,\n arg_3=None,\n arg_4=None,\n arg_5=None,\n arg_6=None,\n arg_7=None,\n arg_8=None,\n arg_9=True,\n arg_10=True,\n arg_11=True,\n arg_12=False,\n arg_13=None,\n arg_14=None\n):\n \"\"\"This wraps the function above to allow for loading previous state from a\n file.\n\n Parameters\n ----------\n\n use_saved_state : str or None\n This is the path to the saved state pickle file produced by a previous\n run of `runcp_producer_loop`. Will get all of the arguments to run\n another instance of the loop from that pickle file. If this is None, you\n MUST provide all of the appropriate arguments to that function.\n\n lightcurve_list : str or list of str or None\n This is either a string pointing to a file containing a list of light\n curves filenames to process or the list itself. The names must\n correspond to the full filenames of files stored on S3, including all\n prefixes, but not include the 's3:///' bit (these will be\n added automatically).\n\n input_queue : str or None\n This is the name of the SQS queue which will receive processing tasks\n generated by this function. The queue URL will automatically be obtained\n from AWS.\n\n input_bucket : str or None\n The name of the S3 bucket containing the light curve files to process.\n\n result_queue : str or None\n This is the name of the SQS queue that this function will listen to for\n messages from the workers as they complete processing on their input\n elements. This function will attempt to match input sent to the\n `input_queue` with results coming into the `result_queue` so it knows\n how many objects have been successfully processed. If this function\n receives task results that aren't in its own input queue, it will\n acknowledge them so they complete successfully, but not download them\n automatically. This handles leftover tasks completing from a previous\n run of this function.\n\n result_bucket : str or None\n The name of the S3 bucket which will receive the results from the\n workers.\n\n pfresult_list : list of str or None\n This is a list of periodfinder result pickle S3 URLs associated with\n each light curve. If provided, this will be used to add in phased light\n curve plots to each checkplot pickle. 
If this is None, the worker loop\n will produce checkplot pickles that only contain object information,\n neighbor information, and unphased light curves.\n\n runcp_kwargs : dict or None\n This is a dict used to pass any extra keyword arguments to the\n `lcproc.checkplotgen.runcp` function that will be run by the worker\n loop.\n\n process_list_slice : list or None\n This is used to index into the input light curve list so a subset of the\n full list can be processed in this specific run of this function.\n\n Use None for a slice index elem to emulate single slice spec behavior:\n\n process_list_slice = [10, None] -> lightcurve_list[10:]\n process_list_slice = [None, 500] -> lightcurve_list[:500]\n\n purge_queues_when_done : bool or None\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C), all outstanding elements in the\n input/output queues that have not yet been acknowledged by workers or by\n this function will be purged. This effectively cancels all outstanding\n work.\n\n delete_queues_when_done : bool or None\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C'), all outstanding work items will be\n purged from the input/queues and the queues themselves will be deleted.\n\n download_when_done : bool or None\n If this is True, the generated checkplot pickle for each input work item\n will be downloaded immediately to the current working directory when the\n worker functions report they're done with it.\n\n save_state_when_done : bool or None\n If this is True, will save the current state of the work item queue and\n the work items acknowledged as completed to a pickle in the current\n working directory. Call the `Func` function\n below to resume processing from this saved state later.\n\n s3_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its S3 download operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n sqs_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its SQS operations. 
Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n Returns\n -------\n\n dict or str\n Returns the current work state as a dict or str path to the generated\n work state pickle depending on if `save_state_when_done` is True.\n\n \"\"\"\n\n if arg_0 is not None and os.path.exists(arg_0):\n\n with open(arg_0,'rb') as infd:\n arg_15 = pickle.load(infd)\n\n # run the producer loop using the saved state's todo list\n return runcp_producer_loop(\n arg_15['in_progress'],\n arg_15['args'][1],\n arg_15['args'][2],\n arg_15['args'][3],\n arg_15['args'][4],\n **arg_15['kwargs']\n )\n\n else:\n\n return runcp_producer_loop(\n arg_1,\n arg_2,\n arg_3,\n arg_4,\n arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_14=arg_14\n )"} +{"_id": "doc_9251", "title": "", "text": "def Func(arg_0):\n '''\n This is the worker for running checkplots.\n\n Parameters\n ----------\n\n task : tuple\n This is of the form: (pfpickle, outdir, lcbasedir, kwargs).\n\n Returns\n -------\n\n list of str\n The list of checkplot pickles returned by the `runcp` function.\n\n '''\n\n arg_1, arg_2, arg_3, arg_4 = arg_0\n\n try:\n\n return runcp(arg_1, arg_2, arg_3, **arg_4)\n\n except Exception as e:\n\n LOGEXCEPTION(' could not make checkplots for %s: %s' % (arg_1, e))\n return None"} +{"_id": "doc_9252", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3=False,\n arg_4=None,\n arg_5=False,\n arg_6=None,\n arg_7=60.0,\n arg_8=None,\n arg_9=60.0,\n arg_10=5,\n arg_11=True,\n arg_12=None,\n arg_13=3.0,\n arg_14=10.0,\n arg_15=99,\n arg_16='hat-sql',\n arg_17=None,\n arg_18=None,\n arg_19=None,\n arg_20=None,\n arg_21=False,\n arg_22=None,\n arg_23=None,\n arg_24=None,\n arg_25=None,\n arg_26=None,\n arg_27=arg_28,\n):\n '''This drives the parallel execution of `runcp` for a list of periodfinding\n result pickles.\n\n Parameters\n ----------\n\n pfpicklelist : list of str or list of Nones\n This is the list of the filenames of the period-finding result pickles\n to process. To make checkplots using the light curves directly, set this\n to a list of Nones with the same length as the list of light curve files\n that you provide in `lcfnamelist`.\n\n outdir : str\n The directory the checkplot pickles will be written to.\n\n lcbasedir : str\n The base directory that this function will look in to find the light\n curves pointed to by the period-finding result files. If you're using\n `lcfnamelist` to provide a list of light curve filenames directly, this\n arg is ignored.\n\n lcfnamelist : list of str or None\n If this is provided, it must be a list of the input light curve\n filenames to process. These can either be associated with each input\n period-finder result pickle, or can be provided standalone to make\n checkplots without phased LC plots in them. In the second case, you must\n set `pfpicklelist` to a list of Nones that matches the length of\n `lcfnamelist`.\n\n cprenorm : bool\n Set this to True if the light curves should be renormalized by\n `checkplot.checkplot_pickle`. This is set to False by default because we\n do our own normalization in this function using the light curve's\n registered normalization function and pass the normalized times, mags,\n errs to the `checkplot.checkplot_pickle` function.\n\n lclistpkl : str or dict\n This is either the filename of a pickle or the actual dict produced by\n lcproc.make_lclist. 
This is used to gather neighbor information.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n makeneighborlcs : bool\n If True, will make light curve and phased light curve plots for all\n neighbors found in the object collection for each input object.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond.\n\n If this is set to True, the default settings for the external requests\n will then become::\n\n skyview_lookup = False\n skyview_timeout = 10.0\n skyview_retry_failed = False\n dust_timeout = 10.0\n gaia_submit_timeout = 7.0\n gaia_max_timeout = 10.0\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n If this is a float, will run in \"fast\" mode with the provided timeout\n value in seconds and the following settings::\n\n skyview_lookup = True\n skyview_timeout = fast_mode\n skyview_retry_failed = False\n dust_timeout = fast_mode\n gaia_submit_timeout = 0.66*fast_mode\n gaia_max_timeout = fast_mode\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str or None\n This sets the GAIA mirror to use. This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n xmatchinfo : str or dict\n This is either the xmatch dict produced by the function\n `load_xmatch_external_catalogs` above, or the path to the xmatch info\n pickle file produced by that function.\n\n xmatchradiusarcsec : float\n This is the cross-matching radius to use in arcseconds.\n\n minobservations : int\n The minimum of observations the input object's mag/flux time-series must\n have for this function to plot its light curve and phased light\n curve. If the object has less than this number, no light curves will be\n plotted, but the checkplotdict will still contain all of the other\n information.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in generating this checkplot.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in generating this checkplot.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in generating this checkplot.\n\n skipdone : bool\n This indicates if this function will skip creating checkplots that\n already exist corresponding to the current `objectid` and `magcol`. If\n `skipdone` is set to True, this will be done.\n\n done_callback : Python function or None\n This is used to provide a function to execute after the checkplot\n pickles are generated. This is useful if you want to stream the results\n of checkplot making to some other process, e.g. directly running an\n ingestion into an LCC-Server collection. The function will always get\n the list of the generated checkplot pickles as its first arg, and all of\n the kwargs for runcp in the kwargs dict. Additional args and kwargs can\n be provided by giving a list in the `done_callbacks_args` kwarg and a\n dict in the `done_callbacks_kwargs` kwarg.\n\n NOTE: the function you pass in here should be pickleable by normal\n Python if you want to use it with the Func and Func_lcdir\n functions below.\n\n done_callback_args : tuple or None\n If not None, contains any args to pass into the `done_callback`\n function.\n\n done_callback_kwargs : dict or None\n If not None, contains any kwargs to pass into the `done_callback`\n function.\n\n liststartindex : int\n The index of the `pfpicklelist` (and `lcfnamelist` if provided) to start\n working at.\n\n maxobjects : int\n The maximum number of objects to process in this run. 
Use this with\n `liststartindex` to effectively distribute working on a large list of\n input period-finding result pickles (and light curves if `lcfnamelist`\n is also provided) over several sessions or machines.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n generation process.\n\n Returns\n -------\n\n dict\n This returns a dict with keys = input period-finding pickles and vals =\n list of the corresponding checkplot pickles produced.\n\n '''\n\n # work around the Darwin segfault after fork if no network activity in\n # main thread bug: https://bugs.python.org/issue30385#msg293958\n if sys.platform == 'darwin':\n import requests\n requests.get('http://captive.apple.com/hotspot-detect.html')\n\n if not os.path.exists(arg_1):\n os.mkdir(arg_1)\n\n # handle the start and end indices\n if (arg_25 is not None) and (arg_26 is None):\n arg_0 = arg_0[arg_25:]\n if arg_4 is not None:\n arg_4 = arg_4[arg_25:]\n\n elif (arg_25 is None) and (arg_26 is not None):\n arg_0 = arg_0[:arg_26]\n if arg_4 is not None:\n arg_4 = arg_4[:arg_26]\n\n elif (arg_25 is not None) and (arg_26 is not None):\n arg_0 = (\n arg_0[arg_25:arg_25+arg_26]\n )\n if arg_4 is not None:\n arg_4 = arg_4[arg_25:arg_25+arg_26]\n\n # if the lcfnamelist is not provided, create a dummy\n if arg_4 is None:\n arg_4 = [None]*len(arg_0)\n\n arg_29 = [(x, arg_1, arg_2,\n {'lcformat':arg_16,\n 'lcformatdir':arg_17,\n 'lcfname':y,\n 'timecols':arg_18,\n 'magcols':arg_19,\n 'errcols':arg_20,\n 'lclistpkl':arg_6,\n 'gaia_max_timeout':arg_7,\n 'gaia_mirror':arg_8,\n 'nbrradiusarcsec':arg_9,\n 'maxnumneighbors':arg_10,\n 'makeneighborlcs':arg_11,\n 'xmatchinfo':arg_12,\n 'xmatchradiusarcsec':arg_13,\n 'sigclip':arg_14,\n 'minobservations':arg_15,\n 'skipdone':arg_21,\n 'cprenorm':arg_5,\n 'fast_mode':arg_3,\n 'done_callback':arg_22,\n 'done_callback_args':arg_23,\n 'done_callback_kwargs':arg_24}) for\n x,y in zip(arg_0, arg_4)]\n\n arg_30 = []\n arg_31 = []\n\n with ProcessPoolExecutor(max_workers=arg_27) as executor:\n arg_30 = executor.map(runcp_worker, arg_29)\n\n arg_31 = [x for x in arg_30]\n\n executor.shutdown()\n return arg_31"} +{"_id": "doc_9253", "title": "", "text": "def Func(arg_0,\n arg_1,\n arg_2,\n arg_3='periodfinding-*.pkl*',\n arg_4=None,\n arg_5=False,\n arg_6=60.0,\n arg_7=5,\n arg_8=True,\n arg_9=False,\n arg_10=60.0,\n arg_11=None,\n arg_12=None,\n arg_13=3.0,\n arg_14=99,\n arg_15=10.0,\n arg_16='hat-sql',\n arg_17=None,\n arg_18=None,\n arg_19=None,\n arg_20=None,\n arg_21=False,\n arg_22=None,\n arg_23=None,\n arg_24=None,\n arg_25=None,\n arg_26=32):\n\n '''This drives the parallel execution of `runcp` for a directory of\n periodfinding pickles.\n\n Parameters\n ----------\n\n pfpickledir : str\n This is the directory containing all of the period-finding pickles to\n process.\n\n outdir : str\n The directory the checkplot pickles will be written to.\n\n lcbasedir : str\n The base directory that this function will look in to find the light\n curves pointed to by the period-finding result files. If you're using\n `lcfnamelist` to provide a list of light curve filenames directly, this\n arg is ignored.\n\n pkpickleglob : str\n This is a UNIX file glob to select period-finding result pickles in the\n specified `pfpickledir`.\n\n lclistpkl : str or dict\n This is either the filename of a pickle or the actual dict produced by\n lcproc.make_lclist. 
This is used to gather neighbor information.\n\n cprenorm : bool\n Set this to True if the light curves should be renormalized by\n `checkplot.checkplot_pickle`. This is set to False by default because we\n do our own normalization in this function using the light curve's\n registered normalization function and pass the normalized times, mags,\n errs to the `checkplot.checkplot_pickle` function.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n makeneighborlcs : bool\n If True, will make light curve and phased light curve plots for all\n neighbors found in the object collection for each input object.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond.\n\n If this is set to True, the default settings for the external requests\n will then become::\n\n skyview_lookup = False\n skyview_timeout = 10.0\n skyview_retry_failed = False\n dust_timeout = 10.0\n gaia_submit_timeout = 7.0\n gaia_max_timeout = 10.0\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n If this is a float, will run in \"fast\" mode with the provided timeout\n value in seconds and the following settings::\n\n skyview_lookup = True\n skyview_timeout = fast_mode\n skyview_retry_failed = False\n dust_timeout = fast_mode\n gaia_submit_timeout = 0.66*fast_mode\n gaia_max_timeout = fast_mode\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str or None\n This sets the GAIA mirror to use. This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n xmatchinfo : str or dict\n This is either the xmatch dict produced by the function\n `load_xmatch_external_catalogs` above, or the path to the xmatch info\n pickle file produced by that function.\n\n xmatchradiusarcsec : float\n This is the cross-matching radius to use in arcseconds.\n\n minobservations : int\n The minimum of observations the input object's mag/flux time-series must\n have for this function to plot its light curve and phased light\n curve. If the object has less than this number, no light curves will be\n plotted, but the checkplotdict will still contain all of the other\n information.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in generating this checkplot.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in generating this checkplot.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in generating this checkplot.\n\n skipdone : bool\n This indicates if this function will skip creating checkplots that\n already exist corresponding to the current `objectid` and `magcol`. If\n `skipdone` is set to True, this will be done.\n\n done_callback : Python function or None\n This is used to provide a function to execute after the checkplot\n pickles are generated. This is useful if you want to stream the results\n of checkplot making to some other process, e.g. directly running an\n ingestion into an LCC-Server collection. The function will always get\n the list of the generated checkplot pickles as its first arg, and all of\n the kwargs for runcp in the kwargs dict. Additional args and kwargs can\n be provided by giving a list in the `done_callbacks_args` kwarg and a\n dict in the `done_callbacks_kwargs` kwarg.\n\n NOTE: the function you pass in here should be pickleable by normal\n Python if you want to use it with the parallel_cp and parallel_cp_lcdir\n functions below.\n\n done_callback_args : tuple or None\n If not None, contains any args to pass into the `done_callback`\n function.\n\n done_callback_kwargs : dict or None\n If not None, contains any kwargs to pass into the `done_callback`\n function.\n\n maxobjects : int\n The maximum number of objects to process in this run.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n generation process.\n\n Returns\n -------\n\n dict\n This returns a dict with keys = input period-finding pickles and vals =\n list of the corresponding checkplot pickles produced.\n\n '''\n\n arg_27 = sorted(glob.glob(os.path.join(arg_0, arg_3)))\n\n LOGINFO('found %s period-finding pickles, running cp...' 
%\n len(arg_27))\n\n return parallel_cp(arg_27,\n arg_1,\n arg_2,\n arg_9=arg_9,\n arg_4=arg_4,\n arg_6=arg_6,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_15=arg_15,\n arg_14=arg_14,\n arg_5=arg_5,\n arg_25=arg_25,\n arg_16=arg_16,\n arg_17=arg_17,\n arg_18=arg_18,\n arg_19=arg_19,\n arg_20=arg_20,\n arg_21=arg_21,\n arg_26=arg_26,\n arg_22=arg_22,\n arg_23=arg_23,\n arg_24=arg_24)"} +{"_id": "doc_9254", "title": "", "text": "def Func(arg_0):\n '''\n This runs the runpf function.\n\n '''\n\n (arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7,\n arg_8, arg_9, arg_10, arg_11, arg_12, arg_13,\n arg_14) = arg_0\n\n if os.path.exists(arg_1):\n arg_15 = runpf(arg_1,\n arg_2,\n arg_3=arg_3,\n arg_4=arg_4,\n arg_5=arg_5,\n arg_6=arg_6,\n arg_7=arg_7,\n arg_8=arg_8,\n arg_9=arg_9,\n arg_10=arg_10,\n arg_11=arg_11,\n arg_12=arg_12,\n arg_13=arg_13,\n arg_14=arg_14)\n return arg_15\n else:\n LOGERROR('LC does not exist for requested file %s' % arg_1)\n return None"} +{"_id": "doc_9255", "title": "", "text": "def Func(\n arg_0,\n arg_1,\n arg_2,\n arg_3='varfeatures-*.pkl',\n arg_4=arg_5,\n arg_6=None,\n arg_7=None,\n arg_8='binary',\n):\n '''This collects variability features into arrays for use with the classifer.\n\n Parameters\n ----------\n\n featuresdir : str\n This is the directory where all the varfeatures pickles are. Use\n `pklglob` to specify the glob to search for. The `varfeatures` pickles\n contain objectids, a light curve magcol, and features as dict\n key-vals. The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used\n to produce these.\n\n magcol : str\n This is the key in each varfeatures pickle corresponding to the magcol\n of the light curve the variability features were extracted from.\n\n outfile : str\n This is the filename of the output pickle that will be written\n containing a dict of all the features extracted into np.arrays.\n\n pklglob : str\n This is the UNIX file glob to use to search for varfeatures pickle files\n in `featuresdir`.\n\n featurestouse : list of str\n Each varfeatures pickle can contain any combination of non-periodic,\n stellar, and periodic features; these must have the same names as\n elements in the list of strings provided in `featurestouse`. This tries\n to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by\n default. If `featurestouse` is provided as a list, gets only the\n features listed in this kwarg instead.\n\n maxobjects : int or None\n The controls how many pickles from the featuresdir to process. If None,\n will process all varfeatures pickles.\n\n labeldict : dict or None\n If this is provided, it must be a dict with the following key:val list::\n\n '':